# Load the example data set (presumably the Nicholas et al. (2019)
# multiple-sclerosis trial data shipped with the package -- confirm
# against the package documentation):
data("NicholasEtAl2019")
# Preview the first rows of the data frame:
head(NicholasEtAl2019)
if (FALSE) {
  # Compute effect sizes (logarithmic odds) from the count data
  # (note: the effect of potential drop-outs is ignored here):
  logodds <- escalc(measure="PLO",
                    xi=patients*(prog.percent/100), ni=patients,
                    slab=study, data=NicholasEtAl2019)

  # Show the individual log-odds estimates in a forest plot:
  forestplot(logodds, zero=NA, xlab="log(odds)",
             title="Nicholas et al. (2019) data")

  # Regressor matrix: the "year" covariate is centred at 2000,
  # so the intercept corresponds to the log-odds at year=2000:
  X <- cbind("intercept2000" = 1, "year" = (logodds$year-2000))

  # Fit the meta-regression model and inspect the result:
  fit <- bmr(logodds, X=X)
  print(fit)
  plot(fit)

  # Illustrate the data and the time trend;
  # regressor matrix covering the years 1989--2019 for prediction:
  newx <- cbind(1, (1989:2019)-2000)

  # Credible intervals for the mean effect:
  pred <- cbind("median"=fit$qpred(0.5, x=newx),
                fit$pred.interval(x=newx))
  # Prediction intervals (mean=FALSE yields intervals for a "new" study):
  map <- cbind("median"=fit$qpred(0.5, x=newx, mean=FALSE),
               fit$pred.interval(x=newx, mean=FALSE))

  # Empty plot spanning the relevant x- and y-ranges:
  plot(range(newx[,2]), range(map), type="n",
       xlab="publication year - 2000", ylab="log(odds)")

  # The 26 studies' estimates with 95 percent confidence intervals:
  halfwidth <- qnorm(0.975) * sqrt(logodds$vi)
  matlines(rbind(logodds$year, logodds$year)-2000,
           rbind(logodds$yi - halfwidth, logodds$yi + halfwidth),
           col=1, lty=1)
  points(logodds$year-2000, logodds$yi)

  # Overlay the trend lines (median plus 95 percent intervals);
  # blue = prediction intervals, red = credible intervals for the mean:
  matlines(newx[,2], map, col="blue", lty=c(1,2,2))
  matlines(newx[,2], pred, col="red", lty=c(1,2,2))
  legend("topright", pch=15, col=c("red","blue"), c("mean","prediction"))
}
# Run the code above in your browser using DataLab