### copy data into 'dat' and examine data
### ('dat.bakdash2021' is assumed to be attached from the metafor/metadat
### data, e.g. via data(dat.bakdash2021) -- TODO confirm in this project)
dat <- dat.bakdash2021
### preview selected columns by position (identifiers, effect sizes,
### variances; positions assume the dataset's documented column layout)
head(dat[c(1,2,6,8:12)])
### everything below is wrapped in if (FALSE) so the (slow, plotting-heavy)
### analysis is not executed automatically; run the body manually
if (FALSE) {
#start.time <- Sys.time()
### load metafor
library(metafor)
### multilevel meta-analytic model to get the overall pooled effect:
### Fisher's z effect sizes ('es.z') with sampling variances ('vi.z'),
### random intercepts for outcomes nested within samples, and t-tests
### (test = "t") instead of the default z-tests
res.overall <- rma.mv(es.z, vi.z, mods = ~ 1,
random = ~ 1 | SampleID / Outcome,
data = dat,
test = "t")
### print the overall model results
res.overall
### get prediction interval
predict(res.overall)
### cluster-robust variance estimation (CRVE) / cluster-robust inference,
### clustering on SampleID to guard against misspecified dependence
### among effect sizes from the same sample
res.overall.crve <- robust(res.overall, cluster = SampleID)
res.overall.crve
### get prediction interval (from the cluster-robust model; used later
### for the CI and PI rows of the forest plot)
res.overall.crve.pred <- predict(res.overall.crve)
res.overall.crve.pred
### multilevel meta-analytic model for SA measures:
### mods = ~ 0 + SA.measure.type drops the intercept, so the model
### estimates a separate mean effect for each SA measure type
res.sa <- rma.mv(es.z, vi.z, mods = ~ 0 + SA.measure.type,
random = ~ 1 | SampleID / Outcome,
data = dat,
test = "t")
res.sa
### cluster-robust variance estimation (CRVE) / cluster-robust inference
res.sa.crve <- robust(res.sa, cluster = SampleID)
res.sa.crve
### profile likelihood plots for the variance components
### (two stacked panels, one per sigma^2 component)
par(mfrow=c(2,1))
profile(res.sa.crve, progbar = FALSE)
### format and combine output of meta-analytic models for the forest plot;
### the pooled estimate appears twice so the plot gets both a CI row and
### a PI row for the overall effect
all.z <- c(res.sa.crve$beta, # SA measures
res.overall.crve$beta, # pooled effect for confidence interval (CI)
res.overall.crve$beta) # pooled effect for prediction interval (PI)
all.ci.lower <- c(res.sa.crve$ci.lb, # SA measures
res.overall.crve.pred$ci.lb, # pooled effect, lower CI
res.overall.crve.pred$pi.lb) # pooled effect, lower PI
all.ci.upper <- c(res.sa.crve$ci.ub, # SA measures
res.overall.crve.pred$ci.ub, # pooled effect, upper CI
res.overall.crve.pred$pi.ub) # pooled effect, upper PI
### note: there is no p-value for the PI, so all.pvals is one element
### shorter than all.z (an empty string is appended at plotting time)
all.pvals <- c(res.sa.crve$pval, res.overall.crve$pval)
### row labels: one per SA measure type (sorted, so they line up with the
### alphabetical factor-level order used by the model), plus two summary rows
all.labels <- c(sort(unique(dat$SA.measure.type)), "Overall", "95% Prediction Interval")
### format p-values for display in the forest plot
###
### input: numeric vector of p-values (assumed to lie in [0, 1])
### value: character vector of the same length:
###   - "< 0.001" for p < 0.001
###   - "< 0.01"  for p in [0.001, 0.01)
###   - "< 0.05"  for p in [0.045, 0.05) (these would otherwise round
###     up and be displayed as the misleading "0.05")
###   - "0.99"    for p that would round to 1.00 (never report p = 1)
###   - a two-decimal string (trailing zero kept) otherwise
pvals.round <- function(input) {
   ### return the result directly (the original assigned back to 'input',
   ### which only returned the value invisibly); use >= 1 instead of an
   ### exact floating-point comparison against 1.00
   ifelse(input < 0.001, "< 0.001",
   ifelse(input < 0.01, "< 0.01",
   ifelse(input < 0.05 & input >= 0.045, "< 0.05",
   ifelse(round(input, 2) >= 1, "0.99",
   sprintf("%.2f", round(input, 2))))))
}
### format the p-values for display
all.pvals.rounded <- pvals.round(all.pvals)
### forest plot (estimates and CI bounds are back-transformed from
### Fisher's z to correlations via transf.ztor)
plot.vals <- data.frame(all.labels, all.z, all.ci.lower, all.ci.upper)
par(mfrow=c(1,1), cex = 1.05)
forest(plot.vals$all.z,
ci.lb = plot.vals$all.ci.lower,
ci.ub = plot.vals$all.ci.upper,
slab = plot.vals$all.labels,
psize = 1,
efac = 0, xlim = c(-1.8, 2.5), clim = c(-1, 1), # clim clamps bounds to [-1, 1]
transf = transf.ztor, # transform z to r
at = seq(-0.5, 1, by = 0.25),
xlab = expression("Correlation Coefficient"~"("*italic('r')*")"),
main = "\n\n\nSA Measures",
### p-value column; "" pads the PI row, which has no p-value
ilab = c(all.pvals.rounded, ""), ilab.xpos = 2.45, ilab.pos = 2.5,
digits = 2, refline = 0, annotate = FALSE, header = FALSE)
### keep trailing zero using sprintf (back-transform z to r first)
output <- cbind(sprintf("%.2f", round(transf.ztor(plot.vals$all.z), 2)),
sprintf("%.2f", round(transf.ztor(plot.vals$all.ci.lower), 2)),
sprintf("%.2f", round(transf.ztor(plot.vals$all.ci.upper), 2)))
### alignment kludge: build "est [lb, ub]" strings and place them manually
annotext <- apply(output, 1, function(x) {paste0(" ", x[1], " [", x[2],", ", x[3], "]")})
### NOTE(review): row positions 12:1 (and indices [11]/[12] below) assume
### exactly 10 SA measure types plus the two summary rows -- confirm if
### the dataset changes
text( 1.05, 12:1, annotext, pos = 4, cex = 1.05)
### manual column headers at the top of the plot
text(-1.475, 14.00, "SA Measure", cex = 1.05)
text( 2.30, 14.00, substitute(paste(italic('p-value'))), cex = 1.05)
text( 1.55, 14.00, "Correlation [95% CI]", cex = 1.05)
### horizontal rule separating the SA-measure rows from the summary rows
abline(h = 2.5)
### black polygon for overall mean CIs (row 11 = "Overall")
addpoly(all.z[11], ci.lb = all.ci.lower[11], ci.ub = all.ci.upper[11],
rows = 2, annotate = FALSE, efac = 1.5, transf = transf.ztor)
### white polygon for PI (row 12 = "95% Prediction Interval")
addpoly(all.z[12], ci.lb = all.ci.lower[12], ci.ub = all.ci.upper[12],
rows = 1, col = "white", border = "black",
annotate = FALSE, efac = 1.5, transf = transf.ztor)
par(mfrow=c(1,1), cex = 1) # reset graph parameters to default
### confidence intervals for the variance components of the overall model
re.CI.variances <- confint(res.overall)
re.CI.variances
### extract the "random" tables for the two components
### (element 1 = between-sample sigma^2, element 2 = within-sample
### sigma^2, matching the SampleID / Outcome nesting above)
sigma1.z <- data.frame(re.CI.variances[[1]]["random"])
sigma2.z <- data.frame(re.CI.variances[[2]]["random"])
### fit model using alternative multivariate parameterization
### (~ inner | outer form), whose tau^2 equals the TOTAL heterogeneity
### variance (sum of the two sigma^2 components)
res.overall.alt <- rma.mv(es.z, vi.z, mods = ~ 1,
random = ~ factor(Outcome) | factor(SampleID),
data = dat,
test = "t")
### confidence intervals for the total amount of heterogeneity variance component
res.overall.alt.tau <- confint(res.overall.alt, tau2=1)$random
### I^2: http://www.metafor-project.org/doku.php/tips:i2_multilevel_multivariate
### (multilevel generalization of Higgins & Thompson's I^2: the "typical"
### sampling variance is computed from the hat-type matrix P below)
W <- diag(1/dat$vi.z)
X <- model.matrix(res.overall)
P <- W - W %*% X %*% solve(t(X) %*% W %*% X) %*% t(X) %*% W
### I^2 (variance due to heterogeneity): 61%
I2 <- 100 * res.overall.alt$tau2 /
(res.overall.alt$tau2 + (res.overall$k-res.overall$p)/sum(diag(P)))
I2
### 95% CI for I^2 using uncertainty around tau^2
### (columns 2 and 3 of the confint table = lower/upper bound of tau^2)
I2.CI.lb <- 100 * res.overall.alt.tau[1,2] /
(res.overall.alt.tau[1,2] + (res.overall$k-res.overall$p)/sum(diag(P)))
I2.CI.lb
I2.CI.ub <- 100 * res.overall.alt.tau[1,3] /
(res.overall.alt.tau[1,3] + (res.overall$k-res.overall$p)/sum(diag(P)))
I2.CI.ub
### total amount of heterogeneity on the SD scale (tau)
sqrt(res.overall.alt$tau2)
### heterogeneity table: tau (total, between, within) and I^2 with 95% CIs
table.heterogeneity <- data.frame(matrix(ncol = 3, nrow = 4))
colnames(table.heterogeneity) <- c("Parameter Value",
"Lower 95% CI",
"Upper 95% CI")
rownames(table.heterogeneity) <- c("Tau (Total)",
"Tau1 (Between paper)",
"Tau2 (Within paper)",
"I2 (%)")
### row 2 of each confint table holds the SD-scale estimate (tau/sigma)
### rather than the variance -- TODO confirm against confint() output
table.heterogeneity[1,] <- res.overall.alt.tau[2,]
table.heterogeneity[2,] <- sigma1.z[2,]
table.heterogeneity[3,] <- sigma2.z[2,]
table.heterogeneity[4,] <- c(I2, I2.CI.lb, I2.CI.ub)
round(table.heterogeneity, 2)
#end.time <- Sys.time()
#end.time - start.time
### end of the if (FALSE) block
}
# Run the code above in your browser using DataLab