# glm_data(), power_marginaleffect() and fit_best_learner() are assumed to
# come from the postcard package
library(postcard)

# Generate a data set to use as an example
n <- 100
exposure_prob <- .5
dat_gaus <- glm_data(
  Y ~ 1 + 2 * X1 - X2 + 3 * A + 1.6 * A * X2,
  X1 = rnorm(n),
  X2 = rgamma(n, shape = 2),
  A = rbinom(n, size = 1, prob = exposure_prob),
  family = gaussian()
)
# ---------------------------------------------------------------------------
# Obtain out-of-sample (OOS) prediction using glm model
# ---------------------------------------------------------------------------
# Split the data in two halves and cross-fit: each half is predicted by the
# model fitted on the other half, so every prediction is out-of-sample
gaus1 <- dat_gaus[1:(n / 2), ]
gaus2 <- dat_gaus[(n / 2 + 1):n, ]

glm1 <- glm(Y ~ X1 + X2 + A, data = gaus1)
glm2 <- glm(Y ~ X1 + X2 + A, data = gaus2)

preds_glm1 <- predict(glm2, newdata = gaus1, type = "response")
preds_glm2 <- predict(glm1, newdata = gaus2, type = "response")
preds_glm <- c(preds_glm1, preds_glm2)
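
# The 2-fold split above generalises to K-fold cross-fitting; a minimal
# sketch using base R only, with an illustrative choice of K = 5
K <- 5
folds <- sample(rep(seq_len(K), length.out = n))
preds_cf <- numeric(n)
for (k in seq_len(K)) {
  fit_k <- glm(Y ~ X1 + X2 + A, data = dat_gaus[folds != k, ])
  preds_cf[folds == k] <- predict(
    fit_k, newdata = dat_gaus[folds == k, ], type = "response"
  )
}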
# Obtain power
power_marginaleffect(
  response = dat_gaus$Y,
  predictions = preds_glm,
  target_effect = 2,
  exposure_prob = exposure_prob
)
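
# power_marginaleffect() can be re-run over a grid of assumed target effects
# to see how the power estimate changes; a sketch (the grid of effects below
# is illustrative)
sapply(c(1, 1.5, 2, 2.5), function(te) {
  power_marginaleffect(
    response = dat_gaus$Y,
    predictions = preds_glm,
    target_effect = te,
    exposure_prob = exposure_prob
  )
})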
# ---------------------------------------------------------------------------
# Get OOS predictions using discrete super learner and adjust variance
# ---------------------------------------------------------------------------
learners <- list(
  mars = list(
    model = parsnip::set_engine(
      parsnip::mars(mode = "regression", prod_degree = 3),
      "earth"
    )
  ),
  lm = list(
    model = parsnip::set_engine(
      parsnip::linear_reg(),
      "lm"
    )
  )
)
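
# As a sketch, the learners list could presumably be extended with further
# parsnip specifications in the same model = ... structure, e.g. a random
# forest (assumes the ranger engine/package is installed)
learners_ext <- c(
  learners,
  list(
    rf = list(
      model = parsnip::set_engine(
        parsnip::rand_forest(mode = "regression", trees = 500),
        "ranger"
      )
    )
  )
)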
lrnr1 <- fit_best_learner(
  preproc = list(mod = Y ~ X1 + X2 + A),
  data = gaus1,
  learners = learners
)
lrnr2 <- fit_best_learner(
  preproc = list(mod = Y ~ X1 + X2 + A),
  data = gaus2,
  learners = learners
)

# Cross-fitted out-of-sample predictions from the selected learners
preds_lrnr1 <- dplyr::pull(predict(lrnr2, new_data = gaus1))
preds_lrnr2 <- dplyr::pull(predict(lrnr1, new_data = gaus2))
preds_lrnr <- c(preds_lrnr1, preds_lrnr2)
# Estimate the power and adjust the assumed variance in the "unknown"
# group 1 to be 20% larger than in group 0
power_marginaleffect(
  response = dat_gaus$Y,
  predictions = preds_lrnr,
  target_effect = 2,
  exposure_prob = exposure_prob,
  var1 = function(var0) 1.2 * var0
)
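
# A sketch of checking how sensitive the power estimate is to the assumed
# variance inflation in group 1 (the grid of inflation factors is
# illustrative)
sapply(c(1, 1.2, 1.5), function(infl) {
  power_marginaleffect(
    response = dat_gaus$Y,
    predictions = preds_lrnr,
    target_effect = 2,
    exposure_prob = exposure_prob,
    var1 = function(var0) infl * var0
  )
})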