library("polle")
### Single stage case
# Simulate 500 observations of single-stage data (reproducible via seed):
d1 <- sim_single_stage(5e2, seed=1)
# Construct the policy_data object: action variable "A",
# baseline covariates Z, B, L, and utility/outcome variable "U".
pd1 <- policy_data(d1,
action="A",
covariates=list("Z", "B", "L"),
utility="U")
pd1
# available history variable names for the outcome regression:
get_history_names(pd1)
# evaluating the static policy a=1 using outcome
# regression (type = "or") based on the given Q-model:
pe1 <- policy_eval(type = "or",
policy_data = pd1,
policy = policy_def(1, name = "A=1"),
q_model = q_glm(formula = ~A*.))
pe1
# getting the fitted Q-function values
head(predict(get_q_functions(pe1), pd1))
### Two stages:
# Simulate 500 observations of two-stage data (reproducible via seed):
d2 <- sim_two_stage(5e2, seed = 1)

# Build the policy_data object: one action per stage (A_1, A_2),
# stage-varying covariates L and C, and per-stage utility rewards.
pd2 <- policy_data(
  d2,
  action = c("A_1", "A_2"),
  covariates = list(
    L = c("L_1", "L_2"),
    C = c("C_1", "C_2")
  ),
  utility = c("U_1", "U_2", "U_3")
)
pd2

# available full history variable names at each stage:
get_history_names(pd2, stage = 1)
get_history_names(pd2, stage = 2)

# Evaluate the static policy a=1 via outcome regression,
# fitting a separate glm-based Q-model at each stage on the
# full history (q_full_history = TRUE):
pe2 <- policy_eval(
  type = "or",
  policy_data = pd2,
  policy = policy_def(1, reuse = TRUE, name = "A=1"),
  q_model = list(
    q_glm(~ A * L_1),
    q_glm(~ A * (L_1 + L_2))
  ),
  q_full_history = TRUE
)
pe2

# getting the fitted Q-function values
head(predict(get_q_functions(pe2), pd2))
# Run the code above in your browser using DataLab