# Example: contrast a well-trained IBLM model against a deliberately
# crippled one, then compare both (plus their plain-xgboost counterparts)
# on the held-out test set via pinball scores.
# NOTE(review): freMTPLmini is presumably the package's bundled mini French
# MTPL dataset — confirm against the package data docs.

# Split the data into train/validate/test partitions; seed fixed for
# reproducibility of the split.
df_list <- freMTPLmini |> split_into_train_validate_test(seed = 9000)

# Model 1 — training with plenty of rounds allowed (deep trees, up to
# 1000 boosting rounds), modelling ClaimRate with a Poisson family.
iblm_model1 <- train_iblm_xgb(
df_list,
response_var = "ClaimRate",
family = "poisson",
params = list(max_depth = 6),
nrounds = 1000
)
# Extract/train the plain xgboost model matching iblm_model1's setup
# (same data and hyperparameters, per the function name — TODO confirm).
xgb1 <- train_xgb_as_per_iblm(iblm_model1)

# Model 2 — training with severe restrictions (stumps only, 5 rounds);
# expected poorer results, used as the weak baseline in the comparison.
iblm_model2 <- train_iblm_xgb(
df_list,
response_var = "ClaimRate",
family = "poisson",
params = list(max_depth = 1),
nrounds = 5
)
# Plain-xgboost counterpart of the restricted model.
xgb2 <- train_xgb_as_per_iblm(iblm_model2)

# Comparison shows the poor training mirrored in the second set of
# models. trim = NA_real_ presumably disables trimming of extreme
# predictions — verify against get_pinball_scores() docs.
get_pinball_scores(
df_list$test,
iblm_model1,
trim = NA_real_,
additional_models = list(iblm2 = iblm_model2, xgb1 = xgb1, xgb2 = xgb2)
)
# Run the code above in your browser using DataLab