# NOT RUN {
# Example 1: benchmarking a full cross product of tasks x learners x
# resamplings built with expand_grid()
tasks <- mlr_tasks$mget(c("iris", "sonar"))
learners <- mlr_learners$mget(c("classif.featureless", "classif.rpart"))
resamplings <- mlr_resamplings$mget("cv3")

grid <- expand_grid(tasks, learners, resamplings)
print(grid)

set.seed(123)
bmr <- benchmark(grid)

## raw per-resampling data of the benchmark result
head(as.data.table(bmr))

## performance values aggregated over resampling iterations
aggregated <- bmr$aggregate()
print(aggregated)

## predictions of the first resampling result
first_rr <- aggregated$resample_result[[1]]
as.data.table(first_rr$prediction)
# Example 2: benchmarking with a hand-built design:
# - fit classif.featureless on iris with a 3-fold CV
# - fit classif.rpart on sonar using a holdout
custom_design <- data.table::data.table(
  task = mlr_tasks$mget(c("iris", "sonar")),
  learner = mlr_learners$mget(c("classif.featureless", "classif.rpart")),
  resampling = mlr_resamplings$mget(c("cv3", "holdout"))
)

## instantiate each resampling on its corresponding task
custom_design$resampling <- Map(
  function(task, resampling) resampling$clone()$instantiate(task),
  custom_design$task,
  custom_design$resampling
)

## run the benchmark on the custom design
bmr2 <- benchmark(custom_design)
print(bmr2)

## training set of the 2nd iteration of the featureless learner on iris
featureless_rr <- bmr2$aggregate()[
  learner_id == "classif.featureless"
]$resample_result[[1]]
featureless_rr$resampling$train_set(2)
# }
# Run the code above in your browser using DataLab