library(mlr)

## Split the iris data into a training and a test set (odd vs. even rows)
training.set <- seq(1, nrow(iris), by = 2)
test.set <- seq(2, nrow(iris), by = 2)
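## The odd/even split above is deterministic; a random split is a common
## alternative (a minimal sketch using base R, seed chosen arbitrarily):
## set.seed(1)
## training.set <- sample(nrow(iris), size = nrow(iris) / 2)
## test.set <- setdiff(seq_len(nrow(iris)), training.set)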
## Define the classification task, choose a learner (linear discriminant
## analysis), and fit it on the training set
task <- makeClassifTask(data = iris, target = "Species")
lrn <- makeLearner("classif.lda")
mod <- train(lrn, task, subset = training.set)
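## The trained model can be inspected; getLearnerModel() unwraps the underlying
## lda fit from the mlr model object:
getLearnerModel(mod)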
## Predict on the held-out test set
pred <- predict(mod, newdata = iris[test.set, ])
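## Quick sanity check: pred$data holds the truth and response columns, and
## calculateConfusionMatrix() tabulates them
head(pred$data)
calculateConfusionMatrix(pred)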
## Here we define the mean misclassification error (MMCE) as our performance
## measure; the measure function receives the task, the fitted model, the
## prediction object, the features, and any extra arguments
my.mmce <- function(task, model, pred, feats, extra.args) {
  mean(pred$data$response != pred$data$truth)
}
## (argument names follow the mlr 2.x makeMeasure API)
ms <- makeMeasure(id = "misclassification.rate",
  minimize = TRUE,
  properties = c("classif", "req.pred", "req.truth"),
  fun = my.mmce)
performance(pred, measures = ms, task = task, model = mod)
## The MMCE is already implemented in mlr, alongside other common performance measures
performance(pred, measures = mmce)
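## listMeasures() shows which built-in measures are applicable to a given task:
listMeasures(task)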
## Compute multiple performance measures at once
ms <- list("mmce" = mmce, "acc" = acc, "timetrain" = timetrain)
sapply(ms, function(the.ms) {
  performance(pred, measures = the.ms, task = task, model = mod)
})
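## performance() also accepts a list of measures directly and returns a named
## vector, so the sapply() call above can be written more compactly:
performance(pred, measures = list(mmce, acc, timetrain), task = task, model = mod)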