# Create the part of the workflow plan for the datasets.
datasets <- drake_plan(
  small = simulate(5),
  large = simulate(50)
)
# Create a template workflow plan for the analyses.
methods <- drake_plan(
  regression1 = reg1(dataset__),
  regression2 = reg2(dataset__)
)
# Evaluate the wildcards in the template
# to produce the actual part of the workflow plan
# that encodes the analyses of the datasets.
# Create one analysis for each combination of dataset and method.
evaluate_plan(methods, wildcard = "dataset__",
  values = datasets$target)
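# The result should contain one analysis per combination,
# with target names like regression1_small = reg1(small)
# and regression2_large = reg2(large).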
# Only choose some combinations of dataset and analysis method.
ans <- evaluate_plan(methods, wildcard = "dataset__",
  values = datasets$target, expand = FALSE)
ans
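# With expand = FALSE, the values are matched to the rows in order,
# so regression1 should use `small` and regression2 should use `large`.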
# For the complete workflow plan, row bind the pieces together.
my_plan <- rbind(datasets, ans)
my_plan
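# The full project could then be built with make(my_plan),
# assuming simulate(), reg1(), and reg2() are defined.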
# Workflow plans can have multiple wildcards.
# Each combination of wildcard values will be used,
# except when `expand` is `FALSE`.
x <- drake_plan(draws = rnorm(mean = Mean, sd = Sd))
evaluate_plan(x, rules = list(Mean = 1:3, Sd = c(1, 10)))
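# All 3 x 2 = 6 combinations of Mean and Sd should be used,
# yielding 6 variants of the `draws` target.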
# With the `trace` argument,
# you can generate columns that show how the wildcards
# were evaluated.
plan <- drake_plan(x = rnorm(n__), y = rexp(n__))
plan <- evaluate_plan(plan, wildcard = "n__", values = 1:2, trace = TRUE)
print(plan)
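# The traced plan should gain indicator columns (e.g. `n__` and `n___from`)
# recording which wildcard value produced each target.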
# Then you can visualize the wildcard groups as clusters.
cache <- storr::storr_environment()
config <- drake_config(plan, cache = cache)
vis_drake_graph(config, group = "n__", clusters = "1")
vis_drake_graph(config, group = "n__", clusters = c("1", "2"))
make(plan, targets = c("x_1", "y_2"), cache = cache)
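# After this make(), only x_1 and y_2 should be up to date,
# so clustering on "status" separates them from the outdated targets.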
# Optionally cluster on columns supplied by `drake_graph_info()$nodes`.
vis_drake_graph(config, group = "status", clusters = "up to date")