# \donttest{
# Multi-GPU example: schedule one compute graph across two Vulkan devices.
# Requires a Vulkan-capable build of ggml and at least two GPUs.
if (ggml_vulkan_available() && ggml_vulkan_device_count() >= 2) {
  # One backend handle per physical device (indices 0 and 1)
  gpu1 <- ggml_vulkan_init(0)
  gpu2 <- ggml_vulkan_init(1)
  # The scheduler splits the graph across the listed backends
  sched <- ggml_backend_sched_new(list(gpu1, gpu2))

  # 64 MiB context holds tensor metadata and the graph itself
  ctx <- ggml_init(64 * 1024 * 1024)
  a <- ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 10000)
  b <- ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 10000)
  ggml_set_f32(a, rnorm(10000))
  ggml_set_f32(b, rnorm(10000))

  # Named sum_tensor rather than `c` to avoid shadowing base::c()
  sum_tensor <- ggml_add(ctx, a, b)
  graph <- ggml_build_forward_expand(ctx, sum_tensor)

  # Reserve backend buffers for the graph before computing
  ggml_backend_sched_reserve(sched, graph)
  # Compute using both GPUs
  ggml_backend_sched_graph_compute(sched, graph)
  result <- ggml_get_f32(sum_tensor)

  # Diagnostics: how the scheduler partitioned the work
  cat("Splits:", ggml_backend_sched_get_n_splits(sched), "\n")
  cat("Copies:", ggml_backend_sched_get_n_copies(sched), "\n")

  # Release resources; scheduler and backends after the context
  ggml_free(ctx)
  ggml_backend_sched_free(sched)
  ggml_vulkan_free(gpu1)
  ggml_vulkan_free(gpu2)
}
# }
# Run the code above in your browser using DataLab