if (FALSE) {
# Load model and create basic context
model <- model_load("path/to/model.gguf")
ctx <- context_create(model)
# Create context with larger buffer for long conversations
long_ctx <- context_create(model, n_ctx = 4096)
# High-performance context with more threads
fast_ctx <- context_create(model, n_ctx = 2048, n_threads = 8)
# Context for batch processing multiple conversations
batch_ctx <- context_create(model, n_ctx = 2048, n_seq_max = 4)
# Create context with an explicit verbosity level (here 2L) to control logging output
quiet_ctx <- context_create(model, verbosity = 2L)
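# The options above can also be combined in a single call. This is a hedged
# sketch, not taken from the package documentation: the parameter names
# (n_ctx, n_threads, n_seq_max) are reused from the examples shown here,
# so verify their defaults and limits in ?context_create before relying on them.
combined_ctx <- context_create(model, n_ctx = 4096, n_threads = 8, n_seq_max = 2)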
}