if (FALSE) {
library(LLMR)

# Standard text-only call
config <- llm_config(provider = "openai", model = "gpt-4o-mini", api_key = "...")
messages <- list(list(role = "user", content = "Hello!"))
response <- call_llm(config, messages)
# Multimodal call (for supported providers like Gemini, Claude 3, GPT-4o)
# Make sure to use a vision-capable model in your config
multimodal_config <- llm_config(provider = "openai", model = "gpt-4o", api_key = "...")
multimodal_messages <- list(list(role = "user", content = list(
  list(type = "text", text = "What is in this image?"),
  list(type = "file", path = "path/to/your/image.png")
)))
image_response <- call_llm(multimodal_config, multimodal_messages)
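# Multi-turn conversation -- a minimal sketch reusing the same role/content
# message format shown above (the system/assistant roles are an assumption,
# not taken verbatim from the package documentation)
chat_messages <- list(
  list(role = "system", content = "You are a concise assistant."),
  list(role = "user", content = "Summarise Hamlet in one sentence."),
  list(role = "assistant", content = "A Danish prince avenges his father's murder at great cost."),
  list(role = "user", content = "Now do it in five words.")
)
chat_response <- call_llm(config, chat_messages)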
}