# \donttest{
if (interactive()) {
  # Basic usage with Chat Completions API
  openai <- create_openai(api_key = "sk-...")
  model <- openai$language_model("gpt-4o")
  result <- generate_text(model, "Hello!")

  # Using Responses API for reasoning models
  openai <- create_openai()
  model <- openai$responses_model("o1")
  result <- generate_text(model, "Solve this math problem...")
  print(result$reasoning) # Access chain-of-thought

  # Smart model selection (auto-detects best API)
  model <- openai$smart_model("o3-mini") # Uses Responses API
  model <- openai$smart_model("gpt-4o") # Uses Chat Completions API

  # Token limits - unified interface.
  # Define an example message list before using it below (previously `msgs`
  # was referenced without ever being created, so the example could not run).
  msgs <- list(list(role = "user", content = "Summarise the plot of Hamlet."))

  # For standard models: limits generated content
  result <- model$generate(messages = msgs, max_tokens = 1000)

  # For o1/o3 models: automatically maps to max_completion_tokens
  model_o1 <- openai$language_model("o1")
  result <- model_o1$generate(messages = msgs, max_tokens = 2000)

  # For Responses API: automatically maps to max_output_tokens (total limit)
  model_resp <- openai$responses_model("o1")
  result <- model_resp$generate(messages = msgs, max_tokens = 2000)

  # Advanced: explicitly control answer-only limit (Volcengine Responses API)
  result <- model_resp$generate(messages = msgs, max_answer_tokens = 500)

  # Multi-turn conversation with Responses API
  model <- openai$responses_model("o1")
  result1 <- generate_text(model, "What is 2+2?")
  result2 <- generate_text(model, "Now multiply that by 3") # Remembers context
  model$reset() # Start fresh conversation
}
# }
# Run the code above in your browser using DataLab