# Usage examples for list_models() and call_llm().
# Wrapped in `if (FALSE)` so they never run automatically: they require
# network access and provider API keys (OpenAI, Groq, Anthropic, GitHub).
if (FALSE) {
  ## 1. Listing available models ----

  # List models from every supported provider at once
  all_mods <- list_models("all")
  str(all_mods)

  # List a single provider's models; Anthropic additionally takes an
  # API-version string
  openai_mods <- list_models("openai")
  groq_mods <- list_models("groq")
  anthropic_mods <- list_models(
    "anthropic",
    anthropic_api_version = "2023-06-01"
  )

  ## 2. Single-prompt interface ----

  # 2a. Basic usage (the provider key is read from the environment)
  Sys.setenv(OPENAI_API_KEY = "sk-...")
  res_basic <- call_llm(
    prompt = "Hello, how are you?",
    provider = "openai"
  )
  cat(res_basic)

  # 2b. Adjust sampling temperature, nucleus sampling, and penalties
  res_sampling <- call_llm(
    prompt = "Write a haiku about winter",
    provider = "openai",
    temperature = 1.2,
    top_p = 0.5,
    presence_penalty = 0.6,
    frequency_penalty = 0.4
  )
  cat(res_sampling)

  # 2c. Control response length and the retry/backoff policy
  res_len <- call_llm(
    prompt = "List 5 uses for R",
    provider = "openai",
    max_tokens = 50,
    n_tries = 5,
    backoff = 0.5
  )
  cat(res_len)

  # 2d. Stop sequences: generation halts when "6" would be emitted
  res_stop <- call_llm(
    prompt = "Count from 1 to 10:",
    provider = "openai",
    stop = c("6")
  )
  cat(res_stop)

  # 2e. Override the environment API key for a single call
  res_override <- call_llm(
    prompt = "Override test",
    provider = "openai",
    api_key = "sk-override",
    max_tokens = 20
  )
  cat(res_override)

  # 2f. Factory interface: omitting prompt/messages returns a reusable
  # function pre-configured with these settings
  GitHubLLM <- call_llm(
    provider = "github",
    max_tokens = 60,
    verbose = FALSE
  )

  # Direct invocation of the factory-made function
  story1 <- GitHubLLM("Tell me a short story")
  cat(story1)

  ## 3. Multi-message conversation ----

  # 3a. Simple system + user exchange
  convo1 <- list(
    list(role = "system", content = "You are a helpful assistant."),
    list(role = "user", content = "Explain recursion.")
  )
  res1 <- call_llm(
    messages = convo1,
    provider = "openai",
    max_tokens = 100
  )
  cat(res1)

  # 3b. Continue an existing chat by appending a new prompt
  prev <- list(
    list(role = "system", content = "You are concise."),
    list(role = "user", content = "Summarize the plot of Hamlet.")
  )
  res2 <- call_llm(
    messages = prev,
    prompt = "Now give me three bullet points."
  )
  cat(res2)

  # 3c. Stop sequence in a multi-message call
  convo2 <- list(
    list(role = "system", content = "You list items."),
    list(role = "user", content = "Name three colors.")
  )
  res3 <- call_llm(
    messages = convo2,
    stop = c(".")
  )
  cat(res3)

  # 3d. Multi-message via the factory interface
  ScopedLLM <- call_llm(provider = "openai", temperature = 0.3)
  chat_ctx <- list(
    list(role = "system", content = "You are a math tutor.")
  )
  ans1 <- ScopedLLM(messages = chat_ctx, prompt = "Solve 2+2.")
  cat(ans1)
  ans2 <- ScopedLLM("What about 10*10?")
  cat(ans2)
}
# Run the code above in your browser using DataLab