if (FALSE) {
# Make sure the required API keys are set as environment variables.

# OpenAI embedding example (overriding api_url):
openai_embed_config <- llm_config(
  provider = "openai",
  model = "text-embedding-3-small",
  api_key = Sys.getenv("OPENAI_KEY"),
  temperature = 0.3,
  api_url = "https://api.openai.com/v1/embeddings"
)

text_input <- c("Political science is a useful subject",
                "We love sociology",
                "German elections are different",
                "A student was always curious.")

embed_response <- call_llm(openai_embed_config, text_input)
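
# Illustrative only: the OpenAI embedding response can be parsed the same way
# as the Voyage response below, assuming it has the usual embedding-response
# shape expected by parse_embeddings().
openai_embeddings <- parse_embeddings(embed_response)
dim(openai_embeddings)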

# Voyage AI example:
voyage_config <- llm_config(
  provider = "voyage",
  model = "voyage-large-2",
  api_key = Sys.getenv("VOYAGE_API_KEY")
)

embedding_response <- call_llm(voyage_config, text_input)
embeddings <- parse_embeddings(embedding_response)
embeddings |> cor() |> print()

# Gemini example
gemini_config <- llm_config(
  provider = "gemini",
  model = "gemini-pro",   # or another Gemini model
  api_key = Sys.getenv("GEMINI_API_KEY"),
  temperature = 0.9,      # controls randomness
  max_tokens = 800,       # maximum tokens to generate
  top_p = 0.9,            # nucleus sampling parameter
  top_k = 10              # top-k sampling parameter
)

gemini_message <- list(
  list(role = "user",
       content = "Explain the theory of relativity to a curious 3-year-old!")
)

gemini_response <- call_llm(
  config = gemini_config,
  messages = gemini_message,
  json = TRUE             # get raw JSON for inspection if needed
)
# Display the generated text response
cat("Gemini Response:", gemini_response, "\n")
# Access and print the raw JSON response
raw_json_gemini_response <- attr(gemini_response, "raw_json")
print(raw_json_gemini_response)
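
# A quick way to inspect the raw JSON without flooding the console; str() works
# whether the attribute is stored as a parsed list or as a single JSON string
# (the exact storage format is an assumption here).
str(raw_json_gemini_response, max.level = 2)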
}