LLMR (version 0.2.3)

call_llm: Call LLM API

Description

Sends a message or data to the specified LLM API and retrieves the response.

Usage

call_llm(config, messages, verbose = FALSE, json = FALSE)

Value

The generated text response or embedding results, with additional attributes attached (for example, the raw JSON response when `json = TRUE`).
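
As a minimal sketch of inspecting those attributes (assuming `response` is an object returned by `call_llm()` with `json = TRUE`; the attribute name `"raw_json"` follows the Gemini example below):

  attributes(response)               # list all attributes attached to the result
  raw <- attr(response, "raw_json")  # raw JSON string kept alongside the parsed text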

Arguments

config

An `llm_config` object created by `llm_config()`.

messages

A list of message objects (for chat calls) or a character vector (for embeddings).

verbose

Logical. If `TRUE`, prints the full API response.

json

Logical. If `TRUE`, returns the raw JSON response as an attribute.
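
A minimal sketch of the two input forms for `messages` (the provider, model name, and environment variable here are placeholders; full configurations appear in the Examples):

  # Chat call: `messages` is a list of role/content pairs
  chat_config <- llm_config(
    provider = "openai",
    model    = "gpt-4o-mini",          # placeholder model name
    api_key  = Sys.getenv("OPENAI_KEY")
  )
  messages <- list(
    list(role = "user", content = "Say hello in one sentence.")
  )
  reply <- call_llm(chat_config, messages, verbose = FALSE, json = FALSE)

  # Embedding call: `messages` is a character vector
  # (see the OpenAI and Voyage AI embedding examples below)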

Examples

if (FALSE) {
# Make sure to set your needed API keys in environment variables
  # OpenAI Embedding Example (overwriting api_url):
  openai_embed_config <- llm_config(
    provider = "openai",
    model = "text-embedding-3-small",
    api_key = Sys.getenv("OPENAI_KEY"),
    temperature = 0.3,
    api_url = "https://api.openai.com/v1/embeddings"
  )

  text_input <- c("Political science is a useful subject",
                  "We love sociology",
                  "German elections are different",
                  "A student was always curious.")

  embed_response <- call_llm(openai_embed_config, text_input)

  # Voyage AI Example:
  voyage_config <- llm_config(
    provider = "voyage",
    model = "voyage-large-2",
    api_key = Sys.getenv("VOYAGE_API_KEY")
  )

  embedding_response <- call_llm(voyage_config, text_input)
  embeddings <- parse_embeddings(embedding_response)
  embeddings |> cor() |> print()


  # Gemini Example
  gemini_config <- llm_config(
    provider = "gemini",
    model = "gemini-pro",          # Or another Gemini model
    api_key = Sys.getenv("GEMINI_API_KEY"),
    temperature = 0.9,               # Controls randomness
    max_tokens = 800,              # Maximum tokens to generate
    top_p = 0.9,                     # Nucleus sampling parameter
    top_k = 10                      # Top K sampling parameter
  )

  gemini_message <- list(
    list(role = "user", content = "Explain the theory of relativity to a curious 3-year-old!")
  )

  gemini_response <- call_llm(
    config = gemini_config,
    messages = gemini_message,
    json = TRUE # Get raw JSON for inspection if needed
  )

  # Display the generated text response
  cat("Gemini Response:", gemini_response, "\n")

  # Access and print the raw JSON response
  raw_json_gemini_response <- attr(gemini_response, "raw_json")
  print(raw_json_gemini_response)
}
