Title: 'Ollama' Language Models
Description: An interface to easily run local language models with the 'Ollama' <https://ollama.com> server and API endpoints (see <https://github.com/ollama/ollama/blob/main/docs/api.md> for details). It lets you run open-source large language models locally on your machine.
Authors: Hause Lin [aut, cre, cph]
Maintainer: Hause Lin <[email protected]>
License: MIT + file LICENSE
Version: 1.2.2.9000
Built: 2025-01-28 04:29:06 UTC
Source: https://github.com/hauselin/ollama-r
Appends a message to the end of a list of messages. The role and content are converted to a list and appended to the input list.
append_message(content, role = "user", x = NULL, ...)
content |
The content of the message. |
role |
The role of the message. Can be "user", "system", "assistant". Default is "user". |
x |
A list of messages. Default is NULL. |
... |
Additional arguments such as images. |
A list of messages with the new message appended.
append_message("Hello", "user")
append_message("Always respond nicely", "system")
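Because x defaults to NULL, append_message() can also start a new message list. A minimal sketch of building a conversation incrementally (assuming the returned list is passed back in as x):

msgs <- append_message("Always respond nicely", "system")  # start a new list
msgs <- append_message("Hello", "user", msgs)              # append to it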
Generate a chat completion with message history
chat(
  model,
  messages,
  tools = list(),
  stream = FALSE,
  format = list(),
  keep_alive = "5m",
  output = c("resp", "jsonlist", "raw", "df", "text", "req", "tools", "structured"),
  endpoint = "/api/chat",
  host = NULL,
  ...
)
model: A character string of the model name, such as "llama3".
messages: A list of lists of messages for the model (see examples below).
tools: Tools for the model to use if supported. Requires stream = FALSE. Default is an empty list.
stream: Enable response streaming. Default is FALSE.
format: Format to return the response in. Can be "json" or a JSON schema supplied as a list (structured response).
keep_alive: The duration to keep the connection alive. Default is "5m".
output: The output format. Default is "resp". Other options are "jsonlist", "raw", "df", "text", "req" (httr2_request object), "tools" (tool calling), and "structured" (structured output).
endpoint: The endpoint to chat with the model. Default is "/api/chat".
host: The base URL to use. Default is NULL, which uses Ollama's default base URL.
...: Additional options to pass to the model.
A response in the format specified in the output parameter.
# one message
messages <- list(
  list(role = "user", content = "How are you doing?")
)
chat("llama3", messages)  # returns response by default
chat("llama3", messages, output = "text")  # returns text/vector
chat("llama3", messages, temperature = 2.8)  # additional options
chat("llama3", messages, stream = TRUE)  # stream response
chat("llama3", messages, output = "df", stream = TRUE)  # stream and return dataframe

# multiple messages
messages <- list(
  list(role = "user", content = "Hello!"),
  list(role = "assistant", content = "Hi! How are you?"),
  list(role = "user", content = "Who is the prime minister of the UK?"),
  list(role = "assistant", content = "Rishi Sunak"),
  list(role = "user", content = "List all the previous messages.")
)
chat("llama3", messages, stream = TRUE)

# image
image_path <- file.path(system.file("extdata", package = "ollamar"), "image1.png")
messages <- list(
  list(role = "user", content = "What is in the image?", images = image_path)
)
chat("benzie/llava-phi-3", messages, output = "text")
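The format and tools parameters are not exercised above. Below is a hedged sketch of both; the schema shapes follow the Ollama API documentation and are assumptions, not verified against this package version.

# structured output: pass a JSON schema as a list and request "structured"
format <- list(
  type = "object",
  properties = list(
    name = list(type = "string"),
    age = list(type = "integer")
  ),
  required = c("name", "age")
)
messages <- list(list(role = "user", content = "Describe a person named Alice, age 30."))
chat("llama3", messages, format = format, output = "structured")

# tool calling: define a tool schema and request "tools"
# (requires stream = FALSE and a model with tool support, e.g. llama3.1;
# add_two_numbers is a hypothetical tool name)
tools <- list(list(
  type = "function",
  "function" = list(
    name = "add_two_numbers",
    description = "Add two numbers",
    parameters = list(
      type = "object",
      required = c("x", "y"),
      properties = list(
        x = list(type = "number", description = "first number"),
        y = list(type = "number", description = "second number")
      )
    )
  )
))
chat("llama3.1", list(list(role = "user", content = "What is 3 + 4?")), tools = tools, output = "tools")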
Check if an option is valid
check_option_valid(opt)
opt: An option (character) to check.
Returns TRUE if the option is valid, FALSE otherwise.
check_option_valid("mirostat")
check_option_valid("invalid_option")
Check if a vector of options are valid
check_options(opts = NULL)
opts: A vector of options to check.
Returns a list with two elements: valid_options and invalid_options.
check_options(c("mirostat", "invalid_option"))
check_options(c("mirostat", "num_predict"))
Creates a model with another name from an existing model.
copy(source, destination, endpoint = "/api/copy", host = NULL)
source: The name of the model to copy.
destination: The name for the new model.
endpoint: The endpoint to copy the model. Default is "/api/copy".
host: The base URL to use. Default is NULL, which uses Ollama's default base URL.
A httr2 response object.
copy("llama3", "llama3_copy")
delete("llama3_copy")  # delete the model that was just copied
Create a model from another model, a safetensors directory (not implemented), or a GGUF file (not implemented).
create(
  model,
  from,
  system = NULL,
  stream = FALSE,
  endpoint = "/api/create",
  host = NULL
)
model: Name of the model to create.
from: Name of an existing model to create the new model from.
system: System prompt for the model. Default is NULL.
stream: Enable response streaming. Default is FALSE.
endpoint: The endpoint to create the model. Default is "/api/create".
host: The base URL to use. Default is NULL, which uses Ollama's default base URL.
A response in the format specified in the output parameter.
create("mario", "deepseek-r1:1.5b", system = "You are Mario from Super Mario Bros.")
model_avail("mario")  # check that the mario model has been created
list_models()  # the mario model appears in the list
generate("mario", "who are you?", output = "text")  # model should say it's Mario
delete("mario")  # delete the model created above
model_avail("mario")  # model no longer exists
Create a message
create_message(content, role = "user", ...)
content: The content of the message.
role: The role of the message. Can be "user", "system", "assistant". Default is "user".
...: Additional arguments such as images.
A list of messages.
create_message("Hello", "user")
create_message("Always respond nicely", "system")
create_message("I am here to help", "assistant")
Create messages for the chat() function.
create_messages(...)
...: A list of messages, each of list class.
A list of messages, each of list class.
messages <- create_messages(
  create_message("be nice", "system"),
  create_message("tell me a 3-word joke")
)

# equivalent, using plain lists:
messages <- create_messages(
  list(role = "system", content = "be nice"),
  list(role = "user", content = "tell me a 3-word joke")
)
Creates a httr2 request object with base URL, headers and endpoint. Used by other functions in the package and not intended to be used directly.
create_request(endpoint, host = NULL)
endpoint: The endpoint to create the request.
host: The base URL to use. Default is NULL, which uses http://127.0.0.1:11434.
A httr2 request object.
create_request("/api/tags")
create_request("/api/chat")
create_request("/api/embeddings")
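Although create_request() is mainly internal, the returned object is a standard httr2 request, so you can perform it yourself. A minimal sketch (assumes the httr2 package is installed):

library(httr2)
req <- create_request("/api/tags")  # build the request
resp <- req_perform(req)            # send it to the Ollama server
resp_body_json(resp)                # parse the JSON body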
Delete a model from your local machine that you downloaded using the pull() function. To see which models are available, use the list_models() function.
delete(name, endpoint = "/api/delete", host = NULL)
name: A character string of the model name such as "llama3".
endpoint: The endpoint to delete the model. Default is "/api/delete".
host: The base URL to use. Default is NULL, which uses Ollama's default base URL.
A httr2 response object.
## Not run:
delete("llama3")
## End(Not run)
Delete a message using positive or negative positions/indices. Negative positions/indices can be used to refer to elements/messages from the end of the sequence.
delete_message(x, position = -1)
x: A list of messages.
position: The position of the message to delete. Default is -1 (the last message).
A list of messages with the message at the specified position removed.
messages <- list(
  list(role = "system", content = "Be friendly"),
  list(role = "user", content = "How are you?")
)
delete_message(messages, 1)   # delete first message
delete_message(messages, -2)  # same as above (delete first message)
delete_message(messages, 2)   # delete second message
delete_message(messages, -1)  # same as above (delete second message)
Supersedes the embeddings() function.
embed(
  model,
  input,
  truncate = TRUE,
  normalize = TRUE,
  keep_alive = "5m",
  endpoint = "/api/embed",
  host = NULL,
  ...
)
model: A character string of the model name such as "llama3".
input: A vector of characters that you want to get the embeddings for.
truncate: Truncates the end of each input to fit within the context length. Returns an error if FALSE and the context length is exceeded. Default is TRUE.
normalize: Normalize the vector to length 1. Default is TRUE.
keep_alive: The time to keep the connection alive. Default is "5m" (5 minutes).
endpoint: The endpoint to get the vector embedding. Default is "/api/embed".
host: The base URL to use. Default is NULL, which uses Ollama's default base URL.
...: Additional options to pass to the model.
A numeric matrix of the embedding. Each column is the embedding for one input.
embed("nomic-embed-text:latest", "The quick brown fox jumps over the lazy dog.")

# pass multiple inputs
embed("nomic-embed-text:latest", c("Good bye", "Bye", "See you."))

# pass model options to the model
embed("nomic-embed-text:latest", "Hello!", temperature = 0.1, num_predict = 3)
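Because each column of the returned matrix is one input's embedding and normalize = TRUE by default, the dot product of two columns is their cosine similarity. A minimal sketch:

e <- embed("nomic-embed-text:latest", c("Good bye", "See you."))
dim(e)                # embedding dimension x number of inputs
sum(e[, 1] * e[, 2])  # cosine similarity of the two inputs (unit vectors)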
This function will be deprecated over time and has been superseded by embed(). See embed() for more details.
embeddings(
  model,
  prompt,
  normalize = TRUE,
  keep_alive = "5m",
  endpoint = "/api/embeddings",
  host = NULL,
  ...
)
model: A character string of the model name such as "llama3".
prompt: A character string of the prompt that you want to get the vector embedding for.
normalize: Normalize the vector to length 1. Default is TRUE.
keep_alive: The time to keep the connection alive. Default is "5m" (5 minutes).
endpoint: The endpoint to get the vector embedding. Default is "/api/embeddings".
host: The base URL to use. Default is NULL, which uses Ollama's default base URL.
...: Additional options to pass to the model.
A numeric vector of the embedding.
embeddings("nomic-embed-text:latest", "The quick brown fox jumps over the lazy dog.")

# pass model options to the model
embeddings("nomic-embed-text:latest", "Hello!", temperature = 0.1, num_predict = 3)
Encode images in messages to base64 format
encode_images_in_messages(messages)
messages: A list of messages, each of list class. Generally used in the chat() function.
A list of messages with images encoded in base64 format.
image <- file.path(system.file("extdata", package = "ollamar"), "image1.png")
message <- create_message(content = "what is in the image?", images = image)
message_updated <- encode_images_in_messages(message)
Generate a response for a given prompt
generate(
  model,
  prompt,
  suffix = "",
  images = "",
  format = list(),
  system = "",
  template = "",
  context = list(),
  stream = FALSE,
  raw = FALSE,
  keep_alive = "5m",
  output = c("resp", "jsonlist", "raw", "df", "text", "req", "structured"),
  endpoint = "/api/generate",
  host = NULL,
  ...
)
model: A character string of the model name such as "llama3".
prompt: A character string of the prompt, like "The sky is...".
suffix: A character string after the model response. Default is "".
images: A path to an image file to include in the prompt. Default is "".
format: Format to return the response in. Can be "json" or a JSON schema supplied as a list (structured response).
system: A character string of the system prompt (overrides what is defined in the Modelfile). Default is "".
template: A character string of the prompt template (overrides what is defined in the Modelfile). Default is "".
context: A list of context from a previous response, used to include previous conversation in the prompt. Default is an empty list.
stream: Enable response streaming. Default is FALSE.
raw: If TRUE, no formatting will be applied to the prompt. You may choose to use the raw parameter if you are specifying a full templated prompt in your request to the API. Default is FALSE.
keep_alive: The time to keep the connection alive. Default is "5m" (5 minutes).
output: The output format. Default is "resp". Other options are "jsonlist", "raw", "df", "text", "req" (httr2_request object), and "structured" (structured output).
endpoint: The endpoint to generate the completion. Default is "/api/generate".
host: The base URL to use. Default is NULL, which uses Ollama's default base URL.
...: Additional options to pass to the model.
A response in the format specified in the output parameter.
# text prompt
generate("llama3", "The sky is...", stream = FALSE, output = "df")

# stream and increase temperature
generate("llama3", "The sky is...", stream = TRUE, output = "text", temperature = 2.0)

# image prompt
# something like "image1.png"
image_path <- file.path(system.file("extdata", package = "ollamar"), "image1.png")
# use a vision or multimodal model such as https://ollama.com/benzie/llava-phi-3
generate("benzie/llava-phi-3:latest", "What is in the image?", images = image_path, output = "text")
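The context parameter is not exercised above. A hedged sketch of continuing a conversation with it, assuming the non-streaming JSON response exposes a context field as in the Ollama API:

resp <- generate("llama3", "Tell me a short joke.")
prev <- resp_process(resp, "jsonlist")  # parsed response should carry $context
generate("llama3", "Explain that joke.", context = prev$context, output = "text")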
Read image file and encode it to base64
image_encode_base64(image_path)
image_path: The path to the image file.
A base64 encoded string.
image_path <- file.path(system.file("extdata", package = "ollamar"), "image1.png")
substr(image_encode_base64(image_path), 1, 5)  # truncate output
Inserts a message at a specified position in a list of messages. The role and content are converted to a list and inserted into the input list at the given position.
insert_message(content, role = "user", x = NULL, position = -1, ...)
content: The content of the message.
role: The role of the message. Can be "user", "system", "assistant". Default is "user".
x: A list of messages. Default is NULL.
position: The position at which to insert the new message. Default is -1 (end of list).
...: Additional arguments such as images.
A list of messages with the new message inserted at the specified position.
messages <- list(
  list(role = "system", content = "Be friendly"),
  list(role = "user", content = "How are you?")
)
insert_message("INSERT MESSAGE AT THE END", "user", messages)
insert_message("INSERT MESSAGE AT THE BEGINNING", "user", messages, 2)
List models that are available locally
list_models(
  output = c("df", "resp", "jsonlist", "raw", "text"),
  endpoint = "/api/tags",
  host = NULL
)
output: The output format. Default is "df". Other options are "resp", "jsonlist", "raw", "text".
endpoint: The endpoint to get the models. Default is "/api/tags".
host: The base URL to use. Default is NULL, which uses Ollama's default base URL.
A response in the format specified in the output parameter.
list_models()  # returns dataframe
list_models("df")  # returns dataframe
list_models("resp")  # httr2 response object
list_models("jsonlist")
list_models("raw")
Check if model is available locally
model_avail(model)
model: A character string of the model name such as "llama3".
A logical value indicating if the model exists.
model_avail("codegemma:7b")
model_avail("abc")
model_avail("llama3")
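A common pattern is to pair model_avail() with pull() so a model is downloaded only when missing:

if (!model_avail("llama3")) pull("llama3")  # pull only if not available locally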
Model options
model_options
An object of class list of length 13.
Chat with a model in real time in the R console
ohelp(model = "codegemma:7b", ...)
model: A character string of the model name such as "llama3". Default is "codegemma:7b", a decent coding model as of 2024-07-27.
...: Additional options. None are currently available.
Does not return anything. It prints the conversation in the console.
ohelp(first_prompt = "quit")
# regular usage: ohelp()
Package configuration
package_config
An object of class list of length 3.
Prepends a message to the beginning of a list of messages. The role and content are converted to a list and prepended to the input list.
prepend_message(content, role = "user", x = NULL, ...)
content: The content of the message.
role: The role of the message. Can be "user", "system", "assistant". Default is "user".
x: A list of messages. Default is NULL.
...: Additional arguments such as images.
A list of messages with the new message prepended.
prepend_message("Hello", "user")
prepend_message("Always respond nicely", "system")
List models that are currently loaded into memory
ps(
  output = c("df", "resp", "jsonlist", "raw", "text"),
  endpoint = "/api/ps",
  host = NULL
)
output: The output format. Default is "df". Supported formats are "df", "resp", "jsonlist", "raw", and "text".
endpoint: The endpoint to list the running models. Default is "/api/ps".
host: The base URL to use. Default is NULL, which uses Ollama's default base URL.
A response in the format specified in the output parameter.
ps("text")
See https://ollama.com/library for a list of available models. Use the list_models() function to get the list of models already downloaded/installed on your machine. Cancelled pulls are resumed from where they left off, and multiple calls will share the same download progress.
pull(
  name,
  stream = FALSE,
  insecure = FALSE,
  endpoint = "/api/pull",
  host = NULL
)
name: A character string of the model name to download/pull, such as "llama3".
stream: Enable response streaming. Default is FALSE.
insecure: Allow insecure connections. Only use this if you are pulling from your own library during development. Default is FALSE.
endpoint: The endpoint to pull the model. Default is "/api/pull".
host: The base URL to use. Default is NULL, which uses Ollama's default base URL.
A httr2 response object.
pull("llama3")
pull("all-minilm", stream = FALSE)
Push or upload a model to an Ollama model library. Requires registering for ollama.ai and adding a public key first.
push(
  name,
  insecure = FALSE,
  stream = FALSE,
  output = c("resp", "jsonlist", "raw", "text", "df"),
  endpoint = "/api/push",
  host = NULL
)
name: A character string of the model name to upload, in the form of <namespace>/<model>:<tag> (e.g., "mattw/pygmalion:latest").
insecure: Allow insecure connections. Only use this if you are pushing to your own library during development. Default is FALSE.
stream: Enable response streaming. Default is FALSE.
output: The output format. Default is "resp". Other options are "jsonlist", "raw", "text", and "df".
endpoint: The endpoint to push the model. Default is "/api/push".
host: The base URL to use. Default is NULL, which uses Ollama's default base URL.
A httr2 response object.
push("mattw/pygmalion:latest")
Process httr2 response object
resp_process(
  resp,
  output = c("df", "jsonlist", "raw", "resp", "text", "tools")
)
resp: A httr2 response object.
output: The output format. Default is "df". Other options are "jsonlist", "raw", "resp" (httr2 response object), "text", "tools" (tool_calls), "structured" (structured output).
A data frame, json list, raw or httr2 response object.
resp <- list_models("resp")
resp_process(resp, "df")  # parse response to dataframe/tibble
resp_process(resp, "jsonlist")  # parse response to list
resp_process(resp, "raw")  # parse response to raw string
resp_process(resp, "text")  # return text/character vector
resp_process(resp, "tools")  # return tool_calls
Search for options based on a query
search_options(query)
query: A query (character) to search for in the options.
Returns a list of matching options.
search_options("learning rate")
search_options("tokens")
search_options("invalid query")
Model information includes details, modelfile, template, parameters, license, and system prompt.
show(
  name,
  verbose = FALSE,
  output = c("jsonlist", "resp", "raw"),
  endpoint = "/api/show",
  host = NULL
)
name: Name of the model to show.
verbose: Returns full data for verbose response fields. Default is FALSE.
output: The output format. Default is "jsonlist". Other options are "resp", "raw".
endpoint: The endpoint to show the model. Default is "/api/show".
host: The base URL to use. Default is NULL, which uses Ollama's default base URL.
A response in the format specified in the output parameter.
# show("llama3")  # returns jsonlist
show("llama3", output = "resp")  # returns response object
Tests whether the Ollama server is running.
test_connection(url = "http://localhost:11434", logical = FALSE)
url: The URL of the Ollama server. Default is http://localhost:11434.
logical: Logical. If TRUE, returns a boolean value. Default is FALSE.
Boolean value or httr2 response object, where status_code is either 200 (success) or 503 (error).
test_connection(logical = TRUE)
test_connection("http://localhost:11434")  # default url
test_connection("http://127.0.0.1:11434")
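A typical guard before making other API calls:

if (test_connection(logical = TRUE)) {
  list_models()
} else {
  message("Ollama server is not running")
}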
Validate a message to ensure it has the required fields and the correct data types for the chat() function.
validate_message(message)
message: A list with a single message of list class.
TRUE if message is valid, otherwise an error is thrown.
validate_message(create_message("Hello"))
validate_message(list(role = "user", content = "Hello"))
Validate a list of messages to ensure they have the required fields and the correct data types for the chat() function.
validate_messages(messages)
messages: A list of messages, each of list class.
TRUE if all messages are valid, otherwise warning messages are printed and FALSE is returned.
validate_messages(create_messages(
  create_message("Be friendly", "system"),
  create_message("Hello")
))
Validate additional options or parameters provided to the API call
validate_options(...)
...: Additional options or parameters provided to the API call.
TRUE if all additional options are valid, FALSE otherwise
validate_options(mirostat = 1, mirostat_eta = 0.2, num_ctx = 1024)
validate_options(mirostat = 1, mirostat_eta = 0.2, invalid_opt = 1024)
Retrieve Ollama version
ver(endpoint = "/api/version", host = NULL)
endpoint: The endpoint to get the Ollama version. Default is "/api/version".
host: The base URL to use. Default is NULL, which uses Ollama's default base URL.
A character string of the Ollama version.
ver()