- Nim library to work with the Ollama API
- API base URL defaults to `http://localhost:11434/api`
- you may pass an alternate URL to `newOllamaAPI()`
```nim
import llama_leap

let ollama = newOllamaAPI()
echo ollama.generate("llama2", "How are you today?")
```
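If your Ollama instance is not listening on the default port, the base URL can be supplied when constructing the client. A minimal sketch, assuming `newOllamaAPI()` takes the base URL as a plain string argument (only the default URL is documented above):

```nim
import llama_leap

# Point the client at a non-default Ollama instance.
# Assumption: newOllamaAPI() accepts the base URL as its first argument.
let remoteOllama = newOllamaAPI("http://192.168.1.50:11434/api")
echo remoteOllama.generate("llama2", "How are you today?")
```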
- Only the non-streaming generate API is currently supported
- streaming is coming soon (TM)
```nim
# simple interface
echo ollama.generate("llama2", "How are you today?")

# structured interface
let req = GenerateReq(
  model: "llama2",
  prompt: "How are you today?",
  options: option(ModelParameters(
    temperature: option(0.0f),
  )),
  system: option("Please talk like a pirate. You are Longbeard the llama.")
)
let resp = ollama.generate(req)
echo "> " & resp.response
```
```nim
let req = ChatReq(
  model: "llama2",
  messages: @[
    ChatMessage(
      role: "system",
      content: "Please talk like a pirate. You are Longbeard the llama."
    ),
    ChatMessage(
      role: "user",
      content: "How are you today?"
    ),
  ],
  options: option(ModelParameters(
    temperature: option(0.0f),
    seed: option(42)
  ))
)
let resp = ollama.chat(req)
echo "> " & resp.message.content.strip() # strip() comes from std/strutils
```
```nim
ollama.pullModel("llama2")
```
```nim
let resp = ollama.generateEmbeddings("llama2", "How are you today?")
echo "Embedding Length: " & $resp.embedding.len
```
- ensure ollama is running on the default port
- run `nimble test`
- openai_leap is a Nim client for the OpenAI API.
- vertex_leap is a client for Google's VertexAI API.
- mono_llm is a higher-level Nim library that creates a unified interface for OpenAI, Ollama, and VertexAI.