# Ollama integration
Ollama lets you quickly self-host large language models. Our SDKs integrate with it automatically through its OpenAI-compatible API.
## Monitor Ollama
Because Ollama exposes an OpenAI-compatible endpoint, you can track Ollama calls with the same one-line setup our SDKs use for OpenAI. In Python:
```python
from openai import OpenAI
import lunary

client = OpenAI(
    base_url='http://localhost:11434/v1/',  # replace with your Ollama base URL
    api_key='ollama',  # required by the SDK but ignored by Ollama
)

# Patch the client so every call it makes is reported to Lunary
lunary.monitor(client)

chat_completion = client.chat.completions.create(
    messages=[
        {
            'role': 'user',
            'content': 'Say this is a test',
        }
    ],
    model='llama3.2',
)
```
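The wrapper doesn't change the response shape, so you read the reply as you would from any OpenAI client. A minimal continuation of the snippet above, assuming Ollama is serving `llama3.2` locally and your Lunary key is set in the environment (`LUNARY_PUBLIC_KEY`):

```python
# Print the assistant's reply; the call above has already been logged to Lunary.
print(chat_completion.choices[0].message.content)
```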
The same pattern works in JavaScript:

```js
import OpenAI from 'openai'
import { monitorOpenAI } from 'lunary/openai'

// Wrap the client so every call it makes is reported to Lunary
const openai = monitorOpenAI(new OpenAI({
  baseURL: 'http://localhost:11434/v1/', // replace with your Ollama base URL
  apiKey: 'ollama', // required by the SDK but ignored by Ollama
}))

const chatCompletion = await openai.chat.completions.create({
  messages: [{ role: 'user', content: 'Say this is a test' }],
  model: 'llama3.2',
})
```
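Every request made through the wrapped client is then logged to your Lunary dashboard, where you can inspect prompts, responses, and other call metadata.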