'use strict';

const axios = require('axios');

class OllamaProvider {
	constructor(config) {
		this.config = config;
		this.baseUrl = config.baseUrl || 'http://localhost:11434';
		this.model = config.model || 'llama3.2';
		this.messages = [];
	}
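
	// Recognized config keys (all optional): baseUrl, model, prompt,
	// temperature, topP, topK, maxOutputTokens, timeout. Defaults are
	// applied in the constructor above and in __settings() below.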

	async start(history) {
		// Convert Gemini-style history to Ollama format if needed
		this.messages = history || [];

		// Log only once a system prompt has been configured
		if (this.config.prompt) {
			console.log('Ollama provider initialized with model:', this.model);
		}
	}
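
	// Sketch of the history mapping performed in chat(). The Gemini-style
	// entry shape ({ role, parts }) is an assumption carried over from the
	// calling Ai class, not something Ollama itself defines:
	//
	//   Gemini-style: { role: 'model', parts: [{ text: 'Hello!' }] }
	//   Ollama chat:  { role: 'assistant', content: 'Hello!' }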

	__settings() {
		// Use ?? (not ||) so explicit zero values such as temperature: 0
		// are passed through instead of being replaced by the defaults
		return {
			temperature: this.config.temperature ?? 1,
			top_p: this.config.topP ?? 0.95,
			top_k: this.config.topK ?? 64,
			num_predict: this.config.maxOutputTokens ?? 8192,
		};
	}
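
	// Example of the resulting payload with the defaults above; all four
	// keys map directly onto Ollama's /api/chat "options" object:
	//
	//   { temperature: 1, top_p: 0.95, top_k: 64, num_predict: 8192 }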

	async chat(message, retryCount = 0) {
		try {
			// Build conversation from system prompt + history
			const messages = [
				{
					role: 'system',
					content: this.config.prompt || 'You are a helpful assistant.'
				},
				...this.messages.map(msg => ({
					// Gemini names the assistant role 'model'; Ollama expects 'assistant'
					role: msg.role === 'model' ? 'assistant' : 'user',
					content: msg.parts ? msg.parts.map(p => p.text).join('') : (msg.content || '')
				})),
				{
					role: 'user',
					content: message
				}
			];

			const response = await axios.post(
				`${this.baseUrl}/api/chat`,
				{
					model: this.model,
					messages: messages,
					stream: false,
					format: 'json', // Constrain the model to reply with valid JSON
					options: this.__settings()
				},
				{
					timeout: this.config.timeout || 30000,
					headers: {
						'Content-Type': 'application/json'
					}
				}
			);

			// Append both turns to history, keeping Gemini-style parts[]
			// alongside plain content so either shape can be replayed
			this.messages.push({
				role: 'user',
				parts: [{ text: message }],
				content: message
			});

			this.messages.push({
				role: 'model',
				parts: [{ text: response.data.message.content }],
				content: response.data.message.content
			});

			// Return in a format compatible with the Ai class
			return {
				response: {
					text: () => response.data.message.content
				}
			};
		} catch (error) {
			if (retryCount > 3) {
				throw new Error(`Ollama API error after ${retryCount} retries: ${error.message}`);
			}

			// Retry with linear backoff: 500ms, 1s, 1.5s, 2s
			await new Promise(resolve => setTimeout(resolve, 500 * (retryCount + 1)));
			return await this.chat(message, retryCount + 1);
		}
	}

	setPrompt(prompt) {
		this.config.prompt = prompt;
	}

	getResponse(result) {
		return result.response.text();
	}

	async close() {
		this.messages = [];
	}
}

module.exports = OllamaProvider;
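
// Minimal usage sketch (assumes an Ollama server on localhost:11434 with the
// llama3.2 model pulled; './ollama' is a hypothetical path to this file):
//
//   const OllamaProvider = require('./ollama');
//   const provider = new OllamaProvider({
//       prompt: 'Reply with a JSON object: {"say": "<your message>"}',
//   });
//   await provider.start([]);
//   const result = await provider.chat('Hello there');
//   console.log(provider.getResponse(result)); // JSON string from the model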