craft fix

commit 9acd38c94b
parent 80bbfbe58e
2026-01-31 18:49:08 -05:00
9 changed files with 1535 additions and 102 deletions


@@ -0,0 +1,88 @@
'use strict';

// Gemini-backed chat provider built on the official @google/generative-ai SDK.
const { GoogleGenerativeAI, HarmCategory, HarmBlockThreshold } = require("@google/generative-ai");

class GeminiProvider {
  constructor(config) {
    this.config = config;
    this.session = null;
  }

  // Open a chat session, seeded with the given history (or the default
  // prompt/acknowledgement pair from __settings).
  async start(history) {
    const genAI = new GoogleGenerativeAI(this.config.key);
    const model = genAI.getGenerativeModel({
      model: this.config.model || "gemini-2.0-flash-exp",
    });
    this.session = model.startChat(this.__settings(history));
  }
  __settings(history) {
    return {
      generationConfig: {
        // Nullish coalescing so explicitly configured zeros are respected.
        temperature: this.config.temperature ?? 1,
        topP: this.config.topP ?? 0.95,
        topK: this.config.topK ?? 64,
        maxOutputTokens: this.config.maxOutputTokens ?? 8192,
        responseMimeType: "application/json",
      },
      // Disable all built-in safety blocking.
      safetySettings: [
        { category: HarmCategory.HARM_CATEGORY_HARASSMENT, threshold: HarmBlockThreshold.BLOCK_NONE },
        { category: HarmCategory.HARM_CATEGORY_HATE_SPEECH, threshold: HarmBlockThreshold.BLOCK_NONE },
        { category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT, threshold: HarmBlockThreshold.BLOCK_NONE },
        { category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, threshold: HarmBlockThreshold.BLOCK_NONE },
      ],
      // Default history: the configured prompt as the opening user turn,
      // followed by a canned model acknowledgement.
      history: history || [
        {
          role: "user",
          parts: [{ text: this.config.prompt }],
        },
        {
          role: "model",
          parts: [{ text: "Chat stuff" }],
        },
      ],
    };
  }
  // Send one message, retried up to four times before giving up.
  async chat(message, retryCount = 0) {
    try {
      return await this.session.sendMessage(message);
    } catch (error) {
      if (retryCount > 3) {
        throw new Error(`Gemini API error after ${retryCount} retries: ${error.message}`);
      }
      // Recover by dropping the most recent history entry and rebuilding the
      // session. Note this reaches into the startChat params, which the
      // session and __settings share by reference.
      this.session.params.history.pop();
      await this.start(this.session.params.history);
      return this.chat(message, retryCount + 1);
    }
  }
  setPrompt(prompt) {
    this.config.prompt = prompt;
  }

  getResponse(result) {
    return result.response.text();
  }

  async close() {
    this.session = null;
  }
}

module.exports = GeminiProvider;
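For reference, a minimal caller sketch against this provider. The config fields match the constructor above; the environment variable name and prompt text are assumptions, chosen for illustration.

const GeminiProvider = require('./gemini');

async function main() {
  const provider = new GeminiProvider({
    key: process.env.GEMINI_API_KEY,           // assumed env var holding the API key
    prompt: 'You are a helpful assistant. Reply in JSON.',
  });
  await provider.start();                      // default history seeds the prompt
  const result = await provider.chat('Hello'); // retries transparently on failure
  console.log(provider.getResponse(result));   // JSON text payload
  await provider.close();
}

main().catch(console.error);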


@@ -0,0 +1,25 @@
'use strict';

const GeminiProvider = require('./gemini');
const OllamaProvider = require('./ollama');

// Selects a provider implementation from config.provider (default: gemini).
class ProviderFactory {
  static create(config) {
    const provider = config.provider || 'gemini';
    switch (provider.toLowerCase()) {
      case 'gemini':
        return new GeminiProvider(config);
      case 'ollama':
        return new OllamaProvider(config);
      default:
        throw new Error(`Unknown AI provider: ${provider}. Supported: 'gemini', 'ollama'`);
    }
  }
}

module.exports = {
  ProviderFactory,
  GeminiProvider,
  OllamaProvider,
};
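A sketch of selecting a backend through the factory. The require path and environment variable are assumptions; the supported provider names come from the switch above.

const { ProviderFactory } = require('./providers'); // path is an assumption

const ai = ProviderFactory.create({
  provider: process.env.AI_PROVIDER || 'gemini',    // 'gemini' or 'ollama'
  key: process.env.GEMINI_API_KEY,                  // used only by the Gemini backend
  prompt: 'You are a helpful assistant.',
});

ai.start()
  .then(() => ai.chat('ping'))
  .then(result => console.log(ai.getResponse(result)))
  .catch(console.error);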


@@ -0,0 +1,108 @@
'use strict';

// Ollama-backed chat provider speaking to a local Ollama server over HTTP.
const axios = require('axios');

class OllamaProvider {
  constructor(config) {
    this.config = config;
    this.baseUrl = config.baseUrl || 'http://localhost:11434';
    this.model = config.model || 'llama3.2';
    this.messages = [];
  }

  // Seed the conversation; Gemini-style history entries are accepted and
  // converted on the fly in chat().
  async start(history) {
    this.messages = history || [];
    console.log('Ollama provider initialized with model:', this.model);
  }
  // Map config fields to Ollama's option names; nullish coalescing so
  // explicitly configured zeros are respected.
  __settings() {
    return {
      temperature: this.config.temperature ?? 1,
      top_p: this.config.topP ?? 0.95,
      top_k: this.config.topK ?? 64,
      num_predict: this.config.maxOutputTokens ?? 8192,
    };
  }
  // Send one message via /api/chat, retrying with linear backoff on failure.
  async chat(message, retryCount = 0) {
    try {
      // Build the conversation: system prompt, then accumulated history
      // (normalized from Gemini-style entries), then the new user message.
      const messages = [
        {
          role: 'system',
          content: this.config.prompt || 'You are a helpful assistant.'
        },
        ...this.messages.map(msg => ({
          role: msg.role === 'model' ? 'assistant' : 'user',
          content: msg.parts ? msg.parts.map(p => p.text).join('') : (msg.content || '')
        })),
        {
          role: 'user',
          content: message
        }
      ];
      const response = await axios.post(
        `${this.baseUrl}/api/chat`,
        {
          model: this.model,
          messages: messages,
          stream: false,
          format: 'json', // ask Ollama to constrain output to valid JSON
          options: this.__settings()
        },
        {
          timeout: this.config.timeout || 30000,
          headers: {
            'Content-Type': 'application/json'
          }
        }
      );
      // Record both turns, keeping the Gemini-style `parts` shape alongside
      // plain `content` so either provider can consume the history.
      this.messages.push({
        role: 'user',
        parts: [{ text: message }],
        content: message
      });
      this.messages.push({
        role: 'model',
        parts: [{ text: response.data.message.content }],
        content: response.data.message.content
      });
      // Mimic the Gemini SDK's result shape so callers stay provider-agnostic.
      return {
        response: {
          text: () => response.data.message.content
        }
      };
    } catch (error) {
      if (retryCount > 3) {
        throw new Error(`Ollama API error after ${retryCount} retries: ${error.message}`);
      }
      // Linear backoff: 500ms, 1s, 1.5s, 2s.
      await new Promise(resolve => setTimeout(resolve, 500 * (retryCount + 1)));
      return this.chat(message, retryCount + 1);
    }
  }
  setPrompt(prompt) {
    this.config.prompt = prompt;
  }

  getResponse(result) {
    return result.response.text();
  }

  async close() {
    this.messages = [];
  }
}

module.exports = OllamaProvider;
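Both providers accept the same Gemini-style history shape; the mapping in chat() above turns role 'model' into Ollama's 'assistant' and joins parts[].text into one string. A sketch of seeding OllamaProvider with prior turns, where the model name and message texts are placeholders:

const OllamaProvider = require('./ollama');

async function demo() {
  const ai = new OllamaProvider({ model: 'llama3.2', prompt: 'Answer briefly in JSON.' });
  // Gemini-style entries: chat() maps role 'model' to 'assistant' and joins parts.
  await ai.start([
    { role: 'user',  parts: [{ text: 'Remember: my name is Ada.' }] },
    { role: 'model', parts: [{ text: '{"ok": true}' }] },
  ]);
  const result = await ai.chat('What is my name?');
  console.log(ai.getResponse(result)); // JSON string from the local model
  await ai.close();
}

demo().catch(console.error);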