craft fix
@@ -1,16 +1,8 @@
 'use strict';
 
 const axios = require('axios');
 
 const conf = require('../conf');
 const {sleep} = require('../utils');
-
-
-const {
-	GoogleGenerativeAI,
-	HarmCategory,
-	HarmBlockThreshold,
-} = require("@google/generative-ai");
-
-
+const { ProviderFactory } = require('./ai/providers');
 class Ai{
@@ -21,6 +13,18 @@ class Ai{
 		this.intervalLength = args.intervalLength || 30;
 		this.intervalStop;
 		this.messageListener;
+		this.provider = null;
+
+		// Bot-specific AI config (overrides global config)
+		this.botConfig = args.botConfig || {};
 	}
+
+	// Get merged config: bot-specific settings override global settings
+	__getConfig() {
+		return {
+			...conf.ai, // Global defaults
+			...this.botConfig, // Bot-specific overrides
+		};
+	}
 
 	async init(){
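The spread order in __getConfig means bot-level keys shadow the global defaults from conf.ai; later spreads win. A minimal sketch with hypothetical values:

const globalDefaults = { provider: 'gemini', interval: 30 }; // stands in for conf.ai
const botConfig = { provider: 'ollama' };
console.log({ ...globalDefaults, ...botConfig });
// → { provider: 'ollama', interval: 30 }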
@@ -38,11 +42,10 @@ class Ai{
 			console.log(`Message ${type}: ${message.toString()}`)
 			messages.push('>', message.toString());
 		});
 
-
 		this.intervalStop = setInterval(async ()=>{
 			let result;
 			// if(messages.length ===0) return;
 
 			try{
 				result = await this.chat(JSON.stringify({
 					messages, currentTime:Date.now()+1}
@@ -55,9 +58,9 @@ class Ai{
 
 			try{
 				messages = [''];
-				if(!result.response.text()) return;
+				if(!this.provider.getResponse(result)) return;
 
-				for(let message of JSON.parse(result.response.text())){
+				for(let message of JSON.parse(this.provider.getResponse(result))){
 					console.log('toSay', message.delay, message.text);
 					if(message.text === '___') return;
 					setTimeout(async (message)=>{
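The loop above implies a contract for provider output: a JSON array of { delay, text } objects, with '___' acting as a "say nothing" sentinel. A hypothetical payload, inferred from the fields the loop reads:

// Hypothetical provider output consumed by the loop above
const sample = JSON.stringify([
	{ delay: 0, text: 'hello' },
	{ delay: 2, text: 'how are you?' },
]);
for (const message of JSON.parse(sample)) {
	console.log('toSay', message.delay, message.text); // then scheduled via setTimeout
}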
@@ -66,12 +69,16 @@ class Ai{
 				}
 			}catch(error){
 				console.log('Error in AI message loop', error, result);
-				if(result || result.response || result.response.text()){
-					console.log(result.response.text())
-				}
+				try {
+					if(result && this.provider.getResponse(result)){
+						console.log(this.provider.getResponse(result))
+					}
+				} catch(e) {
+					// Ignore
+				}
 			}
 		}, this.intervalLength*1000);
 
 
 		}catch(error){
 			console.log('error in onReady', error);
 		}
@@ -83,107 +90,56 @@ class Ai{
 			clearInterval(this.intervalStop);
 			this.intervalStop = undefined;
 		}
-		this.messageListener();
+
+		if(this.messageListener){
+			this.messageListener();
+		}
+		if(this.provider){
+			await this.provider.close();
+		}
 		return true;
 	}
 
-	__settings(history){
-		return {
-			generationConfig: {
-				temperature: 1,
-				topP: 0.95,
-				topK: 64,
-				maxOutputTokens: 8192,
-				responseMimeType: "application/json",
-			},
-			safetySettings:[
-				// See https://ai.google.dev/gemini-api/docs/safety-settings
-				{
-					category: HarmCategory.HARM_CATEGORY_HARASSMENT,
-					threshold: HarmBlockThreshold.BLOCK_NONE,
-				},
-				{
-					category: HarmCategory.HARM_CATEGORY_HATE_SPEECH,
-					threshold: HarmBlockThreshold.BLOCK_NONE,
-				},
-				{
-					category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
-					threshold: HarmBlockThreshold.BLOCK_NONE,
-				},
-				{
-					category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
-					threshold: HarmBlockThreshold.BLOCK_NONE,
-				},
-			],
-			history: history || [
-				{
-					role: "user",
-					parts: [
-						{text: this.prompt},
-					],
-				},
-				{
-					role: "model",
-					parts: [
-						{text: "Chat stuff"},
-					],
-				}
-			],
-		}
-	}
-
 	async start(history){
-		const genAI = new GoogleGenerativeAI(conf.ai.key);
-		let bulbaItems = {};
-
-		const model = genAI.getGenerativeModel({
-			model: "gemini-1.5-flash",
-		});
+		const config = this.__getConfig();
+
+		console.log(`${this.bot.name} AI config:`, {
+			provider: config.provider,
+			model: config.model,
+			promptName: this.promptName,
+			baseUrl: config.baseUrl,
+		});
 
 		let bulbaItems = await axios.get('https://webstore.bulbastore.uk/api/listings');
 		bulbaItems = bulbaItems.data.listings.map(i=>i.listing_name);
 
-		console.log('AI for prompts', conf.ai.prompts)
-
-		this.prompt = conf.ai.prompts[this.promptName](
+		const prompt = conf.ai.prompts[this.promptName](
 			this.bot.bot.entity.username,
-			conf.ai.interval,
+			config.interval,
 			Object.values(this.bot.getPlayers()).map(player=>`<[${player.lvl}] ${player.username}>`).join('\n'),
 			bulbaItems,
 			this.prompCustom,
 		);
 
-		this.session = model.startChat({
-			...this.__settings(history),
-			// systemInstruction: this.prompt,
-		});
+		// Create the provider instance with merged config and prompt
+		this.provider = ProviderFactory.create({
+			...config,
+			prompt: prompt,
+		});
 
+		await this.provider.start(history);
+		console.log(`${this.bot.name} AI ${config.provider} provider started (model: ${config.model})`);
 	}
 
 	async chat(message, retryCount=0){
-		console.log('chat', retryCount)
+		console.log(`chat ${this.bot.name}`, retryCount)
 		try{
-			let result = await this.session.sendMessage(message);
+			let result = await this.provider.chat(message);
 			return result
 		}catch(error){
 			console.log('AI chat error', error)
-
-			if(retryCount > 3){
-				console.log('hit retry count');
-				return ;
-			};
-			await sleep(500);
-			this.session.params.history.pop();
-			this.start(this.session.params.history);
-			return await this.chat(message, retryCount++)
+			throw error;
 		}
 	}
 }
 
-// run();
 module.exports = Ai;
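After this change the Ai class only assumes a duck-typed provider contract: start(history), chat(message), setPrompt(prompt), getResponse(result), and close(). A minimal sketch of that contract (EchoProvider is hypothetical, not part of this commit):

class EchoProvider {
	constructor(config){ this.config = config; }
	async start(history){ /* prepare session state from history */ }
	async chat(message){
		// Echo back one { delay, text } message in the shape Ai expects
		return { response: { text: () => JSON.stringify([{ delay: 0, text: message }]) } };
	}
	setPrompt(prompt){ this.config.prompt = prompt; }
	getResponse(result){ return result.response.text(); }
	async close(){ /* release session state */ }
}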
nodejs/controller/ai/providers/gemini.js (new file, 88 lines)
@@ -0,0 +1,88 @@
+'use strict';
+
+const { GoogleGenerativeAI, HarmCategory, HarmBlockThreshold } = require("@google/generative-ai");
+
+class GeminiProvider {
+	constructor(config) {
+		this.config = config;
+		this.session = null;
+	}
+
+	async start(history) {
+		const genAI = new GoogleGenerativeAI(this.config.key);
+		const model = genAI.getGenerativeModel({
+			model: this.config.model || "gemini-2.0-flash-exp",
+		});
+
+		this.session = model.startChat(this.__settings(history));
+	}
+
+	__settings(history) {
+		return {
+			generationConfig: {
+				temperature: this.config.temperature || 1,
+				topP: this.config.topP || 0.95,
+				topK: this.config.topK || 64,
+				maxOutputTokens: this.config.maxOutputTokens || 8192,
+				responseMimeType: "application/json",
+			},
+			safetySettings: [
+				{
+					category: HarmCategory.HARM_CATEGORY_HARASSMENT,
+					threshold: HarmBlockThreshold.BLOCK_NONE,
+				},
+				{
+					category: HarmCategory.HARM_CATEGORY_HATE_SPEECH,
+					threshold: HarmBlockThreshold.BLOCK_NONE,
+				},
+				{
+					category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
+					threshold: HarmBlockThreshold.BLOCK_NONE,
+				},
+				{
+					category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
+					threshold: HarmBlockThreshold.BLOCK_NONE,
+				},
+			],
+			history: history || [
+				{
+					role: "user",
+					parts: [{ text: this.config.prompt }],
+				},
+				{
+					role: "model",
+					parts: [{ text: "Chat stuff" }],
+				},
+			],
+		};
+	}
+
+	async chat(message, retryCount = 0) {
+		try {
+			let result = await this.session.sendMessage(message);
+			return result;
+		} catch (error) {
+			if (retryCount > 3) {
+				throw new Error(`Gemini API error after ${retryCount} retries: ${error.message}`);
+			}
+			// Recover by removing last history entry and restarting
+			this.session.params.history.pop();
+			await this.start(this.session.params.history);
+			return await this.chat(message, retryCount + 1);
+		}
+	}
+
+	setPrompt(prompt) {
+		this.config.prompt = prompt;
+	}
+
+	getResponse(result) {
+		return result.response.text();
+	}
+
+	async close() {
+		this.session = null;
+	}
+}
+
+module.exports = GeminiProvider;
nodejs/controller/ai/providers/index.js (new file, 25 lines)
@@ -0,0 +1,25 @@
+'use strict';
+
+const GeminiProvider = require('./gemini');
+const OllamaProvider = require('./ollama');
+
+class ProviderFactory {
+	static create(config) {
+		const provider = config.provider || 'gemini';
+
+		switch (provider.toLowerCase()) {
+			case 'gemini':
+				return new GeminiProvider(config);
+			case 'ollama':
+				return new OllamaProvider(config);
+			default:
+				throw new Error(`Unknown AI provider: ${provider}. Supported: 'gemini', 'ollama'`);
+		}
+	}
+}
+
+module.exports = {
+	ProviderFactory,
+	GeminiProvider,
+	OllamaProvider
+};
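A usage sketch for the factory; the config keys mirror those read by the providers, the values here are hypothetical:

const { ProviderFactory } = require('./ai/providers'); // path as seen from controller/

(async () => {
	const provider = ProviderFactory.create({
		provider: 'ollama',                // omit to fall back to 'gemini'
		model: 'llama3.2',
		baseUrl: 'http://localhost:11434',
		prompt: 'You are a chat bot.',     // hypothetical prompt text
	});
	await provider.start();
	const result = await provider.chat('hello');
	console.log(provider.getResponse(result));
})();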
nodejs/controller/ai/providers/ollama.js (new file, 108 lines)
@@ -0,0 +1,108 @@
+'use strict';
+
+const axios = require('axios');
+
+class OllamaProvider {
+	constructor(config) {
+		this.config = config;
+		this.baseUrl = config.baseUrl || 'http://localhost:11434';
+		this.model = config.model || 'llama3.2';
+		this.messages = [];
+	}
+
+	async start(history) {
+		// Convert Gemini-style history to Ollama format if needed
+		this.messages = history || [];
+
+		if (this.config.prompt) {
+			console.log('Ollama provider initialized with model:', this.model);
+		}
+	}
+
+	__settings() {
+		return {
+			temperature: this.config.temperature || 1,
+			top_p: this.config.topP || 0.95,
+			top_k: this.config.topK || 64,
+			num_predict: this.config.maxOutputTokens || 8192,
+		};
+	}
+
+	async chat(message, retryCount = 0) {
+		try {
+			// Build conversation from prompt + history
+			const messages = [
+				{
+					role: 'system',
+					content: this.config.prompt || 'You are a helpful assistant.'
+				},
+				...this.messages.map(msg => ({
+					role: msg.role === 'model' ? 'assistant' : 'user',
+					content: msg.parts ? msg.parts.map(p => p.text).join('') : (msg.content || '')
+				})),
+				{
+					role: 'user',
+					content: message
+				}
+			];
+
+			const response = await axios.post(
+				`${this.baseUrl}/api/chat`,
+				{
+					model: this.model,
+					messages: messages,
+					stream: false,
+					format: 'json', // Request JSON response
+					options: this.__settings()
+				},
+				{
+					timeout: this.config.timeout || 30000,
+					headers: {
+						'Content-Type': 'application/json'
+					}
+				}
+			);
+
+			// Update history
+			this.messages.push({
+				role: 'user',
+				parts: [{ text: message }],
+				content: message
+			});
+
+			this.messages.push({
+				role: 'model',
+				parts: [{ text: response.data.message.content }],
+				content: response.data.message.content
+			});
+
+			// Return in a format compatible with the Ai class
+			return {
+				response: {
+					text: () => response.data.message.content
+				}
+			};
+		} catch (error) {
+			if (retryCount > 3) {
+				throw new Error(`Ollama API error after ${retryCount} retries: ${error.message}`);
+			}
+			// Retry after delay
+			await new Promise(resolve => setTimeout(resolve, 500 * (retryCount + 1)));
+			return await this.chat(message, retryCount + 1);
+		}
+	}
+
+	setPrompt(prompt) {
+		this.config.prompt = prompt;
+	}
+
+	getResponse(result) {
+		return result.response.text();
+	}
+
+	async close() {
+		this.messages = [];
+	}
+}
+
+module.exports = OllamaProvider;
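The chat() method above converts Gemini-style history entries ({ role, parts }) into Ollama chat messages ({ role, content }), renaming 'model' to 'assistant' and joining the parts. A worked example of that mapping:

// One Gemini-style history entry, mapped exactly as chat() does above
const msg = { role: 'model', parts: [{ text: 'hello' }] };
const mapped = {
	role: msg.role === 'model' ? 'assistant' : 'user',
	content: msg.parts ? msg.parts.map(p => p.text).join('') : (msg.content || ''),
};
// mapped → { role: 'assistant', content: 'hello' }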
@@ -138,16 +138,24 @@ class Craft{
 
 		// Move these into openCrating function
 		let windowOnce = (event)=> new Promise((resolve, reject)=> window.once(event, resolve));
-		let inventory = ()=> window.slots.slice(window.inventoryStart, window.inventoryEnd)
+		let inventory = window.slots.slice(window.inventoryStart, window.inventoryEnd);
 
 		// Move the items into the crafting grid
+		// Keep track of used inventory slots to avoid reusing the same slot
+		let usedInventorySlots = new Set();
 		let slotCount = 1;
 		for(let shapeRow of recipe.inShape){
 			for(let shape of shapeRow){
-				this.bot.bot.moveSlotItem(
-					inventory().find((element)=> element && element.type === shape.id).slot,
-					slotCount
+				let inventorySlot = inventory.findIndex((element, index) =>
+					element && element.type === shape.id && !usedInventorySlots.has(index)
 				);
+				if (inventorySlot === -1) {
+					throw new Error(`Not enough items of type ${shape.id} in inventory`);
+				}
+				let actualSlot = window.inventoryStart + inventorySlot;
+				usedInventorySlots.add(inventorySlot);
+
+				this.bot.bot.moveSlotItem(actualSlot, slotCount);
 				await windowOnce(`updateSlot:${slotCount}`);
 				slotCount++;
 			}
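The old find() call always returned the first matching stack, so recipes that need the same item in more than one grid slot would point at the same inventory slot twice; findIndex() plus the usedInventorySlots set advances to the next stack instead. A minimal sketch with a hypothetical two-stack inventory:

const inv = [{ type: 5 }, { type: 5 }]; // two stacks of the same item type
const used = new Set();
for (let i = 0; i < 2; i++) {
	const idx = inv.findIndex((el, j) => el && el.type === 5 && !used.has(j));
	used.add(idx); // 0 on the first pass, 1 on the second
}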
@@ -152,16 +152,24 @@ class CraftChests{
 
 		// Move these into openCrating function
 		let windowOnce = (event)=> new Promise((resolve, reject)=> window.once(event, resolve));
-		let inventory = ()=> window.slots.slice(window.inventoryStart, window.inventoryEnd)
+		let inventory = window.slots.slice(window.inventoryStart, window.inventoryEnd);
 
 		// Move the items into the crafting grid
+		// Keep track of used inventory slots to avoid reusing the same slot
+		let usedInventorySlots = new Set();
 		let slotCount = 1;
 		for(let shapeRow of recipe.inShape){
 			for(let shape of shapeRow){
-				this.bot.bot.moveSlotItem(
-					inventory().find((element)=> element && element.type === shape.id).slot,
-					slotCount
+				let inventorySlot = inventory.findIndex((element, index) =>
+					element && element.type === shape.id && !usedInventorySlots.has(index)
 				);
+				if (inventorySlot === -1) {
+					throw new Error(`Not enough items of type ${shape.id} in inventory`);
+				}
+				let actualSlot = window.inventoryStart + inventorySlot;
+				usedInventorySlots.add(inventorySlot);
+
+				this.bot.bot.moveSlotItem(actualSlot, slotCount);
 				await windowOnce(`updateSlot:${slotCount}`);
 				slotCount++;
 			}