diff --git a/commands/llama.ts b/commands/llama.ts
index 46b89b9..d22f357 100644
--- a/commands/llama.ts
+++ b/commands/llama.ts
@@ -1,21 +1,15 @@
 // Import necessary modules and dependencies
 import { Client, Message } from "whatsapp-web.js";
 import config from "../config.js";
-import { LlamaModel, LlamaContext, LlamaChatSession } from "node-llama-cpp";
-
-const modelPath = config.llama_model_path; // Retrieve model path from environment variable
-
-const model = new LlamaModel({ modelPath });
-const context = new LlamaContext({ model });
-const session = new LlamaChatSession({ context });
+import axios from "../helpers/axios.js";
 
 const execute = async (client: Client, msg: Message, args: string[]) => {
   const chatId = (await msg.getChat()).id._serialized;
 
-  if (!modelPath) {
+  if (!config.cf_worker.url) {
     return client.sendMessage(
       chatId,
-      "Sorry, llama model path not specified in the environment variable."
+      "Sorry, CF worker URL not specified in the environment variable."
     );
   }
 
@@ -28,11 +22,31 @@ const execute = async (client: Client, msg: Message, args: string[]) => {
 
   const text = args.join(" ") || quotedMsg.body;
 
+  // Build the HTTP Basic auth header for the CF worker
+  const username = config.cf_worker.username;
+  const password = config.cf_worker.password;
+
+  const encodedCredentials = Buffer.from(`${username}:${password}`).toString(
+    "base64"
+  );
+  const authHeader = `Basic ${encodedCredentials}`;
+
   // Call Llama model with the obtained text
-  const response = await session.prompt(text);
+  try {
+    const response = await axios.get<{ response: string }>(config.cf_worker.url, {
+      params: {
+        prompt: text,
+      },
+      headers: {
+        Authorization: authHeader,
+      },
+    });
 
-  // Send the response back to the user
-  await client.sendMessage(chatId, `Llama: ${response}`);
+    // Send the response back to the user
+    await client.sendMessage(chatId, `Llama: ${response.data.response}`);
+  } catch {
+    await client.sendMessage(chatId, "LLaMA generation failed.");
+  }
 
   // Optionally, you can handle conversation history or context here
 
diff --git a/config.ts b/config.ts
index 689c31d..f20d1a0 100644
--- a/config.ts
+++ b/config.ts
@@ -10,7 +10,11 @@ const config = {
   default_tr_lang: process.env.DEFAULT_TR_LANG || "en",
   enable_delete_alert: process.env.ENABLE_DELETE_ALERT || "true",
   ocr_space_api_key: process.env.OCR_SPACE_API_KEY || "",
-  llama_model_path: process.env.LLAMA_MODEL_PATH || "",
+  cf_worker: {
+    url: process.env.CF_WORKER_URL,
+    username: process.env.CF_WORKER_USERNAME,
+    password: process.env.CF_WORKER_PASSWORD,
+  },
 };
 
 export default config;
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
index 3ea4283..e570e5c 100644
--- a/docker/docker-compose.yml
+++ b/docker/docker-compose.yml
@@ -33,7 +33,9 @@ services:
       BITLY_API_KEY: ${BITLY_API_KEY}
       OPENAI_API_KEY: ${OPENAI_API_KEY}
       TZ: ${TZ:-Asia/Hong_Kong}
-      LLAMA_MODEL_PATH: ${LLAMA_MODEL_PATH}
+      CF_WORKER_URL: ${CF_WORKER_URL}
+      CF_WORKER_USERNAME: ${CF_WORKER_USERNAME}
+      CF_WORKER_PASSWORD: ${CF_WORKER_PASSWORD}
     networks:
       - whatsbot-network
   wtstodis-mongo:
diff --git a/example.env b/example.env
index 6ee25dd..7957d7e 100644
--- a/example.env
+++ b/example.env
@@ -19,4 +19,6 @@ DISCORD_OWNER_ID=""
 BITLY_API_KEY=""
 OPENAI_API_KEY=""
 TZ="Asia/Hong_Kong"
-LLAMA_MODEL_PATH="llama/openllama-3b-v2-q4_0.gguf"
\ No newline at end of file
+CF_WORKER_URL=
+CF_WORKER_USERNAME=
+CF_WORKER_PASSWORD=
\ No newline at end of file
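
Note: this diff only covers the client side. It assumes a Cloudflare Worker is already deployed at CF_WORKER_URL that accepts HTTP Basic auth, reads the text from a `prompt` query parameter, and replies with a JSON body of the shape `{ response: string }`. A minimal sketch of such a worker is shown below, assuming Cloudflare's Workers AI binding (`env.AI`); the model id and the `AUTH_USERNAME`/`AUTH_PASSWORD` secret names are illustrative, not part of this change.

```ts
// Minimal sketch of the CF Worker this diff assumes (not part of this repo).
// Contract: HTTP Basic auth, a `prompt` query parameter, and a JSON reply of
// the shape { response: string }. The AI binding shape and model id below
// are assumptions.

export interface Env {
  // Workers AI binding (shape simplified); configured via wrangler.toml
  AI: {
    run(model: string, inputs: { prompt: string }): Promise<{ response?: string }>;
  };
  AUTH_USERNAME: string; // hypothetical secret; must match CF_WORKER_USERNAME
  AUTH_PASSWORD: string; // hypothetical secret; must match CF_WORKER_PASSWORD
}

export default {
  async fetch(request: Request, env: Env): Promise<Response> {
    // Reject requests whose Basic auth header does not match the configured credentials
    const expected = "Basic " + btoa(`${env.AUTH_USERNAME}:${env.AUTH_PASSWORD}`);
    if (request.headers.get("Authorization") !== expected) {
      return new Response("Unauthorized", { status: 401 });
    }

    // The bot sends the text as a `prompt` query parameter
    const prompt = new URL(request.url).searchParams.get("prompt");
    if (!prompt) {
      return new Response("Missing prompt", { status: 400 });
    }

    // Run a Llama model through Workers AI; the model id is an assumption
    const result = await env.AI.run("@cf/meta/llama-2-7b-chat-int8", { prompt });

    // Shape matches what commands/llama.ts reads: response.data.response
    return Response.json({ response: result.response ?? "" });
  },
};
```

Since the credentials travel as a Base64-encoded Basic auth header, the worker URL should be HTTPS-only (workers.dev URLs are). The bot's try/catch above then covers both network failures and non-2xx replies, since axios rejects on those.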