diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 0000000..1ae36c3
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,13 @@
+root = true
+
+[*]
+indent_style = space
+indent_size = 2
+end_of_line = lf
+charset = utf-8
+trim_trailing_whitespace = true
+insert_final_newline = true
+
+[*.mjs]
+indent_style = space
+indent_size = 2
diff --git a/Chat.mjs b/Chat.mjs
index 2089b64..def3526 100644
--- a/Chat.mjs
+++ b/Chat.mjs
@@ -9,7 +9,7 @@ import { encode } from "gpt-tokenizer/esm/model/davinci-codex"; // tokenizer
 
 // Map of model shortcodes to full model names
 export const MODELS = {
-  g: 'gpt-4o', 
+  g: 'gpt-4o',
   G: 'gpt-4-32k-0314',
   h: 'claude-3-haiku-20240307',
   s: 'claude-3-5-sonnet-20240620',
@@ -20,18 +20,40 @@ export const MODELS = {
   I: 'gemini-1.5-pro-latest'
 };
 
+const DEFAULT_MODEL = "s";
+
+// Select which model to use based on a string. When empty, use the default model.
+export function selectModel(model) {
+  return MODELS[model] || model || MODELS[DEFAULT_MODEL];
+}
+
+// Create a new Chat interface object.
+function newChat(ask, getMessages) {
+  return { ask, getMessages };
+}
+
 // Factory function to create a stateful OpenAI chat
-export function openAIChat(clientClass) {
+function openAIChat(clientClass, { system, model, temperature = 0.0, max_tokens = 4096, stream = true, old_messages }) {
   const messages = [];
 
-  async function ask(userMessage, { system, model, temperature = 0.0, max_tokens = 4096, stream = true }) {
+  if (system) {
+    messages.push({ role: "system", content: system });
+  }
+
+  // Ignore "system" messages (as they are added above).
+  if (old_messages) {
+    old_messages.forEach(m => {
+      if (m.role != "system") {
+        messages.push(m);
+      }
+    });
+  }
+
+  async function ask(userMessage) {
     model = MODELS[model] || model;
 
     const client = new clientClass({ apiKey: await getToken(clientClass.name.toLowerCase()) });
 
-    if (messages.length === 0) {
-      messages.push({ role: "system", content: system });
-    }
-
     messages.push({ role: "user", content: userMessage });
 
     const params = { messages, model, temperature, max_tokens, stream };
@@ -50,14 +72,25 @@ export function openAIChat(clientClass) {
     return result;
   }
 
-  return ask;
+  const getMessages = () => messages;
+
+  return newChat(ask, getMessages);
 }
 
 // Factory function to create a stateful Anthropic chat
-export function anthropicChat(clientClass) {
+function anthropicChat(clientClass, { system, model, temperature = 0.0, max_tokens = 4096, stream = true, old_messages }) {
   const messages = [];
 
-  async function ask(userMessage, { system, model, temperature = 0.0, max_tokens = 4096, stream = true }) {
+  // Ignore "system" messages (they are an arg in the call).
+  if (old_messages) {
+    old_messages.forEach(m => {
+      if (m.role != "system") {
+        messages.push(m);
+      }
+    });
+  }
+
+  async function ask(userMessage) {
     model = MODELS[model] || model;
 
     const client = new clientClass({ apiKey: await getToken(clientClass.name.toLowerCase()) });
 
@@ -70,7 +103,7 @@ export function anthropicChat(clientClass) {
       .stream({ ...params, messages })
       .on('text', (text) => {
         process.stdout.write(text);
-        result += text; 
+        result += text;
       });
 
     await response.finalMessage();
@@ -79,13 +112,24 @@ export function anthropicChat(clientClass) {
     return result;
   }
 
-  return ask;
+  const getMessages = () => messages;
+
+  return newChat(ask, getMessages);
 }
 
-export function geminiChat(clientClass) {
+function geminiChat(clientClass, { system, model, temperature = 0.0, max_tokens = 4096, stream = true, old_messages }) {
   const messages = [];
 
-  async function ask(userMessage, { system, model, temperature = 0.0, max_tokens = 4096, stream = true }) {
+  // Convert to the format used by Gemini.
+  if (old_messages) {
+    old_messages.forEach(m => {
+      if (m.role != "system") {
+        messages.push({ role: m.role, parts: [{ text: m.content }] });
+      }
+    });
+  }
+
+  async function ask(userMessage) {
     model = MODELS[model] || model;
 
     const client = new clientClass(await getToken(clientClass.name.toLowerCase()));
 
@@ -117,20 +161,27 @@ export function geminiChat(clientClass) {
     return result;
   }
 
-  return ask;
+  // Gemini messages do not have the same format as OpenAI/Anthropic ones.
+  // Unwrap the inner `parts` list and rename the 'model' role to 'assistant'.
+  const getMessages = () => messages.map(m => ({
+    role: m.role == "model" ? "assistant" : m.role,
+    content: m.parts[0].text,
+  }));
+
+  return newChat(ask, getMessages);
 }
 
-// Generic asker function that dispatches to the correct asker based on the model name
-export function chat(model) {
+// Generic factory that dispatches to the correct chat implementation based on the model name
+export function createChat(model, opts) {
   model = MODELS[model] || model;
   if (model.startsWith('gpt')) {
-    return openAIChat(OpenAI);
+    return openAIChat(OpenAI, { model, ...opts });
   } else if (model.startsWith('claude')) {
-    return anthropicChat(Anthropic);
+    return anthropicChat(Anthropic, { model, ...opts });
   } else if (model.startsWith('llama')) {
-    return openAIChat(Groq);
+    return openAIChat(Groq, { model, ...opts });
   } else if (model.startsWith('gemini')) {
-    return geminiChat(GoogleGenerativeAI);
+    return geminiChat(GoogleGenerativeAI, { model, ...opts });
   } else {
     throw new Error(`Unsupported model: ${model}`);
   }
@@ -151,7 +202,7 @@ export function tokenCount(inputText) {
   // Encode the input string into tokens
   const tokens = encode(inputText);
 
-  // Get the number of tokens 
+  // Get the number of tokens
   const numberOfTokens = tokens.length;
 
   // Return the number of tokens
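// A minimal sketch of how the reworked interface above is consumed (model
// shortcodes as listed in MODELS; the reply streams to stdout and is also
// returned as a string):
//
//   import { createChat, selectModel } from './Chat.mjs';
//
//   const model = selectModel("s");   // shortcode -> 'claude-3-5-sonnet-20240620'
//   const chat  = createChat(model, { system: "Be terse." });
//   const reply = await chat.ask("hello");
//   const log   = chat.getMessages(); // provider-neutral [{ role, content }, ...]
//
// Passing a getMessages()-style list as `old_messages` replays an earlier
// transcript into the new chat, regardless of which provider produced it.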
diff --git a/Claude.mjs b/Claude.mjs
deleted file mode 100644
index fd9dea5..0000000
--- a/Claude.mjs
+++ /dev/null
@@ -1,33 +0,0 @@
-import { Anthropic } from '@anthropic-ai/sdk';
-import fs from 'fs/promises';
-import path from 'path';
-
-async function getAnthropicKey() {
-  const keyPath = path.join(process.env.HOME, '.config', 'anthropic.token');
-  return (await fs.readFile(keyPath, 'utf8')).trim();
-}
-
-export async function ask({ system, prompt, max_tokens, model = 'claude-3-opus-20240229', temperature = 1, debug = true }) {
-  const anthropic = new Anthropic({ apiKey: await getAnthropicKey() });
-  if (debug) {
-    const stream = anthropic.messages.stream({
-      model,
-      messages: [{ role: 'user', content: prompt }],
-      max_tokens: max_tokens || 4096,
-      temperature,
-      ...(system && { system }),
-    }).on('text', (text) => process.stdout.write(text));
-    const message = await stream.finalMessage();
-    console.log(); // Add a newline at the end
-    return message.content[0].text;
-  } else {
-    const message = await anthropic.messages.create({
-      model,
-      messages: [{ role: 'user', content: prompt }],
-      max_tokens: max_tokens || 4096,
-      temperature,
-      ...(system && { system }),
-    });
-    return message.content[0].text;
-  }
-}
diff --git a/GPT.mjs b/GPT.mjs
deleted file mode 100644
index 7c404eb..0000000
--- a/GPT.mjs
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/usr/bin/env node
-
-import process from "process";
-import OpenAI from "openai";
-import fs from "fs/promises";
-import os from "os";
-import path from "path";
-import { encode } from "gpt-tokenizer/esm/model/davinci-codex"; // tokenizer
-
-const openai = new OpenAI({apiKey: await get_token()});
-
-export async function get_token() {
-  const tokenPath = path.join(os.homedir(), ".config", "openai.token");
-  try {
-    const token = (await fs.readFile(tokenPath, "utf8")).trim();
-    return token;
-  } catch (err) {
-    if (err.code === "ENOENT") {
-      console.error("Error: openai.token file not found in `~/.config/openai.token`.");
-      console.error("Please make sure the file exists and contains your OpenAI API token.");
-    } else {
-      console.error("Error reading openai.token file:", err.message);
-    }
-    process.exit(1);
-  }
-}
-
-export async function ask({system, prompt, model, temperature}) {
-  const stream = await openai.chat.completions.create({
-    model: model || "gpt-4-turbo-2024-04-09",
-    messages: [
-      {role: "system", content: system || "You're a helpful assistant." },
-      {role: "user", content: prompt || "What time is it?" }
-    ],
-    stream: true,
-    temperature: temperature || 0,
-  });
-  var result = "";
-  for await (const chunk of stream) {
-    var text = chunk.choices[0]?.delta?.content || "";
-    process.stdout.write(text);
-    result += text;
-  }
-  process.stdout.write("\n");
-  return result;
-}
-
-export function token_count(inputText) {
-  // Encode the input string into tokens
-  const tokens = encode(inputText);
-
-  // Get the number of tokens
-  const numberOfTokens = tokens.length;
-
-  // Return the number of tokens
-  return numberOfTokens;
-}
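// Both deleted modules are superseded by Chat.mjs. A call site migrates from
// the old one-shot helpers to the stateful interface roughly like this
// (sketch; `model`, `system`, `temperature`, and `prompt` stand in for the
// caller's own values):
//
//   // before: import { ask } from './GPT.mjs';   // or './Claude.mjs'
//   //         const answer = await ask({ system, prompt, model, temperature });
//
//   // after:
//   const chat   = createChat(model, { system, temperature });
//   const answer = await chat.ask(prompt);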
diff --git a/README.md b/README.md
index 793d2cb..1a30530 100644
--- a/README.md
+++ b/README.md
@@ -8,5 +8,3 @@ Some AI scripts I use daily.
 - `aiemu`: moved to [here](https://github.com/victorTaelin/aiemu)
 - `chatsh [model]`: like ChatGPT but in the terminal
-
-TODO: remove `Claude.mjs`/`GPT.mjs` and just use `Ask.mjs` in all files
diff --git a/aiemu.mjs b/aiemu.mjs
index ef2e921..8d603eb 100755
--- a/aiemu.mjs
+++ b/aiemu.mjs
@@ -2,9 +2,9 @@
 
 import process from "process";
 import fs from 'fs/promises';
-import { chat, MODELS } from './Chat.mjs';
+import { createChat, selectModel } from './Chat.mjs';
 
-const MODEL = process.argv[2] || "s";
+const MODEL = selectModel(process.argv[2]);
 
 const SYSTEM = `
 You're a game emulator. You can emulate ANY game, but text-based. Your goal is
@@ -42,11 +42,11 @@ Here are some examples of how your game screen should look.
 You're in a PokΓ©mon battle.
 ,-----------------------------,
-  Blastoise LV30   [πŸ’¦πŸ’πŸ’£] 
-  HP: |||.......   [πŸ”«πŸšπŸ›‘οΈ] 
- 
-  Charizard LV32   [πŸ”₯πŸ‰πŸ¦‡] 
-  HP: ||||||....   [πŸŒ‹πŸ¦–πŸ˜€] 
+  Blastoise LV30   [πŸ’¦πŸ’πŸ’£]
+  HP: |||.......   [πŸ”«πŸšπŸ›‘οΈ]
+
+  Charizard LV32   [πŸ”₯πŸ‰πŸ¦‡]
+  HP: ||||||....   [πŸŒ‹πŸ¦–πŸ˜€]
 '-----------------------------'
 
 A) FIGHT
 B) PKMN
@@ -58,19 +58,19 @@ D) RUN
 You're in Odolwa's boss room in Woodfall Temple. Odolwa is dancing and swinging his swords menacingly.
 ,--------------------------------------------------,
-  HP   ❀️ ❀️ ❀️ 🀍🀍🀍🀍 
-  MANA 🟩🟩🟩⬜⬜⬜⬜⬜⬜⬜ 
- 
-  Link      Navi   Door0 
-  [πŸ—‘οΈπŸ§πŸ›‘οΈ]   [🧚]   [πŸšͺπŸ”’] 
- 
-  Odolwa    Jar    Door1   Chest 
-  [πŸ—‘οΈπŸŽ­πŸ—‘οΈ]   [🏺]   [πŸšͺπŸ”’]   [πŸŽπŸ”’] 
- 
-  Grs0      Grs1   Grs2 
-  [🌿]      [🌿]   [🌿] 
- 
-  πŸ’Ž 000   πŸ•’ 7 AM :: β˜€οΈ 1st Day 
+  HP   ❀️ ❀️ ❀️ 🀍🀍🀍🀍
+  MANA 🟩🟩🟩⬜⬜⬜⬜⬜⬜⬜
+
+  Link      Navi   Door0
+  [πŸ—‘οΈπŸ§πŸ›‘οΈ]   [🧚]   [πŸšͺπŸ”’]
+
+  Odolwa    Jar    Door1   Chest
+  [πŸ—‘οΈπŸŽ­πŸ—‘οΈ]   [🏺]   [πŸšͺπŸ”’]   [πŸŽπŸ”’]
+
+  Grs0      Grs1   Grs2
+  [🌿]      [🌿]   [🌿]
+
+  πŸ’Ž 000   πŸ•’ 7 AM :: β˜€οΈ 1st Day
 '--------------------------------------------------'
 
 A) Talk to Navi
 B) Enter Door0
@@ -85,16 +85,16 @@ H) Check Grs2
 You're in the main entrance hall of Princess Peach's castle.
 ,---------------------------------.
-  πŸ„x4             🌟x7 
- 
-  Door0    Door1    Door2 
-  [πŸšͺ🌟]   [πŸšͺπŸ”’]   [πŸšͺ0] 
- 
-  Door3    Door4    Door5    Door6 
-  [πŸšͺ0]    [πŸšͺ3]    [πŸšͺ7]    [πŸšͺ1] 
- 
-  Exit     Mario    Coin0    Coin1 
-  [πŸšͺ]     [πŸ„]     [🟑]     [🟑] 
+  πŸ„x4             🌟x7
+
+  Door0    Door1    Door2
+  [πŸšͺ🌟]   [πŸšͺπŸ”’]   [πŸšͺ0]
+
+  Door3    Door4    Door5    Door6
+  [πŸšͺ0]    [πŸšͺ3]    [πŸšͺ7]    [πŸšͺ1]
+
+  Exit     Mario    Coin0    Coin1
+  [πŸšͺ]     [πŸ„]     [🟑]     [🟑]
 '---------------------------------'
 
 A) Enter Door0
 B) Enter Door1
@@ -110,16 +110,16 @@ J) Exit
 //# Example: PokΓ©mon Red - Title Screen
 
 ,-------------------------------,
-          PokΓ©mon 
-            Red 
- 
-         [πŸ”₯πŸ‰πŸ¦‡] 
- 
-      Β©1996 Nintendo 
-      Creatures Inc. 
-      GAME FREAK inc. 
- 
-    Press Start Button 
+          PokΓ©mon
+            Red
+
+         [πŸ”₯πŸ‰πŸ¦‡]
+
+      Β©1996 Nintendo
+      Creatures Inc.
+      GAME FREAK inc.
+
+    Press Start Button
 '-------------------------------'
 
 A) New Game
 B) Continue
@@ -128,18 +128,18 @@ C) Options
 //# Example: PokΓ©mon Red - Introduction
 
 ,-------------------------------.
- 
-  OAK 
-  Hello there! Welcome to the 
-  world of POKΓ‰MON! 
- 
-  OAK 
-  My name is OAK! 
-  People call me the 
-  POKΓ‰MON PROF! 
- 
-  NIDORANβ™‚ 
-  [πŸ­πŸ’œπŸ¦] 
+
+  OAK
+  Hello there! Welcome to the
+  world of POKΓ‰MON!
+
+  OAK
+  My name is OAK!
+  People call me the
+  POKΓ‰MON PROF!
+
+  NIDORANβ™‚
+  [πŸ­πŸ’œπŸ¦]
 '-------------------------------'
 
 A) Next
@@ -147,16 +147,16 @@ A) Next
 You're in Pallet Town, your hometown.
 ,--------------------------,
-  🌳 [Route 1] 🌳 
- 
-  House0    House1 
-  [🏠]      [🏠] 
- 
-  Grass     Oak's Lab 
-  [🌿]      [🏫] 
- 
-  Beach     Sign  🌸 
-  [🌊]      [πŸͺ§]  🌼 
+  🌳 [Route 1] 🌳
+
+  House0    House1
+  [🏠]      [🏠]
+
+  Grass     Oak's Lab
+  [🌿]      [🏫]
+
+  Beach     Sign  🌸
+  [🌊]      [πŸͺ§]  🌼
 '--------------------------'
 
 A) Enter House0
 B) Enter House1
@@ -169,11 +169,11 @@ F) Exit to Route 1
 You're inside your house in Pallet Town.
 ,---------------------------.
-  PC    TV    Stairs 
-  [πŸ’»]  [πŸ“Ί]  [β”—β”“] 
- 
-  Bed   You 
-  [πŸ›οΈ]  [πŸ‘¦] 
+  PC    TV    Stairs
+  [πŸ’»]  [πŸ“Ί]  [β”—β”“]
+
+  Bed   You
+  [πŸ›οΈ]  [πŸ‘¦]
 '---------------------------'
 
 A) Check the PC
 B) Play SNES on TV
@@ -183,23 +183,23 @@ B) Go Downstairs
 //# Example: The Legend of Zelda - Majora's Mask - Title Screen
 
 ,------------------------------------------,
- 
-            The Legend of 
-               Zelda 
-           Majora's Mask 
- 
-             [πŸŽ­πŸ˜ˆπŸŒ™] 
- 
-            Press Start 
- 
- 
-  Β©2000 Nintendo. All Rights Reserved. 
+
+            The Legend of
+               Zelda
+           Majora's Mask
+
+             [πŸŽ­πŸ˜ˆπŸŒ™]
+
+            Press Start
+
+
+  Β©2000 Nintendo. All Rights Reserved.
 '------------------------------------------'
 
 A) PRESS START
 B) OPTIONS
 
-IMPORTANT: 
-- You ARE the videogame. Stay in character. 
+IMPORTANT:
+- You ARE the videogame. Stay in character.
 - Start from the game's initial menus and emulate each level in order.
 - Emulate the game loyally, following its original sequence of events.
 - Design a well-aligned UI for each screen. Position elements in 2D.
@@ -217,17 +217,17 @@ If the player provides feedback after a '#', use it to improve the experience.
   console.clear();
 
   const ASCII_ART = `
-\x1b[1m\x1b[36mβ–ˆβ–€β–€β–€β–€β–€β–ˆ β–€ β–„β–€β–„ β–ˆβ–€β–€β–€β–€β–€β–ˆ\x1b[0m 
+\x1b[1m\x1b[36mβ–ˆβ–€β–€β–€β–€β–€β–ˆ β–€ β–„β–€β–„ β–ˆβ–€β–€β–€β–€β–€β–ˆ\x1b[0m
 \x1b[1m\x1b[36mβ–ˆ β–ˆβ–ˆβ–ˆ β–ˆ β–€ β–€β–ˆβ–€ β–ˆ β–ˆβ–ˆβ–ˆ β–ˆ\x1b[0m
 \x1b[1m\x1b[36mβ–ˆ β–€β–€β–€ β–ˆ β–ˆ β–„β–ˆβ–„ β–ˆ β–€β–€β–€ β–ˆ\x1b[0m
 \x1b[1m\x1b[36mβ–€β–€β–€β–€β–€β–€β–€ β–€ β–€β–€β–€ β–€β–€β–€β–€β–€β–€β–€\x1b[0m
 
-\x1b[2mA I E M U L A T O R\x1b[0m 
+\x1b[2mA I E M U L A T O R\x1b[0m
 `.trim();
 
   console.log(ASCII_ART);
   console.log("");
-  console.log(`\x1b[32mUsing \x1b[1m${MODELS[MODEL]||MODEL}\x1b[0m`);
+  console.log(`\x1b[32mUsing \x1b[1m${MODEL}\x1b[0m`);
   console.log("");
 
   process.stdout.write("Game: ");
@@ -235,7 +235,12 @@
 
   console.log(`Emulating ${game}...\n\n`);
 
-  const ask = chat(MODEL);
+  const chatOpts = {
+    system: SYSTEM,
+    max_tokens: 2048,
+    temperature: 0.5,
+  };
+  const chat = createChat(MODEL, chatOpts);
   let messages = [
     {role: "user", content: `# GAME: ${game}`},
   ];
@@ -243,12 +248,7 @@
 
   while (true) {
     console.clear();
-    const response = await ask(messages[messages.length - 1].content, {
-      system: SYSTEM,
-      model: MODEL,
-      max_tokens: 2048,
-      temperature: 0.5,
-    });
+    const response = await chat.ask(messages[messages.length - 1].content);
 
     messages.push({role: "assistant", content: response});
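// Since state now lives in the chat object, the emulator loop only ever sends
// the newest user message; earlier turns are kept in the object's own
// transcript instead of being re-sent by the caller. The loop's shape, as a
// sketch (readPlayerAction() is a hypothetical stand-in for the readline
// handling in aiemu.mjs):
//
//   const chat = createChat(MODEL, { system: SYSTEM, max_tokens: 2048, temperature: 0.5 });
//   let input = `# GAME: ${game}`;
//   while (true) {
//     const screen = await chat.ask(input); // streams the next game screen
//     input = await readPlayerAction();
//   }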
diff --git a/chatsh.mjs b/chatsh.mjs
index c68dcdf..086a9e5 100755
--- a/chatsh.mjs
+++ b/chatsh.mjs
@@ -1,18 +1,17 @@
 #!/usr/bin/env node
-import readline from 'readline';
+import readline from 'readline/promises';
 import { exec } from 'child_process';
 import { promisify } from 'util';
-import { chat, MODELS } from './Chat.mjs';
+import { createChat, selectModel, MODELS } from './Chat.mjs';
+import { Command, Argument } from 'commander';
 
 const execAsync = promisify(exec);
 
-// Default model if not specified
-const DEFAULT_MODEL = "s";
-// Get model from environment variable or use default
-const MODEL = process.argv[2] || DEFAULT_MODEL;
+// Get model from the command line, or fall back to the default.
+let model = selectModel(process.argv[2]);
 
-console.log(`Welcome to ChatSH. Model: ${MODELS[MODEL]||MODEL}\n`);
+console.log(`Welcome to ChatSH. Model: ${model}\n`);
 
 // System prompt to set the assistant's behavior
 const SYSTEM_PROMPT = `You are ChatSH, an AI language model that specializes in assisting users with tasks on their system using shell commands. ChatSH operates in two modes: COMMAND MODE and CHAT MODE.
@@ -219,17 +218,116 @@ Done.
 const rl = readline.createInterface({
   input: process.stdin,
   output: process.stdout,
-  terminal: true 
+  terminal: true
 });
 
-// Create a stateful asker
-const ask = chat(MODEL);
+// Create a stateful chat
+let chat = createChat(model, { system: SYSTEM_PROMPT });
+
+// Prompts.
+const CMDPROMPT = ': ';
+const CHATPROMPT = 'Ξ» ';
+
+// Process a single command when in command mode.
+async function processCommand(input) {
+  if (input == '') {
+    return;
+  }
+
+  // Split input into arguments.
+  const regex = new RegExp('"[^"]+"|[\\S]+', 'g');
+  const argv = input.match(regex).filter(e => !!e).map(e => e.replace(/"/g, ''));
+
+  // Set up the handlers for commands.
+  const program = new Command();
+  program
+    .command('show')
+    .summary('Debug an internal var')
+    .addArgument(new Argument('<varname>', 'Which var to show').choices([
+      'messages', 'model']))
+    .action((varname) => {
+      switch (varname) {
+        case 'messages':
+          console.log(chat.getMessages());
+          break;
+        case 'model':
+          console.log(`Current model: ${model}`);
+          break;
+        default:
+          console.log(`Unknown var ${varname}`);
+      }
+    })
+    .exitOverride();
+
+  program
+    .command('model')
+    .summary('Change active model')
+    .argument('[newModelName]', 'New model name')
+    .action(newModelName => {
+      if (!newModelName) {
+        console.log('Known Models');
+        console.log(MODELS);
+        return;
+      }
+
+      // Replace chat.
+      const newModel = selectModel(newModelName);
+      const old_messages = chat.getMessages();
+      chat = createChat(newModel, { system: SYSTEM_PROMPT, old_messages });
+      model = newModel;
+    })
+    .exitOverride();
+
+  program.exitOverride();
+
+  try {
+    program.parse(argv, { from: 'user' });
+  } catch (err) {
+    if (err.code == 'commander.help' || err.code == 'commander.helpDisplayed') {
+      return;
+    }
+    console.log(`Exception processing command: ${err}`);
+  }
+}
+
+// Loop waiting for a chat prompt (including going into command prompt when
+// requested).
+async function promptLoop() {
+  let prompt = CHATPROMPT;
+  while (true) {
+    let abort = new AbortController();
+
+    // On the first keypress, check whether the user is switching modes.
+    function firstKey(c, k) {
+      if (rl.line == ':') {
+        abort.abort();
+      }
+    }
+
+    process.stdin.on('keypress', firstKey);
+    try {
+      process.stdout.write('\x1b[1m'); // bold text
+      let line = await rl.question(prompt, { signal: abort.signal });
+      process.stdout.write('\x1b[0m'); // reset style
+      process.stdin.removeListener('keypress', firstKey);
+
+      if (prompt == CHATPROMPT) {
+        // Got a chat-mode line; hand it back to the caller.
+        return line;
+      }
+
+      // Command mode: run the command, then prompt again.
+      await processCommand(line);
+    } catch (exception) {
+      // Aborted. Toggle between chat and command modes.
+      process.stdout.moveCursor(-1, -1);
+      process.stdout.clearLine(1);
+      process.stdin.removeListener('keypress', firstKey);
+      prompt = prompt == CHATPROMPT ? CMDPROMPT : CHATPROMPT;
+    }
+  }
-
-// Utility function to prompt the user for input
-async function prompt(query) {
-  return new Promise(resolve => {
-    rl.question(query, resolve);
-  });
 }
 
 // If there are words after the 'chatsh', set them as the initialUserMessage
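// The mode toggle above hinges on one readline/promises feature: a pending
// question() can be cancelled through an AbortSignal, and the resulting
// rejection is what flips the prompt. Reduced to its core (standalone sketch,
// not part of the patch):
//
//   import readline from 'readline/promises';
//
//   const rl = readline.createInterface({ input: process.stdin, output: process.stdout, terminal: true });
//   const ac = new AbortController();
//   process.stdin.on('keypress', () => { if (rl.line == ':') ac.abort(); });
//   try {
//     const line = await rl.question('Ξ» ', { signal: ac.signal });
//   } catch {
//     // Aborted: the user typed ':' as the first character.
//   }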
@@ -245,25 +343,24 @@ async function main() {
       userMessage = initialUserMessage;
       initialUserMessage = null;
     } else {
-      process.stdout.write('\x1b[1m'); // blue color
-      userMessage = await prompt('Ξ» ');
-      process.stdout.write('\x1b[0m'); // reset color
+      userMessage = await promptLoop();
     }
-    
+
     try {
       const fullMessage = userMessage.trim() !== ''
         ? `<SYSTEM>\n${lastOutput.trim()}\n</SYSTEM>\n<USER>\n${userMessage}\n</USER>\n`
        : `<SYSTEM>\n${lastOutput.trim()}\n</SYSTEM>`;
-      const assistantMessage = await ask(fullMessage, { system: SYSTEM_PROMPT, model: MODEL });
-      console.log(); 
-      
+      const assistantMessage = await chat.ask(fullMessage);
+      console.log();
+
       const code = extractCode(assistantMessage);
       lastOutput = "";
+
       if (code) {
         console.log("\x1b[31mPress enter to execute, or 'N' to cancel.\x1b[0m");
-        const answer = await prompt('');
+        const answer = await rl.question("");
         // TODO: delete the warning above from the terminal
         process.stdout.moveCursor(0, -2);
         process.stdout.clearLine(2);
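// The `model` command works because transcripts are provider-neutral:
// getMessages() always yields OpenAI-style { role, content } entries, and
// every factory accepts them as `old_messages`. The swap, as a sketch:
//
//   let chat = createChat(selectModel("s"), { system: SYSTEM_PROMPT });
//   await chat.ask("remember the number 42");
//
//   chat = createChat(selectModel("g"), {
//     system: SYSTEM_PROMPT,
//     old_messages: chat.getMessages(), // replayed into the new provider
//   });
//   await chat.ask("which number did I ask you to remember?");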
console.log("holes_found:", holes); console.log("token_count:", tokens); -console.log("model_label:", MODELS[model] || model); +console.log("model_label:", model); if (holes === "??") { console.log("next_filled: ??"); var prompt = "\n" + mini_code.replace("??", "{{FILL_HERE}}") + "\n"; - var answer = await ask(prompt, {system, model}); + var answer = await chat.ask(prompt, {system, model}); var match = answer.match(/([\s\S]*?)<\/COMPLETION>/); if (match) { file_code = file_code.replace("??", match[1]); @@ -155,7 +155,7 @@ if (holes === "??") { for (let hole of holes) { console.log("next_filled: " + hole + "..."); var prompt = "\n" + mini_code + "\n"; - var answer = await ask(prompt, {system, model}); + var answer = await chat.ask(prompt); var match = answer.match(/([\s\S]*?)<\/COMPLETION>/); if (match) { file_code = file_code.replace(hole, match[1]); diff --git a/package-lock.json b/package-lock.json index b848f27..de287db 100644 --- a/package-lock.json +++ b/package-lock.json @@ -12,6 +12,7 @@ "@anthropic-ai/sdk": "^0.19.1", "@google/generative-ai": "^0.12.0", "@portkey-ai/gateway": "^1.4.0", + "commander": "^12.1.0", "gateway": "^1.0.0", "gpt-tokenizer": "^2.1.2", "groq-sdk": "^0.3.2", @@ -22,7 +23,9 @@ "bin": { "aiemu": "aiemu.mjs", "chatsh": "chatsh.mjs", - "holefill": "holefill.mjs" + "csh": "chatsh.mjs", + "holefill": "holefill.mjs", + "refactor": "refactor.mjs" }, "devDependencies": { "tsx": "^4.16.2" @@ -686,6 +689,14 @@ "node": ">= 0.8" } }, + "node_modules/commander": { + "version": "12.1.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-12.1.0.tgz", + "integrity": "sha512-Vw8qHK3bZM9y/P10u3Vib8o/DdkvA2OtPtZvD871QKjy74Wj1WSKFILMPRPSdUSx5RFK1arlJzEtA4PkFgnbuA==", + "engines": { + "node": ">=18" + } + }, "node_modules/crypt": { "version": "0.0.2", "resolved": "https://registry.npmjs.org/crypt/-/crypt-0.0.2.tgz", diff --git a/package.json b/package.json index 840865e..39a1594 100644 --- a/package.json +++ b/package.json @@ -23,6 +23,7 @@ "@anthropic-ai/sdk": "^0.19.1", "@google/generative-ai": "^0.12.0", "@portkey-ai/gateway": "^1.4.0", + "commander": "^12.1.0", "gateway": "^1.0.0", "gpt-tokenizer": "^2.1.2", "groq-sdk": "^0.3.2", diff --git a/refactor.mjs b/refactor.mjs index 246b239..c612577 100755 --- a/refactor.mjs +++ b/refactor.mjs @@ -2,7 +2,7 @@ import fs from 'fs/promises'; import os from 'os'; import path from 'path'; import process from "process"; -import { chat, MODELS, tokenCount } from './Chat.mjs'; +import { createChat, selectModel, tokenCount } from './Chat.mjs'; import { exec } from 'child_process'; import { promisify } from 'util'; @@ -139,11 +139,11 @@ async function main() { const file = process.argv[2]; const request = process.argv[3]; - const model = process.argv[4] || "s"; + const model = selectModel(process.argv[4]); const check = process.argv.includes("--check"); // Initialize the chat function with the specified model - const ask = chat(model); + const chat = createChat(model, { system }); // Get directory and file information const dir = path.dirname(file); @@ -162,8 +162,8 @@ async function main() { // Main interaction loop with the AI while (true) { console.log(""); - const aiOutput = await ask(aiInput, { system, model }); - + const aiOutput = await chat.ask(aiInput); + // Handle AI's request for additional information if (aiOutput.includes("")) { const showMatch = aiOutput.match(/([\s\S]*?)<\/SHOW>/); @@ -182,7 +182,7 @@ async function main() { } aiInput = showContent; } - } + } // Handle AI's refactoring result else if 
diff --git a/refactor.mjs b/refactor.mjs
index 246b239..c612577 100755
--- a/refactor.mjs
+++ b/refactor.mjs
@@ -2,7 +2,7 @@
 import fs from 'fs/promises';
 import os from 'os';
 import path from 'path';
 import process from "process";
-import { chat, MODELS, tokenCount } from './Chat.mjs';
+import { createChat, selectModel, tokenCount } from './Chat.mjs';
 import { exec } from 'child_process';
 import { promisify } from 'util';
 
@@ -139,11 +139,11 @@ async function main() {
   const file = process.argv[2];
   const request = process.argv[3];
-  const model = process.argv[4] || "s";
+  const model = selectModel(process.argv[4]);
   const check = process.argv.includes("--check");
 
   // Initialize the chat function with the specified model
-  const ask = chat(model);
+  const chat = createChat(model, { system });
 
   // Get directory and file information
   const dir = path.dirname(file);
@@ -162,8 +162,8 @@ async function main() {
   // Main interaction loop with the AI
   while (true) {
     console.log("");
-    const aiOutput = await ask(aiInput, { system, model });
-    
+    const aiOutput = await chat.ask(aiInput);
+
     // Handle AI's request for additional information
     if (aiOutput.includes("<SHOW>")) {
       const showMatch = aiOutput.match(/<SHOW>([\s\S]*?)<\/SHOW>/);
@@ -182,7 +182,7 @@ async function main() {
         }
         aiInput = showContent;
       }
-    } 
+    }
     // Handle AI's refactoring result
     else if (aiOutput.includes("<RESULT>")) {
       const resultMatch = aiOutput.match(/<RESULT>([\s\S]*?)<\/RESULT>/);
@@ -190,7 +190,7 @@ async function main() {
       const newContent = resultMatch[1];
       await fs.writeFile(file, newContent.trim(), 'utf-8');
       console.log("\nFile refactored successfully.");
-      
+
       // If --check flag is present, perform type check on the refactored file
       if (check) {
         const checkResult = await typeCheck(file);
@@ -228,7 +228,7 @@ async function typeCheck(file) {
     default:
       return null;
   }
-  
+
   try {
     var result = await execAsync(cmd);
     return result.stderr || result.stdout;