diff --git a/README.md b/README.md
index 396b6a5..8177621 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,10 @@
 # AI CLI
 
-AI agent in your terminal.
+AI assistant in your terminal.
+
+```
+$ ai "What are generator functions in JS?"
+```
 
 ## Installation
 
@@ -19,6 +23,7 @@ You will need a valid API key from one of currently supported AI inference provi
 Create a minimal `~/.airc` file (in your home directory):
 
 For Open AI:
+
 ```json
 {
   "providers": {
@@ -30,6 +35,7 @@ For Open AI:
 ```
 
 For Perplexity:
+
 ```json
 {
   "providers": {
@@ -63,6 +69,27 @@ ai: What do you call fake spaghetti? An impasta!
 me: _
 ```
 
+## CLI options
+
+CLI options are passed when invoking the `ai` command:
+
+- `--help`: display available options
+- `--version`: display CLI version
+- `--interactive` (or `-i`): start an interactive session where you can ask follow-up questions
+- `--provider [name]` (or `-p [name]`): select the inference provider to use: `openai` or `perplexity` (alias `pplx`). You need the relevant API key in your `~/.airc` file.
+- `--model [name]` (or `-m [name]`): select a model to use. This should be a model available for the selected provider.
+- `--verbose` (or `-V`): enable verbose mode
+
+## CLI commands
+
+CLI commands are available when using the CLI in interactive mode.
+
+- `/help`: display available commands
+- `/exit`: exit the CLI
+- `/info`: display information about the current session
+- `/verbose [on|off]`: enable/disable verbose mode
+- `/forget`: forget previous messages in the session
+
 ## Contributing
 
 See the [contributing guide](CONTRIBUTING.md) to learn how to contribute to the repository and the development workflow.
diff --git a/src/commands/prompt/commands.ts b/src/commands/prompt/commands.ts
new file mode 100644
index 0000000..7e5a7be
--- /dev/null
+++ b/src/commands/prompt/commands.ts
@@ -0,0 +1,56 @@
+import type { Message } from '../../inference';
+import * as output from '../../output';
+
+export interface CommandContext {
+  messages: Message[];
+  providerName: string;
+  config: {
+    model: string;
+    systemPrompt: string;
+  };
+}
+
+export function processCommand(input: string, context: CommandContext): boolean {
+  if (!input.startsWith('/')) {
+    return false;
+  }
+
+  const [command, ...args] = input.split(' ');
+  if (command === '/exit') {
+    process.exit(0);
+    // No need to return; process.exit() never returns.
+  }
+
+  if (command === '/help') {
+    output.outputInfo('Available commands:');
+    output.outputInfo('- /exit: Exit the CLI');
+    output.outputInfo('- /info: Show current provider, model, and system prompt');
+    output.outputInfo('- /verbose [on|off]: Enable or disable verbose output');
+    output.outputInfo('- /forget: AI will forget previous messages');
+
+    return true;
+  }
+
+  if (command === '/info') {
+    output.outputInfo(`Provider: ${context.providerName}, model: ${context.config.model}`);
+    output.outputInfo('System prompt:', context.config.systemPrompt);
+    output.outputVerbose('Current context:', JSON.stringify(context.messages, null, 2));
+    return true;
+  }
+
+  if (command === '/verbose') {
+    output.setVerbose(args[0] !== 'off');
+    output.outputInfo(`Verbose mode: ${output.isVerbose() ? 'on' : 'off'}`);
+    return true;
+  }
+
+  if (command === '/forget') {
+    // Forget all messages from the context.
+    context.messages.length = 0;
+    output.outputInfo('AI will forget previous messages.');
+    return true;
+  }
+
+  output.outputError(`Unknown command: ${command} ${args.join(' ')}`);
+  return true;
+}
diff --git a/src/commands/prompt.ts b/src/commands/prompt/index.ts
similarity index 82%
rename from src/commands/prompt.ts
rename to src/commands/prompt/index.ts
index 7d41f36..080e175 100644
--- a/src/commands/prompt.ts
+++ b/src/commands/prompt/index.ts
@@ -1,9 +1,10 @@
 import type { CommandModule } from 'yargs';
-import { parseConfigFile } from '../config-file';
-import { type Message } from '../inference';
-import { inputLine } from '../input';
-import * as output from '../output';
-import { providers, providerOptions, resolveProviderName } from '../providers';
+import { parseConfigFile } from '../../config-file';
+import { type Message } from '../../inference';
+import { inputLine } from '../../input';
+import * as output from '../../output';
+import { providers, providerOptions, resolveProviderName } from '../../providers';
+import { processCommand } from './commands';
 
 export interface PromptOptions {
   /** Interactive mode */
@@ -77,13 +78,13 @@ async function runInternal(initialPrompt: string, options: PromptOptions) {
     throw new Error(`Provider config not found: ${providerName}.`);
   }
 
-  const actualConfig = {
+  const config = {
     model: options.model ?? initialConfig.model,
     apiKey: initialConfig.apiKey,
     systemPrompt: initialConfig.systemPrompt,
   };
 
-  output.outputVerbose(`Using model: ${actualConfig.model}`);
+  output.outputVerbose(`Using model: ${config.model}`);
 
   const messages: Message[] = [];
 
@@ -92,27 +93,36 @@ async function runInternal(initialPrompt: string, options: PromptOptions) {
     output.outputAiProgress('Thinking...');
 
     messages.push({ role: 'user', content: initialPrompt });
-    const [content, response] = await provider.getChatCompletion(actualConfig, messages);
+    const [content, response] = await provider.getChatCompletion(config, messages);
     output.clearLine();
     output.outputVerbose(`Response: ${JSON.stringify(response, null, 2)}`);
     output.outputAi(content ?? '(null)');
     messages.push({ role: 'assistant', content: content ?? '' });
   } else {
-    output.outputAi('Hello, how can I help you? Press Ctrl+C to exit.');
+    output.outputAi('Hello, how can I help you?');
   }
 
-  if (!options.interactive && initialPrompt) {
+  if (options.interactive || !initialPrompt) {
+    output.outputInfo(
+      'Type "/exit" or press Ctrl+C to exit. Type "/help" to see available commands.'
+    );
+  } else {
     process.exit(0);
   }
 
   // eslint-disable-next-line no-constant-condition
   while (true) {
    const userPrompt = await inputLine('me: ');
 
+    const isCommand = processCommand(userPrompt, { messages, providerName, config });
+    if (isCommand) {
+      continue;
+    }
+
     output.outputAiProgress('Thinking...');
 
     messages.push({ role: 'user', content: userPrompt });
-    const [content, response] = await provider.getChatCompletion(actualConfig, messages);
+    const [content, response] = await provider.getChatCompletion(config, messages);
     output.clearLine();
     output.outputVerbose(`Response Object: ${JSON.stringify(response, null, 2)}`);
     output.outputAi(content ?? '(null)');
diff --git a/src/output.ts b/src/output.ts
index dc20ae4..4837785 100644
--- a/src/output.ts
+++ b/src/output.ts
@@ -7,6 +7,10 @@ export function setVerbose(value: boolean) {
   verbose = value;
 }
 
+export function isVerbose() {
+  return verbose;
+}
+
 export function outputUser(message: string) {
   console.log('me:', message);
 }
@@ -22,12 +26,20 @@ export function outputAiProgress(message: string) {
 
 export function outputVerbose(message: string, ...args: unknown[]) {
   if (!verbose) return;
-  console.debug(chalk.grey(message), ...args);
+  console.debug(chalk.grey(message, ...args));
+}
+
+export function outputInfo(message: string, ...args: unknown[]) {
+  console.log(chalk.dim(message, ...args));
 }
 
 export function outputError(error: unknown, ...args: unknown[]) {
   const message = extractErrorMessage(error);
-  console.error(chalk.red(`ERROR: ${message}`), error, ...args);
+  if (error === message) {
+    console.error(chalk.red(`ERROR: ${message}`, ...args));
+  } else {
+    console.error(chalk.red(`ERROR: ${message}`, error, ...args));
+  }
 }
 
 /**
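
The new `processCommand` helper consumes slash commands and returns `true`, while returning `false` for plain prompts, and the REPL loop in `index.ts` uses that flag to `continue` past the inference call. Below is a minimal sketch of that contract, not part of the diff itself; it assumes a file sitting next to `commands.ts`, and the provider name, model, and system prompt are placeholder values.

```ts
// Illustrative only: exercises the boolean contract of processCommand.
import { processCommand, type CommandContext } from './commands';

const context: CommandContext = {
  messages: [],
  providerName: 'openai',
  config: { model: 'some-model', systemPrompt: 'You are a helpful assistant.' },
};

for (const input of ['/help', '/verbose on', 'What are generator functions in JS?']) {
  const handled = processCommand(input, context);
  // Slash commands are consumed here; anything else should go to the inference provider.
  console.log(`${input} -> ${handled ? 'handled as a command' : 'passed through'}`);
}
```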
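A small detail in the `/forget` handler: it clears the history with `context.messages.length = 0` rather than assigning a fresh array, so the very same `messages` array created in `index.ts` is emptied. A standalone sketch of why the in-place truncation matters (illustrative, not taken from the diff):

```ts
// Two bindings to one array, like `messages` in index.ts and
// `context.messages` inside processCommand.
const messages = [{ role: 'user', content: 'Tell me a joke.' }];
const alias = messages;

alias.length = 0;             // truncates the single shared array
console.log(messages.length); // 0 -- the caller sees the cleared history

// Reassigning (`alias = []`) would only rebind the local name and leave
// `messages` untouched -- and is a compile error here because `alias` is const.
```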
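In `output.ts`, the `...args` spread now goes inside the `chalk` call, so the extra arguments are space-joined with the message and colored as one string instead of being printed uncolored after it. A quick illustration, relying on chalk's documented behavior of joining multiple arguments with spaces (the label and detail values are made up):

```ts
import chalk from 'chalk';

const detail = JSON.stringify({ model: 'some-model' });

console.debug(chalk.grey('Using model:'), detail); // only the label is grey
console.debug(chalk.grey('Using model:', detail)); // the whole line is grey
```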