Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ ENV/

# Node.js
node_modules/
package-lock.json
npm-debug.log*
yarn-debug.log*
yarn-error.log*
Expand Down
9 changes: 9 additions & 0 deletions bun.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

5 changes: 4 additions & 1 deletion env.example
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,10 @@ FINANCIAL_DATASETS_API_KEY=your-api-key
EXASEARCH_API_KEY=your-api-key
TAVILY_API_KEY=your-api-key

# LangSmith
# Telegram Bot
TELEGRAM_BOT_TOKEN=your-bot-token

# LangSmith
LANGSMITH_API_KEY=your-api-key
LANGSMITH_ENDPOINT=https://api.smith.langchain.com
LANGSMITH_PROJECT=dexter
Expand Down
4 changes: 3 additions & 1 deletion package.json
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,8 @@
"typecheck": "tsc --noEmit",
"test": "bun test",
"test:watch": "bun test --watch",
"postinstall": "playwright install chromium"
"postinstall": "playwright install chromium",
"telegram": "bun run src/telegram/bot.ts"
},
"dependencies": {
"@langchain/anthropic": "^1.1.3",
Expand All @@ -26,6 +27,7 @@
"@mozilla/readability": "^0.6.0",
"dotenv": "^17.2.3",
"exa-js": "^2.2.0",
"grammy": "^1.40.0",
"gray-matter": "^4.0.3",
"ink": "^6.5.1",
"ink-spinner": "^5.0.0",
Expand Down
31 changes: 31 additions & 0 deletions src/telegram/bot.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
import 'dotenv/config';
import { Bot } from 'grammy';
import { handleMessage } from './handler.js';

const token = process.env.TELEGRAM_BOT_TOKEN;
if (!token) {
  console.error('TELEGRAM_BOT_TOKEN is not set. Please add it to your .env file.');
  process.exit(1);
}

const bot = new Bot(token);

bot.command('start', (ctx) =>
  ctx.reply(
    'Welcome to Dexter! Send me a financial research question and I\'ll look into it for you.'
  )
);

bot.on('message:text', handleMessage);

// Surface handler errors instead of letting an unhandled rejection kill long polling.
bot.catch((err) => {
  console.error('Unhandled bot error:', err);
});

// Graceful shutdown: bot.stop() returns a Promise; exit once polling has drained.
function shutdown(signal: string): void {
  console.log(`Received ${signal}, shutting down...`);
  bot.stop().then(
    () => process.exit(0),
    (err) => {
      console.error('Error during shutdown:', err);
      process.exit(1);
    }
  );
}

process.on('SIGINT', () => shutdown('SIGINT'));
process.on('SIGTERM', () => shutdown('SIGTERM'));

console.log('Dexter Telegram bot is running...');
// start() resolves only when the bot stops; report startup/polling failures explicitly.
bot.start().catch((err) => {
  console.error('Bot failed to start:', err);
  process.exit(1);
});
141 changes: 141 additions & 0 deletions src/telegram/handler.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,141 @@
import type { Context } from 'grammy';
import { Agent } from '../agent/agent.js';
import { InMemoryChatHistory } from '../utils/in-memory-chat-history.js';
import { getToolDescription } from '../utils/tool-description.js';
import { getSetting } from '../utils/config.js';
import { DEFAULT_PROVIDER, DEFAULT_MODEL } from '../model/llm.js';
import { getDefaultModelForProvider } from '../components/ModelSelector.js';
import { splitMessage } from './message-utils.js';

// Per-chat conversation history, keyed by Telegram chat id.
// NOTE(review): entries are never evicted, so memory grows with the number of active chats.
const chatHistories = new Map<number, InMemoryChatHistory>();

// Per-chat busy guard so only one query per chat is processed at a time.
const busyChats = new Set<number>();

// Minimum interval between status message edits (ms); throttles Telegram edit-API calls.
const STATUS_EDIT_INTERVAL = 2000;

// Determines which LLM provider and model to use: the explicitly configured
// model wins, then the provider's default, then the global fallback.
function resolveModel(): { model: string; provider: string } {
  const provider = getSetting('provider', DEFAULT_PROVIDER);
  const configured = getSetting('modelId', null);
  // ?? keeps the original short-circuit: the provider default is only
  // looked up when no model is explicitly configured.
  const model = configured ?? (getDefaultModelForProvider(provider) ?? DEFAULT_MODEL);
  return { model, provider };
}

// Returns the chat's conversation history, creating and caching one on first use.
function getHistory(chatId: number, model: string): InMemoryChatHistory {
  const existing = chatHistories.get(chatId);
  if (existing !== undefined) {
    return existing;
  }
  const fresh = new InMemoryChatHistory(model);
  chatHistories.set(chatId, fresh);
  return fresh;
}

/**
 * Entry point for incoming Telegram text messages: enforces the per-chat
 * busy guard, then delegates the actual agent run to processQuery.
 */
export async function handleMessage(ctx: Context): Promise<void> {
  const text = ctx.message?.text;
  if (!text) return;

  const chatId = ctx.chat?.id;
  if (chatId === undefined) return;

  // Busy guard — one query at a time per chat.
  if (busyChats.has(chatId)) {
    await ctx.reply('Please wait for the current query to finish.');
    return;
  }

  busyChats.add(chatId);
  try {
    await processQuery(ctx, chatId, text);
  } finally {
    // Always release the guard — the original released it only if the initial
    // status reply succeeded, which could lock a chat forever on one failure.
    busyChats.delete(chatId);
  }
}

/**
 * Runs a single agent query for `chatId`, streaming progress into an editable
 * status message, then replaces the status with the final (chunked) answer.
 * Errors from the agent are reported in-chat; errors from the initial status
 * reply propagate to handleMessage's finally block.
 */
async function processQuery(ctx: Context, chatId: number, text: string): Promise<void> {
  const { model, provider } = resolveModel();
  const history = getHistory(chatId, model);

  // Save the user query to history before processing.
  history.saveUserQuery(text);

  // Send initial status message; subsequent progress edits this message in place.
  const statusMsg = await ctx.reply('Thinking...');
  let lastEditTime = Date.now();

  // Debounced status updater — drops edits that arrive within STATUS_EDIT_INTERVAL.
  async function updateStatus(newText: string): Promise<void> {
    const now = Date.now();
    if (now - lastEditTime < STATUS_EDIT_INTERVAL) return;
    lastEditTime = now;
    try {
      await ctx.api.editMessageText(chatId, statusMsg.message_id, newText);
    } catch {
      // Ignore edit errors (message not modified, etc.)
    }
  }

  try {
    const agent = Agent.create({ model, modelProvider: provider });
    let finalAnswer = '';

    for await (const event of agent.run(text, history)) {
      switch (event.type) {
        case 'thinking':
          await updateStatus('Thinking...');
          break;

        case 'tool_start':
          await updateStatus(`Using ${getToolDescription(event.tool, event.args)}...`);
          break;

        case 'tool_progress':
          await updateStatus(`${event.tool}: ${event.message}`);
          break;

        case 'tool_end':
          await updateStatus('Analyzing results...');
          break;

        case 'tool_error':
          await updateStatus('Encountered an error, retrying...');
          break;

        case 'answer_start':
          await updateStatus('Writing answer...');
          break;

        case 'done':
          finalAnswer = event.answer;
          break;
      }
    }

    // Delete the status message before sending the answer.
    try {
      await ctx.api.deleteMessage(chatId, statusMsg.message_id);
    } catch {
      // Ignore delete errors
    }

    // Save the answer to history.
    await history.saveAnswer(finalAnswer);

    // Send the final answer, chunked to fit Telegram's message-size limit.
    const chunks = splitMessage(finalAnswer || 'No answer was generated.');
    for (const chunk of chunks) {
      try {
        await ctx.reply(chunk, { parse_mode: 'Markdown' });
      } catch {
        // Markdown parse failed — fall back to plain text.
        await ctx.reply(chunk);
      }
    }
  } catch (error) {
    const errorMessage = error instanceof Error ? error.message : String(error);
    // Prefer editing the status message; fall back to a fresh reply.
    try {
      await ctx.api.editMessageText(
        chatId,
        statusMsg.message_id,
        `Error: ${errorMessage.slice(0, 4000)}`
      );
    } catch {
      await ctx.reply(`Error: ${errorMessage.slice(0, 4000)}`);
    }
  }
}
40 changes: 40 additions & 0 deletions src/telegram/message-utils.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
// Telegram rejects messages longer than 4096 characters.
const MAX_LENGTH = 4096;
// Target cut point, leaving a safety margin below Telegram's hard limit.
const SAFE_LENGTH = 4000;

// Chooses where to cut an over-long string: the last paragraph break within
// the safe window, else the last line break, else a hard cut at SAFE_LENGTH.
function pickSplitIndex(rest: string): number {
  const paragraphBreak = rest.lastIndexOf('\n\n', SAFE_LENGTH);
  if (paragraphBreak > 0) {
    return paragraphBreak;
  }
  const lineBreak = rest.lastIndexOf('\n', SAFE_LENGTH);
  if (lineBreak > 0) {
    return lineBreak;
  }
  return SAFE_LENGTH;
}

/**
 * Splits a message into chunks that each fit within Telegram's 4096-character
 * limit. Split points are preferred in order: paragraph boundary, line
 * boundary, hard cut. Leading whitespace is trimmed from each continuation.
 */
export function splitMessage(text: string): string[] {
  if (text.length <= MAX_LENGTH) {
    return [text];
  }

  const chunks: string[] = [];
  for (let rest = text; rest.length > 0; ) {
    if (rest.length <= MAX_LENGTH) {
      chunks.push(rest);
      break;
    }
    const cut = pickSplitIndex(rest);
    chunks.push(rest.slice(0, cut));
    rest = rest.slice(cut).trimStart();
  }
  return chunks;
}