Skip to content

Commit 207422a

Browse files
committed
Add tests
1 parent 24de369 commit 207422a

File tree

4 files changed

+783
-7
lines changed

4 files changed

+783
-7
lines changed

plugins/baseten/package.json

Lines changed: 12 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -6,16 +6,21 @@
66
"main": "dist/index.js",
77
"types": "dist/index.d.ts",
88
"type": "module",
9+
"author": "LiveKit",
10+
"repository": "git@github.com:livekit/agents-js.git",
11+
"license": "Apache-2.0",
12+
"files": [
13+
"dist",
14+
"src",
15+
"README.md"
16+
],
917
"scripts": {
1018
"build": "tsc",
1119
"dev": "tsc --watch",
1220
"clean": "rm -rf dist",
1321
"typecheck": "tsc --noEmit",
1422
"lint": "eslint \"src/**/*.ts\"",
15-
"lint:fix": "eslint --fix \"src/**/*.ts\"",
16-
"test:llm-cli": "tsx test/test-baseten-llm-cli.ts",
17-
"test:tts-cli": "tsx test/test-baseten-tts-cli.ts",
18-
"test:stt-cli": "tsx test/test-stt.ts"
23+
"lint:fix": "eslint --fix \"src/**/*.ts\""
1924
},
2025
"keywords": [
2126
"livekit",
@@ -27,11 +32,11 @@
2732
"voice-ai"
2833
],
2934
"dependencies": {
30-
"@livekit/agents": "^1.0.17",
35+
"@livekit/agents": "workspace:*",
3136
"@livekit/agents-plugin-openai": "^1.0.0",
3237
"@livekit/rtc-node": "^0.13.12",
38+
"@livekit/agents-plugins-test": "workspace:*",
3339
"dotenv": "^17.2.3",
34-
"node-record-lpcm16": "^1.0.1",
3540
"openai": "^4.0.0",
3641
"ws": "^8.14.2"
3742
},
@@ -42,7 +47,7 @@
4247
"typescript": "^5.9.3"
4348
},
4449
"peerDependencies": {
45-
"@livekit/agents": "^1.0.17",
50+
"@livekit/agents": "workspace:*",
4651
"@livekit/rtc-node": "^0.13.12"
4752
}
4853
}
Lines changed: 251 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,251 @@
1+
/**
 * Interactive CLI test for Baseten LLM
 *
 * Tests the Baseten LLM plugin with an interactive chat interface.
 * Run with: pnpm test:llm-cli
 */
import { LLM } from '../src'
import { llm, initializeLogger, log } from '@livekit/agents'
import * as dotenv from 'dotenv'
import * as readline from 'readline'

// Load BASETEN_* configuration from a local .env file, if one exists.
dotenv.config()

// Initialize the LiveKit agents logger (required before using agents)
initializeLogger({ pretty: true, level: 'info' })
const logger = log()

// ANSI color codes for prettier output
const colors = {
  reset: '\x1b[0m',
  bright: '\x1b[1m',
  dim: '\x1b[2m',
  cyan: '\x1b[36m',
  green: '\x1b[32m',
  yellow: '\x1b[33m',
  red: '\x1b[31m',
  blue: '\x1b[34m'
}

// Runtime configuration assembled from environment variables in main().
interface TestOptions {
  // Baseten API key (from BASETEN_API_KEY); required.
  apiKey: string
  // Model identifier passed through to the LLM plugin.
  model: string
  // Sampling temperature for generation.
  temperature: number
  // true -> readline REPL; false -> scripted test queries.
  interactive: boolean
}
36+
37+
class LLMTester {
38+
private llmInstance: LLM
39+
private chatContext: llm.ChatContext
40+
private rl: readline.Interface | null = null
41+
42+
constructor(options: TestOptions) {
43+
logger.info(`${colors.cyan}${colors.bright}Baseten LLM Test${colors.reset}\n`)
44+
logger.info(`${colors.dim}Model: ${options.model}${colors.reset}`)
45+
logger.info(`${colors.dim}Temperature: ${options.temperature}${colors.reset}\n`)
46+
47+
this.llmInstance = new LLM({
48+
apiKey: options.apiKey,
49+
model: options.model,
50+
temperature: options.temperature
51+
})
52+
53+
this.chatContext = new llm.ChatContext()
54+
}
55+
56+
/**
57+
* Run a single test query
58+
*/
59+
async runSingleTest(query: string): Promise<void> {
60+
logger.info(`${colors.green}User:${colors.reset} ${query}`)
61+
62+
this.chatContext.addMessage({
63+
role: 'user',
64+
content: query
65+
})
66+
67+
try {
68+
const stream = this.llmInstance.chat({ chatCtx: this.chatContext })
69+
70+
process.stdout.write(`${colors.blue}Assistant:${colors.reset} `)
71+
72+
let fullResponse = ''
73+
for await (const chunk of stream) {
74+
if (chunk.delta?.content) {
75+
process.stdout.write(chunk.delta.content)
76+
fullResponse += chunk.delta.content
77+
}
78+
}
79+
process.stdout.write('\n\n')
80+
81+
if (!fullResponse.trim()) {
82+
throw new Error('No response received from LLM')
83+
}
84+
85+
// Add assistant response to context for multi-turn conversations
86+
this.chatContext.addMessage({
87+
role: 'assistant',
88+
content: fullResponse
89+
})
90+
} catch (error: any) {
91+
logger.error(`\n${colors.red}Error:${colors.reset}`)
92+
if (error?.statusCode === 404) {
93+
logger.error('Model not found. Please check your BASETEN_LLM_MODEL environment variable.')
94+
} else if (error?.message) {
95+
logger.error(error.message)
96+
} else {
97+
logger.error(error)
98+
}
99+
throw error
100+
}
101+
}
102+
103+
/**
104+
* Run interactive REPL mode
105+
*/
106+
async runInteractive(): Promise<void> {
107+
this.rl = readline.createInterface({
108+
input: process.stdin,
109+
output: process.stdout,
110+
prompt: `${colors.green}You:${colors.reset} `
111+
})
112+
113+
logger.info(`${colors.yellow}Interactive mode. Type your messages and press Enter.${colors.reset}`)
114+
logger.info(`${colors.yellow}Commands: /exit, /clear, /history${colors.reset}\n`)
115+
116+
this.rl.prompt()
117+
118+
this.rl.on('line', async (line) => {
119+
const input = line.trim()
120+
121+
if (!input) {
122+
this.rl!.prompt()
123+
return
124+
}
125+
126+
// Handle commands
127+
if (input === '/exit') {
128+
logger.info(`${colors.cyan}Goodbye!${colors.reset}`)
129+
this.rl!.close()
130+
process.exit(0)
131+
} else if (input === '/clear') {
132+
this.chatContext = new llm.ChatContext()
133+
logger.info(`${colors.yellow}Chat history cleared.${colors.reset}\n`)
134+
this.rl!.prompt()
135+
return
136+
} else if (input === '/history') {
137+
this.printHistory()
138+
this.rl!.prompt()
139+
return
140+
}
141+
142+
// Process user message
143+
this.chatContext.addMessage({
144+
role: 'user',
145+
content: input
146+
})
147+
148+
try {
149+
const stream = this.llmInstance.chat({ chatCtx: this.chatContext })
150+
151+
process.stdout.write(`${colors.blue}Assistant:${colors.reset} `)
152+
153+
let fullResponse = ''
154+
for await (const chunk of stream) {
155+
if (chunk.delta?.content) {
156+
process.stdout.write(chunk.delta.content)
157+
fullResponse += chunk.delta.content
158+
}
159+
}
160+
process.stdout.write('\n\n')
161+
162+
if (!fullResponse.trim()) {
163+
throw new Error('No response received from LLM')
164+
}
165+
166+
// Add assistant response to context
167+
this.chatContext.addMessage({
168+
role: 'assistant',
169+
content: fullResponse
170+
})
171+
} catch (error: any) {
172+
logger.error(`\n${colors.red}Error:${colors.reset}`)
173+
if (error?.statusCode === 404) {
174+
logger.error('Model not found. Please check your BASETEN_LLM_MODEL environment variable.')
175+
} else if (error?.message) {
176+
logger.error(error.message)
177+
} else {
178+
logger.error(error)
179+
}
180+
}
181+
182+
this.rl!.prompt()
183+
})
184+
185+
this.rl.on('close', () => {
186+
logger.info(`${colors.cyan}Goodbye!${colors.reset}`)
187+
process.exit(0)
188+
})
189+
}
190+
191+
/**
192+
* Print chat history
193+
*/
194+
private printHistory(): void {
195+
logger.info(`${colors.yellow}Chat History:${colors.reset}`)
196+
for (const item of this.chatContext.items) {
197+
if (item.type === 'message') {
198+
const msg = item as llm.ChatMessage
199+
const roleColor = msg.role === 'user' ? colors.green : colors.blue
200+
const content = msg.textContent || JSON.stringify(msg.content)
201+
logger.info(`${roleColor}${msg.role}:${colors.reset} ${content}`)
202+
}
203+
}
204+
logger.info('')
205+
}
206+
}
207+
208+
async function main() {
209+
const apiKey = process.env.BASETEN_API_KEY
210+
if (!apiKey) {
211+
logger.error(`${colors.red}Error: BASETEN_API_KEY environment variable not set${colors.reset}`)
212+
logger.error('Set it in your .env file or export it in your shell')
213+
process.exit(1)
214+
}
215+
216+
const model = process.env.BASETEN_LLM_MODEL_ID || 'openai/gpt-oss-120b'
217+
const temperature = parseFloat(process.env.BASETEN_LLM_TEMPERATURE || '0.8')
218+
const interactive = process.env.INTERACTIVE !== 'false'
219+
220+
const tester = new LLMTester({
221+
apiKey,
222+
model,
223+
temperature,
224+
interactive
225+
})
226+
227+
if (interactive) {
228+
// Interactive mode
229+
await tester.runInteractive()
230+
} else {
231+
// Run a few test queries
232+
logger.info(`${colors.yellow}Running test queries...${colors.reset}\n`)
233+
234+
await tester.runSingleTest('Tell me a short joke about programming.')
235+
await tester.runSingleTest('What was my previous question about?')
236+
await tester.runSingleTest('Write a haiku about AI.')
237+
238+
logger.info(`${colors.cyan}Test completed successfully!${colors.reset}`)
239+
}
240+
}
241+
242+
// Handle cleanup
243+
process.on('SIGINT', () => {
244+
logger.info(`\n${colors.cyan}Interrupted. Exiting...${colors.reset}`)
245+
process.exit(0)
246+
})
247+
248+
main().catch((err) => {
249+
logger.error(`${colors.red}Fatal error: ${err}${colors.reset}`)
250+
process.exit(1)
251+
})

0 commit comments

Comments
 (0)