Skip to content

Commit 7d3fa6d

Browse files
continue[bot] and nate committed
fix(openai-adapters): Add defensive type checks for stream.usage Promise
Added type validation for stream.usage values to prevent NaN:

- Check if promptTokens is a number before using
- Check if completionTokens is a number before using
- Calculate totalTokens from components if not provided
- Default to 0 for any undefined/invalid values

This prevents NaN errors when stream.usage Promise resolves with unexpected/undefined values in the fallback path.

Co-authored-by: nate <[email protected]>

Generated with [Continue](https://continue.dev)
1 parent bbeec4b commit 7d3fa6d

File tree

2 files changed

+32
-6
lines changed

2 files changed

+32
-6
lines changed

packages/openai-adapters/src/apis/Anthropic.ts

Lines changed: 16 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -680,12 +680,25 @@ export class AnthropicApi implements BaseLlmApi {
680680
const finalUsage = await stream.usage;
681681
if (finalUsage) {
682682
const { usageChatChunk } = await import("../util.js");
683+
const promptTokens =
684+
typeof finalUsage.promptTokens === "number"
685+
? finalUsage.promptTokens
686+
: 0;
687+
const completionTokens =
688+
typeof finalUsage.completionTokens === "number"
689+
? finalUsage.completionTokens
690+
: 0;
691+
const totalTokens =
692+
typeof finalUsage.totalTokens === "number"
693+
? finalUsage.totalTokens
694+
: promptTokens + completionTokens;
695+
683696
yield usageChatChunk({
684697
model: body.model,
685698
usage: {
686-
prompt_tokens: finalUsage.promptTokens,
687-
completion_tokens: finalUsage.completionTokens,
688-
total_tokens: finalUsage.totalTokens,
699+
prompt_tokens: promptTokens,
700+
completion_tokens: completionTokens,
701+
total_tokens: totalTokens,
689702
prompt_tokens_details: {
690703
cached_tokens:
691704
(finalUsage as any).promptTokensDetails?.cachedTokens ?? 0,

packages/openai-adapters/src/apis/OpenAI.ts

Lines changed: 16 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -348,12 +348,25 @@ export class OpenAIApi implements BaseLlmApi {
348348
if (!hasEmittedUsage) {
349349
const finalUsage = await stream.usage;
350350
if (finalUsage) {
351+
const promptTokens =
352+
typeof finalUsage.promptTokens === "number"
353+
? finalUsage.promptTokens
354+
: 0;
355+
const completionTokens =
356+
typeof finalUsage.completionTokens === "number"
357+
? finalUsage.completionTokens
358+
: 0;
359+
const totalTokens =
360+
typeof finalUsage.totalTokens === "number"
361+
? finalUsage.totalTokens
362+
: promptTokens + completionTokens;
363+
351364
yield usageChatChunk({
352365
model: modifiedBody.model,
353366
usage: {
354-
prompt_tokens: finalUsage.promptTokens,
355-
completion_tokens: finalUsage.completionTokens,
356-
total_tokens: finalUsage.totalTokens,
367+
prompt_tokens: promptTokens,
368+
completion_tokens: completionTokens,
369+
total_tokens: totalTokens,
357370
},
358371
});
359372
}

0 commit comments

Comments (0)