
Commit 6e656f9

continue[bot] and nate committed
fix(openai-adapters): Remove token count validation in finish event handler
Removed the check that required tokens > 0 before emitting usage from the finish event. The finish event should always emit usage if part.usage exists, even if the token counts are legitimately 0. The fallback to the stream.usage Promise now only triggers if:

- no finish event is emitted, OR
- a finish event exists but part.usage is undefined.

This fixes cases where the finish event has valid 0 token counts.

Co-authored-by: nate <[email protected]>

Generated with [Continue](https://continue.dev)
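The fallback behavior described in the commit message can be sketched as follows. This is an illustrative TypeScript sketch, not the actual adapter code: the names Usage, StreamPart, and resolveUsage are invented for the example, and only the precedence rule (finish-event usage wins whenever part.usage exists, even with zero counts; the stream.usage Promise is awaited only otherwise) is taken from the commit.

    interface Usage {
      promptTokens: number;
      completionTokens: number;
    }

    interface StreamPart {
      type: string;
      usage?: Partial<Usage>;
    }

    // Sketch: resolve final usage for a stream, preferring the "finish" part.
    async function resolveUsage(
      parts: AsyncIterable<StreamPart>,
      streamUsage: Promise<Usage>,
    ): Promise<Usage> {
      for await (const part of parts) {
        if (part.type === "finish" && part.usage) {
          // Zero counts are legitimate, so do not fall through to the Promise.
          return {
            promptTokens: part.usage.promptTokens ?? 0,
            completionTokens: part.usage.completionTokens ?? 0,
          };
        }
      }
      // Fallback: no finish event was seen, or it carried no part.usage.
      return streamUsage;
    }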
1 parent 7d3fa6d commit 6e656f9

1 file changed: +24 additions, -27 deletions


packages/openai-adapters/src/vercelStreamConverter.ts

Lines changed: 24 additions & 27 deletions
@@ -121,7 +121,7 @@ export function convertVercelStreamPart(
       });
 
     case "finish":
-      // Emit usage chunk at the end if usage data is present and valid
+      // Emit usage chunk at the end if usage data is present
       if (part.usage) {
         const promptTokens =
           typeof part.usage.promptTokens === "number"
@@ -136,34 +136,31 @@ export function convertVercelStreamPart(
             ? part.usage.totalTokens
             : promptTokens + completionTokens;
 
-        // Only emit usage chunk if we have meaningful token counts
-        if (promptTokens > 0 || completionTokens > 0) {
-          // Check for Anthropic-specific cache token details
-          const promptTokensDetails =
-            (part.usage as any).promptTokensDetails?.cachedTokens !== undefined
-              ? {
-                  cached_tokens:
-                    (part.usage as any).promptTokensDetails.cachedTokens ?? 0,
-                  cache_read_tokens:
-                    (part.usage as any).promptTokensDetails.cachedTokens ?? 0,
-                  cache_write_tokens: 0,
-                }
-              : undefined;
+        // Check for Anthropic-specific cache token details
+        const promptTokensDetails =
+          (part.usage as any).promptTokensDetails?.cachedTokens !== undefined
+            ? {
+                cached_tokens:
+                  (part.usage as any).promptTokensDetails.cachedTokens ?? 0,
+                cache_read_tokens:
+                  (part.usage as any).promptTokensDetails.cachedTokens ?? 0,
+                cache_write_tokens: 0,
+              }
+            : undefined;
 
-          return usageChatChunk({
-            model,
-            usage: {
-              prompt_tokens: promptTokens,
-              completion_tokens: completionTokens,
-              total_tokens: totalTokens,
-              ...(promptTokensDetails
-                ? { prompt_tokens_details: promptTokensDetails as any }
-                : {}),
-            },
-          });
-        }
+        return usageChatChunk({
+          model,
+          usage: {
+            prompt_tokens: promptTokens,
+            completion_tokens: completionTokens,
+            total_tokens: totalTokens,
+            ...(promptTokensDetails
+              ? { prompt_tokens_details: promptTokensDetails as any }
+              : {}),
+          },
+        });
       }
-      // If no valid usage data, don't emit a usage chunk
+      // If no usage data in finish event, return null
      return null;
 
     case "error":
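For illustration, the effect of dropping the token-count guard can be seen with a zero-token finish part. The literals below are hypothetical; their shapes are inferred only from the diff above, not from the rest of the file.

    // Hypothetical finish part whose provider legitimately reported 0 tokens.
    const zeroTokenFinishPart = {
      type: "finish",
      usage: { promptTokens: 0, completionTokens: 0, totalTokens: 0 },
    };

    // Before this commit, the `promptTokens > 0 || completionTokens > 0` guard
    // made the converter return null for such a part. After it, the converter
    // calls usageChatChunk and emits roughly this usage payload:
    const expectedUsage = {
      prompt_tokens: 0,
      completion_tokens: 0,
      total_tokens: 0,
    };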
