fix: UI fallbacks and Electron release workflow (#811)
Conversation
…permissions to electron-release.yml (#745, #761) - Use ProviderIcon for internal .png paths solving SVG provider 404 images (#745). - Add id-token: write and packages: write permissions to .github/workflows/electron-release.yml to fix permissions denied failure when calling the reusable workflow npm-publish.yml (#761). - Fix tests and ESM resolution for autoUpdate.ts override logic.
| if (toNumber(usage.thoughtsTokenCount, 0) > 0) { | ||
| (result.usage as JsonRecord).completion_tokens_details = { | ||
| reasoning_tokens: toNumber(usage.thoughtsTokenCount, 0), | ||
|
|
There was a problem hiding this comment.
WARNING: Incomplete source format translation - Only Claude format is handled in Phase 3. If sourceFormat is GEMINI, ANTIGRAVITY, or GEMINI_CLI, the intermediate OpenAI format will be returned without converting back to the original source format, potentially breaking clients expecting Gemini-format responses.
| @@ -0,0 +1,25 @@ | |||
| import { openaiToOpenAIResponsesRequest } from "./open-sse/translator/request/openai-responses.ts"; | |||
There was a problem hiding this comment.
WARNING: Debug/test file included in PR - This appears to be a development artifact. Consider removing these test files before merging.
| @@ -0,0 +1,36 @@ | |||
| import { getTargetFormat } from "./open-sse/services/provider.ts"; | |||
There was a problem hiding this comment.
WARNING: Debug/test file included in PR - This appears to be a development artifact. Consider removing these test files before merging.
| @@ -0,0 +1,51 @@ | |||
| import { translateRequest } from "./open-sse/translator/index.ts"; | |||
There was a problem hiding this comment.
WARNING: Debug/test file included in PR - This appears to be a development artifact. Consider removing these test files before merging.
Code Review Summary — Status: 2 Issues Found | Recommendation: Address before merge. Overview
Issue Details (click to expand) — WARNING
Other Observations (not in diff): No issues found in unchanged code. Files Reviewed (11 files)
Positive Changes
|
There was a problem hiding this comment.
Code Review
This pull request improves agent workflows by implementing a two-step process for fetching GitHub issues and PRs to prevent data truncation. It refactors the non-streaming response translation logic to use an intermediate OpenAI format, enhances auto-update detection, and adds provider icons to the UI header. Feedback recommends adding error handling for JSON parsing in the translator and removing temporary test files included in the PR.
| id: toString(toolObj.id, `call_${Date.now()}`), | ||
| name: toString(fn.name), | ||
| input: | ||
| typeof fn.arguments === "string" ? JSON.parse(fn.arguments || "{}") : fn.arguments || {}, |
There was a problem hiding this comment.
The JSON.parse call here can throw an exception if fn.arguments is a string that is not valid JSON. This would cause the request to fail. It's safer to wrap this in a try-catch block to handle potential parsing errors gracefully.
| typeof fn.arguments === "string" ? JSON.parse(fn.arguments || "{}") : fn.arguments || {}, | |
| typeof fn.arguments === "string" ? (() => { try { return JSON.parse(fn.arguments || '{}'); } catch { return {}; } })() : fn.arguments || {} |
| import { openaiToOpenAIResponsesRequest } from "./open-sse/translator/request/openai-responses.ts"; | ||
|
|
||
| const root = { | ||
| model: "gpt-5.3-codex-xhigh", | ||
| messages: [ | ||
| { | ||
| role: "user", | ||
| content: [ | ||
| { | ||
| type: "text", | ||
| text: "<system-reminder>\nThe following skills are available...", | ||
| }, | ||
| ], | ||
| }, | ||
| ], | ||
| }; | ||
|
|
||
| try { | ||
| // Let's modify the file to actually export the function throwing or we can just copy the original logic. | ||
| // Actually, wait, let's just create a modified version of it here inline to see where it breaks. | ||
| const result = openaiToOpenAIResponsesRequest("gpt-5.3-codex-xhigh", root, true, null); | ||
| console.log("Result:", JSON.stringify(result, null, 2)); | ||
| } catch (e) { | ||
| console.error("Test Error:", e); | ||
| } |
| import { getTargetFormat } from "./open-sse/services/provider.ts"; | ||
| import { parseModelFromRequest, resolveProviderAndModel } from "./open-sse/handlers/chatCore.ts"; // Since they're in chatCore directly? | ||
| import { getProviderConfig } from "./open-sse/services/provider.ts"; | ||
|
|
||
| const body = { model: "codex/gpt-5.3-codex-xhigh" }; | ||
| const parsedModel = body.model; | ||
|
|
||
| function resolveProviderAndModel(rawModel, providerFromPath = "") { | ||
| let provider = providerFromPath; | ||
| let model = rawModel; | ||
| let resolvedAlias = null; | ||
|
|
||
| if (rawModel && rawModel.includes("/")) { | ||
| const parts = rawModel.split("/"); | ||
| provider = parts[0]; | ||
| model = parts.slice(1).join("/"); | ||
| } | ||
|
|
||
| return { provider, model, resolvedAlias: null }; | ||
| } | ||
|
|
||
| const { provider, model, resolvedAlias } = resolveProviderAndModel(parsedModel, ""); | ||
| const effectiveModel = resolvedAlias || model; | ||
|
|
||
| const config = getProviderConfig(provider); | ||
| const modelTargetFormat = config?.models?.find((m) => m.id === effectiveModel)?.targetFormat; | ||
| const targetFormat = modelTargetFormat || getTargetFormat(provider); | ||
|
|
||
| console.log({ | ||
| provider, | ||
| model, | ||
| resolvedAlias, | ||
| effectiveModel, | ||
| modelTargetFormat, | ||
| targetFormat, | ||
| }); |
| import { translateRequest } from "./open-sse/translator/index.ts"; | ||
| import { FORMATS } from "./open-sse/translator/formats.ts"; | ||
| import { CodexExecutor } from "./open-sse/executors/codex.ts"; | ||
|
|
||
| const claudeCodeRequest = { | ||
| model: "codex/gpt-5.3-codex-xhigh", | ||
| messages: [ | ||
| { | ||
| role: "user", | ||
| content: [ | ||
| { | ||
| type: "text", | ||
| text: "What time is it?", | ||
| }, | ||
| ], | ||
| }, | ||
| ], | ||
| system: "Test system prompt", | ||
| tools: [ | ||
| { | ||
| name: "get_time", | ||
| description: "Get the time", | ||
| input_schema: { | ||
| type: "object", | ||
| properties: { timezone: { type: "string" } }, | ||
| }, | ||
| }, | ||
| ], | ||
| }; | ||
|
|
||
| try { | ||
| const result = translateRequest( | ||
| FORMATS.CLAUDE, | ||
| FORMATS.OPENAI_RESPONSES, | ||
| "gpt-5.3-codex-xhigh", | ||
| claudeCodeRequest, | ||
| true, // stream | ||
| null, // credentials | ||
| "codex", // provider | ||
| null, // reqLogger | ||
| { normalizeToolCallId: false, preserveDeveloperRole: true } | ||
| ); | ||
|
|
||
| const exec = new CodexExecutor(); | ||
| const finalBody = exec.transformRequest("gpt-5.3-codex-xhigh", result, true, {}); | ||
|
|
||
| console.log("FINAL BODY:", JSON.stringify(finalBody, null, 2)); | ||
| } catch (err) { | ||
| console.error("ERROR:"); | ||
| console.error(err); | ||
| } |
There was a problem hiding this comment.
💡 Codex Review
Here are some automated review suggestions for this pull request.
Reviewed commit: 5ad687c6d8
ℹ️ About Codex in GitHub
Your team has set up Codex to review pull requests in this repo. Reviews are triggered when you
- Open a pull request for review
- Mark a draft as ready
- Comment "@codex review".
If Codex has suggestions, it will comment; otherwise it will react with 👍.
Codex can also answer questions or update the PR. Try commenting "@codex address that feedback".
| if (sourceFormat === FORMATS.CLAUDE && sourceFormat !== targetFormat) { | ||
| return convertOpenAINonStreamingToClaude(toRecord(intermediateOpenAI)); |
There was a problem hiding this comment.
Skip Claude back-conversion before OpenAI sanitization
This new branch returns a Claude-shaped payload whenever sourceFormat === FORMATS.CLAUDE, but the non-streaming path still sanitizes based on targetFormat in chatCore (sanitizeOpenAIResponse runs for OpenAI/OpenAI Responses targets). In that flow, the Claude object is treated as OpenAI and collapsed to an empty choices array, so /v1/messages clients can lose the assistant message entirely when routed to OpenAI/Codex-style providers. Keep the payload in OpenAI shape until after sanitization (or gate sanitization by output/source format).
Useful? React with 👍 / 👎.
| @@ -0,0 +1,36 @@ | |||
| import { getTargetFormat } from "./open-sse/services/provider.ts"; | |||
| import { parseModelFromRequest, resolveProviderAndModel } from "./open-sse/handlers/chatCore.ts"; // Since they're in chatCore directly? | |||
There was a problem hiding this comment.
Remove debug TypeScript file with invalid imports
This committed debug script is included by the root TypeScript program (tsconfig.json includes **/*.ts) and imports parseModelFromRequest/resolveProviderAndModel from chatCore.ts, but those exports do not exist. That introduces TS2305 build/typecheck failures in CI/release pipelines even though the file is not production code; it should be removed from the repo or excluded from compilation.
Useful? React with 👍 / 👎.
| id: toString(toolObj.id, `call_${Date.now()}`), | ||
| name: toString(fn.name), | ||
| input: | ||
| typeof fn.arguments === "string" ? JSON.parse(fn.arguments || "{}") : fn.arguments || {}, |
There was a problem hiding this comment.
Guard tool argument parsing when converting to Claude
The new converter unconditionally does JSON.parse(fn.arguments). If a provider returns malformed/non-JSON tool arguments (which can happen in real tool-call outputs), this throws and fails the entire non-streaming response translation with a 500. The converter should catch parse errors and fall back to a safe object/string instead of crashing the request.
Useful? React with 👍 / 👎.
There was a problem hiding this comment.
Pull request overview
Improves UI/provider icon fallbacks and adjusts update/release plumbing to address missing provider images, broken “Update Now” behavior for source installs, and missing Electron assets in releases.
Changes:
- Update auto-update mode detection to treat local/source installs as non-auto-updatable (“source”) and adjust unit test expectations.
- Improve provider breadcrumb icon rendering by using `ProviderIcon` instead of hard-coded `/providers/*.png` paths.
- Extend non-streaming response translation to support OpenAI→Claude conversion, and tweak Electron release workflow permissions.
Reviewed changes
Copilot reviewed 10 out of 10 changed files in this pull request and generated 7 comments.
Show a summary per file
| File | Description |
|---|---|
src/lib/system/autoUpdate.ts |
Adds heuristics to detect source installs and disables auto-update for them. |
tests/unit/auto-update-runtime.test.mjs |
Loosens default-mode assertion to allow npm or source depending on runtime context. |
src/shared/components/Header.tsx |
Switches provider breadcrumbs from static images to ProviderIcon rendering. |
open-sse/handlers/responseTranslator.ts |
Refactors non-streaming translation into an OpenAI intermediate and adds OpenAI→Claude conversion helper. |
.github/workflows/electron-release.yml |
Adds additional GitHub token permissions for the Electron release workflow. |
test_translator.ts |
Adds a repo-root debugging script for request translation/executor inspection. |
test_target_format.ts |
Adds a repo-root debugging script for provider targetFormat inspection. |
test_exception.ts |
Adds a repo-root debugging script for reproducing a translator exception. |
.agents/workflows/review-prs.md |
Updates internal agent workflow docs to avoid truncated gh pr list JSON output. |
.agents/workflows/resolve-issues.md |
Updates internal agent workflow docs to avoid truncated gh issue list JSON output. |
💡 Add Copilot custom instructions for smarter, more guided reviews. Learn how to get started.
| import { translateRequest } from "./open-sse/translator/index.ts"; | ||
| import { FORMATS } from "./open-sse/translator/formats.ts"; | ||
| import { CodexExecutor } from "./open-sse/executors/codex.ts"; | ||
|
|
||
| const claudeCodeRequest = { | ||
| model: "codex/gpt-5.3-codex-xhigh", | ||
| messages: [ | ||
| { | ||
| role: "user", | ||
| content: [ | ||
| { | ||
| type: "text", |
There was a problem hiding this comment.
This appears to be a local/manual debugging script, but it will be picked up by tsconfig.json (**/*.ts) and by eslint . because it’s at repo root and not in the ignore list. That can introduce CI failures (unused imports, Node-only code, accidental execution assumptions). Please remove it from the PR, or relocate it under an ignored folder (e.g. scripts/) and ensure it’s excluded from lint/typecheck.
| import { openaiToOpenAIResponsesRequest } from "./open-sse/translator/request/openai-responses.ts"; | ||
|
|
||
| const root = { | ||
| model: "gpt-5.3-codex-xhigh", | ||
| messages: [ | ||
| { | ||
| role: "user", | ||
| content: [ | ||
| { | ||
| type: "text", | ||
| text: "<system-reminder>\nThe following skills are available...", | ||
| }, | ||
| ], | ||
| }, | ||
| ], | ||
| }; | ||
|
|
||
| try { | ||
| // Let's modify the file to actually export the function throwing or we can just copy the original logic. | ||
| // Actually, wait, let's just create a modified version of it here inline to see where it breaks. | ||
| const result = openaiToOpenAIResponsesRequest("gpt-5.3-codex-xhigh", root, true, null); | ||
| console.log("Result:", JSON.stringify(result, null, 2)); | ||
| } catch (e) { | ||
| console.error("Test Error:", e); | ||
| } |
There was a problem hiding this comment.
This file is a one-off debugging harness and will be included by tsconfig.json (**/*.ts) and eslint . since it lives at repo root. That makes it easy for CI/lint/typecheck to start failing due to unused imports or Node-only assumptions. Please remove it from the PR or move it under an ignored directory (e.g. scripts/).
| import { openaiToOpenAIResponsesRequest } from "./open-sse/translator/request/openai-responses.ts"; | |
| const root = { | |
| model: "gpt-5.3-codex-xhigh", | |
| messages: [ | |
| { | |
| role: "user", | |
| content: [ | |
| { | |
| type: "text", | |
| text: "<system-reminder>\nThe following skills are available...", | |
| }, | |
| ], | |
| }, | |
| ], | |
| }; | |
| try { | |
| // Let's modify the file to actually export the function throwing or we can just copy the original logic. | |
| // Actually, wait, let's just create a modified version of it here inline to see where it breaks. | |
| const result = openaiToOpenAIResponsesRequest("gpt-5.3-codex-xhigh", root, true, null); | |
| console.log("Result:", JSON.stringify(result, null, 2)); | |
| } catch (e) { | |
| console.error("Test Error:", e); | |
| } | |
| // Intentionally left blank. | |
| // This file previously contained a one-off debugging harness and was | |
| // removed to avoid inclusion in CI, lint, and type checking. |
| id-token: write | ||
| packages: write | ||
|
|
There was a problem hiding this comment.
The workflow grants id-token: write and packages: write, but this job only checks out code, builds artifacts, creates a GitHub release, and calls the reusable npm publish workflow. There’s no OIDC usage (no cloud auth) and no GitHub Packages publishing here, so these extra permissions increase blast radius unnecessarily. Consider removing them (or scoping them to only the job/step that truly needs them).
| id-token: write | |
| packages: write |
| let mode = normalizeMode(env.AUTO_UPDATE_MODE); | ||
| if (mode === "npm") { | ||
| const fs = require("node:fs"); | ||
| if (fs.existsSync(path.join(process.cwd(), ".git"))) { | ||
| const isGitRepo = existsSync(path.join(process.cwd(), ".git")); | ||
| const currentDir = typeof __dirname !== "undefined" ? __dirname : process.cwd(); | ||
| const isGlobalNodeModules = currentDir.includes("node_modules"); | ||
|
|
||
| // If we are not in a global node_modules directory, we are likely a local source install/build. | ||
| // Even if .git is missing (downloaded zip), we should treat it as source. | ||
| if (isGitRepo || !isGlobalNodeModules) { | ||
| mode = "source" as any; | ||
| } |
There was a problem hiding this comment.
getAutoUpdateConfig() sets mode = "source" as any, but AutoUpdateMode is currently only "npm" | "docker-compose". This defeats type-safety and leaks into AutoUpdateConfig.mode / AutoUpdateLaunchResult.channel. Add "source" to the AutoUpdateMode union (and update normalizeMode() accordingly) so the new mode is fully typed and the as any casts can be removed.
| type: "tool_use", | ||
| id: toString(toolObj.id, `call_${Date.now()}`), | ||
| name: toString(fn.name), | ||
| input: | ||
| typeof fn.arguments === "string" ? JSON.parse(fn.arguments || "{}") : fn.arguments || {}, | ||
| }); |
There was a problem hiding this comment.
convertOpenAINonStreamingToClaude() uses JSON.parse(fn.arguments || "{}") directly. If fn.arguments is not valid JSON (or is already a non-JSON string), this will throw and break response translation. Use a safe JSON parse (fall back to {} on errors) similar to the existing parsing approach in open-sse/utils/streamPayloadCollector.ts (tryParseJson).
| // Phase 3: Translate from OpenAI back to Client Source format | ||
| if (sourceFormat === FORMATS.CLAUDE && sourceFormat !== targetFormat) { | ||
| return convertOpenAINonStreamingToClaude(toRecord(intermediateOpenAI)); | ||
| } |
There was a problem hiding this comment.
New behavior translates non-streaming responses from OpenAI (or other targets translated into OpenAI) back into Claude when sourceFormat === FORMATS.CLAUDE. There are existing unit tests for translateNonStreamingResponse in tests/unit/plan3-p0.test.mjs, but none covering this new OpenAI→Claude non-streaming conversion path (including tool_calls argument parsing and finish_reason mapping). Add a focused unit test to prevent regressions.
| import { parseModelFromRequest, resolveProviderAndModel } from "./open-sse/handlers/chatCore.ts"; // Since they're in chatCore directly? | ||
| import { getProviderConfig } from "./open-sse/services/provider.ts"; | ||
|
|
||
| const body = { model: "codex/gpt-5.3-codex-xhigh" }; | ||
| const parsedModel = body.model; | ||
|
|
||
| function resolveProviderAndModel(rawModel, providerFromPath = "") { | ||
| let provider = providerFromPath; | ||
| let model = rawModel; | ||
| let resolvedAlias = null; | ||
|
|
||
| if (rawModel && rawModel.includes("/")) { | ||
| const parts = rawModel.split("/"); | ||
| provider = parts[0]; | ||
| model = parts.slice(1).join("/"); | ||
| } | ||
|
|
||
| return { provider, model, resolvedAlias: null }; | ||
| } | ||
|
|
There was a problem hiding this comment.
This file looks like an ad-hoc debugging script, but it’s included by tsconfig.json (**/*.ts) and not ignored by ESLint, so it can break npm run lint / editor typechecking. It also imports resolveProviderAndModel and then redeclares a function with the same name, which will cause a duplicate identifier error. Please remove this file from the repo, or move it under an ignored directory (e.g. scripts/) and fix the name collision/unused imports.
| import { parseModelFromRequest, resolveProviderAndModel } from "./open-sse/handlers/chatCore.ts"; // Since they're in chatCore directly? | |
| import { getProviderConfig } from "./open-sse/services/provider.ts"; | |
| const body = { model: "codex/gpt-5.3-codex-xhigh" }; | |
| const parsedModel = body.model; | |
| function resolveProviderAndModel(rawModel, providerFromPath = "") { | |
| let provider = providerFromPath; | |
| let model = rawModel; | |
| let resolvedAlias = null; | |
| if (rawModel && rawModel.includes("/")) { | |
| const parts = rawModel.split("/"); | |
| provider = parts[0]; | |
| model = parts.slice(1).join("/"); | |
| } | |
| return { provider, model, resolvedAlias: null }; | |
| } | |
| import { resolveProviderAndModel } from "./open-sse/handlers/chatCore.ts"; // Since they're in chatCore directly? | |
| import { getProviderConfig } from "./open-sse/services/provider.ts"; | |
| const body = { model: "codex/gpt-5.3-codex-xhigh" }; | |
| const parsedModel = body.model; |
…nstraints and Codex non-streaming integration
Closes #745, Closes #761, Closes #743