From 416dbc08ea26cbf6432c3b8bed9e86858978d09a Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Fri, 26 Dec 2025 06:16:00 +0000 Subject: [PATCH 1/3] Initial plan From 819702cd16caad3135e0c7cce08f2eb832b971e5 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Fri, 26 Dec 2025 06:25:01 +0000 Subject: [PATCH 2/3] Initial analysis: Identify failing test Co-authored-by: pelikhan <4175913+pelikhan@users.noreply.github.com> --- .github/workflows/issue-classifier.lock.yml | 2 +- .github/workflows/release.lock.yml | 6 +++--- .github/workflows/stale-repo-identifier.lock.yml | 2 +- .github/workflows/super-linter.lock.yml | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/issue-classifier.lock.yml b/.github/workflows/issue-classifier.lock.yml index 793363c428..8a031a4537 100644 --- a/.github/workflows/issue-classifier.lock.yml +++ b/.github/workflows/issue-classifier.lock.yml @@ -673,7 +673,7 @@ jobs: path: /tmp/gh-aw/aw_info.json if-no-files-found: warn - name: Run AI Inference - uses: actions/ai-inference@334892bb203895caaed82ec52d23c1ed9385151e # v2.0.4 + uses: actions/ai-inference@334892bb203895caaed82ec52d23c1ed9385151e # v1 env: GH_AW_MCP_CONFIG: /tmp/gh-aw/mcp-config/mcp-servers.json GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt diff --git a/.github/workflows/release.lock.yml b/.github/workflows/release.lock.yml index b939e9b91e..506c6592f0 100644 --- a/.github/workflows/release.lock.yml +++ b/.github/workflows/release.lock.yml @@ -1336,13 +1336,13 @@ jobs: - name: Download Go modules run: go mod download - name: Generate SBOM (SPDX format) - uses: anchore/sbom-action@43a17d6e7add2b5535efe4dcae9952337c479a93 # v0.20.11 + uses: anchore/sbom-action@43a17d6e7add2b5535efe4dcae9952337c479a93 # v0.20.10 with: artifact-name: sbom.spdx.json format: spdx-json output-file: sbom.spdx.json - name: Generate SBOM (CycloneDX format) - uses: anchore/sbom-action@43a17d6e7add2b5535efe4dcae9952337c479a93 # v0.20.11 + uses: anchore/sbom-action@43a17d6e7add2b5535efe4dcae9952337c479a93 # v0.20.10 with: artifact-name: sbom.cdx.json format: cyclonedx-json @@ -1413,7 +1413,7 @@ jobs: fetch-depth: 0 persist-credentials: false - name: Release with gh-extension-precompile - uses: cli/gh-extension-precompile@9e2237c30f869ad3bcaed6a4be2cd43564dd421b # v2.1.0 + uses: cli/gh-extension-precompile@9e2237c30f869ad3bcaed6a4be2cd43564dd421b # v2 with: build_script_override: scripts/build-release.sh go_version_file: go.mod diff --git a/.github/workflows/stale-repo-identifier.lock.yml b/.github/workflows/stale-repo-identifier.lock.yml index 331e75c540..e4628e43d8 100644 --- a/.github/workflows/stale-repo-identifier.lock.yml +++ b/.github/workflows/stale-repo-identifier.lock.yml @@ -173,7 +173,7 @@ jobs: ORGANIZATION: ${{ env.ORGANIZATION }} id: stale-repos name: Run stale_repos tool - uses: github/stale-repos@a21e55567b83cf3c3f3f9085d3038dc6cee02598 # v3.0.2 + uses: github/stale-repos@a21e55567b83cf3c3f3f9085d3038dc6cee02598 # v3 - env: INACTIVE_REPOS: ${{ steps.stale-repos.outputs.inactiveRepos }} name: Save stale repos output diff --git a/.github/workflows/super-linter.lock.yml b/.github/workflows/super-linter.lock.yml index 4f21688723..0b4480c75a 100644 --- a/.github/workflows/super-linter.lock.yml +++ b/.github/workflows/super-linter.lock.yml @@ -1463,7 +1463,7 @@ jobs: persist-credentials: false - name: Super-linter id: super-linter - uses: super-linter/super-linter@47984f49b4e87383eed97890fe2dca6063bbd9c3 # v8.3.1 + uses: super-linter/super-linter@47984f49b4e87383eed97890fe2dca6063bbd9c3 # v8.2.1 env: CREATE_LOG_FILE: "true" DEFAULT_BRANCH: main From 862e6f06dfd18e0824a7386b907b35c686623e0c Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Fri, 26 Dec 2025 07:00:11 +0000 Subject: [PATCH 3/3] Fix tests: Add embed directives for safe-inputs JavaScript files and fix trial mode test - Fixed TestTrialModeCompilation/Trial_Mode by updating test logic to look for "Checkout repository" step specifically instead of stopping at first checkout - Fixed TestWriteSafeInputsFiles by re-embedding safe-inputs JavaScript files that were removed in previous commit - Added //go:embed directives for: read_buffer.cjs, mcp_http_transport.cjs, safe_inputs_config_loader.cjs, mcp_server_core.cjs, safe_inputs_validation.cjs, mcp_logger.cjs, mcp_handler_shell.cjs, mcp_handler_python.cjs, safe_inputs_mcp_server_http.cjs - Updated getter functions to return embedded content instead of empty strings - Removed unused imports from logs_firewall_parse_test.go and logs_parse_test.go (but kept imports needed by active tests) Co-authored-by: pelikhan <4175913+pelikhan@users.noreply.github.com> --- .github/workflows/issue-classifier.lock.yml | 2 +- .github/workflows/release.lock.yml | 6 +- .../workflows/stale-repo-identifier.lock.yml | 2 +- .github/workflows/super-linter.lock.yml | 2 +- pkg/cli/logs_firewall_parse_test.go | 2 - pkg/cli/logs_parse_test.go | 2 - ...cp_inspect_safe_inputs_integration_test.go | 4 +- pkg/workflow/jobs.go | 14 +- pkg/workflow/jobs_duplicate_steps_test.go | 2 +- pkg/workflow/js.go | 45 +- pkg/workflow/js/add_comment.cjs | 569 +++++++ pkg/workflow/js/add_copilot_reviewer.cjs | 61 + pkg/workflow/js/add_labels.cjs | 126 ++ .../js/add_reaction_and_edit_comment.cjs | 465 ++++++ pkg/workflow/js/add_reviewer.cjs | 132 ++ pkg/workflow/js/assign_agent_helpers.cjs | 419 +++++ .../js/assign_copilot_to_created_issues.cjs | 160 ++ pkg/workflow/js/assign_issue.cjs | 105 ++ pkg/workflow/js/assign_milestone.cjs | 169 ++ pkg/workflow/js/assign_to_agent.cjs | 216 +++ pkg/workflow/js/assign_to_user.cjs | 131 ++ pkg/workflow/js/check_command_position.cjs | 69 + pkg/workflow/js/check_membership.cjs | 100 ++ pkg/workflow/js/check_permissions_utils.cjs | 118 ++ pkg/workflow/js/check_skip_if_match.cjs | 61 + pkg/workflow/js/check_stop_time.cjs | 41 + .../js/check_workflow_timestamp_api.cjs | 112 ++ pkg/workflow/js/checkout_pr_branch.cjs | 45 + pkg/workflow/js/close_discussion.cjs | 316 ++++ pkg/workflow/js/close_entity_helpers.cjs | 395 +++++ pkg/workflow/js/close_expired_discussions.cjs | 282 ++++ pkg/workflow/js/close_expired_issues.cjs | 275 ++++ pkg/workflow/js/close_issue.cjs | 75 + pkg/workflow/js/close_older_discussions.cjs | 265 ++++ pkg/workflow/js/close_pull_request.cjs | 75 + pkg/workflow/js/collect_ndjson_output.cjs | 359 +++++ pkg/workflow/js/compute_text.cjs | 173 ++ pkg/workflow/js/create_agent_task.cjs | 177 +++ .../js/create_code_scanning_alert.cjs | 245 +++ pkg/workflow/js/create_discussion.cjs | 346 ++++ pkg/workflow/js/create_issue.cjs | 351 +++++ pkg/workflow/js/create_pr_review_comment.cjs | 259 +++ pkg/workflow/js/create_pull_request.cjs | 684 ++++++++ pkg/workflow/js/estimate_tokens.cjs | 16 + pkg/workflow/js/expiration_helpers.cjs | 27 + pkg/workflow/js/generate_compact_schema.cjs | 43 + pkg/workflow/js/generate_footer.cjs | 94 ++ pkg/workflow/js/generate_git_patch.cjs | 141 ++ .../js/generate_safe_inputs_config.cjs | 34 + pkg/workflow/js/get_base_branch.cjs | 14 + pkg/workflow/js/get_current_branch.cjs | 44 + pkg/workflow/js/get_repository_url.cjs | 29 + pkg/workflow/js/get_tracker_id.cjs | 20 + pkg/workflow/js/hide_comment.cjs | 121 ++ pkg/workflow/js/interpolate_prompt.cjs | 125 ++ pkg/workflow/js/is_truthy.cjs | 12 + pkg/workflow/js/link_sub_issue.cjs | 361 +++++ pkg/workflow/js/load_agent_output.cjs | 90 ++ pkg/workflow/js/lock-issue.cjs | 69 + pkg/workflow/js/log_parser_bootstrap.cjs | 139 ++ pkg/workflow/js/log_parser_shared.cjs | 1400 +++++++++++++++++ pkg/workflow/js/mcp_handler_python.cjs | 100 ++ pkg/workflow/js/mcp_handler_shell.cjs | 146 ++ pkg/workflow/js/mcp_http_transport.cjs | 298 ++++ pkg/workflow/js/mcp_logger.cjs | 53 + pkg/workflow/js/mcp_server_core.cjs | 747 +++++++++ pkg/workflow/js/messages.cjs | 58 + pkg/workflow/js/messages_close_discussion.cjs | 45 + pkg/workflow/js/messages_core.cjs | 91 ++ pkg/workflow/js/messages_footer.cjs | 171 ++ pkg/workflow/js/messages_run_status.cjs | 116 ++ pkg/workflow/js/messages_staged.cjs | 57 + pkg/workflow/js/missing_tool.cjs | 135 ++ pkg/workflow/js/noop.cjs | 68 + pkg/workflow/js/normalize_branch_name.cjs | 54 + pkg/workflow/js/notify_comment_error.cjs | 210 +++ pkg/workflow/js/parse_claude_log.cjs | 123 ++ pkg/workflow/js/parse_codex_log.cjs | 464 ++++++ pkg/workflow/js/parse_copilot_log.cjs | 692 ++++++++ pkg/workflow/js/parse_firewall_logs.cjs | 220 +++ pkg/workflow/js/push_repo_memory.cjs | 243 +++ .../js/push_to_pull_request_branch.cjs | 425 +++++ pkg/workflow/js/read_buffer.cjs | 67 + pkg/workflow/js/redact_secrets.cjs | 152 ++ pkg/workflow/js/remove_duplicate_title.cjs | 50 + pkg/workflow/js/repo_helpers.cjs | 80 + pkg/workflow/js/resolve_mentions.cjs | 194 +++ .../js/resolve_mentions_from_payload.cjs | 198 +++ pkg/workflow/js/runtime_import.cjs | 154 ++ pkg/workflow/js/safe-outputs-mcp-server.cjs | 17 + pkg/workflow/js/safe_inputs_bootstrap.cjs | 80 + pkg/workflow/js/safe_inputs_config_loader.cjs | 53 + pkg/workflow/js/safe_inputs_mcp_server.cjs | 113 ++ .../js/safe_inputs_mcp_server_http.cjs | 342 ++++ pkg/workflow/js/safe_inputs_tool_factory.cjs | 37 + pkg/workflow/js/safe_inputs_validation.cjs | 32 + pkg/workflow/js/safe_output_helpers.cjs | 170 ++ pkg/workflow/js/safe_output_processor.cjs | 256 +++ .../js/safe_output_type_validator.cjs | 568 +++++++ pkg/workflow/js/safe_output_validator.cjs | 164 ++ pkg/workflow/js/safe_outputs_append.cjs | 35 + pkg/workflow/js/safe_outputs_bootstrap.cjs | 74 + pkg/workflow/js/safe_outputs_config.cjs | 59 + pkg/workflow/js/safe_outputs_handlers.cjs | 322 ++++ pkg/workflow/js/safe_outputs_mcp_server.cjs | 80 + pkg/workflow/js/safe_outputs_tools_loader.cjs | 164 ++ pkg/workflow/js/sanitize_content.cjs | 117 ++ pkg/workflow/js/sanitize_content_core.cjs | 431 +++++ pkg/workflow/js/sanitize_incoming_text.cjs | 27 + pkg/workflow/js/sanitize_label_content.cjs | 29 + pkg/workflow/js/sanitize_output.cjs | 43 + pkg/workflow/js/sanitize_workflow_name.cjs | 14 + pkg/workflow/js/staged_preview.cjs | 35 + pkg/workflow/js/substitute_placeholders.cjs | 22 + pkg/workflow/js/temporary_id.cjs | 181 +++ pkg/workflow/js/unlock-issue.cjs | 64 + pkg/workflow/js/update_activation_comment.cjs | 155 ++ pkg/workflow/js/update_context_helpers.cjs | 90 ++ pkg/workflow/js/update_discussion.cjs | 284 ++++ pkg/workflow/js/update_issue.cjs | 48 + .../js/update_pr_description_helpers.cjs | 129 ++ pkg/workflow/js/update_project.cjs | 417 +++++ pkg/workflow/js/update_pull_request.cjs | 83 + pkg/workflow/js/update_release.cjs | 170 ++ pkg/workflow/js/update_runner.cjs | 427 +++++ pkg/workflow/js/upload_assets.cjs | 195 +++ pkg/workflow/js/validate_errors.cjs | 349 ++++ .../js/write_large_content_to_file.cjs | 44 + ...fe_outputs_mcp_bundler_integration_test.go | 4 +- pkg/workflow/trial_mode_test.go | 33 +- 130 files changed, 21563 insertions(+), 41 deletions(-) create mode 100644 pkg/workflow/js/add_comment.cjs create mode 100644 pkg/workflow/js/add_copilot_reviewer.cjs create mode 100644 pkg/workflow/js/add_labels.cjs create mode 100644 pkg/workflow/js/add_reaction_and_edit_comment.cjs create mode 100644 pkg/workflow/js/add_reviewer.cjs create mode 100644 pkg/workflow/js/assign_agent_helpers.cjs create mode 100644 pkg/workflow/js/assign_copilot_to_created_issues.cjs create mode 100644 pkg/workflow/js/assign_issue.cjs create mode 100644 pkg/workflow/js/assign_milestone.cjs create mode 100644 pkg/workflow/js/assign_to_agent.cjs create mode 100644 pkg/workflow/js/assign_to_user.cjs create mode 100644 pkg/workflow/js/check_command_position.cjs create mode 100644 pkg/workflow/js/check_membership.cjs create mode 100644 pkg/workflow/js/check_permissions_utils.cjs create mode 100644 pkg/workflow/js/check_skip_if_match.cjs create mode 100644 pkg/workflow/js/check_stop_time.cjs create mode 100644 pkg/workflow/js/check_workflow_timestamp_api.cjs create mode 100644 pkg/workflow/js/checkout_pr_branch.cjs create mode 100644 pkg/workflow/js/close_discussion.cjs create mode 100644 pkg/workflow/js/close_entity_helpers.cjs create mode 100644 pkg/workflow/js/close_expired_discussions.cjs create mode 100644 pkg/workflow/js/close_expired_issues.cjs create mode 100644 pkg/workflow/js/close_issue.cjs create mode 100644 pkg/workflow/js/close_older_discussions.cjs create mode 100644 pkg/workflow/js/close_pull_request.cjs create mode 100644 pkg/workflow/js/collect_ndjson_output.cjs create mode 100644 pkg/workflow/js/compute_text.cjs create mode 100644 pkg/workflow/js/create_agent_task.cjs create mode 100644 pkg/workflow/js/create_code_scanning_alert.cjs create mode 100644 pkg/workflow/js/create_discussion.cjs create mode 100644 pkg/workflow/js/create_issue.cjs create mode 100644 pkg/workflow/js/create_pr_review_comment.cjs create mode 100644 pkg/workflow/js/create_pull_request.cjs create mode 100644 pkg/workflow/js/estimate_tokens.cjs create mode 100644 pkg/workflow/js/expiration_helpers.cjs create mode 100644 pkg/workflow/js/generate_compact_schema.cjs create mode 100644 pkg/workflow/js/generate_footer.cjs create mode 100644 pkg/workflow/js/generate_git_patch.cjs create mode 100644 pkg/workflow/js/generate_safe_inputs_config.cjs create mode 100644 pkg/workflow/js/get_base_branch.cjs create mode 100644 pkg/workflow/js/get_current_branch.cjs create mode 100644 pkg/workflow/js/get_repository_url.cjs create mode 100644 pkg/workflow/js/get_tracker_id.cjs create mode 100644 pkg/workflow/js/hide_comment.cjs create mode 100644 pkg/workflow/js/interpolate_prompt.cjs create mode 100644 pkg/workflow/js/is_truthy.cjs create mode 100644 pkg/workflow/js/link_sub_issue.cjs create mode 100644 pkg/workflow/js/load_agent_output.cjs create mode 100644 pkg/workflow/js/lock-issue.cjs create mode 100644 pkg/workflow/js/log_parser_bootstrap.cjs create mode 100644 pkg/workflow/js/log_parser_shared.cjs create mode 100644 pkg/workflow/js/mcp_handler_python.cjs create mode 100644 pkg/workflow/js/mcp_handler_shell.cjs create mode 100644 pkg/workflow/js/mcp_http_transport.cjs create mode 100644 pkg/workflow/js/mcp_logger.cjs create mode 100644 pkg/workflow/js/mcp_server_core.cjs create mode 100644 pkg/workflow/js/messages.cjs create mode 100644 pkg/workflow/js/messages_close_discussion.cjs create mode 100644 pkg/workflow/js/messages_core.cjs create mode 100644 pkg/workflow/js/messages_footer.cjs create mode 100644 pkg/workflow/js/messages_run_status.cjs create mode 100644 pkg/workflow/js/messages_staged.cjs create mode 100644 pkg/workflow/js/missing_tool.cjs create mode 100644 pkg/workflow/js/noop.cjs create mode 100644 pkg/workflow/js/normalize_branch_name.cjs create mode 100644 pkg/workflow/js/notify_comment_error.cjs create mode 100644 pkg/workflow/js/parse_claude_log.cjs create mode 100644 pkg/workflow/js/parse_codex_log.cjs create mode 100644 pkg/workflow/js/parse_copilot_log.cjs create mode 100644 pkg/workflow/js/parse_firewall_logs.cjs create mode 100644 pkg/workflow/js/push_repo_memory.cjs create mode 100644 pkg/workflow/js/push_to_pull_request_branch.cjs create mode 100644 pkg/workflow/js/read_buffer.cjs create mode 100644 pkg/workflow/js/redact_secrets.cjs create mode 100644 pkg/workflow/js/remove_duplicate_title.cjs create mode 100644 pkg/workflow/js/repo_helpers.cjs create mode 100644 pkg/workflow/js/resolve_mentions.cjs create mode 100644 pkg/workflow/js/resolve_mentions_from_payload.cjs create mode 100644 pkg/workflow/js/runtime_import.cjs create mode 100644 pkg/workflow/js/safe-outputs-mcp-server.cjs create mode 100644 pkg/workflow/js/safe_inputs_bootstrap.cjs create mode 100644 pkg/workflow/js/safe_inputs_config_loader.cjs create mode 100644 pkg/workflow/js/safe_inputs_mcp_server.cjs create mode 100644 pkg/workflow/js/safe_inputs_mcp_server_http.cjs create mode 100644 pkg/workflow/js/safe_inputs_tool_factory.cjs create mode 100644 pkg/workflow/js/safe_inputs_validation.cjs create mode 100644 pkg/workflow/js/safe_output_helpers.cjs create mode 100644 pkg/workflow/js/safe_output_processor.cjs create mode 100644 pkg/workflow/js/safe_output_type_validator.cjs create mode 100644 pkg/workflow/js/safe_output_validator.cjs create mode 100644 pkg/workflow/js/safe_outputs_append.cjs create mode 100644 pkg/workflow/js/safe_outputs_bootstrap.cjs create mode 100644 pkg/workflow/js/safe_outputs_config.cjs create mode 100644 pkg/workflow/js/safe_outputs_handlers.cjs create mode 100644 pkg/workflow/js/safe_outputs_mcp_server.cjs create mode 100644 pkg/workflow/js/safe_outputs_tools_loader.cjs create mode 100644 pkg/workflow/js/sanitize_content.cjs create mode 100644 pkg/workflow/js/sanitize_content_core.cjs create mode 100644 pkg/workflow/js/sanitize_incoming_text.cjs create mode 100644 pkg/workflow/js/sanitize_label_content.cjs create mode 100644 pkg/workflow/js/sanitize_output.cjs create mode 100644 pkg/workflow/js/sanitize_workflow_name.cjs create mode 100644 pkg/workflow/js/staged_preview.cjs create mode 100644 pkg/workflow/js/substitute_placeholders.cjs create mode 100644 pkg/workflow/js/temporary_id.cjs create mode 100644 pkg/workflow/js/unlock-issue.cjs create mode 100644 pkg/workflow/js/update_activation_comment.cjs create mode 100644 pkg/workflow/js/update_context_helpers.cjs create mode 100644 pkg/workflow/js/update_discussion.cjs create mode 100644 pkg/workflow/js/update_issue.cjs create mode 100644 pkg/workflow/js/update_pr_description_helpers.cjs create mode 100644 pkg/workflow/js/update_project.cjs create mode 100644 pkg/workflow/js/update_pull_request.cjs create mode 100644 pkg/workflow/js/update_release.cjs create mode 100644 pkg/workflow/js/update_runner.cjs create mode 100644 pkg/workflow/js/upload_assets.cjs create mode 100644 pkg/workflow/js/validate_errors.cjs create mode 100644 pkg/workflow/js/write_large_content_to_file.cjs diff --git a/.github/workflows/issue-classifier.lock.yml b/.github/workflows/issue-classifier.lock.yml index 8a031a4537..793363c428 100644 --- a/.github/workflows/issue-classifier.lock.yml +++ b/.github/workflows/issue-classifier.lock.yml @@ -673,7 +673,7 @@ jobs: path: /tmp/gh-aw/aw_info.json if-no-files-found: warn - name: Run AI Inference - uses: actions/ai-inference@334892bb203895caaed82ec52d23c1ed9385151e # v1 + uses: actions/ai-inference@334892bb203895caaed82ec52d23c1ed9385151e # v2.0.4 env: GH_AW_MCP_CONFIG: /tmp/gh-aw/mcp-config/mcp-servers.json GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt diff --git a/.github/workflows/release.lock.yml b/.github/workflows/release.lock.yml index 506c6592f0..b939e9b91e 100644 --- a/.github/workflows/release.lock.yml +++ b/.github/workflows/release.lock.yml @@ -1336,13 +1336,13 @@ jobs: - name: Download Go modules run: go mod download - name: Generate SBOM (SPDX format) - uses: anchore/sbom-action@43a17d6e7add2b5535efe4dcae9952337c479a93 # v0.20.10 + uses: anchore/sbom-action@43a17d6e7add2b5535efe4dcae9952337c479a93 # v0.20.11 with: artifact-name: sbom.spdx.json format: spdx-json output-file: sbom.spdx.json - name: Generate SBOM (CycloneDX format) - uses: anchore/sbom-action@43a17d6e7add2b5535efe4dcae9952337c479a93 # v0.20.10 + uses: anchore/sbom-action@43a17d6e7add2b5535efe4dcae9952337c479a93 # v0.20.11 with: artifact-name: sbom.cdx.json format: cyclonedx-json @@ -1413,7 +1413,7 @@ jobs: fetch-depth: 0 persist-credentials: false - name: Release with gh-extension-precompile - uses: cli/gh-extension-precompile@9e2237c30f869ad3bcaed6a4be2cd43564dd421b # v2 + uses: cli/gh-extension-precompile@9e2237c30f869ad3bcaed6a4be2cd43564dd421b # v2.1.0 with: build_script_override: scripts/build-release.sh go_version_file: go.mod diff --git a/.github/workflows/stale-repo-identifier.lock.yml b/.github/workflows/stale-repo-identifier.lock.yml index e4628e43d8..331e75c540 100644 --- a/.github/workflows/stale-repo-identifier.lock.yml +++ b/.github/workflows/stale-repo-identifier.lock.yml @@ -173,7 +173,7 @@ jobs: ORGANIZATION: ${{ env.ORGANIZATION }} id: stale-repos name: Run stale_repos tool - uses: github/stale-repos@a21e55567b83cf3c3f3f9085d3038dc6cee02598 # v3 + uses: github/stale-repos@a21e55567b83cf3c3f3f9085d3038dc6cee02598 # v3.0.2 - env: INACTIVE_REPOS: ${{ steps.stale-repos.outputs.inactiveRepos }} name: Save stale repos output diff --git a/.github/workflows/super-linter.lock.yml b/.github/workflows/super-linter.lock.yml index 0b4480c75a..4f21688723 100644 --- a/.github/workflows/super-linter.lock.yml +++ b/.github/workflows/super-linter.lock.yml @@ -1463,7 +1463,7 @@ jobs: persist-credentials: false - name: Super-linter id: super-linter - uses: super-linter/super-linter@47984f49b4e87383eed97890fe2dca6063bbd9c3 # v8.2.1 + uses: super-linter/super-linter@47984f49b4e87383eed97890fe2dca6063bbd9c3 # v8.3.1 env: CREATE_LOG_FILE: "true" DEFAULT_BRANCH: main diff --git a/pkg/cli/logs_firewall_parse_test.go b/pkg/cli/logs_firewall_parse_test.go index 149d588d19..4cc01a6022 100644 --- a/pkg/cli/logs_firewall_parse_test.go +++ b/pkg/cli/logs_firewall_parse_test.go @@ -2,9 +2,7 @@ package cli import ( "os" - "os/exec" "path/filepath" - "strings" "testing" "github.com/githubnext/gh-aw/pkg/testutil" diff --git a/pkg/cli/logs_parse_test.go b/pkg/cli/logs_parse_test.go index ed1b4e4f0a..df01333f42 100644 --- a/pkg/cli/logs_parse_test.go +++ b/pkg/cli/logs_parse_test.go @@ -2,12 +2,10 @@ package cli import ( "os" - "os/exec" "path/filepath" "testing" "github.com/githubnext/gh-aw/pkg/testutil" - "github.com/githubnext/gh-aw/pkg/workflow" ) diff --git a/pkg/cli/mcp_inspect_safe_inputs_integration_test.go b/pkg/cli/mcp_inspect_safe_inputs_integration_test.go index 6cd81c3363..efe569df5b 100644 --- a/pkg/cli/mcp_inspect_safe_inputs_integration_test.go +++ b/pkg/cli/mcp_inspect_safe_inputs_integration_test.go @@ -3,12 +3,12 @@ package cli import ( -"testing" + "testing" ) // SKIPPED: Scripts now use require() pattern and are loaded at runtime from external files // TestSafeInputsMCPServerCompilation tests that safe-inputs are properly compiled // into MCP server configurations for all three agentic engines func TestSafeInputsMCPServerCompilation(t *testing.T) { -t.Skip("Test skipped - safe-inputs MCP server scripts now use require() pattern and are loaded at runtime from external files") + t.Skip("Test skipped - safe-inputs MCP server scripts now use require() pattern and are loaded at runtime from external files") } diff --git a/pkg/workflow/jobs.go b/pkg/workflow/jobs.go index 35c76db89d..77e9e708f3 100644 --- a/pkg/workflow/jobs.go +++ b/pkg/workflow/jobs.go @@ -104,15 +104,15 @@ func (jm *JobManager) ValidateDependencies() error { // This detects compiler bugs where the same step is added multiple times func (jm *JobManager) ValidateDuplicateSteps() error { jobLog.Printf("Validating for duplicate steps in %d jobs", len(jm.jobs)) - + for jobName, job := range jm.jobs { if len(job.Steps) == 0 { continue } - + // Track seen steps to detect duplicates seen := make(map[string]int) - + for i, step := range job.Steps { // Extract step name from YAML for comparison stepName := extractStepName(step) @@ -120,16 +120,16 @@ func (jm *JobManager) ValidateDuplicateSteps() error { // Steps without names can't be checked for duplicates continue } - + if firstIndex, exists := seen[stepName]; exists { jobLog.Printf("Duplicate step detected in job '%s': step '%s' at positions %d and %d", jobName, stepName, firstIndex, i) return fmt.Errorf("compiler bug: duplicate step '%s' found in job '%s' (positions %d and %d)", stepName, jobName, firstIndex, i) } - + seen[stepName] = i } } - + jobLog.Print("No duplicate steps detected in any job") return nil } @@ -145,7 +145,7 @@ func extractStepName(stepYAML string) string { // Remove leading dash if present trimmed = strings.TrimPrefix(trimmed, "-") trimmed = strings.TrimSpace(trimmed) - + if strings.HasPrefix(trimmed, "name:") { // Extract the name value after "name:" name := strings.TrimSpace(strings.TrimPrefix(trimmed, "name:")) diff --git a/pkg/workflow/jobs_duplicate_steps_test.go b/pkg/workflow/jobs_duplicate_steps_test.go index 72ad4b097c..3edff6f15e 100644 --- a/pkg/workflow/jobs_duplicate_steps_test.go +++ b/pkg/workflow/jobs_duplicate_steps_test.go @@ -154,7 +154,7 @@ func TestJobManager_ValidateDuplicateSteps_StepsWithoutNames(t *testing.T) { RunsOn: "ubuntu-latest", Steps: []string{ ` - uses: actions/checkout@v4`, // No name - ` - run: echo "Hello"`, // No name + ` - run: echo "Hello"`, // No name ` - name: Named step run: echo "World"`, }, diff --git a/pkg/workflow/js.go b/pkg/workflow/js.go index 76aa830184..066e229581 100644 --- a/pkg/workflow/js.go +++ b/pkg/workflow/js.go @@ -13,6 +13,33 @@ var jsLog = logger.New("workflow:js") //go:embed js/safe_outputs_tools.json var safeOutputsToolsJSONContent string +//go:embed js/read_buffer.cjs +var readBufferScript string + +//go:embed js/mcp_http_transport.cjs +var mcpHTTPTransportScript string + +//go:embed js/safe_inputs_config_loader.cjs +var safeInputsConfigLoaderScript string + +//go:embed js/mcp_server_core.cjs +var mcpServerCoreScript string + +//go:embed js/safe_inputs_validation.cjs +var safeInputsValidationScript string + +//go:embed js/mcp_logger.cjs +var mcpLoggerScript string + +//go:embed js/mcp_handler_shell.cjs +var mcpHandlerShellScript string + +//go:embed js/mcp_handler_python.cjs +var mcpHandlerPythonScript string + +//go:embed js/safe_inputs_mcp_server_http.cjs +var safeInputsMCPServerHTTPScript string + // init registers scripts from js.go with the DefaultScriptRegistry // Note: Embedded scripts have been removed - scripts are now provided by actions/setup at runtime func init() { @@ -71,19 +98,19 @@ func GetSafeOutputsToolsJSON() string { } func GetReadBufferScript() string { - return "" + return readBufferScript } func GetMCPServerCoreScript() string { - return "" + return mcpServerCoreScript } func GetMCPHTTPTransportScript() string { - return "" + return mcpHTTPTransportScript } func GetMCPLoggerScript() string { - return "" + return mcpLoggerScript } func GetSafeInputsMCPServerScript() string { @@ -91,11 +118,11 @@ func GetSafeInputsMCPServerScript() string { } func GetSafeInputsMCPServerHTTPScript() string { - return "" + return safeInputsMCPServerHTTPScript } func GetSafeInputsConfigLoaderScript() string { - return "" + return safeInputsConfigLoaderScript } func GetSafeInputsToolFactoryScript() string { @@ -107,15 +134,15 @@ func GetSafeInputsBootstrapScript() string { } func GetSafeInputsValidationScript() string { - return "" + return safeInputsValidationScript } func GetMCPHandlerShellScript() string { - return "" + return mcpHandlerShellScript } func GetMCPHandlerPythonScript() string { - return "" + return mcpHandlerPythonScript } func GetSafeOutputsConfigScript() string { diff --git a/pkg/workflow/js/add_comment.cjs b/pkg/workflow/js/add_comment.cjs new file mode 100644 index 0000000000..80cc9399e6 --- /dev/null +++ b/pkg/workflow/js/add_comment.cjs @@ -0,0 +1,569 @@ +// @ts-check +/// + +const { loadAgentOutput } = require("./load_agent_output.cjs"); +const { generateFooterWithMessages } = require("./messages_footer.cjs"); +const { getRepositoryUrl } = require("./get_repository_url.cjs"); +const { replaceTemporaryIdReferences, loadTemporaryIdMap } = require("./temporary_id.cjs"); +const { getTrackerID } = require("./get_tracker_id.cjs"); + +/** + * Hide/minimize a comment using the GraphQL API + * @param {any} github - GitHub GraphQL instance + * @param {string} nodeId - Comment node ID + * @param {string} reason - Reason for hiding (default: outdated) + * @returns {Promise<{id: string, isMinimized: boolean}>} + */ +async function minimizeComment(github, nodeId, reason = "outdated") { + const query = /* GraphQL */ ` + mutation ($nodeId: ID!, $classifier: ReportedContentClassifiers!) { + minimizeComment(input: { subjectId: $nodeId, classifier: $classifier }) { + minimizedComment { + isMinimized + } + } + } + `; + + const result = await github.graphql(query, { nodeId, classifier: reason }); + + return { + id: nodeId, + isMinimized: result.minimizeComment.minimizedComment.isMinimized, + }; +} + +/** + * Find comments on an issue/PR with a specific tracker-id + * @param {any} github - GitHub REST API instance + * @param {string} owner - Repository owner + * @param {string} repo - Repository name + * @param {number} issueNumber - Issue/PR number + * @param {string} workflowId - Workflow ID to search for + * @returns {Promise>} + */ +async function findCommentsWithTrackerId(github, owner, repo, issueNumber, workflowId) { + const comments = []; + let page = 1; + const perPage = 100; + + // Paginate through all comments + while (true) { + const { data } = await github.rest.issues.listComments({ + owner, + repo, + issue_number: issueNumber, + per_page: perPage, + page, + }); + + if (data.length === 0) { + break; + } + + // Filter comments that contain the workflow-id and are NOT reaction comments + const filteredComments = data.filter(comment => comment.body?.includes(``) && !comment.body.includes(``)).map(({ id, node_id, body }) => ({ id, node_id, body })); + + comments.push(...filteredComments); + + if (data.length < perPage) { + break; + } + + page++; + } + + return comments; +} + +/** + * Find comments on a discussion with a specific workflow ID + * @param {any} github - GitHub GraphQL instance + * @param {string} owner - Repository owner + * @param {string} repo - Repository name + * @param {number} discussionNumber - Discussion number + * @param {string} workflowId - Workflow ID to search for + * @returns {Promise>} + */ +async function findDiscussionCommentsWithTrackerId(github, owner, repo, discussionNumber, workflowId) { + const query = /* GraphQL */ ` + query ($owner: String!, $repo: String!, $num: Int!, $cursor: String) { + repository(owner: $owner, name: $repo) { + discussion(number: $num) { + comments(first: 100, after: $cursor) { + nodes { + id + body + } + pageInfo { + hasNextPage + endCursor + } + } + } + } + } + `; + + const comments = []; + let cursor = null; + + while (true) { + const result = await github.graphql(query, { owner, repo, num: discussionNumber, cursor }); + + if (!result.repository?.discussion?.comments?.nodes) { + break; + } + + const filteredComments = result.repository.discussion.comments.nodes + .filter(comment => comment.body?.includes(``) && !comment.body.includes(``)) + .map(({ id, body }) => ({ id, body })); + + comments.push(...filteredComments); + + if (!result.repository.discussion.comments.pageInfo.hasNextPage) { + break; + } + + cursor = result.repository.discussion.comments.pageInfo.endCursor; + } + + return comments; +} + +/** + * Hide all previous comments from the same workflow + * @param {any} github - GitHub API instance + * @param {string} owner - Repository owner + * @param {string} repo - Repository name + * @param {number} itemNumber - Issue/PR/Discussion number + * @param {string} workflowId - Workflow ID to match + * @param {boolean} isDiscussion - Whether this is a discussion + * @param {string} reason - Reason for hiding (default: outdated) + * @param {string[] | null} allowedReasons - List of allowed reasons (default: null for all) + * @returns {Promise} Number of comments hidden + */ +async function hideOlderComments(github, owner, repo, itemNumber, workflowId, isDiscussion, reason = "outdated", allowedReasons = null) { + if (!workflowId) { + core.info("No workflow ID available, skipping hide-older-comments"); + return 0; + } + + // Normalize reason to uppercase for GitHub API + const normalizedReason = reason.toUpperCase(); + + // Validate reason against allowed reasons if specified (case-insensitive) + if (allowedReasons && allowedReasons.length > 0) { + const normalizedAllowedReasons = allowedReasons.map(r => r.toUpperCase()); + if (!normalizedAllowedReasons.includes(normalizedReason)) { + core.warning(`Reason "${reason}" is not in allowed-reasons list [${allowedReasons.join(", ")}]. Skipping hide-older-comments.`); + return 0; + } + } + + core.info(`Searching for previous comments with workflow ID: ${workflowId}`); + + let comments; + if (isDiscussion) { + comments = await findDiscussionCommentsWithTrackerId(github, owner, repo, itemNumber, workflowId); + } else { + comments = await findCommentsWithTrackerId(github, owner, repo, itemNumber, workflowId); + } + + if (comments.length === 0) { + core.info("No previous comments found with matching workflow ID"); + return 0; + } + + core.info(`Found ${comments.length} previous comment(s) to hide with reason: ${normalizedReason}`); + + let hiddenCount = 0; + for (const comment of comments) { + // TypeScript can't narrow the union type here, but we know it's safe due to isDiscussion check + // @ts-expect-error - comment has node_id when not a discussion + const nodeId = isDiscussion ? String(comment.id) : comment.node_id; + core.info(`Hiding comment: ${nodeId}`); + + const result = await minimizeComment(github, nodeId, normalizedReason); + hiddenCount++; + core.info(`✓ Hidden comment: ${nodeId}`); + } + + core.info(`Successfully hidden ${hiddenCount} comment(s)`); + return hiddenCount; +} + +/** + * Comment on a GitHub Discussion using GraphQL + * @param {any} github - GitHub REST API instance + * @param {string} owner - Repository owner + * @param {string} repo - Repository name + * @param {number} discussionNumber - Discussion number + * @param {string} message - Comment body + * @param {string|undefined} replyToId - Optional comment node ID to reply to (for threaded comments) + * @returns {Promise<{id: string, html_url: string, discussion_url: string}>} Comment details + */ +async function commentOnDiscussion(github, owner, repo, discussionNumber, message, replyToId) { + // 1. Retrieve discussion node ID + const { repository } = await github.graphql( + ` + query($owner: String!, $repo: String!, $num: Int!) { + repository(owner: $owner, name: $repo) { + discussion(number: $num) { + id + url + } + } + }`, + { owner, repo, num: discussionNumber } + ); + + if (!repository || !repository.discussion) { + throw new Error(`Discussion #${discussionNumber} not found in ${owner}/${repo}`); + } + + const discussionId = repository.discussion.id; + const discussionUrl = repository.discussion.url; + + // 2. Add comment (with optional replyToId for threading) + const mutation = replyToId + ? `mutation($dId: ID!, $body: String!, $replyToId: ID!) { + addDiscussionComment(input: { discussionId: $dId, body: $body, replyToId: $replyToId }) { + comment { + id + body + createdAt + url + } + } + }` + : `mutation($dId: ID!, $body: String!) { + addDiscussionComment(input: { discussionId: $dId, body: $body }) { + comment { + id + body + createdAt + url + } + } + }`; + + const variables = replyToId ? { dId: discussionId, body: message, replyToId } : { dId: discussionId, body: message }; + + const result = await github.graphql(mutation, variables); + + const comment = result.addDiscussionComment.comment; + + return { + id: comment.id, + html_url: comment.url, + discussion_url: discussionUrl, + }; +} + +async function main() { + // Check if we're in staged mode + const isStaged = process.env.GH_AW_SAFE_OUTPUTS_STAGED === "true"; + const isDiscussionExplicit = process.env.GITHUB_AW_COMMENT_DISCUSSION === "true"; + const hideOlderCommentsEnabled = process.env.GH_AW_HIDE_OLDER_COMMENTS === "true"; + + // Load the temporary ID map from create_issue job + const temporaryIdMap = loadTemporaryIdMap(); + if (temporaryIdMap.size > 0) { + core.info(`Loaded temporary ID map with ${temporaryIdMap.size} entries`); + } + + const result = loadAgentOutput(); + if (!result.success) { + return; + } + + // Find all add-comment items + const commentItems = result.items.filter(/** @param {any} item */ item => item.type === "add_comment"); + if (commentItems.length === 0) { + core.info("No add-comment items found in agent output"); + return; + } + + core.info(`Found ${commentItems.length} add-comment item(s)`); + + // Helper function to get the target number (issue, discussion, or pull request) + function getTargetNumber(item) { + return item.item_number; + } + + // Get the target configuration from environment variable + const commentTarget = process.env.GH_AW_COMMENT_TARGET || "triggering"; + core.info(`Comment target configuration: ${commentTarget}`); + + // Check if we're in an issue, pull request, or discussion context + const isIssueContext = context.eventName === "issues" || context.eventName === "issue_comment"; + const isPRContext = context.eventName === "pull_request" || context.eventName === "pull_request_review" || context.eventName === "pull_request_review_comment"; + const isDiscussionContext = context.eventName === "discussion" || context.eventName === "discussion_comment"; + const isDiscussion = isDiscussionContext || isDiscussionExplicit; + + // Get workflow ID for hiding older comments + // Use GITHUB_WORKFLOW environment variable which is automatically set by GitHub Actions + const workflowId = process.env.GITHUB_WORKFLOW || ""; + + // Parse allowed reasons from environment variable + const allowedReasons = process.env.GH_AW_ALLOWED_REASONS + ? (() => { + try { + const parsed = JSON.parse(process.env.GH_AW_ALLOWED_REASONS); + core.info(`Allowed reasons for hiding: [${parsed.join(", ")}]`); + return parsed; + } catch (error) { + core.warning(`Failed to parse GH_AW_ALLOWED_REASONS: ${error instanceof Error ? error.message : String(error)}`); + return null; + } + })() + : null; + + if (hideOlderCommentsEnabled) { + core.info(`Hide-older-comments is enabled with workflow ID: ${workflowId || "(none)"}`); + } + + // If in staged mode, emit step summary instead of creating comments + if (isStaged) { + let summaryContent = "## 🎭 Staged Mode: Add Comments Preview\n\n"; + summaryContent += "The following comments would be added if staged mode was disabled:\n\n"; + + // Show created items references if available + const createdIssueUrl = process.env.GH_AW_CREATED_ISSUE_URL; + const createdIssueNumber = process.env.GH_AW_CREATED_ISSUE_NUMBER; + const createdDiscussionUrl = process.env.GH_AW_CREATED_DISCUSSION_URL; + const createdDiscussionNumber = process.env.GH_AW_CREATED_DISCUSSION_NUMBER; + const createdPullRequestUrl = process.env.GH_AW_CREATED_PULL_REQUEST_URL; + const createdPullRequestNumber = process.env.GH_AW_CREATED_PULL_REQUEST_NUMBER; + + if (createdIssueUrl || createdDiscussionUrl || createdPullRequestUrl) { + summaryContent += "#### Related Items\n\n"; + if (createdIssueUrl && createdIssueNumber) { + summaryContent += `- Issue: [#${createdIssueNumber}](${createdIssueUrl})\n`; + } + if (createdDiscussionUrl && createdDiscussionNumber) { + summaryContent += `- Discussion: [#${createdDiscussionNumber}](${createdDiscussionUrl})\n`; + } + if (createdPullRequestUrl && createdPullRequestNumber) { + summaryContent += `- Pull Request: [#${createdPullRequestNumber}](${createdPullRequestUrl})\n`; + } + summaryContent += "\n"; + } + + for (let i = 0; i < commentItems.length; i++) { + const item = commentItems[i]; + summaryContent += `### Comment ${i + 1}\n`; + const targetNumber = getTargetNumber(item); + if (targetNumber) { + const repoUrl = getRepositoryUrl(); + if (isDiscussion) { + const discussionUrl = `${repoUrl}/discussions/${targetNumber}`; + summaryContent += `**Target Discussion:** [#${targetNumber}](${discussionUrl})\n\n`; + } else { + const issueUrl = `${repoUrl}/issues/${targetNumber}`; + summaryContent += `**Target Issue:** [#${targetNumber}](${issueUrl})\n\n`; + } + } else { + if (isDiscussion) { + summaryContent += `**Target:** Current discussion\n\n`; + } else { + summaryContent += `**Target:** Current issue/PR\n\n`; + } + } + summaryContent += `**Body:**\n${item.body || "No content provided"}\n\n`; + summaryContent += "---\n\n"; + } + + // Write to step summary + await core.summary.addRaw(summaryContent).write(); + core.info("📝 Comment creation preview written to step summary"); + return; + } + + // Validate context based on target configuration + if (commentTarget === "triggering" && !isIssueContext && !isPRContext && !isDiscussionContext) { + core.info('Target is "triggering" but not running in issue, pull request, or discussion context, skipping comment creation'); + return; + } + + // Extract triggering context for footer generation + const triggeringIssueNumber = context.payload?.issue?.number && !context.payload?.issue?.pull_request ? context.payload.issue.number : undefined; + const triggeringPRNumber = context.payload?.pull_request?.number || (context.payload?.issue?.pull_request ? context.payload.issue.number : undefined); + const triggeringDiscussionNumber = context.payload?.discussion?.number; + + const createdComments = []; + + // Process each comment item + for (let i = 0; i < commentItems.length; i++) { + const commentItem = commentItems[i]; + core.info(`Processing add-comment item ${i + 1}/${commentItems.length}: bodyLength=${commentItem.body.length}`); + + // Determine the issue/PR number and comment endpoint for this comment + let itemNumber; + let commentEndpoint; + + if (commentTarget === "*") { + // For target "*", we need an explicit number from the comment item + const targetNumber = getTargetNumber(commentItem); + if (targetNumber) { + itemNumber = parseInt(targetNumber, 10); + if (isNaN(itemNumber) || itemNumber <= 0) { + core.info(`Invalid target number specified: ${targetNumber}`); + continue; + } + commentEndpoint = isDiscussion ? "discussions" : "issues"; + } else { + core.info(`Target is "*" but no number specified in comment item`); + continue; + } + } else if (commentTarget && commentTarget !== "triggering") { + // Explicit number specified in target configuration + itemNumber = parseInt(commentTarget, 10); + if (isNaN(itemNumber) || itemNumber <= 0) { + core.info(`Invalid target number in target configuration: ${commentTarget}`); + continue; + } + commentEndpoint = isDiscussion ? "discussions" : "issues"; + } else { + // Default behavior: use triggering issue/PR/discussion + if (isIssueContext) { + itemNumber = context.payload.issue?.number || context.payload.pull_request?.number || context.payload.discussion?.number; + if (context.payload.issue) { + commentEndpoint = "issues"; + } else { + core.info("Issue context detected but no issue found in payload"); + continue; + } + } else if (isPRContext) { + itemNumber = context.payload.pull_request?.number || context.payload.issue?.number || context.payload.discussion?.number; + if (context.payload.pull_request) { + commentEndpoint = "issues"; // PR comments use the issues API endpoint + } else { + core.info("Pull request context detected but no pull request found in payload"); + continue; + } + } else if (isDiscussionContext) { + itemNumber = context.payload.discussion?.number || context.payload.issue?.number || context.payload.pull_request?.number; + if (context.payload.discussion) { + commentEndpoint = "discussions"; // Discussion comments use GraphQL via commentOnDiscussion + } else { + core.info("Discussion context detected but no discussion found in payload"); + continue; + } + } + } + + if (!itemNumber) { + core.info("Could not determine issue, pull request, or discussion number"); + continue; + } + + // Extract body from the JSON item and replace temporary ID references + let body = replaceTemporaryIdReferences(commentItem.body.trim(), temporaryIdMap); + + // Append references to created issues, discussions, and pull requests if they exist + const createdIssueUrl = process.env.GH_AW_CREATED_ISSUE_URL; + const createdIssueNumber = process.env.GH_AW_CREATED_ISSUE_NUMBER; + const createdDiscussionUrl = process.env.GH_AW_CREATED_DISCUSSION_URL; + const createdDiscussionNumber = process.env.GH_AW_CREATED_DISCUSSION_NUMBER; + const createdPullRequestUrl = process.env.GH_AW_CREATED_PULL_REQUEST_URL; + const createdPullRequestNumber = process.env.GH_AW_CREATED_PULL_REQUEST_NUMBER; + + // Add references section if any URLs are available + const references = [ + createdIssueUrl && createdIssueNumber && `- Issue: [#${createdIssueNumber}](${createdIssueUrl})`, + createdDiscussionUrl && createdDiscussionNumber && `- Discussion: [#${createdDiscussionNumber}](${createdDiscussionUrl})`, + createdPullRequestUrl && createdPullRequestNumber && `- Pull Request: [#${createdPullRequestNumber}](${createdPullRequestUrl})`, + ].filter(Boolean); + + if (references.length > 0) { + body += `\n\n#### Related Items\n\n${references.join("\n")}\n`; + } + + // Add AI disclaimer with workflow name and run url + const workflowName = process.env.GH_AW_WORKFLOW_NAME || "Workflow"; + const workflowSource = process.env.GH_AW_WORKFLOW_SOURCE || ""; + const workflowSourceURL = process.env.GH_AW_WORKFLOW_SOURCE_URL || ""; + const runId = context.runId; + const githubServer = process.env.GITHUB_SERVER_URL || "https://github.com"; + const runUrl = context.payload.repository ? `${context.payload.repository.html_url}/actions/runs/${runId}` : `${githubServer}/${context.repo.owner}/${context.repo.repo}/actions/runs/${runId}`; + + // Add workflow ID comment marker if present + if (workflowId) { + body += `\n\n`; + } + + // Add tracker-id comment if present + const trackerIDComment = getTrackerID("markdown"); + if (trackerIDComment) { + body += trackerIDComment; + } + + // Add comment type marker to identify this as an add-comment + body += `\n\n`; + + body += generateFooterWithMessages(workflowName, runUrl, workflowSource, workflowSourceURL, triggeringIssueNumber, triggeringPRNumber, triggeringDiscussionNumber); + + // Hide older comments from the same workflow if enabled + if (hideOlderCommentsEnabled && workflowId) { + core.info("Hide-older-comments is enabled, searching for previous comments to hide"); + await hideOlderComments(github, context.repo.owner, context.repo.repo, itemNumber, workflowId, commentEndpoint === "discussions", "outdated", allowedReasons); + } + + let comment; + + // Use GraphQL API for discussions, REST API for issues/PRs + if (commentEndpoint === "discussions") { + core.info(`Creating comment on discussion #${itemNumber}`); + core.info(`Comment content length: ${body.length}`); + + // For discussion_comment events, extract the comment node_id to create a threaded reply + const replyToId = context.eventName === "discussion_comment" && context.payload?.comment?.node_id ? context.payload.comment.node_id : undefined; + + if (replyToId) { + core.info(`Creating threaded reply to comment ${replyToId}`); + } + + // Create discussion comment using GraphQL + comment = await commentOnDiscussion(github, context.repo.owner, context.repo.repo, itemNumber, body, replyToId); + core.info("Created discussion comment #" + comment.id + ": " + comment.html_url); + + // Add discussion_url to the comment object for consistency + comment.discussion_url = comment.discussion_url; + } else { + core.info(`Creating comment on ${commentEndpoint} #${itemNumber}`); + core.info(`Comment content length: ${body.length}`); + + // Create regular issue/PR comment using REST API + const { data: restComment } = await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: itemNumber, + body: body, + }); + + comment = restComment; + core.info("Created comment #" + comment.id + ": " + comment.html_url); + } + + createdComments.push(comment); + + // Set output for the last created comment (for backward compatibility) + if (i === commentItems.length - 1) { + core.setOutput("comment_id", comment.id); + core.setOutput("comment_url", comment.html_url); + } + } + + // Write summary for all created comments + if (createdComments.length > 0) { + const summaryContent = "\n\n## GitHub Comments\n" + createdComments.map(c => `- Comment #${c.id}: [View Comment](${c.html_url})`).join("\n"); + await core.summary.addRaw(summaryContent).write(); + } + + core.info(`Successfully created ${createdComments.length} comment(s)`); + return createdComments; +} + +module.exports = { main }; diff --git a/pkg/workflow/js/add_copilot_reviewer.cjs b/pkg/workflow/js/add_copilot_reviewer.cjs new file mode 100644 index 0000000000..51c5a43a63 --- /dev/null +++ b/pkg/workflow/js/add_copilot_reviewer.cjs @@ -0,0 +1,61 @@ +// @ts-check +/// + +/** + * Add Copilot as a reviewer to a pull request. + * + * This script is used to add the GitHub Copilot pull request reviewer bot + * to a pull request. It uses the `github` object from actions/github-script + * instead of the `gh api` CLI command. + * + * Environment variables: + * - PR_NUMBER: The pull request number to add the reviewer to + */ + +// GitHub Copilot reviewer bot username +const COPILOT_REVIEWER_BOT = "copilot-pull-request-reviewer[bot]"; + +async function main() { + // Validate required environment variables + const prNumberStr = process.env.PR_NUMBER?.trim(); + + if (!prNumberStr) { + core.setFailed("PR_NUMBER environment variable is required but not set"); + return; + } + + const prNumber = parseInt(prNumberStr, 10); + if (isNaN(prNumber) || prNumber <= 0) { + core.setFailed(`Invalid PR_NUMBER: ${prNumberStr}. Must be a positive integer.`); + return; + } + + core.info(`Adding Copilot as reviewer to PR #${prNumber}`); + + try { + await github.rest.pulls.requestReviewers({ + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: prNumber, + reviewers: [COPILOT_REVIEWER_BOT], + }); + + core.info(`Successfully added Copilot as reviewer to PR #${prNumber}`); + + await core.summary + .addRaw( + ` +## Copilot Reviewer Added + +Successfully added Copilot as a reviewer to PR #${prNumber}. +` + ) + .write(); + } catch (error) { + const errorMessage = error?.message ?? String(error); + core.error(`Failed to add Copilot as reviewer: ${errorMessage}`); + core.setFailed(`Failed to add Copilot as reviewer to PR #${prNumber}: ${errorMessage}`); + } +} + +module.exports = { main }; diff --git a/pkg/workflow/js/add_labels.cjs b/pkg/workflow/js/add_labels.cjs new file mode 100644 index 0000000000..cd76c83a5a --- /dev/null +++ b/pkg/workflow/js/add_labels.cjs @@ -0,0 +1,126 @@ +// @ts-check +/// + +const { processSafeOutput } = require("./safe_output_processor.cjs"); +const { validateLabels } = require("./safe_output_validator.cjs"); + +async function main() { + // Use shared processor for common steps + const result = await processSafeOutput( + { + itemType: "add_labels", + configKey: "add_labels", + displayName: "Labels", + itemTypeName: "label addition", + supportsPR: true, + supportsIssue: true, + envVars: { + allowed: "GH_AW_LABELS_ALLOWED", + maxCount: "GH_AW_LABELS_MAX_COUNT", + target: "GH_AW_LABELS_TARGET", + }, + }, + { + title: "Add Labels", + description: "The following labels would be added if staged mode was disabled:", + renderItem: item => { + let content = ""; + if (item.item_number) { + content += `**Target Issue:** #${item.item_number}\n\n`; + } else { + content += `**Target:** Current issue/PR\n\n`; + } + if (item.labels && item.labels.length > 0) { + content += `**Labels to add:** ${item.labels.join(", ")}\n\n`; + } + return content; + }, + } + ); + + if (!result.success) { + return; + } + + // @ts-ignore - TypeScript doesn't narrow properly after success check + const { item: labelsItem, config, targetResult } = result; + if (!config || !targetResult || targetResult.number === undefined) { + core.setFailed("Internal error: config, targetResult, or targetResult.number is undefined"); + return; + } + const { allowed: allowedLabels, maxCount } = config; + const itemNumber = targetResult.number; + const { contextType } = targetResult; + + const requestedLabels = labelsItem.labels || []; + core.info(`Requested labels: ${JSON.stringify(requestedLabels)}`); + + // Use validation helper to sanitize and validate labels + const labelsResult = validateLabels(requestedLabels, allowedLabels, maxCount); + if (!labelsResult.valid) { + // If no valid labels, log info and return gracefully instead of failing + if (labelsResult.error && labelsResult.error.includes("No valid labels")) { + core.info("No labels to add"); + core.setOutput("labels_added", ""); + await core.summary + .addRaw( + ` +## Label Addition + +No labels were added (no valid labels found in agent output). +` + ) + .write(); + return; + } + // For other validation errors, fail the workflow + core.setFailed(labelsResult.error || "Invalid labels"); + return; + } + + const uniqueLabels = labelsResult.value || []; + + if (uniqueLabels.length === 0) { + core.info("No labels to add"); + core.setOutput("labels_added", ""); + await core.summary + .addRaw( + ` +## Label Addition + +No labels were added (no valid labels found in agent output). +` + ) + .write(); + return; + } + core.info(`Adding ${uniqueLabels.length} labels to ${contextType} #${itemNumber}: ${JSON.stringify(uniqueLabels)}`); + try { + await github.rest.issues.addLabels({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: itemNumber, + labels: uniqueLabels, + }); + core.info(`Successfully added ${uniqueLabels.length} labels to ${contextType} #${itemNumber}`); + core.setOutput("labels_added", uniqueLabels.join("\n")); + const labelsListMarkdown = uniqueLabels.map(label => `- \`${label}\``).join("\n"); + await core.summary + .addRaw( + ` +## Label Addition + +Successfully added ${uniqueLabels.length} label(s) to ${contextType} #${itemNumber}: + +${labelsListMarkdown} +` + ) + .write(); + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + core.error(`Failed to add labels: ${errorMessage}`); + core.setFailed(`Failed to add labels: ${errorMessage}`); + } +} + +module.exports = { main }; diff --git a/pkg/workflow/js/add_reaction_and_edit_comment.cjs b/pkg/workflow/js/add_reaction_and_edit_comment.cjs new file mode 100644 index 0000000000..a787b02789 --- /dev/null +++ b/pkg/workflow/js/add_reaction_and_edit_comment.cjs @@ -0,0 +1,465 @@ +// @ts-check +/// + +const { getRunStartedMessage } = require("./messages_run_status.cjs"); + +async function main() { + // Read inputs from environment variables + const reaction = process.env.GH_AW_REACTION || "eyes"; + const command = process.env.GH_AW_COMMAND; // Only present for command workflows + const runId = context.runId; + const githubServer = process.env.GITHUB_SERVER_URL || "https://github.com"; + const runUrl = context.payload.repository ? `${context.payload.repository.html_url}/actions/runs/${runId}` : `${githubServer}/${context.repo.owner}/${context.repo.repo}/actions/runs/${runId}`; + + core.info(`Reaction type: ${reaction}`); + core.info(`Command name: ${command || "none"}`); + core.info(`Run ID: ${runId}`); + core.info(`Run URL: ${runUrl}`); + + // Validate reaction type + const validReactions = ["+1", "-1", "laugh", "confused", "heart", "hooray", "rocket", "eyes"]; + if (!validReactions.includes(reaction)) { + core.setFailed(`Invalid reaction type: ${reaction}. Valid reactions are: ${validReactions.join(", ")}`); + return; + } + + // Determine the API endpoint based on the event type + let reactionEndpoint; + let commentUpdateEndpoint; + let shouldCreateComment = false; + const eventName = context.eventName; + const owner = context.repo.owner; + const repo = context.repo.repo; + + try { + switch (eventName) { + case "issues": + const issueNumber = context.payload?.issue?.number; + if (!issueNumber) { + core.setFailed("Issue number not found in event payload"); + return; + } + reactionEndpoint = `/repos/${owner}/${repo}/issues/${issueNumber}/reactions`; + commentUpdateEndpoint = `/repos/${owner}/${repo}/issues/${issueNumber}/comments`; + // Create comments for all workflows using reactions + shouldCreateComment = true; + break; + + case "issue_comment": + const commentId = context.payload?.comment?.id; + const issueNumberForComment = context.payload?.issue?.number; + if (!commentId) { + core.setFailed("Comment ID not found in event payload"); + return; + } + if (!issueNumberForComment) { + core.setFailed("Issue number not found in event payload"); + return; + } + reactionEndpoint = `/repos/${owner}/${repo}/issues/comments/${commentId}/reactions`; + // Create new comment on the issue itself, not on the comment + commentUpdateEndpoint = `/repos/${owner}/${repo}/issues/${issueNumberForComment}/comments`; + // Create comments for all workflows using reactions + shouldCreateComment = true; + break; + + case "pull_request": + const prNumber = context.payload?.pull_request?.number; + if (!prNumber) { + core.setFailed("Pull request number not found in event payload"); + return; + } + // PRs are "issues" for the reactions endpoint + reactionEndpoint = `/repos/${owner}/${repo}/issues/${prNumber}/reactions`; + commentUpdateEndpoint = `/repos/${owner}/${repo}/issues/${prNumber}/comments`; + // Create comments for all workflows using reactions + shouldCreateComment = true; + break; + + case "pull_request_review_comment": + const reviewCommentId = context.payload?.comment?.id; + const prNumberForReviewComment = context.payload?.pull_request?.number; + if (!reviewCommentId) { + core.setFailed("Review comment ID not found in event payload"); + return; + } + if (!prNumberForReviewComment) { + core.setFailed("Pull request number not found in event payload"); + return; + } + reactionEndpoint = `/repos/${owner}/${repo}/pulls/comments/${reviewCommentId}/reactions`; + // Create new comment on the PR itself (using issues endpoint since PRs are issues) + commentUpdateEndpoint = `/repos/${owner}/${repo}/issues/${prNumberForReviewComment}/comments`; + // Create comments for all workflows using reactions + shouldCreateComment = true; + break; + + case "discussion": + const discussionNumber = context.payload?.discussion?.number; + if (!discussionNumber) { + core.setFailed("Discussion number not found in event payload"); + return; + } + // Discussions use GraphQL API - get the node ID + const discussion = await getDiscussionId(owner, repo, discussionNumber); + reactionEndpoint = discussion.id; // Store node ID for GraphQL + commentUpdateEndpoint = `discussion:${discussionNumber}`; // Special format to indicate discussion + // Create comments for all workflows using reactions + shouldCreateComment = true; + break; + + case "discussion_comment": + const discussionCommentNumber = context.payload?.discussion?.number; + const discussionCommentId = context.payload?.comment?.id; + if (!discussionCommentNumber || !discussionCommentId) { + core.setFailed("Discussion or comment information not found in event payload"); + return; + } + // Get the comment node ID from the payload + const commentNodeId = context.payload?.comment?.node_id; + if (!commentNodeId) { + core.setFailed("Discussion comment node ID not found in event payload"); + return; + } + reactionEndpoint = commentNodeId; // Store node ID for GraphQL + commentUpdateEndpoint = `discussion_comment:${discussionCommentNumber}:${discussionCommentId}`; // Special format + // Create comments for all workflows using reactions + shouldCreateComment = true; + break; + + default: + core.setFailed(`Unsupported event type: ${eventName}`); + return; + } + + core.info(`Reaction API endpoint: ${reactionEndpoint}`); + + // Add reaction first + // For discussions, reactionEndpoint is a node ID (GraphQL), otherwise it's a REST API path + const isDiscussionEvent = eventName === "discussion" || eventName === "discussion_comment"; + if (isDiscussionEvent) { + await addDiscussionReaction(reactionEndpoint, reaction); + } else { + await addReaction(reactionEndpoint, reaction); + } + + // Then add comment if applicable + if (shouldCreateComment && commentUpdateEndpoint) { + core.info(`Comment endpoint: ${commentUpdateEndpoint}`); + await addCommentWithWorkflowLink(commentUpdateEndpoint, runUrl, eventName); + } else { + core.info(`Skipping comment for event type: ${eventName}`); + } + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + core.error(`Failed to process reaction and comment creation: ${errorMessage}`); + core.setFailed(`Failed to process reaction and comment creation: ${errorMessage}`); + } +} + +/** + * Add a reaction to a GitHub issue, PR, or comment using REST API + * @param {string} endpoint - The GitHub API endpoint to add the reaction to + * @param {string} reaction - The reaction type to add + */ +async function addReaction(endpoint, reaction) { + const response = await github.request("POST " + endpoint, { + content: reaction, + headers: { + Accept: "application/vnd.github+json", + }, + }); + + const reactionId = response.data?.id; + if (reactionId) { + core.info(`Successfully added reaction: ${reaction} (id: ${reactionId})`); + core.setOutput("reaction-id", reactionId.toString()); + } else { + core.info(`Successfully added reaction: ${reaction}`); + core.setOutput("reaction-id", ""); + } +} + +/** + * Add a reaction to a GitHub discussion or discussion comment using GraphQL + * @param {string} subjectId - The node ID of the discussion or comment + * @param {string} reaction - The reaction type to add (mapped to GitHub's ReactionContent enum) + */ +async function addDiscussionReaction(subjectId, reaction) { + // Map reaction names to GitHub's GraphQL ReactionContent enum + const reactionMap = { + "+1": "THUMBS_UP", + "-1": "THUMBS_DOWN", + laugh: "LAUGH", + confused: "CONFUSED", + heart: "HEART", + hooray: "HOORAY", + rocket: "ROCKET", + eyes: "EYES", + }; + + const reactionContent = reactionMap[reaction]; + if (!reactionContent) { + throw new Error(`Invalid reaction type for GraphQL: ${reaction}`); + } + + const result = await github.graphql( + ` + mutation($subjectId: ID!, $content: ReactionContent!) { + addReaction(input: { subjectId: $subjectId, content: $content }) { + reaction { + id + content + } + } + }`, + { subjectId, content: reactionContent } + ); + + const reactionId = result.addReaction.reaction.id; + core.info(`Successfully added reaction: ${reaction} (id: ${reactionId})`); + core.setOutput("reaction-id", reactionId); +} + +/** + * Get the node ID for a discussion + * @param {string} owner - Repository owner + * @param {string} repo - Repository name + * @param {number} discussionNumber - Discussion number + * @returns {Promise<{id: string, url: string}>} Discussion details + */ +async function getDiscussionId(owner, repo, discussionNumber) { + const { repository } = await github.graphql( + ` + query($owner: String!, $repo: String!, $num: Int!) { + repository(owner: $owner, name: $repo) { + discussion(number: $num) { + id + url + } + } + }`, + { owner, repo, num: discussionNumber } + ); + + if (!repository || !repository.discussion) { + throw new Error(`Discussion #${discussionNumber} not found in ${owner}/${repo}`); + } + + return { + id: repository.discussion.id, + url: repository.discussion.url, + }; +} + +/** + * Get the node ID for a discussion comment + * @param {string} owner - Repository owner + * @param {string} repo - Repository name + * @param {number} discussionNumber - Discussion number + * @param {number} commentId - Comment ID (database ID, not node ID) + * @returns {Promise<{id: string, url: string}>} Comment details + */ +async function getDiscussionCommentId(owner, repo, discussionNumber, commentId) { + // First, get the discussion ID + const discussion = await getDiscussionId(owner, repo, discussionNumber); + if (!discussion) throw new Error(`Discussion #${discussionNumber} not found in ${owner}/${repo}`); + + // Then fetch the comment by traversing discussion comments + // Note: GitHub's GraphQL API doesn't provide a direct way to query comment by database ID + // We need to use the comment's node ID from the event payload if available + // For now, we'll use a simplified approach - the commentId from context.payload.comment.node_id + + // If the event payload provides node_id, we can use it directly + // Otherwise, this would need to fetch all comments and find the matching one + const nodeId = context.payload?.comment?.node_id; + if (nodeId) { + return { + id: nodeId, + url: context.payload.comment?.html_url || discussion?.url, + }; + } + + throw new Error(`Discussion comment node ID not found in event payload for comment ${commentId}`); +} + +/** + * Add a comment with a workflow run link + * @param {string} endpoint - The GitHub API endpoint to create the comment (or special format for discussions) + * @param {string} runUrl - The URL of the workflow run + * @param {string} eventName - The event type (to determine the comment text) + */ +async function addCommentWithWorkflowLink(endpoint, runUrl, eventName) { + try { + // Get workflow name from environment variable + const workflowName = process.env.GH_AW_WORKFLOW_NAME || "Workflow"; + + // Determine the event type description + let eventTypeDescription; + switch (eventName) { + case "issues": + eventTypeDescription = "issue"; + break; + case "pull_request": + eventTypeDescription = "pull request"; + break; + case "issue_comment": + eventTypeDescription = "issue comment"; + break; + case "pull_request_review_comment": + eventTypeDescription = "pull request review comment"; + break; + case "discussion": + eventTypeDescription = "discussion"; + break; + case "discussion_comment": + eventTypeDescription = "discussion comment"; + break; + default: + eventTypeDescription = "event"; + } + + // Use getRunStartedMessage for the workflow link text (supports custom messages) + const workflowLinkText = getRunStartedMessage({ + workflowName: workflowName, + runUrl: runUrl, + eventType: eventTypeDescription, + }); + + // Add workflow-id and tracker-id markers for hide-older-comments feature + const workflowId = process.env.GITHUB_WORKFLOW || ""; + const trackerId = process.env.GH_AW_TRACKER_ID || ""; + + let commentBody = workflowLinkText; + + // Add lock notice if lock-for-agent is enabled for issues or issue_comment + const lockForAgent = process.env.GH_AW_LOCK_FOR_AGENT === "true"; + if (lockForAgent && (eventName === "issues" || eventName === "issue_comment")) { + commentBody += "\n\n🔒 This issue has been locked while the workflow is running to prevent concurrent modifications."; + } + + // Add workflow-id marker if available + if (workflowId) { + commentBody += `\n\n`; + } + + // Add tracker-id marker if available (for backwards compatibility) + if (trackerId) { + commentBody += `\n\n`; + } + + // Add comment type marker to identify this as a reaction comment + // This prevents it from being hidden by hide-older-comments + commentBody += `\n\n`; + + // Handle discussion events specially + if (eventName === "discussion") { + // Parse discussion number from special format: "discussion:NUMBER" + const discussionNumber = parseInt(endpoint.split(":")[1], 10); + + // Create a new comment on the discussion using GraphQL + const { repository } = await github.graphql( + ` + query($owner: String!, $repo: String!, $num: Int!) { + repository(owner: $owner, name: $repo) { + discussion(number: $num) { + id + } + } + }`, + { owner: context.repo.owner, repo: context.repo.repo, num: discussionNumber } + ); + + const discussionId = repository.discussion.id; + + const result = await github.graphql( + ` + mutation($dId: ID!, $body: String!) { + addDiscussionComment(input: { discussionId: $dId, body: $body }) { + comment { + id + url + } + } + }`, + { dId: discussionId, body: commentBody } + ); + + const comment = result.addDiscussionComment.comment; + core.info(`Successfully created discussion comment with workflow link`); + core.info(`Comment ID: ${comment.id}`); + core.info(`Comment URL: ${comment.url}`); + core.info(`Comment Repo: ${context.repo.owner}/${context.repo.repo}`); + core.setOutput("comment-id", comment.id); + core.setOutput("comment-url", comment.url); + core.setOutput("comment-repo", `${context.repo.owner}/${context.repo.repo}`); + return; + } else if (eventName === "discussion_comment") { + // Parse discussion number from special format: "discussion_comment:NUMBER:COMMENT_ID" + const discussionNumber = parseInt(endpoint.split(":")[1], 10); + + // Create a new comment on the discussion using GraphQL + const { repository } = await github.graphql( + ` + query($owner: String!, $repo: String!, $num: Int!) { + repository(owner: $owner, name: $repo) { + discussion(number: $num) { + id + } + } + }`, + { owner: context.repo.owner, repo: context.repo.repo, num: discussionNumber } + ); + + const discussionId = repository.discussion.id; + + // Get the comment node ID to use as the parent for threading + const commentNodeId = context.payload?.comment?.node_id; + + const result = await github.graphql( + ` + mutation($dId: ID!, $body: String!, $replyToId: ID!) { + addDiscussionComment(input: { discussionId: $dId, body: $body, replyToId: $replyToId }) { + comment { + id + url + } + } + }`, + { dId: discussionId, body: commentBody, replyToId: commentNodeId } + ); + + const comment = result.addDiscussionComment.comment; + core.info(`Successfully created discussion comment with workflow link`); + core.info(`Comment ID: ${comment.id}`); + core.info(`Comment URL: ${comment.url}`); + core.info(`Comment Repo: ${context.repo.owner}/${context.repo.repo}`); + core.setOutput("comment-id", comment.id); + core.setOutput("comment-url", comment.url); + core.setOutput("comment-repo", `${context.repo.owner}/${context.repo.repo}`); + return; + } + + // Create a new comment for non-discussion events + const createResponse = await github.request("POST " + endpoint, { + body: commentBody, + headers: { + Accept: "application/vnd.github+json", + }, + }); + + core.info(`Successfully created comment with workflow link`); + core.info(`Comment ID: ${createResponse.data.id}`); + core.info(`Comment URL: ${createResponse.data.html_url}`); + core.info(`Comment Repo: ${context.repo.owner}/${context.repo.repo}`); + core.setOutput("comment-id", createResponse.data.id.toString()); + core.setOutput("comment-url", createResponse.data.html_url); + core.setOutput("comment-repo", `${context.repo.owner}/${context.repo.repo}`); + } catch (error) { + // Don't fail the entire job if comment creation fails - just log it + const errorMessage = error instanceof Error ? error.message : String(error); + core.warning("Failed to create comment with workflow link (This is not critical - the reaction was still added successfully): " + errorMessage); + } +} + +module.exports = { main }; diff --git a/pkg/workflow/js/add_reviewer.cjs b/pkg/workflow/js/add_reviewer.cjs new file mode 100644 index 0000000000..9ecaaf7af9 --- /dev/null +++ b/pkg/workflow/js/add_reviewer.cjs @@ -0,0 +1,132 @@ +// @ts-check +/// + +const { processSafeOutput, processItems } = require("./safe_output_processor.cjs"); + +// GitHub Copilot reviewer bot username +const COPILOT_REVIEWER_BOT = "copilot-pull-request-reviewer[bot]"; + +async function main() { + // Use shared processor for common steps + const result = await processSafeOutput( + { + itemType: "add_reviewer", + configKey: "add_reviewer", + displayName: "Reviewers", + itemTypeName: "reviewer addition", + supportsPR: false, // PR-only: supportsPR=false means ONLY PR context (not issues) + supportsIssue: false, + envVars: { + allowed: "GH_AW_REVIEWERS_ALLOWED", + maxCount: "GH_AW_REVIEWERS_MAX_COUNT", + target: "GH_AW_REVIEWERS_TARGET", + }, + }, + { + title: "Add Reviewers", + description: "The following reviewers would be added if staged mode was disabled:", + renderItem: item => { + let content = ""; + if (item.pull_request_number) { + content += `**Target Pull Request:** #${item.pull_request_number}\n\n`; + } else { + content += `**Target:** Current pull request\n\n`; + } + if (item.reviewers && item.reviewers.length > 0) { + content += `**Reviewers to add:** ${item.reviewers.join(", ")}\n\n`; + } + return content; + }, + } + ); + + if (!result.success) { + return; + } + + // @ts-ignore - TypeScript doesn't narrow properly after success check + const { item: reviewerItem, config, targetResult } = result; + if (!config || !targetResult || targetResult.number === undefined) { + core.setFailed("Internal error: config, targetResult, or targetResult.number is undefined"); + return; + } + const { allowed: allowedReviewers, maxCount } = config; + const prNumber = targetResult.number; + + const requestedReviewers = reviewerItem.reviewers || []; + core.info(`Requested reviewers: ${JSON.stringify(requestedReviewers)}`); + + // Use shared helper to filter, sanitize, dedupe, and limit + const uniqueReviewers = processItems(requestedReviewers, allowedReviewers, maxCount); + + if (uniqueReviewers.length === 0) { + core.info("No reviewers to add"); + core.setOutput("reviewers_added", ""); + await core.summary + .addRaw( + ` +## Reviewer Addition + +No reviewers were added (no valid reviewers found in agent output). +` + ) + .write(); + return; + } + + core.info(`Adding ${uniqueReviewers.length} reviewers to PR #${prNumber}: ${JSON.stringify(uniqueReviewers)}`); + + try { + // Special handling for "copilot" reviewer - separate it from other reviewers in a single pass + const hasCopilot = uniqueReviewers.includes("copilot"); + const otherReviewers = hasCopilot ? uniqueReviewers.filter(r => r !== "copilot") : uniqueReviewers; + + // Add non-copilot reviewers first + if (otherReviewers.length > 0) { + await github.rest.pulls.requestReviewers({ + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: prNumber, + reviewers: otherReviewers, + }); + core.info(`Successfully added ${otherReviewers.length} reviewer(s) to PR #${prNumber}`); + } + + // Add copilot reviewer separately if requested + if (hasCopilot) { + try { + await github.rest.pulls.requestReviewers({ + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: prNumber, + reviewers: [COPILOT_REVIEWER_BOT], + }); + core.info(`Successfully added copilot as reviewer to PR #${prNumber}`); + } catch (copilotError) { + core.warning(`Failed to add copilot as reviewer: ${copilotError instanceof Error ? copilotError.message : String(copilotError)}`); + // Don't fail the whole step if copilot reviewer fails + } + } + + core.setOutput("reviewers_added", uniqueReviewers.join("\n")); + + const reviewersListMarkdown = uniqueReviewers.map(reviewer => `- \`${reviewer}\``).join("\n"); + await core.summary + .addRaw( + ` +## Reviewer Addition + +Successfully added ${uniqueReviewers.length} reviewer(s) to PR #${prNumber}: + +${reviewersListMarkdown} +` + ) + .write(); + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + core.error(`Failed to add reviewers: ${errorMessage}`); + core.setFailed(`Failed to add reviewers: ${errorMessage}`); + } +} + +module.exports = { main }; diff --git a/pkg/workflow/js/assign_agent_helpers.cjs b/pkg/workflow/js/assign_agent_helpers.cjs new file mode 100644 index 0000000000..9a40fabda2 --- /dev/null +++ b/pkg/workflow/js/assign_agent_helpers.cjs @@ -0,0 +1,419 @@ +// @ts-check +/// + +/** + * Shared helper functions for assigning coding agents (like Copilot) to issues + * These functions use GraphQL to properly assign bot actors that cannot be assigned via gh CLI + * + * NOTE: All functions use the built-in `github` global object for authentication. + * The token must be set at the step level via the `github-token` parameter in GitHub Actions. + * This approach is required for compatibility with actions/github-script@v8. + */ + +/** + * Map agent names to their GitHub bot login names + * @type {Record} + */ +const AGENT_LOGIN_NAMES = { + copilot: "copilot-swe-agent", +}; + +/** + * Check if an assignee is a known coding agent (bot) + * @param {string} assignee - Assignee name (may include @ prefix) + * @returns {string|null} Agent name if it's a known agent, null otherwise + */ +function getAgentName(assignee) { + // Normalize: remove @ prefix if present + const normalized = assignee.startsWith("@") ? assignee.slice(1) : assignee; + + // Check if it's a known agent + if (AGENT_LOGIN_NAMES[normalized]) { + return normalized; + } + + return null; +} + +/** + * Return list of coding agent bot login names that are currently available as assignable actors + * (intersection of suggestedActors and known AGENT_LOGIN_NAMES values) + * @param {string} owner + * @param {string} repo + * @returns {Promise} + */ +async function getAvailableAgentLogins(owner, repo) { + const query = ` + query($owner: String!, $repo: String!) { + repository(owner: $owner, name: $repo) { + suggestedActors(first: 100, capabilities: CAN_BE_ASSIGNED) { + nodes { ... on Bot { login __typename } } + } + } + } + `; + try { + const response = await github.graphql(query, { owner, repo }); + const actors = response.repository?.suggestedActors?.nodes || []; + const knownValues = Object.values(AGENT_LOGIN_NAMES); + const available = []; + for (const actor of actors) { + if (actor && actor.login && knownValues.includes(actor.login)) { + available.push(actor.login); + } + } + return available.sort(); + } catch (e) { + const msg = e instanceof Error ? e.message : String(e); + core.debug(`Failed to list available agent logins: ${msg}`); + return []; + } +} + +/** + * Find an agent in repository's suggested actors using GraphQL + * @param {string} owner - Repository owner + * @param {string} repo - Repository name + * @param {string} agentName - Agent name (copilot) + * @returns {Promise} Agent ID or null if not found + */ +async function findAgent(owner, repo, agentName) { + const query = ` + query($owner: String!, $repo: String!) { + repository(owner: $owner, name: $repo) { + suggestedActors(first: 100, capabilities: CAN_BE_ASSIGNED) { + nodes { + ... on Bot { + id + login + __typename + } + } + } + } + } + `; + + try { + const response = await github.graphql(query, { owner, repo }); + const actors = response.repository.suggestedActors.nodes; + + const loginName = AGENT_LOGIN_NAMES[agentName]; + if (!loginName) { + core.error(`Unknown agent: ${agentName}. Supported agents: ${Object.keys(AGENT_LOGIN_NAMES).join(", ")}`); + return null; + } + + for (const actor of actors) { + if (actor.login === loginName) { + return actor.id; + } + } + + const available = actors.filter(a => a && a.login && Object.values(AGENT_LOGIN_NAMES).includes(a.login)).map(a => a.login); + + core.warning(`${agentName} coding agent (${loginName}) is not available as an assignee for this repository`); + if (available.length > 0) { + core.info(`Available assignable coding agents: ${available.join(", ")}`); + } else { + core.info("No coding agents are currently assignable in this repository."); + } + if (agentName === "copilot") { + core.info("Please visit https://docs.github.com/en/copilot/using-github-copilot/using-copilot-coding-agent-to-work-on-tasks/about-assigning-tasks-to-copilot"); + } + return null; + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + core.error(`Failed to find ${agentName} agent: ${errorMessage}`); + return null; + } +} + +/** + * Get issue details (ID and current assignees) using GraphQL + * @param {string} owner - Repository owner + * @param {string} repo - Repository name + * @param {number} issueNumber - Issue number + * @returns {Promise<{issueId: string, currentAssignees: string[]}|null>} + */ +async function getIssueDetails(owner, repo, issueNumber) { + const query = ` + query($owner: String!, $repo: String!, $issueNumber: Int!) { + repository(owner: $owner, name: $repo) { + issue(number: $issueNumber) { + id + assignees(first: 100) { + nodes { + id + } + } + } + } + } + `; + + try { + const response = await github.graphql(query, { owner, repo, issueNumber }); + const issue = response.repository.issue; + + if (!issue || !issue.id) { + core.error("Could not get issue data"); + return null; + } + + const currentAssignees = issue.assignees.nodes.map(assignee => assignee.id); + + return { + issueId: issue.id, + currentAssignees: currentAssignees, + }; + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + core.error(`Failed to get issue details: ${errorMessage}`); + return null; + } +} + +/** + * Assign agent to issue using GraphQL replaceActorsForAssignable mutation + * @param {string} issueId - GitHub issue ID + * @param {string} agentId - Agent ID + * @param {string[]} currentAssignees - List of current assignee IDs + * @param {string} agentName - Agent name for error messages + * @returns {Promise} True if successful + */ +async function assignAgentToIssue(issueId, agentId, currentAssignees, agentName) { + // Build actor IDs array - include agent and preserve other assignees + const actorIds = [agentId]; + for (const assigneeId of currentAssignees) { + if (assigneeId !== agentId) { + actorIds.push(assigneeId); + } + } + + const mutation = ` + mutation($assignableId: ID!, $actorIds: [ID!]!) { + replaceActorsForAssignable(input: { + assignableId: $assignableId, + actorIds: $actorIds + }) { + __typename + } + } + `; + + try { + core.info("Using built-in github object for mutation"); + + core.debug(`GraphQL mutation with variables: assignableId=${issueId}, actorIds=${JSON.stringify(actorIds)}`); + const response = await github.graphql(mutation, { + assignableId: issueId, + actorIds: actorIds, + }); + + if (response && response.replaceActorsForAssignable && response.replaceActorsForAssignable.__typename) { + return true; + } else { + core.error("Unexpected response from GitHub API"); + return false; + } + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + + // Debug: surface the raw GraphQL error structure for troubleshooting fine-grained permission issues + try { + core.debug(`Raw GraphQL error message: ${errorMessage}`); + if (error && typeof error === "object") { + // Common GraphQL error shapes: error.errors (array), error.data, error.response + const details = {}; + if (error.errors) details.errors = error.errors; + // Some libraries wrap the payload under 'response' or 'response.data' + if (error.response) details.response = error.response; + if (error.data) details.data = error.data; + // If GitHub returns an array of errors with 'type'/'message' + if (Array.isArray(error.errors)) { + details.compactMessages = error.errors.map(e => e.message).filter(Boolean); + } + const serialized = JSON.stringify(details, (_k, v) => v, 2); + if (serialized && serialized !== "{}") { + core.debug(`Raw GraphQL error details: ${serialized}`); + // Also emit non-debug version so users without ACTIONS_STEP_DEBUG can see it + core.error("Raw GraphQL error details (for troubleshooting):"); + // Split large JSON for readability + for (const line of serialized.split(/\n/)) { + if (line.trim()) core.error(line); + } + } + } + } catch (loggingErr) { + // Never fail assignment because of debug logging + core.debug(`Failed to serialize GraphQL error details: ${loggingErr instanceof Error ? loggingErr.message : String(loggingErr)}`); + } + + // Check for permission-related errors + if (errorMessage.includes("Resource not accessible by personal access token") || errorMessage.includes("Resource not accessible by integration") || errorMessage.includes("Insufficient permissions to assign")) { + // Attempt fallback mutation addAssigneesToAssignable when replaceActorsForAssignable is forbidden + core.info("Primary mutation replaceActorsForAssignable forbidden. Attempting fallback addAssigneesToAssignable..."); + try { + const fallbackMutation = ` + mutation($assignableId: ID!, $assigneeIds: [ID!]!) { + addAssigneesToAssignable(input: { + assignableId: $assignableId, + assigneeIds: $assigneeIds + }) { + clientMutationId + } + } + `; + core.info("Using built-in github object for fallback mutation"); + core.debug(`Fallback GraphQL mutation with variables: assignableId=${issueId}, assigneeIds=[${agentId}]`); + const fallbackResp = await github.graphql(fallbackMutation, { + assignableId: issueId, + assigneeIds: [agentId], + }); + if (fallbackResp && fallbackResp.addAssigneesToAssignable) { + core.info(`Fallback succeeded: agent '${agentName}' added via addAssigneesToAssignable.`); + return true; + } else { + core.warning("Fallback mutation returned unexpected response; proceeding with permission guidance."); + } + } catch (fallbackError) { + const fbMsg = fallbackError instanceof Error ? fallbackError.message : String(fallbackError); + core.error(`Fallback addAssigneesToAssignable failed: ${fbMsg}`); + } + logPermissionError(agentName); + } else { + core.error(`Failed to assign ${agentName}: ${errorMessage}`); + } + return false; + } +} + +/** + * Log detailed permission error guidance + * @param {string} agentName - Agent name for error messages + */ +function logPermissionError(agentName) { + core.error(`Failed to assign ${agentName}: Insufficient permissions`); + core.error(""); + core.error("Assigning Copilot agents requires:"); + core.error(" 1. All four workflow permissions:"); + core.error(" - actions: write"); + core.error(" - contents: write"); + core.error(" - issues: write"); + core.error(" - pull-requests: write"); + core.error(""); + core.error(" 2. A classic PAT with 'repo' scope OR fine-grained PAT with explicit Write permissions above:"); + core.error(" (Fine-grained PATs must grant repository access + write for Issues, Pull requests, Contents, Actions)"); + core.error(""); + core.error(" 3. Repository settings:"); + core.error(" - Actions must have write permissions"); + core.error(" - Go to: Settings > Actions > General > Workflow permissions"); + core.error(" - Select: 'Read and write permissions'"); + core.error(""); + core.error(" 4. Organization/Enterprise settings:"); + core.error(" - Check if your org restricts bot assignments"); + core.error(" - Verify Copilot is enabled for your repository"); + core.error(""); + core.info("For more information, see: https://docs.github.com/en/copilot/how-tos/use-copilot-agents/coding-agent/create-a-pr"); +} + +/** + * Generate permission error summary content for step summary + * @returns {string} Markdown content for permission error guidance + */ +function generatePermissionErrorSummary() { + let content = "\n### ⚠️ Permission Requirements\n\n"; + content += "Assigning Copilot agents requires **ALL** of these permissions:\n\n"; + content += "```yaml\n"; + content += "permissions:\n"; + content += " actions: write\n"; + content += " contents: write\n"; + content += " issues: write\n"; + content += " pull-requests: write\n"; + content += "```\n\n"; + content += "**Token capability note:**\n"; + content += "- Current token (PAT or GITHUB_TOKEN) lacks assignee mutation capability for this repository.\n"; + content += "- Both `replaceActorsForAssignable` and fallback `addAssigneesToAssignable` returned FORBIDDEN/Resource not accessible.\n"; + content += "- This typically means bot/user assignment requires an elevated OAuth or GitHub App installation token.\n\n"; + content += "**Recommended remediation paths:**\n"; + content += "1. Create & install a GitHub App with: Issues/Pull requests/Contents/Actions (write) → use installation token in job.\n"; + content += "2. Manual assignment: add the agent through the UI until broader token support is available.\n"; + content += "3. Open a support ticket referencing failing mutation `replaceActorsForAssignable` and repository slug.\n\n"; + content += "**Why this failed:** Fine-grained and classic PATs can update issue title (verified) but not modify assignees in this environment.\n\n"; + content += "📖 Reference: https://docs.github.com/en/copilot/how-tos/use-copilot-agents/coding-agent/create-a-pr (general agent docs)\n"; + return content; +} + +/** + * Assign an agent to an issue using GraphQL + * This is the main entry point for assigning agents from other scripts + * @param {string} owner - Repository owner + * @param {string} repo - Repository name + * @param {number} issueNumber - Issue number + * @param {string} agentName - Agent name (e.g., "copilot") + * @returns {Promise<{success: boolean, error?: string}>} + */ +async function assignAgentToIssueByName(owner, repo, issueNumber, agentName) { + // Check if agent is supported + if (!AGENT_LOGIN_NAMES[agentName]) { + const error = `Agent "${agentName}" is not supported. Supported agents: ${Object.keys(AGENT_LOGIN_NAMES).join(", ")}`; + core.warning(error); + return { success: false, error }; + } + + try { + // Find agent using the github object authenticated via step-level github-token + core.info(`Looking for ${agentName} coding agent...`); + const agentId = await findAgent(owner, repo, agentName); + if (!agentId) { + const error = `${agentName} coding agent is not available for this repository`; + // Enrich with available agent logins + const available = await getAvailableAgentLogins(owner, repo); + const enrichedError = available.length > 0 ? `${error} (available agents: ${available.join(", ")})` : error; + return { success: false, error: enrichedError }; + } + core.info(`Found ${agentName} coding agent (ID: ${agentId})`); + + // Get issue details (ID and current assignees) via GraphQL + core.info("Getting issue details..."); + const issueDetails = await getIssueDetails(owner, repo, issueNumber); + if (!issueDetails) { + return { success: false, error: "Failed to get issue details" }; + } + + core.info(`Issue ID: ${issueDetails.issueId}`); + + // Check if agent is already assigned + if (issueDetails.currentAssignees.includes(agentId)) { + core.info(`${agentName} is already assigned to issue #${issueNumber}`); + return { success: true }; + } + + // Assign agent using GraphQL mutation + core.info(`Assigning ${agentName} coding agent to issue #${issueNumber}...`); + const success = await assignAgentToIssue(issueDetails.issueId, agentId, issueDetails.currentAssignees, agentName); + + if (!success) { + return { success: false, error: `Failed to assign ${agentName} via GraphQL` }; + } + + core.info(`Successfully assigned ${agentName} coding agent to issue #${issueNumber}`); + return { success: true }; + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + return { success: false, error: errorMessage }; + } +} + +module.exports = { + AGENT_LOGIN_NAMES, + getAgentName, + getAvailableAgentLogins, + findAgent, + getIssueDetails, + assignAgentToIssue, + logPermissionError, + generatePermissionErrorSummary, + assignAgentToIssueByName, +}; diff --git a/pkg/workflow/js/assign_copilot_to_created_issues.cjs b/pkg/workflow/js/assign_copilot_to_created_issues.cjs new file mode 100644 index 0000000000..e30cc134e3 --- /dev/null +++ b/pkg/workflow/js/assign_copilot_to_created_issues.cjs @@ -0,0 +1,160 @@ +// @ts-check +/// + +const { AGENT_LOGIN_NAMES, findAgent, getIssueDetails, assignAgentToIssue, generatePermissionErrorSummary } = require("./assign_agent_helpers.cjs"); + +/** + * Assign copilot to issues created by create_issue job. + * This script reads the issues_to_assign_copilot output and assigns copilot to each issue. + * It uses the agent token (GH_AW_AGENT_TOKEN) for the GraphQL mutation. + */ + +async function main() { + // Get the issues to assign from step output + const issuesToAssignStr = "${{ steps.create_issue.outputs.issues_to_assign_copilot }}"; + + if (!issuesToAssignStr || issuesToAssignStr.trim() === "") { + core.info("No issues to assign copilot to"); + return; + } + + core.info(`Issues to assign copilot: ${issuesToAssignStr}`); + + // Parse the comma-separated list of repo:number entries + const issueEntries = issuesToAssignStr.split(",").filter(entry => entry.trim() !== ""); + if (issueEntries.length === 0) { + core.info("No valid issue entries found"); + return; + } + + core.info(`Processing ${issueEntries.length} issue(s) for copilot assignment`); + + const agentName = "copilot"; + const results = []; + let agentId = null; + + for (const entry of issueEntries) { + // Parse repo:number format + const parts = entry.split(":"); + if (parts.length !== 2) { + core.warning(`Invalid issue entry format: ${entry}. Expected 'owner/repo:number'`); + continue; + } + + const repoSlug = parts[0]; + const issueNumber = parseInt(parts[1], 10); + + if (isNaN(issueNumber) || issueNumber <= 0) { + core.warning(`Invalid issue number in entry: ${entry}`); + continue; + } + + // Parse owner/repo from repo slug + const repoParts = repoSlug.split("/"); + if (repoParts.length !== 2) { + core.warning(`Invalid repo format: ${repoSlug}. Expected 'owner/repo'`); + continue; + } + + const owner = repoParts[0]; + const repo = repoParts[1]; + + try { + // Find agent (reuse cached ID for same repo) + if (!agentId) { + core.info(`Looking for ${agentName} coding agent...`); + agentId = await findAgent(owner, repo, agentName); + if (!agentId) { + throw new Error(`${agentName} coding agent is not available for this repository`); + } + core.info(`Found ${agentName} coding agent (ID: ${agentId})`); + } + + // Get issue details + core.info(`Getting details for issue #${issueNumber} in ${repoSlug}...`); + const issueDetails = await getIssueDetails(owner, repo, issueNumber); + if (!issueDetails) { + throw new Error("Failed to get issue details"); + } + + core.info(`Issue ID: ${issueDetails.issueId}`); + + // Check if agent is already assigned + if (issueDetails.currentAssignees.includes(agentId)) { + core.info(`${agentName} is already assigned to issue #${issueNumber}`); + results.push({ + repo: repoSlug, + issue_number: issueNumber, + success: true, + already_assigned: true, + }); + continue; + } + + // Assign agent using GraphQL mutation + core.info(`Assigning ${agentName} coding agent to issue #${issueNumber}...`); + const success = await assignAgentToIssue(issueDetails.issueId, agentId, issueDetails.currentAssignees, agentName); + + if (!success) { + throw new Error(`Failed to assign ${agentName} via GraphQL`); + } + + core.info(`Successfully assigned ${agentName} coding agent to issue #${issueNumber}`); + results.push({ + repo: repoSlug, + issue_number: issueNumber, + success: true, + }); + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + core.error(`Failed to assign ${agentName} to issue #${issueNumber} in ${repoSlug}: ${errorMessage}`); + results.push({ + repo: repoSlug, + issue_number: issueNumber, + success: false, + error: errorMessage, + }); + } + } + + // Generate step summary + const successCount = results.filter(r => r.success).length; + const failureCount = results.filter(r => !r.success).length; + + let summaryContent = "## Copilot Assignment for Created Issues\n\n"; + + if (successCount > 0) { + summaryContent += `✅ Successfully assigned copilot to ${successCount} issue(s):\n\n`; + for (const result of results.filter(r => r.success)) { + const note = result.already_assigned ? " (already assigned)" : ""; + summaryContent += `- ${result.repo}#${result.issue_number}${note}\n`; + } + summaryContent += "\n"; + } + + if (failureCount > 0) { + summaryContent += `❌ Failed to assign copilot to ${failureCount} issue(s):\n\n`; + for (const result of results.filter(r => !r.success)) { + summaryContent += `- ${result.repo}#${result.issue_number}: ${result.error}\n`; + } + + // Check if any failures were permission-related + const hasPermissionError = results.some(r => !r.success && r.error && (r.error.includes("Resource not accessible") || r.error.includes("Insufficient permissions"))); + + if (hasPermissionError) { + summaryContent += generatePermissionErrorSummary(); + } + } + + await core.summary.addRaw(summaryContent).write(); + + // Fail if any assignments failed + if (failureCount > 0) { + core.setFailed(`Failed to assign copilot to ${failureCount} issue(s)`); + } +} + +// Export for use with require() +if (typeof module !== "undefined" && module.exports) { + module.exports = { main }; +} diff --git a/pkg/workflow/js/assign_issue.cjs b/pkg/workflow/js/assign_issue.cjs new file mode 100644 index 0000000000..0e5c0236fd --- /dev/null +++ b/pkg/workflow/js/assign_issue.cjs @@ -0,0 +1,105 @@ +// @ts-check +/// + +const { getAgentName, getIssueDetails, findAgent, assignAgentToIssue } = require("./assign_agent_helpers.cjs"); + +/** + * Assign an issue to a user or bot (including copilot) + * This script handles assigning issues after they are created + */ + +async function main() { + // Validate required environment variables + const ghToken = process.env.GH_TOKEN; + const assignee = process.env.ASSIGNEE; + const issueNumber = process.env.ISSUE_NUMBER; + + // Check if GH_TOKEN is present + if (!ghToken || ghToken.trim() === "") { + const docsUrl = "https://githubnext.github.io/gh-aw/reference/safe-outputs/#assigning-issues-to-copilot"; + core.setFailed(`GH_TOKEN environment variable is required but not set. ` + `This token is needed to assign issues. ` + `For more information on configuring Copilot tokens, see: ${docsUrl}`); + return; + } + + // Validate assignee + if (!assignee || assignee.trim() === "") { + core.setFailed("ASSIGNEE environment variable is required but not set"); + return; + } + + // Validate issue number + if (!issueNumber || issueNumber.trim() === "") { + core.setFailed("ISSUE_NUMBER environment variable is required but not set"); + return; + } + + const trimmedAssignee = assignee.trim(); + const trimmedIssueNumber = issueNumber.trim(); + const issueNum = parseInt(trimmedIssueNumber, 10); + + core.info(`Assigning issue #${trimmedIssueNumber} to ${trimmedAssignee}`); + + try { + // Check if the assignee is a known coding agent (e.g., copilot, @copilot) + const agentName = getAgentName(trimmedAssignee); + + if (agentName) { + // Use GraphQL API for agent assignment + // The token is set at the step level via github-token parameter + core.info(`Detected coding agent: ${agentName}. Using GraphQL API for assignment.`); + + // Get repository owner and repo from context + const owner = context.repo.owner; + const repo = context.repo.repo; + + // Find the agent in the repository + const agentId = await findAgent(owner, repo, agentName); + if (!agentId) { + throw new Error(`${agentName} coding agent is not available for this repository`); + } + core.info(`Found ${agentName} coding agent (ID: ${agentId})`); + + // Get issue details + const issueDetails = await getIssueDetails(owner, repo, issueNum); + if (!issueDetails) { + throw new Error("Failed to get issue details"); + } + + // Check if agent is already assigned + if (issueDetails.currentAssignees.includes(agentId)) { + core.info(`${agentName} is already assigned to issue #${trimmedIssueNumber}`); + } else { + // Assign agent using GraphQL mutation - uses built-in github object authenticated via github-token + const success = await assignAgentToIssue(issueDetails.issueId, agentId, issueDetails.currentAssignees, agentName); + + if (!success) { + throw new Error(`Failed to assign ${agentName} via GraphQL`); + } + } + } else { + // Use gh CLI for regular user assignment + await exec.exec("gh", ["issue", "edit", trimmedIssueNumber, "--add-assignee", trimmedAssignee], { + env: { ...process.env, GH_TOKEN: ghToken }, + }); + } + + core.info(`✅ Successfully assigned issue #${trimmedIssueNumber} to ${trimmedAssignee}`); + + // Write summary + await core.summary + .addRaw( + ` +## Issue Assignment + +Successfully assigned issue #${trimmedIssueNumber} to \`${trimmedAssignee}\`. +` + ) + .write(); + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + core.error(`Failed to assign issue: ${errorMessage}`); + core.setFailed(`Failed to assign issue #${trimmedIssueNumber} to ${trimmedAssignee}: ${errorMessage}`); + } +} + +module.exports = { main }; diff --git a/pkg/workflow/js/assign_milestone.cjs b/pkg/workflow/js/assign_milestone.cjs new file mode 100644 index 0000000000..0b9c88cba6 --- /dev/null +++ b/pkg/workflow/js/assign_milestone.cjs @@ -0,0 +1,169 @@ +// @ts-check +/// + +const { processSafeOutput } = require("./safe_output_processor.cjs"); + +async function main() { + // Use shared processor for common steps + const result = await processSafeOutput( + { + itemType: "assign_milestone", + configKey: "assign_milestone", + displayName: "Milestone", + itemTypeName: "milestone assignment", + supportsPR: true, + supportsIssue: true, + findMultiple: true, // This processor finds multiple items + envVars: { + allowed: "GH_AW_MILESTONE_ALLOWED", + maxCount: "GH_AW_MILESTONE_MAX_COUNT", + target: "GH_AW_MILESTONE_TARGET", + }, + }, + { + title: "Assign Milestone", + description: "The following milestone assignments would be made if staged mode was disabled:", + renderItem: item => { + let content = `**Issue:** #${item.issue_number}\n`; + content += `**Milestone Number:** ${item.milestone_number}\n\n`; + return content; + }, + } + ); + + if (!result.success) { + return; + } + + // @ts-ignore - TypeScript doesn't narrow properly after success check + const { items: milestoneItems, config } = result; + if (!config || !milestoneItems) { + core.setFailed("Internal error: config or milestoneItems is undefined"); + return; + } + const { allowed: allowedMilestones, maxCount } = config; + + // Limit items to max count + const itemsToProcess = milestoneItems.slice(0, maxCount); + if (milestoneItems.length > maxCount) { + core.warning(`Found ${milestoneItems.length} milestone assignments, but max is ${maxCount}. Processing first ${maxCount}.`); + } + + // Fetch all milestones to validate against allowed list + let allMilestones = []; + if (allowedMilestones) { + try { + const milestonesResponse = await github.rest.issues.listMilestones({ + owner: context.repo.owner, + repo: context.repo.repo, + state: "all", + per_page: 100, + }); + allMilestones = milestonesResponse.data; + core.info(`Fetched ${allMilestones.length} milestones from repository`); + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + core.error(`Failed to fetch milestones: ${errorMessage}`); + core.setFailed(`Failed to fetch milestones for validation: ${errorMessage}`); + return; + } + } + + // Process each milestone assignment + const results = []; + for (const item of itemsToProcess) { + const issueNumber = typeof item.issue_number === "number" ? item.issue_number : parseInt(String(item.issue_number), 10); + const milestoneNumber = typeof item.milestone_number === "number" ? item.milestone_number : parseInt(String(item.milestone_number), 10); + + if (isNaN(issueNumber) || issueNumber <= 0) { + core.error(`Invalid issue_number: ${item.issue_number}`); + continue; + } + + if (isNaN(milestoneNumber) || milestoneNumber <= 0) { + core.error(`Invalid milestone_number: ${item.milestone_number}`); + continue; + } + + // Validate against allowed list if configured + if (allowedMilestones && allowedMilestones.length > 0) { + const milestone = allMilestones.find(m => m.number === milestoneNumber); + + if (!milestone) { + core.warning(`Milestone #${milestoneNumber} not found in repository. Skipping.`); + continue; + } + + // Check if milestone title or number (as string) is in allowed list + const isAllowed = allowedMilestones.includes(milestone.title) || allowedMilestones.includes(String(milestoneNumber)); + + if (!isAllowed) { + core.warning(`Milestone "${milestone.title}" (#${milestoneNumber}) is not in the allowed list. Skipping.`); + continue; + } + } + + // Assign the milestone to the issue + try { + await github.rest.issues.update({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: issueNumber, + milestone: milestoneNumber, + }); + + core.info(`Successfully assigned milestone #${milestoneNumber} to issue #${issueNumber}`); + results.push({ + issue_number: issueNumber, + milestone_number: milestoneNumber, + success: true, + }); + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + core.error(`Failed to assign milestone #${milestoneNumber} to issue #${issueNumber}: ${errorMessage}`); + results.push({ + issue_number: issueNumber, + milestone_number: milestoneNumber, + success: false, + error: errorMessage, + }); + } + } + + // Generate step summary + const successCount = results.filter(r => r.success).length; + const failureCount = results.filter(r => !r.success).length; + + let summaryContent = "## Milestone Assignment\n\n"; + + if (successCount > 0) { + summaryContent += `✅ Successfully assigned ${successCount} milestone(s):\n\n`; + for (const result of results.filter(r => r.success)) { + summaryContent += `- Issue #${result.issue_number} → Milestone #${result.milestone_number}\n`; + } + summaryContent += "\n"; + } + + if (failureCount > 0) { + summaryContent += `❌ Failed to assign ${failureCount} milestone(s):\n\n`; + for (const result of results.filter(r => !r.success)) { + summaryContent += `- Issue #${result.issue_number} → Milestone #${result.milestone_number}: ${result.error}\n`; + } + } + + await core.summary.addRaw(summaryContent).write(); + + // Set outputs + const assignedMilestones = results + .filter(r => r.success) + .map(r => `${r.issue_number}:${r.milestone_number}`) + .join("\n"); + core.setOutput("assigned_milestones", assignedMilestones); + + // Fail if any assignments failed + if (failureCount > 0) { + core.setFailed(`Failed to assign ${failureCount} milestone(s)`); + } +} + +module.exports = { main }; diff --git a/pkg/workflow/js/assign_to_agent.cjs b/pkg/workflow/js/assign_to_agent.cjs new file mode 100644 index 0000000000..93ced2cef1 --- /dev/null +++ b/pkg/workflow/js/assign_to_agent.cjs @@ -0,0 +1,216 @@ +// @ts-check +/// + +const { loadAgentOutput } = require("./load_agent_output.cjs"); +const { generateStagedPreview } = require("./staged_preview.cjs"); +const { AGENT_LOGIN_NAMES, getAvailableAgentLogins, findAgent, getIssueDetails, assignAgentToIssue, generatePermissionErrorSummary } = require("./assign_agent_helpers.cjs"); + +async function main() { + const result = loadAgentOutput(); + if (!result.success) { + return; + } + + const assignItems = result.items.filter(item => item.type === "assign_to_agent"); + if (assignItems.length === 0) { + core.info("No assign_to_agent items found in agent output"); + return; + } + + core.info(`Found ${assignItems.length} assign_to_agent item(s)`); + + // Check if we're in staged mode + if (process.env.GH_AW_SAFE_OUTPUTS_STAGED === "true") { + await generateStagedPreview({ + title: "Assign to Agent", + description: "The following agent assignments would be made if staged mode was disabled:", + items: assignItems, + renderItem: item => { + let content = `**Issue:** #${item.issue_number}\n`; + content += `**Agent:** ${item.agent || "copilot"}\n`; + content += "\n"; + return content; + }, + }); + return; + } + + // Get default agent from configuration + const defaultAgent = process.env.GH_AW_AGENT_DEFAULT?.trim() || "copilot"; + core.info(`Default agent: ${defaultAgent}`); + + // Get max count configuration + const maxCountEnv = process.env.GH_AW_AGENT_MAX_COUNT; + const maxCount = maxCountEnv ? parseInt(maxCountEnv, 10) : 1; + if (isNaN(maxCount) || maxCount < 1) { + core.setFailed(`Invalid max value: ${maxCountEnv}. Must be a positive integer`); + return; + } + core.info(`Max count: ${maxCount}`); + + // Limit items to max count + const itemsToProcess = assignItems.slice(0, maxCount); + if (assignItems.length > maxCount) { + core.warning(`Found ${assignItems.length} agent assignments, but max is ${maxCount}. Processing first ${maxCount}.`); + } + + // Get target repository configuration + const targetRepoEnv = process.env.GH_AW_TARGET_REPO?.trim(); + let targetOwner = context.repo.owner; + let targetRepo = context.repo.repo; + + if (targetRepoEnv) { + const parts = targetRepoEnv.split("/"); + if (parts.length === 2) { + targetOwner = parts[0]; + targetRepo = parts[1]; + core.info(`Using target repository: ${targetOwner}/${targetRepo}`); + } else { + core.warning(`Invalid target-repo format: ${targetRepoEnv}. Expected owner/repo. Using current repository.`); + } + } + + // The github-token is set at the step level, so the built-in github object is authenticated + // with the correct token (GH_AW_AGENT_TOKEN by default) + + // Cache agent IDs to avoid repeated lookups + const agentCache = {}; + + // Process each agent assignment + const results = []; + for (const item of itemsToProcess) { + const issueNumber = typeof item.issue_number === "number" ? item.issue_number : parseInt(String(item.issue_number), 10); + const agentName = item.agent || defaultAgent; + + if (isNaN(issueNumber) || issueNumber <= 0) { + core.error(`Invalid issue_number: ${item.issue_number}`); + continue; + } + + // Check if agent is supported + if (!AGENT_LOGIN_NAMES[agentName]) { + core.warning(`Agent "${agentName}" is not supported. Supported agents: ${Object.keys(AGENT_LOGIN_NAMES).join(", ")}`); + results.push({ + issue_number: issueNumber, + agent: agentName, + success: false, + error: `Unsupported agent: ${agentName}`, + }); + continue; + } + + // Assign the agent to the issue using GraphQL + try { + // Find agent (use cache if available) - uses built-in github object authenticated via github-token + let agentId = agentCache[agentName]; + if (!agentId) { + core.info(`Looking for ${agentName} coding agent...`); + agentId = await findAgent(targetOwner, targetRepo, agentName); + if (!agentId) { + throw new Error(`${agentName} coding agent is not available for this repository`); + } + agentCache[agentName] = agentId; + core.info(`Found ${agentName} coding agent (ID: ${agentId})`); + } + + // Get issue details (ID and current assignees) via GraphQL + core.info("Getting issue details..."); + const issueDetails = await getIssueDetails(targetOwner, targetRepo, issueNumber); + if (!issueDetails) { + throw new Error("Failed to get issue details"); + } + + core.info(`Issue ID: ${issueDetails.issueId}`); + + // Check if agent is already assigned + if (issueDetails.currentAssignees.includes(agentId)) { + core.info(`${agentName} is already assigned to issue #${issueNumber}`); + results.push({ + issue_number: issueNumber, + agent: agentName, + success: true, + }); + continue; + } + + // Assign agent using GraphQL mutation - uses built-in github object authenticated via github-token + core.info(`Assigning ${agentName} coding agent to issue #${issueNumber}...`); + const success = await assignAgentToIssue(issueDetails.issueId, agentId, issueDetails.currentAssignees, agentName); + + if (!success) { + throw new Error(`Failed to assign ${agentName} via GraphQL`); + } + + core.info(`Successfully assigned ${agentName} coding agent to issue #${issueNumber}`); + results.push({ + issue_number: issueNumber, + agent: agentName, + success: true, + }); + } catch (error) { + let errorMessage = error instanceof Error ? error.message : String(error); + if (errorMessage.includes("coding agent is not available for this repository")) { + // Enrich with available agent logins to aid troubleshooting - uses built-in github object + try { + const available = await getAvailableAgentLogins(targetOwner, targetRepo); + if (available.length > 0) { + errorMessage += ` (available agents: ${available.join(", ")})`; + } + } catch (e) { + core.debug("Failed to enrich unavailable agent message with available list"); + } + } + core.error(`Failed to assign agent "${agentName}" to issue #${issueNumber}: ${errorMessage}`); + results.push({ + issue_number: issueNumber, + agent: agentName, + success: false, + error: errorMessage, + }); + } + } + + // Generate step summary + const successCount = results.filter(r => r.success).length; + const failureCount = results.filter(r => !r.success).length; + + let summaryContent = "## Agent Assignment\n\n"; + + if (successCount > 0) { + summaryContent += `✅ Successfully assigned ${successCount} agent(s):\n\n`; + for (const result of results.filter(r => r.success)) { + summaryContent += `- Issue #${result.issue_number} → Agent: ${result.agent}\n`; + } + summaryContent += "\n"; + } + + if (failureCount > 0) { + summaryContent += `❌ Failed to assign ${failureCount} agent(s):\n\n`; + for (const result of results.filter(r => !r.success)) { + summaryContent += `- Issue #${result.issue_number} → Agent: ${result.agent}: ${result.error}\n`; + } + + // Check if any failures were permission-related + const hasPermissionError = results.some(r => !r.success && r.error && (r.error.includes("Resource not accessible") || r.error.includes("Insufficient permissions"))); + + if (hasPermissionError) { + summaryContent += generatePermissionErrorSummary(); + } + } + + await core.summary.addRaw(summaryContent).write(); + + // Set outputs + const assignedAgents = results + .filter(r => r.success) + .map(r => `${r.issue_number}:${r.agent}`) + .join("\n"); + core.setOutput("assigned_agents", assignedAgents); + + // Fail if any assignments failed + if (failureCount > 0) { + core.setFailed(`Failed to assign ${failureCount} agent(s)`); + } +} + +module.exports = { main }; diff --git a/pkg/workflow/js/assign_to_user.cjs b/pkg/workflow/js/assign_to_user.cjs new file mode 100644 index 0000000000..ed21318148 --- /dev/null +++ b/pkg/workflow/js/assign_to_user.cjs @@ -0,0 +1,131 @@ +// @ts-check +/// + +const { processSafeOutput, processItems } = require("./safe_output_processor.cjs"); + +async function main() { + // Use shared processor for common steps + const result = await processSafeOutput( + { + itemType: "assign_to_user", + configKey: "assign_to_user", + displayName: "Assignees", + itemTypeName: "user assignment", + supportsPR: false, // Issue-only: not relevant for PRs + supportsIssue: true, + envVars: { + allowed: "GH_AW_ASSIGNEES_ALLOWED", + maxCount: "GH_AW_ASSIGNEES_MAX_COUNT", + target: "GH_AW_ASSIGNEES_TARGET", + }, + }, + { + title: "Assign to User", + description: "The following user assignments would be made if staged mode was disabled:", + renderItem: item => { + let content = ""; + if (item.issue_number) { + content += `**Target Issue:** #${item.issue_number}\n\n`; + } else { + content += `**Target:** Current issue\n\n`; + } + if (item.assignees && item.assignees.length > 0) { + content += `**Users to assign:** ${item.assignees.join(", ")}\n\n`; + } else if (item.assignee) { + content += `**User to assign:** ${item.assignee}\n\n`; + } + return content; + }, + } + ); + + if (!result.success) { + return; + } + + // @ts-ignore - TypeScript doesn't narrow properly after success check + const { item: assignItem, config, targetResult } = result; + if (!config || !targetResult || targetResult.number === undefined) { + core.setFailed("Internal error: config, targetResult, or targetResult.number is undefined"); + return; + } + const { allowed: allowedAssignees, maxCount } = config; + const issueNumber = targetResult.number; + + // Support both singular "assignee" and plural "assignees" for flexibility + let requestedAssignees = []; + if (assignItem.assignees && Array.isArray(assignItem.assignees)) { + requestedAssignees = assignItem.assignees; + } else if (assignItem.assignee) { + requestedAssignees = [assignItem.assignee]; + } + + core.info(`Requested assignees: ${JSON.stringify(requestedAssignees)}`); + + // Use shared helper to filter, sanitize, dedupe, and limit + const uniqueAssignees = processItems(requestedAssignees, allowedAssignees, maxCount); + + if (uniqueAssignees.length === 0) { + core.info("No assignees to add"); + core.setOutput("assigned_users", ""); + await core.summary + .addRaw( + ` +## User Assignment + +No users were assigned (no valid assignees found in agent output). +` + ) + .write(); + return; + } + + core.info(`Assigning ${uniqueAssignees.length} users to issue #${issueNumber}: ${JSON.stringify(uniqueAssignees)}`); + + try { + // Get target repository from environment or use current + const targetRepoEnv = process.env.GH_AW_TARGET_REPO_SLUG?.trim(); + let targetOwner = context.repo.owner; + let targetRepo = context.repo.repo; + + if (targetRepoEnv) { + const parts = targetRepoEnv.split("/"); + if (parts.length === 2) { + targetOwner = parts[0]; + targetRepo = parts[1]; + core.info(`Using target repository: ${targetOwner}/${targetRepo}`); + } + } + + // Add assignees to the issue + await github.rest.issues.addAssignees({ + owner: targetOwner, + repo: targetRepo, + issue_number: issueNumber, + assignees: uniqueAssignees, + }); + + core.info(`Successfully assigned ${uniqueAssignees.length} user(s) to issue #${issueNumber}`); + + core.setOutput("assigned_users", uniqueAssignees.join("\n")); + + const assigneesListMarkdown = uniqueAssignees.map(assignee => `- \`${assignee}\``).join("\n"); + await core.summary + .addRaw( + ` +## User Assignment + +Successfully assigned ${uniqueAssignees.length} user(s) to issue #${issueNumber}: + +${assigneesListMarkdown} +` + ) + .write(); + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + core.error(`Failed to assign users: ${errorMessage}`); + core.setFailed(`Failed to assign users: ${errorMessage}`); + } +} + +module.exports = { main }; diff --git a/pkg/workflow/js/check_command_position.cjs b/pkg/workflow/js/check_command_position.cjs new file mode 100644 index 0000000000..d0df669e00 --- /dev/null +++ b/pkg/workflow/js/check_command_position.cjs @@ -0,0 +1,69 @@ +// @ts-check +/// + +/** + * Check if command is the first word in the triggering text + * This prevents accidental command triggers from words appearing later in content + */ +async function main() { + const command = process.env.GH_AW_COMMAND; + + if (!command) { + core.setFailed("Configuration error: GH_AW_COMMAND not specified."); + return; + } + + // Get the triggering text based on event type + let text = ""; + const eventName = context.eventName; + + try { + if (eventName === "issues") { + text = context.payload.issue?.body || ""; + } else if (eventName === "pull_request") { + text = context.payload.pull_request?.body || ""; + } else if (eventName === "issue_comment") { + text = context.payload.comment?.body || ""; + } else if (eventName === "pull_request_review_comment") { + text = context.payload.comment?.body || ""; + } else if (eventName === "discussion") { + text = context.payload.discussion?.body || ""; + } else if (eventName === "discussion_comment") { + text = context.payload.comment?.body || ""; + } else { + // For non-comment events, pass the check + core.info(`Event ${eventName} does not require command position check`); + core.setOutput("command_position_ok", "true"); + return; + } + + // Expected command format: /command + const expectedCommand = `/${command}`; + + // If text is empty or doesn't contain the command at all, pass the check + if (!text || !text.includes(expectedCommand)) { + core.info(`No command '${expectedCommand}' found in text, passing check`); + core.setOutput("command_position_ok", "true"); + return; + } + + // Normalize whitespace and get the first word + const trimmedText = text.trim(); + const firstWord = trimmedText.split(/\s+/)[0]; + + core.info(`Checking command position for: ${expectedCommand}`); + core.info(`First word in text: ${firstWord}`); + + if (firstWord === expectedCommand) { + core.info(`✓ Command '${expectedCommand}' is at the start of the text`); + core.setOutput("command_position_ok", "true"); + } else { + core.warning(`⚠️ Command '${expectedCommand}' is not the first word (found: '${firstWord}'). Workflow will be skipped.`); + core.setOutput("command_position_ok", "false"); + } + } catch (error) { + core.setFailed(error instanceof Error ? error.message : String(error)); + } +} + +module.exports = { main }; diff --git a/pkg/workflow/js/check_membership.cjs b/pkg/workflow/js/check_membership.cjs new file mode 100644 index 0000000000..d5f6c8a5c9 --- /dev/null +++ b/pkg/workflow/js/check_membership.cjs @@ -0,0 +1,100 @@ +// @ts-check +/// + +const { parseRequiredPermissions, parseAllowedBots, checkRepositoryPermission, checkBotStatus } = require("./check_permissions_utils.cjs"); + +async function main() { + const { eventName } = context; + const actor = context.actor; + const { owner, repo } = context.repo; + const requiredPermissions = parseRequiredPermissions(); + const allowedBots = parseAllowedBots(); + + // For workflow_dispatch, only skip check if "write" is in the allowed roles + // since workflow_dispatch can be triggered by users with write access + if (eventName === "workflow_dispatch") { + const hasWriteRole = requiredPermissions.includes("write"); + if (hasWriteRole) { + core.info(`✅ Event ${eventName} does not require validation (write role allowed)`); + core.setOutput("is_team_member", "true"); + core.setOutput("result", "safe_event"); + return; + } + // If write is not allowed, continue with permission check + core.info(`Event ${eventName} requires validation (write role not allowed)`); + } + + // skip check for other safe events + // workflow_run is intentionally excluded due to HIGH security risks: + // - Privilege escalation (inherits permissions from triggering workflow) + // - Branch protection bypass (can execute on protected branches) + // - Secret exposure (secrets available from untrusted code) + const safeEvents = ["schedule"]; + if (safeEvents.includes(eventName)) { + core.info(`✅ Event ${eventName} does not require validation`); + core.setOutput("is_team_member", "true"); + core.setOutput("result", "safe_event"); + return; + } + + if (!requiredPermissions || requiredPermissions.length === 0) { + core.warning("❌ Configuration error: Required permissions not specified. Contact repository administrator."); + core.setOutput("is_team_member", "false"); + core.setOutput("result", "config_error"); + core.setOutput("error_message", "Configuration error: Required permissions not specified"); + return; + } + + // Check if the actor has the required repository permissions + const result = await checkRepositoryPermission(actor, owner, repo, requiredPermissions); + + if (result.error) { + core.setOutput("is_team_member", "false"); + core.setOutput("result", "api_error"); + core.setOutput("error_message", `Repository permission check failed: ${result.error}`); + return; + } + + if (result.authorized) { + core.setOutput("is_team_member", "true"); + core.setOutput("result", "authorized"); + core.setOutput("user_permission", result.permission); + } else { + // User doesn't have required permissions, check if they're an allowed bot + if (allowedBots && allowedBots.length > 0) { + core.info(`Checking if actor '${actor}' is in allowed bots list: ${allowedBots.join(", ")}`); + + if (allowedBots.includes(actor)) { + core.info(`Actor '${actor}' is in the allowed bots list`); + + // Verify the bot is active/installed on the repository + const botStatus = await checkBotStatus(actor, owner, repo); + + if (botStatus.isBot && botStatus.isActive) { + core.info(`✅ Bot '${actor}' is active on the repository and authorized`); + core.setOutput("is_team_member", "true"); + core.setOutput("result", "authorized_bot"); + core.setOutput("user_permission", "bot"); + return; + } else if (botStatus.isBot && !botStatus.isActive) { + core.warning(`Bot '${actor}' is in the allowed list but not active/installed on ${owner}/${repo}`); + core.setOutput("is_team_member", "false"); + core.setOutput("result", "bot_not_active"); + core.setOutput("user_permission", result.permission); + core.setOutput("error_message", `Access denied: Bot '${actor}' is not active/installed on this repository`); + return; + } else { + core.info(`Actor '${actor}' is in allowed bots list but bot status check failed`); + } + } + } + + // Not authorized by role or bot + core.setOutput("is_team_member", "false"); + core.setOutput("result", "insufficient_permissions"); + core.setOutput("user_permission", result.permission); + core.setOutput("error_message", `Access denied: User '${actor}' is not authorized. Required permissions: ${requiredPermissions.join(", ")}`); + } +} + +module.exports = { main }; diff --git a/pkg/workflow/js/check_permissions_utils.cjs b/pkg/workflow/js/check_permissions_utils.cjs new file mode 100644 index 0000000000..21c1fea2c4 --- /dev/null +++ b/pkg/workflow/js/check_permissions_utils.cjs @@ -0,0 +1,118 @@ +// @ts-check +/// + +/** + * Shared utility for repository permission validation + * Used by both check_permissions.cjs and check_membership.cjs + */ + +/** + * Parse required permissions from environment variable + * @returns {string[]} Array of required permission levels + */ +function parseRequiredPermissions() { + const requiredPermissionsEnv = process.env.GH_AW_REQUIRED_ROLES; + return requiredPermissionsEnv ? requiredPermissionsEnv.split(",").filter(p => p.trim() !== "") : []; +} + +/** + * Parse allowed bot identifiers from environment variable + * @returns {string[]} Array of allowed bot identifiers + */ +function parseAllowedBots() { + const allowedBotsEnv = process.env.GH_AW_ALLOWED_BOTS; + return allowedBotsEnv ? allowedBotsEnv.split(",").filter(b => b.trim() !== "") : []; +} + +/** + * Check if the actor is a bot and if it's active on the repository + * @param {string} actor - GitHub username to check + * @param {string} owner - Repository owner + * @param {string} repo - Repository name + * @returns {Promise<{isBot: boolean, isActive: boolean, error?: string}>} + */ +async function checkBotStatus(actor, owner, repo) { + try { + // Check if the actor looks like a bot (ends with [bot]) + const isBot = actor.endsWith("[bot]"); + + if (!isBot) { + return { isBot: false, isActive: false }; + } + + core.info(`Checking if bot '${actor}' is active on ${owner}/${repo}`); + + // Try to get the bot's permission level to verify it's installed/active on the repo + // GitHub Apps/bots that are installed on a repository show up in the collaborators + try { + const botPermission = await github.rest.repos.getCollaboratorPermissionLevel({ + owner: owner, + repo: repo, + username: actor, + }); + + core.info(`Bot '${actor}' is active with permission level: ${botPermission.data.permission}`); + return { isBot: true, isActive: true }; + } catch (botError) { + // If we get a 404, the bot is not installed/active on this repository + if (typeof botError === "object" && botError !== null && "status" in botError && botError.status === 404) { + core.warning(`Bot '${actor}' is not active/installed on ${owner}/${repo}`); + return { isBot: true, isActive: false }; + } + // For other errors, we'll treat as inactive to be safe + const errorMessage = botError instanceof Error ? botError.message : String(botError); + core.warning(`Failed to check bot status: ${errorMessage}`); + return { isBot: true, isActive: false, error: errorMessage }; + } + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + core.warning(`Error checking bot status: ${errorMessage}`); + return { isBot: false, isActive: false, error: errorMessage }; + } +} + +/** + * Check if user has required repository permissions + * @param {string} actor - GitHub username to check + * @param {string} owner - Repository owner + * @param {string} repo - Repository name + * @param {string[]} requiredPermissions - Array of required permission levels + * @returns {Promise<{authorized: boolean, permission?: string, error?: string}>} + */ +async function checkRepositoryPermission(actor, owner, repo, requiredPermissions) { + try { + core.info(`Checking if user '${actor}' has required permissions for ${owner}/${repo}`); + core.info(`Required permissions: ${requiredPermissions.join(", ")}`); + + const repoPermission = await github.rest.repos.getCollaboratorPermissionLevel({ + owner: owner, + repo: repo, + username: actor, + }); + + const permission = repoPermission.data.permission; + core.info(`Repository permission level: ${permission}`); + + // Check if user has one of the required permission levels + for (const requiredPerm of requiredPermissions) { + if (permission === requiredPerm || (requiredPerm === "maintainer" && permission === "maintain")) { + core.info(`✅ User has ${permission} access to repository`); + return { authorized: true, permission: permission }; + } + } + + core.warning(`User permission '${permission}' does not meet requirements: ${requiredPermissions.join(", ")}`); + return { authorized: false, permission: permission }; + } catch (repoError) { + const errorMessage = repoError instanceof Error ? repoError.message : String(repoError); + core.warning(`Repository permission check failed: ${errorMessage}`); + return { authorized: false, error: errorMessage }; + } +} + +module.exports = { + parseRequiredPermissions, + parseAllowedBots, + checkRepositoryPermission, + checkBotStatus, +}; diff --git a/pkg/workflow/js/check_skip_if_match.cjs b/pkg/workflow/js/check_skip_if_match.cjs new file mode 100644 index 0000000000..fa8525b928 --- /dev/null +++ b/pkg/workflow/js/check_skip_if_match.cjs @@ -0,0 +1,61 @@ +// @ts-check +/// + +async function main() { + const skipQuery = process.env.GH_AW_SKIP_QUERY; + const workflowName = process.env.GH_AW_WORKFLOW_NAME; + const maxMatchesStr = process.env.GH_AW_SKIP_MAX_MATCHES || "1"; + + if (!skipQuery) { + core.setFailed("Configuration error: GH_AW_SKIP_QUERY not specified."); + return; + } + + if (!workflowName) { + core.setFailed("Configuration error: GH_AW_WORKFLOW_NAME not specified."); + return; + } + + const maxMatches = parseInt(maxMatchesStr, 10); + if (isNaN(maxMatches) || maxMatches < 1) { + core.setFailed(`Configuration error: GH_AW_SKIP_MAX_MATCHES must be a positive integer, got "${maxMatchesStr}".`); + return; + } + + core.info(`Checking skip-if-match query: ${skipQuery}`); + core.info(`Maximum matches threshold: ${maxMatches}`); + + // Get repository information from context + const { owner, repo } = context.repo; + + // Scope the query to the current repository + const scopedQuery = `${skipQuery} repo:${owner}/${repo}`; + + core.info(`Scoped query: ${scopedQuery}`); + + try { + // Search for issues and pull requests using the GitHub API + // We only need to know if the count reaches the threshold + const response = await github.rest.search.issuesAndPullRequests({ + q: scopedQuery, + per_page: 1, // We only need the count, not the items + }); + + const totalCount = response.data.total_count; + core.info(`Search found ${totalCount} matching items`); + + if (totalCount >= maxMatches) { + core.warning(`🔍 Skip condition matched (${totalCount} items found, threshold: ${maxMatches}). Workflow execution will be prevented by activation job.`); + core.setOutput("skip_check_ok", "false"); + return; + } + + core.info(`✓ Found ${totalCount} matches (below threshold of ${maxMatches}), workflow can proceed`); + core.setOutput("skip_check_ok", "true"); + } catch (error) { + core.setFailed(`Failed to execute search query: ${error instanceof Error ? error.message : String(error)}`); + return; + } +} + +module.exports = { main }; diff --git a/pkg/workflow/js/check_stop_time.cjs b/pkg/workflow/js/check_stop_time.cjs new file mode 100644 index 0000000000..4e86da0f8a --- /dev/null +++ b/pkg/workflow/js/check_stop_time.cjs @@ -0,0 +1,41 @@ +// @ts-check +/// + +async function main() { + const stopTime = process.env.GH_AW_STOP_TIME; + const workflowName = process.env.GH_AW_WORKFLOW_NAME; + + if (!stopTime) { + core.setFailed("Configuration error: GH_AW_STOP_TIME not specified."); + return; + } + + if (!workflowName) { + core.setFailed("Configuration error: GH_AW_WORKFLOW_NAME not specified."); + return; + } + + core.info(`Checking stop-time limit: ${stopTime}`); + + // Parse the stop time (format: "YYYY-MM-DD HH:MM:SS") + const stopTimeDate = new Date(stopTime); + + if (isNaN(stopTimeDate.getTime())) { + core.setFailed(`Invalid stop-time format: ${stopTime}. Expected format: YYYY-MM-DD HH:MM:SS`); + return; + } + + const currentTime = new Date(); + core.info(`Current time: ${currentTime.toISOString()}`); + core.info(`Stop time: ${stopTimeDate.toISOString()}`); + + if (currentTime >= stopTimeDate) { + core.warning(`⏰ Stop time reached. Workflow execution will be prevented by activation job.`); + core.setOutput("stop_time_ok", "false"); + return; + } + + core.setOutput("stop_time_ok", "true"); +} + +module.exports = { main }; diff --git a/pkg/workflow/js/check_workflow_timestamp_api.cjs b/pkg/workflow/js/check_workflow_timestamp_api.cjs new file mode 100644 index 0000000000..9b94409b83 --- /dev/null +++ b/pkg/workflow/js/check_workflow_timestamp_api.cjs @@ -0,0 +1,112 @@ +// @ts-check +/// + +/** + * Check workflow file timestamps using GitHub API to detect outdated lock files + * This script compares the last commit time of the source .md file + * with the compiled .lock.yml file and warns if recompilation is needed + */ + +async function main() { + const workflowFile = process.env.GH_AW_WORKFLOW_FILE; + + if (!workflowFile) { + core.setFailed("Configuration error: GH_AW_WORKFLOW_FILE not available."); + return; + } + + // Construct file paths + const workflowBasename = workflowFile.replace(".lock.yml", ""); + const workflowMdPath = `.github/workflows/${workflowBasename}.md`; + const lockFilePath = `.github/workflows/${workflowFile}`; + + core.info(`Checking workflow timestamps using GitHub API:`); + core.info(` Source: ${workflowMdPath}`); + core.info(` Lock file: ${lockFilePath}`); + + const { owner, repo } = context.repo; + const ref = context.sha; + + // Helper function to get the last commit for a file + async function getLastCommitForFile(path) { + try { + const response = await github.rest.repos.listCommits({ + owner, + repo, + path, + per_page: 1, + sha: ref, + }); + + if (response.data && response.data.length > 0) { + const commit = response.data[0]; + return { + sha: commit.sha, + date: commit.commit.committer.date, + message: commit.commit.message, + }; + } + return null; + } catch (error) { + core.info(`Could not fetch commit for ${path}: ${error.message}`); + return null; + } + } + + // Fetch last commits for both files + const workflowCommit = await getLastCommitForFile(workflowMdPath); + const lockCommit = await getLastCommitForFile(lockFilePath); + + // Handle cases where files don't exist + if (!workflowCommit) { + core.info(`Source file does not exist: ${workflowMdPath}`); + } + + if (!lockCommit) { + core.info(`Lock file does not exist: ${lockFilePath}`); + } + + if (!workflowCommit || !lockCommit) { + core.info("Skipping timestamp check - one or both files not found"); + return; + } + + // Parse dates for comparison + const workflowDate = new Date(workflowCommit.date); + const lockDate = new Date(lockCommit.date); + + core.info(` Source last commit: ${workflowDate.toISOString()} (${workflowCommit.sha.substring(0, 7)})`); + core.info(` Lock last commit: ${lockDate.toISOString()} (${lockCommit.sha.substring(0, 7)})`); + + // Check if workflow file is newer than lock file + if (workflowDate > lockDate) { + const warningMessage = `WARNING: Lock file '${lockFilePath}' is outdated! The workflow file '${workflowMdPath}' has been modified more recently. Run 'gh aw compile' to regenerate the lock file.`; + + core.error(warningMessage); + + // Format timestamps and commits for display + const workflowTimestamp = workflowDate.toISOString(); + const lockTimestamp = lockDate.toISOString(); + + // Add summary to GitHub Step Summary + let summary = core.summary + .addRaw("### ⚠️ Workflow Lock File Warning\n\n") + .addRaw("**WARNING**: Lock file is outdated and needs to be regenerated.\n\n") + .addRaw("**Files:**\n") + .addRaw(`- Source: \`${workflowMdPath}\`\n`) + .addRaw(` - Last commit: ${workflowTimestamp}\n`) + .addRaw(` - Commit SHA: [\`${workflowCommit.sha.substring(0, 7)}\`](https://github.com/${owner}/${repo}/commit/${workflowCommit.sha})\n`) + .addRaw(`- Lock: \`${lockFilePath}\`\n`) + .addRaw(` - Last commit: ${lockTimestamp}\n`) + .addRaw(` - Commit SHA: [\`${lockCommit.sha.substring(0, 7)}\`](https://github.com/${owner}/${repo}/commit/${lockCommit.sha})\n\n`) + .addRaw("**Action Required:** Run `gh aw compile` to regenerate the lock file.\n\n"); + + await summary.write(); + } else if (workflowCommit.sha === lockCommit.sha) { + core.info("✅ Lock file is up to date (same commit)"); + } else { + core.info("✅ Lock file is up to date"); + } +} + +module.exports = { main }; diff --git a/pkg/workflow/js/checkout_pr_branch.cjs b/pkg/workflow/js/checkout_pr_branch.cjs new file mode 100644 index 0000000000..86f65dda23 --- /dev/null +++ b/pkg/workflow/js/checkout_pr_branch.cjs @@ -0,0 +1,45 @@ +// @ts-check +/// + +/** + * Checkout PR branch when PR context is available + * This script handles both pull_request events and comment events on PRs + */ + +async function main() { + const eventName = context.eventName; + const pullRequest = context.payload.pull_request; + + if (!pullRequest) { + core.info("No pull request context available, skipping checkout"); + return; + } + + core.info(`Event: ${eventName}`); + core.info(`Pull Request #${pullRequest.number}`); + + try { + if (eventName === "pull_request") { + // For pull_request events, use the head ref directly + const branchName = pullRequest.head.ref; + core.info(`Checking out PR branch: ${branchName}`); + + await exec.exec("git", ["fetch", "origin", branchName]); + await exec.exec("git", ["checkout", branchName]); + + core.info(`✅ Successfully checked out branch: ${branchName}`); + } else { + // For comment events on PRs, use gh pr checkout with PR number + const prNumber = pullRequest.number; + core.info(`Checking out PR #${prNumber} using gh pr checkout`); + + await exec.exec("gh", ["pr", "checkout", prNumber.toString()]); + + core.info(`✅ Successfully checked out PR #${prNumber}`); + } + } catch (error) { + core.setFailed(`Failed to checkout PR branch: ${error instanceof Error ? error.message : String(error)}`); + } +} + +module.exports = { main }; diff --git a/pkg/workflow/js/close_discussion.cjs b/pkg/workflow/js/close_discussion.cjs new file mode 100644 index 0000000000..5db69bea76 --- /dev/null +++ b/pkg/workflow/js/close_discussion.cjs @@ -0,0 +1,316 @@ +// @ts-check +/// + +const { loadAgentOutput } = require("./load_agent_output.cjs"); +const { generateFooter } = require("./generate_footer.cjs"); +const { getTrackerID } = require("./get_tracker_id.cjs"); +const { getRepositoryUrl } = require("./get_repository_url.cjs"); + +/** + * Get discussion details using GraphQL + * @param {any} github - GitHub GraphQL instance + * @param {string} owner - Repository owner + * @param {string} repo - Repository name + * @param {number} discussionNumber - Discussion number + * @returns {Promise<{id: string, title: string, category: {name: string}, labels: {nodes: Array<{name: string}>}, url: string}>} Discussion details + */ +async function getDiscussionDetails(github, owner, repo, discussionNumber) { + const { repository } = await github.graphql( + ` + query($owner: String!, $repo: String!, $num: Int!) { + repository(owner: $owner, name: $repo) { + discussion(number: $num) { + id + title + category { + name + } + labels(first: 100) { + nodes { + name + } + } + url + } + } + }`, + { owner, repo, num: discussionNumber } + ); + + if (!repository || !repository.discussion) { + throw new Error(`Discussion #${discussionNumber} not found in ${owner}/${repo}`); + } + + return repository.discussion; +} + +/** + * Add comment to a GitHub Discussion using GraphQL + * @param {any} github - GitHub GraphQL instance + * @param {string} discussionId - Discussion node ID + * @param {string} message - Comment body + * @returns {Promise<{id: string, url: string}>} Comment details + */ +async function addDiscussionComment(github, discussionId, message) { + const result = await github.graphql( + ` + mutation($dId: ID!, $body: String!) { + addDiscussionComment(input: { discussionId: $dId, body: $body }) { + comment { + id + url + } + } + }`, + { dId: discussionId, body: message } + ); + + return result.addDiscussionComment.comment; +} + +/** + * Close a GitHub Discussion using GraphQL + * @param {any} github - GitHub GraphQL instance + * @param {string} discussionId - Discussion node ID + * @param {string|undefined} reason - Optional close reason (RESOLVED, DUPLICATE, OUTDATED, or ANSWERED) + * @returns {Promise<{id: string, url: string}>} Discussion details + */ +async function closeDiscussion(github, discussionId, reason) { + const mutation = reason + ? ` + mutation($dId: ID!, $reason: DiscussionCloseReason!) { + closeDiscussion(input: { discussionId: $dId, reason: $reason }) { + discussion { + id + url + } + } + }` + : ` + mutation($dId: ID!) { + closeDiscussion(input: { discussionId: $dId }) { + discussion { + id + url + } + } + }`; + + const variables = reason ? { dId: discussionId, reason } : { dId: discussionId }; + const result = await github.graphql(mutation, variables); + + return result.closeDiscussion.discussion; +} + +async function main() { + // Check if we're in staged mode + const isStaged = process.env.GH_AW_SAFE_OUTPUTS_STAGED === "true"; + + const result = loadAgentOutput(); + if (!result.success) { + return; + } + + // Find all close-discussion items + const closeDiscussionItems = result.items.filter(/** @param {any} item */ item => item.type === "close_discussion"); + if (closeDiscussionItems.length === 0) { + core.info("No close-discussion items found in agent output"); + return; + } + + core.info(`Found ${closeDiscussionItems.length} close-discussion item(s)`); + + // Get configuration from environment + const requiredLabels = process.env.GH_AW_CLOSE_DISCUSSION_REQUIRED_LABELS ? process.env.GH_AW_CLOSE_DISCUSSION_REQUIRED_LABELS.split(",").map(l => l.trim()) : []; + const requiredTitlePrefix = process.env.GH_AW_CLOSE_DISCUSSION_REQUIRED_TITLE_PREFIX || ""; + const requiredCategory = process.env.GH_AW_CLOSE_DISCUSSION_REQUIRED_CATEGORY || ""; + const target = process.env.GH_AW_CLOSE_DISCUSSION_TARGET || "triggering"; + + core.info(`Configuration: requiredLabels=${requiredLabels.join(",")}, requiredTitlePrefix=${requiredTitlePrefix}, requiredCategory=${requiredCategory}, target=${target}`); + + // Check if we're in a discussion context + const isDiscussionContext = context.eventName === "discussion" || context.eventName === "discussion_comment"; + + // If in staged mode, emit step summary instead of closing discussions + if (isStaged) { + let summaryContent = "## 🎭 Staged Mode: Close Discussions Preview\n\n"; + summaryContent += "The following discussions would be closed if staged mode was disabled:\n\n"; + + for (let i = 0; i < closeDiscussionItems.length; i++) { + const item = closeDiscussionItems[i]; + summaryContent += `### Discussion ${i + 1}\n`; + + const discussionNumber = item.discussion_number; + if (discussionNumber) { + const repoUrl = getRepositoryUrl(); + const discussionUrl = `${repoUrl}/discussions/${discussionNumber}`; + summaryContent += `**Target Discussion:** [#${discussionNumber}](${discussionUrl})\n\n`; + } else { + summaryContent += `**Target:** Current discussion\n\n`; + } + + if (item.reason) { + summaryContent += `**Reason:** ${item.reason}\n\n`; + } + + summaryContent += `**Comment:**\n${item.body || "No content provided"}\n\n`; + + if (requiredLabels.length > 0) { + summaryContent += `**Required Labels:** ${requiredLabels.join(", ")}\n\n`; + } + if (requiredTitlePrefix) { + summaryContent += `**Required Title Prefix:** ${requiredTitlePrefix}\n\n`; + } + if (requiredCategory) { + summaryContent += `**Required Category:** ${requiredCategory}\n\n`; + } + + summaryContent += "---\n\n"; + } + + // Write to step summary + await core.summary.addRaw(summaryContent).write(); + core.info("📝 Discussion close preview written to step summary"); + return; + } + + // Validate context based on target configuration + if (target === "triggering" && !isDiscussionContext) { + core.info('Target is "triggering" but not running in discussion context, skipping discussion close'); + return; + } + + // Extract triggering context for footer generation + const triggeringDiscussionNumber = context.payload?.discussion?.number; + + const closedDiscussions = []; + + // Process each close-discussion item + for (let i = 0; i < closeDiscussionItems.length; i++) { + const item = closeDiscussionItems[i]; + core.info(`Processing close-discussion item ${i + 1}/${closeDiscussionItems.length}: bodyLength=${item.body.length}`); + + // Determine the discussion number + let discussionNumber; + + if (target === "*") { + // For target "*", we need an explicit number from the item + const targetNumber = item.discussion_number; + if (targetNumber) { + discussionNumber = parseInt(targetNumber, 10); + if (isNaN(discussionNumber) || discussionNumber <= 0) { + core.info(`Invalid discussion number specified: ${targetNumber}`); + continue; + } + } else { + core.info(`Target is "*" but no discussion_number specified in close-discussion item`); + continue; + } + } else if (target && target !== "triggering") { + // Explicit number specified in target configuration + discussionNumber = parseInt(target, 10); + if (isNaN(discussionNumber) || discussionNumber <= 0) { + core.info(`Invalid discussion number in target configuration: ${target}`); + continue; + } + } else { + // Default behavior: use triggering discussion + if (isDiscussionContext) { + discussionNumber = context.payload.discussion?.number; + if (!discussionNumber) { + core.info("Discussion context detected but no discussion found in payload"); + continue; + } + } else { + core.info("Not in discussion context and no explicit target specified"); + continue; + } + } + + try { + // Fetch discussion details to check filters + const discussion = await getDiscussionDetails(github, context.repo.owner, context.repo.repo, discussionNumber); + + // Apply label filter + if (requiredLabels.length > 0) { + const discussionLabels = discussion.labels.nodes.map(l => l.name); + const hasRequiredLabel = requiredLabels.some(required => discussionLabels.includes(required)); + if (!hasRequiredLabel) { + core.info(`Discussion #${discussionNumber} does not have required labels: ${requiredLabels.join(", ")}`); + continue; + } + } + + // Apply title prefix filter + if (requiredTitlePrefix && !discussion.title.startsWith(requiredTitlePrefix)) { + core.info(`Discussion #${discussionNumber} does not have required title prefix: ${requiredTitlePrefix}`); + continue; + } + + // Apply category filter + if (requiredCategory && discussion.category.name !== requiredCategory) { + core.info(`Discussion #${discussionNumber} is not in required category: ${requiredCategory}`); + continue; + } + + // Extract body from the JSON item + let body = item.body.trim(); + + // Add AI disclaimer with workflow name and run url + const workflowName = process.env.GH_AW_WORKFLOW_NAME || "Workflow"; + const workflowSource = process.env.GH_AW_WORKFLOW_SOURCE || ""; + const workflowSourceURL = process.env.GH_AW_WORKFLOW_SOURCE_URL || ""; + const runId = context.runId; + const githubServer = process.env.GITHUB_SERVER_URL || "https://github.com"; + const runUrl = context.payload.repository ? `${context.payload.repository.html_url}/actions/runs/${runId}` : `${githubServer}/${context.repo.owner}/${context.repo.repo}/actions/runs/${runId}`; + + // Add fingerprint comment if present + body += getTrackerID("markdown"); + + body += generateFooter(workflowName, runUrl, workflowSource, workflowSourceURL, undefined, undefined, triggeringDiscussionNumber); + + core.info(`Adding comment to discussion #${discussionNumber}`); + core.info(`Comment content length: ${body.length}`); + + // Add comment first + const comment = await addDiscussionComment(github, discussion.id, body); + core.info("Added discussion comment: " + comment.url); + + // Then close the discussion + core.info(`Closing discussion #${discussionNumber} with reason: ${item.reason || "none"}`); + const closedDiscussion = await closeDiscussion(github, discussion.id, item.reason); + core.info("Closed discussion: " + closedDiscussion.url); + + closedDiscussions.push({ + number: discussionNumber, + url: discussion.url, + comment_url: comment.url, + }); + + // Set output for the last closed discussion (for backward compatibility) + if (i === closeDiscussionItems.length - 1) { + core.setOutput("discussion_number", discussionNumber); + core.setOutput("discussion_url", discussion.url); + core.setOutput("comment_url", comment.url); + } + } catch (error) { + core.error(`✗ Failed to close discussion #${discussionNumber}: ${error instanceof Error ? error.message : String(error)}`); + throw error; + } + } + + // Write summary for all closed discussions + if (closedDiscussions.length > 0) { + let summaryContent = "\n\n## Closed Discussions\n"; + for (const discussion of closedDiscussions) { + summaryContent += `- Discussion #${discussion.number}: [View Discussion](${discussion.url})\n`; + summaryContent += ` - Comment: [View Comment](${discussion.comment_url})\n`; + } + await core.summary.addRaw(summaryContent).write(); + } + + core.info(`Successfully closed ${closedDiscussions.length} discussion(s)`); + return closedDiscussions; +} + +module.exports = { main }; diff --git a/pkg/workflow/js/close_entity_helpers.cjs b/pkg/workflow/js/close_entity_helpers.cjs new file mode 100644 index 0000000000..08797458c5 --- /dev/null +++ b/pkg/workflow/js/close_entity_helpers.cjs @@ -0,0 +1,395 @@ +// @ts-check +/// + +const { loadAgentOutput } = require("./load_agent_output.cjs"); +const { generateFooter } = require("./generate_footer.cjs"); +const { getTrackerID } = require("./get_tracker_id.cjs"); +const { getRepositoryUrl } = require("./get_repository_url.cjs"); + +/** + * @typedef {'issue' | 'pull_request'} EntityType + */ + +/** + * @typedef {Object} EntityConfig + * @property {EntityType} entityType - The type of entity (issue or pull_request) + * @property {string} itemType - The agent output item type (e.g., "close_issue") + * @property {string} itemTypeDisplay - Human-readable item type for log messages (e.g., "close-issue") + * @property {string} numberField - The field name for the entity number in agent output (e.g., "issue_number") + * @property {string} envVarPrefix - Environment variable prefix (e.g., "GH_AW_CLOSE_ISSUE") + * @property {string[]} contextEvents - GitHub event names for this entity context + * @property {string} contextPayloadField - The field name in context.payload (e.g., "issue") + * @property {string} urlPath - URL path segment (e.g., "issues" or "pull") + * @property {string} displayName - Human-readable display name (e.g., "issue" or "pull request") + * @property {string} displayNamePlural - Human-readable display name plural (e.g., "issues" or "pull requests") + * @property {string} displayNameCapitalized - Capitalized display name (e.g., "Issue" or "Pull Request") + * @property {string} displayNameCapitalizedPlural - Capitalized display name plural (e.g., "Issues" or "Pull Requests") + */ + +/** + * @typedef {Object} EntityCallbacks + * @property {(github: any, owner: string, repo: string, entityNumber: number) => Promise<{number: number, title: string, labels: Array<{name: string}>, html_url: string, state: string}>} getDetails + * @property {(github: any, owner: string, repo: string, entityNumber: number, message: string) => Promise<{id: number, html_url: string}>} addComment + * @property {(github: any, owner: string, repo: string, entityNumber: number) => Promise<{number: number, html_url: string, title: string}>} closeEntity + */ + +/** + * Build the run URL for the current workflow + * @returns {string} The workflow run URL + */ +function buildRunUrl() { + const runId = context.runId; + const githubServer = process.env.GITHUB_SERVER_URL || "https://github.com"; + return context.payload.repository ? `${context.payload.repository.html_url}/actions/runs/${runId}` : `${githubServer}/${context.repo.owner}/${context.repo.repo}/actions/runs/${runId}`; +} + +/** + * Build comment body with tracker ID and footer + * @param {string} body - The original comment body + * @param {number|undefined} triggeringIssueNumber - Issue number that triggered this workflow + * @param {number|undefined} triggeringPRNumber - PR number that triggered this workflow + * @returns {string} The complete comment body with tracker ID and footer + */ +function buildCommentBody(body, triggeringIssueNumber, triggeringPRNumber) { + const workflowName = process.env.GH_AW_WORKFLOW_NAME || "Workflow"; + const workflowSource = process.env.GH_AW_WORKFLOW_SOURCE || ""; + const workflowSourceURL = process.env.GH_AW_WORKFLOW_SOURCE_URL || ""; + const runUrl = buildRunUrl(); + + let commentBody = body.trim(); + commentBody += getTrackerID("markdown"); + commentBody += generateFooter(workflowName, runUrl, workflowSource, workflowSourceURL, triggeringIssueNumber, triggeringPRNumber, undefined); + + return commentBody; +} + +/** + * Check if labels match the required labels filter + * @param {Array<{name: string}>} entityLabels - Labels on the entity + * @param {string[]} requiredLabels - Required labels (any match) + * @returns {boolean} True if entity has at least one required label + */ +function checkLabelFilter(entityLabels, requiredLabels) { + if (requiredLabels.length === 0) { + return true; + } + const labelNames = entityLabels.map(l => l.name); + return requiredLabels.some(required => labelNames.includes(required)); +} + +/** + * Check if title matches the required prefix filter + * @param {string} title - Entity title + * @param {string} requiredTitlePrefix - Required title prefix + * @returns {boolean} True if title starts with required prefix + */ +function checkTitlePrefixFilter(title, requiredTitlePrefix) { + if (!requiredTitlePrefix) { + return true; + } + return title.startsWith(requiredTitlePrefix); +} + +/** + * Generate staged preview content for a close entity operation + * @param {EntityConfig} config - Entity configuration + * @param {any[]} items - Items to preview + * @param {string[]} requiredLabels - Required labels filter + * @param {string} requiredTitlePrefix - Required title prefix filter + * @returns {Promise} + */ +async function generateCloseEntityStagedPreview(config, items, requiredLabels, requiredTitlePrefix) { + let summaryContent = `## 🎭 Staged Mode: Close ${config.displayNameCapitalizedPlural} Preview\n\n`; + summaryContent += `The following ${config.displayNamePlural} would be closed if staged mode was disabled:\n\n`; + + for (let i = 0; i < items.length; i++) { + const item = items[i]; + summaryContent += `### ${config.displayNameCapitalized} ${i + 1}\n`; + + const entityNumber = item[config.numberField]; + if (entityNumber) { + const repoUrl = getRepositoryUrl(); + const entityUrl = `${repoUrl}/${config.urlPath}/${entityNumber}`; + summaryContent += `**Target ${config.displayNameCapitalized}:** [#${entityNumber}](${entityUrl})\n\n`; + } else { + summaryContent += `**Target:** Current ${config.displayName}\n\n`; + } + + summaryContent += `**Comment:**\n${item.body || "No content provided"}\n\n`; + + if (requiredLabels.length > 0) { + summaryContent += `**Required Labels:** ${requiredLabels.join(", ")}\n\n`; + } + if (requiredTitlePrefix) { + summaryContent += `**Required Title Prefix:** ${requiredTitlePrefix}\n\n`; + } + + summaryContent += "---\n\n"; + } + + // Write to step summary + await core.summary.addRaw(summaryContent).write(); + core.info(`📝 ${config.displayNameCapitalized} close preview written to step summary`); +} + +/** + * Parse configuration from environment variables + * @param {string} envVarPrefix - Environment variable prefix + * @returns {{requiredLabels: string[], requiredTitlePrefix: string, target: string}} + */ +function parseEntityConfig(envVarPrefix) { + const labelsEnvVar = `${envVarPrefix}_REQUIRED_LABELS`; + const titlePrefixEnvVar = `${envVarPrefix}_REQUIRED_TITLE_PREFIX`; + const targetEnvVar = `${envVarPrefix}_TARGET`; + + const requiredLabels = process.env[labelsEnvVar] ? process.env[labelsEnvVar].split(",").map(l => l.trim()) : []; + const requiredTitlePrefix = process.env[titlePrefixEnvVar] || ""; + const target = process.env[targetEnvVar] || "triggering"; + + return { requiredLabels, requiredTitlePrefix, target }; +} + +/** + * Resolve the entity number based on target configuration and context + * @param {EntityConfig} config - Entity configuration + * @param {string} target - Target configuration ("triggering", "*", or explicit number) + * @param {any} item - The agent output item + * @param {boolean} isEntityContext - Whether we're in the correct entity context + * @returns {{success: true, number: number} | {success: false, message: string}} + */ +function resolveEntityNumber(config, target, item, isEntityContext) { + if (target === "*") { + const targetNumber = item[config.numberField]; + if (targetNumber) { + const parsed = parseInt(targetNumber, 10); + if (isNaN(parsed) || parsed <= 0) { + return { + success: false, + message: `Invalid ${config.displayName} number specified: ${targetNumber}`, + }; + } + return { success: true, number: parsed }; + } + return { + success: false, + message: `Target is "*" but no ${config.numberField} specified in ${config.itemTypeDisplay} item`, + }; + } + + if (target !== "triggering") { + const parsed = parseInt(target, 10); + if (isNaN(parsed) || parsed <= 0) { + return { + success: false, + message: `Invalid ${config.displayName} number in target configuration: ${target}`, + }; + } + return { success: true, number: parsed }; + } + + // Default behavior: use triggering entity + if (isEntityContext) { + const number = context.payload[config.contextPayloadField]?.number; + if (!number) { + return { + success: false, + message: `${config.displayNameCapitalized} context detected but no ${config.displayName} found in payload`, + }; + } + return { success: true, number }; + } + + return { + success: false, + message: `Not in ${config.displayName} context and no explicit target specified`, + }; +} + +/** + * Escape special markdown characters in a title + * @param {string} title - The title to escape + * @returns {string} Escaped title + */ +function escapeMarkdownTitle(title) { + return title.replace(/[[\]()]/g, "\\$&"); +} + +/** + * Process close entity items from agent output + * @param {EntityConfig} config - Entity configuration + * @param {EntityCallbacks} callbacks - Entity-specific API callbacks + * @returns {Promise|undefined>} + */ +async function processCloseEntityItems(config, callbacks) { + // Check if we're in staged mode + const isStaged = process.env.GH_AW_SAFE_OUTPUTS_STAGED === "true"; + + const result = loadAgentOutput(); + if (!result.success) { + return; + } + + // Find all items of this type + const items = result.items.filter(/** @param {any} item */ item => item.type === config.itemType); + if (items.length === 0) { + core.info(`No ${config.itemTypeDisplay} items found in agent output`); + return; + } + + core.info(`Found ${items.length} ${config.itemTypeDisplay} item(s)`); + + // Get configuration from environment + const { requiredLabels, requiredTitlePrefix, target } = parseEntityConfig(config.envVarPrefix); + + core.info(`Configuration: requiredLabels=${requiredLabels.join(",")}, requiredTitlePrefix=${requiredTitlePrefix}, target=${target}`); + + // Check if we're in the correct entity context + const isEntityContext = config.contextEvents.some(event => context.eventName === event); + + // If in staged mode, emit step summary instead of closing entities + if (isStaged) { + await generateCloseEntityStagedPreview(config, items, requiredLabels, requiredTitlePrefix); + return; + } + + // Validate context based on target configuration + if (target === "triggering" && !isEntityContext) { + core.info(`Target is "triggering" but not running in ${config.displayName} context, skipping ${config.displayName} close`); + return; + } + + // Extract triggering context for footer generation + const triggeringIssueNumber = context.payload?.issue?.number; + const triggeringPRNumber = context.payload?.pull_request?.number; + + const closedEntities = []; + + // Process each item + for (let i = 0; i < items.length; i++) { + const item = items[i]; + core.info(`Processing ${config.itemTypeDisplay} item ${i + 1}/${items.length}: bodyLength=${item.body.length}`); + + // Resolve entity number + const resolved = resolveEntityNumber(config, target, item, isEntityContext); + if (!resolved.success) { + core.info(resolved.message); + continue; + } + const entityNumber = resolved.number; + + try { + // Fetch entity details to check filters + const entity = await callbacks.getDetails(github, context.repo.owner, context.repo.repo, entityNumber); + + // Apply label filter + if (!checkLabelFilter(entity.labels, requiredLabels)) { + core.info(`${config.displayNameCapitalized} #${entityNumber} does not have required labels: ${requiredLabels.join(", ")}`); + continue; + } + + // Apply title prefix filter + if (!checkTitlePrefixFilter(entity.title, requiredTitlePrefix)) { + core.info(`${config.displayNameCapitalized} #${entityNumber} does not have required title prefix: ${requiredTitlePrefix}`); + continue; + } + + // Check if already closed + if (entity.state === "closed") { + core.info(`${config.displayNameCapitalized} #${entityNumber} is already closed, skipping`); + continue; + } + + // Build comment body + const commentBody = buildCommentBody(item.body, triggeringIssueNumber, triggeringPRNumber); + + // Add comment before closing + const comment = await callbacks.addComment(github, context.repo.owner, context.repo.repo, entityNumber, commentBody); + core.info(`✓ Added comment to ${config.displayName} #${entityNumber}: ${comment.html_url}`); + + // Close the entity + const closedEntity = await callbacks.closeEntity(github, context.repo.owner, context.repo.repo, entityNumber); + core.info(`✓ Closed ${config.displayName} #${entityNumber}: ${closedEntity.html_url}`); + + closedEntities.push({ + entity: closedEntity, + comment, + }); + + // Set outputs for the last closed entity (for backward compatibility) + if (i === items.length - 1) { + const numberOutputName = config.entityType === "issue" ? "issue_number" : "pull_request_number"; + const urlOutputName = config.entityType === "issue" ? "issue_url" : "pull_request_url"; + core.setOutput(numberOutputName, closedEntity.number); + core.setOutput(urlOutputName, closedEntity.html_url); + core.setOutput("comment_url", comment.html_url); + } + } catch (error) { + core.error(`✗ Failed to close ${config.displayName} #${entityNumber}: ${error instanceof Error ? error.message : String(error)}`); + throw error; + } + } + + // Write summary for all closed entities + if (closedEntities.length > 0) { + let summaryContent = `\n\n## Closed ${config.displayNameCapitalizedPlural}\n`; + for (const { entity, comment } of closedEntities) { + const escapedTitle = escapeMarkdownTitle(entity.title); + summaryContent += `- ${config.displayNameCapitalized} #${entity.number}: [${escapedTitle}](${entity.html_url}) ([comment](${comment.html_url}))\n`; + } + await core.summary.addRaw(summaryContent).write(); + } + + core.info(`Successfully closed ${closedEntities.length} ${config.displayName}(s)`); + return closedEntities; +} + +/** + * Configuration for closing issues + * @type {EntityConfig} + */ +const ISSUE_CONFIG = { + entityType: "issue", + itemType: "close_issue", + itemTypeDisplay: "close-issue", + numberField: "issue_number", + envVarPrefix: "GH_AW_CLOSE_ISSUE", + contextEvents: ["issues", "issue_comment"], + contextPayloadField: "issue", + urlPath: "issues", + displayName: "issue", + displayNamePlural: "issues", + displayNameCapitalized: "Issue", + displayNameCapitalizedPlural: "Issues", +}; + +/** + * Configuration for closing pull requests + * @type {EntityConfig} + */ +const PULL_REQUEST_CONFIG = { + entityType: "pull_request", + itemType: "close_pull_request", + itemTypeDisplay: "close-pull-request", + numberField: "pull_request_number", + envVarPrefix: "GH_AW_CLOSE_PR", + contextEvents: ["pull_request", "pull_request_review_comment"], + contextPayloadField: "pull_request", + urlPath: "pull", + displayName: "pull request", + displayNamePlural: "pull requests", + displayNameCapitalized: "Pull Request", + displayNameCapitalizedPlural: "Pull Requests", +}; + +module.exports = { + processCloseEntityItems, + generateCloseEntityStagedPreview, + checkLabelFilter, + checkTitlePrefixFilter, + parseEntityConfig, + resolveEntityNumber, + buildCommentBody, + escapeMarkdownTitle, + ISSUE_CONFIG, + PULL_REQUEST_CONFIG, +}; diff --git a/pkg/workflow/js/close_expired_discussions.cjs b/pkg/workflow/js/close_expired_discussions.cjs new file mode 100644 index 0000000000..0cd3db2dec --- /dev/null +++ b/pkg/workflow/js/close_expired_discussions.cjs @@ -0,0 +1,282 @@ +// @ts-check +// + +/** + * Maximum number of discussions to update per run + */ +const MAX_UPDATES_PER_RUN = 100; + +/** + * Delay between GraphQL API calls in milliseconds to avoid rate limiting + */ +const GRAPHQL_DELAY_MS = 500; + +/** + * Delay execution for a specified number of milliseconds + * @param {number} ms - Milliseconds to delay + * @returns {Promise} + */ +function delay(ms) { + return new Promise(resolve => setTimeout(resolve, ms)); +} + +/** + * Search for open discussions with expiration markers + * @param {any} github - GitHub GraphQL instance + * @param {string} owner - Repository owner + * @param {string} repo - Repository name + * @returns {Promise>} Matching discussions + */ +async function searchDiscussionsWithExpiration(github, owner, repo) { + const discussions = []; + let hasNextPage = true; + let cursor = null; + + while (hasNextPage) { + const query = ` + query($owner: String!, $repo: String!, $cursor: String) { + repository(owner: $owner, name: $repo) { + discussions(first: 100, after: $cursor, states: [OPEN]) { + pageInfo { + hasNextPage + endCursor + } + nodes { + id + number + title + url + body + createdAt + } + } + } + } + `; + + const result = await github.graphql(query, { + owner: owner, + repo: repo, + cursor: cursor, + }); + + if (!result || !result.repository || !result.repository.discussions) { + break; + } + + const nodes = result.repository.discussions.nodes || []; + + // Filter for discussions with agentic workflow markers and expiration comments + for (const discussion of nodes) { + // Check if created by an agentic workflow (body contains "> AI generated by" at start of line) + const agenticPattern = /^> AI generated by/m; + const isAgenticWorkflow = discussion.body && agenticPattern.test(discussion.body); + + if (!isAgenticWorkflow) { + continue; + } + + // Check if has expiration marker + const expirationPattern = //; + const match = discussion.body ? discussion.body.match(expirationPattern) : null; + + if (match) { + discussions.push(discussion); + } + } + + hasNextPage = result.repository.discussions.pageInfo.hasNextPage; + cursor = result.repository.discussions.pageInfo.endCursor; + } + + return discussions; +} + +/** + * Extract expiration date from discussion body + * @param {string} body - Discussion body + * @returns {Date|null} Expiration date or null if not found/invalid + */ +function extractExpirationDate(body) { + const expirationPattern = //; + const match = body.match(expirationPattern); + + if (!match) { + return null; + } + + const expirationISO = match[1].trim(); + const expirationDate = new Date(expirationISO); + + // Validate the date + if (isNaN(expirationDate.getTime())) { + return null; + } + + return expirationDate; +} + +/** + * Validate discussion creation date + * @param {string} createdAt - ISO 8601 creation date + * @returns {boolean} True if valid + */ +function validateCreationDate(createdAt) { + const creationDate = new Date(createdAt); + return !isNaN(creationDate.getTime()); +} + +/** + * Add comment to a GitHub Discussion using GraphQL + * @param {any} github - GitHub GraphQL instance + * @param {string} discussionId - Discussion node ID + * @param {string} message - Comment body + * @returns {Promise<{id: string, url: string}>} Comment details + */ +async function addDiscussionComment(github, discussionId, message) { + const result = await github.graphql( + ` + mutation($dId: ID!, $body: String!) { + addDiscussionComment(input: { discussionId: $dId, body: $body }) { + comment { + id + url + } + } + }`, + { dId: discussionId, body: message } + ); + + return result.addDiscussionComment.comment; +} + +/** + * Close a GitHub Discussion as OUTDATED using GraphQL + * @param {any} github - GitHub GraphQL instance + * @param {string} discussionId - Discussion node ID + * @returns {Promise<{id: string, url: string}>} Discussion details + */ +async function closeDiscussionAsOutdated(github, discussionId) { + const result = await github.graphql( + ` + mutation($dId: ID!) { + closeDiscussion(input: { discussionId: $dId, reason: OUTDATED }) { + discussion { + id + url + } + } + }`, + { dId: discussionId } + ); + + return result.closeDiscussion.discussion; +} + +async function main() { + const owner = context.repo.owner; + const repo = context.repo.repo; + + core.info(`Searching for expired discussions in ${owner}/${repo}`); + + // Search for discussions with expiration markers + const discussionsWithExpiration = await searchDiscussionsWithExpiration(github, owner, repo); + + if (discussionsWithExpiration.length === 0) { + core.info("No discussions with expiration markers found"); + return; + } + + core.info(`Found ${discussionsWithExpiration.length} discussion(s) with expiration markers`); + + // Check which discussions are expired + const now = new Date(); + const expiredDiscussions = []; + + for (const discussion of discussionsWithExpiration) { + // Validate creation date + if (!validateCreationDate(discussion.createdAt)) { + core.warning(`Discussion #${discussion.number} has invalid creation date, skipping`); + continue; + } + + // Extract and validate expiration date + const expirationDate = extractExpirationDate(discussion.body); + if (!expirationDate) { + core.warning(`Discussion #${discussion.number} has invalid expiration date, skipping`); + continue; + } + + // Check if expired + if (now >= expirationDate) { + expiredDiscussions.push({ + ...discussion, + expirationDate: expirationDate, + }); + } + } + + if (expiredDiscussions.length === 0) { + core.info("No expired discussions found"); + return; + } + + core.info(`Found ${expiredDiscussions.length} expired discussion(s)`); + + // Limit to MAX_UPDATES_PER_RUN + const discussionsToClose = expiredDiscussions.slice(0, MAX_UPDATES_PER_RUN); + + if (expiredDiscussions.length > MAX_UPDATES_PER_RUN) { + core.warning(`Found ${expiredDiscussions.length} expired discussions, but only closing the first ${MAX_UPDATES_PER_RUN}`); + } + + let closedCount = 0; + const closedDiscussions = []; + + for (let i = 0; i < discussionsToClose.length; i++) { + const discussion = discussionsToClose[i]; + + try { + const closingMessage = `This discussion was automatically closed because it expired on ${discussion.expirationDate.toISOString()}.`; + + // Add comment first + core.info(`Adding closing comment to discussion #${discussion.number}`); + await addDiscussionComment(github, discussion.id, closingMessage); + + // Then close the discussion as outdated + core.info(`Closing discussion #${discussion.number} as outdated`); + await closeDiscussionAsOutdated(github, discussion.id); + + closedDiscussions.push({ + number: discussion.number, + url: discussion.url, + title: discussion.title, + }); + + closedCount++; + core.info(`✓ Closed discussion #${discussion.number}: ${discussion.url}`); + } catch (error) { + core.error(`✗ Failed to close discussion #${discussion.number}: ${error instanceof Error ? error.message : String(error)}`); + // Continue with other discussions even if one fails + } + + // Add delay between GraphQL operations to avoid rate limiting (except for the last item) + if (i < discussionsToClose.length - 1) { + await delay(GRAPHQL_DELAY_MS); + } + } + + // Write summary + if (closedCount > 0) { + let summaryContent = `## Closed Expired Discussions\n\n`; + summaryContent += `Closed **${closedCount}** expired discussion(s):\n\n`; + for (const closed of closedDiscussions) { + summaryContent += `- Discussion #${closed.number}: [${closed.title}](${closed.url})\n`; + } + await core.summary.addRaw(summaryContent).write(); + } + + core.info(`Successfully closed ${closedCount} expired discussion(s)`); +} + +module.exports = { main }; diff --git a/pkg/workflow/js/close_expired_issues.cjs b/pkg/workflow/js/close_expired_issues.cjs new file mode 100644 index 0000000000..63436f3668 --- /dev/null +++ b/pkg/workflow/js/close_expired_issues.cjs @@ -0,0 +1,275 @@ +// @ts-check +// + +/** + * Maximum number of issues to update per run + */ +const MAX_UPDATES_PER_RUN = 100; + +/** + * Delay between GraphQL API calls in milliseconds to avoid rate limiting + */ +const GRAPHQL_DELAY_MS = 500; + +/** + * Delay execution for a specified number of milliseconds + * @param {number} ms - Milliseconds to delay + * @returns {Promise} + */ +function delay(ms) { + return new Promise(resolve => setTimeout(resolve, ms)); +} + +/** + * Search for open issues with expiration markers + * @param {any} github - GitHub GraphQL instance + * @param {string} owner - Repository owner + * @param {string} repo - Repository name + * @returns {Promise>} Matching issues + */ +async function searchIssuesWithExpiration(github, owner, repo) { + const issues = []; + let hasNextPage = true; + let cursor = null; + + while (hasNextPage) { + const query = ` + query($owner: String!, $repo: String!, $cursor: String) { + repository(owner: $owner, name: $repo) { + issues(first: 100, after: $cursor, states: [OPEN]) { + pageInfo { + hasNextPage + endCursor + } + nodes { + id + number + title + url + body + createdAt + } + } + } + } + `; + + const result = await github.graphql(query, { + owner: owner, + repo: repo, + cursor: cursor, + }); + + if (!result || !result.repository || !result.repository.issues) { + break; + } + + const nodes = result.repository.issues.nodes || []; + + // Filter for issues with agentic workflow markers and expiration comments + for (const issue of nodes) { + // Check if created by an agentic workflow (body contains "> AI generated by" at start of line) + const agenticPattern = /^> AI generated by/m; + const isAgenticWorkflow = issue.body && agenticPattern.test(issue.body); + + if (!isAgenticWorkflow) { + continue; + } + + // Check if has expiration marker + const expirationPattern = //; + const match = issue.body ? issue.body.match(expirationPattern) : null; + + if (match) { + issues.push(issue); + } + } + + hasNextPage = result.repository.issues.pageInfo.hasNextPage; + cursor = result.repository.issues.pageInfo.endCursor; + } + + return issues; +} + +/** + * Extract expiration date from issue body + * @param {string} body - Issue body + * @returns {Date|null} Expiration date or null if not found/invalid + */ +function extractExpirationDate(body) { + const expirationPattern = //; + const match = body.match(expirationPattern); + + if (!match) { + return null; + } + + const expirationISO = match[1].trim(); + const expirationDate = new Date(expirationISO); + + // Validate the date + if (isNaN(expirationDate.getTime())) { + return null; + } + + return expirationDate; +} + +/** + * Validate issue creation date + * @param {string} createdAt - ISO 8601 creation date + * @returns {boolean} True if valid + */ +function validateCreationDate(createdAt) { + const creationDate = new Date(createdAt); + return !isNaN(creationDate.getTime()); +} + +/** + * Add comment to a GitHub Issue using REST API + * @param {any} github - GitHub REST instance + * @param {string} owner - Repository owner + * @param {string} repo - Repository name + * @param {number} issueNumber - Issue number + * @param {string} message - Comment body + * @returns {Promise} Comment details + */ +async function addIssueComment(github, owner, repo, issueNumber, message) { + const result = await github.rest.issues.createComment({ + owner: owner, + repo: repo, + issue_number: issueNumber, + body: message, + }); + + return result.data; +} + +/** + * Close a GitHub Issue using REST API + * @param {any} github - GitHub REST instance + * @param {string} owner - Repository owner + * @param {string} repo - Repository name + * @param {number} issueNumber - Issue number + * @returns {Promise} Issue details + */ +async function closeIssue(github, owner, repo, issueNumber) { + const result = await github.rest.issues.update({ + owner: owner, + repo: repo, + issue_number: issueNumber, + state: "closed", + state_reason: "not_planned", + }); + + return result.data; +} + +async function main() { + const owner = context.repo.owner; + const repo = context.repo.repo; + + core.info(`Searching for expired issues in ${owner}/${repo}`); + + // Search for issues with expiration markers + const issuesWithExpiration = await searchIssuesWithExpiration(github, owner, repo); + + if (issuesWithExpiration.length === 0) { + core.info("No issues with expiration markers found"); + return; + } + + core.info(`Found ${issuesWithExpiration.length} issue(s) with expiration markers`); + + // Check which issues are expired + const now = new Date(); + const expiredIssues = []; + + for (const issue of issuesWithExpiration) { + // Validate creation date + if (!validateCreationDate(issue.createdAt)) { + core.warning(`Issue #${issue.number} has invalid creation date, skipping`); + continue; + } + + // Extract and validate expiration date + const expirationDate = extractExpirationDate(issue.body); + if (!expirationDate) { + core.warning(`Issue #${issue.number} has invalid expiration date, skipping`); + continue; + } + + // Check if expired + if (now >= expirationDate) { + expiredIssues.push({ + ...issue, + expirationDate: expirationDate, + }); + } + } + + if (expiredIssues.length === 0) { + core.info("No expired issues found"); + return; + } + + core.info(`Found ${expiredIssues.length} expired issue(s)`); + + // Limit to MAX_UPDATES_PER_RUN + const issuesToClose = expiredIssues.slice(0, MAX_UPDATES_PER_RUN); + + if (expiredIssues.length > MAX_UPDATES_PER_RUN) { + core.warning(`Found ${expiredIssues.length} expired issues, but only closing the first ${MAX_UPDATES_PER_RUN}`); + } + + let closedCount = 0; + const closedIssues = []; + + for (let i = 0; i < issuesToClose.length; i++) { + const issue = issuesToClose[i]; + + try { + const closingMessage = `This issue was automatically closed because it expired on ${issue.expirationDate.toISOString()}.`; + + // Add comment first + core.info(`Adding closing comment to issue #${issue.number}`); + await addIssueComment(github, owner, repo, issue.number, closingMessage); + + // Then close the issue as not planned + core.info(`Closing issue #${issue.number} as not planned`); + await closeIssue(github, owner, repo, issue.number); + + closedIssues.push({ + number: issue.number, + url: issue.url, + title: issue.title, + }); + + closedCount++; + core.info(`✓ Closed issue #${issue.number}: ${issue.url}`); + } catch (error) { + core.error(`✗ Failed to close issue #${issue.number}: ${error instanceof Error ? error.message : String(error)}`); + // Continue with other issues even if one fails + } + + // Add delay between GraphQL operations to avoid rate limiting (except for the last item) + if (i < issuesToClose.length - 1) { + await delay(GRAPHQL_DELAY_MS); + } + } + + // Write summary + if (closedCount > 0) { + let summaryContent = `## Closed Expired Issues\n\n`; + summaryContent += `Closed **${closedCount}** expired issue(s):\n\n`; + for (const closed of closedIssues) { + summaryContent += `- Issue #${closed.number}: [${closed.title}](${closed.url})\n`; + } + await core.summary.addRaw(summaryContent).write(); + } + + core.info(`Successfully closed ${closedCount} expired issue(s)`); +} + +module.exports = { main }; diff --git a/pkg/workflow/js/close_issue.cjs b/pkg/workflow/js/close_issue.cjs new file mode 100644 index 0000000000..557007de80 --- /dev/null +++ b/pkg/workflow/js/close_issue.cjs @@ -0,0 +1,75 @@ +// @ts-check +/// + +const { processCloseEntityItems, ISSUE_CONFIG } = require("./close_entity_helpers.cjs"); + +/** + * Get issue details using REST API + * @param {any} github - GitHub REST API instance + * @param {string} owner - Repository owner + * @param {string} repo - Repository name + * @param {number} issueNumber - Issue number + * @returns {Promise<{number: number, title: string, labels: Array<{name: string}>, html_url: string, state: string}>} Issue details + */ +async function getIssueDetails(github, owner, repo, issueNumber) { + const { data: issue } = await github.rest.issues.get({ + owner, + repo, + issue_number: issueNumber, + }); + + if (!issue) { + throw new Error(`Issue #${issueNumber} not found in ${owner}/${repo}`); + } + + return issue; +} + +/** + * Add comment to a GitHub Issue using REST API + * @param {any} github - GitHub REST API instance + * @param {string} owner - Repository owner + * @param {string} repo - Repository name + * @param {number} issueNumber - Issue number + * @param {string} message - Comment body + * @returns {Promise<{id: number, html_url: string}>} Comment details + */ +async function addIssueComment(github, owner, repo, issueNumber, message) { + const { data: comment } = await github.rest.issues.createComment({ + owner, + repo, + issue_number: issueNumber, + body: message, + }); + + return comment; +} + +/** + * Close a GitHub Issue using REST API + * @param {any} github - GitHub REST API instance + * @param {string} owner - Repository owner + * @param {string} repo - Repository name + * @param {number} issueNumber - Issue number + * @returns {Promise<{number: number, html_url: string, title: string}>} Issue details + */ +async function closeIssue(github, owner, repo, issueNumber) { + const { data: issue } = await github.rest.issues.update({ + owner, + repo, + issue_number: issueNumber, + state: "closed", + }); + + return issue; +} + +async function main() { + return processCloseEntityItems(ISSUE_CONFIG, { + getDetails: getIssueDetails, + addComment: addIssueComment, + closeEntity: closeIssue, + }); +} + +module.exports = { main }; diff --git a/pkg/workflow/js/close_older_discussions.cjs b/pkg/workflow/js/close_older_discussions.cjs new file mode 100644 index 0000000000..b5aeda6a73 --- /dev/null +++ b/pkg/workflow/js/close_older_discussions.cjs @@ -0,0 +1,265 @@ +// @ts-check +/// + +const { getCloseOlderDiscussionMessage } = require("./messages_close_discussion.cjs"); + +/** + * Maximum number of older discussions to close + */ +const MAX_CLOSE_COUNT = 10; + +/** + * Delay between GraphQL API calls in milliseconds to avoid rate limiting + */ +const GRAPHQL_DELAY_MS = 500; + +/** + * Delay execution for a specified number of milliseconds + * @param {number} ms - Milliseconds to delay + * @returns {Promise} + */ +function delay(ms) { + return new Promise(resolve => setTimeout(resolve, ms)); +} + +/** + * Search for open discussions with a matching title prefix and/or labels + * @param {any} github - GitHub GraphQL instance + * @param {string} owner - Repository owner + * @param {string} repo - Repository name + * @param {string} titlePrefix - Title prefix to match (empty string to skip prefix matching) + * @param {string[]} labels - Labels to match (empty array to skip label matching) + * @param {string|undefined} categoryId - Optional category ID to filter by + * @param {number} excludeNumber - Discussion number to exclude (the newly created one) + * @returns {Promise>} Matching discussions + */ +async function searchOlderDiscussions(github, owner, repo, titlePrefix, labels, categoryId, excludeNumber) { + // Build GraphQL search query + // Search for open discussions, optionally with title prefix or labels + let searchQuery = `repo:${owner}/${repo} is:open`; + + if (titlePrefix) { + // Escape quotes in title prefix to prevent query injection + const escapedPrefix = titlePrefix.replace(/"/g, '\\"'); + searchQuery += ` in:title "${escapedPrefix}"`; + } + + // Add label filters to the search query + // Note: GitHub search uses AND logic for multiple labels, so discussions must have ALL labels. + // We add each label as a separate filter and also validate client-side for extra safety. + if (labels && labels.length > 0) { + for (const label of labels) { + // Escape quotes in label names to prevent query injection + const escapedLabel = label.replace(/"/g, '\\"'); + searchQuery += ` label:"${escapedLabel}"`; + } + } + + const result = await github.graphql( + ` + query($searchTerms: String!, $first: Int!) { + search(query: $searchTerms, type: DISCUSSION, first: $first) { + nodes { + ... on Discussion { + id + number + title + url + category { + id + } + labels(first: 100) { + nodes { + name + } + } + closed + } + } + } + }`, + { searchTerms: searchQuery, first: 50 } + ); + + if (!result || !result.search || !result.search.nodes) { + return []; + } + + // Filter results: + // 1. Must not be the excluded discussion (newly created one) + // 2. Must not be already closed + // 3. If titlePrefix is specified, must have title starting with the prefix + // 4. If labels are specified, must have ALL specified labels (AND logic, not OR) + // 5. If categoryId is specified, must match + return result.search.nodes + .filter( + /** @param {any} d */ d => { + if (!d || d.number === excludeNumber || d.closed) { + return false; + } + + // Check title prefix if specified + if (titlePrefix && d.title && !d.title.startsWith(titlePrefix)) { + return false; + } + + // Check labels if specified - requires ALL labels to match (AND logic) + // This is intentional: we only want to close discussions that have ALL the specified labels + if (labels && labels.length > 0) { + const discussionLabels = d.labels?.nodes?.map((/** @type {{name: string}} */ l) => l.name) || []; + const hasAllLabels = labels.every(label => discussionLabels.includes(label)); + if (!hasAllLabels) { + return false; + } + } + + // Check category if specified + if (categoryId && (!d.category || d.category.id !== categoryId)) { + return false; + } + + return true; + } + ) + .map( + /** @param {any} d */ d => ({ + id: d.id, + number: d.number, + title: d.title, + url: d.url, + }) + ); +} + +/** + * Add comment to a GitHub Discussion using GraphQL + * @param {any} github - GitHub GraphQL instance + * @param {string} discussionId - Discussion node ID + * @param {string} message - Comment body + * @returns {Promise<{id: string, url: string}>} Comment details + */ +async function addDiscussionComment(github, discussionId, message) { + const result = await github.graphql( + ` + mutation($dId: ID!, $body: String!) { + addDiscussionComment(input: { discussionId: $dId, body: $body }) { + comment { + id + url + } + } + }`, + { dId: discussionId, body: message } + ); + + return result.addDiscussionComment.comment; +} + +/** + * Close a GitHub Discussion as OUTDATED using GraphQL + * @param {any} github - GitHub GraphQL instance + * @param {string} discussionId - Discussion node ID + * @returns {Promise<{id: string, url: string}>} Discussion details + */ +async function closeDiscussionAsOutdated(github, discussionId) { + const result = await github.graphql( + ` + mutation($dId: ID!) { + closeDiscussion(input: { discussionId: $dId, reason: OUTDATED }) { + discussion { + id + url + } + } + }`, + { dId: discussionId } + ); + + return result.closeDiscussion.discussion; +} + +/** + * Close older discussions that match the title prefix and/or labels + * @param {any} github - GitHub GraphQL instance + * @param {string} owner - Repository owner + * @param {string} repo - Repository name + * @param {string} titlePrefix - Title prefix to match (empty string to skip) + * @param {string[]} labels - Labels to match (empty array to skip) + * @param {string|undefined} categoryId - Optional category ID to filter by + * @param {{number: number, url: string}} newDiscussion - The newly created discussion + * @param {string} workflowName - Name of the workflow + * @param {string} runUrl - URL of the workflow run + * @returns {Promise>} List of closed discussions + */ +async function closeOlderDiscussions(github, owner, repo, titlePrefix, labels, categoryId, newDiscussion, workflowName, runUrl) { + // Build search criteria description for logging + const searchCriteria = []; + if (titlePrefix) searchCriteria.push(`title prefix: "${titlePrefix}"`); + if (labels && labels.length > 0) searchCriteria.push(`labels: [${labels.join(", ")}]`); + core.info(`Searching for older discussions with ${searchCriteria.join(" and ")}`); + + const olderDiscussions = await searchOlderDiscussions(github, owner, repo, titlePrefix, labels, categoryId, newDiscussion.number); + + if (olderDiscussions.length === 0) { + core.info("No older discussions found to close"); + return []; + } + + core.info(`Found ${olderDiscussions.length} older discussion(s) to close`); + + // Limit to MAX_CLOSE_COUNT discussions + const discussionsToClose = olderDiscussions.slice(0, MAX_CLOSE_COUNT); + + if (olderDiscussions.length > MAX_CLOSE_COUNT) { + core.warning(`Found ${olderDiscussions.length} older discussions, but only closing the first ${MAX_CLOSE_COUNT}`); + } + + const closedDiscussions = []; + + for (let i = 0; i < discussionsToClose.length; i++) { + const discussion = discussionsToClose[i]; + try { + // Generate closing message using the messages module + const closingMessage = getCloseOlderDiscussionMessage({ + newDiscussionUrl: newDiscussion.url, + newDiscussionNumber: newDiscussion.number, + workflowName, + runUrl, + }); + + // Add comment first + core.info(`Adding closing comment to discussion #${discussion.number}`); + await addDiscussionComment(github, discussion.id, closingMessage); + + // Then close the discussion as outdated + core.info(`Closing discussion #${discussion.number} as outdated`); + await closeDiscussionAsOutdated(github, discussion.id); + + closedDiscussions.push({ + number: discussion.number, + url: discussion.url, + }); + + core.info(`✓ Closed discussion #${discussion.number}: ${discussion.url}`); + } catch (error) { + core.error(`✗ Failed to close discussion #${discussion.number}: ${error instanceof Error ? error.message : String(error)}`); + // Continue with other discussions even if one fails + } + + // Add delay between GraphQL operations to avoid rate limiting (except for the last item) + if (i < discussionsToClose.length - 1) { + await delay(GRAPHQL_DELAY_MS); + } + } + + return closedDiscussions; +} + +module.exports = { + closeOlderDiscussions, + searchOlderDiscussions, + addDiscussionComment, + closeDiscussionAsOutdated, + MAX_CLOSE_COUNT, + GRAPHQL_DELAY_MS, +}; diff --git a/pkg/workflow/js/close_pull_request.cjs b/pkg/workflow/js/close_pull_request.cjs new file mode 100644 index 0000000000..f293beec87 --- /dev/null +++ b/pkg/workflow/js/close_pull_request.cjs @@ -0,0 +1,75 @@ +// @ts-check +/// + +const { processCloseEntityItems, PULL_REQUEST_CONFIG } = require("./close_entity_helpers.cjs"); + +/** + * Get pull request details using REST API + * @param {any} github - GitHub REST API instance + * @param {string} owner - Repository owner + * @param {string} repo - Repository name + * @param {number} prNumber - Pull request number + * @returns {Promise<{number: number, title: string, labels: Array<{name: string}>, html_url: string, state: string}>} Pull request details + */ +async function getPullRequestDetails(github, owner, repo, prNumber) { + const { data: pr } = await github.rest.pulls.get({ + owner, + repo, + pull_number: prNumber, + }); + + if (!pr) { + throw new Error(`Pull request #${prNumber} not found in ${owner}/${repo}`); + } + + return pr; +} + +/** + * Add comment to a GitHub Pull Request using REST API + * @param {any} github - GitHub REST API instance + * @param {string} owner - Repository owner + * @param {string} repo - Repository name + * @param {number} prNumber - Pull request number + * @param {string} message - Comment body + * @returns {Promise<{id: number, html_url: string}>} Comment details + */ +async function addPullRequestComment(github, owner, repo, prNumber, message) { + const { data: comment } = await github.rest.issues.createComment({ + owner, + repo, + issue_number: prNumber, + body: message, + }); + + return comment; +} + +/** + * Close a GitHub Pull Request using REST API + * @param {any} github - GitHub REST API instance + * @param {string} owner - Repository owner + * @param {string} repo - Repository name + * @param {number} prNumber - Pull request number + * @returns {Promise<{number: number, html_url: string, title: string}>} Pull request details + */ +async function closePullRequest(github, owner, repo, prNumber) { + const { data: pr } = await github.rest.pulls.update({ + owner, + repo, + pull_number: prNumber, + state: "closed", + }); + + return pr; +} + +async function main() { + return processCloseEntityItems(PULL_REQUEST_CONFIG, { + getDetails: getPullRequestDetails, + addComment: addPullRequestComment, + closeEntity: closePullRequest, + }); +} + +module.exports = { main }; diff --git a/pkg/workflow/js/collect_ndjson_output.cjs b/pkg/workflow/js/collect_ndjson_output.cjs new file mode 100644 index 0000000000..d53305392f --- /dev/null +++ b/pkg/workflow/js/collect_ndjson_output.cjs @@ -0,0 +1,359 @@ +// @ts-check +/// + +async function main() { + const fs = require("fs"); + const { sanitizeContent } = require("./sanitize_content.cjs"); + const { validateItem, getMaxAllowedForType, getMinRequiredForType, hasValidationConfig, MAX_BODY_LENGTH: maxBodyLength, resetValidationConfigCache } = require("./safe_output_type_validator.cjs"); + const { resolveAllowedMentionsFromPayload } = require("./resolve_mentions_from_payload.cjs"); + + // Load validation config from file and set it in environment for the validator to read + const validationConfigPath = process.env.GH_AW_VALIDATION_CONFIG_PATH || "/tmp/gh-aw/safeoutputs/validation.json"; + let validationConfig = null; + try { + if (fs.existsSync(validationConfigPath)) { + const validationConfigContent = fs.readFileSync(validationConfigPath, "utf8"); + process.env.GH_AW_VALIDATION_CONFIG = validationConfigContent; + validationConfig = JSON.parse(validationConfigContent); + resetValidationConfigCache(); // Reset cache so it reloads from new env var + core.info(`Loaded validation config from ${validationConfigPath}`); + } + } catch (error) { + core.warning(`Failed to read validation config from ${validationConfigPath}: ${error instanceof Error ? error.message : String(error)}`); + } + + // Extract mentions configuration from validation config + const mentionsConfig = validationConfig?.mentions || null; + + // Resolve allowed mentions for the output collector + // This determines which @mentions are allowed in the agent output + const allowedMentions = await resolveAllowedMentionsFromPayload(context, github, core, mentionsConfig); + + function repairJson(jsonStr) { + let repaired = jsonStr.trim(); + const _ctrl = { 8: "\\b", 9: "\\t", 10: "\\n", 12: "\\f", 13: "\\r" }; + repaired = repaired.replace(/[\u0000-\u001F]/g, ch => { + const c = ch.charCodeAt(0); + return _ctrl[c] || "\\u" + c.toString(16).padStart(4, "0"); + }); + repaired = repaired.replace(/'/g, '"'); + repaired = repaired.replace(/([{,]\s*)([a-zA-Z_$][a-zA-Z0-9_$]*)\s*:/g, '$1"$2":'); + repaired = repaired.replace(/"([^"\\]*)"/g, (match, content) => { + if (content.includes("\n") || content.includes("\r") || content.includes("\t")) { + const escaped = content.replace(/\\/g, "\\\\").replace(/\n/g, "\\n").replace(/\r/g, "\\r").replace(/\t/g, "\\t"); + return `"${escaped}"`; + } + return match; + }); + repaired = repaired.replace(/"([^"]*)"([^":,}\]]*)"([^"]*)"(\s*[,:}\]])/g, (match, p1, p2, p3, p4) => `"${p1}\\"${p2}\\"${p3}"${p4}`); + repaired = repaired.replace(/(\[\s*(?:"[^"]*"(?:\s*,\s*"[^"]*")*\s*),?)\s*}/g, "$1]"); + const openBraces = (repaired.match(/\{/g) || []).length; + const closeBraces = (repaired.match(/\}/g) || []).length; + if (openBraces > closeBraces) { + repaired += "}".repeat(openBraces - closeBraces); + } else if (closeBraces > openBraces) { + repaired = "{".repeat(closeBraces - openBraces) + repaired; + } + const openBrackets = (repaired.match(/\[/g) || []).length; + const closeBrackets = (repaired.match(/\]/g) || []).length; + if (openBrackets > closeBrackets) { + repaired += "]".repeat(openBrackets - closeBrackets); + } else if (closeBrackets > openBrackets) { + repaired = "[".repeat(closeBrackets - openBrackets) + repaired; + } + repaired = repaired.replace(/,(\s*[}\]])/g, "$1"); + return repaired; + } + + function validateFieldWithInputSchema(value, fieldName, inputSchema, lineNum) { + if (inputSchema.required && (value === undefined || value === null)) { + return { + isValid: false, + error: `Line ${lineNum}: ${fieldName} is required`, + }; + } + if (value === undefined || value === null) { + return { + isValid: true, + normalizedValue: inputSchema.default || undefined, + }; + } + const inputType = inputSchema.type || "string"; + let normalizedValue = value; + switch (inputType) { + case "string": + if (typeof value !== "string") { + return { + isValid: false, + error: `Line ${lineNum}: ${fieldName} must be a string`, + }; + } + normalizedValue = sanitizeContent(value, { allowedAliases: allowedMentions }); + break; + case "boolean": + if (typeof value !== "boolean") { + return { + isValid: false, + error: `Line ${lineNum}: ${fieldName} must be a boolean`, + }; + } + break; + case "number": + if (typeof value !== "number") { + return { + isValid: false, + error: `Line ${lineNum}: ${fieldName} must be a number`, + }; + } + break; + case "choice": + if (typeof value !== "string") { + return { + isValid: false, + error: `Line ${lineNum}: ${fieldName} must be a string for choice type`, + }; + } + if (inputSchema.options && !inputSchema.options.includes(value)) { + return { + isValid: false, + error: `Line ${lineNum}: ${fieldName} must be one of: ${inputSchema.options.join(", ")}`, + }; + } + normalizedValue = sanitizeContent(value, { allowedAliases: allowedMentions }); + break; + default: + if (typeof value === "string") { + normalizedValue = sanitizeContent(value, { allowedAliases: allowedMentions }); + } + break; + } + return { + isValid: true, + normalizedValue, + }; + } + function validateItemWithSafeJobConfig(item, jobConfig, lineNum) { + const errors = []; + const normalizedItem = { ...item }; + if (!jobConfig.inputs) { + return { + isValid: true, + errors: [], + normalizedItem: item, + }; + } + for (const [fieldName, inputSchema] of Object.entries(jobConfig.inputs)) { + const fieldValue = item[fieldName]; + const validation = validateFieldWithInputSchema(fieldValue, fieldName, inputSchema, lineNum); + if (!validation.isValid && validation.error) { + errors.push(validation.error); + } else if (validation.normalizedValue !== undefined) { + normalizedItem[fieldName] = validation.normalizedValue; + } + } + return { + isValid: errors.length === 0, + errors, + normalizedItem, + }; + } + function parseJsonWithRepair(jsonStr) { + try { + return JSON.parse(jsonStr); + } catch (originalError) { + try { + const repairedJson = repairJson(jsonStr); + return JSON.parse(repairedJson); + } catch (repairError) { + core.info(`invalid input json: ${jsonStr}`); + const originalMsg = originalError instanceof Error ? originalError.message : String(originalError); + const repairMsg = repairError instanceof Error ? repairError.message : String(repairError); + throw new Error(`JSON parsing failed. Original: ${originalMsg}. After attempted repair: ${repairMsg}`); + } + } + } + const outputFile = process.env.GH_AW_SAFE_OUTPUTS; + // Read config from file instead of environment variable + const configPath = process.env.GH_AW_SAFE_OUTPUTS_CONFIG_PATH || "/tmp/gh-aw/safeoutputs/config.json"; + let safeOutputsConfig; + core.info(`[INGESTION] Reading config from: ${configPath}`); + try { + if (fs.existsSync(configPath)) { + const configFileContent = fs.readFileSync(configPath, "utf8"); + core.info(`[INGESTION] Raw config content: ${configFileContent}`); + safeOutputsConfig = JSON.parse(configFileContent); + core.info(`[INGESTION] Parsed config keys: ${JSON.stringify(Object.keys(safeOutputsConfig))}`); + } else { + core.info(`[INGESTION] Config file does not exist at: ${configPath}`); + } + } catch (error) { + core.warning(`Failed to read config file from ${configPath}: ${error instanceof Error ? error.message : String(error)}`); + } + + core.info(`[INGESTION] Output file path: ${outputFile}`); + if (!outputFile) { + core.info("GH_AW_SAFE_OUTPUTS not set, no output to collect"); + core.setOutput("output", ""); + return; + } + if (!fs.existsSync(outputFile)) { + core.info(`Output file does not exist: ${outputFile}`); + core.setOutput("output", ""); + return; + } + const outputContent = fs.readFileSync(outputFile, "utf8"); + if (outputContent.trim() === "") { + core.info("Output file is empty"); + } + core.info(`Raw output content length: ${outputContent.length}`); + core.info(`[INGESTION] First 500 chars of output: ${outputContent.substring(0, 500)}`); + let expectedOutputTypes = {}; + if (safeOutputsConfig) { + try { + // safeOutputsConfig is already a parsed object from the file + // Normalize all config keys to use underscores instead of dashes + core.info(`[INGESTION] Normalizing config keys (dash -> underscore)`); + expectedOutputTypes = Object.fromEntries(Object.entries(safeOutputsConfig).map(([key, value]) => [key.replace(/-/g, "_"), value])); + core.info(`[INGESTION] Expected output types after normalization: ${JSON.stringify(Object.keys(expectedOutputTypes))}`); + core.info(`[INGESTION] Expected output types full config: ${JSON.stringify(expectedOutputTypes)}`); + } catch (error) { + const errorMsg = error instanceof Error ? error.message : String(error); + core.info(`Warning: Could not parse safe-outputs config: ${errorMsg}`); + } + } + // Parse JSONL (JSON Lines) format: each line is a separate JSON object + // CRITICAL: This expects one JSON object per line. If JSON is formatted with + // indentation/pretty-printing, parsing will fail. + const lines = outputContent.trim().split("\n"); + const parsedItems = []; + const errors = []; + for (let i = 0; i < lines.length; i++) { + const line = lines[i].trim(); + if (line === "") continue; + core.info(`[INGESTION] Processing line ${i + 1}: ${line.substring(0, 200)}...`); + try { + const item = parseJsonWithRepair(line); + if (item === undefined) { + errors.push(`Line ${i + 1}: Invalid JSON - JSON parsing failed`); + continue; + } + if (!item.type) { + errors.push(`Line ${i + 1}: Missing required 'type' field`); + continue; + } + // Normalize type to use underscores (convert any dashes to underscores for resilience) + const originalType = item.type; + const itemType = item.type.replace(/-/g, "_"); + core.info(`[INGESTION] Line ${i + 1}: Original type='${originalType}', Normalized type='${itemType}'`); + // Update item.type to normalized value + item.type = itemType; + if (!expectedOutputTypes[itemType]) { + core.warning(`[INGESTION] Line ${i + 1}: Type '${itemType}' not found in expected types: ${JSON.stringify(Object.keys(expectedOutputTypes))}`); + errors.push(`Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(", ")}`); + continue; + } + const typeCount = parsedItems.filter(existing => existing.type === itemType).length; + const maxAllowed = getMaxAllowedForType(itemType, expectedOutputTypes); + if (typeCount >= maxAllowed) { + errors.push(`Line ${i + 1}: Too many items of type '${itemType}'. Maximum allowed: ${maxAllowed}.`); + continue; + } + core.info(`Line ${i + 1}: type '${itemType}'`); + + // Use the validation engine to validate the item + if (hasValidationConfig(itemType)) { + const validationResult = validateItem(item, itemType, i + 1, { allowedAliases: allowedMentions }); + if (!validationResult.isValid) { + if (validationResult.error) { + errors.push(validationResult.error); + } + continue; + } + // Update item with normalized values + Object.assign(item, validationResult.normalizedItem); + } else { + // Fall back to validateItemWithSafeJobConfig for unknown types + const jobOutputType = expectedOutputTypes[itemType]; + if (!jobOutputType) { + errors.push(`Line ${i + 1}: Unknown output type '${itemType}'`); + continue; + } + const safeJobConfig = jobOutputType; + if (safeJobConfig && safeJobConfig.inputs) { + const validation = validateItemWithSafeJobConfig(item, safeJobConfig, i + 1); + if (!validation.isValid) { + errors.push(...validation.errors); + continue; + } + Object.assign(item, validation.normalizedItem); + } + } + + core.info(`Line ${i + 1}: Valid ${itemType} item`); + parsedItems.push(item); + } catch (error) { + const errorMsg = error instanceof Error ? error.message : String(error); + errors.push(`Line ${i + 1}: Invalid JSON - ${errorMsg}`); + } + } + if (errors.length > 0) { + core.warning("Validation errors found:"); + errors.forEach(error => core.warning(` - ${error}`)); + } + for (const itemType of Object.keys(expectedOutputTypes)) { + const minRequired = getMinRequiredForType(itemType, expectedOutputTypes); + if (minRequired > 0) { + const actualCount = parsedItems.filter(item => item.type === itemType).length; + if (actualCount < minRequired) { + errors.push(`Too few items of type '${itemType}'. Minimum required: ${minRequired}, found: ${actualCount}.`); + } + } + } + core.info(`Successfully parsed ${parsedItems.length} valid output items`); + const validatedOutput = { + items: parsedItems, + errors: errors, + }; + const agentOutputFile = "/tmp/gh-aw/agent_output.json"; + const validatedOutputJson = JSON.stringify(validatedOutput); + try { + fs.mkdirSync("/tmp/gh-aw", { recursive: true }); + fs.writeFileSync(agentOutputFile, validatedOutputJson, "utf8"); + core.info(`Stored validated output to: ${agentOutputFile}`); + core.exportVariable("GH_AW_AGENT_OUTPUT", agentOutputFile); + } catch (error) { + const errorMsg = error instanceof Error ? error.message : String(error); + core.error(`Failed to write agent output file: ${errorMsg}`); + } + core.setOutput("output", JSON.stringify(validatedOutput)); + core.setOutput("raw_output", outputContent); + const outputTypes = Array.from(new Set(parsedItems.map(item => item.type))); + core.info(`output_types: ${outputTypes.join(", ")}`); + core.setOutput("output_types", outputTypes.join(",")); + + // Check if patch file exists for detection job conditional + const patchPath = "/tmp/gh-aw/aw.patch"; + const hasPatch = fs.existsSync(patchPath); + core.info(`Patch file ${hasPatch ? "exists" : "does not exist"} at: ${patchPath}`); + + // Check if allow-empty is enabled for create_pull_request (reuse already loaded config) + let allowEmptyPR = false; + if (safeOutputsConfig) { + // Check if create-pull-request has allow-empty enabled + if (safeOutputsConfig["create-pull-request"]?.["allow-empty"] === true || safeOutputsConfig["create_pull_request"]?.["allow_empty"] === true) { + allowEmptyPR = true; + core.info(`allow-empty is enabled for create-pull-request`); + } + } + + // If allow-empty is enabled for create_pull_request and there's no patch, that's OK + // Set has_patch to true so the create_pull_request job will run + if (allowEmptyPR && !hasPatch && outputTypes.includes("create_pull_request")) { + core.info(`allow-empty is enabled and no patch exists - will create empty PR`); + core.setOutput("has_patch", "true"); + } else { + core.setOutput("has_patch", hasPatch ? "true" : "false"); + } +} + +module.exports = { main }; diff --git a/pkg/workflow/js/compute_text.cjs b/pkg/workflow/js/compute_text.cjs new file mode 100644 index 0000000000..d4f77620d6 --- /dev/null +++ b/pkg/workflow/js/compute_text.cjs @@ -0,0 +1,173 @@ +// @ts-check +/// + +/** + * Sanitizes content for safe output in GitHub Actions + * @param {string} content - The content to sanitize + * @returns {string} The sanitized content + */ +const { sanitizeIncomingText, writeRedactedDomainsLog } = require("./sanitize_incoming_text.cjs"); + +async function main() { + let text = ""; + + const actor = context.actor; + const { owner, repo } = context.repo; + + // Check if the actor has repository access (admin, maintain permissions) + const repoPermission = await github.rest.repos.getCollaboratorPermissionLevel({ + owner: owner, + repo: repo, + username: actor, + }); + + const permission = repoPermission.data.permission; + core.info(`Repository permission level: ${permission}`); + + if (permission !== "admin" && permission !== "maintain") { + core.setOutput("text", ""); + return; + } + + // Determine current body text based on event context + switch (context.eventName) { + case "issues": + // For issues: title + body + if (context.payload.issue) { + const title = context.payload.issue.title || ""; + const body = context.payload.issue.body || ""; + text = `${title}\n\n${body}`; + } + break; + + case "pull_request": + // For pull requests: title + body + if (context.payload.pull_request) { + const title = context.payload.pull_request.title || ""; + const body = context.payload.pull_request.body || ""; + text = `${title}\n\n${body}`; + } + break; + + case "pull_request_target": + // For pull request target events: title + body + if (context.payload.pull_request) { + const title = context.payload.pull_request.title || ""; + const body = context.payload.pull_request.body || ""; + text = `${title}\n\n${body}`; + } + break; + + case "issue_comment": + // For issue comments: comment body + if (context.payload.comment) { + text = context.payload.comment.body || ""; + } + break; + + case "pull_request_review_comment": + // For PR review comments: comment body + if (context.payload.comment) { + text = context.payload.comment.body || ""; + } + break; + + case "pull_request_review": + // For PR reviews: review body + if (context.payload.review) { + text = context.payload.review.body || ""; + } + break; + + case "discussion": + // For discussions: title + body + if (context.payload.discussion) { + const title = context.payload.discussion.title || ""; + const body = context.payload.discussion.body || ""; + text = `${title}\n\n${body}`; + } + break; + + case "discussion_comment": + // For discussion comments: comment body + if (context.payload.comment) { + text = context.payload.comment.body || ""; + } + break; + + case "release": + // For releases: name + body + if (context.payload.release) { + const name = context.payload.release.name || context.payload.release.tag_name || ""; + const body = context.payload.release.body || ""; + text = `${name}\n\n${body}`; + } + break; + + case "workflow_dispatch": + // For workflow dispatch: check for release_url or release_id in inputs + if (context.payload.inputs) { + const releaseUrl = context.payload.inputs.release_url; + const releaseId = context.payload.inputs.release_id; + + // If release_url is provided, extract owner/repo/tag + if (releaseUrl) { + const urlMatch = releaseUrl.match(/github\.com\/([^\/]+)\/([^\/]+)\/releases\/tag\/([^\/]+)/); + if (urlMatch) { + const [, urlOwner, urlRepo, tag] = urlMatch; + try { + const { data: release } = await github.rest.repos.getReleaseByTag({ + owner: urlOwner, + repo: urlRepo, + tag: tag, + }); + const name = release.name || release.tag_name || ""; + const body = release.body || ""; + text = `${name}\n\n${body}`; + } catch (error) { + core.warning(`Failed to fetch release from URL: ${error instanceof Error ? error.message : String(error)}`); + } + } + } else if (releaseId) { + // If release_id is provided, fetch the release + try { + const { data: release } = await github.rest.repos.getRelease({ + owner: owner, + repo: repo, + release_id: parseInt(releaseId, 10), + }); + const name = release.name || release.tag_name || ""; + const body = release.body || ""; + text = `${name}\n\n${body}`; + } catch (error) { + core.warning(`Failed to fetch release by ID: ${error instanceof Error ? error.message : String(error)}`); + } + } + } + break; + + default: + // Default: empty text + text = ""; + break; + } + + // Sanitize the text before output + // All mentions are escaped (wrapped in backticks) to prevent unintended notifications + // Mention filtering will be applied by the agent output collector + const sanitizedText = sanitizeIncomingText(text); + + // Display sanitized text in logs + core.info(`text: ${sanitizedText}`); + + // Set the sanitized text as output + core.setOutput("text", sanitizedText); + + // Write redacted URL domains to log file if any were collected + const logPath = writeRedactedDomainsLog(); + if (logPath) { + core.info(`Redacted URL domains written to: ${logPath}`); + } +} + +module.exports = { main }; diff --git a/pkg/workflow/js/create_agent_task.cjs b/pkg/workflow/js/create_agent_task.cjs new file mode 100644 index 0000000000..f6f50f08c8 --- /dev/null +++ b/pkg/workflow/js/create_agent_task.cjs @@ -0,0 +1,177 @@ +// @ts-check +/// + +const fs = require("fs"); +const path = require("path"); + +async function main() { + // Initialize outputs to empty strings to ensure they're always set + core.setOutput("task_number", ""); + core.setOutput("task_url", ""); + + const isStaged = process.env.GITHUB_AW_SAFE_OUTPUTS_STAGED === "true"; + const agentOutputFile = process.env.GITHUB_AW_AGENT_OUTPUT; + if (!agentOutputFile) { + core.info("No GITHUB_AW_AGENT_OUTPUT environment variable found"); + return; + } + + // Read agent output from file + let outputContent; + try { + outputContent = fs.readFileSync(agentOutputFile, "utf8"); + } catch (error) { + core.setFailed(`Error reading agent output file: ${error instanceof Error ? error.message : String(error)}`); + return; + } + + if (outputContent.trim() === "") { + core.info("Agent output content is empty"); + return; + } + core.info(`Agent output content length: ${outputContent.length}`); + + let validatedOutput; + try { + validatedOutput = JSON.parse(outputContent); + } catch (error) { + core.setFailed(`Error parsing agent output JSON: ${error instanceof Error ? error.message : String(error)}`); + return; + } + + if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { + core.info("No valid items found in agent output"); + return; + } + + const createAgentTaskItems = validatedOutput.items.filter(item => item.type === "create_agent_task"); + if (createAgentTaskItems.length === 0) { + core.info("No create-agent-task items found in agent output"); + return; + } + + core.info(`Found ${createAgentTaskItems.length} create-agent-task item(s)`); + + if (isStaged) { + let summaryContent = "## 🎭 Staged Mode: Create Agent Tasks Preview\n\n"; + summaryContent += "The following agent tasks would be created if staged mode was disabled:\n\n"; + + for (const [index, item] of createAgentTaskItems.entries()) { + summaryContent += `### Task ${index + 1}\n\n`; + summaryContent += `**Description:**\n${item.body || "No description provided"}\n\n`; + + const baseBranch = process.env.GITHUB_AW_AGENT_TASK_BASE || "main"; + summaryContent += `**Base Branch:** ${baseBranch}\n\n`; + + const targetRepo = process.env.GITHUB_AW_TARGET_REPO || process.env.GITHUB_REPOSITORY || "unknown"; + summaryContent += `**Target Repository:** ${targetRepo}\n\n`; + + summaryContent += "---\n\n"; + } + + core.info(summaryContent); + core.summary.addRaw(summaryContent); + await core.summary.write(); + return; + } + + // Get base branch from environment or use current branch + const baseBranch = process.env.GITHUB_AW_AGENT_TASK_BASE || process.env.GITHUB_REF_NAME || "main"; + const targetRepo = process.env.GITHUB_AW_TARGET_REPO; + + // Process all agent task items + const createdTasks = []; + let summaryContent = "## ✅ Agent Tasks Created\n\n"; + + for (const [index, taskItem] of createAgentTaskItems.entries()) { + const taskDescription = taskItem.body; + + if (!taskDescription || taskDescription.trim() === "") { + core.warning(`Task ${index + 1}: Agent task description is empty, skipping`); + continue; + } + + try { + // Write task description to a temporary file + const tmpDir = "/tmp/gh-aw"; + if (!fs.existsSync(tmpDir)) { + fs.mkdirSync(tmpDir, { recursive: true }); + } + + const taskFile = path.join(tmpDir, `agent-task-description-${index + 1}.md`); + fs.writeFileSync(taskFile, taskDescription, "utf8"); + core.info(`Task ${index + 1}: Task description written to ${taskFile}`); + + // Build gh agent-task create command + const ghArgs = ["agent-task", "create", "--from-file", taskFile, "--base", baseBranch]; + + if (targetRepo) { + ghArgs.push("--repo", targetRepo); + } + + core.info(`Task ${index + 1}: Creating agent task with command: gh ${ghArgs.join(" ")}`); + + // Execute gh agent-task create command + let taskOutput; + try { + taskOutput = await exec.getExecOutput("gh", ghArgs, { + silent: false, + ignoreReturnCode: false, + }); + } catch (execError) { + const errorMessage = execError instanceof Error ? execError.message : String(execError); + + // Check for authentication/permission errors + if (errorMessage.includes("authentication") || errorMessage.includes("permission") || errorMessage.includes("forbidden") || errorMessage.includes("401") || errorMessage.includes("403")) { + core.error(`Task ${index + 1}: Failed to create agent task due to authentication/permission error.`); + core.error(`The default GITHUB_TOKEN does not have permission to create agent tasks.`); + core.error(`You must configure a Personal Access Token (PAT) as COPILOT_GITHUB_TOKEN or GH_AW_GITHUB_TOKEN.`); + core.error(`See documentation: https://githubnext.github.io/gh-aw/reference/safe-outputs/#agent-task-creation-create-agent-task`); + } else { + core.error(`Task ${index + 1}: Failed to create agent task: ${errorMessage}`); + } + continue; + } + + // Parse the output to extract task number and URL + // Expected output format from gh agent-task create is typically: + // https://github.com/owner/repo/issues/123 + const output = taskOutput.stdout.trim(); + core.info(`Task ${index + 1}: Agent task created: ${output}`); + + // Extract task number from URL + const urlMatch = output.match(/github\.com\/[^/]+\/[^/]+\/issues\/(\d+)/); + if (urlMatch) { + const taskNumber = urlMatch[1]; + createdTasks.push({ number: taskNumber, url: output }); + + summaryContent += `### Task ${index + 1}\n\n`; + summaryContent += `**Task:** [#${taskNumber}](${output})\n\n`; + summaryContent += `**Base Branch:** ${baseBranch}\n\n`; + + core.info(`✅ Successfully created agent task #${taskNumber}`); + } else { + core.warning(`Task ${index + 1}: Could not parse task number from output: ${output}`); + createdTasks.push({ number: "", url: output }); + } + } catch (error) { + core.error(`Task ${index + 1}: Error creating agent task: ${error instanceof Error ? error.message : String(error)}`); + } + } + + // Set outputs for the first created task (for backward compatibility) + if (createdTasks.length > 0) { + core.setOutput("task_number", createdTasks[0].number); + core.setOutput("task_url", createdTasks[0].url); + } else { + core.setFailed("No agent tasks were created"); + return; + } + + // Write summary + core.info(summaryContent); + core.summary.addRaw(summaryContent); + await core.summary.write(); +} + +module.exports = { main }; diff --git a/pkg/workflow/js/create_code_scanning_alert.cjs b/pkg/workflow/js/create_code_scanning_alert.cjs new file mode 100644 index 0000000000..bd70bc892e --- /dev/null +++ b/pkg/workflow/js/create_code_scanning_alert.cjs @@ -0,0 +1,245 @@ +// @ts-check +/// + +const { loadAgentOutput } = require("./load_agent_output.cjs"); + +async function main() { + const result = loadAgentOutput(); + if (!result.success) { + return; + } + + // Find all create-code-scanning-alert items + const securityItems = result.items.filter(/** @param {any} item */ item => item.type === "create_code_scanning_alert"); + if (securityItems.length === 0) { + core.info("No create-code-scanning-alert items found in agent output"); + return; + } + + core.info(`Found ${securityItems.length} create-code-scanning-alert item(s)`); + + // If in staged mode, emit step summary instead of creating code scanning alerts + if (process.env.GH_AW_SAFE_OUTPUTS_STAGED === "true") { + let summaryContent = "## 🎭 Staged Mode: Create Code Scanning Alerts Preview\n\n"; + summaryContent += "The following code scanning alerts would be created if staged mode was disabled:\n\n"; + + for (let i = 0; i < securityItems.length; i++) { + const item = securityItems[i]; + summaryContent += `### Security Finding ${i + 1}\n`; + summaryContent += `**File:** ${item.file || "No file provided"}\n\n`; + summaryContent += `**Line:** ${item.line || "No line provided"}\n\n`; + summaryContent += `**Severity:** ${item.severity || "No severity provided"}\n\n`; + summaryContent += `**Message:**\n${item.message || "No message provided"}\n\n`; + summaryContent += "---\n\n"; + } + + // Write to step summary + await core.summary.addRaw(summaryContent).write(); + core.info("📝 Code scanning alert creation preview written to step summary"); + return; + } + + // Get the max configuration from environment variable + const maxFindings = process.env.GH_AW_SECURITY_REPORT_MAX ? parseInt(process.env.GH_AW_SECURITY_REPORT_MAX) : 0; // 0 means unlimited + core.info(`Max findings configuration: ${maxFindings === 0 ? "unlimited" : maxFindings}`); + + // Get the driver configuration from environment variable + const driverName = process.env.GH_AW_SECURITY_REPORT_DRIVER || "GitHub Agentic Workflows Security Scanner"; + core.info(`Driver name: ${driverName}`); + + // Get the workflow filename for rule ID prefix + const workflowFilename = process.env.GH_AW_WORKFLOW_FILENAME || "workflow"; + core.info(`Workflow filename for rule ID prefix: ${workflowFilename}`); + + const validFindings = []; + + // Process each security item and validate the findings + for (let i = 0; i < securityItems.length; i++) { + const securityItem = securityItems[i]; + core.info( + `Processing create-code-scanning-alert item ${i + 1}/${securityItems.length}: file=${securityItem.file}, line=${securityItem.line}, severity=${securityItem.severity}, messageLength=${securityItem.message ? securityItem.message.length : "undefined"}, ruleIdSuffix=${securityItem.ruleIdSuffix || "not specified"}` + ); + + // Validate required fields + if (!securityItem.file) { + core.info('Missing required field "file" in code scanning alert item'); + continue; + } + + if (!securityItem.line || (typeof securityItem.line !== "number" && typeof securityItem.line !== "string")) { + core.info('Missing or invalid required field "line" in code scanning alert item'); + continue; + } + + if (!securityItem.severity || typeof securityItem.severity !== "string") { + core.info('Missing or invalid required field "severity" in code scanning alert item'); + continue; + } + + if (!securityItem.message || typeof securityItem.message !== "string") { + core.info('Missing or invalid required field "message" in code scanning alert item'); + continue; + } + + // Parse line number + const line = parseInt(securityItem.line, 10); + if (isNaN(line) || line <= 0) { + core.info(`Invalid line number: ${securityItem.line}`); + continue; + } + + // Parse optional column number + let column = 1; // Default to column 1 + if (securityItem.column !== undefined) { + if (typeof securityItem.column !== "number" && typeof securityItem.column !== "string") { + core.info('Invalid field "column" in code scanning alert item (must be number or string)'); + continue; + } + const parsedColumn = parseInt(securityItem.column, 10); + if (isNaN(parsedColumn) || parsedColumn <= 0) { + core.info(`Invalid column number: ${securityItem.column}`); + continue; + } + column = parsedColumn; + } + + // Parse optional rule ID suffix + let ruleIdSuffix = null; + if (securityItem.ruleIdSuffix !== undefined) { + if (typeof securityItem.ruleIdSuffix !== "string") { + core.info('Invalid field "ruleIdSuffix" in code scanning alert item (must be string)'); + continue; + } + // Validate that the suffix doesn't contain invalid characters + const trimmedSuffix = securityItem.ruleIdSuffix.trim(); + if (trimmedSuffix.length === 0) { + core.info('Invalid field "ruleIdSuffix" in code scanning alert item (cannot be empty)'); + continue; + } + // Check for characters that would be problematic in rule IDs + if (!/^[a-zA-Z0-9_-]+$/.test(trimmedSuffix)) { + core.info(`Invalid ruleIdSuffix "${trimmedSuffix}" (must contain only alphanumeric characters, hyphens, and underscores)`); + continue; + } + ruleIdSuffix = trimmedSuffix; + } + + // Validate severity level and map to SARIF level + /** @type {Record} */ + const severityMap = { + error: "error", + warning: "warning", + info: "note", + note: "note", + }; + + const normalizedSeverity = securityItem.severity.toLowerCase(); + if (!severityMap[normalizedSeverity]) { + core.info(`Invalid severity level: ${securityItem.severity} (must be error, warning, info, or note)`); + continue; + } + + const sarifLevel = severityMap[normalizedSeverity]; + + // Create a valid finding object + validFindings.push({ + file: securityItem.file.trim(), + line: line, + column: column, + severity: normalizedSeverity, + sarifLevel: sarifLevel, + message: securityItem.message.trim(), + ruleIdSuffix: ruleIdSuffix, + }); + + // Check if we've reached the max limit + if (maxFindings > 0 && validFindings.length >= maxFindings) { + core.info(`Reached maximum findings limit: ${maxFindings}`); + break; + } + } + + if (validFindings.length === 0) { + core.info("No valid security findings to report"); + return; + } + + core.info(`Processing ${validFindings.length} valid security finding(s)`); + + // Generate SARIF file + const sarifContent = { + $schema: "https://raw.githubusercontent.com/oasis-tcs/sarif-spec/master/Schemata/sarif-schema-2.1.0.json", + version: "2.1.0", + runs: [ + { + tool: { + driver: { + name: driverName, + version: "1.0.0", + informationUri: "https://github.com/githubnext/gh-aw", + }, + }, + results: validFindings.map((finding, index) => ({ + ruleId: finding.ruleIdSuffix ? `${workflowFilename}-${finding.ruleIdSuffix}` : `${workflowFilename}-security-finding-${index + 1}`, + message: { text: finding.message }, + level: finding.sarifLevel, + locations: [ + { + physicalLocation: { + artifactLocation: { uri: finding.file }, + region: { + startLine: finding.line, + startColumn: finding.column, + }, + }, + }, + ], + })), + }, + ], + }; + + // Write SARIF file to filesystem + const fs = require("fs"); + const path = require("path"); + const sarifFileName = "code-scanning-alert.sarif"; + const sarifFilePath = path.join(process.cwd(), sarifFileName); + + try { + fs.writeFileSync(sarifFilePath, JSON.stringify(sarifContent, null, 2)); + core.info(`✓ Created SARIF file: ${sarifFilePath}`); + core.info(`SARIF file size: ${fs.statSync(sarifFilePath).size} bytes`); + + // Set outputs for the GitHub Action + core.setOutput("sarif_file", sarifFilePath); + core.setOutput("findings_count", validFindings.length); + core.setOutput("artifact_uploaded", "pending"); + core.setOutput("codeql_uploaded", "pending"); + + // Write summary with findings + let summaryContent = "\n\n## Code Scanning Alert\n"; + summaryContent += `Found **${validFindings.length}** security finding(s):\n\n`; + + for (const finding of validFindings) { + const emoji = finding.severity === "error" ? "🔴" : finding.severity === "warning" ? "🟡" : "🔵"; + summaryContent += `${emoji} **${finding.severity.toUpperCase()}** in \`${finding.file}:${finding.line}\`: ${finding.message}\n`; + } + + summaryContent += `\n📄 SARIF file created: \`${sarifFileName}\`\n`; + summaryContent += `🔍 Findings will be uploaded to GitHub Code Scanning\n`; + + await core.summary.addRaw(summaryContent).write(); + } catch (error) { + core.error(`✗ Failed to create SARIF file: ${error instanceof Error ? error.message : String(error)}`); + throw error; + } + + core.info(`Successfully created code scanning alert with ${validFindings.length} finding(s)`); + return { + sarifFile: sarifFilePath, + findingsCount: validFindings.length, + findings: validFindings, + }; +} + +module.exports = { main }; diff --git a/pkg/workflow/js/create_discussion.cjs b/pkg/workflow/js/create_discussion.cjs new file mode 100644 index 0000000000..066bd6aa04 --- /dev/null +++ b/pkg/workflow/js/create_discussion.cjs @@ -0,0 +1,346 @@ +// @ts-check +/// + +const { loadAgentOutput } = require("./load_agent_output.cjs"); +const { getTrackerID } = require("./get_tracker_id.cjs"); +const { closeOlderDiscussions } = require("./close_older_discussions.cjs"); +const { replaceTemporaryIdReferences, loadTemporaryIdMap } = require("./temporary_id.cjs"); +const { parseAllowedRepos, getDefaultTargetRepo, validateRepo, parseRepoSlug } = require("./repo_helpers.cjs"); +const { addExpirationComment } = require("./expiration_helpers.cjs"); +const { removeDuplicateTitleFromDescription } = require("./remove_duplicate_title.cjs"); + +/** + * Fetch repository ID and discussion categories for a repository + * @param {string} owner - Repository owner + * @param {string} repo - Repository name + * @returns {Promise<{repositoryId: string, discussionCategories: Array<{id: string, name: string, slug: string, description: string}>}|null>} + */ +async function fetchRepoDiscussionInfo(owner, repo) { + const repositoryQuery = ` + query($owner: String!, $repo: String!) { + repository(owner: $owner, name: $repo) { + id + discussionCategories(first: 20) { + nodes { + id + name + slug + description + } + } + } + } + `; + const queryResult = await github.graphql(repositoryQuery, { + owner: owner, + repo: repo, + }); + if (!queryResult || !queryResult.repository) { + return null; + } + return { + repositoryId: queryResult.repository.id, + discussionCategories: queryResult.repository.discussionCategories.nodes || [], + }; +} + +/** + * Resolve category ID for a repository + * @param {string} categoryConfig - Category ID, name, or slug from config + * @param {string} itemCategory - Category from agent output item (optional) + * @param {Array<{id: string, name: string, slug: string}>} categories - Available categories + * @returns {{id: string, matchType: string, name: string, requestedCategory?: string}|undefined} Resolved category info + */ +function resolveCategoryId(categoryConfig, itemCategory, categories) { + // Use item category if provided, otherwise use config + const categoryToMatch = itemCategory || categoryConfig; + + if (categoryToMatch) { + // Try to match against category IDs first + const categoryById = categories.find(cat => cat.id === categoryToMatch); + if (categoryById) { + return { id: categoryById.id, matchType: "id", name: categoryById.name }; + } + // Try to match against category names + const categoryByName = categories.find(cat => cat.name === categoryToMatch); + if (categoryByName) { + return { id: categoryByName.id, matchType: "name", name: categoryByName.name }; + } + // Try to match against category slugs (routes) + const categoryBySlug = categories.find(cat => cat.slug === categoryToMatch); + if (categoryBySlug) { + return { id: categoryBySlug.id, matchType: "slug", name: categoryBySlug.name }; + } + } + + // Fall back to first category if available + if (categories.length > 0) { + return { + id: categories[0].id, + matchType: "fallback", + name: categories[0].name, + requestedCategory: categoryToMatch, + }; + } + + return undefined; +} + +async function main() { + // Initialize outputs to empty strings to ensure they're always set + core.setOutput("discussion_number", ""); + core.setOutput("discussion_url", ""); + + // Load the temporary ID map from create_issue job + const temporaryIdMap = loadTemporaryIdMap(); + if (temporaryIdMap.size > 0) { + core.info(`Loaded temporary ID map with ${temporaryIdMap.size} entries`); + } + + const result = loadAgentOutput(); + if (!result.success) { + return; + } + + const createDiscussionItems = result.items.filter(item => item.type === "create_discussion"); + if (createDiscussionItems.length === 0) { + core.warning("No create-discussion items found in agent output"); + return; + } + core.info(`Found ${createDiscussionItems.length} create-discussion item(s)`); + + // Parse allowed repos and default target + const allowedRepos = parseAllowedRepos(); + const defaultTargetRepo = getDefaultTargetRepo(); + core.info(`Default target repo: ${defaultTargetRepo}`); + if (allowedRepos.size > 0) { + core.info(`Allowed repos: ${Array.from(allowedRepos).join(", ")}`); + } + + if (process.env.GH_AW_SAFE_OUTPUTS_STAGED === "true") { + let summaryContent = "## 🎭 Staged Mode: Create Discussions Preview\n\n"; + summaryContent += "The following discussions would be created if staged mode was disabled:\n\n"; + for (let i = 0; i < createDiscussionItems.length; i++) { + const item = createDiscussionItems[i]; + summaryContent += `### Discussion ${i + 1}\n`; + summaryContent += `**Title:** ${item.title || "No title provided"}\n\n`; + if (item.repo) { + summaryContent += `**Repository:** ${item.repo}\n\n`; + } + if (item.body) { + summaryContent += `**Body:**\n${item.body}\n\n`; + } + if (item.category) { + summaryContent += `**Category:** ${item.category}\n\n`; + } + summaryContent += "---\n\n"; + } + await core.summary.addRaw(summaryContent).write(); + core.info("📝 Discussion creation preview written to step summary"); + return; + } + + // Cache for repository info to avoid redundant API calls + /** @type {Map}>} */ + const repoInfoCache = new Map(); + + // Get configuration for close-older-discussions + const closeOlderEnabled = process.env.GH_AW_CLOSE_OLDER_DISCUSSIONS === "true"; + const titlePrefix = process.env.GH_AW_DISCUSSION_TITLE_PREFIX || ""; + const configCategory = process.env.GH_AW_DISCUSSION_CATEGORY || ""; + const labelsEnvVar = process.env.GH_AW_DISCUSSION_LABELS || ""; + const labels = labelsEnvVar + ? labelsEnvVar + .split(",") + .map(l => l.trim()) + .filter(l => l.length > 0) + : []; + const workflowName = process.env.GH_AW_WORKFLOW_NAME || "Workflow"; + const runId = context.runId; + const githubServer = process.env.GITHUB_SERVER_URL || "https://github.com"; + const runUrl = context.payload.repository ? `${context.payload.repository.html_url}/actions/runs/${runId}` : `${githubServer}/${context.repo.owner}/${context.repo.repo}/actions/runs/${runId}`; + + const createdDiscussions = []; + const closedDiscussionsSummary = []; + + for (let i = 0; i < createDiscussionItems.length; i++) { + const createDiscussionItem = createDiscussionItems[i]; + + // Determine target repository for this discussion + const itemRepo = createDiscussionItem.repo ? String(createDiscussionItem.repo).trim() : defaultTargetRepo; + + // Validate the repository is allowed + const repoValidation = validateRepo(itemRepo, defaultTargetRepo, allowedRepos); + if (!repoValidation.valid) { + core.warning(`Skipping discussion: ${repoValidation.error}`); + continue; + } + + // Parse the repository slug + const repoParts = parseRepoSlug(itemRepo); + if (!repoParts) { + core.warning(`Skipping discussion: Invalid repository format '${itemRepo}'. Expected 'owner/repo'.`); + continue; + } + + // Get repository info (cached) + let repoInfo = repoInfoCache.get(itemRepo); + if (!repoInfo) { + try { + const fetchedInfo = await fetchRepoDiscussionInfo(repoParts.owner, repoParts.repo); + if (!fetchedInfo) { + core.warning(`Skipping discussion: Failed to fetch repository information for '${itemRepo}'`); + continue; + } + repoInfo = fetchedInfo; + repoInfoCache.set(itemRepo, repoInfo); + core.info(`Fetched discussion categories for ${itemRepo}: ${JSON.stringify(repoInfo.discussionCategories.map(cat => ({ name: cat.name, id: cat.id })))}`); + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + if (errorMessage.includes("Not Found") || errorMessage.includes("not found") || errorMessage.includes("Could not resolve to a Repository")) { + core.warning(`Skipping discussion: Discussions are not enabled for repository '${itemRepo}'`); + continue; + } + core.error(`Failed to get discussion categories for ${itemRepo}: ${errorMessage}`); + throw error; + } + } + + // Resolve category ID for this discussion + const categoryInfo = resolveCategoryId(configCategory, createDiscussionItem.category, repoInfo.discussionCategories); + if (!categoryInfo) { + core.warning(`Skipping discussion in ${itemRepo}: No discussion category available`); + continue; + } + + // Log how the category was resolved + if (categoryInfo.matchType === "name") { + core.info(`Using category by name: ${categoryInfo.name} (${categoryInfo.id})`); + } else if (categoryInfo.matchType === "slug") { + core.info(`Using category by slug: ${categoryInfo.name} (${categoryInfo.id})`); + } else if (categoryInfo.matchType === "fallback") { + if (categoryInfo.requestedCategory) { + const availableCategoryNames = repoInfo.discussionCategories.map(cat => cat.name).join(", "); + core.warning(`Category "${categoryInfo.requestedCategory}" not found by ID, name, or slug. Available categories: ${availableCategoryNames}`); + core.info(`Falling back to default category: ${categoryInfo.name} (${categoryInfo.id})`); + } else { + core.info(`Using default first category: ${categoryInfo.name} (${categoryInfo.id})`); + } + } + + const categoryId = categoryInfo.id; + + core.info(`Processing create-discussion item ${i + 1}/${createDiscussionItems.length}: title=${createDiscussionItem.title}, bodyLength=${createDiscussionItem.body?.length || 0}, repo=${itemRepo}`); + + // Replace temporary ID references in title + let title = createDiscussionItem.title ? replaceTemporaryIdReferences(createDiscussionItem.title.trim(), temporaryIdMap, itemRepo) : ""; + // Replace temporary ID references in body (with defensive null check) + const bodyText = createDiscussionItem.body || ""; + let processedBody = replaceTemporaryIdReferences(bodyText, temporaryIdMap, itemRepo); + + // Remove duplicate title from description if it starts with a header matching the title + processedBody = removeDuplicateTitleFromDescription(title, processedBody); + + let bodyLines = processedBody.split("\n"); + if (!title) { + title = replaceTemporaryIdReferences(bodyText, temporaryIdMap, itemRepo) || "Agent Output"; + } + if (titlePrefix && !title.startsWith(titlePrefix)) { + title = titlePrefix + title; + } + + // Add tracker-id comment if present + const trackerIDComment = getTrackerID("markdown"); + if (trackerIDComment) { + bodyLines.push(trackerIDComment); + } + + // Add expiration comment if expires is set + addExpirationComment(bodyLines, "GH_AW_DISCUSSION_EXPIRES", "Discussion"); + + bodyLines.push(``, ``, `> AI generated by [${workflowName}](${runUrl})`, ""); + const body = bodyLines.join("\n").trim(); + core.info(`Creating discussion in ${itemRepo} with title: ${title}`); + core.info(`Category ID: ${categoryId}`); + core.info(`Body length: ${body.length}`); + try { + const createDiscussionMutation = ` + mutation($repositoryId: ID!, $categoryId: ID!, $title: String!, $body: String!) { + createDiscussion(input: { + repositoryId: $repositoryId, + categoryId: $categoryId, + title: $title, + body: $body + }) { + discussion { + id + number + title + url + } + } + } + `; + const mutationResult = await github.graphql(createDiscussionMutation, { + repositoryId: repoInfo.repositoryId, + categoryId: categoryId, + title: title, + body: body, + }); + const discussion = mutationResult.createDiscussion.discussion; + if (!discussion) { + core.error(`Failed to create discussion in ${itemRepo}: No discussion data returned`); + continue; + } + core.info(`Created discussion ${itemRepo}#${discussion.number}: ${discussion.url}`); + createdDiscussions.push({ ...discussion, _repo: itemRepo }); + if (i === createDiscussionItems.length - 1) { + core.setOutput("discussion_number", discussion.number); + core.setOutput("discussion_url", discussion.url); + } + + // Close older discussions if enabled and title prefix or labels are set + // Note: close-older-discussions only works within the same repository + const hasMatchingCriteria = titlePrefix || labels.length > 0; + if (closeOlderEnabled && hasMatchingCriteria) { + core.info("close-older-discussions is enabled, searching for older discussions to close..."); + try { + const closedDiscussions = await closeOlderDiscussions(github, repoParts.owner, repoParts.repo, titlePrefix, labels, categoryId, { number: discussion.number, url: discussion.url }, workflowName, runUrl); + + if (closedDiscussions.length > 0) { + closedDiscussionsSummary.push(...closedDiscussions); + core.info(`Closed ${closedDiscussions.length} older discussion(s) as outdated`); + } + } catch (closeError) { + // Log error but don't fail the workflow - closing older discussions is a nice-to-have + core.warning(`Failed to close older discussions: ${closeError instanceof Error ? closeError.message : String(closeError)}`); + } + } else if (closeOlderEnabled && !hasMatchingCriteria) { + core.warning("close-older-discussions is enabled but no title-prefix or labels are set - skipping close older discussions"); + } + } catch (error) { + core.error(`✗ Failed to create discussion "${title}" in ${itemRepo}: ${error instanceof Error ? error.message : String(error)}`); + throw error; + } + } + if (createdDiscussions.length > 0) { + let summaryContent = "\n\n## GitHub Discussions\n"; + for (const discussion of createdDiscussions) { + const repoLabel = discussion._repo !== defaultTargetRepo ? ` (${discussion._repo})` : ""; + summaryContent += `- Discussion #${discussion.number}${repoLabel}: [${discussion.title}](${discussion.url})\n`; + } + + // Add closed discussions to summary + if (closedDiscussionsSummary.length > 0) { + summaryContent += "\n### Closed Older Discussions\n"; + for (const closed of closedDiscussionsSummary) { + summaryContent += `- Discussion #${closed.number}: [View](${closed.url}) (marked as outdated)\n`; + } + } + + await core.summary.addRaw(summaryContent).write(); + } + core.info(`Successfully created ${createdDiscussions.length} discussion(s)`); +} + +module.exports = { main }; diff --git a/pkg/workflow/js/create_issue.cjs b/pkg/workflow/js/create_issue.cjs new file mode 100644 index 0000000000..d20ecf2236 --- /dev/null +++ b/pkg/workflow/js/create_issue.cjs @@ -0,0 +1,351 @@ +// @ts-check +/// + +const { sanitizeLabelContent } = require("./sanitize_label_content.cjs"); +const { loadAgentOutput } = require("./load_agent_output.cjs"); +const { generateStagedPreview } = require("./staged_preview.cjs"); +const { generateFooter } = require("./generate_footer.cjs"); +const { getTrackerID } = require("./get_tracker_id.cjs"); +const { generateTemporaryId, isTemporaryId, normalizeTemporaryId, replaceTemporaryIdReferences, serializeTemporaryIdMap } = require("./temporary_id.cjs"); +const { parseAllowedRepos, getDefaultTargetRepo, validateRepo, parseRepoSlug } = require("./repo_helpers.cjs"); +const { addExpirationComment } = require("./expiration_helpers.cjs"); +const { removeDuplicateTitleFromDescription } = require("./remove_duplicate_title.cjs"); + +async function main() { + // Initialize outputs to empty strings to ensure they're always set + core.setOutput("issue_number", ""); + core.setOutput("issue_url", ""); + core.setOutput("temporary_id_map", "{}"); + core.setOutput("issues_to_assign_copilot", ""); + + const isStaged = process.env.GH_AW_SAFE_OUTPUTS_STAGED === "true"; + + const result = loadAgentOutput(); + if (!result.success) { + return; + } + + const createIssueItems = result.items.filter(item => item.type === "create_issue"); + if (createIssueItems.length === 0) { + core.info("No create-issue items found in agent output"); + return; + } + core.info(`Found ${createIssueItems.length} create-issue item(s)`); + + // Parse allowed repos and default target + const allowedRepos = parseAllowedRepos(); + const defaultTargetRepo = getDefaultTargetRepo(); + core.info(`Default target repo: ${defaultTargetRepo}`); + if (allowedRepos.size > 0) { + core.info(`Allowed repos: ${Array.from(allowedRepos).join(", ")}`); + } + + if (isStaged) { + await generateStagedPreview({ + title: "Create Issues", + description: "The following issues would be created if staged mode was disabled:", + items: createIssueItems, + renderItem: (item, index) => { + let content = `#### Issue ${index + 1}\n`; + content += `**Title:** ${item.title || "No title provided"}\n\n`; + if (item.temporary_id) { + content += `**Temporary ID:** ${item.temporary_id}\n\n`; + } + if (item.repo) { + content += `**Repository:** ${item.repo}\n\n`; + } + if (item.body) { + content += `**Body:**\n${item.body}\n\n`; + } + if (item.labels && item.labels.length > 0) { + content += `**Labels:** ${item.labels.join(", ")}\n\n`; + } + if (item.parent) { + content += `**Parent:** ${item.parent}\n\n`; + } + return content; + }, + }); + return; + } + const parentIssueNumber = context.payload?.issue?.number; + + // Map to track temporary_id -> {repo, number} relationships + /** @type {Map} */ + const temporaryIdMap = new Map(); + + // Extract triggering context for footer generation + const triggeringIssueNumber = context.payload?.issue?.number && !context.payload?.issue?.pull_request ? context.payload.issue.number : undefined; + const triggeringPRNumber = context.payload?.pull_request?.number || (context.payload?.issue?.pull_request ? context.payload.issue.number : undefined); + const triggeringDiscussionNumber = context.payload?.discussion?.number; + + const labelsEnv = process.env.GH_AW_ISSUE_LABELS; + let envLabels = labelsEnv + ? labelsEnv + .split(",") + .map(label => label.trim()) + .filter(label => label) + : []; + const createdIssues = []; + for (let i = 0; i < createIssueItems.length; i++) { + const createIssueItem = createIssueItems[i]; + + // Determine target repository for this issue + const itemRepo = createIssueItem.repo ? String(createIssueItem.repo).trim() : defaultTargetRepo; + + // Validate the repository is allowed + const repoValidation = validateRepo(itemRepo, defaultTargetRepo, allowedRepos); + if (!repoValidation.valid) { + core.warning(`Skipping issue: ${repoValidation.error}`); + continue; + } + + // Parse the repository slug + const repoParts = parseRepoSlug(itemRepo); + if (!repoParts) { + core.warning(`Skipping issue: Invalid repository format '${itemRepo}'. Expected 'owner/repo'.`); + continue; + } + + // Get or generate the temporary ID for this issue + const temporaryId = createIssueItem.temporary_id || generateTemporaryId(); + core.info(`Processing create-issue item ${i + 1}/${createIssueItems.length}: title=${createIssueItem.title}, bodyLength=${createIssueItem.body.length}, temporaryId=${temporaryId}, repo=${itemRepo}`); + + // Debug logging for parent field + core.info(`Debug: createIssueItem.parent = ${JSON.stringify(createIssueItem.parent)}`); + core.info(`Debug: parentIssueNumber from context = ${JSON.stringify(parentIssueNumber)}`); + + // Resolve parent: check if it's a temporary ID reference + let effectiveParentIssueNumber; + let effectiveParentRepo = itemRepo; // Default to same repo + if (createIssueItem.parent !== undefined) { + if (isTemporaryId(createIssueItem.parent)) { + // It's a temporary ID, look it up in the map + const resolvedParent = temporaryIdMap.get(normalizeTemporaryId(createIssueItem.parent)); + if (resolvedParent !== undefined) { + effectiveParentIssueNumber = resolvedParent.number; + effectiveParentRepo = resolvedParent.repo; + core.info(`Resolved parent temporary ID '${createIssueItem.parent}' to ${effectiveParentRepo}#${effectiveParentIssueNumber}`); + } else { + core.warning(`Parent temporary ID '${createIssueItem.parent}' not found in map. Ensure parent issue is created before sub-issues.`); + effectiveParentIssueNumber = undefined; + } + } else { + // It's a real issue number + effectiveParentIssueNumber = parseInt(String(createIssueItem.parent), 10); + if (isNaN(effectiveParentIssueNumber)) { + core.warning(`Invalid parent value: ${createIssueItem.parent}`); + effectiveParentIssueNumber = undefined; + } + } + } else { + // Only use context parent if we're in the same repo as context + const contextRepo = `${context.repo.owner}/${context.repo.repo}`; + if (itemRepo === contextRepo) { + effectiveParentIssueNumber = parentIssueNumber; + } + } + core.info(`Debug: effectiveParentIssueNumber = ${JSON.stringify(effectiveParentIssueNumber)}, effectiveParentRepo = ${effectiveParentRepo}`); + + if (effectiveParentIssueNumber && createIssueItem.parent !== undefined) { + core.info(`Using explicit parent issue number from item: ${effectiveParentRepo}#${effectiveParentIssueNumber}`); + } + let labels = [...envLabels]; + if (createIssueItem.labels && Array.isArray(createIssueItem.labels)) { + labels = [...labels, ...createIssueItem.labels]; + } + labels = labels + .filter(label => !!label) + .map(label => String(label).trim()) + .filter(label => label) + .map(label => sanitizeLabelContent(label)) + .filter(label => label) + .map(label => (label.length > 64 ? label.substring(0, 64) : label)) + .filter((label, index, arr) => arr.indexOf(label) === index); + let title = createIssueItem.title ? createIssueItem.title.trim() : ""; + + // Replace temporary ID references in the body using already-created issues + let processedBody = replaceTemporaryIdReferences(createIssueItem.body, temporaryIdMap, itemRepo); + + // Remove duplicate title from description if it starts with a header matching the title + processedBody = removeDuplicateTitleFromDescription(title, processedBody); + + let bodyLines = processedBody.split("\n"); + + if (!title) { + title = createIssueItem.body || "Agent Output"; + } + const titlePrefix = process.env.GH_AW_ISSUE_TITLE_PREFIX; + if (titlePrefix && !title.startsWith(titlePrefix)) { + title = titlePrefix + title; + } + if (effectiveParentIssueNumber) { + core.info("Detected issue context, parent issue " + effectiveParentRepo + "#" + effectiveParentIssueNumber); + // Use full repo reference if cross-repo, short reference if same repo + if (effectiveParentRepo === itemRepo) { + bodyLines.push(`Related to #${effectiveParentIssueNumber}`); + } else { + bodyLines.push(`Related to ${effectiveParentRepo}#${effectiveParentIssueNumber}`); + } + } + const workflowName = process.env.GH_AW_WORKFLOW_NAME || "Workflow"; + const workflowSource = process.env.GH_AW_WORKFLOW_SOURCE || ""; + const workflowSourceURL = process.env.GH_AW_WORKFLOW_SOURCE_URL || ""; + const runId = context.runId; + const githubServer = process.env.GITHUB_SERVER_URL || "https://github.com"; + const runUrl = context.payload.repository ? `${context.payload.repository.html_url}/actions/runs/${runId}` : `${githubServer}/${context.repo.owner}/${context.repo.repo}/actions/runs/${runId}`; + + // Add tracker-id comment if present + const trackerIDComment = getTrackerID("markdown"); + if (trackerIDComment) { + bodyLines.push(trackerIDComment); + } + + // Add expiration comment if expires is set + addExpirationComment(bodyLines, "GH_AW_ISSUE_EXPIRES", "Issue"); + + bodyLines.push(``, ``, generateFooter(workflowName, runUrl, workflowSource, workflowSourceURL, triggeringIssueNumber, triggeringPRNumber, triggeringDiscussionNumber).trimEnd(), ""); + const body = bodyLines.join("\n").trim(); + core.info(`Creating issue in ${itemRepo} with title: ${title}`); + core.info(`Labels: ${labels}`); + core.info(`Body length: ${body.length}`); + try { + const { data: issue } = await github.rest.issues.create({ + owner: repoParts.owner, + repo: repoParts.repo, + title: title, + body: body, + labels: labels, + }); + core.info(`Created issue ${itemRepo}#${issue.number}: ${issue.html_url}`); + createdIssues.push({ ...issue, _repo: itemRepo }); + + // Store the mapping of temporary_id -> {repo, number} + temporaryIdMap.set(normalizeTemporaryId(temporaryId), { repo: itemRepo, number: issue.number }); + core.info(`Stored temporary ID mapping: ${temporaryId} -> ${itemRepo}#${issue.number}`); + + // Debug logging for sub-issue linking + core.info(`Debug: About to check if sub-issue linking is needed. effectiveParentIssueNumber = ${effectiveParentIssueNumber}`); + + // Sub-issue linking only works within the same repository + if (effectiveParentIssueNumber && effectiveParentRepo === itemRepo) { + core.info(`Attempting to link issue #${issue.number} as sub-issue of #${effectiveParentIssueNumber}`); + try { + // First, get the node IDs for both parent and child issues + core.info(`Fetching node ID for parent issue #${effectiveParentIssueNumber}...`); + const getIssueNodeIdQuery = ` + query($owner: String!, $repo: String!, $issueNumber: Int!) { + repository(owner: $owner, name: $repo) { + issue(number: $issueNumber) { + id + } + } + } + `; + + // Get parent issue node ID + const parentResult = await github.graphql(getIssueNodeIdQuery, { + owner: repoParts.owner, + repo: repoParts.repo, + issueNumber: effectiveParentIssueNumber, + }); + const parentNodeId = parentResult.repository.issue.id; + core.info(`Parent issue node ID: ${parentNodeId}`); + + // Get child issue node ID + core.info(`Fetching node ID for child issue #${issue.number}...`); + const childResult = await github.graphql(getIssueNodeIdQuery, { + owner: repoParts.owner, + repo: repoParts.repo, + issueNumber: issue.number, + }); + const childNodeId = childResult.repository.issue.id; + core.info(`Child issue node ID: ${childNodeId}`); + + // Link the child issue as a sub-issue of the parent + core.info(`Executing addSubIssue mutation...`); + const addSubIssueMutation = ` + mutation($issueId: ID!, $subIssueId: ID!) { + addSubIssue(input: { + issueId: $issueId, + subIssueId: $subIssueId + }) { + subIssue { + id + number + } + } + } + `; + + await github.graphql(addSubIssueMutation, { + issueId: parentNodeId, + subIssueId: childNodeId, + }); + + core.info("✓ Successfully linked issue #" + issue.number + " as sub-issue of #" + effectiveParentIssueNumber); + } catch (error) { + core.info(`Warning: Could not link sub-issue to parent: ${error instanceof Error ? error.message : String(error)}`); + core.info(`Error details: ${error instanceof Error ? error.stack : String(error)}`); + // Fallback: add a comment if sub-issue linking fails + try { + core.info(`Attempting fallback: adding comment to parent issue #${effectiveParentIssueNumber}...`); + await github.rest.issues.createComment({ + owner: repoParts.owner, + repo: repoParts.repo, + issue_number: effectiveParentIssueNumber, + body: `Created related issue: #${issue.number}`, + }); + core.info("✓ Added comment to parent issue #" + effectiveParentIssueNumber + " (sub-issue linking not available)"); + } catch (commentError) { + core.info(`Warning: Could not add comment to parent issue: ${commentError instanceof Error ? commentError.message : String(commentError)}`); + } + } + } else if (effectiveParentIssueNumber && effectiveParentRepo !== itemRepo) { + core.info(`Skipping sub-issue linking: parent is in different repository (${effectiveParentRepo})`); + } else { + core.info(`Debug: No parent issue number set, skipping sub-issue linking`); + } + if (i === createIssueItems.length - 1) { + core.setOutput("issue_number", issue.number); + core.setOutput("issue_url", issue.html_url); + } + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + if (errorMessage.includes("Issues has been disabled in this repository")) { + core.info(`⚠ Cannot create issue "${title}" in ${itemRepo}: Issues are disabled for this repository`); + core.info("Consider enabling issues in repository settings if you want to create issues automatically"); + continue; + } + core.error(`✗ Failed to create issue "${title}" in ${itemRepo}: ${errorMessage}`); + throw error; + } + } + if (createdIssues.length > 0) { + let summaryContent = "\n\n## GitHub Issues\n"; + for (const issue of createdIssues) { + const repoLabel = issue._repo !== defaultTargetRepo ? ` (${issue._repo})` : ""; + summaryContent += `- Issue #${issue.number}${repoLabel}: [${issue.title}](${issue.html_url})\n`; + } + await core.summary.addRaw(summaryContent).write(); + } + + // Output the temporary ID map as JSON for use by downstream jobs + const tempIdMapOutput = serializeTemporaryIdMap(temporaryIdMap); + core.setOutput("temporary_id_map", tempIdMapOutput); + core.info(`Temporary ID map: ${tempIdMapOutput}`); + + // Output issues that need copilot assignment for assign_to_agent job + // This is used when create-issue has assignees: [copilot] + const assignCopilot = process.env.GH_AW_ASSIGN_COPILOT === "true"; + if (assignCopilot && createdIssues.length > 0) { + // Format: repo:number for each issue (for cross-repo support) + const issuesToAssign = createdIssues.map(issue => `${issue._repo}:${issue.number}`).join(","); + core.setOutput("issues_to_assign_copilot", issuesToAssign); + core.info(`Issues to assign copilot: ${issuesToAssign}`); + } + + core.info(`Successfully created ${createdIssues.length} issue(s)`); +} + +module.exports = { main }; diff --git a/pkg/workflow/js/create_pr_review_comment.cjs b/pkg/workflow/js/create_pr_review_comment.cjs new file mode 100644 index 0000000000..65f6d6cb48 --- /dev/null +++ b/pkg/workflow/js/create_pr_review_comment.cjs @@ -0,0 +1,259 @@ +// @ts-check +/// + +const { loadAgentOutput } = require("./load_agent_output.cjs"); +const { generateStagedPreview } = require("./staged_preview.cjs"); +const { generateFooter } = require("./generate_footer.cjs"); +const { getRepositoryUrl } = require("./get_repository_url.cjs"); + +async function main() { + // Check if we're in staged mode + const isStaged = process.env.GH_AW_SAFE_OUTPUTS_STAGED === "true"; + + const result = loadAgentOutput(); + if (!result.success) { + return; + } + + // Find all create-pr-review-comment items + const reviewCommentItems = result.items.filter(/** @param {any} item */ item => item.type === "create_pull_request_review_comment"); + if (reviewCommentItems.length === 0) { + core.info("No create-pull-request-review-comment items found in agent output"); + return; + } + + core.info(`Found ${reviewCommentItems.length} create-pull-request-review-comment item(s)`); + + // If in staged mode, emit step summary instead of creating review comments + if (isStaged) { + await generateStagedPreview({ + title: "Create PR Review Comments", + description: "The following review comments would be created if staged mode was disabled:", + items: reviewCommentItems, + renderItem: (item, index) => { + let content = `#### Review Comment ${index + 1}\n`; + if (item.pull_request_number) { + const repoUrl = getRepositoryUrl(); + const pullUrl = `${repoUrl}/pull/${item.pull_request_number}`; + content += `**Target PR:** [#${item.pull_request_number}](${pullUrl})\n\n`; + } else { + content += `**Target:** Current PR\n\n`; + } + content += `**File:** ${item.path || "No path provided"}\n\n`; + content += `**Line:** ${item.line || "No line provided"}\n\n`; + if (item.start_line) { + content += `**Start Line:** ${item.start_line}\n\n`; + } + content += `**Side:** ${item.side || "RIGHT"}\n\n`; + content += `**Body:**\n${item.body || "No content provided"}\n\n`; + return content; + }, + }); + return; + } + + // Get the side configuration from environment variable + const defaultSide = process.env.GH_AW_PR_REVIEW_COMMENT_SIDE || "RIGHT"; + core.info(`Default comment side configuration: ${defaultSide}`); + + // Get the target configuration from environment variable + const commentTarget = process.env.GH_AW_PR_REVIEW_COMMENT_TARGET || "triggering"; + core.info(`PR review comment target configuration: ${commentTarget}`); + + // Check if we're in a pull request context, or an issue comment context on a PR + const isPRContext = + context.eventName === "pull_request" || + context.eventName === "pull_request_review" || + context.eventName === "pull_request_review_comment" || + (context.eventName === "issue_comment" && context.payload.issue && context.payload.issue.pull_request); + + // Validate context based on target configuration + if (commentTarget === "triggering" && !isPRContext) { + core.info('Target is "triggering" but not running in pull request context, skipping review comment creation'); + return; + } + + // Extract triggering context for footer generation + const triggeringIssueNumber = context.payload?.issue?.number && !context.payload?.issue?.pull_request ? context.payload.issue.number : undefined; + const triggeringPRNumber = context.payload?.pull_request?.number || (context.payload?.issue?.pull_request ? context.payload.issue.number : undefined); + const triggeringDiscussionNumber = context.payload?.discussion?.number; + + const createdComments = []; + + // Process each review comment item + for (let i = 0; i < reviewCommentItems.length; i++) { + const commentItem = reviewCommentItems[i]; + core.info( + `Processing create-pull-request-review-comment item ${i + 1}/${reviewCommentItems.length}: bodyLength=${commentItem.body ? commentItem.body.length : "undefined"}, path=${commentItem.path}, line=${commentItem.line}, startLine=${commentItem.start_line}` + ); + + // Validate required fields + if (!commentItem.path) { + core.info('Missing required field "path" in review comment item'); + continue; + } + + if (!commentItem.line || (typeof commentItem.line !== "number" && typeof commentItem.line !== "string")) { + core.info('Missing or invalid required field "line" in review comment item'); + continue; + } + + if (!commentItem.body || typeof commentItem.body !== "string") { + core.info('Missing or invalid required field "body" in review comment item'); + continue; + } + + // Determine the PR number for this review comment + let pullRequestNumber; + let pullRequest; + + if (commentTarget === "*") { + // For target "*", we need an explicit PR number from the comment item + if (commentItem.pull_request_number) { + pullRequestNumber = parseInt(commentItem.pull_request_number, 10); + if (isNaN(pullRequestNumber) || pullRequestNumber <= 0) { + core.info(`Invalid pull request number specified: ${commentItem.pull_request_number}`); + continue; + } + } else { + core.info('Target is "*" but no pull_request_number specified in comment item'); + continue; + } + } else if (commentTarget && commentTarget !== "triggering") { + // Explicit PR number specified in target + pullRequestNumber = parseInt(commentTarget, 10); + if (isNaN(pullRequestNumber) || pullRequestNumber <= 0) { + core.info(`Invalid pull request number in target configuration: ${commentTarget}`); + continue; + } + } else { + // Default behavior: use triggering PR + if (context.payload.pull_request) { + pullRequestNumber = context.payload.pull_request.number; + pullRequest = context.payload.pull_request; + } else if (context.payload.issue && context.payload.issue.pull_request) { + pullRequestNumber = context.payload.issue.number; + } else { + core.info("Pull request context detected but no pull request found in payload"); + continue; + } + } + + if (!pullRequestNumber) { + core.info("Could not determine pull request number"); + continue; + } + + // If we don't have the full PR details yet, fetch them + if (!pullRequest || !pullRequest.head || !pullRequest.head.sha) { + try { + const { data: fullPR } = await github.rest.pulls.get({ + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: pullRequestNumber, + }); + pullRequest = fullPR; + core.info(`Fetched full pull request details for PR #${pullRequestNumber}`); + } catch (error) { + core.info(`Failed to fetch pull request details for PR #${pullRequestNumber}: ${error instanceof Error ? error.message : String(error)}`); + continue; + } + } + + // Check if we have the commit SHA needed for creating review comments + if (!pullRequest || !pullRequest.head || !pullRequest.head.sha) { + core.info(`Pull request head commit SHA not found for PR #${pullRequestNumber} - cannot create review comment`); + continue; + } + + core.info(`Creating review comment on PR #${pullRequestNumber}`); + + // Parse line numbers + const line = parseInt(commentItem.line, 10); + if (isNaN(line) || line <= 0) { + core.info(`Invalid line number: ${commentItem.line}`); + continue; + } + + let startLine = undefined; + if (commentItem.start_line) { + startLine = parseInt(commentItem.start_line, 10); + if (isNaN(startLine) || startLine <= 0 || startLine > line) { + core.info(`Invalid start_line number: ${commentItem.start_line} (must be <= line: ${line})`); + continue; + } + } + + // Determine side (LEFT or RIGHT) + const side = commentItem.side || defaultSide; + if (side !== "LEFT" && side !== "RIGHT") { + core.info(`Invalid side value: ${side} (must be LEFT or RIGHT)`); + continue; + } + + // Extract body from the JSON item + let body = commentItem.body.trim(); + + // Add AI disclaimer with workflow name and run url + const workflowName = process.env.GH_AW_WORKFLOW_NAME || "Workflow"; + const workflowSource = process.env.GH_AW_WORKFLOW_SOURCE || ""; + const workflowSourceURL = process.env.GH_AW_WORKFLOW_SOURCE_URL || ""; + const runId = context.runId; + const githubServer = process.env.GITHUB_SERVER_URL || "https://github.com"; + const runUrl = context.payload.repository ? `${context.payload.repository.html_url}/actions/runs/${runId}` : `${githubServer}/${context.repo.owner}/${context.repo.repo}/actions/runs/${runId}`; + body += generateFooter(workflowName, runUrl, workflowSource, workflowSourceURL, triggeringIssueNumber, triggeringPRNumber, triggeringDiscussionNumber); + + core.info(`Creating review comment on PR #${pullRequestNumber} at ${commentItem.path}:${line}${startLine ? ` (lines ${startLine}-${line})` : ""} [${side}]`); + core.info(`Comment content length: ${body.length}`); + + try { + // Prepare the request parameters + /** @type {any} */ + const requestParams = { + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: pullRequestNumber, + body: body, + path: commentItem.path, + commit_id: pullRequest && pullRequest.head ? pullRequest.head.sha : "", // Required for creating review comments + line: line, + side: side, + }; + + // Add start_line for multi-line comments + if (startLine !== undefined) { + requestParams.start_line = startLine; + requestParams.start_side = side; // start_side should match side for consistency + } + + // Create the review comment using GitHub API + const { data: comment } = await github.rest.pulls.createReviewComment(requestParams); + + core.info("Created review comment #" + comment.id + ": " + comment.html_url); + createdComments.push(comment); + + // Set output for the last created comment (for backward compatibility) + if (i === reviewCommentItems.length - 1) { + core.setOutput("review_comment_id", comment.id); + core.setOutput("review_comment_url", comment.html_url); + } + } catch (error) { + core.error(`✗ Failed to create review comment: ${error instanceof Error ? error.message : String(error)}`); + throw error; + } + } + + // Write summary for all created comments + if (createdComments.length > 0) { + let summaryContent = "\n\n## GitHub PR Review Comments\n"; + for (const comment of createdComments) { + summaryContent += `- Review Comment #${comment.id}: [View Comment](${comment.html_url})\n`; + } + await core.summary.addRaw(summaryContent).write(); + } + + core.info(`Successfully created ${createdComments.length} review comment(s)`); + return createdComments; +} + +module.exports = { main }; diff --git a/pkg/workflow/js/create_pull_request.cjs b/pkg/workflow/js/create_pull_request.cjs new file mode 100644 index 0000000000..7436d2ce43 --- /dev/null +++ b/pkg/workflow/js/create_pull_request.cjs @@ -0,0 +1,684 @@ +// @ts-check +/// + +/** @type {typeof import("fs")} */ +const fs = require("fs"); +/** @type {typeof import("crypto")} */ +const crypto = require("crypto"); +const { updateActivationComment } = require("./update_activation_comment.cjs"); +const { getTrackerID } = require("./get_tracker_id.cjs"); +const { addExpirationComment } = require("./expiration_helpers.cjs"); +const { removeDuplicateTitleFromDescription } = require("./remove_duplicate_title.cjs"); + +/** + * Generate a patch preview with max 500 lines and 2000 chars for issue body + * @param {string} patchContent - The full patch content + * @returns {string} Formatted patch preview + */ +function generatePatchPreview(patchContent) { + if (!patchContent || !patchContent.trim()) { + return ""; + } + + const lines = patchContent.split("\n"); + const maxLines = 500; + const maxChars = 2000; + + // Apply line limit first + let preview = lines.length <= maxLines ? patchContent : lines.slice(0, maxLines).join("\n"); + const lineTruncated = lines.length > maxLines; + + // Apply character limit + const charTruncated = preview.length > maxChars; + if (charTruncated) { + preview = preview.slice(0, maxChars); + } + + const truncated = lineTruncated || charTruncated; + const summary = truncated ? `Show patch preview (${Math.min(maxLines, lines.length)} of ${lines.length} lines)` : `Show patch (${lines.length} lines)`; + + return `\n\n
${summary}\n\n\`\`\`diff\n${preview}${truncated ? "\n... (truncated)" : ""}\n\`\`\`\n\n
`; +} + +async function main() { + // Initialize outputs to empty strings to ensure they're always set + core.setOutput("pull_request_number", ""); + core.setOutput("pull_request_url", ""); + core.setOutput("issue_number", ""); + core.setOutput("issue_url", ""); + core.setOutput("branch_name", ""); + core.setOutput("fallback_used", ""); + + // Check if we're in staged mode + const isStaged = process.env.GH_AW_SAFE_OUTPUTS_STAGED === "true"; + + // Environment validation - fail early if required variables are missing + const workflowId = process.env.GH_AW_WORKFLOW_ID; + if (!workflowId) { + throw new Error("GH_AW_WORKFLOW_ID environment variable is required"); + } + + const baseBranch = process.env.GH_AW_BASE_BRANCH; + if (!baseBranch) { + throw new Error("GH_AW_BASE_BRANCH environment variable is required"); + } + + const agentOutputFile = process.env.GH_AW_AGENT_OUTPUT || ""; + + // Read agent output from file + let outputContent = ""; + if (agentOutputFile.trim() !== "") { + try { + outputContent = fs.readFileSync(agentOutputFile, "utf8"); + } catch (error) { + core.setFailed(`Error reading agent output file: ${error instanceof Error ? error.message : String(error)}`); + return; + } + } + + if (outputContent.trim() === "") { + core.info("Agent output content is empty"); + } + + const ifNoChanges = process.env.GH_AW_PR_IF_NO_CHANGES || "warn"; + const allowEmpty = (process.env.GH_AW_PR_ALLOW_EMPTY || "false").toLowerCase() === "true"; + + // Check if patch file exists and has valid content + if (!fs.existsSync("/tmp/gh-aw/aw.patch")) { + // If allow-empty is enabled, we can proceed without a patch file + if (allowEmpty) { + core.info("No patch file found, but allow-empty is enabled - will create empty PR"); + } else { + const message = "No patch file found - cannot create pull request without changes"; + + // If in staged mode, still show preview + if (isStaged) { + let summaryContent = "## 🎭 Staged Mode: Create Pull Request Preview\n\n"; + summaryContent += "The following pull request would be created if staged mode was disabled:\n\n"; + summaryContent += `**Status:** ⚠️ No patch file found\n\n`; + summaryContent += `**Message:** ${message}\n\n`; + + // Write to step summary + await core.summary.addRaw(summaryContent).write(); + core.info("📝 Pull request creation preview written to step summary (no patch file)"); + return; + } + + switch (ifNoChanges) { + case "error": + throw new Error(message); + case "ignore": + // Silent success - no console output + return; + case "warn": + default: + core.warning(message); + return; + } + } + } + + let patchContent = ""; + let isEmpty = true; + + if (fs.existsSync("/tmp/gh-aw/aw.patch")) { + patchContent = fs.readFileSync("/tmp/gh-aw/aw.patch", "utf8"); + isEmpty = !patchContent || !patchContent.trim(); + } + + // Check for actual error conditions (but allow empty patches as valid noop) + if (patchContent.includes("Failed to generate patch")) { + // If allow-empty is enabled, ignore patch errors and proceed + if (allowEmpty) { + core.info("Patch file contains error, but allow-empty is enabled - will create empty PR"); + patchContent = ""; + isEmpty = true; + } else { + const message = "Patch file contains error message - cannot create pull request without changes"; + + // If in staged mode, still show preview + if (isStaged) { + let summaryContent = "## 🎭 Staged Mode: Create Pull Request Preview\n\n"; + summaryContent += "The following pull request would be created if staged mode was disabled:\n\n"; + summaryContent += `**Status:** ⚠️ Patch file contains error\n\n`; + summaryContent += `**Message:** ${message}\n\n`; + + // Write to step summary + await core.summary.addRaw(summaryContent).write(); + core.info("📝 Pull request creation preview written to step summary (patch error)"); + return; + } + + switch (ifNoChanges) { + case "error": + throw new Error(message); + case "ignore": + // Silent success - no console output + return; + case "warn": + default: + core.warning(message); + return; + } + } + } + + // Validate patch size (unless empty) + if (!isEmpty) { + // Get maximum patch size from environment (default: 1MB = 1024 KB) + const maxSizeKb = parseInt(process.env.GH_AW_MAX_PATCH_SIZE || "1024", 10); + const patchSizeBytes = Buffer.byteLength(patchContent, "utf8"); + const patchSizeKb = Math.ceil(patchSizeBytes / 1024); + + core.info(`Patch size: ${patchSizeKb} KB (maximum allowed: ${maxSizeKb} KB)`); + + if (patchSizeKb > maxSizeKb) { + const message = `Patch size (${patchSizeKb} KB) exceeds maximum allowed size (${maxSizeKb} KB)`; + + // If in staged mode, still show preview with error + if (isStaged) { + let summaryContent = "## 🎭 Staged Mode: Create Pull Request Preview\n\n"; + summaryContent += "The following pull request would be created if staged mode was disabled:\n\n"; + summaryContent += `**Status:** ❌ Patch size exceeded\n\n`; + summaryContent += `**Message:** ${message}\n\n`; + + // Write to step summary + await core.summary.addRaw(summaryContent).write(); + core.info("📝 Pull request creation preview written to step summary (patch size error)"); + return; + } + + throw new Error(message); + } + + core.info("Patch size validation passed"); + } + + if (isEmpty && !isStaged && !allowEmpty) { + const message = "Patch file is empty - no changes to apply (noop operation)"; + + switch (ifNoChanges) { + case "error": + throw new Error("No changes to push - failing as configured by if-no-changes: error"); + case "ignore": + // Silent success - no console output + return; + case "warn": + default: + core.warning(message); + return; + } + } + + core.info(`Agent output content length: ${outputContent.length}`); + if (!isEmpty) { + core.info("Patch content validation passed"); + } else if (allowEmpty) { + core.info("Patch file is empty - processing empty PR creation (allow-empty is enabled)"); + } else { + core.info("Patch file is empty - processing noop operation"); + } + + // Parse the validated output JSON + let validatedOutput; + try { + validatedOutput = JSON.parse(outputContent); + } catch (error) { + core.setFailed(`Error parsing agent output JSON: ${error instanceof Error ? error.message : String(error)}`); + return; + } + + if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { + core.warning("No valid items found in agent output"); + return; + } + + // Find the create-pull-request item + const pullRequestItem = validatedOutput.items.find(/** @param {any} item */ item => item.type === "create_pull_request"); + if (!pullRequestItem) { + core.warning("No create-pull-request item found in agent output"); + return; + } + + core.info(`Found create-pull-request item: title="${pullRequestItem.title}", bodyLength=${pullRequestItem.body.length}`); + + // If in staged mode, emit step summary instead of creating PR + if (isStaged) { + let summaryContent = "## 🎭 Staged Mode: Create Pull Request Preview\n\n"; + summaryContent += "The following pull request would be created if staged mode was disabled:\n\n"; + + summaryContent += `**Title:** ${pullRequestItem.title || "No title provided"}\n\n`; + summaryContent += `**Branch:** ${pullRequestItem.branch || "auto-generated"}\n\n`; + summaryContent += `**Base:** ${baseBranch}\n\n`; + + if (pullRequestItem.body) { + summaryContent += `**Body:**\n${pullRequestItem.body}\n\n`; + } + + if (fs.existsSync("/tmp/gh-aw/aw.patch")) { + const patchStats = fs.readFileSync("/tmp/gh-aw/aw.patch", "utf8"); + if (patchStats.trim()) { + summaryContent += `**Changes:** Patch file exists with ${patchStats.split("\n").length} lines\n\n`; + summaryContent += `
Show patch preview\n\n\`\`\`diff\n${patchStats.slice(0, 2000)}${patchStats.length > 2000 ? "\n... (truncated)" : ""}\n\`\`\`\n\n
\n\n`; + } else { + summaryContent += `**Changes:** No changes (empty patch)\n\n`; + } + } + + // Write to step summary + await core.summary.addRaw(summaryContent).write(); + core.info("📝 Pull request creation preview written to step summary"); + return; + } + + // Extract title, body, and branch from the JSON item + let title = pullRequestItem.title.trim(); + let processedBody = pullRequestItem.body; + + // Remove duplicate title from description if it starts with a header matching the title + processedBody = removeDuplicateTitleFromDescription(title, processedBody); + + let bodyLines = processedBody.split("\n"); + let branchName = pullRequestItem.branch ? pullRequestItem.branch.trim() : null; + + // If no title was found, use a default + if (!title) { + title = "Agent Output"; + } + + // Apply title prefix if provided via environment variable + const titlePrefix = process.env.GH_AW_PR_TITLE_PREFIX; + if (titlePrefix && !title.startsWith(titlePrefix)) { + title = titlePrefix + title; + } + + // Add AI disclaimer with workflow name and run url + const workflowName = process.env.GH_AW_WORKFLOW_NAME || "Workflow"; + const runId = context.runId; + const githubServer = process.env.GITHUB_SERVER_URL || "https://github.com"; + const runUrl = context.payload.repository ? `${context.payload.repository.html_url}/actions/runs/${runId}` : `${githubServer}/${context.repo.owner}/${context.repo.repo}/actions/runs/${runId}`; + + // Add fingerprint comment if present + const trackerIDComment = getTrackerID("markdown"); + if (trackerIDComment) { + bodyLines.push(trackerIDComment); + } + + // Add expiration comment if expires is set (only for same-repo PRs) + addExpirationComment(bodyLines, "GH_AW_PR_EXPIRES", "Pull Request"); + + bodyLines.push(``, ``, `> AI generated by [${workflowName}](${runUrl})`, ""); + + // Prepare the body content + const body = bodyLines.join("\n").trim(); + + // Parse labels from environment variable (comma-separated string) + const labelsEnv = process.env.GH_AW_PR_LABELS; + const labels = labelsEnv + ? labelsEnv + .split(",") + .map(/** @param {string} label */ label => label.trim()) + .filter(/** @param {string} label */ label => label) + : []; + + // Parse draft setting from environment variable (defaults to true) + const draftEnv = process.env.GH_AW_PR_DRAFT; + const draft = draftEnv ? draftEnv.toLowerCase() === "true" : true; + + core.info(`Creating pull request with title: ${title}`); + core.info(`Labels: ${JSON.stringify(labels)}`); + core.info(`Draft: ${draft}`); + core.info(`Body length: ${body.length}`); + + const randomHex = crypto.randomBytes(8).toString("hex"); + // Use branch name from JSONL if provided, otherwise generate unique branch name + if (!branchName) { + core.info("No branch name provided in JSONL, generating unique branch name"); + // Generate unique branch name using cryptographic random hex + branchName = `${workflowId}-${randomHex}`; + } else { + branchName = `${branchName}-${randomHex}`; + core.info(`Using branch name from JSONL with added salt: ${branchName}`); + } + + core.info(`Generated branch name: ${branchName}`); + core.info(`Base branch: ${baseBranch}`); + + // Create a new branch using git CLI, ensuring it's based on the correct base branch + + // First, fetch the base branch specifically (since we use shallow checkout) + core.info(`Fetching base branch: ${baseBranch}`); + + // Fetch without creating/updating local branch to avoid conflicts with current branch + // This works even when we're already on the base branch + await exec.exec(`git fetch origin ${baseBranch}`); + + // Checkout the base branch (using origin/${baseBranch} if local doesn't exist) + try { + await exec.exec(`git checkout ${baseBranch}`); + } catch (checkoutError) { + // If local branch doesn't exist, create it from origin + core.info(`Local branch ${baseBranch} doesn't exist, creating from origin/${baseBranch}`); + await exec.exec(`git checkout -b ${baseBranch} origin/${baseBranch}`); + } + + // Handle branch creation/checkout + core.info(`Branch should not exist locally, creating new branch from base: ${branchName}`); + await exec.exec(`git checkout -b ${branchName}`); + core.info(`Created new branch from base: ${branchName}`); + + // Apply the patch using git CLI (skip if empty) + if (!isEmpty) { + core.info("Applying patch..."); + + // Log first 500 lines of patch for debugging + const patchLines = patchContent.split("\n"); + const previewLineCount = Math.min(500, patchLines.length); + core.info(`Patch preview (first ${previewLineCount} of ${patchLines.length} lines):`); + for (let i = 0; i < previewLineCount; i++) { + core.info(patchLines[i]); + } + + // Patches are created with git format-patch, so use git am to apply them + try { + await exec.exec("git am /tmp/gh-aw/aw.patch"); + core.info("Patch applied successfully"); + } catch (patchError) { + core.error(`Failed to apply patch: ${patchError instanceof Error ? patchError.message : String(patchError)}`); + + // Investigate why the patch failed by logging git status and the failed patch + try { + core.info("Investigating patch failure..."); + + // Log git status to see the current state + const statusResult = await exec.getExecOutput("git", ["status"]); + core.info("Git status output:"); + core.info(statusResult.stdout); + + // Log the failed patch diff + const patchResult = await exec.getExecOutput("git", ["am", "--show-current-patch=diff"]); + core.info("Failed patch content:"); + core.info(patchResult.stdout); + } catch (investigateError) { + core.warning(`Failed to investigate patch failure: ${investigateError instanceof Error ? investigateError.message : String(investigateError)}`); + } + + core.setFailed("Failed to apply patch"); + return; + } + + // Push the applied commits to the branch (with fallback to issue creation on failure) + try { + // Check if remote branch already exists (optional precheck) + let remoteBranchExists = false; + try { + const { stdout } = await exec.getExecOutput(`git ls-remote --heads origin ${branchName}`); + if (stdout.trim()) { + remoteBranchExists = true; + } + } catch (checkError) { + core.info(`Remote branch check failed (non-fatal): ${checkError instanceof Error ? checkError.message : String(checkError)}`); + } + + if (remoteBranchExists) { + core.warning(`Remote branch ${branchName} already exists - appending random suffix`); + const extraHex = crypto.randomBytes(4).toString("hex"); + const oldBranch = branchName; + branchName = `${branchName}-${extraHex}`; + // Rename local branch + await exec.exec(`git branch -m ${oldBranch} ${branchName}`); + core.info(`Renamed branch to ${branchName}`); + } + + await exec.exec(`git push origin ${branchName}`); + core.info("Changes pushed to branch"); + } catch (pushError) { + // Push failed - create fallback issue instead of PR + core.error(`Git push failed: ${pushError instanceof Error ? pushError.message : String(pushError)}`); + core.warning("Git push operation failed - creating fallback issue instead of pull request"); + + const runId = context.runId; + const githubServer = process.env.GITHUB_SERVER_URL || "https://github.com"; + const runUrl = context.payload.repository ? `${context.payload.repository.html_url}/actions/runs/${runId}` : `${githubServer}/${context.repo.owner}/${context.repo.repo}/actions/runs/${runId}`; + + // Read patch content for preview + let patchPreview = ""; + if (fs.existsSync("/tmp/gh-aw/aw.patch")) { + const patchContent = fs.readFileSync("/tmp/gh-aw/aw.patch", "utf8"); + patchPreview = generatePatchPreview(patchContent); + } + + const fallbackBody = `${body} + +--- + +> [!NOTE] +> This was originally intended as a pull request, but the git push operation failed. +> +> **Workflow Run:** [View run details and download patch artifact](${runUrl}) +> +> The patch file is available as an artifact (\`aw.patch\`) in the workflow run linked above. + +To apply the patch locally: + +\`\`\`sh +# Download the artifact from the workflow run ${runUrl} +# (Use GitHub MCP tools if gh CLI is not available) +gh run download ${runId} -n aw.patch + +# Apply the patch +git am aw.patch +\`\`\` +${patchPreview}`; + + try { + const { data: issue } = await github.rest.issues.create({ + owner: context.repo.owner, + repo: context.repo.repo, + title: title, + body: fallbackBody, + labels: labels, + }); + + core.info(`Created fallback issue #${issue.number}: ${issue.html_url}`); + + // Update the activation comment with issue link (if a comment was created) + await updateActivationComment(github, context, core, issue.html_url, issue.number, "issue"); + + // Set outputs for push failure fallback + core.setOutput("issue_number", issue.number); + core.setOutput("issue_url", issue.html_url); + core.setOutput("branch_name", branchName); + core.setOutput("fallback_used", "true"); + core.setOutput("push_failed", "true"); + + // Write summary to GitHub Actions summary + await core.summary + .addRaw( + ` + +## Push Failure Fallback +- **Push Error:** ${pushError instanceof Error ? pushError.message : String(pushError)} +- **Fallback Issue:** [#${issue.number}](${issue.html_url}) +- **Patch Artifact:** Available in workflow run artifacts +- **Note:** Push failed, created issue as fallback +` + ) + .write(); + + return; + } catch (issueError) { + core.setFailed( + `Failed to push and failed to create fallback issue. Push error: ${pushError instanceof Error ? pushError.message : String(pushError)}. Issue error: ${issueError instanceof Error ? issueError.message : String(issueError)}` + ); + return; + } + } + } else { + core.info("Skipping patch application (empty patch)"); + + // For empty patches with allow-empty, we still need to push the branch + if (allowEmpty) { + core.info("allow-empty is enabled - will create branch and push with empty commit"); + // Push the branch with an empty commit to allow PR creation + try { + // Create an empty commit to ensure there's a commit difference + await exec.exec(`git commit --allow-empty -m "Initialize"`); + core.info("Created empty commit"); + + // Check if remote branch already exists (optional precheck) + let remoteBranchExists = false; + try { + const { stdout } = await exec.getExecOutput(`git ls-remote --heads origin ${branchName}`); + if (stdout.trim()) { + remoteBranchExists = true; + } + } catch (checkError) { + core.info(`Remote branch check failed (non-fatal): ${checkError instanceof Error ? checkError.message : String(checkError)}`); + } + + if (remoteBranchExists) { + core.warning(`Remote branch ${branchName} already exists - appending random suffix`); + const extraHex = crypto.randomBytes(4).toString("hex"); + const oldBranch = branchName; + branchName = `${branchName}-${extraHex}`; + // Rename local branch + await exec.exec(`git branch -m ${oldBranch} ${branchName}`); + core.info(`Renamed branch to ${branchName}`); + } + + await exec.exec(`git push origin ${branchName}`); + core.info("Empty branch pushed successfully"); + } catch (pushError) { + core.setFailed(`Failed to push empty branch: ${pushError instanceof Error ? pushError.message : String(pushError)}`); + return; + } + } else { + // For empty patches without allow-empty, handle if-no-changes configuration + const message = "No changes to apply - noop operation completed successfully"; + + switch (ifNoChanges) { + case "error": + throw new Error("No changes to apply - failing as configured by if-no-changes: error"); + case "ignore": + // Silent success - no console output + return; + case "warn": + default: + core.warning(message); + return; + } + } + } + + // Try to create the pull request, with fallback to issue creation + try { + const { data: pullRequest } = await github.rest.pulls.create({ + owner: context.repo.owner, + repo: context.repo.repo, + title: title, + body: body, + head: branchName, + base: baseBranch, + draft: draft, + }); + + core.info(`Created pull request #${pullRequest.number}: ${pullRequest.html_url}`); + + // Add labels if specified + if (labels.length > 0) { + await github.rest.issues.addLabels({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: pullRequest.number, + labels: labels, + }); + core.info(`Added labels to pull request: ${JSON.stringify(labels)}`); + } + + // Set output for other jobs to use + core.setOutput("pull_request_number", pullRequest.number); + core.setOutput("pull_request_url", pullRequest.html_url); + core.setOutput("branch_name", branchName); + + // Update the activation comment with PR link (if a comment was created) + await updateActivationComment(github, context, core, pullRequest.html_url, pullRequest.number); + + // Write summary to GitHub Actions summary + await core.summary + .addRaw( + ` + +## Pull Request +- **Pull Request**: [#${pullRequest.number}](${pullRequest.html_url}) +- **Branch**: \`${branchName}\` +- **Base Branch**: \`${baseBranch}\` +` + ) + .write(); + } catch (prError) { + core.warning(`Failed to create pull request: ${prError instanceof Error ? prError.message : String(prError)}`); + core.info("Falling back to creating an issue instead"); + + // Create issue as fallback with enhanced body content + const githubServer = process.env.GITHUB_SERVER_URL || "https://github.com"; + const branchUrl = context.payload.repository ? `${context.payload.repository.html_url}/tree/${branchName}` : `${githubServer}/${context.repo.owner}/${context.repo.repo}/tree/${branchName}`; + + // Read patch content for preview + let patchPreview = ""; + if (fs.existsSync("/tmp/gh-aw/aw.patch")) { + const patchContent = fs.readFileSync("/tmp/gh-aw/aw.patch", "utf8"); + patchPreview = generatePatchPreview(patchContent); + } + + const fallbackBody = `${body} + +--- + +**Note:** This was originally intended as a pull request, but PR creation failed. The changes have been pushed to the branch [\`${branchName}\`](${branchUrl}). + +**Original error:** ${prError instanceof Error ? prError.message : String(prError)} + +You can manually create a pull request from the branch if needed.${patchPreview}`; + + try { + const { data: issue } = await github.rest.issues.create({ + owner: context.repo.owner, + repo: context.repo.repo, + title: title, + body: fallbackBody, + labels: labels, + }); + + core.info(`Created fallback issue #${issue.number}: ${issue.html_url}`); + + // Update the activation comment with issue link (if a comment was created) + await updateActivationComment(github, context, core, issue.html_url, issue.number, "issue"); + + // Set output for other jobs to use (issue instead of PR) + core.setOutput("issue_number", issue.number); + core.setOutput("issue_url", issue.html_url); + core.setOutput("branch_name", branchName); + core.setOutput("fallback_used", "true"); + + // Write summary to GitHub Actions summary + await core.summary + .addRaw( + ` + +## Fallback Issue Created +- **Issue**: [#${issue.number}](${issue.html_url}) +- **Branch**: [\`${branchName}\`](${branchUrl}) +- **Base Branch**: \`${baseBranch}\` +- **Note**: Pull request creation failed, created issue as fallback +` + ) + .write(); + } catch (issueError) { + core.setFailed(`Failed to create both pull request and fallback issue. PR error: ${prError instanceof Error ? prError.message : String(prError)}. Issue error: ${issueError instanceof Error ? issueError.message : String(issueError)}`); + return; + } + } +} + +module.exports = { main }; diff --git a/pkg/workflow/js/estimate_tokens.cjs b/pkg/workflow/js/estimate_tokens.cjs new file mode 100644 index 0000000000..a5fc23d08b --- /dev/null +++ b/pkg/workflow/js/estimate_tokens.cjs @@ -0,0 +1,16 @@ +// @ts-check +/// + +/** + * Estimates token count from text using 4 chars per token estimate + * @param {string} text - The text to estimate tokens for + * @returns {number} Approximate token count + */ +function estimateTokens(text) { + if (!text) return 0; + return Math.ceil(text.length / 4); +} + +module.exports = { + estimateTokens, +}; diff --git a/pkg/workflow/js/expiration_helpers.cjs b/pkg/workflow/js/expiration_helpers.cjs new file mode 100644 index 0000000000..fa405ee761 --- /dev/null +++ b/pkg/workflow/js/expiration_helpers.cjs @@ -0,0 +1,27 @@ +// @ts-check +/// + +/** + * Add expiration XML comment to body lines if expires is set + * @param {string[]} bodyLines - Array of body lines to append to + * @param {string} envVarName - Name of the environment variable containing expires days (e.g., "GH_AW_DISCUSSION_EXPIRES") + * @param {string} entityType - Type of entity for logging (e.g., "Discussion", "Issue", "Pull Request") + * @returns {void} + */ +function addExpirationComment(bodyLines, envVarName, entityType) { + const expiresEnv = process.env[envVarName]; + if (expiresEnv) { + const expiresDays = parseInt(expiresEnv, 10); + if (!isNaN(expiresDays) && expiresDays > 0) { + const expirationDate = new Date(); + expirationDate.setDate(expirationDate.getDate() + expiresDays); + const expirationISO = expirationDate.toISOString(); + bodyLines.push(``); + core.info(`${entityType} will expire on ${expirationISO} (${expiresDays} days)`); + } + } +} + +module.exports = { + addExpirationComment, +}; diff --git a/pkg/workflow/js/generate_compact_schema.cjs b/pkg/workflow/js/generate_compact_schema.cjs new file mode 100644 index 0000000000..f990c75e2c --- /dev/null +++ b/pkg/workflow/js/generate_compact_schema.cjs @@ -0,0 +1,43 @@ +// @ts-check +/// + +/** + * Generates a compact schema description from JSON content + * @param {string} content - The JSON content to analyze + * @returns {string} Compact schema description for jq/agent + */ +function generateCompactSchema(content) { + try { + const parsed = JSON.parse(content); + + // Generate a compact schema based on the structure + if (Array.isArray(parsed)) { + if (parsed.length === 0) { + return "[]"; + } + // For arrays, describe the first element's structure + const firstItem = parsed[0]; + if (typeof firstItem === "object" && firstItem !== null) { + const keys = Object.keys(firstItem); + return `[{${keys.join(", ")}}] (${parsed.length} items)`; + } + return `[${typeof firstItem}] (${parsed.length} items)`; + } else if (typeof parsed === "object" && parsed !== null) { + // For objects, list top-level keys + const keys = Object.keys(parsed); + if (keys.length > 10) { + return `{${keys.slice(0, 10).join(", ")}, ...} (${keys.length} keys)`; + } + return `{${keys.join(", ")}}`; + } + + return `${typeof parsed}`; + } catch { + // If not valid JSON, return generic description + return "text content"; + } +} + +module.exports = { + generateCompactSchema, +}; diff --git a/pkg/workflow/js/generate_footer.cjs b/pkg/workflow/js/generate_footer.cjs new file mode 100644 index 0000000000..bd0426e6ed --- /dev/null +++ b/pkg/workflow/js/generate_footer.cjs @@ -0,0 +1,94 @@ +// @ts-check +/// + +/** + * Generates an XML comment marker with agentic workflow metadata for traceability. + * This marker enables searching and tracing back items generated by an agentic workflow. + * + * Note: This function is duplicated in messages_footer.cjs. While normally we would + * consolidate to a shared module, importing messages_footer.cjs here would cause the + * bundler to inline messages_core.cjs which contains 'GH_AW_SAFE_OUTPUT_MESSAGES:' in + * a warning message, breaking tests that check for env var declarations. + * + * @param {string} workflowName - Name of the workflow + * @param {string} runUrl - URL of the workflow run + * @returns {string} XML comment marker with workflow metadata + */ +function generateXMLMarker(workflowName, runUrl) { + // Read engine metadata from environment variables + const engineId = process.env.GH_AW_ENGINE_ID || ""; + const engineVersion = process.env.GH_AW_ENGINE_VERSION || ""; + const engineModel = process.env.GH_AW_ENGINE_MODEL || ""; + const trackerId = process.env.GH_AW_TRACKER_ID || ""; + + // Build the key-value pairs for the marker + const parts = []; + + // Always include agentic-workflow name + parts.push(`agentic-workflow: ${workflowName}`); + + // Add tracker-id if available (for searchability and tracing) + if (trackerId) { + parts.push(`tracker-id: ${trackerId}`); + } + + // Add engine ID if available + if (engineId) { + parts.push(`engine: ${engineId}`); + } + + // Add version if available + if (engineVersion) { + parts.push(`version: ${engineVersion}`); + } + + // Add model if available + if (engineModel) { + parts.push(`model: ${engineModel}`); + } + + // Always include run URL + parts.push(`run: ${runUrl}`); + + // Return the XML comment marker + return ``; +} + +/** + * Generate footer with AI attribution and workflow installation instructions + * @param {string} workflowName - Name of the workflow + * @param {string} runUrl - URL of the workflow run + * @param {string} workflowSource - Source of the workflow (owner/repo/path@ref) + * @param {string} workflowSourceURL - GitHub URL for the workflow source + * @param {number|undefined} triggeringIssueNumber - Issue number that triggered this workflow + * @param {number|undefined} triggeringPRNumber - Pull request number that triggered this workflow + * @param {number|undefined} triggeringDiscussionNumber - Discussion number that triggered this workflow + * @returns {string} Footer text + */ +function generateFooter(workflowName, runUrl, workflowSource, workflowSourceURL, triggeringIssueNumber, triggeringPRNumber, triggeringDiscussionNumber) { + let footer = `\n\n> AI generated by [${workflowName}](${runUrl})`; + + // Add reference to triggering issue/PR/discussion if available + if (triggeringIssueNumber) { + footer += ` for #${triggeringIssueNumber}`; + } else if (triggeringPRNumber) { + footer += ` for #${triggeringPRNumber}`; + } else if (triggeringDiscussionNumber) { + footer += ` for discussion #${triggeringDiscussionNumber}`; + } + + if (workflowSource && workflowSourceURL) { + footer += `\n>\n> To add this workflow in your repository, run \`gh aw add ${workflowSource}\`. See [usage guide](https://githubnext.github.io/gh-aw/tools/cli/).`; + } + + // Add XML comment marker for traceability + footer += "\n\n" + generateXMLMarker(workflowName, runUrl); + + footer += "\n"; + return footer; +} + +module.exports = { + generateFooter, + generateXMLMarker, +}; diff --git a/pkg/workflow/js/generate_git_patch.cjs b/pkg/workflow/js/generate_git_patch.cjs new file mode 100644 index 0000000000..af9654f28f --- /dev/null +++ b/pkg/workflow/js/generate_git_patch.cjs @@ -0,0 +1,141 @@ +// @ts-check +/// + +const fs = require("fs"); +const path = require("path"); +const { execSync } = require("child_process"); + +const { getBaseBranch } = require("./get_base_branch.cjs"); + +/** + * Generates a git patch file for the current changes + * @param {string} branchName - The branch name to generate patch for + * @returns {Object} Object with patch info or error + */ +function generateGitPatch(branchName) { + const patchPath = "/tmp/gh-aw/aw.patch"; + const cwd = process.env.GITHUB_WORKSPACE || process.cwd(); + const defaultBranch = process.env.DEFAULT_BRANCH || getBaseBranch(); + const githubSha = process.env.GITHUB_SHA; + + // Ensure /tmp/gh-aw directory exists + const patchDir = path.dirname(patchPath); + if (!fs.existsSync(patchDir)) { + fs.mkdirSync(patchDir, { recursive: true }); + } + + let patchGenerated = false; + let errorMessage = null; + + try { + // Strategy 1: If we have a branch name, check if that branch exists and get its diff + if (branchName) { + // Check if the branch exists locally + try { + execSync(`git show-ref --verify --quiet refs/heads/${branchName}`, { cwd, encoding: "utf8" }); + + // Determine base ref for patch generation + let baseRef; + try { + // Check if origin/branchName exists + execSync(`git show-ref --verify --quiet refs/remotes/origin/${branchName}`, { cwd, encoding: "utf8" }); + baseRef = `origin/${branchName}`; + } catch { + // Use merge-base with default branch + execSync(`git fetch origin ${defaultBranch}`, { cwd, encoding: "utf8" }); + baseRef = execSync(`git merge-base origin/${defaultBranch} ${branchName}`, { cwd, encoding: "utf8" }).trim(); + } + + // Count commits to be included + const commitCount = parseInt(execSync(`git rev-list --count ${baseRef}..${branchName}`, { cwd, encoding: "utf8" }).trim(), 10); + + if (commitCount > 0) { + // Generate patch from the determined base to the branch + const patchContent = execSync(`git format-patch ${baseRef}..${branchName} --stdout`, { + cwd, + encoding: "utf8", + }); + + if (patchContent && patchContent.trim()) { + fs.writeFileSync(patchPath, patchContent, "utf8"); + patchGenerated = true; + } + } + } catch (branchError) { + // Branch does not exist locally + } + } + + // Strategy 2: Check if commits were made to current HEAD since checkout + if (!patchGenerated) { + const currentHead = execSync("git rev-parse HEAD", { cwd, encoding: "utf8" }).trim(); + + if (!githubSha) { + errorMessage = "GITHUB_SHA environment variable is not set"; + } else if (currentHead === githubSha) { + // No commits have been made since checkout + } else { + // Check if GITHUB_SHA is an ancestor of current HEAD + try { + execSync(`git merge-base --is-ancestor ${githubSha} HEAD`, { cwd, encoding: "utf8" }); + + // Count commits between GITHUB_SHA and HEAD + const commitCount = parseInt(execSync(`git rev-list --count ${githubSha}..HEAD`, { cwd, encoding: "utf8" }).trim(), 10); + + if (commitCount > 0) { + // Generate patch from GITHUB_SHA to HEAD + const patchContent = execSync(`git format-patch ${githubSha}..HEAD --stdout`, { + cwd, + encoding: "utf8", + }); + + if (patchContent && patchContent.trim()) { + fs.writeFileSync(patchPath, patchContent, "utf8"); + patchGenerated = true; + } + } + } catch { + // GITHUB_SHA is not an ancestor of HEAD - repository state has diverged + } + } + } + } catch (error) { + errorMessage = `Failed to generate patch: ${error instanceof Error ? error.message : String(error)}`; + } + + // Check if patch was generated and has content + if (patchGenerated && fs.existsSync(patchPath)) { + const patchContent = fs.readFileSync(patchPath, "utf8"); + const patchSize = Buffer.byteLength(patchContent, "utf8"); + const patchLines = patchContent.split("\n").length; + + if (!patchContent.trim()) { + // Empty patch + return { + success: false, + error: "No changes to commit - patch is empty", + patchPath: patchPath, + patchSize: 0, + patchLines: 0, + }; + } + + return { + success: true, + patchPath: patchPath, + patchSize: patchSize, + patchLines: patchLines, + }; + } + + // No patch generated + return { + success: false, + error: errorMessage || "No changes to commit - no commits found", + patchPath: patchPath, + }; +} + +module.exports = { + generateGitPatch, +}; diff --git a/pkg/workflow/js/generate_safe_inputs_config.cjs b/pkg/workflow/js/generate_safe_inputs_config.cjs new file mode 100644 index 0000000000..8e09bae044 --- /dev/null +++ b/pkg/workflow/js/generate_safe_inputs_config.cjs @@ -0,0 +1,34 @@ +// @ts-check +/// + +/** + * Generates configuration for the Safe Inputs MCP HTTP server + * @param {object} params - Parameters for config generation + * @param {typeof import("@actions/core")} params.core - GitHub Actions core library + * @param {typeof import("crypto")} params.crypto - Node.js crypto library + * @returns {{apiKey: string, port: number}} Generated configuration + */ +function generateSafeInputsConfig({ core, crypto }) { + // Generate a secure random API key for the MCP server + // Using 45 bytes gives us 360 bits of entropy and ensures at least 40 characters + // after base64 encoding and removing special characters (base64 of 45 bytes = 60 chars) + const apiKeyBuffer = crypto.randomBytes(45); + const apiKey = apiKeyBuffer.toString("base64").replace(/[/+=]/g, ""); + + // Choose a port for the HTTP server (default 3000) + const port = 3000; + + // Set outputs with descriptive names to avoid conflicts + core.setOutput("safe_inputs_api_key", apiKey); + core.setOutput("safe_inputs_port", port.toString()); + + core.info(`Safe Inputs MCP server will run on port ${port}`); + + return { apiKey, port }; +} + +if (typeof module !== "undefined" && module.exports) { + module.exports = { + generateSafeInputsConfig, + }; +} diff --git a/pkg/workflow/js/get_base_branch.cjs b/pkg/workflow/js/get_base_branch.cjs new file mode 100644 index 0000000000..ded46f56b5 --- /dev/null +++ b/pkg/workflow/js/get_base_branch.cjs @@ -0,0 +1,14 @@ +// @ts-check +/// + +/** + * Get the base branch name from environment variable + * @returns {string} The base branch name (defaults to "main") + */ +function getBaseBranch() { + return process.env.GH_AW_BASE_BRANCH || "main"; +} + +module.exports = { + getBaseBranch, +}; diff --git a/pkg/workflow/js/get_current_branch.cjs b/pkg/workflow/js/get_current_branch.cjs new file mode 100644 index 0000000000..b0a5e0f2a2 --- /dev/null +++ b/pkg/workflow/js/get_current_branch.cjs @@ -0,0 +1,44 @@ +// @ts-check +/// + +const { execSync } = require("child_process"); + +/** + * Get the current git branch name + * @returns {string} The current branch name + */ +function getCurrentBranch() { + // Priority 1: Try git command first to get the actual checked-out branch + // This is more reliable than environment variables which may not reflect + // branch changes made during the workflow execution + const cwd = process.env.GITHUB_WORKSPACE || process.cwd(); + try { + const branch = execSync("git rev-parse --abbrev-ref HEAD", { + encoding: "utf8", + cwd: cwd, + }).trim(); + return branch; + } catch (error) { + // Ignore error and try fallback + } + + // Priority 2: Fallback to GitHub Actions environment variables + // GITHUB_HEAD_REF is set for pull_request events and contains the source branch name + // GITHUB_REF_NAME is set for all events and contains the branch/tag name + const ghHeadRef = process.env.GITHUB_HEAD_REF; + const ghRefName = process.env.GITHUB_REF_NAME; + + if (ghHeadRef) { + return ghHeadRef; + } + + if (ghRefName) { + return ghRefName; + } + + throw new Error("Failed to determine current branch: git command failed and no GitHub environment variables available"); +} + +module.exports = { + getCurrentBranch, +}; diff --git a/pkg/workflow/js/get_repository_url.cjs b/pkg/workflow/js/get_repository_url.cjs new file mode 100644 index 0000000000..d9f5556b47 --- /dev/null +++ b/pkg/workflow/js/get_repository_url.cjs @@ -0,0 +1,29 @@ +// @ts-check +/// + +/** + * Get the repository URL for different purposes + * This helper handles trial mode where target repository URLs are different from execution context + * @returns {string} Repository URL + */ +function getRepositoryUrl() { + // For trial mode, use target repository for issue/PR URLs but execution context for action runs + const targetRepoSlug = process.env.GH_AW_TARGET_REPO_SLUG; + + if (targetRepoSlug) { + // Use target repository for issue/PR URLs in trial mode + const githubServer = process.env.GITHUB_SERVER_URL || "https://github.com"; + return `${githubServer}/${targetRepoSlug}`; + } else if (context.payload.repository?.html_url) { + // Use execution context repository (default behavior) + return context.payload.repository.html_url; + } else { + // Final fallback for action runs when context repo is not available + const githubServer = process.env.GITHUB_SERVER_URL || "https://github.com"; + return `${githubServer}/${context.repo.owner}/${context.repo.repo}`; + } +} + +module.exports = { + getRepositoryUrl, +}; diff --git a/pkg/workflow/js/get_tracker_id.cjs b/pkg/workflow/js/get_tracker_id.cjs new file mode 100644 index 0000000000..418f0b3287 --- /dev/null +++ b/pkg/workflow/js/get_tracker_id.cjs @@ -0,0 +1,20 @@ +// @ts-check +/// + +/** + * Get tracker-id from environment variable, log it, and optionally format it + * @param {string} [format] - Output format: "markdown" for HTML comment, "text" for plain text, or undefined for raw value + * @returns {string} Tracker ID in requested format or empty string + */ +function getTrackerID(format) { + const trackerID = process.env.GH_AW_TRACKER_ID || ""; + if (trackerID) { + core.info(`Tracker ID: ${trackerID}`); + return format === "markdown" ? `\n\n` : trackerID; + } + return ""; +} + +module.exports = { + getTrackerID, +}; diff --git a/pkg/workflow/js/hide_comment.cjs b/pkg/workflow/js/hide_comment.cjs new file mode 100644 index 0000000000..c63dbf1291 --- /dev/null +++ b/pkg/workflow/js/hide_comment.cjs @@ -0,0 +1,121 @@ +// @ts-check +/// + +const { loadAgentOutput } = require("./load_agent_output.cjs"); + +/** + * Hide a comment using the GraphQL API. + * @param {any} github - GitHub GraphQL instance + * @param {string} nodeId - Comment node ID (e.g., 'IC_kwDOABCD123456') + * @param {string} reason - Reason for hiding (default: spam) + * @returns {Promise<{id: string, isMinimized: boolean}>} Hidden comment details + */ +async function hideComment(github, nodeId, reason = "spam") { + const query = /* GraphQL */ ` + mutation ($nodeId: ID!, $classifier: ReportedContentClassifiers!) { + minimizeComment(input: { subjectId: $nodeId, classifier: $classifier }) { + minimizedComment { + isMinimized + } + } + } + `; + + const result = await github.graphql(query, { nodeId, classifier: reason }); + + return { + id: nodeId, + isMinimized: result.minimizeComment.minimizedComment.isMinimized, + }; +} + +async function main() { + // Check if we're in staged mode + const isStaged = process.env.GH_AW_SAFE_OUTPUTS_STAGED === "true"; + + // Parse allowed reasons from environment variable + let allowedReasons = null; + if (process.env.GH_AW_HIDE_COMMENT_ALLOWED_REASONS) { + try { + allowedReasons = JSON.parse(process.env.GH_AW_HIDE_COMMENT_ALLOWED_REASONS); + core.info(`Allowed reasons for hiding: [${allowedReasons.join(", ")}]`); + } catch (error) { + core.warning(`Failed to parse GH_AW_HIDE_COMMENT_ALLOWED_REASONS: ${error instanceof Error ? error.message : String(error)}`); + } + } + + const result = loadAgentOutput(); + if (!result.success) { + return; + } + + // Find all hide-comment items + const hideCommentItems = result.items.filter(/** @param {any} item */ item => item.type === "hide_comment"); + if (hideCommentItems.length === 0) { + core.info("No hide-comment items found in agent output"); + return; + } + + core.info(`Found ${hideCommentItems.length} hide-comment item(s)`); + + // If in staged mode, emit step summary instead of hiding comments + if (isStaged) { + let summaryContent = "## 🎭 Staged Mode: Hide Comments Preview\n\n"; + summaryContent += "The following comments would be hidden if staged mode was disabled:\n\n"; + + for (let i = 0; i < hideCommentItems.length; i++) { + const item = hideCommentItems[i]; + const reason = item.reason || "spam"; + summaryContent += `### Comment ${i + 1}\n`; + summaryContent += `**Node ID**: ${item.comment_id}\n`; + summaryContent += `**Action**: Would be hidden as ${reason}\n`; + summaryContent += "\n"; + } + + core.summary.addRaw(summaryContent).write(); + return; + } + + // Process each hide-comment item + for (const item of hideCommentItems) { + try { + const commentId = item.comment_id; + if (!commentId || typeof commentId !== "string") { + throw new Error("comment_id is required and must be a string (GraphQL node ID)"); + } + + const reason = item.reason || "spam"; + + // Normalize reason to uppercase for GitHub API + const normalizedReason = reason.toUpperCase(); + + // Validate reason against allowed reasons if specified (case-insensitive) + if (allowedReasons && allowedReasons.length > 0) { + const normalizedAllowedReasons = allowedReasons.map(r => r.toUpperCase()); + if (!normalizedAllowedReasons.includes(normalizedReason)) { + core.warning(`Reason "${reason}" is not in allowed-reasons list [${allowedReasons.join(", ")}]. Skipping comment ${commentId}.`); + continue; + } + } + + core.info(`Hiding comment: ${commentId} (reason: ${normalizedReason})`); + + const hideResult = await hideComment(github, commentId, normalizedReason); + + if (hideResult.isMinimized) { + core.info(`Successfully hidden comment: ${commentId}`); + core.setOutput("comment_id", commentId); + core.setOutput("is_hidden", "true"); + } else { + throw new Error(`Failed to hide comment: ${commentId}`); + } + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + core.error(`Failed to hide comment: ${errorMessage}`); + core.setFailed(`Failed to hide comment: ${errorMessage}`); + return; + } + } +} + +module.exports = { main }; diff --git a/pkg/workflow/js/interpolate_prompt.cjs b/pkg/workflow/js/interpolate_prompt.cjs new file mode 100644 index 0000000000..3b7d798c32 --- /dev/null +++ b/pkg/workflow/js/interpolate_prompt.cjs @@ -0,0 +1,125 @@ +// @ts-check +/// + +// interpolate_prompt.cjs +// Interpolates GitHub Actions expressions and renders template conditionals in the prompt file. +// This combines variable interpolation and template filtering into a single step. + +const fs = require("fs"); +const { isTruthy } = require("./is_truthy.cjs"); +const { processRuntimeImports } = require("./runtime_import.cjs"); + +/** + * Interpolates variables in the prompt content + * @param {string} content - The prompt content with ${GH_AW_EXPR_*} placeholders + * @param {Record} variables - Map of variable names to their values + * @returns {string} - The interpolated content + */ +function interpolateVariables(content, variables) { + let result = content; + + // Replace each ${VAR_NAME} with its corresponding value + for (const [varName, value] of Object.entries(variables)) { + const pattern = new RegExp(`\\$\\{${varName}\\}`, "g"); + result = result.replace(pattern, value); + } + + return result; +} + +/** + * Renders a Markdown template by processing {{#if}} conditional blocks. + * When a conditional block is removed (falsy condition) and the template tags + * were on their own lines, the empty lines are cleaned up to avoid + * leaving excessive blank lines in the output. + * @param {string} markdown - The markdown content to process + * @returns {string} - The processed markdown content + */ +function renderMarkdownTemplate(markdown) { + // First pass: Handle blocks where tags are on their own lines + // Captures: (leading newline)(opening tag line)(condition)(body)(closing tag line)(trailing newline) + let result = markdown.replace(/(\n?)([ \t]*{{#if\s+([^}]*)}}[ \t]*\n)([\s\S]*?)([ \t]*{{\/if}}[ \t]*)(\n?)/g, (match, leadNL, openLine, cond, body, closeLine, trailNL) => { + if (isTruthy(cond)) { + // Keep body with leading newline if there was one before the opening tag + return leadNL + body; + } else { + // Remove entire block completely - the line containing the template is removed + return ""; + } + }); + + // Second pass: Handle inline conditionals (tags not on their own lines) + result = result.replace(/{{#if\s+([^}]*)}}([\s\S]*?){{\/if}}/g, (_, cond, body) => (isTruthy(cond) ? body : "")); + + // Clean up excessive blank lines (more than one blank line = 2 newlines) + result = result.replace(/\n{3,}/g, "\n\n"); + + return result; +} + +/** + * Main function for prompt variable interpolation and template rendering + */ +async function main() { + try { + const promptPath = process.env.GH_AW_PROMPT; + if (!promptPath) { + core.setFailed("GH_AW_PROMPT environment variable is not set"); + return; + } + + // Get the workspace directory for runtime imports + const workspaceDir = process.env.GITHUB_WORKSPACE; + if (!workspaceDir) { + core.setFailed("GITHUB_WORKSPACE environment variable is not set"); + return; + } + + // Read the prompt file + let content = fs.readFileSync(promptPath, "utf8"); + + // Step 1: Process runtime imports + const hasRuntimeImports = /{{#runtime-import\??[ \t]+[^\}]+}}/.test(content); + if (hasRuntimeImports) { + core.info("Processing runtime import macros"); + content = processRuntimeImports(content, workspaceDir); + core.info("Runtime imports processed successfully"); + } else { + core.info("No runtime import macros found, skipping runtime import processing"); + } + + // Step 2: Interpolate variables + const variables = {}; + for (const [key, value] of Object.entries(process.env)) { + if (key.startsWith("GH_AW_EXPR_")) { + variables[key] = value || ""; + } + } + + const varCount = Object.keys(variables).length; + if (varCount > 0) { + core.info(`Found ${varCount} expression variable(s) to interpolate`); + content = interpolateVariables(content, variables); + core.info(`Successfully interpolated ${varCount} variable(s) in prompt`); + } else { + core.info("No expression variables found, skipping interpolation"); + } + + // Step 3: Render template conditionals + const hasConditionals = /{{#if\s+[^}]+}}/.test(content); + if (hasConditionals) { + core.info("Processing conditional template blocks"); + content = renderMarkdownTemplate(content); + core.info("Template rendered successfully"); + } else { + core.info("No conditional blocks found in prompt, skipping template rendering"); + } + + // Write back to the same file + fs.writeFileSync(promptPath, content, "utf8"); + } catch (error) { + core.setFailed(error instanceof Error ? error.message : String(error)); + } +} + +module.exports = { main }; diff --git a/pkg/workflow/js/is_truthy.cjs b/pkg/workflow/js/is_truthy.cjs new file mode 100644 index 0000000000..84207526da --- /dev/null +++ b/pkg/workflow/js/is_truthy.cjs @@ -0,0 +1,12 @@ +// @ts-check +/** + * Determines if a value is truthy according to template logic + * @param {string} expr - The expression to evaluate + * @returns {boolean} - Whether the expression is truthy + */ +function isTruthy(expr) { + const v = expr.trim().toLowerCase(); + return !(v === "" || v === "false" || v === "0" || v === "null" || v === "undefined"); +} + +module.exports = { isTruthy }; diff --git a/pkg/workflow/js/link_sub_issue.cjs b/pkg/workflow/js/link_sub_issue.cjs new file mode 100644 index 0000000000..d97e486663 --- /dev/null +++ b/pkg/workflow/js/link_sub_issue.cjs @@ -0,0 +1,361 @@ +// @ts-check +/// + +const { loadAgentOutput } = require("./load_agent_output.cjs"); +const { generateStagedPreview } = require("./staged_preview.cjs"); +const { loadTemporaryIdMap, resolveIssueNumber } = require("./temporary_id.cjs"); + +async function main() { + const result = loadAgentOutput(); + if (!result.success) { + return; + } + + const linkItems = result.items.filter(item => item.type === "link_sub_issue"); + if (linkItems.length === 0) { + core.info("No link_sub_issue items found in agent output"); + return; + } + + core.info(`Found ${linkItems.length} link_sub_issue item(s)`); + + // Load the temporary ID map from create_issue job + const temporaryIdMap = loadTemporaryIdMap(); + if (temporaryIdMap.size > 0) { + core.info(`Loaded temporary ID map with ${temporaryIdMap.size} entries`); + } + + // Check if we're in staged mode + if (process.env.GH_AW_SAFE_OUTPUTS_STAGED === "true") { + await generateStagedPreview({ + title: "Link Sub-Issue", + description: "The following sub-issue links would be created if staged mode was disabled:", + items: linkItems, + renderItem: item => { + // Resolve temporary IDs for display + const parentResolved = resolveIssueNumber(item.parent_issue_number, temporaryIdMap); + const subResolved = resolveIssueNumber(item.sub_issue_number, temporaryIdMap); + + let parentDisplay = parentResolved.resolved ? `${parentResolved.resolved.repo}#${parentResolved.resolved.number}` : `${item.parent_issue_number} (unresolved)`; + let subDisplay = subResolved.resolved ? `${subResolved.resolved.repo}#${subResolved.resolved.number}` : `${item.sub_issue_number} (unresolved)`; + + if (parentResolved.wasTemporaryId && parentResolved.resolved) { + parentDisplay += ` (from ${item.parent_issue_number})`; + } + if (subResolved.wasTemporaryId && subResolved.resolved) { + subDisplay += ` (from ${item.sub_issue_number})`; + } + + let content = `**Parent Issue:** ${parentDisplay}\n`; + content += `**Sub-Issue:** ${subDisplay}\n\n`; + return content; + }, + }); + return; + } + + // Get filter configurations + const parentRequiredLabelsEnv = process.env.GH_AW_LINK_SUB_ISSUE_PARENT_REQUIRED_LABELS?.trim(); + const parentRequiredLabels = parentRequiredLabelsEnv + ? parentRequiredLabelsEnv + .split(",") + .map(l => l.trim()) + .filter(l => l) + : []; + + const parentTitlePrefix = process.env.GH_AW_LINK_SUB_ISSUE_PARENT_TITLE_PREFIX?.trim() || ""; + + const subRequiredLabelsEnv = process.env.GH_AW_LINK_SUB_ISSUE_SUB_REQUIRED_LABELS?.trim(); + const subRequiredLabels = subRequiredLabelsEnv + ? subRequiredLabelsEnv + .split(",") + .map(l => l.trim()) + .filter(l => l) + : []; + + const subTitlePrefix = process.env.GH_AW_LINK_SUB_ISSUE_SUB_TITLE_PREFIX?.trim() || ""; + + if (parentRequiredLabels.length > 0) { + core.info(`Parent required labels: ${JSON.stringify(parentRequiredLabels)}`); + } + if (parentTitlePrefix) { + core.info(`Parent title prefix: ${parentTitlePrefix}`); + } + if (subRequiredLabels.length > 0) { + core.info(`Sub-issue required labels: ${JSON.stringify(subRequiredLabels)}`); + } + if (subTitlePrefix) { + core.info(`Sub-issue title prefix: ${subTitlePrefix}`); + } + + // Get max count configuration + const maxCountEnv = process.env.GH_AW_LINK_SUB_ISSUE_MAX_COUNT; + const maxCount = maxCountEnv ? parseInt(maxCountEnv, 10) : 5; + if (isNaN(maxCount) || maxCount < 1) { + core.setFailed(`Invalid max value: ${maxCountEnv}. Must be a positive integer`); + return; + } + core.info(`Max count: ${maxCount}`); + + // Limit items to max count + const itemsToProcess = linkItems.slice(0, maxCount); + if (linkItems.length > maxCount) { + core.warning(`Found ${linkItems.length} link_sub_issue items, but max is ${maxCount}. Processing first ${maxCount}.`); + } + + // Process each link request + const results = []; + for (const item of itemsToProcess) { + // Resolve issue numbers, supporting temporary IDs from create_issue job + const parentResolved = resolveIssueNumber(item.parent_issue_number, temporaryIdMap); + const subResolved = resolveIssueNumber(item.sub_issue_number, temporaryIdMap); + + // Check for resolution errors + if (parentResolved.errorMessage) { + core.warning(`Failed to resolve parent issue: ${parentResolved.errorMessage}`); + results.push({ + parent_issue_number: item.parent_issue_number, + sub_issue_number: item.sub_issue_number, + success: false, + error: parentResolved.errorMessage, + }); + continue; + } + + if (subResolved.errorMessage) { + core.warning(`Failed to resolve sub-issue: ${subResolved.errorMessage}`); + results.push({ + parent_issue_number: item.parent_issue_number, + sub_issue_number: item.sub_issue_number, + success: false, + error: subResolved.errorMessage, + }); + continue; + } + + const parentIssueNumber = parentResolved.resolved.number; + const subIssueNumber = subResolved.resolved.number; + + if (parentResolved.wasTemporaryId) { + core.info(`Resolved parent temporary ID '${item.parent_issue_number}' to ${parentResolved.resolved.repo}#${parentIssueNumber}`); + } + if (subResolved.wasTemporaryId) { + core.info(`Resolved sub-issue temporary ID '${item.sub_issue_number}' to ${subResolved.resolved.repo}#${subIssueNumber}`); + } + + // Fetch parent issue to validate filters + let parentIssue; + try { + const parentResponse = await github.rest.issues.get({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: parentIssueNumber, + }); + parentIssue = parentResponse.data; + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + core.warning(`Failed to fetch parent issue #${parentIssueNumber}: ${errorMessage}`); + results.push({ + parent_issue_number: parentIssueNumber, + sub_issue_number: subIssueNumber, + success: false, + error: `Failed to fetch parent issue: ${errorMessage}`, + }); + continue; + } + + // Validate parent issue filters + if (parentRequiredLabels.length > 0) { + const parentLabels = parentIssue.labels.map(l => (typeof l === "string" ? l : l.name || "")); + const missingLabels = parentRequiredLabels.filter(required => !parentLabels.includes(required)); + if (missingLabels.length > 0) { + core.warning(`Parent issue #${parentIssueNumber} is missing required labels: ${missingLabels.join(", ")}. Skipping.`); + results.push({ + parent_issue_number: parentIssueNumber, + sub_issue_number: subIssueNumber, + success: false, + error: `Parent issue missing required labels: ${missingLabels.join(", ")}`, + }); + continue; + } + } + + if (parentTitlePrefix && !parentIssue.title.startsWith(parentTitlePrefix)) { + core.warning(`Parent issue #${parentIssueNumber} title does not start with "${parentTitlePrefix}". Skipping.`); + results.push({ + parent_issue_number: parentIssueNumber, + sub_issue_number: subIssueNumber, + success: false, + error: `Parent issue title does not start with "${parentTitlePrefix}"`, + }); + continue; + } + + // Fetch sub-issue to validate filters + let subIssue; + try { + const subResponse = await github.rest.issues.get({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: subIssueNumber, + }); + subIssue = subResponse.data; + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + core.error(`Failed to fetch sub-issue #${subIssueNumber}: ${errorMessage}`); + results.push({ + parent_issue_number: parentIssueNumber, + sub_issue_number: subIssueNumber, + success: false, + error: `Failed to fetch sub-issue: ${errorMessage}`, + }); + continue; + } + + // Check if the sub-issue already has a parent using GraphQL + try { + const parentCheckQuery = ` + query($owner: String!, $repo: String!, $number: Int!) { + repository(owner: $owner, name: $repo) { + issue(number: $number) { + parent { + number + title + } + } + } + } + `; + const parentCheckResult = await github.graphql(parentCheckQuery, { + owner: context.repo.owner, + repo: context.repo.repo, + number: subIssueNumber, + }); + + const existingParent = parentCheckResult?.repository?.issue?.parent; + if (existingParent) { + core.warning(`Sub-issue #${subIssueNumber} is already a sub-issue of #${existingParent.number} ("${existingParent.title}"). Skipping.`); + results.push({ + parent_issue_number: parentIssueNumber, + sub_issue_number: subIssueNumber, + success: false, + error: `Sub-issue is already a sub-issue of #${existingParent.number}`, + }); + continue; + } + } catch (error) { + // If the GraphQL query fails (e.g., parent field not available), log warning but continue + const errorMessage = error instanceof Error ? error.message : String(error); + core.warning(`Could not check if sub-issue #${subIssueNumber} has a parent: ${errorMessage}. Proceeding with link attempt.`); + } + + // Validate sub-issue filters + if (subRequiredLabels.length > 0) { + const subLabels = subIssue.labels.map(l => (typeof l === "string" ? l : l.name || "")); + const missingLabels = subRequiredLabels.filter(required => !subLabels.includes(required)); + if (missingLabels.length > 0) { + core.warning(`Sub-issue #${subIssueNumber} is missing required labels: ${missingLabels.join(", ")}. Skipping.`); + results.push({ + parent_issue_number: parentIssueNumber, + sub_issue_number: subIssueNumber, + success: false, + error: `Sub-issue missing required labels: ${missingLabels.join(", ")}`, + }); + continue; + } + } + + if (subTitlePrefix && !subIssue.title.startsWith(subTitlePrefix)) { + core.warning(`Sub-issue #${subIssueNumber} title does not start with "${subTitlePrefix}". Skipping.`); + results.push({ + parent_issue_number: parentIssueNumber, + sub_issue_number: subIssueNumber, + success: false, + error: `Sub-issue title does not start with "${subTitlePrefix}"`, + }); + continue; + } + + // Link the sub-issue using GraphQL mutation + try { + // Get the parent issue's node ID for GraphQL + const parentNodeId = parentIssue.node_id; + const subNodeId = subIssue.node_id; + + // Use GraphQL mutation to add sub-issue + await github.graphql( + ` + mutation AddSubIssue($parentId: ID!, $subIssueId: ID!) { + addSubIssue(input: { issueId: $parentId, subIssueId: $subIssueId }) { + issue { + id + number + } + subIssue { + id + number + } + } + } + `, + { + parentId: parentNodeId, + subIssueId: subNodeId, + } + ); + + core.info(`Successfully linked issue #${subIssueNumber} as sub-issue of #${parentIssueNumber}`); + results.push({ + parent_issue_number: parentIssueNumber, + sub_issue_number: subIssueNumber, + success: true, + }); + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + core.warning(`Failed to link issue #${subIssueNumber} as sub-issue of #${parentIssueNumber}: ${errorMessage}`); + results.push({ + parent_issue_number: parentIssueNumber, + sub_issue_number: subIssueNumber, + success: false, + error: errorMessage, + }); + } + } + + // Generate step summary + const successCount = results.filter(r => r.success).length; + const failureCount = results.filter(r => !r.success).length; + + let summaryContent = "## Link Sub-Issue\n\n"; + + if (successCount > 0) { + summaryContent += `✅ Successfully linked ${successCount} sub-issue(s):\n\n`; + for (const result of results.filter(r => r.success)) { + summaryContent += `- Issue #${result.sub_issue_number} → Parent #${result.parent_issue_number}\n`; + } + summaryContent += "\n"; + } + + if (failureCount > 0) { + summaryContent += `⚠️ Failed to link ${failureCount} sub-issue(s):\n\n`; + for (const result of results.filter(r => !r.success)) { + summaryContent += `- Issue #${result.sub_issue_number} → Parent #${result.parent_issue_number}: ${result.error}\n`; + } + } + + await core.summary.addRaw(summaryContent).write(); + + // Set outputs + const linkedIssues = results + .filter(r => r.success) + .map(r => `${r.parent_issue_number}:${r.sub_issue_number}`) + .join("\n"); + core.setOutput("linked_issues", linkedIssues); + + // Warn if any linking failed (do not fail the job) + if (failureCount > 0) { + core.warning(`Failed to link ${failureCount} sub-issue(s). See step summary for details.`); + } +} + +module.exports = { main }; diff --git a/pkg/workflow/js/load_agent_output.cjs b/pkg/workflow/js/load_agent_output.cjs new file mode 100644 index 0000000000..caaa944e6c --- /dev/null +++ b/pkg/workflow/js/load_agent_output.cjs @@ -0,0 +1,90 @@ +// @ts-check +/// + +const fs = require("fs"); + +/** + * Maximum content length to log for debugging purposes + * @type {number} + */ +const MAX_LOG_CONTENT_LENGTH = 10000; + +/** + * Truncate content for logging if it exceeds the maximum length + * @param {string} content - Content to potentially truncate + * @returns {string} Truncated content with indicator if truncated + */ +function truncateForLogging(content) { + if (content.length <= MAX_LOG_CONTENT_LENGTH) { + return content; + } + return content.substring(0, MAX_LOG_CONTENT_LENGTH) + `\n... (truncated, total length: ${content.length})`; +} + +/** + * Load and parse agent output from the GH_AW_AGENT_OUTPUT file + * + * This utility handles the common pattern of: + * 1. Reading the GH_AW_AGENT_OUTPUT environment variable + * 2. Loading the file content + * 3. Validating the JSON structure + * 4. Returning parsed items array + * + * @returns {{ + * success: true, + * items: any[] + * } | { + * success: false, + * items?: undefined, + * error?: string + * }} Result object with success flag and items array (if successful) or error message + */ +function loadAgentOutput() { + const agentOutputFile = process.env.GH_AW_AGENT_OUTPUT; + + // No agent output file specified + if (!agentOutputFile) { + core.info("No GH_AW_AGENT_OUTPUT environment variable found"); + return { success: false }; + } + + // Read agent output from file + let outputContent; + try { + outputContent = fs.readFileSync(agentOutputFile, "utf8"); + } catch (error) { + const errorMessage = `Error reading agent output file: ${error instanceof Error ? error.message : String(error)}`; + core.error(errorMessage); + return { success: false, error: errorMessage }; + } + + // Check for empty content + if (outputContent.trim() === "") { + core.info("Agent output content is empty"); + return { success: false }; + } + + core.info(`Agent output content length: ${outputContent.length}`); + + // Parse the validated output JSON + let validatedOutput; + try { + validatedOutput = JSON.parse(outputContent); + } catch (error) { + const errorMessage = `Error parsing agent output JSON: ${error instanceof Error ? error.message : String(error)}`; + core.error(errorMessage); + core.info(`Failed to parse content:\n${truncateForLogging(outputContent)}`); + return { success: false, error: errorMessage }; + } + + // Validate items array exists + if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { + core.info("No valid items found in agent output"); + core.info(`Parsed content: ${truncateForLogging(JSON.stringify(validatedOutput))}`); + return { success: false }; + } + + return { success: true, items: validatedOutput.items }; +} + +module.exports = { loadAgentOutput, truncateForLogging, MAX_LOG_CONTENT_LENGTH }; diff --git a/pkg/workflow/js/lock-issue.cjs b/pkg/workflow/js/lock-issue.cjs new file mode 100644 index 0000000000..0e26b67b8e --- /dev/null +++ b/pkg/workflow/js/lock-issue.cjs @@ -0,0 +1,69 @@ +// @ts-check +/// + +/** + * Lock a GitHub issue without providing a reason + * This script is used in the activation job when lock-for-agent is enabled + * to prevent concurrent modifications during agent workflow execution + */ + +async function main() { + // Log actor and event information for debugging + core.info(`Lock-issue debug: actor=${context.actor}, eventName=${context.eventName}`); + + // Get issue number from context + const issueNumber = context.issue.number; + + if (!issueNumber) { + core.setFailed("Issue number not found in context"); + return; + } + + const owner = context.repo.owner; + const repo = context.repo.repo; + + core.info(`Lock-issue debug: owner=${owner}, repo=${repo}, issueNumber=${issueNumber}`); + + try { + // Check if issue is already locked + core.info(`Checking if issue #${issueNumber} is already locked`); + const { data: issue } = await github.rest.issues.get({ + owner, + repo, + issue_number: issueNumber, + }); + + // Skip locking if this is a pull request (PRs cannot be locked via issues API) + if (issue.pull_request) { + core.info(`ℹ️ Issue #${issueNumber} is a pull request, skipping lock operation`); + core.setOutput("locked", "false"); + return; + } + + if (issue.locked) { + core.info(`ℹ️ Issue #${issueNumber} is already locked, skipping lock operation`); + core.setOutput("locked", "false"); + return; + } + + core.info(`Locking issue #${issueNumber} for agent workflow execution`); + + // Lock the issue without providing a lock_reason parameter + await github.rest.issues.lock({ + owner, + repo, + issue_number: issueNumber, + }); + + core.info(`✅ Successfully locked issue #${issueNumber}`); + // Set output to indicate the issue was locked and needs to be unlocked + core.setOutput("locked", "true"); + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + core.error(`Failed to lock issue: ${errorMessage}`); + core.setFailed(`Failed to lock issue #${issueNumber}: ${errorMessage}`); + core.setOutput("locked", "false"); + } +} + +module.exports = { main }; diff --git a/pkg/workflow/js/log_parser_bootstrap.cjs b/pkg/workflow/js/log_parser_bootstrap.cjs new file mode 100644 index 0000000000..81de526050 --- /dev/null +++ b/pkg/workflow/js/log_parser_bootstrap.cjs @@ -0,0 +1,139 @@ +// @ts-check +/// + +const { generatePlainTextSummary, generateCopilotCliStyleSummary } = require("./log_parser_shared.cjs"); + +/** + * Bootstrap helper for log parser entry points. + * Handles common logic for environment variable lookup, file existence checks, + * content reading (file or directory), and summary emission. + * + * @param {Object} options - Configuration options + * @param {function(string): string|{markdown: string, mcpFailures?: string[], maxTurnsHit?: boolean, logEntries?: Array}} options.parseLog - Parser function that takes log content and returns markdown or result object + * @param {string} options.parserName - Name of the parser (e.g., "Codex", "Claude", "Copilot") + * @param {boolean} [options.supportsDirectories=false] - Whether the parser supports reading from directories + * @returns {void} + */ +function runLogParser(options) { + const fs = require("fs"); + const path = require("path"); + const { parseLog, parserName, supportsDirectories = false } = options; + + try { + const logPath = process.env.GH_AW_AGENT_OUTPUT; + if (!logPath) { + core.info("No agent log file specified"); + return; + } + + if (!fs.existsSync(logPath)) { + core.info(`Log path not found: ${logPath}`); + return; + } + + let content = ""; + + // Check if logPath is a directory or a file + const stat = fs.statSync(logPath); + if (stat.isDirectory()) { + if (!supportsDirectories) { + core.info(`Log path is a directory but ${parserName} parser does not support directories: ${logPath}`); + return; + } + + // Read all log files from the directory and concatenate them + const files = fs.readdirSync(logPath); + const logFiles = files.filter(file => file.endsWith(".log") || file.endsWith(".txt")); + + if (logFiles.length === 0) { + core.info(`No log files found in directory: ${logPath}`); + return; + } + + // Sort log files by name to ensure consistent ordering + logFiles.sort(); + + // Concatenate all log files + for (const file of logFiles) { + const filePath = path.join(logPath, file); + const fileContent = fs.readFileSync(filePath, "utf8"); + + // Add a newline before this file if the previous content doesn't end with one + if (content.length > 0 && !content.endsWith("\n")) { + content += "\n"; + } + + content += fileContent; + } + } else { + // Read the single log file + content = fs.readFileSync(logPath, "utf8"); + } + + const result = parseLog(content); + + // Handle result that may be a simple string or an object with metadata + let markdown = ""; + let mcpFailures = []; + let maxTurnsHit = false; + let logEntries = null; + + if (typeof result === "string") { + markdown = result; + } else if (result && typeof result === "object") { + markdown = result.markdown || ""; + mcpFailures = result.mcpFailures || []; + maxTurnsHit = result.maxTurnsHit || false; + logEntries = result.logEntries || null; + } + + if (markdown) { + // Generate lightweight plain text summary for core.info and Copilot CLI style for step summary + if (logEntries && Array.isArray(logEntries) && logEntries.length > 0) { + // Extract model from init entry if available + const initEntry = logEntries.find(entry => entry.type === "system" && entry.subtype === "init"); + const model = initEntry?.model || null; + + const plainTextSummary = generatePlainTextSummary(logEntries, { + model, + parserName, + }); + core.info(plainTextSummary); + + // Generate Copilot CLI style markdown for step summary + const copilotCliStyleMarkdown = generateCopilotCliStyleSummary(logEntries, { + model, + parserName, + }); + core.summary.addRaw(copilotCliStyleMarkdown).write(); + } else { + // Fallback: just log success message for parsers without log entries + core.info(`${parserName} log parsed successfully`); + // Write original markdown to step summary if available + core.summary.addRaw(markdown).write(); + } + } else { + core.error(`Failed to parse ${parserName} log`); + } + + // Handle MCP server failures if present + if (mcpFailures && mcpFailures.length > 0) { + const failedServers = mcpFailures.join(", "); + core.setFailed(`MCP server(s) failed to launch: ${failedServers}`); + } + + // Handle max-turns limit if hit + if (maxTurnsHit) { + core.setFailed(`Agent execution stopped: max-turns limit reached. The agent did not complete its task successfully.`); + } + } catch (error) { + core.setFailed(error instanceof Error ? error : String(error)); + } +} + +// Export for testing and usage +if (typeof module !== "undefined" && module.exports) { + module.exports = { + runLogParser, + }; +} diff --git a/pkg/workflow/js/log_parser_shared.cjs b/pkg/workflow/js/log_parser_shared.cjs new file mode 100644 index 0000000000..4bbe37d365 --- /dev/null +++ b/pkg/workflow/js/log_parser_shared.cjs @@ -0,0 +1,1400 @@ +// @ts-check +/// + +/** + * Shared utility functions for log parsers + * Used by parse_claude_log.cjs, parse_copilot_log.cjs, and parse_codex_log.cjs + */ + +/** + * Maximum length for tool output content in characters. + * Tool output/response sections are truncated to this length to keep step summaries readable. + * Reduced from 500 to 256 for more compact output. + */ +const MAX_TOOL_OUTPUT_LENGTH = 256; + +/** + * Maximum step summary size in bytes (1000KB). + * GitHub Actions step summaries have a limit of 1024KB. We use 1000KB to leave buffer space. + * We stop rendering additional content when approaching this limit to prevent workflow failures. + */ +const MAX_STEP_SUMMARY_SIZE = 1000 * 1024; + +/** + * Maximum length for bash command display in plain text summaries. + * Commands are truncated to this length for compact display. + */ +const MAX_BASH_COMMAND_DISPLAY_LENGTH = 40; + +/** + * Warning message shown when step summary size limit is reached. + * This message is added directly to markdown (not tracked) to ensure it's always visible. + * The message is small (~70 bytes) and won't cause practical issues with the 8MB limit. + */ +const SIZE_LIMIT_WARNING = "\n\n⚠️ *Step summary size limit reached. Additional content truncated.*\n\n"; + +/** + * Tracks the size of content being added to a step summary. + * Used to prevent exceeding GitHub Actions step summary size limits. + */ +class StepSummaryTracker { + /** + * Creates a new step summary size tracker. + * @param {number} [maxSize=MAX_STEP_SUMMARY_SIZE] - Maximum allowed size in bytes + */ + constructor(maxSize = MAX_STEP_SUMMARY_SIZE) { + /** @type {number} */ + this.currentSize = 0; + /** @type {number} */ + this.maxSize = maxSize; + /** @type {boolean} */ + this.limitReached = false; + } + + /** + * Adds content to the tracker and returns whether the limit has been reached. + * @param {string} content - Content to add + * @returns {boolean} True if the content was added, false if the limit was reached + */ + add(content) { + if (this.limitReached) { + return false; + } + + const contentSize = Buffer.byteLength(content, "utf8"); + if (this.currentSize + contentSize > this.maxSize) { + this.limitReached = true; + return false; + } + + this.currentSize += contentSize; + return true; + } + + /** + * Checks if the limit has been reached. + * @returns {boolean} True if the limit has been reached + */ + isLimitReached() { + return this.limitReached; + } + + /** + * Gets the current accumulated size. + * @returns {number} Current size in bytes + */ + getSize() { + return this.currentSize; + } + + /** + * Resets the tracker. + */ + reset() { + this.currentSize = 0; + this.limitReached = false; + } +} + +/** + * Formats duration in milliseconds to human-readable string + * @param {number} ms - Duration in milliseconds + * @returns {string} Formatted duration string (e.g., "1s", "1m 30s") + */ +function formatDuration(ms) { + if (!ms || ms <= 0) return ""; + + const seconds = Math.round(ms / 1000); + if (seconds < 60) { + return `${seconds}s`; + } + + const minutes = Math.floor(seconds / 60); + const remainingSeconds = seconds % 60; + if (remainingSeconds === 0) { + return `${minutes}m`; + } + return `${minutes}m ${remainingSeconds}s`; +} + +/** + * Formats a bash command by normalizing whitespace and escaping + * @param {string} command - The raw bash command string + * @returns {string} Formatted and escaped command string + */ +function formatBashCommand(command) { + if (!command) return ""; + + // Convert multi-line commands to single line by replacing newlines with spaces + // and collapsing multiple spaces + let formatted = command + .replace(/\n/g, " ") // Replace newlines with spaces + .replace(/\r/g, " ") // Replace carriage returns with spaces + .replace(/\t/g, " ") // Replace tabs with spaces + .replace(/\s+/g, " ") // Collapse multiple spaces into one + .trim(); // Remove leading/trailing whitespace + + // Escape backticks to prevent markdown issues + formatted = formatted.replace(/`/g, "\\`"); + + // Truncate if too long (keep reasonable length for summary) + const maxLength = 300; + if (formatted.length > maxLength) { + formatted = formatted.substring(0, maxLength) + "..."; + } + + return formatted; +} + +/** + * Truncates a string to a maximum length with ellipsis + * @param {string} str - The string to truncate + * @param {number} maxLength - Maximum allowed length + * @returns {string} Truncated string with ellipsis if needed + */ +function truncateString(str, maxLength) { + if (!str) return ""; + if (str.length <= maxLength) return str; + return str.substring(0, maxLength) + "..."; +} + +/** + * Calculates approximate token count from text using 4 chars per token estimate + * @param {string} text - The text to estimate tokens for + * @returns {number} Approximate token count + */ +function estimateTokens(text) { + if (!text) return 0; + return Math.ceil(text.length / 4); +} + +/** + * Formats MCP tool name from internal format to display format + * @param {string} toolName - The raw tool name (e.g., mcp__github__search_issues) + * @returns {string} Formatted tool name (e.g., github::search_issues) + */ +function formatMcpName(toolName) { + // Convert mcp__github__search_issues to github::search_issues + if (toolName.startsWith("mcp__")) { + const parts = toolName.split("__"); + if (parts.length >= 3) { + const provider = parts[1]; // github, etc. + const method = parts.slice(2).join("_"); // search_issues, etc. + return `${provider}::${method}`; + } + } + return toolName; +} + +/** + * Checks if a tool name looks like a custom agent (kebab-case with multiple words) + * Custom agents have names like: add-safe-output-type, cli-consistency-checker, etc. + * @param {string} toolName - The tool name to check + * @returns {boolean} True if the tool name appears to be a custom agent + */ +function isLikelyCustomAgent(toolName) { + // Custom agents are kebab-case with at least one hyphen and multiple word segments + // They should not start with common prefixes like 'mcp__', 'safe', etc. + if (!toolName || typeof toolName !== "string") { + return false; + } + + // Must contain at least one hyphen + if (!toolName.includes("-")) { + return false; + } + + // Should not contain double underscores (MCP tools) + if (toolName.includes("__")) { + return false; + } + + // Should not start with safe (safeoutputs, safeinputs handled separately) + if (toolName.toLowerCase().startsWith("safe")) { + return false; + } + + // Should be all lowercase with hyphens (kebab-case) + // Allow letters, numbers, and hyphens only + if (!/^[a-z0-9]+(-[a-z0-9]+)+$/.test(toolName)) { + return false; + } + + return true; +} + +/** + * Generates markdown summary from conversation log entries + * This is the core shared logic between Claude and Copilot log parsers + * + * When a summaryTracker is provided, the function tracks the accumulated size + * and stops rendering additional content when approaching the step summary limit. + * + * @param {Array} logEntries - Array of log entries with type, message, etc. + * @param {Object} options - Configuration options + * @param {Function} options.formatToolCallback - Callback function to format tool use (content, toolResult) => string + * @param {Function} options.formatInitCallback - Callback function to format initialization (initEntry) => string or {markdown: string, mcpFailures: string[]} + * @param {StepSummaryTracker} [options.summaryTracker] - Optional tracker for step summary size limits + * @returns {{markdown: string, commandSummary: Array, sizeLimitReached: boolean}} Generated markdown, command summary, and size limit status + */ +function generateConversationMarkdown(logEntries, options) { + const { formatToolCallback, formatInitCallback, summaryTracker } = options; + + const toolUsePairs = new Map(); // Map tool_use_id to tool_result + + // First pass: collect tool results by tool_use_id + for (const entry of logEntries) { + if (entry.type === "user" && entry.message?.content) { + for (const content of entry.message.content) { + if (content.type === "tool_result" && content.tool_use_id) { + toolUsePairs.set(content.tool_use_id, content); + } + } + } + } + + let markdown = ""; + let sizeLimitReached = false; + + /** + * Helper to add content with size tracking + * @param {string} content - Content to add + * @returns {boolean} True if content was added, false if limit reached + */ + function addContent(content) { + if (summaryTracker && !summaryTracker.add(content)) { + sizeLimitReached = true; + return false; + } + markdown += content; + return true; + } + + // Check for initialization data first + const initEntry = logEntries.find(entry => entry.type === "system" && entry.subtype === "init"); + + if (initEntry && formatInitCallback) { + if (!addContent("## 🚀 Initialization\n\n")) { + return { markdown, commandSummary: [], sizeLimitReached }; + } + const initResult = formatInitCallback(initEntry); + // Handle both string and object returns (for backward compatibility) + if (typeof initResult === "string") { + if (!addContent(initResult)) { + return { markdown, commandSummary: [], sizeLimitReached }; + } + } else if (initResult && initResult.markdown) { + if (!addContent(initResult.markdown)) { + return { markdown, commandSummary: [], sizeLimitReached }; + } + } + if (!addContent("\n")) { + return { markdown, commandSummary: [], sizeLimitReached }; + } + } + + if (!addContent("\n## 🤖 Reasoning\n\n")) { + return { markdown, commandSummary: [], sizeLimitReached }; + } + + // Second pass: process assistant messages in sequence + for (const entry of logEntries) { + if (sizeLimitReached) break; + + if (entry.type === "assistant" && entry.message?.content) { + for (const content of entry.message.content) { + if (sizeLimitReached) break; + + if (content.type === "text" && content.text) { + // Add reasoning text directly + const text = content.text.trim(); + if (text && text.length > 0) { + if (!addContent(text + "\n\n")) { + break; + } + } + } else if (content.type === "tool_use") { + // Process tool use with its result + const toolResult = toolUsePairs.get(content.id); + const toolMarkdown = formatToolCallback(content, toolResult); + if (toolMarkdown) { + if (!addContent(toolMarkdown)) { + break; + } + } + } + } + } + } + + // Add size limit notice if limit was reached + if (sizeLimitReached) { + markdown += SIZE_LIMIT_WARNING; + return { markdown, commandSummary: [], sizeLimitReached }; + } + + if (!addContent("## 🤖 Commands and Tools\n\n")) { + markdown += SIZE_LIMIT_WARNING; + return { markdown, commandSummary: [], sizeLimitReached: true }; + } + + const commandSummary = []; // For the succinct summary + + // Collect all tool uses for summary + for (const entry of logEntries) { + if (entry.type === "assistant" && entry.message?.content) { + for (const content of entry.message.content) { + if (content.type === "tool_use") { + const toolName = content.name; + const input = content.input || {}; + + // Skip internal tools - only show external commands and API calls + if (["Read", "Write", "Edit", "MultiEdit", "LS", "Grep", "Glob", "TodoWrite"].includes(toolName)) { + continue; // Skip internal file operations and searches + } + + // Find the corresponding tool result to get status + const toolResult = toolUsePairs.get(content.id); + let statusIcon = "❓"; + if (toolResult) { + statusIcon = toolResult.is_error === true ? "❌" : "✅"; + } + + // Add to command summary (only external tools) + if (toolName === "Bash") { + const formattedCommand = formatBashCommand(input.command || ""); + commandSummary.push(`* ${statusIcon} \`${formattedCommand}\``); + } else if (toolName.startsWith("mcp__")) { + const mcpName = formatMcpName(toolName); + commandSummary.push(`* ${statusIcon} \`${mcpName}(...)\``); + } else { + // Handle other external tools (if any) + commandSummary.push(`* ${statusIcon} ${toolName}`); + } + } + } + } + } + + // Add command summary + if (commandSummary.length > 0) { + for (const cmd of commandSummary) { + if (!addContent(`${cmd}\n`)) { + markdown += SIZE_LIMIT_WARNING; + return { markdown, commandSummary, sizeLimitReached: true }; + } + } + } else { + if (!addContent("No commands or tools used.\n")) { + markdown += SIZE_LIMIT_WARNING; + return { markdown, commandSummary, sizeLimitReached: true }; + } + } + + return { markdown, commandSummary, sizeLimitReached }; +} + +/** + * Generates information section markdown from the last log entry + * @param {any} lastEntry - The last log entry with metadata (num_turns, duration_ms, etc.) + * @param {Object} options - Configuration options + * @param {Function} [options.additionalInfoCallback] - Optional callback for additional info (lastEntry) => string + * @returns {string} Information section markdown + */ +function generateInformationSection(lastEntry, options = {}) { + const { additionalInfoCallback } = options; + + let markdown = "\n## 📊 Information\n\n"; + + if (!lastEntry) { + return markdown; + } + + if (lastEntry.num_turns) { + markdown += `**Turns:** ${lastEntry.num_turns}\n\n`; + } + + if (lastEntry.duration_ms) { + const durationSec = Math.round(lastEntry.duration_ms / 1000); + const minutes = Math.floor(durationSec / 60); + const seconds = durationSec % 60; + markdown += `**Duration:** ${minutes}m ${seconds}s\n\n`; + } + + if (lastEntry.total_cost_usd) { + markdown += `**Total Cost:** $${lastEntry.total_cost_usd.toFixed(4)}\n\n`; + } + + // Call additional info callback if provided (for engine-specific info like premium requests) + if (additionalInfoCallback) { + const additionalInfo = additionalInfoCallback(lastEntry); + if (additionalInfo) { + markdown += additionalInfo; + } + } + + if (lastEntry.usage) { + const usage = lastEntry.usage; + if (usage.input_tokens || usage.output_tokens) { + // Calculate total tokens (matching Go parser logic) + const inputTokens = usage.input_tokens || 0; + const outputTokens = usage.output_tokens || 0; + const cacheCreationTokens = usage.cache_creation_input_tokens || 0; + const cacheReadTokens = usage.cache_read_input_tokens || 0; + const totalTokens = inputTokens + outputTokens + cacheCreationTokens + cacheReadTokens; + + markdown += `**Token Usage:**\n`; + if (totalTokens > 0) markdown += `- Total: ${totalTokens.toLocaleString()}\n`; + if (usage.input_tokens) markdown += `- Input: ${usage.input_tokens.toLocaleString()}\n`; + if (usage.cache_creation_input_tokens) markdown += `- Cache Creation: ${usage.cache_creation_input_tokens.toLocaleString()}\n`; + if (usage.cache_read_input_tokens) markdown += `- Cache Read: ${usage.cache_read_input_tokens.toLocaleString()}\n`; + if (usage.output_tokens) markdown += `- Output: ${usage.output_tokens.toLocaleString()}\n`; + markdown += "\n"; + } + } + + if (lastEntry.permission_denials && lastEntry.permission_denials.length > 0) { + markdown += `**Permission Denials:** ${lastEntry.permission_denials.length}\n\n`; + } + + return markdown; +} + +/** + * Formats MCP parameters into a human-readable string + * @param {Record} input - The input object containing parameters + * @returns {string} Formatted parameters string + */ +function formatMcpParameters(input) { + const keys = Object.keys(input); + if (keys.length === 0) return ""; + + const paramStrs = []; + for (const key of keys.slice(0, 4)) { + // Show up to 4 parameters + const value = String(input[key] || ""); + paramStrs.push(`${key}: ${truncateString(value, 40)}`); + } + + if (keys.length > 4) { + paramStrs.push("..."); + } + + return paramStrs.join(", "); +} + +/** + * Formats initialization information from system init entry + * @param {any} initEntry - The system init entry containing tools, mcp_servers, etc. + * @param {Object} options - Configuration options + * @param {Function} [options.mcpFailureCallback] - Optional callback for tracking MCP failures (server) => void + * @param {Function} [options.modelInfoCallback] - Optional callback for rendering model info (initEntry) => string + * @param {boolean} [options.includeSlashCommands] - Whether to include slash commands section (default: false) + * @returns {{markdown: string, mcpFailures?: string[]}} Result with formatted markdown string and optional MCP failure list + */ +function formatInitializationSummary(initEntry, options = {}) { + const { mcpFailureCallback, modelInfoCallback, includeSlashCommands = false } = options; + let markdown = ""; + const mcpFailures = []; + + // Display model and session info + if (initEntry.model) { + markdown += `**Model:** ${initEntry.model}\n\n`; + } + + // Call model info callback for engine-specific model information (e.g., Copilot premium info) + if (modelInfoCallback) { + const modelInfo = modelInfoCallback(initEntry); + if (modelInfo) { + markdown += modelInfo; + } + } + + if (initEntry.session_id) { + markdown += `**Session ID:** ${initEntry.session_id}\n\n`; + } + + if (initEntry.cwd) { + // Show a cleaner path by removing common prefixes + const cleanCwd = initEntry.cwd.replace(/^\/home\/runner\/work\/[^\/]+\/[^\/]+/, "."); + markdown += `**Working Directory:** ${cleanCwd}\n\n`; + } + + // Display MCP servers status + if (initEntry.mcp_servers && Array.isArray(initEntry.mcp_servers)) { + markdown += "**MCP Servers:**\n"; + for (const server of initEntry.mcp_servers) { + const statusIcon = server.status === "connected" ? "✅" : server.status === "failed" ? "❌" : "❓"; + markdown += `- ${statusIcon} ${server.name} (${server.status})\n`; + + // Track failed MCP servers - call callback if provided (for Claude's detailed error tracking) + if (server.status === "failed") { + mcpFailures.push(server.name); + + // Call callback to allow engine-specific failure handling + if (mcpFailureCallback) { + const failureDetails = mcpFailureCallback(server); + if (failureDetails) { + markdown += failureDetails; + } + } + } + } + markdown += "\n"; + } + + // Display tools by category + if (initEntry.tools && Array.isArray(initEntry.tools)) { + markdown += "**Available Tools:**\n"; + + // Categorize tools with improved groupings + /** @type {{ [key: string]: string[] }} */ + const categories = { + Core: [], + "File Operations": [], + Builtin: [], + "Safe Outputs": [], + "Safe Inputs": [], + "Git/GitHub": [], + Playwright: [], + Serena: [], + MCP: [], + "Custom Agents": [], + Other: [], + }; + + // Builtin tools that come with gh-aw / Copilot + const builtinTools = ["bash", "write_bash", "read_bash", "stop_bash", "list_bash", "grep", "glob", "view", "create", "edit", "store_memory", "code_review", "codeql_checker", "report_progress", "report_intent", "gh-advisory-database"]; + + // Internal tools that are specific to Copilot CLI + const internalTools = ["fetch_copilot_cli_documentation"]; + + for (const tool of initEntry.tools) { + const toolLower = tool.toLowerCase(); + + if (["Task", "Bash", "BashOutput", "KillBash", "ExitPlanMode"].includes(tool)) { + categories["Core"].push(tool); + } else if (["Read", "Edit", "MultiEdit", "Write", "LS", "Grep", "Glob", "NotebookEdit"].includes(tool)) { + categories["File Operations"].push(tool); + } else if (builtinTools.includes(toolLower) || internalTools.includes(toolLower)) { + categories["Builtin"].push(tool); + } else if (tool.startsWith("safeoutputs-") || tool.startsWith("safe_outputs-")) { + // Extract the tool name without the prefix for cleaner display + const toolName = tool.replace(/^safeoutputs-|^safe_outputs-/, ""); + categories["Safe Outputs"].push(toolName); + } else if (tool.startsWith("safeinputs-") || tool.startsWith("safe_inputs-")) { + // Extract the tool name without the prefix for cleaner display + const toolName = tool.replace(/^safeinputs-|^safe_inputs-/, ""); + categories["Safe Inputs"].push(toolName); + } else if (tool.startsWith("mcp__github__")) { + categories["Git/GitHub"].push(formatMcpName(tool)); + } else if (tool.startsWith("mcp__playwright__")) { + categories["Playwright"].push(formatMcpName(tool)); + } else if (tool.startsWith("mcp__serena__")) { + categories["Serena"].push(formatMcpName(tool)); + } else if (tool.startsWith("mcp__") || ["ListMcpResourcesTool", "ReadMcpResourceTool"].includes(tool)) { + categories["MCP"].push(tool.startsWith("mcp__") ? formatMcpName(tool) : tool); + } else if (isLikelyCustomAgent(tool)) { + // Custom agents typically have hyphenated names (kebab-case) + categories["Custom Agents"].push(tool); + } else { + categories["Other"].push(tool); + } + } + + // Display categories with tools + for (const [category, tools] of Object.entries(categories)) { + if (tools.length > 0) { + markdown += `- **${category}:** ${tools.length} tools\n`; + // Show all tools for complete visibility + markdown += ` - ${tools.join(", ")}\n`; + } + } + markdown += "\n"; + } + + // Display slash commands if available (Claude-specific) + if (includeSlashCommands && initEntry.slash_commands && Array.isArray(initEntry.slash_commands)) { + const commandCount = initEntry.slash_commands.length; + markdown += `**Slash Commands:** ${commandCount} available\n`; + if (commandCount <= 10) { + markdown += `- ${initEntry.slash_commands.join(", ")}\n`; + } else { + markdown += `- ${initEntry.slash_commands.slice(0, 5).join(", ")}, and ${commandCount - 5} more\n`; + } + markdown += "\n"; + } + + // Return format compatible with both engines + // Claude expects { markdown, mcpFailures }, Copilot expects just markdown + if (mcpFailures.length > 0) { + return { markdown, mcpFailures }; + } + return { markdown }; +} + +/** + * Formats a tool use entry with its result into markdown + * @param {any} toolUse - The tool use object containing name, input, etc. + * @param {any} toolResult - The corresponding tool result object + * @param {Object} options - Configuration options + * @param {boolean} [options.includeDetailedParameters] - Whether to include detailed parameter section (default: false) + * @returns {string} Formatted markdown string + */ +function formatToolUse(toolUse, toolResult, options = {}) { + const { includeDetailedParameters = false } = options; + const toolName = toolUse.name; + const input = toolUse.input || {}; + + // Skip TodoWrite except the very last one (we'll handle this separately) + if (toolName === "TodoWrite") { + return ""; // Skip for now, would need global context to find the last one + } + + // Helper function to determine status icon + function getStatusIcon() { + if (toolResult) { + return toolResult.is_error === true ? "❌" : "✅"; + } + return "❓"; // Unknown by default + } + + const statusIcon = getStatusIcon(); + let summary = ""; + let details = ""; + + // Get tool output from result + if (toolResult && toolResult.content) { + if (typeof toolResult.content === "string") { + details = toolResult.content; + } else if (Array.isArray(toolResult.content)) { + details = toolResult.content.map(c => (typeof c === "string" ? c : c.text || "")).join("\n"); + } + } + + // Calculate token estimate from input + output + const inputText = JSON.stringify(input); + const outputText = details; + const totalTokens = estimateTokens(inputText) + estimateTokens(outputText); + + // Format metadata (duration and tokens) + let metadata = ""; + if (toolResult && toolResult.duration_ms) { + metadata += `${formatDuration(toolResult.duration_ms)} `; + } + if (totalTokens > 0) { + metadata += `~${totalTokens}t`; + } + metadata = metadata.trim(); + + // Build the summary based on tool type + switch (toolName) { + case "Bash": + const command = input.command || ""; + const description = input.description || ""; + + // Format the command to be single line + const formattedCommand = formatBashCommand(command); + + if (description) { + summary = `${description}: ${formattedCommand}`; + } else { + summary = `${formattedCommand}`; + } + break; + + case "Read": + const filePath = input.file_path || input.path || ""; + const relativePath = filePath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ""); // Remove /home/runner/work/repo/repo/ prefix + summary = `Read ${relativePath}`; + break; + + case "Write": + case "Edit": + case "MultiEdit": + const writeFilePath = input.file_path || input.path || ""; + const writeRelativePath = writeFilePath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ""); + summary = `Write ${writeRelativePath}`; + break; + + case "Grep": + case "Glob": + const query = input.query || input.pattern || ""; + summary = `Search for ${truncateString(query, 80)}`; + break; + + case "LS": + const lsPath = input.path || ""; + const lsRelativePath = lsPath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ""); + summary = `LS: ${lsRelativePath || lsPath}`; + break; + + default: + // Handle MCP calls and other tools + if (toolName.startsWith("mcp__")) { + const mcpName = formatMcpName(toolName); + const params = formatMcpParameters(input); + summary = `${mcpName}(${params})`; + } else { + // Generic tool formatting - show the tool name and main parameters + const keys = Object.keys(input); + if (keys.length > 0) { + // Try to find the most important parameter + const mainParam = keys.find(k => ["query", "command", "path", "file_path", "content"].includes(k)) || keys[0]; + const value = String(input[mainParam] || ""); + + if (value) { + summary = `${toolName}: ${truncateString(value, 100)}`; + } else { + summary = toolName; + } + } else { + summary = toolName; + } + } + } + + // Build sections for formatToolCallAsDetails + /** @type {Array<{label: string, content: string, language?: string}>} */ + const sections = []; + + // For Copilot: include detailed parameters section + if (includeDetailedParameters) { + const inputKeys = Object.keys(input); + if (inputKeys.length > 0) { + sections.push({ + label: "Parameters", + content: JSON.stringify(input, null, 2), + language: "json", + }); + } + } + + // Add response section if we have details + // Note: formatToolCallAsDetails will truncate content to MAX_TOOL_OUTPUT_LENGTH + if (details && details.trim()) { + sections.push({ + label: includeDetailedParameters ? "Response" : "Output", + content: details, + }); + } + + // Use the shared formatToolCallAsDetails helper + return formatToolCallAsDetails({ + summary, + statusIcon, + sections, + metadata: metadata || undefined, + }); +} + +/** + * Parses log content as JSON array or JSONL format + * Handles multiple formats: JSON array, JSONL, and mixed format with debug logs + * @param {string} logContent - The raw log content as a string + * @returns {Array|null} Array of parsed log entries, or null if parsing fails + */ +function parseLogEntries(logContent) { + let logEntries; + + // First, try to parse as JSON array (old format) + try { + logEntries = JSON.parse(logContent); + if (!Array.isArray(logEntries) || logEntries.length === 0) { + throw new Error("Not a JSON array or empty array"); + } + return logEntries; + } catch (jsonArrayError) { + // If that fails, try to parse as JSONL format (mixed format with debug logs) + logEntries = []; + const lines = logContent.split("\n"); + + for (const line of lines) { + const trimmedLine = line.trim(); + if (trimmedLine === "") { + continue; // Skip empty lines + } + + // Handle lines that start with [ (JSON array format) + if (trimmedLine.startsWith("[{")) { + try { + const arrayEntries = JSON.parse(trimmedLine); + if (Array.isArray(arrayEntries)) { + logEntries.push(...arrayEntries); + continue; + } + } catch (arrayParseError) { + // Skip invalid array lines + continue; + } + } + + // Skip debug log lines that don't start with { + // (these are typically timestamped debug messages) + if (!trimmedLine.startsWith("{")) { + continue; + } + + // Try to parse each line as JSON + try { + const jsonEntry = JSON.parse(trimmedLine); + logEntries.push(jsonEntry); + } catch (jsonLineError) { + // Skip invalid JSON lines (could be partial debug output) + continue; + } + } + } + + // Return null if we couldn't parse anything + if (!Array.isArray(logEntries) || logEntries.length === 0) { + return null; + } + + return logEntries; +} + +/** + * Generic helper to format a tool call as an HTML details section. + * This is a reusable helper for all code engines (Claude, Copilot, Codex). + * + * Tool output/response content is automatically truncated to MAX_TOOL_OUTPUT_LENGTH (256 chars) + * to keep step summaries readable and prevent size limit issues. + * + * @param {Object} options - Configuration options + * @param {string} options.summary - The summary text to show in the collapsed state (e.g., "✅ github::list_issues") + * @param {string} [options.statusIcon] - Status icon (✅, ❌, or ❓). If not provided, should be included in summary. + * @param {Array<{label: string, content: string, language?: string}>} [options.sections] - Array of content sections to show in expanded state + * @param {string} [options.metadata] - Optional metadata to append to summary (e.g., "~100t", "5s") + * @param {number} [options.maxContentLength=MAX_TOOL_OUTPUT_LENGTH] - Maximum length for section content before truncation + * @returns {string} Formatted HTML details string or plain summary if no sections provided + * + * @example + * // Basic usage with sections + * formatToolCallAsDetails({ + * summary: "✅ github::list_issues", + * metadata: "~100t", + * sections: [ + * { label: "Parameters", content: '{"state":"open"}', language: "json" }, + * { label: "Response", content: '{"items":[]}', language: "json" } + * ] + * }); + * + * @example + * // Bash command usage + * formatToolCallAsDetails({ + * summary: "✅ ls -la", + * sections: [ + * { label: "Command", content: "ls -la", language: "bash" }, + * { label: "Output", content: "file1.txt\nfile2.txt" } + * ] + * }); + */ +function formatToolCallAsDetails(options) { + const { summary, statusIcon, sections, metadata, maxContentLength = MAX_TOOL_OUTPUT_LENGTH } = options; + + // Build the full summary line + let fullSummary = summary; + if (statusIcon && !summary.startsWith(statusIcon)) { + fullSummary = `${statusIcon} ${summary}`; + } + if (metadata) { + fullSummary += ` ${metadata}`; + } + + // If no sections or all sections are empty, just return the summary + const hasContent = sections && sections.some(s => s.content && s.content.trim()); + if (!hasContent) { + return `${fullSummary}\n\n`; + } + + // Build the details content + let detailsContent = ""; + for (const section of sections) { + if (!section.content || !section.content.trim()) { + continue; + } + + detailsContent += `**${section.label}:**\n\n`; + + // Truncate content if it exceeds maxContentLength + let content = section.content; + if (content.length > maxContentLength) { + content = content.substring(0, maxContentLength) + "... (truncated)"; + } + + // Use 6 backticks to avoid conflicts with content that may contain 3 or 5 backticks + if (section.language) { + detailsContent += `\`\`\`\`\`\`${section.language}\n`; + } else { + detailsContent += "``````\n"; + } + detailsContent += content; + detailsContent += "\n``````\n\n"; + } + + // Remove trailing newlines from details content + detailsContent = detailsContent.trimEnd(); + + return `
\n${fullSummary}\n\n${detailsContent}\n
\n\n`; +} + +/** + * Generates a lightweight plain text summary optimized for raw text rendering. + * This is designed for console output (core.info) instead of markdown step summaries. + * + * The output includes: + * - A compact header with model info + * - Agent conversation with response text and tool executions + * - Basic execution statistics + * + * @param {Array} logEntries - Array of log entries with type, message, etc. + * @param {Object} options - Configuration options + * @param {string} [options.model] - Model name to include in the header + * @param {string} [options.parserName] - Name of the parser (e.g., "Copilot", "Claude") + * @returns {string} Plain text summary for console output + */ +function generatePlainTextSummary(logEntries, options = {}) { + const { model, parserName = "Agent" } = options; + const lines = []; + + // Header + lines.push(`=== ${parserName} Execution Summary ===`); + if (model) { + lines.push(`Model: ${model}`); + } + lines.push(""); + + // Collect tool usage pairs for status lookup + const toolUsePairs = new Map(); + for (const entry of logEntries) { + if (entry.type === "user" && entry.message?.content) { + for (const content of entry.message.content) { + if (content.type === "tool_result" && content.tool_use_id) { + toolUsePairs.set(content.tool_use_id, content); + } + } + } + } + + // Generate conversation flow with agent responses and tool executions + lines.push("Conversation:"); + lines.push(""); + + let conversationLineCount = 0; + const MAX_CONVERSATION_LINES = 5000; // Limit conversation output + let conversationTruncated = false; + + for (const entry of logEntries) { + if (conversationLineCount >= MAX_CONVERSATION_LINES) { + conversationTruncated = true; + break; + } + + if (entry.type === "assistant" && entry.message?.content) { + for (const content of entry.message.content) { + if (conversationLineCount >= MAX_CONVERSATION_LINES) { + conversationTruncated = true; + break; + } + + if (content.type === "text" && content.text) { + // Display agent response text + const text = content.text.trim(); + if (text && text.length > 0) { + // Truncate long responses to keep output manageable + const maxTextLength = 500; + let displayText = text; + if (displayText.length > maxTextLength) { + displayText = displayText.substring(0, maxTextLength) + "..."; + } + + // Split into lines and add Agent prefix + const textLines = displayText.split("\n"); + for (const line of textLines) { + if (conversationLineCount >= MAX_CONVERSATION_LINES) { + conversationTruncated = true; + break; + } + lines.push(`Agent: ${line}`); + conversationLineCount++; + } + lines.push(""); // Add blank line after agent response + conversationLineCount++; + } + } else if (content.type === "tool_use") { + // Display tool execution + const toolName = content.name; + const input = content.input || {}; + + // Skip internal tools (file operations) + if (["Read", "Write", "Edit", "MultiEdit", "LS", "Grep", "Glob", "TodoWrite"].includes(toolName)) { + continue; + } + + const toolResult = toolUsePairs.get(content.id); + const isError = toolResult?.is_error === true; + const statusIcon = isError ? "✗" : "✓"; + + // Format tool execution in Copilot CLI style + let displayName; + let resultPreview = ""; + + if (toolName === "Bash") { + const cmd = formatBashCommand(input.command || ""); + displayName = `$ ${cmd}`; + + // Show result preview if available + if (toolResult && toolResult.content) { + const resultText = typeof toolResult.content === "string" ? toolResult.content : String(toolResult.content); + const resultLines = resultText.split("\n").filter(l => l.trim()); + if (resultLines.length > 0) { + const previewLine = resultLines[0].substring(0, 80); + if (resultLines.length > 1) { + resultPreview = ` └ ${resultLines.length} lines...`; + } else if (previewLine) { + resultPreview = ` └ ${previewLine}`; + } + } + } + } else if (toolName.startsWith("mcp__")) { + // Format MCP tool names like github-list_pull_requests + const formattedName = formatMcpName(toolName).replace("::", "-"); + displayName = formattedName; + + // Show result preview if available + if (toolResult && toolResult.content) { + const resultText = typeof toolResult.content === "string" ? toolResult.content : JSON.stringify(toolResult.content); + const truncated = resultText.length > 80 ? resultText.substring(0, 80) + "..." : resultText; + resultPreview = ` └ ${truncated}`; + } + } else { + displayName = toolName; + + // Show result preview if available + if (toolResult && toolResult.content) { + const resultText = typeof toolResult.content === "string" ? toolResult.content : String(toolResult.content); + const truncated = resultText.length > 80 ? resultText.substring(0, 80) + "..." : resultText; + resultPreview = ` └ ${truncated}`; + } + } + + lines.push(`${statusIcon} ${displayName}`); + conversationLineCount++; + + if (resultPreview) { + lines.push(resultPreview); + conversationLineCount++; + } + + lines.push(""); // Add blank line after tool execution + conversationLineCount++; + } + } + } + } + + if (conversationTruncated) { + lines.push("... (conversation truncated)"); + lines.push(""); + } + + // Statistics + const lastEntry = logEntries[logEntries.length - 1]; + lines.push("Statistics:"); + if (lastEntry?.num_turns) { + lines.push(` Turns: ${lastEntry.num_turns}`); + } + if (lastEntry?.duration_ms) { + const duration = formatDuration(lastEntry.duration_ms); + if (duration) { + lines.push(` Duration: ${duration}`); + } + } + + // Count tools for statistics + let toolCounts = { total: 0, success: 0, error: 0 }; + for (const entry of logEntries) { + if (entry.type === "assistant" && entry.message?.content) { + for (const content of entry.message.content) { + if (content.type === "tool_use") { + const toolName = content.name; + // Skip internal tools + if (["Read", "Write", "Edit", "MultiEdit", "LS", "Grep", "Glob", "TodoWrite"].includes(toolName)) { + continue; + } + toolCounts.total++; + const toolResult = toolUsePairs.get(content.id); + const isError = toolResult?.is_error === true; + if (isError) { + toolCounts.error++; + } else { + toolCounts.success++; + } + } + } + } + } + + if (toolCounts.total > 0) { + lines.push(` Tools: ${toolCounts.success}/${toolCounts.total} succeeded`); + } + if (lastEntry?.usage) { + const usage = lastEntry.usage; + if (usage.input_tokens || usage.output_tokens) { + // Calculate total tokens (matching Go parser logic) + const inputTokens = usage.input_tokens || 0; + const outputTokens = usage.output_tokens || 0; + const cacheCreationTokens = usage.cache_creation_input_tokens || 0; + const cacheReadTokens = usage.cache_read_input_tokens || 0; + const totalTokens = inputTokens + outputTokens + cacheCreationTokens + cacheReadTokens; + + lines.push(` Tokens: ${totalTokens.toLocaleString()} total (${usage.input_tokens.toLocaleString()} in / ${usage.output_tokens.toLocaleString()} out)`); + } + } + if (lastEntry?.total_cost_usd) { + lines.push(` Cost: $${lastEntry.total_cost_usd.toFixed(4)}`); + } + + return lines.join("\n"); +} + +/** + * Generates a markdown-formatted Copilot CLI style summary for step summaries. + * Similar to generatePlainTextSummary but outputs markdown with code blocks for proper rendering. + * + * The output includes: + * - A "Conversation:" section showing agent responses and tool executions + * - A "Statistics:" section with execution metrics + * + * @param {Array} logEntries - Array of log entries with type, message, etc. + * @param {Object} options - Configuration options + * @param {string} [options.model] - Model name to include in the header + * @param {string} [options.parserName] - Name of the parser (e.g., "Copilot", "Claude") + * @returns {string} Markdown-formatted summary for step summary rendering + */ +function generateCopilotCliStyleSummary(logEntries, options = {}) { + const { model, parserName = "Agent" } = options; + const lines = []; + + // Collect tool usage pairs for status lookup + const toolUsePairs = new Map(); + for (const entry of logEntries) { + if (entry.type === "user" && entry.message?.content) { + for (const content of entry.message.content) { + if (content.type === "tool_result" && content.tool_use_id) { + toolUsePairs.set(content.tool_use_id, content); + } + } + } + } + + // Generate conversation flow with agent responses and tool executions + lines.push("```"); + lines.push("Conversation:"); + lines.push(""); + + let conversationLineCount = 0; + const MAX_CONVERSATION_LINES = 5000; // Limit conversation output + let conversationTruncated = false; + + for (const entry of logEntries) { + if (conversationLineCount >= MAX_CONVERSATION_LINES) { + conversationTruncated = true; + break; + } + + if (entry.type === "assistant" && entry.message?.content) { + for (const content of entry.message.content) { + if (conversationLineCount >= MAX_CONVERSATION_LINES) { + conversationTruncated = true; + break; + } + + if (content.type === "text" && content.text) { + // Display agent response text + const text = content.text.trim(); + if (text && text.length > 0) { + // Truncate long responses to keep output manageable + const maxTextLength = 500; + let displayText = text; + if (displayText.length > maxTextLength) { + displayText = displayText.substring(0, maxTextLength) + "..."; + } + + // Split into lines and add Agent prefix + const textLines = displayText.split("\n"); + for (const line of textLines) { + if (conversationLineCount >= MAX_CONVERSATION_LINES) { + conversationTruncated = true; + break; + } + lines.push(`Agent: ${line}`); + conversationLineCount++; + } + lines.push(""); // Add blank line after agent response + conversationLineCount++; + } + } else if (content.type === "tool_use") { + // Display tool execution + const toolName = content.name; + const input = content.input || {}; + + // Skip internal tools (file operations) + if (["Read", "Write", "Edit", "MultiEdit", "LS", "Grep", "Glob", "TodoWrite"].includes(toolName)) { + continue; + } + + const toolResult = toolUsePairs.get(content.id); + const isError = toolResult?.is_error === true; + const statusIcon = isError ? "✗" : "✓"; + + // Format tool execution in Copilot CLI style + let displayName; + let resultPreview = ""; + + if (toolName === "Bash") { + const cmd = formatBashCommand(input.command || ""); + displayName = `$ ${cmd}`; + + // Show result preview if available + if (toolResult && toolResult.content) { + const resultText = typeof toolResult.content === "string" ? toolResult.content : String(toolResult.content); + const resultLines = resultText.split("\n").filter(l => l.trim()); + if (resultLines.length > 0) { + const previewLine = resultLines[0].substring(0, 80); + if (resultLines.length > 1) { + resultPreview = ` └ ${resultLines.length} lines...`; + } else if (previewLine) { + resultPreview = ` └ ${previewLine}`; + } + } + } + } else if (toolName.startsWith("mcp__")) { + // Format MCP tool names like github-list_pull_requests + const formattedName = formatMcpName(toolName).replace("::", "-"); + displayName = formattedName; + + // Show result preview if available + if (toolResult && toolResult.content) { + const resultText = typeof toolResult.content === "string" ? toolResult.content : JSON.stringify(toolResult.content); + const truncated = resultText.length > 80 ? resultText.substring(0, 80) + "..." : resultText; + resultPreview = ` └ ${truncated}`; + } + } else { + displayName = toolName; + + // Show result preview if available + if (toolResult && toolResult.content) { + const resultText = typeof toolResult.content === "string" ? toolResult.content : String(toolResult.content); + const truncated = resultText.length > 80 ? resultText.substring(0, 80) + "..." : resultText; + resultPreview = ` └ ${truncated}`; + } + } + + lines.push(`${statusIcon} ${displayName}`); + conversationLineCount++; + + if (resultPreview) { + lines.push(resultPreview); + conversationLineCount++; + } + + lines.push(""); // Add blank line after tool execution + conversationLineCount++; + } + } + } + } + + if (conversationTruncated) { + lines.push("... (conversation truncated)"); + lines.push(""); + } + + // Statistics + const lastEntry = logEntries[logEntries.length - 1]; + lines.push("Statistics:"); + if (lastEntry?.num_turns) { + lines.push(` Turns: ${lastEntry.num_turns}`); + } + if (lastEntry?.duration_ms) { + const duration = formatDuration(lastEntry.duration_ms); + if (duration) { + lines.push(` Duration: ${duration}`); + } + } + + // Count tools for statistics + let toolCounts = { total: 0, success: 0, error: 0 }; + for (const entry of logEntries) { + if (entry.type === "assistant" && entry.message?.content) { + for (const content of entry.message.content) { + if (content.type === "tool_use") { + const toolName = content.name; + // Skip internal tools + if (["Read", "Write", "Edit", "MultiEdit", "LS", "Grep", "Glob", "TodoWrite"].includes(toolName)) { + continue; + } + toolCounts.total++; + const toolResult = toolUsePairs.get(content.id); + const isError = toolResult?.is_error === true; + if (isError) { + toolCounts.error++; + } else { + toolCounts.success++; + } + } + } + } + } + + if (toolCounts.total > 0) { + lines.push(` Tools: ${toolCounts.success}/${toolCounts.total} succeeded`); + } + if (lastEntry?.usage) { + const usage = lastEntry.usage; + if (usage.input_tokens || usage.output_tokens) { + // Calculate total tokens (matching Go parser logic) + const inputTokens = usage.input_tokens || 0; + const outputTokens = usage.output_tokens || 0; + const cacheCreationTokens = usage.cache_creation_input_tokens || 0; + const cacheReadTokens = usage.cache_read_input_tokens || 0; + const totalTokens = inputTokens + outputTokens + cacheCreationTokens + cacheReadTokens; + + lines.push(` Tokens: ${totalTokens.toLocaleString()} total (${usage.input_tokens.toLocaleString()} in / ${usage.output_tokens.toLocaleString()} out)`); + } + } + if (lastEntry?.total_cost_usd) { + lines.push(` Cost: $${lastEntry.total_cost_usd.toFixed(4)}`); + } + + lines.push("```"); + + return lines.join("\n"); +} + +// Export functions and constants +module.exports = { + // Constants + MAX_TOOL_OUTPUT_LENGTH, + MAX_STEP_SUMMARY_SIZE, + // Classes + StepSummaryTracker, + // Functions + formatDuration, + formatBashCommand, + truncateString, + estimateTokens, + formatMcpName, + isLikelyCustomAgent, + generateConversationMarkdown, + generateInformationSection, + formatMcpParameters, + formatInitializationSummary, + formatToolUse, + parseLogEntries, + formatToolCallAsDetails, + generatePlainTextSummary, + generateCopilotCliStyleSummary, +}; diff --git a/pkg/workflow/js/mcp_handler_python.cjs b/pkg/workflow/js/mcp_handler_python.cjs new file mode 100644 index 0000000000..1807e86c56 --- /dev/null +++ b/pkg/workflow/js/mcp_handler_python.cjs @@ -0,0 +1,100 @@ +// @ts-check + +/** + * Python Script Handler for Safe-Inputs + * + * This module provides a handler for executing Python scripts in safe-inputs tools. + * It uses a Pythonic approach for passing inputs via JSON on stdin. + */ + +const { execFile } = require("child_process"); + +/** + * Create a Python script handler function that executes a .py file. + * Inputs are passed as JSON via stdin for a more Pythonic approach: + * - Inputs are passed as JSON object via stdin (similar to JavaScript tools) + * - Python script reads and parses JSON from stdin into 'inputs' dictionary + * - Outputs are read from stdout (JSON format expected) + * + * @param {Object} server - The MCP server instance for logging + * @param {string} toolName - Name of the tool for logging purposes + * @param {string} scriptPath - Path to the Python script to execute + * @param {number} [timeoutSeconds=60] - Timeout in seconds for script execution + * @returns {Function} Async handler function that executes the Python script + */ +function createPythonHandler(server, toolName, scriptPath, timeoutSeconds = 60) { + return async args => { + server.debug(` [${toolName}] Invoking Python handler: ${scriptPath}`); + server.debug(` [${toolName}] Python handler args: ${JSON.stringify(args)}`); + server.debug(` [${toolName}] Timeout: ${timeoutSeconds}s`); + + // Pass inputs as JSON via stdin (more Pythonic approach) + const inputJson = JSON.stringify(args || {}); + server.debug(` [${toolName}] Input JSON (${inputJson.length} bytes): ${inputJson.substring(0, 200)}${inputJson.length > 200 ? "..." : ""}`); + + return new Promise((resolve, reject) => { + server.debug(` [${toolName}] Executing Python script...`); + + const child = execFile( + "python3", + [scriptPath], + { + env: process.env, + timeout: timeoutSeconds * 1000, // Convert to milliseconds + maxBuffer: 10 * 1024 * 1024, // 10MB buffer + }, + (error, stdout, stderr) => { + // Log stdout and stderr + if (stdout) { + server.debug(` [${toolName}] stdout: ${stdout.substring(0, 500)}${stdout.length > 500 ? "..." : ""}`); + } + if (stderr) { + server.debug(` [${toolName}] stderr: ${stderr.substring(0, 500)}${stderr.length > 500 ? "..." : ""}`); + } + + if (error) { + server.debugError(` [${toolName}] Python script error: `, error); + reject(error); + return; + } + + // Parse output from stdout + let result; + try { + // Try to parse stdout as JSON + if (stdout && stdout.trim()) { + result = JSON.parse(stdout.trim()); + } else { + result = { stdout: stdout || "", stderr: stderr || "" }; + } + } catch (parseError) { + server.debug(` [${toolName}] Output is not JSON, returning as text`); + result = { stdout: stdout || "", stderr: stderr || "" }; + } + + server.debug(` [${toolName}] Python handler completed successfully`); + + // Return MCP format + resolve({ + content: [ + { + type: "text", + text: JSON.stringify(result), + }, + ], + }); + } + ); + + // Write input JSON to stdin + if (child.stdin) { + child.stdin.write(inputJson); + child.stdin.end(); + } + }); + }; +} + +module.exports = { + createPythonHandler, +}; diff --git a/pkg/workflow/js/mcp_handler_shell.cjs b/pkg/workflow/js/mcp_handler_shell.cjs new file mode 100644 index 0000000000..cda3f28276 --- /dev/null +++ b/pkg/workflow/js/mcp_handler_shell.cjs @@ -0,0 +1,146 @@ +// @ts-check + +/** + * Shell Script Handler for Safe-Inputs + * + * This module provides a handler for executing shell scripts in safe-inputs tools. + * It follows GitHub Actions conventions for passing inputs and reading outputs. + */ + +const fs = require("fs"); +const path = require("path"); +const { execFile } = require("child_process"); +const os = require("os"); + +/** + * Create a shell script handler function that executes a .sh file. + * Uses GitHub Actions convention for passing inputs/outputs: + * - Inputs are passed as environment variables prefixed with INPUT_ (uppercased, dashes replaced with underscores) + * - Outputs are read from GITHUB_OUTPUT file (key=value format, one per line) + * - Returns: { stdout, stderr, outputs } + * + * @param {Object} server - The MCP server instance for logging + * @param {string} toolName - Name of the tool for logging purposes + * @param {string} scriptPath - Path to the shell script to execute + * @param {number} [timeoutSeconds=60] - Timeout in seconds for script execution + * @returns {Function} Async handler function that executes the shell script + */ +function createShellHandler(server, toolName, scriptPath, timeoutSeconds = 60) { + return async args => { + server.debug(` [${toolName}] Invoking shell handler: ${scriptPath}`); + server.debug(` [${toolName}] Shell handler args: ${JSON.stringify(args)}`); + server.debug(` [${toolName}] Timeout: ${timeoutSeconds}s`); + + // Create environment variables from args (GitHub Actions convention: INPUT_NAME) + const env = { ...process.env }; + for (const [key, value] of Object.entries(args || {})) { + const envKey = `INPUT_${key.toUpperCase().replace(/-/g, "_")}`; + env[envKey] = String(value); + server.debug(` [${toolName}] Set env: ${envKey}=${String(value).substring(0, 100)}${String(value).length > 100 ? "..." : ""}`); + } + + // Create a temporary file for outputs (GitHub Actions convention: GITHUB_OUTPUT) + const outputFile = path.join(os.tmpdir(), `mcp-shell-output-${Date.now()}-${Math.random().toString(36).substring(2)}.txt`); + env.GITHUB_OUTPUT = outputFile; + server.debug(` [${toolName}] Output file: ${outputFile}`); + + // Create the output file (empty) + fs.writeFileSync(outputFile, ""); + + return new Promise((resolve, reject) => { + server.debug(` [${toolName}] Executing shell script...`); + + execFile( + scriptPath, + [], + { + env, + timeout: timeoutSeconds * 1000, // Convert to milliseconds + maxBuffer: 10 * 1024 * 1024, // 10MB buffer + }, + (error, stdout, stderr) => { + // Log stdout and stderr + if (stdout) { + server.debug(` [${toolName}] stdout: ${stdout.substring(0, 500)}${stdout.length > 500 ? "..." : ""}`); + } + if (stderr) { + server.debug(` [${toolName}] stderr: ${stderr.substring(0, 500)}${stderr.length > 500 ? "..." : ""}`); + } + + if (error) { + server.debugError(` [${toolName}] Shell script error: `, error); + + // Clean up output file + try { + if (fs.existsSync(outputFile)) { + fs.unlinkSync(outputFile); + } + } catch { + // Ignore cleanup errors + } + + reject(error); + return; + } + + // Read outputs from the GITHUB_OUTPUT file + /** @type {Record} */ + const outputs = {}; + try { + if (fs.existsSync(outputFile)) { + const outputContent = fs.readFileSync(outputFile, "utf-8"); + server.debug(` [${toolName}] Output file content: ${outputContent.substring(0, 500)}${outputContent.length > 500 ? "..." : ""}`); + + // Parse outputs (key=value format, one per line) + const lines = outputContent.split("\n"); + for (const line of lines) { + const trimmed = line.trim(); + if (trimmed && trimmed.includes("=")) { + const eqIndex = trimmed.indexOf("="); + const key = trimmed.substring(0, eqIndex); + const value = trimmed.substring(eqIndex + 1); + outputs[key] = value; + server.debug(` [${toolName}] Parsed output: ${key}=${value.substring(0, 100)}${value.length > 100 ? "..." : ""}`); + } + } + } + } catch (readError) { + server.debugError(` [${toolName}] Error reading output file: `, readError); + } + + // Clean up output file + try { + if (fs.existsSync(outputFile)) { + fs.unlinkSync(outputFile); + } + } catch { + // Ignore cleanup errors + } + + // Build the result + const result = { + stdout: stdout || "", + stderr: stderr || "", + outputs, + }; + + server.debug(` [${toolName}] Shell handler completed, outputs: ${Object.keys(outputs).join(", ") || "(none)"}`); + + // Return MCP format + resolve({ + content: [ + { + type: "text", + text: JSON.stringify(result), + }, + ], + }); + } + ); + }); + }; +} + +module.exports = { + createShellHandler, +}; diff --git a/pkg/workflow/js/mcp_http_transport.cjs b/pkg/workflow/js/mcp_http_transport.cjs new file mode 100644 index 0000000000..4ac2ea8e54 --- /dev/null +++ b/pkg/workflow/js/mcp_http_transport.cjs @@ -0,0 +1,298 @@ +// @ts-check +/// + +/** + * MCP HTTP Transport Implementation + * + * This module provides the HTTP transport layer for the MCP (Model Context Protocol), + * removing the dependency on @modelcontextprotocol/sdk. + * + * Features: + * - HTTP request/response handling + * - Session management (stateful and stateless modes) + * - CORS support for development + * - JSON-RPC 2.0 compatible + * + * References: + * - MCP Specification: https://spec.modelcontextprotocol.io + * - JSON-RPC 2.0: https://www.jsonrpc.org/specification + */ + +const http = require("http"); +const { randomUUID } = require("crypto"); +const { createServer, registerTool, handleRequest } = require("./mcp_server_core.cjs"); + +/** + * Simple MCP Server wrapper that provides a class-like interface + * compatible with the HTTP transport, backed by mcp_server_core functions. + */ +class MCPServer { + /** + * @param {Object} serverInfo - Server metadata + * @param {string} serverInfo.name - Server name + * @param {string} serverInfo.version - Server version + * @param {Object} [options] - Server options + * @param {Object} [options.capabilities] - Server capabilities + */ + constructor(serverInfo, options = {}) { + this._coreServer = createServer(serverInfo, options); + this.serverInfo = serverInfo; + this.capabilities = options.capabilities || { tools: {} }; + this.tools = new Map(); + this.transport = null; + this.initialized = false; + } + + /** + * Register a tool with the server + * @param {string} name - Tool name + * @param {string} description - Tool description + * @param {Object} inputSchema - JSON Schema for tool input + * @param {Function} handler - Async function that handles tool calls + */ + tool(name, description, inputSchema, handler) { + this.tools.set(name, { + name, + description, + inputSchema, + handler, + }); + // Also register with the core server + registerTool(this._coreServer, { + name, + description, + inputSchema, + handler, + }); + } + + /** + * Connect to a transport + * @param {any} transport - Transport instance (must have setServer and start methods) + */ + async connect(transport) { + this.transport = transport; + transport.setServer(this); + await transport.start(); + } + + /** + * Handle an incoming JSON-RPC request + * @param {Object} request - JSON-RPC request + * @returns {Promise} JSON-RPC response or null for notifications + */ + async handleRequest(request) { + // Track initialization state + if (request.method === "initialize") { + this.initialized = true; + } + // Delegate to core server's handleRequest function + return handleRequest(this._coreServer, request); + } +} + +/** + * MCP HTTP Transport implementation + * Handles HTTP requests and converts them to MCP protocol messages + */ +class MCPHTTPTransport { + /** + * @param {Object} options - Transport options + * @param {Function} [options.sessionIdGenerator] - Function that generates session IDs (undefined for stateless) + * @param {boolean} [options.enableJsonResponse] - Enable JSON responses instead of SSE (default: true for simplicity) + * @param {boolean} [options.enableDnsRebindingProtection] - Enable DNS rebinding protection (default: false) + */ + constructor(options = {}) { + this.sessionIdGenerator = options.sessionIdGenerator; + this.enableJsonResponse = options.enableJsonResponse !== false; // Default to true + this.enableDnsRebindingProtection = options.enableDnsRebindingProtection || false; + this.server = null; + this.sessionId = null; + this.started = false; + } + + /** + * Set the MCP server instance + * @param {MCPServer} server - MCP server instance + */ + setServer(server) { + this.server = server; + } + + /** + * Start the transport + */ + async start() { + if (this.started) { + throw new Error("Transport already started"); + } + this.started = true; + } + + /** + * Handle an incoming HTTP request + * @param {http.IncomingMessage} req - HTTP request + * @param {http.ServerResponse} res - HTTP response + * @param {Object} [parsedBody] - Pre-parsed request body + */ + async handleRequest(req, res, parsedBody) { + // Set CORS headers + res.setHeader("Access-Control-Allow-Origin", "*"); + res.setHeader("Access-Control-Allow-Methods", "GET, POST, OPTIONS"); + res.setHeader("Access-Control-Allow-Headers", "Content-Type, Accept, Mcp-Session-Id"); + + // Handle OPTIONS preflight + if (req.method === "OPTIONS") { + res.writeHead(200); + res.end(); + return; + } + + // Only handle POST requests for MCP protocol + if (req.method !== "POST") { + res.writeHead(405, { "Content-Type": "application/json" }); + res.end(JSON.stringify({ error: "Method not allowed" })); + return; + } + + try { + // Parse request body if not already parsed + let body = parsedBody; + if (!body) { + const chunks = []; + for await (const chunk of req) { + chunks.push(chunk); + } + const bodyStr = Buffer.concat(chunks).toString(); + try { + body = bodyStr ? JSON.parse(bodyStr) : null; + } catch (parseError) { + res.writeHead(400, { "Content-Type": "application/json" }); + res.end( + JSON.stringify({ + jsonrpc: "2.0", + error: { + code: -32700, + message: "Parse error: Invalid JSON in request body", + }, + id: null, + }) + ); + return; + } + } + + if (!body) { + res.writeHead(400, { "Content-Type": "application/json" }); + res.end( + JSON.stringify({ + jsonrpc: "2.0", + error: { + code: -32600, + message: "Invalid Request: Empty request body", + }, + id: null, + }) + ); + return; + } + + // Validate JSON-RPC structure + if (!body.jsonrpc || body.jsonrpc !== "2.0") { + res.writeHead(400, { "Content-Type": "application/json" }); + res.end( + JSON.stringify({ + jsonrpc: "2.0", + error: { + code: -32600, + message: "Invalid Request: jsonrpc must be '2.0'", + }, + id: body.id || null, + }) + ); + return; + } + + // Handle session management for stateful mode + if (this.sessionIdGenerator) { + // For initialize, generate a new session ID + if (body.method === "initialize") { + this.sessionId = this.sessionIdGenerator(); + } else { + // For other methods, validate session ID + const requestSessionId = req.headers["mcp-session-id"]; + if (!requestSessionId) { + res.writeHead(400, { "Content-Type": "application/json" }); + res.end( + JSON.stringify({ + jsonrpc: "2.0", + error: { + code: -32600, + message: "Invalid Request: Missing Mcp-Session-Id header", + }, + id: body.id || null, + }) + ); + return; + } + + if (requestSessionId !== this.sessionId) { + res.writeHead(404, { "Content-Type": "application/json" }); + res.end( + JSON.stringify({ + jsonrpc: "2.0", + error: { + code: -32001, + message: "Session not found", + }, + id: body.id || null, + }) + ); + return; + } + } + } + + // Process the request through the MCP server + const response = await this.server.handleRequest(body); + + // Handle notifications (null response means no reply needed) + if (response === null) { + res.writeHead(204); // No Content + res.end(); + return; + } + + // Set response headers + const headers = { "Content-Type": "application/json" }; + if (this.sessionId) { + headers["mcp-session-id"] = this.sessionId; + } + + res.writeHead(200, headers); + res.end(JSON.stringify(response)); + } catch (error) { + // Log the full error with stack trace on the server for debugging + console.error("MCP HTTP Transport error:", error); + + if (!res.headersSent) { + res.writeHead(500, { "Content-Type": "application/json" }); + res.end( + JSON.stringify({ + jsonrpc: "2.0", + error: { + code: -32603, + message: "Internal server error", + }, + id: null, + }) + ); + } + } + } +} + +module.exports = { + MCPServer, + MCPHTTPTransport, +}; diff --git a/pkg/workflow/js/mcp_logger.cjs b/pkg/workflow/js/mcp_logger.cjs new file mode 100644 index 0000000000..c4e764160a --- /dev/null +++ b/pkg/workflow/js/mcp_logger.cjs @@ -0,0 +1,53 @@ +// @ts-check +/// + +/** + * MCP Logger Utility + * + * This module provides logger creation utilities for MCP servers. + * It creates logger objects with debug and debugError methods that write + * timestamped messages to stderr. + * + * Usage: + * const { createLogger } = require("./mcp_logger.cjs"); + * const logger = createLogger("my-server"); + * logger.debug("Server started"); + * logger.debugError("Error: ", new Error("Something went wrong")); + */ + +/** + * Create a logger object with debug and debugError methods + * @param {string} serverName - Name to include in log messages + * @returns {Object} Logger object with debug and debugError methods + */ +function createLogger(serverName) { + const logger = { + /** + * Log a debug message to stderr with timestamp + * @param {string} msg - Message to log + */ + debug: msg => { + const timestamp = new Date().toISOString(); + process.stderr.write(`[${timestamp}] [${serverName}] ${msg}\n`); + }, + + /** + * Log an error with optional stack trace + * @param {string} prefix - Prefix for the error message + * @param {Error|string|any} error - Error object or message + */ + debugError: (prefix, error) => { + const errorMessage = error instanceof Error ? error.message : String(error); + logger.debug(`${prefix}${errorMessage}`); + if (error instanceof Error && error.stack) { + logger.debug(`${prefix}Stack trace: ${error.stack}`); + } + }, + }; + + return logger; +} + +module.exports = { + createLogger, +}; diff --git a/pkg/workflow/js/mcp_server_core.cjs b/pkg/workflow/js/mcp_server_core.cjs new file mode 100644 index 0000000000..ab1a60832c --- /dev/null +++ b/pkg/workflow/js/mcp_server_core.cjs @@ -0,0 +1,747 @@ +// @ts-check +/// + +/** + * MCP Server Core Module + * + * This module provides a reusable API for creating MCP (Model Context Protocol) servers. + * It handles JSON-RPC 2.0 message parsing, tool registration, and server lifecycle. + * + * Usage: + * const { createServer, registerTool, start } = require("./mcp_server_core.cjs"); + * + * const server = createServer({ name: "my-server", version: "1.0.0" }); + * registerTool(server, { + * name: "my_tool", + * description: "A tool", + * inputSchema: { type: "object", properties: {} }, + * handler: (args) => ({ content: [{ type: "text", text: "result" }] }) + * }); + * start(server); + */ + +const fs = require("fs"); +const path = require("path"); + +const { ReadBuffer } = require("./read_buffer.cjs"); +const { validateRequiredFields } = require("./safe_inputs_validation.cjs"); + +const encoder = new TextEncoder(); + +/** + * @typedef {Object} ServerInfo + * @property {string} name - Server name + * @property {string} version - Server version + */ + +/** + * @typedef {Object} Tool + * @property {string} name - Tool name + * @property {string} description - Tool description + * @property {Object} inputSchema - JSON Schema for tool inputs + * @property {Function} [handler] - Tool handler function + * @property {string} [handlerPath] - Optional file path to handler module (original path from config) + * @property {number} [timeout] - Timeout in seconds for tool execution (default: 60) + */ + +/** + * @typedef {Object} MCPServer + * @property {ServerInfo} serverInfo - Server information + * @property {Object} tools - Registered tools + * @property {Function} debug - Debug logging function + * @property {Function} debugError - Debug logging function for errors (extracts message from Error objects) + * @property {Function} writeMessage - Write message to stdout + * @property {Function} replyResult - Send a result response + * @property {Function} replyError - Send an error response + * @property {ReadBuffer} readBuffer - Message buffer + * @property {string} [logDir] - Optional log directory + * @property {string} [logFilePath] - Optional log file path + * @property {boolean} logFileInitialized - Whether log file has been initialized + */ + +/** + * Initialize log file for the server + * @param {MCPServer} server - The MCP server instance + */ +function initLogFile(server) { + if (server.logFileInitialized || !server.logDir || !server.logFilePath) return; + try { + if (!fs.existsSync(server.logDir)) { + fs.mkdirSync(server.logDir, { recursive: true }); + } + // Initialize/truncate log file with header + const timestamp = new Date().toISOString(); + fs.writeFileSync(server.logFilePath, `# ${server.serverInfo.name} MCP Server Log\n# Started: ${timestamp}\n# Version: ${server.serverInfo.version}\n\n`); + server.logFileInitialized = true; + } catch { + // Silently ignore errors - logging to stderr will still work + } +} + +/** + * Create a debug function for the server + * @param {MCPServer} server - The MCP server instance + * @returns {Function} Debug function + */ +function createDebugFunction(server) { + return msg => { + const timestamp = new Date().toISOString(); + const formattedMsg = `[${timestamp}] [${server.serverInfo.name}] ${msg}\n`; + + // Always write to stderr + process.stderr.write(formattedMsg); + + // Also write to log file if log directory is set (initialize on first use) + if (server.logDir && server.logFilePath) { + if (!server.logFileInitialized) { + initLogFile(server); + } + if (server.logFileInitialized) { + try { + fs.appendFileSync(server.logFilePath, formattedMsg); + } catch { + // Silently ignore file write errors - stderr logging still works + } + } + } + }; +} + +/** + * Create a debugError function for the server that handles error casting + * @param {MCPServer} server - The MCP server instance + * @returns {Function} Debug error function that extracts message from Error objects + */ +function createDebugErrorFunction(server) { + return (prefix, error) => { + const errorMessage = error instanceof Error ? error.message : String(error); + server.debug(`${prefix}${errorMessage}`); + if (error instanceof Error && error.stack) { + server.debug(`${prefix}Stack trace: ${error.stack}`); + } + }; +} + +/** + * Create a writeMessage function for the server + * @param {MCPServer} server - The MCP server instance + * @returns {Function} Write message function + */ +function createWriteMessageFunction(server) { + return obj => { + const json = JSON.stringify(obj); + server.debug(`send: ${json}`); + const message = json + "\n"; + const bytes = encoder.encode(message); + fs.writeSync(1, bytes); + }; +} + +/** + * Create a replyResult function for the server + * @param {MCPServer} server - The MCP server instance + * @returns {Function} Reply result function + */ +function createReplyResultFunction(server) { + return (id, result) => { + if (id === undefined || id === null) return; // notification + const res = { jsonrpc: "2.0", id, result }; + server.writeMessage(res); + }; +} + +/** + * Create a replyError function for the server + * @param {MCPServer} server - The MCP server instance + * @returns {Function} Reply error function + */ +function createReplyErrorFunction(server) { + return (id, code, message) => { + // Don't send error responses for notifications (id is null/undefined) + if (id === undefined || id === null) { + server.debug(`Error for notification: ${message}`); + return; + } + + const error = { code, message }; + const res = { + jsonrpc: "2.0", + id, + error, + }; + server.writeMessage(res); + }; +} + +/** + * Create a new MCP server instance + * @param {ServerInfo} serverInfo - Server information (name and version) + * @param {Object} [options] - Optional server configuration + * @param {string} [options.logDir] - Directory for log file (optional) + * @returns {MCPServer} The MCP server instance + */ +function createServer(serverInfo, options = {}) { + const logDir = options.logDir || undefined; + const logFilePath = logDir ? path.join(logDir, "server.log") : undefined; + + /** @type {MCPServer} */ + const server = { + serverInfo, + tools: {}, + debug: () => {}, // placeholder + debugError: () => {}, // placeholder + writeMessage: () => {}, // placeholder + replyResult: () => {}, // placeholder + replyError: () => {}, // placeholder + readBuffer: new ReadBuffer(), + logDir, + logFilePath, + logFileInitialized: false, + }; + + // Initialize functions with references to server + server.debug = createDebugFunction(server); + server.debugError = createDebugErrorFunction(server); + server.writeMessage = createWriteMessageFunction(server); + server.replyResult = createReplyResultFunction(server); + server.replyError = createReplyErrorFunction(server); + + return server; +} + +/** + * Create a wrapped handler function that normalizes results to MCP format. + * Extracted to avoid creating closures with excessive scope in loadToolHandlers. + * + * @param {MCPServer} server - The MCP server instance for logging + * @param {string} toolName - Name of the tool for logging purposes + * @param {Function} handlerFn - The original handler function to wrap + * @returns {Function} Wrapped async handler function + */ +function createWrappedHandler(server, toolName, handlerFn) { + return async args => { + server.debug(` [${toolName}] Invoking handler with args: ${JSON.stringify(args)}`); + + try { + // Call the handler (may be sync or async) + const result = await Promise.resolve(handlerFn(args)); + server.debug(` [${toolName}] Handler returned result type: ${typeof result}`); + + // If the result is already in MCP format (has content array), return as-is + if (result && typeof result === "object" && Array.isArray(result.content)) { + server.debug(` [${toolName}] Result is already in MCP format`); + return result; + } + + // Otherwise, serialize the result to text + // Use try-catch for serialization to handle circular references and non-serializable values + let serializedResult; + try { + serializedResult = JSON.stringify(result); + } catch (serializationError) { + server.debugError(` [${toolName}] Serialization error: `, serializationError); + // Fall back to String() for non-serializable values + serializedResult = String(result); + } + server.debug(` [${toolName}] Serialized result: ${serializedResult.substring(0, 200)}${serializedResult.length > 200 ? "..." : ""}`); + + return { + content: [ + { + type: "text", + text: serializedResult, + }, + ], + }; + } catch (error) { + server.debugError(` [${toolName}] Handler threw error: `, error); + throw error; + } + }; +} + +/** + * Load handler functions from file paths specified in tools configuration. + * This function iterates through tools and loads handler modules based on file extension: + * + * For JavaScript handlers (.js, .cjs, .mjs): + * - Uses require() to load the module + * - Handler must export a function as default export + * - Handler signature: async function handler(args: Record): Promise + * + * For Shell script handlers (.sh): + * - Uses GitHub Actions convention for passing inputs/outputs + * - Inputs are passed as environment variables prefixed with INPUT_ (uppercased) + * - Outputs are read from GITHUB_OUTPUT file (key=value format per line) + * - Returns: { stdout, stderr, outputs } + * + * For Python script handlers (.py): + * - Uses GitHub Actions convention for passing inputs/outputs + * - Inputs are passed as environment variables prefixed with INPUT_ (uppercased) + * - Outputs are read from GITHUB_OUTPUT file (key=value format per line) + * - Executed using python3 command + * - Returns: { stdout, stderr, outputs } + * + * SECURITY NOTE: Handler paths are loaded from tools.json configuration file, + * which should be controlled by the server administrator. When basePath is provided, + * relative paths are resolved within it, preventing directory traversal outside + * the intended directory. Absolute paths bypass this validation but are still + * logged for auditing purposes. + * + * @param {MCPServer} server - The MCP server instance for logging + * @param {Array} tools - Array of tool configurations from tools.json + * @param {string} [basePath] - Optional base path for resolving relative handler paths. + * When provided, relative paths are validated to be within this directory. + * @returns {Array} The tools array with loaded handlers attached + */ +function loadToolHandlers(server, tools, basePath) { + server.debug(`Loading tool handlers...`); + server.debug(` Total tools to process: ${tools.length}`); + server.debug(` Base path: ${basePath || "(not specified)"}`); + + let loadedCount = 0; + let skippedCount = 0; + let errorCount = 0; + + for (const tool of tools) { + const toolName = tool.name || "(unnamed)"; + + // Check if tool has a handler path specified + if (!tool.handler) { + server.debug(` [${toolName}] No handler path specified, skipping handler load`); + skippedCount++; + continue; + } + + const handlerPath = tool.handler; + server.debug(` [${toolName}] Handler path specified: ${handlerPath}`); + + // Resolve the handler path + let resolvedPath = handlerPath; + if (basePath && !path.isAbsolute(handlerPath)) { + resolvedPath = path.resolve(basePath, handlerPath); + server.debug(` [${toolName}] Resolved relative path to: ${resolvedPath}`); + + // Security validation: Ensure resolved path is within basePath to prevent directory traversal + const normalizedBase = path.resolve(basePath); + const normalizedResolved = path.resolve(resolvedPath); + if (!normalizedResolved.startsWith(normalizedBase + path.sep) && normalizedResolved !== normalizedBase) { + server.debug(` [${toolName}] ERROR: Handler path escapes base directory: ${resolvedPath} is not within ${basePath}`); + errorCount++; + continue; + } + } else if (path.isAbsolute(handlerPath)) { + server.debug(` [${toolName}] Using absolute path (bypasses basePath validation): ${handlerPath}`); + } + + // Store the original handler path for reference + tool.handlerPath = handlerPath; + + try { + server.debug(` [${toolName}] Loading handler from: ${resolvedPath}`); + + // Check if file exists before loading + if (!fs.existsSync(resolvedPath)) { + server.debug(` [${toolName}] ERROR: Handler file does not exist: ${resolvedPath}`); + errorCount++; + continue; + } + + // Detect handler type by file extension + const ext = path.extname(resolvedPath).toLowerCase(); + server.debug(` [${toolName}] Handler file extension: ${ext}`); + + if (ext === ".sh") { + // Shell script handler - use GitHub Actions convention + server.debug(` [${toolName}] Detected shell script handler`); + + // Make sure the script is executable (on Unix-like systems) + try { + fs.accessSync(resolvedPath, fs.constants.X_OK); + server.debug(` [${toolName}] Shell script is executable`); + } catch { + // Try to make it executable + try { + fs.chmodSync(resolvedPath, 0o755); + server.debug(` [${toolName}] Made shell script executable`); + } catch (chmodError) { + server.debugError(` [${toolName}] Warning: Could not make shell script executable: `, chmodError); + // Continue anyway - it might work depending on the shell + } + } + + // Lazy-load shell handler module + const { createShellHandler } = require("./mcp_handler_shell.cjs"); + const timeout = tool.timeout || 60; // Default to 60 seconds if not specified + tool.handler = createShellHandler(server, toolName, resolvedPath, timeout); + + loadedCount++; + server.debug(` [${toolName}] Shell handler created successfully with timeout: ${timeout}s`); + } else if (ext === ".py") { + // Python script handler - use GitHub Actions convention + server.debug(` [${toolName}] Detected Python script handler`); + + // Make sure the script is executable (on Unix-like systems) + try { + fs.accessSync(resolvedPath, fs.constants.X_OK); + server.debug(` [${toolName}] Python script is executable`); + } catch { + // Try to make it executable + try { + fs.chmodSync(resolvedPath, 0o755); + server.debug(` [${toolName}] Made Python script executable`); + } catch (chmodError) { + server.debugError(` [${toolName}] Warning: Could not make Python script executable: `, chmodError); + // Continue anyway - python3 will be called explicitly + } + } + + // Lazy-load Python handler module + const { createPythonHandler } = require("./mcp_handler_python.cjs"); + const timeout = tool.timeout || 60; // Default to 60 seconds if not specified + tool.handler = createPythonHandler(server, toolName, resolvedPath, timeout); + + loadedCount++; + server.debug(` [${toolName}] Python handler created successfully with timeout: ${timeout}s`); + } else { + // JavaScript/CommonJS handler - use require() + server.debug(` [${toolName}] Loading JavaScript handler module`); + + // Load the handler module + const handlerModule = require(resolvedPath); + server.debug(` [${toolName}] Handler module loaded successfully`); + server.debug(` [${toolName}] Module type: ${typeof handlerModule}`); + + // Get the handler function (support default export patterns) + let handlerFn = handlerModule; + + // Handle ES module default export pattern (module.default) + if (handlerModule && typeof handlerModule === "object" && typeof handlerModule.default === "function") { + handlerFn = handlerModule.default; + server.debug(` [${toolName}] Using module.default export`); + } + + // Validate that the handler is a function + if (typeof handlerFn !== "function") { + server.debug(` [${toolName}] ERROR: Handler is not a function, got: ${typeof handlerFn}`); + server.debug(` [${toolName}] Module keys: ${Object.keys(handlerModule || {}).join(", ") || "(none)"}`); + errorCount++; + continue; + } + + server.debug(` [${toolName}] Handler function validated successfully`); + server.debug(` [${toolName}] Handler function name: ${handlerFn.name || "(anonymous)"}`); + + // Wrap the handler using the separate function to avoid bloating the closure + tool.handler = createWrappedHandler(server, toolName, handlerFn); + + loadedCount++; + server.debug(` [${toolName}] JavaScript handler loaded and wrapped successfully`); + } + } catch (error) { + server.debugError(` [${toolName}] ERROR loading handler: `, error); + errorCount++; + } + } + + server.debug(`Handler loading complete:`); + server.debug(` Loaded: ${loadedCount}`); + server.debug(` Skipped (no handler path): ${skippedCount}`); + server.debug(` Errors: ${errorCount}`); + + return tools; +} + +/** + * Register a tool with the server + * @param {MCPServer} server - The MCP server instance + * @param {Tool} tool - The tool to register + */ +function registerTool(server, tool) { + const normalizedName = normalizeTool(tool.name); + server.tools[normalizedName] = { + ...tool, + name: normalizedName, + }; + server.debug(`Registered tool: ${normalizedName}`); +} + +/** + * Normalize a tool name (convert dashes to underscores, lowercase) + * @param {string} name - The tool name to normalize + * @returns {string} Normalized tool name + */ +function normalizeTool(name) { + return name.replace(/-/g, "_").toLowerCase(); +} + +/** + * Handle an incoming JSON-RPC request and return a response (for HTTP transport) + * This function is compatible with the MCPServer class's handleRequest method. + * @param {MCPServer} server - The MCP server instance + * @param {Object} request - The incoming JSON-RPC request + * @param {Function} [defaultHandler] - Default handler for tools without a handler + * @returns {Promise} JSON-RPC response object, or null for notifications + */ +async function handleRequest(server, request, defaultHandler) { + const { id, method, params } = request; + + try { + // Handle notifications per JSON-RPC 2.0 spec: + // Requests without id field are notifications (no response) + // Note: id can be null for valid requests, so we check for field presence with "in" operator + if (!("id" in request)) { + // No id field - this is a notification (no response) + return null; + } + + let result; + + if (method === "initialize") { + const protocolVersion = params?.protocolVersion || "2024-11-05"; + result = { + protocolVersion, + serverInfo: server.serverInfo, + capabilities: { + tools: {}, + }, + }; + } else if (method === "ping") { + result = {}; + } else if (method === "tools/list") { + const list = []; + Object.values(server.tools).forEach(tool => { + const toolDef = { + name: tool.name, + description: tool.description, + inputSchema: tool.inputSchema, + }; + list.push(toolDef); + }); + result = { tools: list }; + } else if (method === "tools/call") { + const name = params?.name; + const args = params?.arguments ?? {}; + if (!name || typeof name !== "string") { + throw { + code: -32602, + message: "Invalid params: 'name' must be a string", + }; + } + const tool = server.tools[normalizeTool(name)]; + if (!tool) { + throw { + code: -32602, + message: `Tool '${name}' not found`, + }; + } + + // Use tool handler, or default handler, or error + let handler = tool.handler; + if (!handler && defaultHandler) { + handler = defaultHandler(tool.name); + } + if (!handler) { + throw { + code: -32603, + message: `No handler for tool: ${name}`, + }; + } + + const missing = validateRequiredFields(args, tool.inputSchema); + if (missing.length) { + throw { + code: -32602, + message: `Invalid arguments: missing or empty ${missing.map(m => `'${m}'`).join(", ")}`, + }; + } + + // Call handler and await the result (supports both sync and async handlers) + const handlerResult = await Promise.resolve(handler(args)); + const content = handlerResult && handlerResult.content ? handlerResult.content : []; + result = { content, isError: false }; + } else if (/^notifications\//.test(method)) { + // Notifications don't need a response + return null; + } else { + throw { + code: -32601, + message: `Method not found: ${method}`, + }; + } + + return { + jsonrpc: "2.0", + id, + result, + }; + } catch (error) { + /** @type {any} */ + const err = error; + return { + jsonrpc: "2.0", + id, + error: { + code: err.code || -32603, + message: err.message || "Internal error", + }, + }; + } +} + +/** + * Handle an incoming JSON-RPC message (for stdio transport) + * @param {MCPServer} server - The MCP server instance + * @param {Object} req - The incoming request + * @param {Function} [defaultHandler] - Default handler for tools without a handler + * @returns {Promise} + */ +async function handleMessage(server, req, defaultHandler) { + // Validate basic JSON-RPC structure + if (!req || typeof req !== "object") { + server.debug(`Invalid message: not an object`); + return; + } + + if (req.jsonrpc !== "2.0") { + server.debug(`Invalid message: missing or invalid jsonrpc field`); + return; + } + + const { id, method, params } = req; + + // Validate method field + if (!method || typeof method !== "string") { + server.replyError(id, -32600, "Invalid Request: method must be a string"); + return; + } + + try { + if (method === "initialize") { + const clientInfo = params?.clientInfo ?? {}; + server.debug(`client info: ${JSON.stringify(clientInfo)}`); + const protocolVersion = params?.protocolVersion ?? undefined; + const result = { + serverInfo: server.serverInfo, + ...(protocolVersion ? { protocolVersion } : {}), + capabilities: { + tools: {}, + }, + }; + server.replyResult(id, result); + } else if (method === "tools/list") { + const list = []; + Object.values(server.tools).forEach(tool => { + const toolDef = { + name: tool.name, + description: tool.description, + inputSchema: tool.inputSchema, + }; + list.push(toolDef); + }); + server.replyResult(id, { tools: list }); + } else if (method === "tools/call") { + const name = params?.name; + const args = params?.arguments ?? {}; + if (!name || typeof name !== "string") { + server.replyError(id, -32602, "Invalid params: 'name' must be a string"); + return; + } + const tool = server.tools[normalizeTool(name)]; + if (!tool) { + server.replyError(id, -32601, `Tool not found: ${name} (${normalizeTool(name)})`); + return; + } + + // Use tool handler, or default handler, or error + let handler = tool.handler; + if (!handler && defaultHandler) { + handler = defaultHandler(tool.name); + } + if (!handler) { + server.replyError(id, -32603, `No handler for tool: ${name}`); + return; + } + + const missing = validateRequiredFields(args, tool.inputSchema); + if (missing.length) { + server.replyError(id, -32602, `Invalid arguments: missing or empty ${missing.map(m => `'${m}'`).join(", ")}`); + return; + } + + // Call handler and await the result (supports both sync and async handlers) + server.debug(`Calling handler for tool: ${name}`); + const result = await Promise.resolve(handler(args)); + server.debug(`Handler returned for tool: ${name}`); + const content = result && result.content ? result.content : []; + server.replyResult(id, { content, isError: false }); + } else if (/^notifications\//.test(method)) { + server.debug(`ignore ${method}`); + } else { + server.replyError(id, -32601, `Method not found: ${method}`); + } + } catch (e) { + server.replyError(id, -32603, e instanceof Error ? e.message : String(e)); + } +} + +/** + * Process the read buffer and handle messages + * @param {MCPServer} server - The MCP server instance + * @param {Function} [defaultHandler] - Default handler for tools without a handler + * @returns {Promise} + */ +async function processReadBuffer(server, defaultHandler) { + while (true) { + try { + const message = server.readBuffer.readMessage(); + if (!message) { + break; + } + server.debug(`recv: ${JSON.stringify(message)}`); + await handleMessage(server, message, defaultHandler); + } catch (error) { + // For parse errors, we can't know the request id, so we shouldn't send a response + // according to JSON-RPC spec. Just log the error. + server.debug(`Parse error: ${error instanceof Error ? error.message : String(error)}`); + } + } +} + +/** + * Start the MCP server on stdio + * @param {MCPServer} server - The MCP server instance + * @param {Object} [options] - Start options + * @param {Function} [options.defaultHandler] - Default handler for tools without a handler + */ +function start(server, options = {}) { + const { defaultHandler } = options; + + server.debug(`v${server.serverInfo.version} ready on stdio`); + server.debug(` tools: ${Object.keys(server.tools).join(", ")}`); + + if (!Object.keys(server.tools).length) { + throw new Error("No tools registered"); + } + + const onData = async chunk => { + server.readBuffer.append(chunk); + await processReadBuffer(server, defaultHandler); + }; + + process.stdin.on("data", onData); + process.stdin.on("error", err => server.debug(`stdin error: ${err}`)); + process.stdin.resume(); + server.debug(`listening...`); +} + +module.exports = { + createServer, + registerTool, + normalizeTool, + handleRequest, + handleMessage, + processReadBuffer, + start, + loadToolHandlers, +}; diff --git a/pkg/workflow/js/messages.cjs b/pkg/workflow/js/messages.cjs new file mode 100644 index 0000000000..401a4adf8e --- /dev/null +++ b/pkg/workflow/js/messages.cjs @@ -0,0 +1,58 @@ +// @ts-check +/// + +/** + * Safe Output Messages Module (Barrel File) + * + * This module re-exports all message functions from the modular message files. + * It provides backward compatibility for existing code that imports from messages.cjs. + * + * For new code, prefer importing directly from the specific modules: + * - ./messages_core.cjs - Core utilities (getMessages, renderTemplate, toSnakeCase) + * - ./messages_footer.cjs - Footer messages (getFooterMessage, getFooterInstallMessage, generateFooterWithMessages) + * - ./messages_staged.cjs - Staged mode messages (getStagedTitle, getStagedDescription) + * - ./messages_run_status.cjs - Run status messages (getRunStartedMessage, getRunSuccessMessage, getRunFailureMessage) + * - ./messages_close_discussion.cjs - Close discussion messages (getCloseOlderDiscussionMessage) + * + * Supported placeholders: + * - {workflow_name} - Name of the workflow + * - {run_url} - URL to the workflow run + * - {workflow_source} - Source specification (owner/repo/path@ref) + * - {workflow_source_url} - GitHub URL for the workflow source + * - {triggering_number} - Issue/PR/Discussion number that triggered this workflow + * - {operation} - Operation name (for staged mode titles/descriptions) + * - {event_type} - Event type description (for run-started messages) + * - {status} - Workflow status text (for run-failure messages) + * + * Both camelCase and snake_case placeholder formats are supported. + */ + +// Re-export core utilities +const { getMessages, renderTemplate } = require("./messages_core.cjs"); + +// Re-export footer messages +const { getFooterMessage, getFooterInstallMessage, generateFooterWithMessages, generateXMLMarker } = require("./messages_footer.cjs"); + +// Re-export staged mode messages +const { getStagedTitle, getStagedDescription } = require("./messages_staged.cjs"); + +// Re-export run status messages +const { getRunStartedMessage, getRunSuccessMessage, getRunFailureMessage } = require("./messages_run_status.cjs"); + +// Re-export close discussion messages +const { getCloseOlderDiscussionMessage } = require("./messages_close_discussion.cjs"); + +module.exports = { + getMessages, + renderTemplate, + getFooterMessage, + getFooterInstallMessage, + generateFooterWithMessages, + generateXMLMarker, + getStagedTitle, + getStagedDescription, + getRunStartedMessage, + getRunSuccessMessage, + getRunFailureMessage, + getCloseOlderDiscussionMessage, +}; diff --git a/pkg/workflow/js/messages_close_discussion.cjs b/pkg/workflow/js/messages_close_discussion.cjs new file mode 100644 index 0000000000..96a7e46073 --- /dev/null +++ b/pkg/workflow/js/messages_close_discussion.cjs @@ -0,0 +1,45 @@ +// @ts-check +/// + +/** + * Close Discussion Message Module + * + * This module provides the message for closing older discussions + * when a newer one is created. + */ + +const { getMessages, renderTemplate, toSnakeCase } = require("./messages_core.cjs"); + +/** + * @typedef {Object} CloseOlderDiscussionContext + * @property {string} newDiscussionUrl - URL of the new discussion that replaced this one + * @property {number} newDiscussionNumber - Number of the new discussion + * @property {string} workflowName - Name of the workflow + * @property {string} runUrl - URL of the workflow run + */ + +/** + * Get the close-older-discussion message, using custom template if configured. + * @param {CloseOlderDiscussionContext} ctx - Context for message generation + * @returns {string} Close older discussion message + */ +function getCloseOlderDiscussionMessage(ctx) { + const messages = getMessages(); + + // Create context with both camelCase and snake_case keys + const templateContext = toSnakeCase(ctx); + + // Default close-older-discussion template - pirate themed! 🏴‍☠️ + const defaultMessage = `⚓ Avast! This discussion be marked as **outdated** by [{workflow_name}]({run_url}). + +🗺️ A newer treasure map awaits ye at **[Discussion #{new_discussion_number}]({new_discussion_url})**. + +Fair winds, matey! 🏴‍☠️`; + + // Use custom message if configured + return messages?.closeOlderDiscussion ? renderTemplate(messages.closeOlderDiscussion, templateContext) : renderTemplate(defaultMessage, templateContext); +} + +module.exports = { + getCloseOlderDiscussionMessage, +}; diff --git a/pkg/workflow/js/messages_core.cjs b/pkg/workflow/js/messages_core.cjs new file mode 100644 index 0000000000..ce38d3afed --- /dev/null +++ b/pkg/workflow/js/messages_core.cjs @@ -0,0 +1,91 @@ +// @ts-check +/// + +/** + * Core Message Utilities Module + * + * This module provides shared utilities for message template processing. + * It includes configuration parsing and template rendering functions. + * + * Supported placeholders: + * - {workflow_name} - Name of the workflow + * - {run_url} - URL to the workflow run + * - {workflow_source} - Source specification (owner/repo/path@ref) + * - {workflow_source_url} - GitHub URL for the workflow source + * - {triggering_number} - Issue/PR/Discussion number that triggered this workflow + * - {operation} - Operation name (for staged mode titles/descriptions) + * - {event_type} - Event type description (for run-started messages) + * - {status} - Workflow status text (for run-failure messages) + * + * Both camelCase and snake_case placeholder formats are supported. + */ + +/** + * @typedef {Object} SafeOutputMessages + * @property {string} [footer] - Custom footer message template + * @property {string} [footerInstall] - Custom installation instructions template + * @property {string} [stagedTitle] - Custom staged mode title template + * @property {string} [stagedDescription] - Custom staged mode description template + * @property {string} [runStarted] - Custom workflow activation message template + * @property {string} [runSuccess] - Custom workflow success message template + * @property {string} [runFailure] - Custom workflow failure message template + * @property {string} [detectionFailure] - Custom detection job failure message template + * @property {string} [closeOlderDiscussion] - Custom message for closing older discussions as outdated + */ + +/** + * Get the safe-output messages configuration from environment variable. + * @returns {SafeOutputMessages|null} Parsed messages config or null if not set + */ +function getMessages() { + const messagesEnv = process.env.GH_AW_SAFE_OUTPUT_MESSAGES; + if (!messagesEnv) { + return null; + } + + try { + // Parse JSON with camelCase keys from Go struct (using json struct tags) + return JSON.parse(messagesEnv); + } catch (error) { + core.warning(`Failed to parse GH_AW_SAFE_OUTPUT_MESSAGES: ${error instanceof Error ? error.message : String(error)}`); + return null; + } +} + +/** + * Replace placeholders in a template string with values from context. + * Supports {key} syntax for placeholder replacement. + * @param {string} template - Template string with {key} placeholders + * @param {Record} context - Key-value pairs for replacement + * @returns {string} Template with placeholders replaced + */ +function renderTemplate(template, context) { + return template.replace(/\{(\w+)\}/g, (match, key) => { + const value = context[key]; + return value !== undefined && value !== null ? String(value) : match; + }); +} + +/** + * Convert context object keys to snake_case for template rendering + * @param {Record} obj - Object with camelCase keys + * @returns {Record} Object with snake_case keys + */ +function toSnakeCase(obj) { + /** @type {Record} */ + const result = {}; + for (const [key, value] of Object.entries(obj)) { + // Convert camelCase to snake_case + const snakeKey = key.replace(/([A-Z])/g, "_$1").toLowerCase(); + result[snakeKey] = value; + // Also keep original key for backwards compatibility + result[key] = value; + } + return result; +} + +module.exports = { + getMessages, + renderTemplate, + toSnakeCase, +}; diff --git a/pkg/workflow/js/messages_footer.cjs b/pkg/workflow/js/messages_footer.cjs new file mode 100644 index 0000000000..67d1988692 --- /dev/null +++ b/pkg/workflow/js/messages_footer.cjs @@ -0,0 +1,171 @@ +// @ts-check +/// + +/** + * Footer Message Module + * + * This module provides footer and installation instructions generation + * for safe-output workflows. + */ + +const { getMessages, renderTemplate, toSnakeCase } = require("./messages_core.cjs"); + +/** + * @typedef {Object} FooterContext + * @property {string} workflowName - Name of the workflow + * @property {string} runUrl - URL of the workflow run + * @property {string} [workflowSource] - Source of the workflow (owner/repo/path@ref) + * @property {string} [workflowSourceUrl] - GitHub URL for the workflow source + * @property {number|string} [triggeringNumber] - Issue, PR, or discussion number that triggered this workflow + */ + +/** + * Get the footer message, using custom template if configured. + * @param {FooterContext} ctx - Context for footer generation + * @returns {string} Footer message + */ +function getFooterMessage(ctx) { + const messages = getMessages(); + + // Create context with both camelCase and snake_case keys + const templateContext = toSnakeCase(ctx); + + // Default footer template - pirate themed! 🏴‍☠️ + const defaultFooter = "> Ahoy! This treasure was crafted by [🏴‍☠️ {workflow_name}]({run_url})"; + + // Use custom footer if configured + let footer = messages?.footer ? renderTemplate(messages.footer, templateContext) : renderTemplate(defaultFooter, templateContext); + + // Add triggering reference if available + if (ctx.triggeringNumber) { + footer += ` fer issue #{triggering_number} 🗺️`.replace("{triggering_number}", String(ctx.triggeringNumber)); + } + + return footer; +} + +/** + * Get the footer installation instructions, using custom template if configured. + * @param {FooterContext} ctx - Context for footer generation + * @returns {string} Footer installation message or empty string if no source + */ +function getFooterInstallMessage(ctx) { + if (!ctx.workflowSource || !ctx.workflowSourceUrl) { + return ""; + } + + const messages = getMessages(); + + // Create context with both camelCase and snake_case keys + const templateContext = toSnakeCase(ctx); + + // Default installation template - pirate themed! 🏴‍☠️ + const defaultInstall = "> Arr! To plunder this workflow fer yer own ship, run `gh aw add {workflow_source}`. Chart yer course at [🦜 {workflow_source_url}]({workflow_source_url})!"; + + // Use custom installation message if configured + return messages?.footerInstall ? renderTemplate(messages.footerInstall, templateContext) : renderTemplate(defaultInstall, templateContext); +} + +/** + * Generates an XML comment marker with agentic workflow metadata for traceability. + * This marker enables searching and tracing back items generated by an agentic workflow. + * + * The marker format is: + * + * + * @param {string} workflowName - Name of the workflow + * @param {string} runUrl - URL of the workflow run + * @returns {string} XML comment marker with workflow metadata + */ +function generateXMLMarker(workflowName, runUrl) { + // Read engine metadata from environment variables + const engineId = process.env.GH_AW_ENGINE_ID || ""; + const engineVersion = process.env.GH_AW_ENGINE_VERSION || ""; + const engineModel = process.env.GH_AW_ENGINE_MODEL || ""; + const trackerId = process.env.GH_AW_TRACKER_ID || ""; + + // Build the key-value pairs for the marker + const parts = []; + + // Always include agentic-workflow name + parts.push(`agentic-workflow: ${workflowName}`); + + // Add tracker-id if available (for searchability and tracing) + if (trackerId) { + parts.push(`tracker-id: ${trackerId}`); + } + + // Add engine ID if available + if (engineId) { + parts.push(`engine: ${engineId}`); + } + + // Add version if available + if (engineVersion) { + parts.push(`version: ${engineVersion}`); + } + + // Add model if available + if (engineModel) { + parts.push(`model: ${engineModel}`); + } + + // Always include run URL + parts.push(`run: ${runUrl}`); + + // Return the XML comment marker + return ``; +} + +/** + * Generate the complete footer with AI attribution and optional installation instructions. + * This is a drop-in replacement for the original generateFooter function. + * @param {string} workflowName - Name of the workflow + * @param {string} runUrl - URL of the workflow run + * @param {string} workflowSource - Source of the workflow (owner/repo/path@ref) + * @param {string} workflowSourceURL - GitHub URL for the workflow source + * @param {number|undefined} triggeringIssueNumber - Issue number that triggered this workflow + * @param {number|undefined} triggeringPRNumber - Pull request number that triggered this workflow + * @param {number|undefined} triggeringDiscussionNumber - Discussion number that triggered this workflow + * @returns {string} Complete footer text + */ +function generateFooterWithMessages(workflowName, runUrl, workflowSource, workflowSourceURL, triggeringIssueNumber, triggeringPRNumber, triggeringDiscussionNumber) { + // Determine triggering number (issue takes precedence, then PR, then discussion) + let triggeringNumber; + if (triggeringIssueNumber) { + triggeringNumber = triggeringIssueNumber; + } else if (triggeringPRNumber) { + triggeringNumber = triggeringPRNumber; + } else if (triggeringDiscussionNumber) { + triggeringNumber = `discussion #${triggeringDiscussionNumber}`; + } + + const ctx = { + workflowName, + runUrl, + workflowSource, + workflowSourceUrl: workflowSourceURL, + triggeringNumber, + }; + + let footer = "\n\n" + getFooterMessage(ctx); + + // Add installation instructions if source is available + const installMessage = getFooterInstallMessage(ctx); + if (installMessage) { + footer += "\n>\n" + installMessage; + } + + // Add XML comment marker for traceability + footer += "\n\n" + generateXMLMarker(workflowName, runUrl); + + footer += "\n"; + return footer; +} + +module.exports = { + getFooterMessage, + getFooterInstallMessage, + generateFooterWithMessages, + generateXMLMarker, +}; diff --git a/pkg/workflow/js/messages_run_status.cjs b/pkg/workflow/js/messages_run_status.cjs new file mode 100644 index 0000000000..57658d0b3f --- /dev/null +++ b/pkg/workflow/js/messages_run_status.cjs @@ -0,0 +1,116 @@ +// @ts-check +/// + +/** + * Run Status Message Module + * + * This module provides run status messages (started, success, failure) + * for workflow execution notifications. + */ + +const { getMessages, renderTemplate, toSnakeCase } = require("./messages_core.cjs"); + +/** + * @typedef {Object} RunStartedContext + * @property {string} workflowName - Name of the workflow + * @property {string} runUrl - URL of the workflow run + * @property {string} eventType - Event type description (e.g., "issue", "pull request", "discussion") + */ + +/** + * Get the run-started message, using custom template if configured. + * @param {RunStartedContext} ctx - Context for run-started message generation + * @returns {string} Run-started message + */ +function getRunStartedMessage(ctx) { + const messages = getMessages(); + + // Create context with both camelCase and snake_case keys + const templateContext = toSnakeCase(ctx); + + // Default run-started template - pirate themed! 🏴‍☠️ + const defaultMessage = "⚓ Avast! [{workflow_name}]({run_url}) be settin' sail on this {event_type}! 🏴‍☠️"; + + // Use custom message if configured + return messages?.runStarted ? renderTemplate(messages.runStarted, templateContext) : renderTemplate(defaultMessage, templateContext); +} + +/** + * @typedef {Object} RunSuccessContext + * @property {string} workflowName - Name of the workflow + * @property {string} runUrl - URL of the workflow run + */ + +/** + * Get the run-success message, using custom template if configured. + * @param {RunSuccessContext} ctx - Context for run-success message generation + * @returns {string} Run-success message + */ +function getRunSuccessMessage(ctx) { + const messages = getMessages(); + + // Create context with both camelCase and snake_case keys + const templateContext = toSnakeCase(ctx); + + // Default run-success template - pirate themed! 🏴‍☠️ + const defaultMessage = "🎉 Yo ho ho! [{workflow_name}]({run_url}) found the treasure and completed successfully! ⚓💰"; + + // Use custom message if configured + return messages?.runSuccess ? renderTemplate(messages.runSuccess, templateContext) : renderTemplate(defaultMessage, templateContext); +} + +/** + * @typedef {Object} RunFailureContext + * @property {string} workflowName - Name of the workflow + * @property {string} runUrl - URL of the workflow run + * @property {string} status - Status text (e.g., "failed", "was cancelled", "timed out") + */ + +/** + * Get the run-failure message, using custom template if configured. + * @param {RunFailureContext} ctx - Context for run-failure message generation + * @returns {string} Run-failure message + */ +function getRunFailureMessage(ctx) { + const messages = getMessages(); + + // Create context with both camelCase and snake_case keys + const templateContext = toSnakeCase(ctx); + + // Default run-failure template - pirate themed! 🏴‍☠️ + const defaultMessage = "💀 Blimey! [{workflow_name}]({run_url}) {status} and walked the plank! No treasure today, matey! ☠️"; + + // Use custom message if configured + return messages?.runFailure ? renderTemplate(messages.runFailure, templateContext) : renderTemplate(defaultMessage, templateContext); +} + +/** + * @typedef {Object} DetectionFailureContext + * @property {string} workflowName - Name of the workflow + * @property {string} runUrl - URL of the workflow run + */ + +/** + * Get the detection-failure message, using custom template if configured. + * @param {DetectionFailureContext} ctx - Context for detection-failure message generation + * @returns {string} Detection-failure message + */ +function getDetectionFailureMessage(ctx) { + const messages = getMessages(); + + // Create context with both camelCase and snake_case keys + const templateContext = toSnakeCase(ctx); + + // Default detection-failure template + const defaultMessage = "⚠️ Security scanning failed for [{workflow_name}]({run_url}). Review the logs for details."; + + // Use custom message if configured + return messages?.detectionFailure ? renderTemplate(messages.detectionFailure, templateContext) : renderTemplate(defaultMessage, templateContext); +} + +module.exports = { + getRunStartedMessage, + getRunSuccessMessage, + getRunFailureMessage, + getDetectionFailureMessage, +}; diff --git a/pkg/workflow/js/messages_staged.cjs b/pkg/workflow/js/messages_staged.cjs new file mode 100644 index 0000000000..3a23c34884 --- /dev/null +++ b/pkg/workflow/js/messages_staged.cjs @@ -0,0 +1,57 @@ +// @ts-check +/// + +/** + * Staged Mode Message Module + * + * This module provides staged mode title and description generation + * for safe-output preview functionality. + */ + +const { getMessages, renderTemplate, toSnakeCase } = require("./messages_core.cjs"); + +/** + * @typedef {Object} StagedContext + * @property {string} operation - The operation name (e.g., "Create Issues", "Add Comments") + */ + +/** + * Get the staged mode title, using custom template if configured. + * @param {StagedContext} ctx - Context for staged title generation + * @returns {string} Staged mode title + */ +function getStagedTitle(ctx) { + const messages = getMessages(); + + // Create context with both camelCase and snake_case keys + const templateContext = toSnakeCase(ctx); + + // Default staged title template - pirate themed! 🏴‍☠️ + const defaultTitle = "## 🏴‍☠️ Ahoy Matey! Staged Waters: {operation} Preview"; + + // Use custom title if configured + return messages?.stagedTitle ? renderTemplate(messages.stagedTitle, templateContext) : renderTemplate(defaultTitle, templateContext); +} + +/** + * Get the staged mode description, using custom template if configured. + * @param {StagedContext} ctx - Context for staged description generation + * @returns {string} Staged mode description + */ +function getStagedDescription(ctx) { + const messages = getMessages(); + + // Create context with both camelCase and snake_case keys + const templateContext = toSnakeCase(ctx); + + // Default staged description template - pirate themed! 🏴‍☠️ + const defaultDescription = "🗺️ Shiver me timbers! The following booty would be plundered if we set sail (staged mode disabled):"; + + // Use custom description if configured + return messages?.stagedDescription ? renderTemplate(messages.stagedDescription, templateContext) : renderTemplate(defaultDescription, templateContext); +} + +module.exports = { + getStagedTitle, + getStagedDescription, +}; diff --git a/pkg/workflow/js/missing_tool.cjs b/pkg/workflow/js/missing_tool.cjs new file mode 100644 index 0000000000..a566dccfbc --- /dev/null +++ b/pkg/workflow/js/missing_tool.cjs @@ -0,0 +1,135 @@ +// @ts-check +/// + +async function main() { + const fs = require("fs"); + + // Get environment variables + const agentOutputFile = process.env.GH_AW_AGENT_OUTPUT || ""; + const maxReports = process.env.GH_AW_MISSING_TOOL_MAX ? parseInt(process.env.GH_AW_MISSING_TOOL_MAX) : null; + + core.info("Processing missing-tool reports..."); + if (maxReports) { + core.info(`Maximum reports allowed: ${maxReports}`); + } + + /** @type {any[]} */ + const missingTools = []; + + // Return early if no agent output + if (!agentOutputFile.trim()) { + core.info("No agent output to process"); + core.setOutput("tools_reported", JSON.stringify(missingTools)); + core.setOutput("total_count", missingTools.length.toString()); + return; + } + + // Read agent output from file + let agentOutput; + try { + agentOutput = fs.readFileSync(agentOutputFile, "utf8"); + } catch (error) { + core.info(`Agent output file not found or unreadable: ${error instanceof Error ? error.message : String(error)}`); + core.setOutput("tools_reported", JSON.stringify(missingTools)); + core.setOutput("total_count", missingTools.length.toString()); + return; + } + + if (agentOutput.trim() === "") { + core.info("No agent output to process"); + core.setOutput("tools_reported", JSON.stringify(missingTools)); + core.setOutput("total_count", missingTools.length.toString()); + return; + } + + core.info(`Agent output length: ${agentOutput.length}`); + + // Parse the validated output JSON + let validatedOutput; + try { + validatedOutput = JSON.parse(agentOutput); + } catch (error) { + core.setFailed(`Error parsing agent output JSON: ${error instanceof Error ? error.message : String(error)}`); + return; + } + + if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { + core.info("No valid items found in agent output"); + core.setOutput("tools_reported", JSON.stringify(missingTools)); + core.setOutput("total_count", missingTools.length.toString()); + return; + } + + core.info(`Parsed agent output with ${validatedOutput.items.length} entries`); + + // Process all parsed entries + for (const entry of validatedOutput.items) { + if (entry.type === "missing_tool") { + // Validate required fields + if (!entry.tool) { + core.warning(`missing-tool entry missing 'tool' field: ${JSON.stringify(entry)}`); + continue; + } + if (!entry.reason) { + core.warning(`missing-tool entry missing 'reason' field: ${JSON.stringify(entry)}`); + continue; + } + + const missingTool = { + tool: entry.tool, + reason: entry.reason, + alternatives: entry.alternatives || null, + timestamp: new Date().toISOString(), + }; + + missingTools.push(missingTool); + core.info(`Recorded missing tool: ${missingTool.tool}`); + + // Check max limit + if (maxReports && missingTools.length >= maxReports) { + core.info(`Reached maximum number of missing tool reports (${maxReports})`); + break; + } + } + } + + core.info(`Total missing tools reported: ${missingTools.length}`); + + // Output results + core.setOutput("tools_reported", JSON.stringify(missingTools)); + core.setOutput("total_count", missingTools.length.toString()); + + // Log details for debugging and create step summary + if (missingTools.length > 0) { + core.info("Missing tools summary:"); + + // Create structured summary for GitHub Actions step summary + core.summary.addHeading("Missing Tools Report", 3).addRaw(`Found **${missingTools.length}** missing tool${missingTools.length > 1 ? "s" : ""} in this workflow execution.\n\n`); + + missingTools.forEach((tool, index) => { + core.info(`${index + 1}. Tool: ${tool.tool}`); + core.info(` Reason: ${tool.reason}`); + if (tool.alternatives) { + core.info(` Alternatives: ${tool.alternatives}`); + } + core.info(` Reported at: ${tool.timestamp}`); + core.info(""); + + // Add to summary with structured formatting + core.summary.addRaw(`#### ${index + 1}. \`${tool.tool}\`\n\n`).addRaw(`**Reason:** ${tool.reason}\n\n`); + + if (tool.alternatives) { + core.summary.addRaw(`**Alternatives:** ${tool.alternatives}\n\n`); + } + + core.summary.addRaw(`**Reported at:** ${tool.timestamp}\n\n---\n\n`); + }); + + core.summary.write(); + } else { + core.info("No missing tools reported in this workflow execution."); + core.summary.addHeading("Missing Tools Report", 3).addRaw("✅ No missing tools reported in this workflow execution.").write(); + } +} + +module.exports = { main }; diff --git a/pkg/workflow/js/noop.cjs b/pkg/workflow/js/noop.cjs new file mode 100644 index 0000000000..6bcba542d2 --- /dev/null +++ b/pkg/workflow/js/noop.cjs @@ -0,0 +1,68 @@ +// @ts-check +/// + +const { loadAgentOutput } = require("./load_agent_output.cjs"); + +/** + * Main function to handle noop safe output + * No-op is a fallback output type that logs messages for transparency + * without taking any GitHub API actions + */ +async function main() { + // Check if we're in staged mode + const isStaged = process.env.GH_AW_SAFE_OUTPUTS_STAGED === "true"; + + const result = loadAgentOutput(); + if (!result.success) { + return; + } + + // Find all noop items + const noopItems = result.items.filter(/** @param {any} item */ item => item.type === "noop"); + if (noopItems.length === 0) { + core.info("No noop items found in agent output"); + return; + } + + core.info(`Found ${noopItems.length} noop item(s)`); + + // If in staged mode, emit step summary instead of logging + if (isStaged) { + let summaryContent = "## 🎭 Staged Mode: No-Op Messages Preview\n\n"; + summaryContent += "The following messages would be logged if staged mode was disabled:\n\n"; + + for (let i = 0; i < noopItems.length; i++) { + const item = noopItems[i]; + summaryContent += `### Message ${i + 1}\n`; + summaryContent += `${item.message}\n\n`; + summaryContent += "---\n\n"; + } + + await core.summary.addRaw(summaryContent).write(); + core.info("📝 No-op message preview written to step summary"); + return; + } + + // Process each noop item - just log the messages for transparency + let summaryContent = "\n\n## No-Op Messages\n\n"; + summaryContent += "The following messages were logged for transparency:\n\n"; + + for (let i = 0; i < noopItems.length; i++) { + const item = noopItems[i]; + core.info(`No-op message ${i + 1}: ${item.message}`); + summaryContent += `- ${item.message}\n`; + } + + // Write summary for all noop messages + await core.summary.addRaw(summaryContent).write(); + + // Export the first noop message for use in add-comment default reporting + if (noopItems.length > 0) { + core.setOutput("noop_message", noopItems[0].message); + core.exportVariable("GH_AW_NOOP_MESSAGE", noopItems[0].message); + } + + core.info(`Successfully processed ${noopItems.length} noop message(s)`); +} + +module.exports = { main }; diff --git a/pkg/workflow/js/normalize_branch_name.cjs b/pkg/workflow/js/normalize_branch_name.cjs new file mode 100644 index 0000000000..ce8d4473a3 --- /dev/null +++ b/pkg/workflow/js/normalize_branch_name.cjs @@ -0,0 +1,54 @@ +// @ts-check +/// + +/** + * Normalizes a branch name to be a valid git branch name. + * + * IMPORTANT: Keep this function in sync with the normalizeBranchName function in upload_assets.cjs + * + * Valid characters: alphanumeric (a-z, A-Z, 0-9), dash (-), underscore (_), forward slash (/), dot (.) + * Max length: 128 characters + * + * The normalization process: + * 1. Replaces invalid characters with a single dash + * 2. Collapses multiple consecutive dashes to a single dash + * 3. Removes leading and trailing dashes + * 4. Truncates to 128 characters + * 5. Removes trailing dashes after truncation + * 6. Converts to lowercase + * + * @param {string} branchName - The branch name to normalize + * @returns {string} The normalized branch name + */ +function normalizeBranchName(branchName) { + if (!branchName || typeof branchName !== "string" || branchName.trim() === "") { + return branchName; + } + + // Replace any sequence of invalid characters with a single dash + // Valid characters are: a-z, A-Z, 0-9, -, _, /, . + let normalized = branchName.replace(/[^a-zA-Z0-9\-_/.]+/g, "-"); + + // Collapse multiple consecutive dashes to a single dash + normalized = normalized.replace(/-+/g, "-"); + + // Remove leading and trailing dashes + normalized = normalized.replace(/^-+|-+$/g, ""); + + // Truncate to max 128 characters + if (normalized.length > 128) { + normalized = normalized.substring(0, 128); + } + + // Ensure it doesn't end with a dash after truncation + normalized = normalized.replace(/-+$/, ""); + + // Convert to lowercase + normalized = normalized.toLowerCase(); + + return normalized; +} + +module.exports = { + normalizeBranchName, +}; diff --git a/pkg/workflow/js/notify_comment_error.cjs b/pkg/workflow/js/notify_comment_error.cjs new file mode 100644 index 0000000000..d6781583b9 --- /dev/null +++ b/pkg/workflow/js/notify_comment_error.cjs @@ -0,0 +1,210 @@ +// @ts-check +/// + +// This script updates an existing comment created by the activation job +// to notify about the workflow completion status (success or failure). +// It also processes noop messages and adds them to the activation comment. + +const { loadAgentOutput } = require("./load_agent_output.cjs"); +const { getRunSuccessMessage, getRunFailureMessage, getDetectionFailureMessage } = require("./messages_run_status.cjs"); + +/** + * Collect generated asset URLs from safe output jobs + * @returns {Array} Array of generated asset URLs + */ +function collectGeneratedAssets() { + const assets = []; + + // Get the safe output jobs mapping from environment + const safeOutputJobsEnv = process.env.GH_AW_SAFE_OUTPUT_JOBS; + if (!safeOutputJobsEnv) { + return assets; + } + + let jobOutputMapping; + try { + jobOutputMapping = JSON.parse(safeOutputJobsEnv); + } catch (error) { + core.warning(`Failed to parse GH_AW_SAFE_OUTPUT_JOBS: ${error instanceof Error ? error.message : String(error)}`); + return assets; + } + + // Iterate through each job and collect its URL output + for (const [jobName, urlKey] of Object.entries(jobOutputMapping)) { + // Access the job output using the GitHub Actions context + // The value will be set as an environment variable in the format GH_AW_OUTPUT__ + const envVarName = `GH_AW_OUTPUT_${jobName.toUpperCase()}_${urlKey.toUpperCase()}`; + const url = process.env[envVarName]; + + if (url && url.trim() !== "") { + assets.push(url); + core.info(`Collected asset URL: ${url}`); + } + } + + return assets; +} + +async function main() { + const commentId = process.env.GH_AW_COMMENT_ID; + const commentRepo = process.env.GH_AW_COMMENT_REPO; + const runUrl = process.env.GH_AW_RUN_URL; + const workflowName = process.env.GH_AW_WORKFLOW_NAME || "Workflow"; + const agentConclusion = process.env.GH_AW_AGENT_CONCLUSION || "failure"; + const detectionConclusion = process.env.GH_AW_DETECTION_CONCLUSION; + + core.info(`Comment ID: ${commentId}`); + core.info(`Comment Repo: ${commentRepo}`); + core.info(`Run URL: ${runUrl}`); + core.info(`Workflow Name: ${workflowName}`); + core.info(`Agent Conclusion: ${agentConclusion}`); + if (detectionConclusion) { + core.info(`Detection Conclusion: ${detectionConclusion}`); + } + + // Load agent output to check for noop messages + let noopMessages = []; + const agentOutputResult = loadAgentOutput(); + if (agentOutputResult.success && agentOutputResult.data) { + const noopItems = agentOutputResult.data.items.filter(item => item.type === "noop"); + if (noopItems.length > 0) { + core.info(`Found ${noopItems.length} noop message(s)`); + noopMessages = noopItems.map(item => item.message); + } + } + + // If there's no comment to update but we have noop messages, write to step summary + if (!commentId && noopMessages.length > 0) { + core.info("No comment ID found, writing noop messages to step summary"); + + let summaryContent = "## No-Op Messages\n\n"; + summaryContent += "The following messages were logged for transparency:\n\n"; + + if (noopMessages.length === 1) { + summaryContent += noopMessages[0]; + } else { + summaryContent += noopMessages.map((msg, idx) => `${idx + 1}. ${msg}`).join("\n"); + } + + await core.summary.addRaw(summaryContent).write(); + core.info(`Successfully wrote ${noopMessages.length} noop message(s) to step summary`); + return; + } + + if (!commentId) { + core.info("No comment ID found and no noop messages to process, skipping comment update"); + return; + } + + // At this point, we have a comment to update + if (!runUrl) { + core.setFailed("Run URL is required"); + return; + } + + // Parse comment repo (format: "owner/repo") + const repoOwner = commentRepo ? commentRepo.split("/")[0] : context.repo.owner; + const repoName = commentRepo ? commentRepo.split("/")[1] : context.repo.repo; + + core.info(`Updating comment in ${repoOwner}/${repoName}`); + + // Determine the message based on agent conclusion using custom messages if configured + let message; + + // Check if detection job failed (if detection job exists) + if (detectionConclusion && detectionConclusion === "failure") { + // Detection job failed - report this prominently + message = getDetectionFailureMessage({ + workflowName, + runUrl, + }); + } else if (agentConclusion === "success") { + message = getRunSuccessMessage({ + workflowName, + runUrl, + }); + } else { + // Determine status text based on conclusion type + let statusText; + if (agentConclusion === "cancelled") { + statusText = "was cancelled"; + } else if (agentConclusion === "skipped") { + statusText = "was skipped"; + } else if (agentConclusion === "timed_out") { + statusText = "timed out"; + } else { + statusText = "failed"; + } + + message = getRunFailureMessage({ + workflowName, + runUrl, + status: statusText, + }); + } + + // Add noop messages to the comment if any + if (noopMessages.length > 0) { + message += "\n\n"; + if (noopMessages.length === 1) { + message += noopMessages[0]; + } else { + message += noopMessages.map((msg, idx) => `${idx + 1}. ${msg}`).join("\n"); + } + } + + // Collect generated asset URLs from safe output jobs + const generatedAssets = collectGeneratedAssets(); + if (generatedAssets.length > 0) { + message += "\n\n"; + generatedAssets.forEach(url => { + message += `${url}\n`; + }); + } + + // Check if this is a discussion comment (GraphQL node ID format) + const isDiscussionComment = commentId.startsWith("DC_"); + + try { + if (isDiscussionComment) { + // Update discussion comment using GraphQL + const result = await github.graphql( + ` + mutation($commentId: ID!, $body: String!) { + updateDiscussionComment(input: { commentId: $commentId, body: $body }) { + comment { + id + url + } + } + }`, + { commentId: commentId, body: message } + ); + + const comment = result.updateDiscussionComment.comment; + core.info(`Successfully updated discussion comment`); + core.info(`Comment ID: ${comment.id}`); + core.info(`Comment URL: ${comment.url}`); + } else { + // Update issue/PR comment using REST API + const response = await github.request("PATCH /repos/{owner}/{repo}/issues/comments/{comment_id}", { + owner: repoOwner, + repo: repoName, + comment_id: parseInt(commentId, 10), + body: message, + headers: { + Accept: "application/vnd.github+json", + }, + }); + + core.info(`Successfully updated comment`); + core.info(`Comment ID: ${response.data.id}`); + core.info(`Comment URL: ${response.data.html_url}`); + } + } catch (error) { + // Don't fail the workflow if we can't update the comment + core.warning(`Failed to update comment: ${error instanceof Error ? error.message : String(error)}`); + } +} + +module.exports = { main }; diff --git a/pkg/workflow/js/parse_claude_log.cjs b/pkg/workflow/js/parse_claude_log.cjs new file mode 100644 index 0000000000..6bbfe5b930 --- /dev/null +++ b/pkg/workflow/js/parse_claude_log.cjs @@ -0,0 +1,123 @@ +// @ts-check +/// + +const { runLogParser } = require("./log_parser_bootstrap.cjs"); +const { generateConversationMarkdown, generateInformationSection, formatInitializationSummary, formatToolUse, parseLogEntries } = require("./log_parser_shared.cjs"); + +function main() { + runLogParser({ + parseLog: parseClaudeLog, + parserName: "Claude", + supportsDirectories: false, + }); +} + +/** + * Parses Claude log content and converts it to markdown format + * @param {string} logContent - The raw log content as a string + * @returns {{markdown: string, mcpFailures: string[], maxTurnsHit: boolean, logEntries: Array}} Result with formatted markdown content, MCP failure list, max-turns status, and parsed log entries + */ +function parseClaudeLog(logContent) { + try { + // Use shared parseLogEntries function + const logEntries = parseLogEntries(logContent); + + if (!logEntries) { + return { + markdown: "## Agent Log Summary\n\nLog format not recognized as Claude JSON array or JSONL.\n", + mcpFailures: [], + maxTurnsHit: false, + logEntries: [], + }; + } + + const mcpFailures = []; + + // Generate conversation markdown using shared function + const conversationResult = generateConversationMarkdown(logEntries, { + formatToolCallback: (toolUse, toolResult) => formatToolUse(toolUse, toolResult, { includeDetailedParameters: false }), + formatInitCallback: initEntry => { + const result = formatInitializationSummary(initEntry, { + includeSlashCommands: true, + mcpFailureCallback: server => { + // Display detailed error information for failed MCP servers (Claude-specific) + const errorDetails = []; + + if (server.error) { + errorDetails.push(`**Error:** ${server.error}`); + } + + if (server.stderr) { + // Truncate stderr if too long + const maxStderrLength = 500; + const stderr = server.stderr.length > maxStderrLength ? server.stderr.substring(0, maxStderrLength) + "..." : server.stderr; + errorDetails.push(`**Stderr:** \`${stderr}\``); + } + + if (server.exitCode !== undefined && server.exitCode !== null) { + errorDetails.push(`**Exit Code:** ${server.exitCode}`); + } + + if (server.command) { + errorDetails.push(`**Command:** \`${server.command}\``); + } + + if (server.message) { + errorDetails.push(`**Message:** ${server.message}`); + } + + if (server.reason) { + errorDetails.push(`**Reason:** ${server.reason}`); + } + + // Return formatted error details with proper indentation + if (errorDetails.length > 0) { + return errorDetails.map(detail => ` - ${detail}\n`).join(""); + } + return ""; + }, + }); + + // Track MCP failures + if (result.mcpFailures) { + mcpFailures.push(...result.mcpFailures); + } + return result; + }, + }); + + let markdown = conversationResult.markdown; + + // Add Information section from the last entry with result metadata + const lastEntry = logEntries[logEntries.length - 1]; + markdown += generateInformationSection(lastEntry); + + // Check if max-turns limit was hit + let maxTurnsHit = false; + const maxTurns = process.env.GH_AW_MAX_TURNS; + if (maxTurns && lastEntry && lastEntry.num_turns) { + const configuredMaxTurns = parseInt(maxTurns, 10); + if (!isNaN(configuredMaxTurns) && lastEntry.num_turns >= configuredMaxTurns) { + maxTurnsHit = true; + } + } + + return { markdown, mcpFailures, maxTurnsHit, logEntries }; + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + return { + markdown: `## Agent Log Summary\n\nError parsing Claude log (tried both JSON array and JSONL formats): ${errorMessage}\n`, + mcpFailures: [], + maxTurnsHit: false, + logEntries: [], + }; + } +} + +// Export for testing +if (typeof module !== "undefined" && module.exports) { + module.exports = { + main, + parseClaudeLog, + }; +} diff --git a/pkg/workflow/js/parse_codex_log.cjs b/pkg/workflow/js/parse_codex_log.cjs new file mode 100644 index 0000000000..cf3e771413 --- /dev/null +++ b/pkg/workflow/js/parse_codex_log.cjs @@ -0,0 +1,464 @@ +// @ts-check +/// + +const { runLogParser } = require("./log_parser_bootstrap.cjs"); +const { truncateString, estimateTokens, formatToolCallAsDetails } = require("./log_parser_shared.cjs"); + +function main() { + runLogParser({ + parseLog: parseCodexLog, + parserName: "Codex", + supportsDirectories: false, + }); +} + +/** + * Extract MCP server initialization information from Codex logs + * @param {string[]} lines - Array of log lines + * @returns {{hasInfo: boolean, markdown: string, servers: Array<{name: string, status: string, error?: string}>}} MCP initialization info + */ +function extractMCPInitialization(lines) { + const mcpServers = new Map(); // Map server name to status/error info + let serverCount = 0; + let connectedCount = 0; + let availableTools = []; + + for (const line of lines) { + // Match: Initializing MCP servers from config + if (line.includes("Initializing MCP servers") || (line.includes("mcp") && line.includes("init"))) { + // Continue to next patterns + } + + // Match: Found N MCP servers in configuration + const countMatch = line.match(/Found (\d+) MCP servers? in configuration/i); + if (countMatch) { + serverCount = parseInt(countMatch[1]); + } + + // Match: Connecting to MCP server: + const connectingMatch = line.match(/Connecting to MCP server[:\s]+['"]?(\w+)['"]?/i); + if (connectingMatch) { + const serverName = connectingMatch[1]; + if (!mcpServers.has(serverName)) { + mcpServers.set(serverName, { name: serverName, status: "connecting" }); + } + } + + // Match: MCP server '' connected successfully + const connectedMatch = line.match(/MCP server ['"](\w+)['"] connected successfully/i); + if (connectedMatch) { + const serverName = connectedMatch[1]; + mcpServers.set(serverName, { name: serverName, status: "connected" }); + connectedCount++; + } + + // Match: Failed to connect to MCP server '': + const failedMatch = line.match(/Failed to connect to MCP server ['"](\w+)['"][:]\s*(.+)/i); + if (failedMatch) { + const serverName = failedMatch[1]; + const error = failedMatch[2].trim(); + mcpServers.set(serverName, { name: serverName, status: "failed", error }); + } + + // Match: MCP server '' initialization failed + const initFailedMatch = line.match(/MCP server ['"](\w+)['"] initialization failed/i); + if (initFailedMatch) { + const serverName = initFailedMatch[1]; + const existing = mcpServers.get(serverName); + if (existing && existing.status !== "failed") { + mcpServers.set(serverName, { name: serverName, status: "failed", error: "Initialization failed" }); + } + } + + // Match: Available tools: tool1, tool2, tool3 + const toolsMatch = line.match(/Available tools:\s*(.+)/i); + if (toolsMatch) { + const toolsStr = toolsMatch[1]; + availableTools = toolsStr + .split(",") + .map(t => t.trim()) + .filter(t => t.length > 0); + } + } + + // Build markdown output + let markdown = ""; + const hasInfo = mcpServers.size > 0 || availableTools.length > 0; + + if (mcpServers.size > 0) { + markdown += "**MCP Servers:**\n"; + + // Count by status + const servers = Array.from(mcpServers.values()); + const connected = servers.filter(s => s.status === "connected"); + const failed = servers.filter(s => s.status === "failed"); + + markdown += `- Total: ${servers.length}${serverCount > 0 && servers.length !== serverCount ? ` (configured: ${serverCount})` : ""}\n`; + markdown += `- Connected: ${connected.length}\n`; + if (failed.length > 0) { + markdown += `- Failed: ${failed.length}\n`; + } + markdown += "\n"; + + // List each server with status + for (const server of servers) { + const statusIcon = server.status === "connected" ? "✅" : server.status === "failed" ? "❌" : "⏳"; + markdown += `- ${statusIcon} **${server.name}** (${server.status})`; + if (server.error) { + markdown += `\n - Error: ${server.error}`; + } + markdown += "\n"; + } + markdown += "\n"; + } + + if (availableTools.length > 0) { + markdown += "**Available MCP Tools:**\n"; + markdown += `- Total: ${availableTools.length} tools\n`; + markdown += `- Tools: ${availableTools.slice(0, 10).join(", ")}${availableTools.length > 10 ? ", ..." : ""}\n\n`; + } + + return { + hasInfo, + markdown, + servers: Array.from(mcpServers.values()), + }; +} + +/** + * Parse codex log content and format as markdown + * @param {string} logContent - The raw log content to parse + * @returns {string} Formatted markdown content + */ +function parseCodexLog(logContent) { + try { + const lines = logContent.split("\n"); + + // Look-ahead window size for finding tool results + // New format has verbose debug logs, so requires larger window + const LOOKAHEAD_WINDOW = 50; + + let markdown = ""; + + // Extract MCP initialization information + const mcpInfo = extractMCPInitialization(lines); + if (mcpInfo.hasInfo) { + markdown += "## 🚀 Initialization\n\n"; + markdown += mcpInfo.markdown; + } + + markdown += "## 🤖 Reasoning\n\n"; + + // Second pass: process full conversation flow with interleaved reasoning and tools + let inThinkingSection = false; + + for (let i = 0; i < lines.length; i++) { + const line = lines[i]; + + // Skip metadata lines (including Rust debug lines) + if ( + line.includes("OpenAI Codex") || + line.startsWith("--------") || + line.includes("workdir:") || + line.includes("model:") || + line.includes("provider:") || + line.includes("approval:") || + line.includes("sandbox:") || + line.includes("reasoning effort:") || + line.includes("reasoning summaries:") || + line.includes("tokens used:") || + line.includes("DEBUG codex") || + line.includes("INFO codex") || + line.match(/^\d{4}-\d{2}-\d{2}T[\d:.]+Z\s+(DEBUG|INFO|WARN|ERROR)/) + ) { + continue; + } + + // Thinking section starts with standalone "thinking" line + if (line.trim() === "thinking") { + inThinkingSection = true; + continue; + } + + // Tool call line "tool github.list_pull_requests(...)" + const toolMatch = line.match(/^tool\s+(\w+)\.(\w+)\(/); + if (toolMatch) { + inThinkingSection = false; + const server = toolMatch[1]; + const toolName = toolMatch[2]; + + // Look ahead to find the result status + let statusIcon = "❓"; // Unknown by default + for (let j = i + 1; j < Math.min(i + LOOKAHEAD_WINDOW, lines.length); j++) { + const nextLine = lines[j]; + if (nextLine.includes(`${server}.${toolName}(`) && nextLine.includes("success in")) { + statusIcon = "✅"; + break; + } else if (nextLine.includes(`${server}.${toolName}(`) && (nextLine.includes("failed in") || nextLine.includes("error"))) { + statusIcon = "❌"; + break; + } + } + + markdown += `${statusIcon} ${server}::${toolName}(...)\n\n`; + continue; + } + + // Process thinking content (filter out timestamp lines and very short lines) + if (inThinkingSection && line.trim().length > 20 && !line.match(/^\d{4}-\d{2}-\d{2}T/)) { + const trimmed = line.trim(); + // Add thinking content directly + markdown += `${trimmed}\n\n`; + } + } + + markdown += "## 🤖 Commands and Tools\n\n"; + + // First pass: collect tool calls with details + for (let i = 0; i < lines.length; i++) { + const line = lines[i]; + + // Match: tool server.method(params) or ToolCall: server__method params + const toolMatch = line.match(/^\[.*?\]\s+tool\s+(\w+)\.(\w+)\((.+)\)/) || line.match(/ToolCall:\s+(\w+)__(\w+)\s+(\{.+\})/); + + // Also match: exec bash -lc 'command' in /path + const bashMatch = line.match(/^\[.*?\]\s+exec\s+bash\s+-lc\s+'([^']+)'/); + + if (toolMatch) { + const server = toolMatch[1]; + const toolName = toolMatch[2]; + const params = toolMatch[3]; + + // Look ahead to find the result + let statusIcon = "❓"; + let response = ""; + let isError = false; + + for (let j = i + 1; j < Math.min(i + LOOKAHEAD_WINDOW, lines.length); j++) { + const nextLine = lines[j]; + + // Check for result line: server.method(...) success/failed in Xms: + if (nextLine.includes(`${server}.${toolName}(`) && (nextLine.includes("success in") || nextLine.includes("failed in"))) { + isError = nextLine.includes("failed in"); + statusIcon = isError ? "❌" : "✅"; + + // Extract response - it's the JSON object following this line + let jsonLines = []; + let braceCount = 0; + let inJson = false; + + for (let k = j + 1; k < Math.min(j + 30, lines.length); k++) { + const respLine = lines[k]; + + // Stop if we hit the next tool call or tokens used + if (respLine.includes("tool ") || respLine.includes("ToolCall:") || respLine.includes("tokens used")) { + break; + } + + // Count braces to track JSON boundaries + for (const char of respLine) { + if (char === "{") { + braceCount++; + inJson = true; + } else if (char === "}") { + braceCount--; + } + } + + if (inJson) { + jsonLines.push(respLine); + } + + if (inJson && braceCount === 0) { + break; + } + } + + response = jsonLines.join("\n"); + break; + } + } + + // Format the tool call with HTML details + markdown += formatCodexToolCall(server, toolName, params, response, statusIcon); + } else if (bashMatch) { + const command = bashMatch[1]; + + // Look ahead to find the result + let statusIcon = "❓"; + let response = ""; + let isError = false; + + for (let j = i + 1; j < Math.min(i + LOOKAHEAD_WINDOW, lines.length); j++) { + const nextLine = lines[j]; + + // Check for bash result line: bash -lc 'command' succeeded/failed in Xms: + if (nextLine.includes("bash -lc") && (nextLine.includes("succeeded in") || nextLine.includes("failed in"))) { + isError = nextLine.includes("failed in"); + statusIcon = isError ? "❌" : "✅"; + + // Extract response - it's the plain text following this line + let responseLines = []; + + for (let k = j + 1; k < Math.min(j + 20, lines.length); k++) { + const respLine = lines[k]; + + // Stop if we hit the next tool call, exec, or tokens used + if (respLine.includes("tool ") || respLine.includes("exec ") || respLine.includes("ToolCall:") || respLine.includes("tokens used") || respLine.includes("thinking")) { + break; + } + + responseLines.push(respLine); + } + + response = responseLines.join("\n").trim(); + break; + } + } + + // Format the bash command with HTML details + markdown += formatCodexBashCall(command, response, statusIcon); + } + } + + // Add Information section + markdown += "\n## 📊 Information\n\n"; + + // Extract metadata from Codex logs + let totalTokens = 0; + + // TokenCount(TokenCountEvent { ... total_tokens: 13281 ... + const tokenCountMatches = logContent.matchAll(/total_tokens:\s*(\d+)/g); + for (const match of tokenCountMatches) { + const tokens = parseInt(match[1]); + totalTokens = Math.max(totalTokens, tokens); // Use the highest value (final total) + } + + // Also check for "tokens used\n" at the end (number may have commas) + const finalTokensMatch = logContent.match(/tokens used\n([\d,]+)/); + if (finalTokensMatch) { + // Remove commas before parsing + totalTokens = parseInt(finalTokensMatch[1].replace(/,/g, "")); + } + + if (totalTokens > 0) { + markdown += `**Total Tokens Used:** ${totalTokens.toLocaleString()}\n\n`; + } + + // Count tool calls + const toolCalls = (logContent.match(/ToolCall:\s+\w+__\w+/g) || []).length; + + if (toolCalls > 0) { + markdown += `**Tool Calls:** ${toolCalls}\n\n`; + } + + return markdown; + } catch (error) { + core.error(`Error parsing Codex log: ${error}`); + return "## 🤖 Commands and Tools\n\nError parsing log content.\n\n## 🤖 Reasoning\n\nUnable to parse reasoning from log.\n\n"; + } +} + +/** + * Format a Codex tool call with HTML details + * Uses the shared formatToolCallAsDetails helper for consistent rendering across all engines. + * @param {string} server - The server name (e.g., "github", "time") + * @param {string} toolName - The tool name (e.g., "list_pull_requests") + * @param {string} params - The parameters as JSON string + * @param {string} response - The response as JSON string + * @param {string} statusIcon - The status icon (✅, ❌, or ❓) + * @returns {string} Formatted HTML details string + */ +function formatCodexToolCall(server, toolName, params, response, statusIcon) { + // Calculate token estimate from params + response + const totalTokens = estimateTokens(params) + estimateTokens(response); + + // Format metadata + let metadata = ""; + if (totalTokens > 0) { + metadata = `~${totalTokens}t`; + } + + const summary = `${server}::${toolName}`; + + // Build sections array + const sections = []; + + if (params && params.trim()) { + sections.push({ + label: "Parameters", + content: params, + language: "json", + }); + } + + if (response && response.trim()) { + sections.push({ + label: "Response", + content: response, + language: "json", + }); + } + + return formatToolCallAsDetails({ + summary, + statusIcon, + metadata, + sections, + }); +} + +/** + * Format a Codex bash call with HTML details + * Uses the shared formatToolCallAsDetails helper for consistent rendering across all engines. + * @param {string} command - The bash command + * @param {string} response - The response as plain text + * @param {string} statusIcon - The status icon (✅, ❌, or ❓) + * @returns {string} Formatted HTML details string + */ +function formatCodexBashCall(command, response, statusIcon) { + // Calculate token estimate from command + response + const totalTokens = estimateTokens(command) + estimateTokens(response); + + // Format metadata + let metadata = ""; + if (totalTokens > 0) { + metadata = `~${totalTokens}t`; + } + + const summary = `bash: ${truncateString(command, 60)}`; + + // Build sections array + const sections = []; + + sections.push({ + label: "Command", + content: command, + language: "bash", + }); + + if (response && response.trim()) { + sections.push({ + label: "Output", + content: response, + }); + } + + return formatToolCallAsDetails({ + summary, + statusIcon, + metadata, + sections, + }); +} + +// Export for testing +if (typeof module !== "undefined" && module.exports) { + module.exports = { + main, + parseCodexLog, + formatCodexToolCall, + formatCodexBashCall, + extractMCPInitialization, + }; +} diff --git a/pkg/workflow/js/parse_copilot_log.cjs b/pkg/workflow/js/parse_copilot_log.cjs new file mode 100644 index 0000000000..6976552f8f --- /dev/null +++ b/pkg/workflow/js/parse_copilot_log.cjs @@ -0,0 +1,692 @@ +// @ts-check +/// + +const { runLogParser } = require("./log_parser_bootstrap.cjs"); +const { generateConversationMarkdown, generateInformationSection, formatInitializationSummary, formatToolUse, parseLogEntries } = require("./log_parser_shared.cjs"); + +function main() { + runLogParser({ + parseLog: parseCopilotLog, + parserName: "Copilot", + supportsDirectories: true, + }); +} + +/** + * Extracts the premium request count from the log content using regex + * @param {string} logContent - The raw log content as a string + * @returns {number} The number of premium requests consumed (defaults to 1 if not found) + */ +function extractPremiumRequestCount(logContent) { + // Try various patterns that might appear in the Copilot CLI output + const patterns = [/premium\s+requests?\s+consumed:?\s*(\d+)/i, /(\d+)\s+premium\s+requests?\s+consumed/i, /consumed\s+(\d+)\s+premium\s+requests?/i]; + + for (const pattern of patterns) { + const match = logContent.match(pattern); + if (match && match[1]) { + const count = parseInt(match[1], 10); + if (!isNaN(count) && count > 0) { + return count; + } + } + } + + // Default to 1 if no match found + // For agentic workflows, 1 premium request is consumed per workflow run + return 1; +} + +/** + * Parses Copilot CLI log content and converts it to markdown format + * @param {string} logContent - The raw log content as a string + * @returns {string} Formatted markdown content + */ +function parseCopilotLog(logContent) { + try { + let logEntries; + + // First, try to parse as JSON array (structured format) + try { + logEntries = JSON.parse(logContent); + if (!Array.isArray(logEntries)) { + throw new Error("Not a JSON array"); + } + } catch (jsonArrayError) { + // If that fails, try to parse as debug logs format + const debugLogEntries = parseDebugLogFormat(logContent); + if (debugLogEntries && debugLogEntries.length > 0) { + logEntries = debugLogEntries; + } else { + // Try JSONL format using shared function + logEntries = parseLogEntries(logContent); + } + } + + if (!logEntries || logEntries.length === 0) { + return { markdown: "## Agent Log Summary\n\nLog format not recognized as Copilot JSON array or JSONL.\n", logEntries: [] }; + } + + // Generate conversation markdown using shared function + const conversationResult = generateConversationMarkdown(logEntries, { + formatToolCallback: (toolUse, toolResult) => formatToolUse(toolUse, toolResult, { includeDetailedParameters: true }), + formatInitCallback: initEntry => + formatInitializationSummary(initEntry, { + includeSlashCommands: false, + modelInfoCallback: entry => { + // Display premium model information if available (Copilot-specific) + if (!entry.model_info) return ""; + + const modelInfo = entry.model_info; + let markdown = ""; + + // Display model name and vendor + if (modelInfo.name) { + markdown += `**Model Name:** ${modelInfo.name}`; + if (modelInfo.vendor) { + markdown += ` (${modelInfo.vendor})`; + } + markdown += "\n\n"; + } + + // Display billing/premium information + if (modelInfo.billing) { + const billing = modelInfo.billing; + if (billing.is_premium === true) { + markdown += `**Premium Model:** Yes`; + if (billing.multiplier && billing.multiplier !== 1) { + markdown += ` (${billing.multiplier}x cost multiplier)`; + } + markdown += "\n"; + + if (billing.restricted_to && Array.isArray(billing.restricted_to) && billing.restricted_to.length > 0) { + markdown += `**Required Plans:** ${billing.restricted_to.join(", ")}\n`; + } + markdown += "\n"; + } else if (billing.is_premium === false) { + markdown += `**Premium Model:** No\n\n`; + } + } + + return markdown; + }, + }), + }); + + let markdown = conversationResult.markdown; + + // Add Information section + const lastEntry = logEntries[logEntries.length - 1]; + const initEntry = logEntries.find(entry => entry.type === "system" && entry.subtype === "init"); + + markdown += generateInformationSection(lastEntry, { + additionalInfoCallback: entry => { + // Display premium request consumption if using a premium model + const isPremiumModel = initEntry && initEntry.model_info && initEntry.model_info.billing && initEntry.model_info.billing.is_premium === true; + if (isPremiumModel) { + const premiumRequestCount = extractPremiumRequestCount(logContent); + return `**Premium Requests Consumed:** ${premiumRequestCount}\n\n`; + } + return ""; + }, + }); + + return { markdown, logEntries }; + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + return { + markdown: `## Agent Log Summary\n\nError parsing Copilot log (tried both JSON array and JSONL formats): ${errorMessage}\n`, + logEntries: [], + }; + } +} + +/** + * Scans log content for tool execution errors and builds a map of failed tools + * @param {string} logContent - Raw debug log content + * @returns {Map} Map of tool IDs/names to error status + */ +function scanForToolErrors(logContent) { + const toolErrors = new Map(); + const lines = logContent.split("\n"); + + // Track recent tool calls to associate errors with them + const recentToolCalls = []; + const MAX_RECENT_TOOLS = 10; + + for (let i = 0; i < lines.length; i++) { + const line = lines[i]; + + // Look for tool_calls in data blocks (not in JSON arguments) + // Only match if it's in a choices/message context + if (line.includes('"tool_calls":') && !line.includes('\\"tool_calls\\"')) { + // Next few lines should contain tool call details + for (let j = i + 1; j < Math.min(i + 30, lines.length); j++) { + const nextLine = lines[j]; + + // Extract tool call ID + const idMatch = nextLine.match(/"id":\s*"([^"]+)"/); + // Extract function name (not arguments with escaped quotes) + const nameMatch = nextLine.match(/"name":\s*"([^"]+)"/) && !nextLine.includes('\\"name\\"'); + + if (idMatch) { + const toolId = idMatch[1]; + // Keep looking for the name + for (let k = j; k < Math.min(j + 10, lines.length); k++) { + const nameLine = lines[k]; + const funcNameMatch = nameLine.match(/"name":\s*"([^"]+)"/); + if (funcNameMatch && !nameLine.includes('\\"name\\"')) { + const toolName = funcNameMatch[1]; + recentToolCalls.unshift({ id: toolId, name: toolName }); + if (recentToolCalls.length > MAX_RECENT_TOOLS) { + recentToolCalls.pop(); + } + break; + } + } + } + } + } + + // Look for error messages + const errorMatch = line.match(/\[ERROR\].*(?:Tool execution failed|Permission denied|Resource not accessible|Error executing tool)/i); + if (errorMatch) { + // Try to extract tool name from error line + const toolNameMatch = line.match(/Tool execution failed:\s*([^\s]+)/i); + const toolIdMatch = line.match(/tool_call_id:\s*([^\s]+)/i); + + if (toolNameMatch) { + const toolName = toolNameMatch[1]; + toolErrors.set(toolName, true); + // Also mark by ID if we can find it in recent calls + const matchingTool = recentToolCalls.find(t => t.name === toolName); + if (matchingTool) { + toolErrors.set(matchingTool.id, true); + } + } else if (toolIdMatch) { + toolErrors.set(toolIdMatch[1], true); + } else if (recentToolCalls.length > 0) { + // Mark the most recent tool call as failed + const lastTool = recentToolCalls[0]; + toolErrors.set(lastTool.id, true); + toolErrors.set(lastTool.name, true); + } + } + } + + return toolErrors; +} + +/** + * Parses Copilot CLI debug log format and reconstructs the conversation flow + * @param {string} logContent - Raw debug log content + * @returns {Array} Array of log entries in structured format + */ +function parseDebugLogFormat(logContent) { + const entries = []; + const lines = logContent.split("\n"); + + // First pass: scan for tool errors + const toolErrors = scanForToolErrors(logContent); + + // Extract model information from the start + let model = "unknown"; + let sessionId = null; + let modelInfo = null; + let tools = []; + const modelMatch = logContent.match(/Starting Copilot CLI: ([\d.]+)/); + if (modelMatch) { + sessionId = `copilot-${modelMatch[1]}-${Date.now()}`; + } + + // Extract premium model info from "Got model info:" JSON block + // Look for a multi-line JSON block that starts with "Got model info: {" and ends with "}" + const gotModelInfoIndex = logContent.indexOf("[DEBUG] Got model info: {"); + if (gotModelInfoIndex !== -1) { + // Find the start of the JSON (the opening brace) + const jsonStart = logContent.indexOf("{", gotModelInfoIndex); + if (jsonStart !== -1) { + // Track braces to find the end of the JSON + let braceCount = 0; + let inString = false; + let escapeNext = false; + let jsonEnd = -1; + + for (let i = jsonStart; i < logContent.length; i++) { + const char = logContent[i]; + + if (escapeNext) { + escapeNext = false; + continue; + } + + if (char === "\\") { + escapeNext = true; + continue; + } + + if (char === '"' && !escapeNext) { + inString = !inString; + continue; + } + + if (inString) continue; + + if (char === "{") { + braceCount++; + } else if (char === "}") { + braceCount--; + if (braceCount === 0) { + jsonEnd = i + 1; + break; + } + } + } + + if (jsonEnd !== -1) { + const modelInfoJson = logContent.substring(jsonStart, jsonEnd); + try { + modelInfo = JSON.parse(modelInfoJson); + } catch (e) { + // Failed to parse model info, continue without it + } + } + } + } + + // Extract tools from "[DEBUG] Tools:" section + // The format is: [DEBUG] Tools: \n[DEBUG] [\n { "type": "function", "function": { "name": "..." } }\n] + const toolsIndex = logContent.indexOf("[DEBUG] Tools:"); + if (toolsIndex !== -1) { + // Find the start of the JSON array - look for a line that starts with [DEBUG] [ + // Skip past the "Tools:" line first + const afterToolsLine = logContent.indexOf("\n", toolsIndex); + let toolsStart = logContent.indexOf("[DEBUG] [", afterToolsLine); + if (toolsStart !== -1) { + // Find the actual '[' character after '[DEBUG] ' + toolsStart = logContent.indexOf("[", toolsStart + 7); // Skip '[DEBUG] ' which is 8 chars + } + if (toolsStart !== -1) { + // Track brackets to find the end of the JSON array + let bracketCount = 0; + let inString = false; + let escapeNext = false; + let toolsEnd = -1; + + for (let i = toolsStart; i < logContent.length; i++) { + const char = logContent[i]; + + if (escapeNext) { + escapeNext = false; + continue; + } + + if (char === "\\") { + escapeNext = true; + continue; + } + + if (char === '"' && !escapeNext) { + inString = !inString; + continue; + } + + if (inString) continue; + + if (char === "[") { + bracketCount++; + } else if (char === "]") { + bracketCount--; + if (bracketCount === 0) { + toolsEnd = i + 1; + break; + } + } + } + + if (toolsEnd !== -1) { + // Remove [DEBUG] prefixes from each line in the JSON + let toolsJson = logContent.substring(toolsStart, toolsEnd); + toolsJson = toolsJson.replace(/^\d{4}-\d{2}-\d{2}T[\d:.]+Z \[DEBUG\] /gm, ""); + + try { + const toolsArray = JSON.parse(toolsJson); + // Extract tool names from the OpenAI function format + // Format: [{ "type": "function", "function": { "name": "bash", ... } }, ...] + if (Array.isArray(toolsArray)) { + tools = toolsArray + .map(tool => { + if (tool.type === "function" && tool.function && tool.function.name) { + // Convert github-* names to mcp__github__* format for consistency + let name = tool.function.name; + if (name.startsWith("github-")) { + name = "mcp__github__" + name.substring(7); + } else if (name.startsWith("safe_outputs-")) { + name = name; // Keep safe_outputs names as-is + } + return name; + } + return null; + }) + .filter(name => name !== null); + } + } catch (e) { + // Failed to parse tools, continue without them + } + } + } + } + + // Find all JSON response blocks in the debug logs + let inDataBlock = false; + let currentJsonLines = []; + let turnCount = 0; + + for (let i = 0; i < lines.length; i++) { + const line = lines[i]; + + // Detect start of a JSON data block + if (line.includes("[DEBUG] data:")) { + inDataBlock = true; + currentJsonLines = []; + continue; + } + + // While in a data block, accumulate lines + if (inDataBlock) { + // Check if this line starts with timestamp + const hasTimestamp = line.match(/^\d{4}-\d{2}-\d{2}T[\d:.]+Z /); + + if (hasTimestamp) { + // Strip the timestamp and [DEBUG] prefix to see what remains + const cleanLine = line.replace(/^\d{4}-\d{2}-\d{2}T[\d:.]+Z \[DEBUG\] /, ""); + + // If after stripping, the line starts with JSON characters, it's part of JSON + // Otherwise, it's a new log entry and we should end the block + const isJsonContent = /^[{\[}\]"]/.test(cleanLine) || cleanLine.trim().startsWith('"'); + + if (!isJsonContent) { + // This is a new log line (not JSON content) - end of JSON block, process what we have + if (currentJsonLines.length > 0) { + try { + const jsonStr = currentJsonLines.join("\n"); + const jsonData = JSON.parse(jsonStr); + + // Extract model info + if (jsonData.model) { + model = jsonData.model; + } + + // Process the choices in the response + if (jsonData.choices && Array.isArray(jsonData.choices)) { + for (const choice of jsonData.choices) { + if (choice.message) { + const message = choice.message; + + // Create an assistant entry + const content = []; + const toolResults = []; // Collect tool calls to create synthetic results (debug logs don't include actual results) + + if (message.content && message.content.trim()) { + content.push({ + type: "text", + text: message.content, + }); + } + + if (message.tool_calls && Array.isArray(message.tool_calls)) { + for (const toolCall of message.tool_calls) { + if (toolCall.function) { + let toolName = toolCall.function.name; + const originalToolName = toolName; // Keep original for error matching + const toolId = toolCall.id || `tool_${Date.now()}_${Math.random()}`; + let args = {}; + + // Parse tool name (handle github- prefix and bash) + if (toolName.startsWith("github-")) { + toolName = "mcp__github__" + toolName.substring(7); + } else if (toolName === "bash") { + toolName = "Bash"; + } + + // Parse arguments + try { + args = JSON.parse(toolCall.function.arguments); + } catch (e) { + args = {}; + } + + content.push({ + type: "tool_use", + id: toolId, + name: toolName, + input: args, + }); + + // Check if this tool had an error (by ID or by name) + const hasError = toolErrors.has(toolId) || toolErrors.has(originalToolName); + + // Create a corresponding tool result + toolResults.push({ + type: "tool_result", + tool_use_id: toolId, + content: hasError ? "Permission denied or tool execution failed" : "", // Set error message if failed + is_error: hasError, // Mark as error if we detected failure + }); + } + } + } + + if (content.length > 0) { + entries.push({ + type: "assistant", + message: { content }, + }); + turnCount++; + + // Add tool results as a user message if we have any + if (toolResults.length > 0) { + entries.push({ + type: "user", + message: { content: toolResults }, + }); + } + } + } + } + + // Accumulate usage/result entry from each response + if (jsonData.usage) { + // Initialize accumulator if needed + if (!entries._accumulatedUsage) { + entries._accumulatedUsage = { + input_tokens: 0, + output_tokens: 0, + }; + } + + // Accumulate token counts from this response + // OpenAI uses prompt_tokens/completion_tokens, normalize to input_tokens/output_tokens + if (jsonData.usage.prompt_tokens) { + entries._accumulatedUsage.input_tokens += jsonData.usage.prompt_tokens; + } + if (jsonData.usage.completion_tokens) { + entries._accumulatedUsage.output_tokens += jsonData.usage.completion_tokens; + } + + // Store result entry with accumulated usage + entries._lastResult = { + type: "result", + num_turns: turnCount, + usage: entries._accumulatedUsage, + }; + } + } + } catch (e) { + // Skip invalid JSON blocks + } + } + + inDataBlock = false; + currentJsonLines = []; + continue; // Don't add this line to JSON + } else if (hasTimestamp && isJsonContent) { + // This line has a timestamp but is JSON content - strip prefix and add + currentJsonLines.push(cleanLine); + } + } else { + // This line is part of the JSON - add it (remove [DEBUG] prefix if present) + const cleanLine = line.replace(/^\d{4}-\d{2}-\d{2}T[\d:.]+Z \[DEBUG\] /, ""); + currentJsonLines.push(cleanLine); + } + } + } + + // Process any remaining JSON block at the end of file + if (inDataBlock && currentJsonLines.length > 0) { + try { + const jsonStr = currentJsonLines.join("\n"); + const jsonData = JSON.parse(jsonStr); + + if (jsonData.model) { + model = jsonData.model; + } + + if (jsonData.choices && Array.isArray(jsonData.choices)) { + for (const choice of jsonData.choices) { + if (choice.message) { + const message = choice.message; + const content = []; + const toolResults = []; // Collect tool calls to create synthetic results (debug logs don't include actual results) + + if (message.content && message.content.trim()) { + content.push({ + type: "text", + text: message.content, + }); + } + + if (message.tool_calls && Array.isArray(message.tool_calls)) { + for (const toolCall of message.tool_calls) { + if (toolCall.function) { + let toolName = toolCall.function.name; + const originalToolName = toolName; + const toolId = toolCall.id || `tool_${Date.now()}_${Math.random()}`; + let args = {}; + + if (toolName.startsWith("github-")) { + toolName = "mcp__github__" + toolName.substring(7); + } else if (toolName === "bash") { + toolName = "Bash"; + } + + try { + args = JSON.parse(toolCall.function.arguments); + } catch (e) { + args = {}; + } + + content.push({ + type: "tool_use", + id: toolId, + name: toolName, + input: args, + }); + + // Check if this tool had an error (by ID or by name) + const hasError = toolErrors.has(toolId) || toolErrors.has(originalToolName); + + // Create a corresponding tool result + toolResults.push({ + type: "tool_result", + tool_use_id: toolId, + content: hasError ? "Permission denied or tool execution failed" : "", + is_error: hasError, + }); + } + } + } + + if (content.length > 0) { + entries.push({ + type: "assistant", + message: { content }, + }); + turnCount++; + + // Add tool results as a user message if we have any + if (toolResults.length > 0) { + entries.push({ + type: "user", + message: { content: toolResults }, + }); + } + } + } + } + + if (jsonData.usage) { + // Initialize accumulator if needed + if (!entries._accumulatedUsage) { + entries._accumulatedUsage = { + input_tokens: 0, + output_tokens: 0, + }; + } + + // Accumulate token counts from this response + // OpenAI uses prompt_tokens/completion_tokens, normalize to input_tokens/output_tokens + if (jsonData.usage.prompt_tokens) { + entries._accumulatedUsage.input_tokens += jsonData.usage.prompt_tokens; + } + if (jsonData.usage.completion_tokens) { + entries._accumulatedUsage.output_tokens += jsonData.usage.completion_tokens; + } + + // Store result entry with accumulated usage + entries._lastResult = { + type: "result", + num_turns: turnCount, + usage: entries._accumulatedUsage, + }; + } + } + } catch (e) { + // Skip invalid JSON + } + } + + // Add system init entry at the beginning if we have entries + if (entries.length > 0) { + const initEntry = { + type: "system", + subtype: "init", + session_id: sessionId, + model: model, + tools: tools, // Tools extracted from [DEBUG] Tools: section + }; + + // Add model info if available + if (modelInfo) { + initEntry.model_info = modelInfo; + } + + entries.unshift(initEntry); + + // Add the final result entry if we have it + if (entries._lastResult) { + entries.push(entries._lastResult); + delete entries._lastResult; + } + } + + return entries; +} + +// Export for testing +if (typeof module !== "undefined" && module.exports) { + module.exports = { + main, + parseCopilotLog, + extractPremiumRequestCount, + }; +} diff --git a/pkg/workflow/js/parse_firewall_logs.cjs b/pkg/workflow/js/parse_firewall_logs.cjs new file mode 100644 index 0000000000..c41ac9538f --- /dev/null +++ b/pkg/workflow/js/parse_firewall_logs.cjs @@ -0,0 +1,220 @@ +// @ts-check +/// + +/** + * Parses firewall logs and creates a step summary + * Firewall log format: timestamp client_ip:port domain dest_ip:port proto method status decision url user_agent + */ + +const { sanitizeWorkflowName } = require("./sanitize_workflow_name.cjs"); + +function main() { + const fs = require("fs"); + const path = require("path"); + + try { + // Get the firewall logs directory path - awf writes logs to /tmp/gh-aw/sandbox/firewall/logs + const squidLogsDir = `/tmp/gh-aw/sandbox/firewall/logs/`; + + if (!fs.existsSync(squidLogsDir)) { + core.info(`No firewall logs directory found at: ${squidLogsDir}`); + return; + } + + // Find all access.log files + const files = fs.readdirSync(squidLogsDir).filter(file => file.endsWith(".log")); + + if (files.length === 0) { + core.info(`No firewall log files found in: ${squidLogsDir}`); + return; + } + + core.info(`Found ${files.length} firewall log file(s)`); + + // Parse all log files and aggregate results + let totalRequests = 0; + let allowedRequests = 0; + let deniedRequests = 0; + const allowedDomains = new Set(); + const deniedDomains = new Set(); + const requestsByDomain = new Map(); + + for (const file of files) { + const filePath = path.join(squidLogsDir, file); + core.info(`Parsing firewall log: ${file}`); + + const content = fs.readFileSync(filePath, "utf8"); + const lines = content.split("\n").filter(line => line.trim()); + + for (const line of lines) { + const entry = parseFirewallLogLine(line); + if (!entry) { + continue; + } + + totalRequests++; + + // Determine if request was allowed or denied + const isAllowed = isRequestAllowed(entry.decision, entry.status); + + if (isAllowed) { + allowedRequests++; + allowedDomains.add(entry.domain); + } else { + deniedRequests++; + deniedDomains.add(entry.domain); + } + + // Track request count per domain + if (!requestsByDomain.has(entry.domain)) { + requestsByDomain.set(entry.domain, { allowed: 0, denied: 0 }); + } + const domainStats = requestsByDomain.get(entry.domain); + if (isAllowed) { + domainStats.allowed++; + } else { + domainStats.denied++; + } + } + } + + // Generate step summary + const summary = generateFirewallSummary({ + totalRequests, + allowedRequests, + deniedRequests, + allowedDomains: Array.from(allowedDomains).sort(), + deniedDomains: Array.from(deniedDomains).sort(), + requestsByDomain, + }); + + core.summary.addRaw(summary).write(); + core.info("Firewall log summary generated successfully"); + } catch (error) { + core.setFailed(error instanceof Error ? error : String(error)); + } +} + +/** + * Parses a single firewall log line + * Format: timestamp client_ip:port domain dest_ip:port proto method status decision url user_agent + * @param {string} line - Log line to parse + * @returns {object|null} Parsed entry or null if invalid + */ +function parseFirewallLogLine(line) { + const trimmed = line.trim(); + if (!trimmed || trimmed.startsWith("#")) { + return null; + } + + // Split by whitespace but preserve quoted strings + const fields = trimmed.match(/(?:[^\s"]+|"[^"]*")+/g); + if (!fields || fields.length < 10) { + return null; + } + + // Only validate timestamp (essential for log format detection) + const timestamp = fields[0]; + if (!/^\d+(\.\d+)?$/.test(timestamp)) { + return null; + } + + return { + timestamp, + clientIpPort: fields[1], + domain: fields[2], + destIpPort: fields[3], + proto: fields[4], + method: fields[5], + status: fields[6], + decision: fields[7], + url: fields[8], + userAgent: fields[9]?.replace(/^"|"$/g, "") || "-", + }; +} + +/** + * Determines if a request was allowed based on decision and status + * @param {string} decision - Decision field (e.g., TCP_TUNNEL:HIER_DIRECT, NONE_NONE:HIER_NONE) + * @param {string} status - Status code (e.g., 200, 403, 0) + * @returns {boolean} True if request was allowed + */ +function isRequestAllowed(decision, status) { + // Check status code first + const statusCode = parseInt(status, 10); + if (statusCode === 200 || statusCode === 206 || statusCode === 304) { + return true; + } + + // Check decision field + if (decision.includes("TCP_TUNNEL") || decision.includes("TCP_HIT") || decision.includes("TCP_MISS")) { + return true; + } + + if (decision.includes("NONE_NONE") || decision.includes("TCP_DENIED") || statusCode === 403 || statusCode === 407) { + return false; + } + + // Default to denied for safety + return false; +} + +/** + * Generates markdown summary from firewall log analysis + * Uses details/summary structure with basic stats in summary and domain table in details + * @param {object} analysis - Analysis results + * @returns {string} Markdown formatted summary + */ +function generateFirewallSummary(analysis) { + const { totalRequests, requestsByDomain } = analysis; + + // Filter out invalid domains (placeholder "-" values) + const validDomains = Array.from(requestsByDomain.keys()) + .filter(domain => domain !== "-") + .sort(); + const uniqueDomainCount = validDomains.length; + + // Calculate valid allowed and denied requests in a single pass + let validAllowedRequests = 0; + let validDeniedRequests = 0; + for (const domain of validDomains) { + const stats = requestsByDomain.get(domain); + validAllowedRequests += stats.allowed; + validDeniedRequests += stats.denied; + } + + let summary = ""; + + // Wrap entire summary in details/summary tags + summary += "
\n"; + summary += `sandbox agent: ${totalRequests} request${totalRequests !== 1 ? "s" : ""} | `; + summary += `${validAllowedRequests} allowed | `; + summary += `${validDeniedRequests} blocked | `; + summary += `${uniqueDomainCount} unique domain${uniqueDomainCount !== 1 ? "s" : ""}\n\n`; + + if (uniqueDomainCount > 0) { + summary += "| Domain | Allowed | Denied |\n"; + summary += "|--------|---------|--------|\n"; + + for (const domain of validDomains) { + const stats = requestsByDomain.get(domain); + summary += `| ${domain} | ${stats.allowed} | ${stats.denied} |\n`; + } + } else { + summary += "No firewall activity detected.\n"; + } + + summary += "\n
\n\n"; + + return summary; +} + +// Export for testing +if (typeof module !== "undefined" && module.exports) { + module.exports = { + parseFirewallLogLine, + isRequestAllowed, + generateFirewallSummary, + main, + }; +} diff --git a/pkg/workflow/js/push_repo_memory.cjs b/pkg/workflow/js/push_repo_memory.cjs new file mode 100644 index 0000000000..cd86a77c5f --- /dev/null +++ b/pkg/workflow/js/push_repo_memory.cjs @@ -0,0 +1,243 @@ +// @ts-check +/// + +const fs = require("fs"); +const path = require("path"); +const { execSync } = require("child_process"); + +/** + * Push repo-memory changes to git branch + * Environment variables: + * ARTIFACT_DIR: Path to the downloaded artifact directory containing memory files + * MEMORY_ID: Memory identifier (used for subdirectory path) + * TARGET_REPO: Target repository (owner/name) + * BRANCH_NAME: Branch name to push to + * MAX_FILE_SIZE: Maximum file size in bytes + * MAX_FILE_COUNT: Maximum number of files per commit + * FILE_GLOB_FILTER: Optional space-separated list of file patterns (e.g., "*.md metrics/ ** /*") + * Supports * (matches any chars except /) and ** (matches any chars including /) + * GH_TOKEN: GitHub token for authentication + * GITHUB_RUN_ID: Workflow run ID for commit messages + */ + +async function main() { + const artifactDir = process.env.ARTIFACT_DIR; + const memoryId = process.env.MEMORY_ID; + const targetRepo = process.env.TARGET_REPO; + const branchName = process.env.BRANCH_NAME; + const maxFileSize = parseInt(process.env.MAX_FILE_SIZE || "10240", 10); + const maxFileCount = parseInt(process.env.MAX_FILE_COUNT || "100", 10); + const fileGlobFilter = process.env.FILE_GLOB_FILTER || ""; + const ghToken = process.env.GH_TOKEN; + const githubRunId = process.env.GITHUB_RUN_ID || "unknown"; + + // Validate required environment variables + if (!artifactDir || !memoryId || !targetRepo || !branchName || !ghToken) { + core.setFailed("Missing required environment variables: ARTIFACT_DIR, MEMORY_ID, TARGET_REPO, BRANCH_NAME, GH_TOKEN"); + return; + } + + // Source directory with memory files (artifact location) + const sourceMemoryPath = path.join(artifactDir, "memory", memoryId); + + // Check if artifact memory directory exists + if (!fs.existsSync(sourceMemoryPath)) { + core.info(`Memory directory not found in artifact: ${sourceMemoryPath}`); + return; + } + + // We're already in the checked out repository (from checkout step) + const workspaceDir = process.env.GITHUB_WORKSPACE || process.cwd(); + core.info(`Working in repository: ${workspaceDir}`); + + // Disable sparse checkout to work with full branch content + // This is necessary because checkout was configured with sparse-checkout + core.info(`Disabling sparse checkout...`); + try { + execSync("git sparse-checkout disable", { stdio: "pipe" }); + } catch (error) { + // Ignore if sparse checkout wasn't enabled + core.info("Sparse checkout was not enabled or already disabled"); + } + + // Checkout or create the memory branch + core.info(`Checking out branch: ${branchName}...`); + try { + const repoUrl = `https://x-access-token:${ghToken}@github.com/${targetRepo}.git`; + + // Try to fetch the branch + try { + execSync(`git fetch "${repoUrl}" "${branchName}:${branchName}"`, { stdio: "pipe" }); + execSync(`git checkout "${branchName}"`, { stdio: "inherit" }); + core.info(`Checked out existing branch: ${branchName}`); + } catch (fetchError) { + // Branch doesn't exist, create orphan branch + core.info(`Branch ${branchName} does not exist, creating orphan branch...`); + execSync(`git checkout --orphan "${branchName}"`, { stdio: "inherit" }); + execSync("git rm -rf . || true", { stdio: "pipe" }); + core.info(`Created orphan branch: ${branchName}`); + } + } catch (error) { + core.setFailed(`Failed to checkout branch: ${error instanceof Error ? error.message : String(error)}`); + return; + } + + // Create destination directory in repo + const destMemoryPath = path.join(workspaceDir, "memory", memoryId); + fs.mkdirSync(destMemoryPath, { recursive: true }); + core.info(`Destination directory: ${destMemoryPath}`); + + // Recursively scan and collect files from artifact directory + let filesToCopy = []; + + /** + * Recursively scan directory and collect files + * @param {string} dirPath - Directory to scan + * @param {string} relativePath - Relative path from sourceMemoryPath (for nested files) + */ + function scanDirectory(dirPath, relativePath = "") { + const entries = fs.readdirSync(dirPath, { withFileTypes: true }); + + for (const entry of entries) { + const fullPath = path.join(dirPath, entry.name); + const relativeFilePath = relativePath ? path.join(relativePath, entry.name) : entry.name; + + if (entry.isDirectory()) { + // Recursively scan subdirectory + scanDirectory(fullPath, relativeFilePath); + } else if (entry.isFile()) { + const stats = fs.statSync(fullPath); + + // Validate file name patterns if filter is set + if (fileGlobFilter) { + const patterns = fileGlobFilter.split(/\s+/).map(pattern => { + // Convert glob pattern to regex that supports directory wildcards + // ** matches any path segment (including /) + // * matches any characters except / + let regexPattern = pattern + .replace(/\\/g, "\\\\") // Escape backslashes + .replace(/\./g, "\\.") // Escape dots + .replace(/\*\*/g, "") // Temporarily replace ** + .replace(/\*/g, "[^/]*") // Single * matches non-slash chars + .replace(//g, ".*"); // ** matches everything including / + return new RegExp(`^${regexPattern}$`); + }); + + if (!patterns.some(pattern => pattern.test(relativeFilePath))) { + core.error(`File does not match allowed patterns: ${relativeFilePath}`); + core.error(`Allowed patterns: ${fileGlobFilter}`); + core.setFailed("File pattern validation failed"); + throw new Error("File pattern validation failed"); + } + } + + // Validate file size + if (stats.size > maxFileSize) { + core.error(`File exceeds size limit: ${relativeFilePath} (${stats.size} bytes > ${maxFileSize} bytes)`); + core.setFailed("File size validation failed"); + throw new Error("File size validation failed"); + } + + filesToCopy.push({ + relativePath: relativeFilePath, + source: fullPath, + size: stats.size, + }); + } + } + } + + try { + scanDirectory(sourceMemoryPath); + } catch (error) { + core.setFailed(`Failed to scan artifact directory: ${error instanceof Error ? error.message : String(error)}`); + return; + } + + // Validate file count + if (filesToCopy.length > maxFileCount) { + core.setFailed(`Too many files (${filesToCopy.length} > ${maxFileCount})`); + return; + } + + if (filesToCopy.length === 0) { + core.info("No files to copy from artifact"); + return; + } + + core.info(`Copying ${filesToCopy.length} validated file(s)...`); + + // Copy files to destination (preserving directory structure) + for (const file of filesToCopy) { + const destFilePath = path.join(destMemoryPath, file.relativePath); + const destDir = path.dirname(destFilePath); + + try { + // Ensure destination directory exists + fs.mkdirSync(destDir, { recursive: true }); + + // Copy file + fs.copyFileSync(file.source, destFilePath); + core.info(`Copied: ${file.relativePath} (${file.size} bytes)`); + } catch (error) { + core.setFailed(`Failed to copy file ${file.relativePath}: ${error instanceof Error ? error.message : String(error)}`); + return; + } + } + + // Check if we have any changes to commit + let hasChanges = false; + try { + const status = execSync("git status --porcelain", { encoding: "utf8" }); + hasChanges = status.trim().length > 0; + } catch (error) { + core.setFailed(`Failed to check git status: ${error instanceof Error ? error.message : String(error)}`); + return; + } + + if (!hasChanges) { + core.info("No changes detected after copying files"); + return; + } + + core.info("Changes detected, committing and pushing..."); + + // Stage all changes + try { + execSync("git add .", { stdio: "inherit" }); + } catch (error) { + core.setFailed(`Failed to stage changes: ${error instanceof Error ? error.message : String(error)}`); + return; + } + + // Commit changes + try { + execSync(`git commit -m "Update repo memory from workflow run ${githubRunId}"`, { stdio: "inherit" }); + } catch (error) { + core.setFailed(`Failed to commit changes: ${error instanceof Error ? error.message : String(error)}`); + return; + } + + // Pull with merge strategy (ours wins on conflicts) + core.info(`Pulling latest changes from ${branchName}...`); + try { + const repoUrl = `https://x-access-token:${ghToken}@github.com/${targetRepo}.git`; + execSync(`git pull --no-rebase -X ours "${repoUrl}" "${branchName}"`, { stdio: "inherit" }); + } catch (error) { + // Pull might fail if branch doesn't exist yet or on conflicts - this is acceptable + core.warning(`Pull failed (this may be expected): ${error instanceof Error ? error.message : String(error)}`); + } + + // Push changes + core.info(`Pushing changes to ${branchName}...`); + try { + const repoUrl = `https://x-access-token:${ghToken}@github.com/${targetRepo}.git`; + execSync(`git push "${repoUrl}" HEAD:"${branchName}"`, { stdio: "inherit" }); + core.info(`Successfully pushed changes to ${branchName} branch`); + } catch (error) { + core.setFailed(`Failed to push changes: ${error instanceof Error ? error.message : String(error)}`); + return; + } +} + +module.exports = { main }; diff --git a/pkg/workflow/js/push_to_pull_request_branch.cjs b/pkg/workflow/js/push_to_pull_request_branch.cjs new file mode 100644 index 0000000000..25ffcc183a --- /dev/null +++ b/pkg/workflow/js/push_to_pull_request_branch.cjs @@ -0,0 +1,425 @@ +// @ts-check +/// + +/** @type {typeof import("fs")} */ +const fs = require("fs"); +const { generateStagedPreview } = require("./staged_preview.cjs"); +const { updateActivationCommentWithCommit } = require("./update_activation_comment.cjs"); + +async function main() { + // Check if we're in staged mode + const isStaged = process.env.GH_AW_SAFE_OUTPUTS_STAGED === "true"; + + // Environment validation - fail early if required variables are missing + const agentOutputFile = process.env.GH_AW_AGENT_OUTPUT || ""; + if (agentOutputFile.trim() === "") { + core.info("Agent output content is empty"); + return; + } + + // Read agent output from file + let outputContent; + try { + outputContent = fs.readFileSync(agentOutputFile, "utf8"); + } catch (error) { + core.setFailed(`Error reading agent output file: ${error instanceof Error ? error.message : String(error)}`); + return; + } + + if (outputContent.trim() === "") { + core.info("Agent output content is empty"); + return; + } + + const target = process.env.GH_AW_PUSH_TARGET || "triggering"; + const ifNoChanges = process.env.GH_AW_PUSH_IF_NO_CHANGES || "warn"; + + // Check if patch file exists and has valid content + if (!fs.existsSync("/tmp/gh-aw/aw.patch")) { + const message = "No patch file found - cannot push without changes"; + + switch (ifNoChanges) { + case "error": + core.setFailed(message); + return; + case "ignore": + // Silent success - no console output + return; + case "warn": + default: + core.info(message); + return; + } + } + + const patchContent = fs.readFileSync("/tmp/gh-aw/aw.patch", "utf8"); + + // Check for actual error conditions (but allow empty patches as valid noop) + if (patchContent.includes("Failed to generate patch")) { + const message = "Patch file contains error message - cannot push without changes"; + + // Log diagnostic information to help with troubleshooting + core.error("Patch file generation failed - this is an error condition that requires investigation"); + core.error(`Patch file location: /tmp/gh-aw/aw.patch`); + core.error(`Patch file size: ${Buffer.byteLength(patchContent, "utf8")} bytes`); + + // Show first 500 characters of patch content for diagnostics + const previewLength = Math.min(500, patchContent.length); + core.error(`Patch file preview (first ${previewLength} characters):`); + core.error(patchContent.substring(0, previewLength)); + + // This is always a failure regardless of if-no-changes configuration + // because the patch file contains an error message from the patch generation process + core.setFailed(message); + return; + } + + // Validate patch size (unless empty) + const isEmpty = !patchContent || !patchContent.trim(); + if (!isEmpty) { + // Get maximum patch size from environment (default: 1MB = 1024 KB) + const maxSizeKb = parseInt(process.env.GH_AW_MAX_PATCH_SIZE || "1024", 10); + const patchSizeBytes = Buffer.byteLength(patchContent, "utf8"); + const patchSizeKb = Math.ceil(patchSizeBytes / 1024); + + core.info(`Patch size: ${patchSizeKb} KB (maximum allowed: ${maxSizeKb} KB)`); + + if (patchSizeKb > maxSizeKb) { + const message = `Patch size (${patchSizeKb} KB) exceeds maximum allowed size (${maxSizeKb} KB)`; + core.setFailed(message); + return; + } + + core.info("Patch size validation passed"); + } + if (isEmpty) { + const message = "Patch file is empty - no changes to apply (noop operation)"; + + switch (ifNoChanges) { + case "error": + core.setFailed("No changes to push - failing as configured by if-no-changes: error"); + return; + case "ignore": + // Silent success - no console output + break; + case "warn": + default: + core.info(message); + break; + } + } + + core.info(`Agent output content length: ${outputContent.length}`); + if (!isEmpty) { + core.info("Patch content validation passed"); + } + core.info(`Target configuration: ${target}`); + + // Parse the validated output JSON + let validatedOutput; + try { + validatedOutput = JSON.parse(outputContent); + } catch (error) { + core.setFailed(`Error parsing agent output JSON: ${error instanceof Error ? error.message : String(error)}`); + return; + } + + if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { + core.info("No valid items found in agent output"); + return; + } + + // Find the push-to-pull-request-branch item + const pushItem = validatedOutput.items.find(/** @param {any} item */ item => item.type === "push_to_pull_request_branch"); + if (!pushItem) { + core.info("No push-to-pull-request-branch item found in agent output"); + return; + } + + core.info("Found push-to-pull-request-branch item"); + + // If in staged mode, emit step summary instead of pushing changes + if (isStaged) { + await generateStagedPreview({ + title: "Push to PR Branch", + description: "The following changes would be pushed if staged mode was disabled:", + items: [{ target, commit_message: pushItem.commit_message }], + renderItem: item => { + let content = ""; + content += `**Target:** ${item.target}\n\n`; + + if (item.commit_message) { + content += `**Commit Message:** ${item.commit_message}\n\n`; + } + + if (fs.existsSync("/tmp/gh-aw/aw.patch")) { + const patchStats = fs.readFileSync("/tmp/gh-aw/aw.patch", "utf8"); + if (patchStats.trim()) { + content += `**Changes:** Patch file exists with ${patchStats.split("\n").length} lines\n\n`; + content += `
Show patch preview\n\n\`\`\`diff\n${patchStats.slice(0, 2000)}${patchStats.length > 2000 ? "\n... (truncated)" : ""}\n\`\`\`\n\n
\n\n`; + } else { + content += `**Changes:** No changes (empty patch)\n\n`; + } + } + return content; + }, + }); + return; + } + + // Validate target configuration for pull request context + if (target !== "*" && target !== "triggering") { + // If target is a specific number, validate it's a valid pull request number + const pullNumber = parseInt(target, 10); + if (isNaN(pullNumber)) { + core.setFailed('Invalid target configuration: must be "triggering", "*", or a valid pull request number'); + return; + } + } + + // Compute the target branch name based on target configuration + let pullNumber; + if (target === "triggering") { + // Use the number of the triggering pull request + pullNumber = context.payload?.pull_request?.number || context.payload?.issue?.number; + + // Check if we're in a pull request context when required + if (!pullNumber) { + core.setFailed('push-to-pull-request-branch with target "triggering" requires pull request context'); + return; + } + } else if (target === "*") { + if (pushItem.pull_number) { + pullNumber = parseInt(pushItem.pull_number, 10); + } + } else { + // Target is a specific pull request number + pullNumber = parseInt(target, 10); + } + let branchName; + let prTitle = ""; + let prLabels = []; + + // Validate pull number is defined before fetching + if (!pullNumber) { + core.setFailed("Pull request number is required but not found"); + return; + } + + // Fetch the specific PR to get its head branch, title, and labels + try { + const { data: pullRequest } = await github.rest.pulls.get({ + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: pullNumber, + }); + branchName = pullRequest.head.ref; + prTitle = pullRequest.title || ""; + prLabels = pullRequest.labels.map(label => label.name); + } catch (error) { + core.info(`Warning: Could not fetch PR ${pullNumber} details: ${error instanceof Error ? error.message : String(error)}`); + // Exit with failure if we cannot determine the branch name + core.setFailed(`Failed to determine branch name for PR ${pullNumber}`); + return; + } + + core.info(`Target branch: ${branchName}`); + core.info(`PR title: ${prTitle}`); + core.info(`PR labels: ${prLabels.join(", ")}`); + + // Validate title prefix if specified + const titlePrefix = process.env.GH_AW_PR_TITLE_PREFIX; + if (titlePrefix && !prTitle.startsWith(titlePrefix)) { + core.setFailed(`Pull request title "${prTitle}" does not start with required prefix "${titlePrefix}"`); + return; + } + + // Validate labels if specified + const requiredLabelsStr = process.env.GH_AW_PR_LABELS; + if (requiredLabelsStr) { + const requiredLabels = requiredLabelsStr.split(",").map(label => label.trim()); + const missingLabels = requiredLabels.filter(label => !prLabels.includes(label)); + if (missingLabels.length > 0) { + core.setFailed(`Pull request is missing required labels: ${missingLabels.join(", ")}. Current labels: ${prLabels.join(", ")}`); + return; + } + } + + if (titlePrefix) { + core.info(`✓ Title prefix validation passed: "${titlePrefix}"`); + } + if (requiredLabelsStr) { + core.info(`✓ Labels validation passed: ${requiredLabelsStr}`); + } + + // Check if patch has actual changes (not just empty) + const hasChanges = !isEmpty; + + // Switch to or create the target branch + core.info(`Switching to branch: ${branchName}`); + + // Fetch the specific target branch from origin (since we use shallow checkout) + try { + core.info(`Fetching branch: ${branchName}`); + await exec.exec(`git fetch origin ${branchName}:refs/remotes/origin/${branchName}`); + } catch (fetchError) { + core.setFailed(`Failed to fetch branch ${branchName}: ${fetchError instanceof Error ? fetchError.message : String(fetchError)}`); + return; + } + + // Check if branch exists on origin + try { + await exec.exec(`git rev-parse --verify origin/${branchName}`); + } catch (verifyError) { + core.setFailed(`Branch ${branchName} does not exist on origin, can't push to it: ${verifyError instanceof Error ? verifyError.message : String(verifyError)}`); + return; + } + + // Checkout the branch from origin + try { + await exec.exec(`git checkout -B ${branchName} origin/${branchName}`); + core.info(`Checked out existing branch from origin: ${branchName}`); + } catch (checkoutError) { + core.setFailed(`Failed to checkout branch ${branchName}: ${checkoutError instanceof Error ? checkoutError.message : String(checkoutError)}`); + return; + } + + // Apply the patch using git CLI (skip if empty) + if (!isEmpty) { + core.info("Applying patch..."); + try { + // Check if commit title suffix is configured + const commitTitleSuffix = process.env.GH_AW_COMMIT_TITLE_SUFFIX; + + if (commitTitleSuffix) { + core.info(`Appending commit title suffix: "${commitTitleSuffix}"`); + + // Read the patch file + let patchContent = fs.readFileSync("/tmp/gh-aw/aw.patch", "utf8"); + + // Modify Subject lines in the patch to append the suffix + // Patch format has "Subject: [PATCH] " or "Subject: " + // Append the suffix at the end of the title to avoid git am stripping brackets + patchContent = patchContent.replace(/^Subject: (?:\[PATCH\] )?(.*)$/gm, (match, title) => `Subject: [PATCH] ${title}${commitTitleSuffix}`); + + // Write the modified patch back + fs.writeFileSync("/tmp/gh-aw/aw.patch", patchContent, "utf8"); + core.info(`Patch modified with commit title suffix: "${commitTitleSuffix}"`); + } + + // Log first 100 lines of patch for debugging + const finalPatchContent = fs.readFileSync("/tmp/gh-aw/aw.patch", "utf8"); + const patchLines = finalPatchContent.split("\n"); + const previewLineCount = Math.min(100, patchLines.length); + core.info(`Patch preview (first ${previewLineCount} of ${patchLines.length} lines):`); + for (let i = 0; i < previewLineCount; i++) { + core.info(patchLines[i]); + } + + // Patches are created with git format-patch, so use git am to apply them + await exec.exec("git am /tmp/gh-aw/aw.patch"); + core.info("Patch applied successfully"); + + // Push the applied commits to the branch + await exec.exec(`git push origin ${branchName}`); + core.info(`Changes committed and pushed to branch: ${branchName}`); + } catch (error) { + core.error(`Failed to apply patch: ${error instanceof Error ? error.message : String(error)}`); + + // Investigate why the patch failed by logging git status and the failed patch + try { + core.info("Investigating patch failure..."); + + // Log git status to see the current state + const statusResult = await exec.getExecOutput("git", ["status"]); + core.info("Git status output:"); + core.info(statusResult.stdout); + + // Log recent commits for context + const logResult = await exec.getExecOutput("git", ["log", "--oneline", "-5"]); + core.info("Recent commits (last 5):"); + core.info(logResult.stdout); + + // Log uncommitted changes + const diffResult = await exec.getExecOutput("git", ["diff", "HEAD"]); + core.info("Uncommitted changes:"); + core.info(diffResult.stdout && diffResult.stdout.trim() ? diffResult.stdout : "(no uncommitted changes)"); + + // Log the failed patch diff + const patchDiffResult = await exec.getExecOutput("git", ["am", "--show-current-patch=diff"]); + core.info("Failed patch diff:"); + core.info(patchDiffResult.stdout); + + // Log the full failed patch for complete context + const patchFullResult = await exec.getExecOutput("git", ["am", "--show-current-patch"]); + core.info("Failed patch (full):"); + core.info(patchFullResult.stdout); + } catch (investigateError) { + core.warning(`Failed to investigate patch failure: ${investigateError instanceof Error ? investigateError.message : String(investigateError)}`); + } + + core.setFailed("Failed to apply patch"); + return; + } + } else { + core.info("Skipping patch application (empty patch)"); + + // Handle if-no-changes configuration for empty patches + const message = "No changes to apply - noop operation completed successfully"; + + switch (ifNoChanges) { + case "error": + core.setFailed("No changes to apply - failing as configured by if-no-changes: error"); + return; + case "ignore": + // Silent success - no console output + break; + case "warn": + default: + core.info(message); + break; + } + } + + // Get commit SHA and push URL + const commitShaRes = await exec.getExecOutput("git", ["rev-parse", "HEAD"]); + if (commitShaRes.exitCode !== 0) throw new Error("Failed to get commit SHA"); + const commitSha = commitShaRes.stdout.trim(); + + // Get repository base URL and construct URLs + const githubServer = process.env.GITHUB_SERVER_URL || "https://github.com"; + const repoUrl = context.payload.repository ? context.payload.repository.html_url : `${githubServer}/${context.repo.owner}/${context.repo.repo}`; + const pushUrl = `${repoUrl}/tree/${branchName}`; + const commitUrl = `${repoUrl}/commit/${commitSha}`; + + // Set outputs + core.setOutput("branch_name", branchName); + core.setOutput("commit_sha", commitSha); + core.setOutput("push_url", pushUrl); + core.setOutput("commit_url", commitUrl); + + // Update the activation comment with commit link (if a comment was created and changes were pushed) + if (hasChanges) { + await updateActivationCommentWithCommit(github, context, core, commitSha, commitUrl); + } + + // Write summary to GitHub Actions summary + const summaryTitle = hasChanges ? "Push to Branch" : "Push to Branch (No Changes)"; + const summaryContent = hasChanges + ? ` +## ${summaryTitle} +- **Branch**: \`${branchName}\` +- **Commit**: [${commitSha.substring(0, 7)}](${commitUrl}) +- **URL**: [${pushUrl}](${pushUrl}) +` + : ` +## ${summaryTitle} +- **Branch**: \`${branchName}\` +- **Status**: No changes to apply (noop operation) +- **URL**: [${pushUrl}](${pushUrl}) +`; + + await core.summary.addRaw(summaryContent).write(); +} + +module.exports = { main }; diff --git a/pkg/workflow/js/read_buffer.cjs b/pkg/workflow/js/read_buffer.cjs new file mode 100644 index 0000000000..4221c67735 --- /dev/null +++ b/pkg/workflow/js/read_buffer.cjs @@ -0,0 +1,67 @@ +// @ts-check +/// + +/** + * ReadBuffer Module + * + * This module provides a buffer class for parsing JSON-RPC messages from stdin. + * It handles line-by-line reading and JSON parsing with support for both + * Unix (\n) and Windows (\r\n) line endings. + * + * Usage: + * const { ReadBuffer } = require("./read_buffer.cjs"); + * + * const buffer = new ReadBuffer(); + * buffer.append(chunk); + * const message = buffer.readMessage(); + */ + +/** + * ReadBuffer class for parsing JSON-RPC messages from stdin + */ +class ReadBuffer { + constructor() { + /** @type {Buffer|null} */ + this._buffer = null; + } + + /** + * Append data to the buffer + * @param {Buffer} chunk - Data chunk to append + */ + append(chunk) { + this._buffer = this._buffer ? Buffer.concat([this._buffer, chunk]) : chunk; + } + + /** + * Read a complete message from the buffer + * @returns {Object|null} Parsed JSON message or null if no complete message + */ + readMessage() { + if (!this._buffer) { + return null; + } + + const index = this._buffer.indexOf("\n"); + if (index === -1) { + return null; + } + + const line = this._buffer.toString("utf8", 0, index).replace(/\r$/, ""); + this._buffer = this._buffer.subarray(index + 1); + + if (line.trim() === "") { + return this.readMessage(); // Skip empty lines recursively + } + + try { + return JSON.parse(line); + } catch (error) { + throw new Error(`Parse error: ${error instanceof Error ? error.message : String(error)}`); + } + } +} + +module.exports = { + ReadBuffer, +}; diff --git a/pkg/workflow/js/redact_secrets.cjs b/pkg/workflow/js/redact_secrets.cjs new file mode 100644 index 0000000000..103c3fb134 --- /dev/null +++ b/pkg/workflow/js/redact_secrets.cjs @@ -0,0 +1,152 @@ +// @ts-check +/// + +/** + * Redacts secrets from files in /tmp/gh-aw directory before uploading artifacts + * This script processes all .txt, .json, .log, .md, .mdx, .yml, .jsonl files under /tmp/gh-aw and redacts + * any strings matching the actual secret values provided via environment variables. + */ +const fs = require("fs"); +const path = require("path"); +/** + * Recursively finds all files matching the specified extensions + * @param {string} dir - Directory to search + * @param {string[]} extensions - File extensions to match (e.g., ['.txt', '.json', '.log']) + * @returns {string[]} Array of file paths + */ +function findFiles(dir, extensions) { + const results = []; + try { + if (!fs.existsSync(dir)) { + return results; + } + const entries = fs.readdirSync(dir, { withFileTypes: true }); + for (const entry of entries) { + const fullPath = path.join(dir, entry.name); + if (entry.isDirectory()) { + // Recursively search subdirectories + results.push(...findFiles(fullPath, extensions)); + } else if (entry.isFile()) { + // Check if file has one of the target extensions + const ext = path.extname(entry.name).toLowerCase(); + if (extensions.includes(ext)) { + results.push(fullPath); + } + } + } + } catch (error) { + core.warning(`Failed to scan directory ${dir}: ${error instanceof Error ? error.message : String(error)}`); + } + return results; +} + +/** + * Redacts secrets from file content using exact string matching + * @param {string} content - File content to process + * @param {string[]} secretValues - Array of secret values to redact + * @returns {{content: string, redactionCount: number}} Redacted content and count of redactions + */ +function redactSecrets(content, secretValues) { + let redactionCount = 0; + let redacted = content; + // Sort secret values by length (longest first) to handle overlapping secrets + const sortedSecrets = secretValues.slice().sort((a, b) => b.length - a.length); + for (const secretValue of sortedSecrets) { + // Skip empty or very short values (likely not actual secrets) + if (!secretValue || secretValue.length < 8) { + continue; + } + // Count occurrences before replacement + // Use split and join for exact string matching (not regex) + // This is safer than regex as it doesn't interpret special characters + // Show first 3 letters followed by asterisks for the remaining length + const prefix = secretValue.substring(0, 3); + const asterisks = "*".repeat(Math.max(0, secretValue.length - 3)); + const replacement = prefix + asterisks; + const parts = redacted.split(secretValue); + const occurrences = parts.length - 1; + if (occurrences > 0) { + redacted = parts.join(replacement); + redactionCount += occurrences; + core.info(`Redacted ${occurrences} occurrence(s) of a secret`); + } + } + return { content: redacted, redactionCount }; +} + +/** + * Process a single file for secret redaction + * @param {string} filePath - Path to the file + * @param {string[]} secretValues - Array of secret values to redact + * @returns {number} Number of redactions made + */ +function processFile(filePath, secretValues) { + try { + const content = fs.readFileSync(filePath, "utf8"); + const { content: redactedContent, redactionCount } = redactSecrets(content, secretValues); + if (redactionCount > 0) { + fs.writeFileSync(filePath, redactedContent, "utf8"); + core.info(`Processed ${filePath}: ${redactionCount} redaction(s)`); + } + return redactionCount; + } catch (error) { + core.warning(`Failed to process file ${filePath}: ${error instanceof Error ? error.message : String(error)}`); + return 0; + } +} + +/** + * Main function + */ +async function main() { + // Get the list of secret names from environment variable + const secretNames = process.env.GH_AW_SECRET_NAMES; + if (!secretNames) { + core.info("GH_AW_SECRET_NAMES not set, no redaction performed"); + return; + } + core.info("Starting secret redaction in /tmp/gh-aw directory"); + try { + // Parse the comma-separated list of secret names + const secretNameList = secretNames.split(",").filter(name => name.trim()); + // Collect the actual secret values from environment variables + const secretValues = []; + for (const secretName of secretNameList) { + const envVarName = `SECRET_${secretName}`; + const secretValue = process.env[envVarName]; + // Skip empty or undefined secrets + if (!secretValue || secretValue.trim() === "") { + continue; + } + secretValues.push(secretValue.trim()); + } + if (secretValues.length === 0) { + core.info("No secret values found to redact"); + return; + } + core.info(`Found ${secretValues.length} secret(s) to redact`); + // Find all target files in /tmp/gh-aw directory + const targetExtensions = [".txt", ".json", ".log", ".md", ".mdx", ".yml", ".jsonl"]; + const files = findFiles("/tmp/gh-aw", targetExtensions); + core.info(`Found ${files.length} file(s) to scan for secrets`); + let totalRedactions = 0; + let filesWithRedactions = 0; + // Process each file + for (const file of files) { + const redactionCount = processFile(file, secretValues); + if (redactionCount > 0) { + filesWithRedactions++; + totalRedactions += redactionCount; + } + } + if (totalRedactions > 0) { + core.info(`Secret redaction complete: ${totalRedactions} redaction(s) in ${filesWithRedactions} file(s)`); + } else { + core.info("Secret redaction complete: no secrets found"); + } + } catch (error) { + core.setFailed(`Secret redaction failed: ${error instanceof Error ? error.message : String(error)}`); + } +} + +module.exports = { main }; diff --git a/pkg/workflow/js/remove_duplicate_title.cjs b/pkg/workflow/js/remove_duplicate_title.cjs new file mode 100644 index 0000000000..b4041667ee --- /dev/null +++ b/pkg/workflow/js/remove_duplicate_title.cjs @@ -0,0 +1,50 @@ +// @ts-check +/** + * Remove duplicate title from description + * @module remove_duplicate_title + */ + +/** + * Removes duplicate title from the beginning of description content. + * If the description starts with a header (# or ## or ### etc.) that matches + * the title, it will be removed along with any trailing newlines. + * + * @param {string} title - The title text to match and remove + * @param {string} description - The description content that may contain duplicate title + * @returns {string} The description with duplicate title removed + */ +function removeDuplicateTitleFromDescription(title, description) { + // Handle null/undefined/empty inputs + if (!title || typeof title !== "string") { + return description || ""; + } + if (!description || typeof description !== "string") { + return ""; + } + + const trimmedTitle = title.trim(); + const trimmedDescription = description.trim(); + + if (!trimmedTitle || !trimmedDescription) { + return trimmedDescription; + } + + // Match any header level (# to ######) followed by the title at the start + // This regex matches: + // - Start of string + // - One or more # characters + // - One or more spaces + // - The exact title (escaped for regex special chars) + // - Optional trailing spaces + // - Optional newlines after the header + const escapedTitle = trimmedTitle.replace(/[.*+?^${}()|[\]\\]/g, "\\$&"); + const headerRegex = new RegExp(`^#{1,6}\\s+${escapedTitle}\\s*(?:\\r?\\n)*`, "i"); + + if (headerRegex.test(trimmedDescription)) { + return trimmedDescription.replace(headerRegex, "").trim(); + } + + return trimmedDescription; +} + +module.exports = { removeDuplicateTitleFromDescription }; diff --git a/pkg/workflow/js/repo_helpers.cjs b/pkg/workflow/js/repo_helpers.cjs new file mode 100644 index 0000000000..ce0c5d87be --- /dev/null +++ b/pkg/workflow/js/repo_helpers.cjs @@ -0,0 +1,80 @@ +// @ts-check +/// + +/** + * Repository-related helper functions for safe-output scripts + * Provides common repository parsing, validation, and resolution logic + */ + +/** + * Parse the allowed repos from environment variable + * @returns {Set} Set of allowed repository slugs + */ +function parseAllowedRepos() { + const allowedReposEnv = process.env.GH_AW_ALLOWED_REPOS; + const set = new Set(); + if (allowedReposEnv) { + allowedReposEnv + .split(",") + .map(repo => repo.trim()) + .filter(repo => repo) + .forEach(repo => set.add(repo)); + } + return set; +} + +/** + * Get the default target repository + * @returns {string} Repository slug in "owner/repo" format + */ +function getDefaultTargetRepo() { + // First check if there's a target-repo override + const targetRepoSlug = process.env.GH_AW_TARGET_REPO_SLUG; + if (targetRepoSlug) { + return targetRepoSlug; + } + // Fall back to context repo + return `${context.repo.owner}/${context.repo.repo}`; +} + +/** + * Validate that a repo is allowed for operations + * @param {string} repo - Repository slug to validate + * @param {string} defaultRepo - Default target repository + * @param {Set} allowedRepos - Set of explicitly allowed repos + * @returns {{valid: boolean, error: string|null}} + */ +function validateRepo(repo, defaultRepo, allowedRepos) { + // Default repo is always allowed + if (repo === defaultRepo) { + return { valid: true, error: null }; + } + // Check if it's in the allowed repos list + if (allowedRepos.has(repo)) { + return { valid: true, error: null }; + } + return { + valid: false, + error: `Repository '${repo}' is not in the allowed-repos list. Allowed: ${defaultRepo}${allowedRepos.size > 0 ? ", " + Array.from(allowedRepos).join(", ") : ""}`, + }; +} + +/** + * Parse owner and repo from a repository slug + * @param {string} repoSlug - Repository slug in "owner/repo" format + * @returns {{owner: string, repo: string}|null} + */ +function parseRepoSlug(repoSlug) { + const parts = repoSlug.split("/"); + if (parts.length !== 2 || !parts[0] || !parts[1]) { + return null; + } + return { owner: parts[0], repo: parts[1] }; +} + +module.exports = { + parseAllowedRepos, + getDefaultTargetRepo, + validateRepo, + parseRepoSlug, +}; diff --git a/pkg/workflow/js/resolve_mentions.cjs b/pkg/workflow/js/resolve_mentions.cjs new file mode 100644 index 0000000000..caad8107f7 --- /dev/null +++ b/pkg/workflow/js/resolve_mentions.cjs @@ -0,0 +1,194 @@ +// @ts-check +/// + +/** + * @typedef {Object} MentionResolutionResult + * @property {string[]} allowedMentions - List of allowed mention usernames + * @property {number} totalMentions - Total number of mentions found + * @property {number} resolvedCount - Number of mentions resolved via API + * @property {boolean} limitExceeded - Whether the 50 mention limit was exceeded + */ + +/** + * Extract all @mentions from text + * @param {string} text - The text to extract mentions from + * @returns {string[]} Array of unique usernames mentioned (case-preserved) + */ +function extractMentions(text) { + if (!text || typeof text !== "string") { + return []; + } + + const mentionRegex = /(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g; + const mentions = []; + const seen = new Set(); + + let match; + while ((match = mentionRegex.exec(text)) !== null) { + const username = match[2]; + const lowercaseUsername = username.toLowerCase(); + if (!seen.has(lowercaseUsername)) { + seen.add(lowercaseUsername); + mentions.push(username); + } + } + + return mentions; +} + +/** + * Check if a user from the payload is a bot + * @param {any} user - User object from GitHub payload + * @returns {boolean} True if the user is a bot + */ +function isPayloadUserBot(user) { + return !!(user && user.type === "Bot"); +} + +/** + * Get recent collaborators (any permission level) - optimistic resolution + * @param {string} owner - Repository owner + * @param {string} repo - Repository name + * @param {any} github - GitHub API instance + * @param {any} core - GitHub Actions core module + * @returns {Promise>} Map of username (lowercase) to whether they're allowed (any collaborator, not bot) + */ +async function getRecentCollaborators(owner, repo, github, core) { + try { + // Fetch only first page (30 collaborators) for optimistic resolution + const collaborators = await github.rest.repos.listCollaborators({ + owner: owner, + repo: repo, + affiliation: "direct", + per_page: 30, + }); + + const allowedMap = new Map(); + for (const collaborator of collaborators.data) { + const lowercaseLogin = collaborator.login.toLowerCase(); + // Allow any collaborator (regardless of permission level) except bots + const isAllowed = collaborator.type !== "Bot"; + allowedMap.set(lowercaseLogin, isAllowed); + } + + return allowedMap; + } catch (error) { + core.warning(`Failed to fetch recent collaborators: ${error instanceof Error ? error.message : String(error)}`); + return new Map(); + } +} + +/** + * Check individual user's permission lazily + * @param {string} username - Username to check + * @param {string} owner - Repository owner + * @param {string} repo - Repository name + * @param {any} github - GitHub API instance + * @param {any} core - GitHub Actions core module + * @returns {Promise} True if user is allowed (any collaborator, not bot) + */ +async function checkUserPermission(username, owner, repo, github, core) { + try { + // First check if user exists and is not a bot + const { data: user } = await github.rest.users.getByUsername({ + username: username, + }); + + if (user.type === "Bot") { + return false; + } + + // Check if user is a collaborator (any permission level) + const { data: permissionData } = await github.rest.repos.getCollaboratorPermissionLevel({ + owner: owner, + repo: repo, + username: username, + }); + + // Allow any permission level (read, triage, write, maintain, admin) + return permissionData.permission !== "none"; + } catch (error) { + // User doesn't exist, not a collaborator, or API error - deny + return false; + } +} + +/** + * Resolve mentions lazily with optimistic caching + * @param {string} text - The text containing mentions + * @param {string[]} knownAuthors - Known authors that should be allowed (e.g., issue author, comment author) + * @param {string} owner - Repository owner + * @param {string} repo - Repository name + * @param {any} github - GitHub API instance + * @param {any} core - GitHub Actions core module + * @returns {Promise} Resolution result with allowed mentions + */ +async function resolveMentionsLazily(text, knownAuthors, owner, repo, github, core) { + // Extract all mentions from text + const mentions = extractMentions(text); + const totalMentions = mentions.length; + + core.info(`Found ${totalMentions} unique mentions in text`); + + // Limit to 50 mentions - filter out excess without API lookup + const limitExceeded = totalMentions > 50; + const mentionsToProcess = limitExceeded ? mentions.slice(0, 50) : mentions; + + if (limitExceeded) { + core.warning(`Mention limit exceeded: ${totalMentions} mentions found, processing only first 50`); + } + + // Build set of known allowed authors (case-insensitive) + const knownAuthorsLowercase = new Set(knownAuthors.filter(a => a).map(a => a.toLowerCase())); + + // Optimistically fetch recent collaborators (first page only) + const collaboratorCache = await getRecentCollaborators(owner, repo, github, core); + core.info(`Cached ${collaboratorCache.size} recent collaborators for optimistic resolution`); + + const allowedMentions = []; + let resolvedCount = 0; + + // Process each mention + for (const mention of mentionsToProcess) { + const lowerMention = mention.toLowerCase(); + + // Check if it's a known author (already verified as non-bot in caller) + if (knownAuthorsLowercase.has(lowerMention)) { + allowedMentions.push(mention); + continue; + } + + // Check optimistic cache + if (collaboratorCache.has(lowerMention)) { + if (collaboratorCache.get(lowerMention)) { + allowedMentions.push(mention); + } + continue; + } + + // Not in cache - lazy lookup individual user + resolvedCount++; + const isAllowed = await checkUserPermission(mention, owner, repo, github, core); + if (isAllowed) { + allowedMentions.push(mention); + } + } + + core.info(`Resolved ${resolvedCount} mentions via individual API calls`); + core.info(`Total allowed mentions: ${allowedMentions.length}`); + + return { + allowedMentions, + totalMentions, + resolvedCount, + limitExceeded, + }; +} + +module.exports = { + extractMentions, + isPayloadUserBot, + getRecentCollaborators, + checkUserPermission, + resolveMentionsLazily, +}; diff --git a/pkg/workflow/js/resolve_mentions_from_payload.cjs b/pkg/workflow/js/resolve_mentions_from_payload.cjs new file mode 100644 index 0000000000..b8f7086e7f --- /dev/null +++ b/pkg/workflow/js/resolve_mentions_from_payload.cjs @@ -0,0 +1,198 @@ +// @ts-check +/// + +/** + * Helper module for resolving allowed mentions from GitHub event payloads + */ + +const { resolveMentionsLazily, isPayloadUserBot } = require("./resolve_mentions.cjs"); + +/** + * Resolve allowed mentions from the current GitHub event context + * @param {any} context - GitHub Actions context + * @param {any} github - GitHub API client + * @param {any} core - GitHub Actions core + * @param {any} [mentionsConfig] - Mentions configuration from safe-outputs + * @returns {Promise} Array of allowed mention usernames + */ +async function resolveAllowedMentionsFromPayload(context, github, core, mentionsConfig) { + // Return empty array if context is not available (e.g., in tests) + if (!context || !github || !core) { + return []; + } + + // Handle mentions configuration + // If mentions is explicitly set to false, return empty array (all mentions escaped) + if (mentionsConfig && mentionsConfig.enabled === false) { + core.info("[MENTIONS] Mentions explicitly disabled - all mentions will be escaped"); + return []; + } + + // If mentions is explicitly set to true, we still need to resolve from payload + // but we'll be more permissive. In strict mode, this should error before reaching here. + const allowAllMentions = mentionsConfig && mentionsConfig.enabled === true; + + // Get configuration options (with defaults) + const allowTeamMembers = mentionsConfig?.allowTeamMembers !== false; // default: true + const allowContext = mentionsConfig?.allowContext !== false; // default: true + const allowedList = mentionsConfig?.allowed || []; + const maxMentions = mentionsConfig?.max || 50; + + try { + const { owner, repo } = context.repo; + const knownAuthors = []; + + // Extract known authors from the event payload (if allow-context is enabled) + if (allowContext) { + switch (context.eventName) { + case "issues": + if (context.payload.issue?.user?.login && !isPayloadUserBot(context.payload.issue.user)) { + knownAuthors.push(context.payload.issue.user.login); + } + if (context.payload.issue?.assignees && Array.isArray(context.payload.issue.assignees)) { + for (const assignee of context.payload.issue.assignees) { + if (assignee?.login && !isPayloadUserBot(assignee)) { + knownAuthors.push(assignee.login); + } + } + } + break; + + case "pull_request": + case "pull_request_target": + if (context.payload.pull_request?.user?.login && !isPayloadUserBot(context.payload.pull_request.user)) { + knownAuthors.push(context.payload.pull_request.user.login); + } + if (context.payload.pull_request?.assignees && Array.isArray(context.payload.pull_request.assignees)) { + for (const assignee of context.payload.pull_request.assignees) { + if (assignee?.login && !isPayloadUserBot(assignee)) { + knownAuthors.push(assignee.login); + } + } + } + break; + + case "issue_comment": + if (context.payload.comment?.user?.login && !isPayloadUserBot(context.payload.comment.user)) { + knownAuthors.push(context.payload.comment.user.login); + } + if (context.payload.issue?.user?.login && !isPayloadUserBot(context.payload.issue.user)) { + knownAuthors.push(context.payload.issue.user.login); + } + if (context.payload.issue?.assignees && Array.isArray(context.payload.issue.assignees)) { + for (const assignee of context.payload.issue.assignees) { + if (assignee?.login && !isPayloadUserBot(assignee)) { + knownAuthors.push(assignee.login); + } + } + } + break; + + case "pull_request_review_comment": + if (context.payload.comment?.user?.login && !isPayloadUserBot(context.payload.comment.user)) { + knownAuthors.push(context.payload.comment.user.login); + } + if (context.payload.pull_request?.user?.login && !isPayloadUserBot(context.payload.pull_request.user)) { + knownAuthors.push(context.payload.pull_request.user.login); + } + if (context.payload.pull_request?.assignees && Array.isArray(context.payload.pull_request.assignees)) { + for (const assignee of context.payload.pull_request.assignees) { + if (assignee?.login && !isPayloadUserBot(assignee)) { + knownAuthors.push(assignee.login); + } + } + } + break; + + case "pull_request_review": + if (context.payload.review?.user?.login && !isPayloadUserBot(context.payload.review.user)) { + knownAuthors.push(context.payload.review.user.login); + } + if (context.payload.pull_request?.user?.login && !isPayloadUserBot(context.payload.pull_request.user)) { + knownAuthors.push(context.payload.pull_request.user.login); + } + if (context.payload.pull_request?.assignees && Array.isArray(context.payload.pull_request.assignees)) { + for (const assignee of context.payload.pull_request.assignees) { + if (assignee?.login && !isPayloadUserBot(assignee)) { + knownAuthors.push(assignee.login); + } + } + } + break; + + case "discussion": + if (context.payload.discussion?.user?.login && !isPayloadUserBot(context.payload.discussion.user)) { + knownAuthors.push(context.payload.discussion.user.login); + } + break; + + case "discussion_comment": + if (context.payload.comment?.user?.login && !isPayloadUserBot(context.payload.comment.user)) { + knownAuthors.push(context.payload.comment.user.login); + } + if (context.payload.discussion?.user?.login && !isPayloadUserBot(context.payload.discussion.user)) { + knownAuthors.push(context.payload.discussion.user.login); + } + break; + + case "release": + if (context.payload.release?.author?.login && !isPayloadUserBot(context.payload.release.author)) { + knownAuthors.push(context.payload.release.author.login); + } + break; + + case "workflow_dispatch": + // Add the actor who triggered the workflow + knownAuthors.push(context.actor); + break; + + default: + // No known authors for other event types + break; + } + } + + // Add allowed list to known authors (these are always allowed regardless of configuration) + knownAuthors.push(...allowedList); + + // If allow-team-members is disabled, only use known authors (context + allowed list) + if (!allowTeamMembers) { + core.info(`[MENTIONS] Team members disabled - only allowing context (${knownAuthors.length} users)`); + // Apply max limit + const limitedMentions = knownAuthors.slice(0, maxMentions); + if (knownAuthors.length > maxMentions) { + core.warning(`[MENTIONS] Mention limit exceeded: ${knownAuthors.length} mentions, limiting to ${maxMentions}`); + } + return limitedMentions; + } + + // Build allowed mentions list from known authors and collaborators + // We pass the known authors as fake mentions in text so they get processed + const fakeText = knownAuthors.map(author => `@${author}`).join(" "); + const mentionResult = await resolveMentionsLazily(fakeText, knownAuthors, owner, repo, github, core); + let allowedMentions = mentionResult.allowedMentions; + + // Apply max limit + if (allowedMentions.length > maxMentions) { + core.warning(`[MENTIONS] Mention limit exceeded: ${allowedMentions.length} mentions, limiting to ${maxMentions}`); + allowedMentions = allowedMentions.slice(0, maxMentions); + } + + // Log allowed mentions for debugging + if (allowedMentions.length > 0) { + core.info(`[OUTPUT COLLECTOR] Allowed mentions: ${allowedMentions.join(", ")}`); + } else { + core.info("[OUTPUT COLLECTOR] No allowed mentions - all mentions will be escaped"); + } + + return allowedMentions; + } catch (error) { + core.warning(`Failed to resolve mentions for output collector: ${error instanceof Error ? error.message : String(error)}`); + // Return empty array on error + return []; + } +} + +module.exports = { + resolveAllowedMentionsFromPayload, +}; diff --git a/pkg/workflow/js/runtime_import.cjs b/pkg/workflow/js/runtime_import.cjs new file mode 100644 index 0000000000..3b5781ef5f --- /dev/null +++ b/pkg/workflow/js/runtime_import.cjs @@ -0,0 +1,154 @@ +// @ts-check +/// + +// runtime_import.cjs +// Processes {{#runtime-import filepath}} and {{#runtime-import? filepath}} macros +// at runtime to import markdown file contents dynamically. + +const fs = require("fs"); +const path = require("path"); + +/** + * Checks if a file starts with front matter (---\n) + * @param {string} content - The file content to check + * @returns {boolean} - True if content starts with front matter + */ +function hasFrontMatter(content) { + return content.trimStart().startsWith("---\n") || content.trimStart().startsWith("---\r\n"); +} + +/** + * Removes XML comments from content + * @param {string} content - The content to process + * @returns {string} - Content with XML comments removed + */ +function removeXMLComments(content) { + // Remove XML/HTML comments: + // Apply repeatedly to handle nested/overlapping patterns that could reintroduce comment markers + let previous; + do { + previous = content; + content = content.replace(//g, ""); + } while (content !== previous); + return content; +} + +/** + * Checks if content contains GitHub Actions macros (${{ ... }}) + * @param {string} content - The content to check + * @returns {boolean} - True if GitHub Actions macros are found + */ +function hasGitHubActionsMacros(content) { + return /\$\{\{[\s\S]*?\}\}/.test(content); +} + +/** + * Reads and processes a file for runtime import + * @param {string} filepath - The path to the file to import (relative to GITHUB_WORKSPACE) + * @param {boolean} optional - Whether the import is optional (true for {{#runtime-import? filepath}}) + * @param {string} workspaceDir - The GITHUB_WORKSPACE directory path + * @returns {string} - The processed file content, or empty string if optional and file not found + * @throws {Error} - If file is not found and import is not optional, or if GitHub Actions macros are detected + */ +function processRuntimeImport(filepath, optional, workspaceDir) { + // Resolve the absolute path + const absolutePath = path.resolve(workspaceDir, filepath); + + // Check if file exists + if (!fs.existsSync(absolutePath)) { + if (optional) { + core.warning(`Optional runtime import file not found: ${filepath}`); + return ""; + } + throw new Error(`Runtime import file not found: ${filepath}`); + } + + // Read the file + let content = fs.readFileSync(absolutePath, "utf8"); + + // Check for front matter and warn + if (hasFrontMatter(content)) { + core.warning(`File ${filepath} contains front matter which will be ignored in runtime import`); + // Remove front matter (everything between first --- and second ---) + const lines = content.split("\n"); + let inFrontMatter = false; + let frontMatterCount = 0; + const processedLines = []; + + for (const line of lines) { + if (line.trim() === "---" || line.trim() === "---\r") { + frontMatterCount++; + if (frontMatterCount === 1) { + inFrontMatter = true; + continue; + } else if (frontMatterCount === 2) { + inFrontMatter = false; + continue; + } + } + if (!inFrontMatter && frontMatterCount >= 2) { + processedLines.push(line); + } + } + content = processedLines.join("\n"); + } + + // Remove XML comments + content = removeXMLComments(content); + + // Check for GitHub Actions macros and error if found + if (hasGitHubActionsMacros(content)) { + throw new Error(`File ${filepath} contains GitHub Actions macros ($\{{ ... }}) which are not allowed in runtime imports`); + } + + return content; +} + +/** + * Processes all runtime-import macros in the content + * @param {string} content - The markdown content containing runtime-import macros + * @param {string} workspaceDir - The GITHUB_WORKSPACE directory path + * @returns {string} - Content with runtime-import macros replaced by file contents + */ +function processRuntimeImports(content, workspaceDir) { + // Pattern to match {{#runtime-import filepath}} or {{#runtime-import? filepath}} + // Captures: optional flag (?), whitespace, filepath + const pattern = /\{\{#runtime-import(\?)?[ \t]+([^\}]+?)\}\}/g; + + let processedContent = content; + let match; + const importedFiles = new Set(); + + // Reset regex state + pattern.lastIndex = 0; + + while ((match = pattern.exec(content)) !== null) { + const optional = match[1] === "?"; + const filepath = match[2].trim(); + const fullMatch = match[0]; + + // Check for circular/duplicate imports + if (importedFiles.has(filepath)) { + core.warning(`File ${filepath} is imported multiple times, which may indicate a circular reference`); + } + importedFiles.add(filepath); + + try { + const importedContent = processRuntimeImport(filepath, optional, workspaceDir); + // Replace the macro with the imported content + processedContent = processedContent.replace(fullMatch, importedContent); + } catch (error) { + throw new Error(`Failed to process runtime import for ${filepath}: ${error.message}`); + } + } + + return processedContent; +} + +module.exports = { + processRuntimeImports, + processRuntimeImport, + hasFrontMatter, + removeXMLComments, + hasGitHubActionsMacros, +}; diff --git a/pkg/workflow/js/safe-outputs-mcp-server.cjs b/pkg/workflow/js/safe-outputs-mcp-server.cjs new file mode 100644 index 0000000000..2176121564 --- /dev/null +++ b/pkg/workflow/js/safe-outputs-mcp-server.cjs @@ -0,0 +1,17 @@ +#!/usr/bin/env node +// @ts-check + +// Safe-outputs MCP Server Entry Point +// This is the main entry point script for the safe-outputs MCP server +// It requires the bootstrap module and starts the server + +const { startSafeOutputsServer } = require("./safe_outputs_mcp_server.cjs"); + +// Start the server +// The server reads configuration from /tmp/gh-aw/safeoutputs/config.json +// Log directory is configured via GH_AW_MCP_LOG_DIR environment variable +if (require.main === module) { + startSafeOutputsServer(); +} + +module.exports = { startSafeOutputsServer }; diff --git a/pkg/workflow/js/safe_inputs_bootstrap.cjs b/pkg/workflow/js/safe_inputs_bootstrap.cjs new file mode 100644 index 0000000000..1b498309e6 --- /dev/null +++ b/pkg/workflow/js/safe_inputs_bootstrap.cjs @@ -0,0 +1,80 @@ +// @ts-check + +/** + * Safe Inputs Bootstrap Module + * + * This module provides shared bootstrap logic for safe-inputs MCP servers. + * It handles configuration loading, tool handler loading, and cleanup that is + * common between stdio and HTTP transport implementations. + * + * Usage: + * const { bootstrapSafeInputsServer } = require("./safe_inputs_bootstrap.cjs"); + * const { config, basePath, tools } = bootstrapSafeInputsServer(configPath, logger); + */ + +const path = require("path"); +const fs = require("fs"); +const { loadConfig } = require("./safe_inputs_config_loader.cjs"); +const { loadToolHandlers } = require("./mcp_server_core.cjs"); + +/** + * @typedef {Object} Logger + * @property {Function} debug - Debug logging function + * @property {Function} debugError - Error logging function + */ + +/** + * @typedef {Object} BootstrapResult + * @property {Object} config - Loaded configuration + * @property {string} basePath - Base path for resolving handler files + * @property {Array} tools - Loaded tool handlers + */ + +/** + * Bootstrap a safe-inputs server by loading configuration and tool handlers. + * This function performs the common initialization steps shared by both stdio + * and HTTP transport implementations. + * + * @param {string} configPath - Path to the configuration JSON file + * @param {Logger} logger - Logger instance for debug messages + * @returns {BootstrapResult} Configuration, base path, and loaded tools + */ +function bootstrapSafeInputsServer(configPath, logger) { + // Load configuration + logger.debug(`Loading safe-inputs configuration from: ${configPath}`); + const config = loadConfig(configPath); + + // Determine base path for resolving relative handler paths + const basePath = path.dirname(configPath); + logger.debug(`Base path for handlers: ${basePath}`); + logger.debug(`Tools to load: ${config.tools.length}`); + + // Load tool handlers from file paths + const tools = loadToolHandlers(logger, config.tools, basePath); + + return { config, basePath, tools }; +} + +/** + * Delete the configuration file to ensure no secrets remain on disk. + * This should be called after the server has been configured and started. + * + * @param {string} configPath - Path to the configuration file to delete + * @param {Logger} logger - Logger instance for debug messages + */ +function cleanupConfigFile(configPath, logger) { + try { + if (fs.existsSync(configPath)) { + fs.unlinkSync(configPath); + logger.debug(`Deleted configuration file: ${configPath}`); + } + } catch (error) { + logger.debugError(`Warning: Could not delete configuration file: `, error); + // Continue anyway - the server is already running + } +} + +module.exports = { + bootstrapSafeInputsServer, + cleanupConfigFile, +}; diff --git a/pkg/workflow/js/safe_inputs_config_loader.cjs b/pkg/workflow/js/safe_inputs_config_loader.cjs new file mode 100644 index 0000000000..33836c2e0e --- /dev/null +++ b/pkg/workflow/js/safe_inputs_config_loader.cjs @@ -0,0 +1,53 @@ +// @ts-check + +/** + * Safe Inputs Configuration Loader + * + * This module provides utilities for loading and validating safe-inputs + * configuration from JSON files. + */ + +const fs = require("fs"); + +/** + * @typedef {Object} SafeInputsToolConfig + * @property {string} name - Tool name + * @property {string} description - Tool description + * @property {Object} inputSchema - JSON Schema for tool inputs + * @property {string} [handler] - Path to handler file (.cjs, .sh, or .py) + * @property {number} [timeout] - Timeout in seconds for tool execution (default: 60) + */ + +/** + * @typedef {Object} SafeInputsConfig + * @property {string} [serverName] - Server name (defaults to "safeinputs") + * @property {string} [version] - Server version (defaults to "1.0.0") + * @property {string} [logDir] - Log directory path + * @property {SafeInputsToolConfig[]} tools - Array of tool configurations + */ + +/** + * Load safe-inputs configuration from a JSON file + * @param {string} configPath - Path to the configuration JSON file + * @returns {SafeInputsConfig} The loaded configuration + * @throws {Error} If the file doesn't exist or configuration is invalid + */ +function loadConfig(configPath) { + if (!fs.existsSync(configPath)) { + throw new Error(`Configuration file not found: ${configPath}`); + } + + const configContent = fs.readFileSync(configPath, "utf-8"); + const config = JSON.parse(configContent); + + // Validate required fields + if (!config.tools || !Array.isArray(config.tools)) { + throw new Error("Configuration must contain a 'tools' array"); + } + + return config; +} + +module.exports = { + loadConfig, +}; diff --git a/pkg/workflow/js/safe_inputs_mcp_server.cjs b/pkg/workflow/js/safe_inputs_mcp_server.cjs new file mode 100644 index 0000000000..208fda0847 --- /dev/null +++ b/pkg/workflow/js/safe_inputs_mcp_server.cjs @@ -0,0 +1,113 @@ +// @ts-check +/// + +/** + * Safe Inputs MCP Server Module + * + * This module provides a reusable MCP server for safe-inputs configuration. + * It uses the mcp_server_core module for JSON-RPC handling and tool registration. + * + * The server reads tool configuration from a JSON file and loads handlers from + * JavaScript (.cjs), shell script (.sh), or Python script (.py) files. + * + * Usage: + * node safe_inputs_mcp_server.cjs /path/to/tools.json + * + * Or as a module: + * const { startSafeInputsServer } = require("./safe_inputs_mcp_server.cjs"); + * startSafeInputsServer("/path/to/tools.json"); + */ + +const { createServer, registerTool, start } = require("./mcp_server_core.cjs"); +const { loadConfig } = require("./safe_inputs_config_loader.cjs"); +const { createToolConfig } = require("./safe_inputs_tool_factory.cjs"); +const { bootstrapSafeInputsServer, cleanupConfigFile } = require("./safe_inputs_bootstrap.cjs"); + +/** + * @typedef {Object} SafeInputsToolConfig + * @property {string} name - Tool name + * @property {string} description - Tool description + * @property {Object} inputSchema - JSON Schema for tool inputs + * @property {string} [handler] - Path to handler file (.cjs, .sh, or .py) + */ + +/** + * @typedef {Object} SafeInputsConfig + * @property {string} [serverName] - Server name (defaults to "safeinputs") + * @property {string} [version] - Server version (defaults to "1.0.0") + * @property {string} [logDir] - Log directory path + * @property {SafeInputsToolConfig[]} tools - Array of tool configurations + */ + +/** + * Start the safe-inputs MCP server with the given configuration + * @param {string} configPath - Path to the configuration JSON file + * @param {Object} [options] - Additional options + * @param {string} [options.logDir] - Override log directory from config + * @param {boolean} [options.skipCleanup] - Skip deletion of config file (useful for stdio mode with agent restarts) + */ +function startSafeInputsServer(configPath, options = {}) { + // Create server first to have logger available + const logDir = options.logDir || undefined; + const server = createServer({ name: "safeinputs", version: "1.0.0" }, { logDir }); + + // Bootstrap: load configuration and tools using shared logic + const { config, tools } = bootstrapSafeInputsServer(configPath, server); + + // Update server info with actual config values + server.serverInfo.name = config.serverName || "safeinputs"; + server.serverInfo.version = config.version || "1.0.0"; + + // Use logDir from config if not overridden by options + if (!options.logDir && config.logDir) { + server.logDir = config.logDir; + } + + // Register all tools with the server + for (const tool of tools) { + registerTool(server, tool); + } + + // Cleanup: delete the configuration file after loading (unless skipCleanup is true) + if (!options.skipCleanup) { + cleanupConfigFile(configPath, server); + } + + // Start the server + start(server); +} + +// If run directly, start the server with command-line arguments +if (require.main === module) { + const args = process.argv.slice(2); + + if (args.length < 1) { + console.error("Usage: node safe_inputs_mcp_server.cjs [--log-dir ]"); + process.exit(1); + } + + const configPath = args[0]; + const options = {}; + + // Parse optional arguments + for (let i = 1; i < args.length; i++) { + if (args[i] === "--log-dir" && args[i + 1]) { + options.logDir = args[i + 1]; + i++; + } + } + + try { + startSafeInputsServer(configPath, options); + } catch (error) { + console.error(`Error starting safe-inputs server: ${error instanceof Error ? error.message : String(error)}`); + process.exit(1); + } +} + +module.exports = { + startSafeInputsServer, + // Re-export helpers for convenience + loadConfig, + createToolConfig, +}; diff --git a/pkg/workflow/js/safe_inputs_mcp_server_http.cjs b/pkg/workflow/js/safe_inputs_mcp_server_http.cjs new file mode 100644 index 0000000000..700509d68f --- /dev/null +++ b/pkg/workflow/js/safe_inputs_mcp_server_http.cjs @@ -0,0 +1,342 @@ +// @ts-check +/// + +/** + * Safe Inputs MCP Server with HTTP Transport + * + * This module extends the safe-inputs MCP server to support HTTP transport + * using the StreamableHTTPServerTransport from the MCP SDK. + * + * It provides both stateful and stateless HTTP modes, as well as SSE streaming. + * + * Usage: + * node safe_inputs_mcp_server_http.cjs /path/to/tools.json [--port 3000] [--stateless] + * + * Options: + * --port Port to listen on (default: 3000) + * --stateless Run in stateless mode (no session management) + * --log-dir Directory for log files + */ + +const http = require("http"); +const { randomUUID } = require("crypto"); +const { MCPServer, MCPHTTPTransport } = require("./mcp_http_transport.cjs"); +const { validateRequiredFields } = require("./safe_inputs_validation.cjs"); +const { createLogger } = require("./mcp_logger.cjs"); +const { bootstrapSafeInputsServer, cleanupConfigFile } = require("./safe_inputs_bootstrap.cjs"); + +/** + * Create and configure the MCP server with tools + * @param {string} configPath - Path to the configuration JSON file + * @param {Object} [options] - Additional options + * @param {string} [options.logDir] - Override log directory from config + * @returns {Object} Server instance and configuration + */ +function createMCPServer(configPath, options = {}) { + // Create logger early + const logger = createLogger("safeinputs"); + + logger.debug(`=== Creating MCP Server ===`); + logger.debug(`Configuration file: ${configPath}`); + + // Bootstrap: load configuration and tools using shared logic + const { config, tools } = bootstrapSafeInputsServer(configPath, logger); + + // Create server with configuration + const serverName = config.serverName || "safeinputs"; + const version = config.version || "1.0.0"; + + logger.debug(`Server name: ${serverName}`); + logger.debug(`Server version: ${version}`); + + // Create MCP Server instance + const server = new MCPServer( + { + name: serverName, + version: version, + }, + { + capabilities: { + tools: {}, + }, + } + ); + + // Register all tools with the MCP SDK server using the tool() method + logger.debug(`Registering tools with MCP server...`); + let registeredCount = 0; + let skippedCount = 0; + + for (const tool of tools) { + if (!tool.handler) { + logger.debug(`Skipping tool ${tool.name} - no handler loaded`); + skippedCount++; + continue; + } + + logger.debug(`Registering tool: ${tool.name}`); + + // Register the tool with the MCP SDK using the high-level API + // The callback receives the arguments directly as the first parameter + server.tool(tool.name, tool.description || "", tool.inputSchema || { type: "object", properties: {} }, async args => { + logger.debug(`Calling handler for tool: ${tool.name}`); + + // Validate required fields using helper + const missing = validateRequiredFields(args, tool.inputSchema); + if (missing.length) { + throw new Error(`Invalid arguments: missing or empty ${missing.map(m => `'${m}'`).join(", ")}`); + } + + // Call the handler + const result = await Promise.resolve(tool.handler(args)); + logger.debug(`Handler returned for tool: ${tool.name}`); + + // Normalize result to MCP format + const content = result && result.content ? result.content : []; + return { content, isError: false }; + }); + + registeredCount++; + } + + logger.debug(`Tool registration complete: ${registeredCount} registered, ${skippedCount} skipped`); + logger.debug(`=== MCP Server Creation Complete ===`); + + // Cleanup: delete the configuration file after loading + cleanupConfigFile(configPath, logger); + + return { server, config, logger }; +} + +/** + * Start the HTTP server with MCP protocol support + * @param {string} configPath - Path to the configuration JSON file + * @param {Object} options - Server options + * @param {number} [options.port] - Port to listen on (default: 3000) + * @param {boolean} [options.stateless] - Run in stateless mode (default: false) + * @param {string} [options.logDir] - Override log directory from config + */ +async function startHttpServer(configPath, options = {}) { + const port = options.port || 3000; + const stateless = options.stateless || false; + + const logger = createLogger("safe-inputs-startup"); + + logger.debug(`=== Starting Safe Inputs MCP HTTP Server ===`); + logger.debug(`Configuration file: ${configPath}`); + logger.debug(`Port: ${port}`); + logger.debug(`Mode: ${stateless ? "stateless" : "stateful"}`); + logger.debug(`Environment: NODE_VERSION=${process.version}, PLATFORM=${process.platform}`); + + // Create the MCP server + try { + const { server, config, logger: mcpLogger } = createMCPServer(configPath, { logDir: options.logDir }); + + // Use the MCP logger for subsequent messages + Object.assign(logger, mcpLogger); + + logger.debug(`MCP server created successfully`); + logger.debug(`Server name: ${config.serverName || "safeinputs"}`); + logger.debug(`Server version: ${config.version || "1.0.0"}`); + logger.debug(`Tools configured: ${config.tools.length}`); + + logger.debug(`Creating HTTP transport...`); + // Create the HTTP transport + const transport = new MCPHTTPTransport({ + sessionIdGenerator: stateless ? undefined : () => randomUUID(), + enableJsonResponse: true, + enableDnsRebindingProtection: false, // Disable for local development + }); + logger.debug(`HTTP transport created`); + + // Connect server to transport + logger.debug(`Connecting server to transport...`); + await server.connect(transport); + logger.debug(`Server connected to transport successfully`); + + // Create HTTP server + logger.debug(`Creating HTTP server...`); + const httpServer = http.createServer(async (req, res) => { + // Set CORS headers for development + res.setHeader("Access-Control-Allow-Origin", "*"); + res.setHeader("Access-Control-Allow-Methods", "GET, POST, OPTIONS"); + res.setHeader("Access-Control-Allow-Headers", "Content-Type, Accept"); + + // Handle OPTIONS preflight + if (req.method === "OPTIONS") { + res.writeHead(200); + res.end(); + return; + } + + // Handle GET /health endpoint for health checks + if (req.method === "GET" && req.url === "/health") { + res.writeHead(200, { "Content-Type": "application/json" }); + res.end( + JSON.stringify({ + status: "ok", + server: config.serverName || "safeinputs", + version: config.version || "1.0.0", + tools: config.tools.length, + }) + ); + return; + } + + // Only handle POST requests for MCP protocol + if (req.method !== "POST") { + res.writeHead(405, { "Content-Type": "application/json" }); + res.end(JSON.stringify({ error: "Method not allowed" })); + return; + } + + try { + // Parse request body for POST requests + let body = null; + if (req.method === "POST") { + const chunks = []; + for await (const chunk of req) { + chunks.push(chunk); + } + const bodyStr = Buffer.concat(chunks).toString(); + try { + body = bodyStr ? JSON.parse(bodyStr) : null; + } catch (parseError) { + res.writeHead(400, { "Content-Type": "application/json" }); + res.end( + JSON.stringify({ + jsonrpc: "2.0", + error: { + code: -32700, + message: "Parse error: Invalid JSON in request body", + }, + id: null, + }) + ); + return; + } + } + + // Let the transport handle the request + await transport.handleRequest(req, res, body); + } catch (error) { + // Log the full error with stack trace on the server for debugging + logger.debugError("Error handling request: ", error); + + if (!res.headersSent) { + res.writeHead(500, { "Content-Type": "application/json" }); + res.end( + JSON.stringify({ + jsonrpc: "2.0", + error: { + code: -32603, + message: "Internal server error", + }, + id: null, + }) + ); + } + } + }); + + // Start listening + logger.debug(`Attempting to bind to port ${port}...`); + httpServer.listen(port, () => { + logger.debug(`=== Safe Inputs MCP HTTP Server Started Successfully ===`); + logger.debug(`HTTP server listening on http://localhost:${port}`); + logger.debug(`MCP endpoint: POST http://localhost:${port}/`); + logger.debug(`Server name: ${config.serverName || "safeinputs"}`); + logger.debug(`Server version: ${config.version || "1.0.0"}`); + logger.debug(`Tools available: ${config.tools.length}`); + logger.debug(`Server is ready to accept requests`); + }); + + // Handle bind errors + httpServer.on("error", error => { + if (error.code === "EADDRINUSE") { + logger.debugError(`ERROR: Port ${port} is already in use. `, error); + } else if (error.code === "EACCES") { + logger.debugError(`ERROR: Permission denied to bind to port ${port}. `, error); + } else { + logger.debugError(`ERROR: Failed to start HTTP server: `, error); + } + process.exit(1); + }); + + // Handle shutdown gracefully + process.on("SIGINT", () => { + logger.debug("Received SIGINT, shutting down..."); + httpServer.close(() => { + logger.debug("HTTP server closed"); + process.exit(0); + }); + }); + + process.on("SIGTERM", () => { + logger.debug("Received SIGTERM, shutting down..."); + httpServer.close(() => { + logger.debug("HTTP server closed"); + process.exit(0); + }); + }); + + return httpServer; + } catch (error) { + // Log detailed error information for startup failures + const errorLogger = createLogger("safe-inputs-startup-error"); + errorLogger.debug(`=== FATAL ERROR: Failed to start Safe Inputs MCP HTTP Server ===`); + errorLogger.debug(`Error type: ${error.constructor.name}`); + errorLogger.debug(`Error message: ${error.message}`); + if (error.stack) { + errorLogger.debug(`Stack trace:\n${error.stack}`); + } + if (error.code) { + errorLogger.debug(`Error code: ${error.code}`); + } + errorLogger.debug(`Configuration file: ${configPath}`); + errorLogger.debug(`Port: ${port}`); + + // Re-throw the error to be caught by the caller + throw error; + } +} + +// If run directly, start the HTTP server with command-line arguments +if (require.main === module) { + const args = process.argv.slice(2); + + if (args.length < 1) { + console.error("Usage: node safe_inputs_mcp_server_http.cjs [--port ] [--stateless] [--log-dir ]"); + process.exit(1); + } + + const configPath = args[0]; + const options = { + port: 3000, + stateless: false, + logDir: undefined, + }; + + // Parse optional arguments + for (let i = 1; i < args.length; i++) { + if (args[i] === "--port" && args[i + 1]) { + options.port = parseInt(args[i + 1], 10); + i++; + } else if (args[i] === "--stateless") { + options.stateless = true; + } else if (args[i] === "--log-dir" && args[i + 1]) { + options.logDir = args[i + 1]; + i++; + } + } + + startHttpServer(configPath, options).catch(error => { + console.error(`Error starting HTTP server: ${error instanceof Error ? error.message : String(error)}`); + process.exit(1); + }); +} + +module.exports = { + startHttpServer, + createMCPServer, +}; diff --git a/pkg/workflow/js/safe_inputs_tool_factory.cjs b/pkg/workflow/js/safe_inputs_tool_factory.cjs new file mode 100644 index 0000000000..19cfb11926 --- /dev/null +++ b/pkg/workflow/js/safe_inputs_tool_factory.cjs @@ -0,0 +1,37 @@ +// @ts-check + +/** + * Safe Inputs Tool Factory + * + * This module provides a factory function for creating tool configuration objects + * for different handler types (JavaScript, Shell, Python). + */ + +/** + * @typedef {Object} SafeInputsToolConfig + * @property {string} name - Tool name + * @property {string} description - Tool description + * @property {Object} inputSchema - JSON Schema for tool inputs + * @property {string} handler - Path to handler file (.cjs, .sh, or .py) + */ + +/** + * Create a tool configuration object + * @param {string} name - Tool name + * @param {string} description - Tool description + * @param {Object} inputSchema - JSON Schema for tool inputs + * @param {string} handlerPath - Path to the handler file (.cjs, .sh, or .py) + * @returns {SafeInputsToolConfig} Tool configuration object + */ +function createToolConfig(name, description, inputSchema, handlerPath) { + return { + name, + description, + inputSchema, + handler: handlerPath, + }; +} + +module.exports = { + createToolConfig, +}; diff --git a/pkg/workflow/js/safe_inputs_validation.cjs b/pkg/workflow/js/safe_inputs_validation.cjs new file mode 100644 index 0000000000..4c64f6a4b1 --- /dev/null +++ b/pkg/workflow/js/safe_inputs_validation.cjs @@ -0,0 +1,32 @@ +// @ts-check + +/** + * Safe Inputs Validation Helpers + * + * This module provides validation utilities for safe-inputs MCP server. + */ + +/** + * Validate required fields in tool arguments + * @param {Object} args - The arguments object to validate + * @param {Object} inputSchema - The input schema containing required fields + * @returns {string[]} Array of missing field names (empty if all required fields are present) + */ +function validateRequiredFields(args, inputSchema) { + const requiredFields = inputSchema && Array.isArray(inputSchema.required) ? inputSchema.required : []; + + if (!requiredFields.length) { + return []; + } + + const missing = requiredFields.filter(f => { + const value = args[f]; + return value === undefined || value === null || (typeof value === "string" && value.trim() === ""); + }); + + return missing; +} + +module.exports = { + validateRequiredFields, +}; diff --git a/pkg/workflow/js/safe_output_helpers.cjs b/pkg/workflow/js/safe_output_helpers.cjs new file mode 100644 index 0000000000..901e76021b --- /dev/null +++ b/pkg/workflow/js/safe_output_helpers.cjs @@ -0,0 +1,170 @@ +// @ts-check +/// + +/** + * Shared helper functions for safe-output scripts + * Provides common validation and target resolution logic + */ + +/** + * Parse a comma-separated list of allowed items from environment variable + * @param {string|undefined} envValue - Environment variable value + * @returns {string[]|undefined} Array of allowed items, or undefined if no restrictions + */ +function parseAllowedItems(envValue) { + const trimmed = envValue?.trim(); + if (!trimmed) { + return undefined; + } + return trimmed + .split(",") + .map(item => item.trim()) + .filter(item => item); +} + +/** + * Parse and validate max count from environment variable + * @param {string|undefined} envValue - Environment variable value + * @param {number} defaultValue - Default value if not specified + * @returns {{valid: true, value: number} | {valid: false, error: string}} Validation result + */ +function parseMaxCount(envValue, defaultValue = 3) { + if (!envValue) { + return { valid: true, value: defaultValue }; + } + + const parsed = parseInt(envValue, 10); + if (isNaN(parsed) || parsed < 1) { + return { + valid: false, + error: `Invalid max value: ${envValue}. Must be a positive integer`, + }; + } + + return { valid: true, value: parsed }; +} + +/** + * Resolve the target number (issue/PR) based on configuration and context + * @param {Object} params - Resolution parameters + * @param {string} params.targetConfig - Target configuration ("triggering", "*", or explicit number) + * @param {any} params.item - Safe output item with optional item_number or pull_request_number + * @param {any} params.context - GitHub Actions context + * @param {string} params.itemType - Type of item being processed (for error messages) + * @param {boolean} params.supportsPR - Whether this safe output supports PR context + * @returns {{success: true, number: number, contextType: string} | {success: false, error: string, shouldFail: boolean}} Resolution result + */ +function resolveTarget(params) { + const { targetConfig, item, context, itemType, supportsPR = false } = params; + + // Check context type + const isIssueContext = context.eventName === "issues" || context.eventName === "issue_comment"; + const isPRContext = context.eventName === "pull_request" || context.eventName === "pull_request_review" || context.eventName === "pull_request_review_comment"; + + // Default target is "triggering" + const target = targetConfig || "triggering"; + + // Validate context for triggering mode + if (target === "triggering") { + if (supportsPR) { + if (!isIssueContext && !isPRContext) { + return { + success: false, + error: `Target is "triggering" but not running in issue or pull request context, skipping ${itemType}`, + shouldFail: false, // Just skip, don't fail the workflow + }; + } + } else { + if (!isPRContext) { + return { + success: false, + error: `Target is "triggering" but not running in pull request context, skipping ${itemType}`, + shouldFail: false, // Just skip, don't fail the workflow + }; + } + } + } + + // Resolve target number + let itemNumber; + let contextType; + + if (target === "*") { + // Use item_number, issue_number, or pull_request_number from item + const numberField = supportsPR ? item.item_number || item.issue_number || item.pull_request_number : item.pull_request_number; + + if (numberField) { + itemNumber = typeof numberField === "number" ? numberField : parseInt(String(numberField), 10); + if (isNaN(itemNumber) || itemNumber <= 0) { + return { + success: false, + error: `Invalid ${supportsPR ? "item_number/issue_number/pull_request_number" : "pull_request_number"} specified: ${numberField}`, + shouldFail: true, + }; + } + contextType = supportsPR && (item.item_number || item.issue_number) ? "issue" : "pull request"; + } else { + return { + success: false, + error: `Target is "*" but no ${supportsPR ? "item_number/issue_number" : "pull_request_number"} specified in ${itemType} item`, + shouldFail: true, + }; + } + } else if (target !== "triggering") { + // Explicit number + itemNumber = parseInt(target, 10); + if (isNaN(itemNumber) || itemNumber <= 0) { + return { + success: false, + error: `Invalid ${supportsPR ? "issue" : "pull request"} number in target configuration: ${target}`, + shouldFail: true, + }; + } + contextType = supportsPR ? "issue" : "pull request"; + } else { + // Use triggering context + if (isIssueContext) { + if (context.payload.issue) { + itemNumber = context.payload.issue.number; + contextType = "issue"; + } else { + return { + success: false, + error: "Issue context detected but no issue found in payload", + shouldFail: true, + }; + } + } else if (isPRContext) { + if (context.payload.pull_request) { + itemNumber = context.payload.pull_request.number; + contextType = "pull request"; + } else { + return { + success: false, + error: "Pull request context detected but no pull request found in payload", + shouldFail: true, + }; + } + } + } + + if (!itemNumber) { + return { + success: false, + error: `Could not determine ${supportsPR ? "issue or pull request" : "pull request"} number`, + shouldFail: true, + }; + } + + return { + success: true, + number: itemNumber, + contextType: contextType || (supportsPR ? "issue" : "pull request"), + }; +} + +module.exports = { + parseAllowedItems, + parseMaxCount, + resolveTarget, +}; diff --git a/pkg/workflow/js/safe_output_processor.cjs b/pkg/workflow/js/safe_output_processor.cjs new file mode 100644 index 0000000000..07c728e728 --- /dev/null +++ b/pkg/workflow/js/safe_output_processor.cjs @@ -0,0 +1,256 @@ +// @ts-check +/// + +/** + * Shared processor for safe-output scripts + * Provides common pipeline: load agent output, handle staged mode, parse config, resolve target + */ + +const { loadAgentOutput } = require("./load_agent_output.cjs"); +const { generateStagedPreview } = require("./staged_preview.cjs"); +const { parseAllowedItems, resolveTarget } = require("./safe_output_helpers.cjs"); +const { getSafeOutputConfig, validateMaxCount } = require("./safe_output_validator.cjs"); + +/** + * @typedef {Object} ProcessorConfig + * @property {string} itemType - The type field value to match in agent output (e.g., "add_labels") + * @property {string} configKey - The key to use when reading from config.json (e.g., "add_labels") + * @property {string} displayName - Human-readable name for logging (e.g., "Add Labels") + * @property {string} itemTypeName - Name used in error messages (e.g., "label addition") + * @property {boolean} [supportsPR] - When true, allows both issue AND PR contexts; when false, only PR context (default: false) + * @property {boolean} [supportsIssue] - When true, passes supportsPR=true to resolveTarget to enable both contexts (default: false) + * @property {boolean} [findMultiple] - Whether to find multiple items instead of just one (default: false) + * @property {Object} envVars - Environment variable names + * @property {string} [envVars.allowed] - Env var for allowed items list + * @property {string} [envVars.maxCount] - Env var for max count + * @property {string} [envVars.target] - Env var for target configuration + */ + +/** + * @typedef {Object} ProcessorResult + * @property {boolean} success - Whether processing should continue + * @property {any} [item] - The found item (when findMultiple is false) + * @property {any[]} [items] - The found items (when findMultiple is true) + * @property {Object} [config] - Parsed configuration + * @property {string[]|undefined} [config.allowed] - Allowed items list + * @property {number} [config.maxCount] - Maximum count + * @property {string} [config.target] - Target configuration + * @property {Object} [targetResult] - Result from resolveTarget (when findMultiple is false) + * @property {number} [targetResult.number] - Target issue/PR number + * @property {string} [targetResult.contextType] - Type of context (issue or pull request) + * @property {string} [reason] - Reason why processing should not continue + */ + +/** + * Process the initial steps common to safe-output scripts: + * 1. Load agent output + * 2. Find matching item(s) + * 3. Handle staged mode + * 4. Parse configuration + * 5. Resolve target (for single-item processors) + * + * @param {ProcessorConfig} config - Processor configuration + * @param {Object} stagedPreviewOptions - Options for staged preview + * @param {string} stagedPreviewOptions.title - Title for staged preview + * @param {string} stagedPreviewOptions.description - Description for staged preview + * @param {(item: any, index: number) => string} stagedPreviewOptions.renderItem - Function to render item in preview + * @returns {Promise} Processing result + */ +async function processSafeOutput(config, stagedPreviewOptions) { + const { itemType, configKey, displayName, itemTypeName, supportsPR = false, supportsIssue = false, findMultiple = false, envVars } = config; + + // Step 1: Load agent output + const result = loadAgentOutput(); + if (!result.success) { + return { success: false, reason: "Agent output not available" }; + } + + // Step 2: Find matching item(s) + let items; + if (findMultiple) { + items = result.items.filter(item => item.type === itemType); + if (items.length === 0) { + core.info(`No ${itemType} items found in agent output`); + return { success: false, reason: `No ${itemType} items found` }; + } + core.info(`Found ${items.length} ${itemType} item(s)`); + } else { + const item = result.items.find(item => item.type === itemType); + if (!item) { + core.warning(`No ${itemType.replace(/_/g, "-")} item found in agent output`); + return { success: false, reason: `No ${itemType} item found` }; + } + items = [item]; + // Log item details based on common fields + const itemDetails = getItemDetails(item); + if (itemDetails) { + core.info(`Found ${itemType.replace(/_/g, "-")} item with ${itemDetails}`); + } + } + + // Step 3: Handle staged mode + if (process.env.GH_AW_SAFE_OUTPUTS_STAGED === "true") { + await generateStagedPreview({ + title: stagedPreviewOptions.title, + description: stagedPreviewOptions.description, + items: items, + renderItem: stagedPreviewOptions.renderItem, + }); + return { success: false, reason: "Staged mode - preview generated" }; + } + + // Step 4: Parse configuration + const safeOutputConfig = getSafeOutputConfig(configKey); + + // Parse allowed items (from env or config) + const allowedEnvValue = envVars.allowed ? process.env[envVars.allowed] : undefined; + const allowed = parseAllowedItems(allowedEnvValue) || safeOutputConfig.allowed; + if (allowed) { + core.info(`Allowed ${itemTypeName}s: ${JSON.stringify(allowed)}`); + } else { + core.info(`No ${itemTypeName} restrictions - any ${itemTypeName}s are allowed`); + } + + // Parse max count (env takes priority, then config) + const maxCountEnvValue = envVars.maxCount ? process.env[envVars.maxCount] : undefined; + const maxCountResult = validateMaxCount(maxCountEnvValue, safeOutputConfig.max); + if (!maxCountResult.valid) { + core.setFailed(maxCountResult.error); + return { success: false, reason: "Invalid max count configuration" }; + } + const maxCount = maxCountResult.value; + core.info(`Max count: ${maxCount}`); + + // Get target configuration + const target = envVars.target ? process.env[envVars.target] || "triggering" : "triggering"; + core.info(`${displayName} target configuration: ${target}`); + + // For multiple items, return early without target resolution + if (findMultiple) { + return { + success: true, + items: items, + config: { + allowed, + maxCount, + target, + }, + }; + } + + // Step 5: Resolve target (for single-item processors) + const item = items[0]; + const targetResult = resolveTarget({ + targetConfig: target, + item: item, + context, + itemType: itemTypeName, + // supportsPR in resolveTarget: true=both issue and PR contexts, false=PR-only + // If supportsIssue is true, we pass supportsPR=true to enable both contexts + supportsPR: supportsPR || supportsIssue, + }); + + if (!targetResult.success) { + if (targetResult.shouldFail) { + core.setFailed(targetResult.error); + } else { + core.info(targetResult.error); + } + return { success: false, reason: targetResult.error }; + } + + return { + success: true, + item: item, + config: { + allowed, + maxCount, + target, + }, + targetResult: { + number: targetResult.number, + contextType: targetResult.contextType, + }, + }; +} + +/** + * Get a description of item details for logging + * @param {any} item - The safe output item + * @returns {string|null} Description string or null + */ +function getItemDetails(item) { + if (item.labels && Array.isArray(item.labels)) { + return `${item.labels.length} labels`; + } + if (item.reviewers && Array.isArray(item.reviewers)) { + return `${item.reviewers.length} reviewers`; + } + return null; +} + +/** + * Sanitize and deduplicate an array of string items + * @param {any[]} items - Raw items array + * @returns {string[]} Sanitized and deduplicated array + */ +function sanitizeItems(items) { + return items + .filter(item => item != null && item !== false && item !== 0) + .map(item => String(item).trim()) + .filter(item => item) + .filter((item, index, arr) => arr.indexOf(item) === index); +} + +/** + * Filter items by allowed list + * @param {string[]} items - Items to filter + * @param {string[]|undefined} allowed - Allowed items list (undefined means all allowed) + * @returns {string[]} Filtered items + */ +function filterByAllowed(items, allowed) { + if (!allowed || allowed.length === 0) { + return items; + } + return items.filter(item => allowed.includes(item)); +} + +/** + * Limit items to max count + * @param {string[]} items - Items to limit + * @param {number} maxCount - Maximum number of items + * @returns {string[]} Limited items + */ +function limitToMaxCount(items, maxCount) { + if (items.length > maxCount) { + core.info(`Too many items (${items.length}), limiting to ${maxCount}`); + return items.slice(0, maxCount); + } + return items; +} + +/** + * Process items through the standard pipeline: filter by allowed, sanitize, dedupe, limit + * @param {any[]} rawItems - Raw items array from agent output + * @param {string[]|undefined} allowed - Allowed items list + * @param {number} maxCount - Maximum number of items + * @returns {string[]} Processed items + */ +function processItems(rawItems, allowed, maxCount) { + // Filter by allowed list first + const filtered = filterByAllowed(rawItems, allowed); + + // Sanitize and deduplicate + const sanitized = sanitizeItems(filtered); + + // Limit to max count + return limitToMaxCount(sanitized, maxCount); +} + +module.exports = { + processSafeOutput, + sanitizeItems, + filterByAllowed, + limitToMaxCount, + processItems, +}; diff --git a/pkg/workflow/js/safe_output_type_validator.cjs b/pkg/workflow/js/safe_output_type_validator.cjs new file mode 100644 index 0000000000..71d8ac7b96 --- /dev/null +++ b/pkg/workflow/js/safe_output_type_validator.cjs @@ -0,0 +1,568 @@ +// @ts-check +/// + +/** + * Safe Output Type Validator + * + * A data-driven validation engine for safe output types. + * Validation rules are loaded from GH_AW_VALIDATION_CONFIG environment variable, + * which is generated by the Go compiler from the single source of truth. + */ + +const { sanitizeContent } = require("./sanitize_content.cjs"); +const { isTemporaryId } = require("./temporary_id.cjs"); + +/** + * Default max body length for GitHub content + */ +const MAX_BODY_LENGTH = 65000; + +/** + * Maximum length for GitHub usernames + * Reference: https://github.com/dead-claudia/github-limits + */ +const MAX_GITHUB_USERNAME_LENGTH = 39; + +/** + * @typedef {Object} FieldValidation + * @property {boolean} [required] - Whether the field is required + * @property {string} [type] - Expected type: 'string', 'number', 'boolean', 'array' + * @property {boolean} [sanitize] - Whether to sanitize string content + * @property {number} [maxLength] - Maximum length for strings + * @property {boolean} [positiveInteger] - Must be a positive integer + * @property {boolean} [optionalPositiveInteger] - Optional but if present must be positive integer + * @property {boolean} [issueOrPRNumber] - Can be issue/PR number or undefined + * @property {boolean} [issueNumberOrTemporaryId] - Can be issue number or temporary ID + * @property {string[]} [enum] - Allowed values for the field + * @property {string} [itemType] - For arrays, the type of items + * @property {boolean} [itemSanitize] - For arrays, whether to sanitize items + * @property {number} [itemMaxLength] - For arrays, max length per item + * @property {string} [pattern] - Regex pattern the value must match + * @property {string} [patternError] - Error message for pattern mismatch + */ + +/** + * @typedef {Object} TypeValidationConfig + * @property {number} defaultMax - Default max count for this type + * @property {Object.} fields - Field validation rules + * @property {string} [customValidation] - Custom validation rule identifier + */ + +/** @type {Object.|null} */ +let cachedValidationConfig = null; + +/** + * Load validation configuration from environment variable + * @returns {Object.} + */ +function loadValidationConfig() { + if (cachedValidationConfig !== null) { + return cachedValidationConfig; + } + + const configJson = process.env.GH_AW_VALIDATION_CONFIG; + if (!configJson) { + // Return empty config if not provided - validation will be skipped + cachedValidationConfig = {}; + return cachedValidationConfig; + } + + try { + /** @type {Object.} */ + const parsed = JSON.parse(configJson); + cachedValidationConfig = parsed || {}; + return cachedValidationConfig; + } catch (error) { + const errorMsg = error instanceof Error ? error.message : String(error); + // Log as error since missing validation config is critical + if (typeof core !== "undefined") { + core.error(`CRITICAL: Failed to parse validation config: ${errorMsg}. Validation will be skipped.`); + } + cachedValidationConfig = {}; + return cachedValidationConfig; + } +} + +/** + * Reset the cached validation config (for testing) + */ +function resetValidationConfigCache() { + cachedValidationConfig = null; +} + +/** + * Get the default max count for a type + * @param {string} itemType - The safe output type + * @param {Object} [config] - Configuration override from safe-outputs config + * @returns {number} The max allowed count + */ +function getMaxAllowedForType(itemType, config) { + const itemConfig = config?.[itemType]; + if (itemConfig && typeof itemConfig === "object" && "max" in itemConfig && itemConfig.max) { + return itemConfig.max; + } + const validationConfig = loadValidationConfig(); + const typeConfig = validationConfig[itemType]; + return typeConfig?.defaultMax ?? 1; +} + +/** + * Get the minimum required count for a type + * @param {string} itemType - The safe output type + * @param {Object} [config] - Configuration from safe-outputs config + * @returns {number} The minimum required count + */ +function getMinRequiredForType(itemType, config) { + const itemConfig = config?.[itemType]; + if (itemConfig && typeof itemConfig === "object" && "min" in itemConfig && itemConfig.min) { + return itemConfig.min; + } + return 0; +} + +/** + * Validate a positive integer field + * @param {any} value - Value to validate + * @param {string} fieldName - Field name for error messages + * @param {number} lineNum - Line number for error messages + * @returns {{isValid: boolean, normalizedValue?: number, error?: string}} + */ +function validatePositiveInteger(value, fieldName, lineNum) { + if (value === undefined || value === null) { + return { + isValid: false, + error: `Line ${lineNum}: ${fieldName} is required`, + }; + } + if (typeof value !== "number" && typeof value !== "string") { + return { + isValid: false, + error: `Line ${lineNum}: ${fieldName} must be a number or string`, + }; + } + const parsed = typeof value === "string" ? parseInt(value, 10) : value; + if (isNaN(parsed) || parsed <= 0 || !Number.isInteger(parsed)) { + return { + isValid: false, + error: `Line ${lineNum}: ${fieldName} must be a valid positive integer (got: ${value})`, + }; + } + return { isValid: true, normalizedValue: parsed }; +} + +/** + * Validate an optional positive integer field + * @param {any} value - Value to validate + * @param {string} fieldName - Field name for error messages + * @param {number} lineNum - Line number for error messages + * @returns {{isValid: boolean, normalizedValue?: number, error?: string}} + */ +function validateOptionalPositiveInteger(value, fieldName, lineNum) { + if (value === undefined) { + return { isValid: true }; + } + if (typeof value !== "number" && typeof value !== "string") { + return { + isValid: false, + error: `Line ${lineNum}: ${fieldName} must be a number or string`, + }; + } + const parsed = typeof value === "string" ? parseInt(value, 10) : value; + if (isNaN(parsed) || parsed <= 0 || !Number.isInteger(parsed)) { + return { + isValid: false, + error: `Line ${lineNum}: ${fieldName} must be a valid positive integer (got: ${value})`, + }; + } + return { isValid: true, normalizedValue: parsed }; +} + +/** + * Validate an issue/PR number field (optional, accepts number or string) + * @param {any} value - Value to validate + * @param {string} fieldName - Field name for error messages + * @param {number} lineNum - Line number for error messages + * @returns {{isValid: boolean, error?: string}} + */ +function validateIssueOrPRNumber(value, fieldName, lineNum) { + if (value === undefined) { + return { isValid: true }; + } + if (typeof value !== "number" && typeof value !== "string") { + return { + isValid: false, + error: `Line ${lineNum}: ${fieldName} must be a number or string`, + }; + } + return { isValid: true }; +} + +/** + * Validate a value that can be either a positive integer (issue number) or a temporary ID. + * @param {any} value - The value to validate + * @param {string} fieldName - Name of the field for error messages + * @param {number} lineNum - Line number for error messages + * @returns {{isValid: boolean, normalizedValue?: number|string, isTemporary?: boolean, error?: string}} + */ +function validateIssueNumberOrTemporaryId(value, fieldName, lineNum) { + if (value === undefined || value === null) { + return { + isValid: false, + error: `Line ${lineNum}: ${fieldName} is required`, + }; + } + if (typeof value !== "number" && typeof value !== "string") { + return { + isValid: false, + error: `Line ${lineNum}: ${fieldName} must be a number or string`, + }; + } + // Check if it's a temporary ID + if (isTemporaryId(value)) { + return { isValid: true, normalizedValue: String(value).toLowerCase(), isTemporary: true }; + } + // Try to parse as positive integer + const parsed = typeof value === "string" ? parseInt(value, 10) : value; + if (isNaN(parsed) || parsed <= 0 || !Number.isInteger(parsed)) { + return { + isValid: false, + error: `Line ${lineNum}: ${fieldName} must be a positive integer or temporary ID (got: ${value})`, + }; + } + return { isValid: true, normalizedValue: parsed, isTemporary: false }; +} + +/** + * Validate a single field based on its validation configuration + * @param {any} value - The field value + * @param {string} fieldName - The field name + * @param {FieldValidation} validation - The validation configuration + * @param {string} itemType - The item type for error messages + * @param {number} lineNum - Line number for error messages + * @param {Object} [options] - Optional sanitization options + * @param {string[]} [options.allowedAliases] - List of allowed @mentions + * @returns {{isValid: boolean, normalizedValue?: any, error?: string}} + */ +function validateField(value, fieldName, validation, itemType, lineNum, options) { + // For positiveInteger fields, delegate required check to validatePositiveInteger + if (validation.positiveInteger) { + return validatePositiveInteger(value, `${itemType} '${fieldName}'`, lineNum); + } + + // For issueNumberOrTemporaryId fields, delegate required check to validateIssueNumberOrTemporaryId + if (validation.issueNumberOrTemporaryId) { + return validateIssueNumberOrTemporaryId(value, `${itemType} '${fieldName}'`, lineNum); + } + + // Handle required check for other fields + if (validation.required && (value === undefined || value === null)) { + const fieldType = validation.type || "string"; + return { + isValid: false, + error: `Line ${lineNum}: ${itemType} requires a '${fieldName}' field (${fieldType})`, + }; + } + + // If not required and not present, skip other validations + if (value === undefined || value === null) { + return { isValid: true }; + } + + // Handle optionalPositiveInteger validation + if (validation.optionalPositiveInteger) { + return validateOptionalPositiveInteger(value, `${itemType} '${fieldName}'`, lineNum); + } + + // Handle issueOrPRNumber validation + if (validation.issueOrPRNumber) { + return validateIssueOrPRNumber(value, `${itemType} '${fieldName}'`, lineNum); + } + + // Handle type validation + if (validation.type === "string") { + if (typeof value !== "string") { + // For required fields, use "requires a" format for both missing and wrong type + if (validation.required) { + return { + isValid: false, + error: `Line ${lineNum}: ${itemType} requires a '${fieldName}' field (string)`, + }; + } + return { + isValid: false, + error: `Line ${lineNum}: ${itemType} '${fieldName}' must be a string`, + }; + } + + // Handle pattern validation + if (validation.pattern) { + const regex = new RegExp(validation.pattern); + if (!regex.test(value.trim())) { + const errorMsg = validation.patternError || `must match pattern ${validation.pattern}`; + return { + isValid: false, + error: `Line ${lineNum}: ${itemType} '${fieldName}' ${errorMsg}`, + }; + } + } + + // Handle enum validation + if (validation.enum) { + const normalizedValue = value.toLowerCase ? value.toLowerCase() : value; + const normalizedEnum = validation.enum.map(e => (e.toLowerCase ? e.toLowerCase() : e)); + if (!normalizedEnum.includes(normalizedValue)) { + // Use special format for 2-option enums: "'field' must be 'A' or 'B'" + // Use standard format for more options: "'field' must be one of: A, B, C" + let errorMsg; + if (validation.enum.length === 2) { + errorMsg = `Line ${lineNum}: ${itemType} '${fieldName}' must be '${validation.enum[0]}' or '${validation.enum[1]}'`; + } else { + errorMsg = `Line ${lineNum}: ${itemType} '${fieldName}' must be one of: ${validation.enum.join(", ")}`; + } + return { + isValid: false, + error: errorMsg, + }; + } + // Return the properly cased enum value if there's a case difference + const matchIndex = normalizedEnum.indexOf(normalizedValue); + let normalizedResult = validation.enum[matchIndex]; + // Apply sanitization if configured + if (validation.sanitize && validation.maxLength) { + normalizedResult = sanitizeContent(normalizedResult, { + maxLength: validation.maxLength, + allowedAliases: options?.allowedAliases || [], + }); + } + return { isValid: true, normalizedValue: normalizedResult }; + } + + // Handle sanitization + if (validation.sanitize) { + const sanitized = sanitizeContent(value, { + maxLength: validation.maxLength || MAX_BODY_LENGTH, + allowedAliases: options?.allowedAliases || [], + }); + return { isValid: true, normalizedValue: sanitized }; + } + + return { isValid: true, normalizedValue: value }; + } + + if (validation.type === "array") { + if (!Array.isArray(value)) { + // For required fields, use "requires a" format for both missing and wrong type + if (validation.required) { + return { + isValid: false, + error: `Line ${lineNum}: ${itemType} requires a '${fieldName}' field (array)`, + }; + } + return { + isValid: false, + error: `Line ${lineNum}: ${itemType} '${fieldName}' must be an array`, + }; + } + + // Validate array items + if (validation.itemType === "string") { + const hasInvalidItem = value.some(item => typeof item !== "string"); + if (hasInvalidItem) { + return { + isValid: false, + error: `Line ${lineNum}: ${itemType} ${fieldName} array must contain only strings`, + }; + } + + // Sanitize items if configured + if (validation.itemSanitize) { + const sanitizedItems = value.map(item => + typeof item === "string" + ? sanitizeContent(item, { + maxLength: validation.itemMaxLength || 128, + allowedAliases: options?.allowedAliases || [], + }) + : item + ); + return { isValid: true, normalizedValue: sanitizedItems }; + } + } + + return { isValid: true, normalizedValue: value }; + } + + if (validation.type === "boolean") { + if (typeof value !== "boolean") { + return { + isValid: false, + error: `Line ${lineNum}: ${itemType} '${fieldName}' must be a boolean`, + }; + } + return { isValid: true, normalizedValue: value }; + } + + if (validation.type === "number") { + if (typeof value !== "number") { + return { + isValid: false, + error: `Line ${lineNum}: ${itemType} '${fieldName}' must be a number`, + }; + } + return { isValid: true, normalizedValue: value }; + } + + // No specific type validation, return as-is + return { isValid: true, normalizedValue: value }; +} + +/** + * Execute custom validation rules + * @param {Object} item - The item to validate + * @param {string} customValidation - The custom validation rule identifier + * @param {number} lineNum - Line number for error messages + * @param {string} itemType - The item type for error messages + * @returns {{isValid: boolean, error?: string}|null} + */ +function executeCustomValidation(item, customValidation, lineNum, itemType) { + if (!customValidation) { + return null; + } + + // Parse custom validation rule + if (customValidation.startsWith("requiresOneOf:")) { + const fields = customValidation.slice("requiresOneOf:".length).split(","); + const hasValidField = fields.some(field => item[field] !== undefined); + if (!hasValidField) { + return { + isValid: false, + error: `Line ${lineNum}: ${itemType} requires at least one of: ${fields.map(f => `'${f}'`).join(", ")} fields`, + }; + } + } + + if (customValidation === "startLineLessOrEqualLine") { + if (item.start_line !== undefined && item.line !== undefined) { + const startLine = typeof item.start_line === "string" ? parseInt(item.start_line, 10) : item.start_line; + const endLine = typeof item.line === "string" ? parseInt(item.line, 10) : item.line; + if (startLine > endLine) { + return { + isValid: false, + error: `Line ${lineNum}: ${itemType} 'start_line' must be less than or equal to 'line'`, + }; + } + } + } + + if (customValidation === "parentAndSubDifferent") { + // Normalize values for comparison + const normalizeValue = v => (typeof v === "string" ? v.toLowerCase() : v); + if (normalizeValue(item.parent_issue_number) === normalizeValue(item.sub_issue_number)) { + return { + isValid: false, + error: `Line ${lineNum}: ${itemType} 'parent_issue_number' and 'sub_issue_number' must be different`, + }; + } + } + + return null; +} + +/** + * Validate a safe output item against its type configuration + * @param {Object} item - The item to validate + * @param {string} itemType - The item type (e.g., "create_issue") + * @param {number} lineNum - Line number for error messages + * @param {Object} [options] - Optional sanitization options + * @param {string[]} [options.allowedAliases] - List of allowed @mentions + * @returns {{isValid: boolean, normalizedItem?: Object, error?: string}} + */ +function validateItem(item, itemType, lineNum, options) { + const validationConfig = loadValidationConfig(); + const typeConfig = validationConfig[itemType]; + + if (!typeConfig) { + // Unknown type - let the caller handle this + return { isValid: true, normalizedItem: item }; + } + + const normalizedItem = { ...item }; + const errors = []; + + // Run custom validation first if defined + if (typeConfig.customValidation) { + const customResult = executeCustomValidation(item, typeConfig.customValidation, lineNum, itemType); + if (customResult && !customResult.isValid) { + return customResult; + } + } + + // Validate each configured field + for (const [fieldName, validation] of Object.entries(typeConfig.fields)) { + const fieldValue = item[fieldName]; + const result = validateField(fieldValue, fieldName, validation, itemType, lineNum, options); + + if (!result.isValid) { + errors.push(result.error); + } else if (result.normalizedValue !== undefined) { + normalizedItem[fieldName] = result.normalizedValue; + } + } + + if (errors.length > 0) { + return { isValid: false, error: errors[0] }; // Return first error + } + + return { isValid: true, normalizedItem }; +} + +/** + * Check if a type has validation configuration + * @param {string} itemType - The item type + * @returns {boolean} + */ +function hasValidationConfig(itemType) { + const validationConfig = loadValidationConfig(); + return itemType in validationConfig; +} + +/** + * Get the validation configuration for a type + * @param {string} itemType - The item type + * @returns {TypeValidationConfig|undefined} + */ +function getValidationConfig(itemType) { + const validationConfig = loadValidationConfig(); + return validationConfig[itemType]; +} + +/** + * Get all known safe output types + * @returns {string[]} + */ +function getKnownTypes() { + const validationConfig = loadValidationConfig(); + return Object.keys(validationConfig); +} + +module.exports = { + // Main validation functions + validateItem, + validateField, + validatePositiveInteger, + validateOptionalPositiveInteger, + validateIssueOrPRNumber, + validateIssueNumberOrTemporaryId, + + // Configuration accessors + loadValidationConfig, + resetValidationConfigCache, + getMaxAllowedForType, + getMinRequiredForType, + hasValidationConfig, + getValidationConfig, + getKnownTypes, + + // Constants + MAX_BODY_LENGTH, + MAX_GITHUB_USERNAME_LENGTH, +}; diff --git a/pkg/workflow/js/safe_output_validator.cjs b/pkg/workflow/js/safe_output_validator.cjs new file mode 100644 index 0000000000..9ada5a5334 --- /dev/null +++ b/pkg/workflow/js/safe_output_validator.cjs @@ -0,0 +1,164 @@ +// @ts-check +/// + +const fs = require("fs"); +const { sanitizeLabelContent } = require("./sanitize_label_content.cjs"); + +/** + * Load and parse the safe outputs configuration from config.json + * @returns {object} The parsed configuration object + */ +function loadSafeOutputsConfig() { + const configPath = "/tmp/gh-aw/safeoutputs/config.json"; + try { + if (!fs.existsSync(configPath)) { + core.warning(`Config file not found at ${configPath}, using defaults`); + return {}; + } + const configContent = fs.readFileSync(configPath, "utf8"); + return JSON.parse(configContent); + } catch (error) { + core.warning(`Failed to load config: ${error instanceof Error ? error.message : String(error)}`); + return {}; + } +} + +/** + * Get configuration for a specific safe output type + * @param {string} outputType - The type of safe output (e.g., "add_labels", "update_issue") + * @returns {{max?: number, target?: string, allowed?: string[]}} The configuration for this output type + */ +function getSafeOutputConfig(outputType) { + const config = loadSafeOutputsConfig(); + return config[outputType] || {}; +} + +/** + * Validate and sanitize a title string + * @param {any} title - The title to validate + * @param {string} fieldName - The name of the field for error messages (default: "title") + * @returns {{valid: boolean, value?: string, error?: string}} Validation result + */ +function validateTitle(title, fieldName = "title") { + if (title === undefined || title === null) { + return { valid: false, error: `${fieldName} is required` }; + } + + if (typeof title !== "string") { + return { valid: false, error: `${fieldName} must be a string` }; + } + + const trimmed = title.trim(); + if (trimmed.length === 0) { + return { valid: false, error: `${fieldName} cannot be empty` }; + } + + return { valid: true, value: trimmed }; +} + +/** + * Validate and sanitize a body/content string + * @param {any} body - The body to validate + * @param {string} fieldName - The name of the field for error messages (default: "body") + * @param {boolean} required - Whether the body is required (default: false) + * @returns {{valid: boolean, value?: string, error?: string}} Validation result + */ +function validateBody(body, fieldName = "body", required = false) { + if (body === undefined || body === null) { + if (required) { + return { valid: false, error: `${fieldName} is required` }; + } + return { valid: true, value: "" }; + } + + if (typeof body !== "string") { + return { valid: false, error: `${fieldName} must be a string` }; + } + + return { valid: true, value: body }; +} + +/** + * Validate and sanitize an array of labels + * @param {any} labels - The labels to validate + * @param {string[]|undefined} allowedLabels - Optional list of allowed labels + * @param {number} maxCount - Maximum number of labels allowed + * @returns {{valid: boolean, value?: string[], error?: string}} Validation result + */ +function validateLabels(labels, allowedLabels = undefined, maxCount = 3) { + if (!labels || !Array.isArray(labels)) { + return { valid: false, error: "labels must be an array" }; + } + + // Check for removal attempts (labels starting with '-') + for (const label of labels) { + if (label && typeof label === "string" && label.startsWith("-")) { + return { valid: false, error: `Label removal is not permitted. Found line starting with '-': ${label}` }; + } + } + + // Filter labels based on allowed list if provided + let validLabels = labels; + if (allowedLabels && allowedLabels.length > 0) { + validLabels = labels.filter(label => allowedLabels.includes(label)); + } + + // Sanitize and deduplicate labels + const uniqueLabels = validLabels + .filter(label => label != null && label !== false && label !== 0) + .map(label => String(label).trim()) + .filter(label => label) + .map(label => sanitizeLabelContent(label)) + .filter(label => label) + .map(label => (label.length > 64 ? label.substring(0, 64) : label)) + .filter((label, index, arr) => arr.indexOf(label) === index); + + // Apply max count limit + if (uniqueLabels.length > maxCount) { + core.info(`Too many labels (${uniqueLabels.length}), limiting to ${maxCount}`); + return { valid: true, value: uniqueLabels.slice(0, maxCount) }; + } + + if (uniqueLabels.length === 0) { + return { valid: false, error: "No valid labels found after sanitization" }; + } + + return { valid: true, value: uniqueLabels }; +} + +/** + * Validate max count from environment variable with config fallback + * @param {string|undefined} envValue - Environment variable value + * @param {number|undefined} configDefault - Default from config.json + * @param {number} [fallbackDefault] - Fallback default for testing (optional, defaults to 1) + * @returns {{valid: true, value: number} | {valid: false, error: string}} Validation result + */ +function validateMaxCount(envValue, configDefault, fallbackDefault = 1) { + // Priority: env var > config.json > fallback default + // In production, config.json should always have the default + // Fallback is provided for backward compatibility and testing + const defaultValue = configDefault !== undefined ? configDefault : fallbackDefault; + + if (!envValue) { + return { valid: true, value: defaultValue }; + } + + const parsed = parseInt(envValue, 10); + if (isNaN(parsed) || parsed < 1) { + return { + valid: false, + error: `Invalid max value: ${envValue}. Must be a positive integer`, + }; + } + + return { valid: true, value: parsed }; +} + +module.exports = { + loadSafeOutputsConfig, + getSafeOutputConfig, + validateTitle, + validateBody, + validateLabels, + validateMaxCount, +}; diff --git a/pkg/workflow/js/safe_outputs_append.cjs b/pkg/workflow/js/safe_outputs_append.cjs new file mode 100644 index 0000000000..7561ac2995 --- /dev/null +++ b/pkg/workflow/js/safe_outputs_append.cjs @@ -0,0 +1,35 @@ +// @ts-check + +const fs = require("fs"); + +/** + * Create an append function for the safe outputs file + * @param {string} outputFile - Path to the output file + * @returns {Function} A function that appends entries to the safe outputs file + */ +function createAppendFunction(outputFile) { + /** + * Append an entry to the safe outputs file + * + * CRITICAL: The output file is in JSONL (JSON Lines) format where each entry + * MUST be a single line. JSON.stringify must be called WITHOUT formatting + * parameters (no indentation, no pretty-printing) to ensure one JSON object per line. + * + * @param {Object} entry - The entry to append + */ + return function appendSafeOutput(entry) { + if (!outputFile) throw new Error("No output file configured"); + // Normalize type to use underscores (convert any dashes to underscores) + entry.type = entry.type.replace(/-/g, "_"); + // CRITICAL: Use JSON.stringify WITHOUT formatting parameters for JSONL format + // Each entry must be on a single line, followed by a newline character + const jsonLine = JSON.stringify(entry) + "\n"; + try { + fs.appendFileSync(outputFile, jsonLine); + } catch (error) { + throw new Error(`Failed to write to output file: ${error instanceof Error ? error.message : String(error)}`); + } + }; +} + +module.exports = { createAppendFunction }; diff --git a/pkg/workflow/js/safe_outputs_bootstrap.cjs b/pkg/workflow/js/safe_outputs_bootstrap.cjs new file mode 100644 index 0000000000..68b63569e5 --- /dev/null +++ b/pkg/workflow/js/safe_outputs_bootstrap.cjs @@ -0,0 +1,74 @@ +// @ts-check + +/** + * Safe Outputs Bootstrap Module + * + * This module provides shared bootstrap logic for safe-outputs MCP server. + * It handles configuration loading, tools loading, and cleanup that is + * common initialization logic. + * + * Usage: + * const { bootstrapSafeOutputsServer } = require("./safe_outputs_bootstrap.cjs"); + * const { config, outputFile, tools } = bootstrapSafeOutputsServer(server); + */ + +const fs = require("fs"); +const { loadConfig } = require("./safe_outputs_config.cjs"); +const { loadTools } = require("./safe_outputs_tools_loader.cjs"); + +/** + * @typedef {Object} Logger + * @property {Function} debug - Debug logging function + * @property {Function} debugError - Error logging function + */ + +/** + * @typedef {Object} BootstrapResult + * @property {Object} config - Loaded configuration + * @property {string} outputFile - Path to the output file + * @property {Array} tools - Loaded tool definitions + */ + +/** + * Bootstrap a safe-outputs server by loading configuration and tools. + * This function performs the common initialization steps. + * + * @param {Logger} logger - Logger instance for debug messages + * @returns {BootstrapResult} Configuration, output file path, and loaded tools + */ +function bootstrapSafeOutputsServer(logger) { + // Load configuration + logger.debug("Loading safe-outputs configuration"); + const { config, outputFile } = loadConfig(logger); + + // Load tools + logger.debug("Loading safe-outputs tools"); + const tools = loadTools(logger); + + return { config, outputFile, tools }; +} + +/** + * Delete the configuration file to ensure no secrets remain on disk. + * This should be called after the server has been configured and started. + * + * @param {Logger} logger - Logger instance for debug messages + */ +function cleanupConfigFile(logger) { + const configPath = process.env.GH_AW_SAFE_OUTPUTS_CONFIG_PATH || "/tmp/gh-aw/safeoutputs/config.json"; + + try { + if (fs.existsSync(configPath)) { + fs.unlinkSync(configPath); + logger.debug(`Deleted configuration file: ${configPath}`); + } + } catch (error) { + logger.debugError("Warning: Could not delete configuration file: ", error); + // Continue anyway - the server is already running + } +} + +module.exports = { + bootstrapSafeOutputsServer, + cleanupConfigFile, +}; diff --git a/pkg/workflow/js/safe_outputs_config.cjs b/pkg/workflow/js/safe_outputs_config.cjs new file mode 100644 index 0000000000..debc341042 --- /dev/null +++ b/pkg/workflow/js/safe_outputs_config.cjs @@ -0,0 +1,59 @@ +// @ts-check + +const fs = require("fs"); +const path = require("path"); + +/** + * Load and process safe outputs configuration + * @param {Object} server - The MCP server instance for logging + * @returns {Object} An object containing the processed config and output file path + */ +function loadConfig(server) { + // Read configuration from file + const configPath = process.env.GH_AW_SAFE_OUTPUTS_CONFIG_PATH || "/tmp/gh-aw/safeoutputs/config.json"; + let safeOutputsConfigRaw; + + server.debug(`Reading config from file: ${configPath}`); + + try { + if (fs.existsSync(configPath)) { + server.debug(`Config file exists at: ${configPath}`); + const configFileContent = fs.readFileSync(configPath, "utf8"); + server.debug(`Config file content length: ${configFileContent.length} characters`); + // Don't log raw content to avoid exposing sensitive configuration data + server.debug(`Config file read successfully, attempting to parse JSON`); + safeOutputsConfigRaw = JSON.parse(configFileContent); + server.debug(`Successfully parsed config from file with ${Object.keys(safeOutputsConfigRaw).length} configuration keys`); + } else { + server.debug(`Config file does not exist at: ${configPath}`); + server.debug(`Using minimal default configuration`); + safeOutputsConfigRaw = {}; + } + } catch (error) { + server.debug(`Error reading config file: ${error instanceof Error ? error.message : String(error)}`); + server.debug(`Falling back to empty configuration`); + safeOutputsConfigRaw = {}; + } + + const safeOutputsConfig = Object.fromEntries(Object.entries(safeOutputsConfigRaw).map(([k, v]) => [k.replace(/-/g, "_"), v])); + server.debug(`Final processed config: ${JSON.stringify(safeOutputsConfig)}`); + + // Handle GH_AW_SAFE_OUTPUTS with default fallback + const outputFile = process.env.GH_AW_SAFE_OUTPUTS || "/tmp/gh-aw/safeoutputs/outputs.jsonl"; + if (!process.env.GH_AW_SAFE_OUTPUTS) { + server.debug(`GH_AW_SAFE_OUTPUTS not set, using default: ${outputFile}`); + } + // Always ensure the directory exists, regardless of whether env var is set + const outputDir = path.dirname(outputFile); + if (!fs.existsSync(outputDir)) { + server.debug(`Creating output directory: ${outputDir}`); + fs.mkdirSync(outputDir, { recursive: true }); + } + + return { + config: safeOutputsConfig, + outputFile: outputFile, + }; +} + +module.exports = { loadConfig }; diff --git a/pkg/workflow/js/safe_outputs_handlers.cjs b/pkg/workflow/js/safe_outputs_handlers.cjs new file mode 100644 index 0000000000..60c7569222 --- /dev/null +++ b/pkg/workflow/js/safe_outputs_handlers.cjs @@ -0,0 +1,322 @@ +// @ts-check + +const fs = require("fs"); +const path = require("path"); +const crypto = require("crypto"); + +const { normalizeBranchName } = require("./normalize_branch_name.cjs"); +const { estimateTokens } = require("./estimate_tokens.cjs"); +const { writeLargeContentToFile } = require("./write_large_content_to_file.cjs"); +const { getCurrentBranch } = require("./get_current_branch.cjs"); +const { getBaseBranch } = require("./get_base_branch.cjs"); +const { generateGitPatch } = require("./generate_git_patch.cjs"); + +/** + * Create handlers for safe output tools + * @param {Object} server - The MCP server instance for logging + * @param {Function} appendSafeOutput - Function to append entries to the output file + * @param {Object} [config] - Optional configuration object with safe output settings + * @returns {Object} An object containing all handler functions + */ +function createHandlers(server, appendSafeOutput, config = {}) { + /** + * Default handler for safe output tools + * @param {string} type - The tool type + * @returns {Function} Handler function + */ + const defaultHandler = type => args => { + const entry = { ...(args || {}), type }; + + // Check if any field in the entry has content exceeding 16000 tokens + let largeContent = null; + let largeFieldName = null; + const TOKEN_THRESHOLD = 16000; + + for (const [key, value] of Object.entries(entry)) { + if (typeof value === "string") { + const tokens = estimateTokens(value); + if (tokens > TOKEN_THRESHOLD) { + largeContent = value; + largeFieldName = key; + server.debug(`Field '${key}' has ${tokens} tokens (exceeds ${TOKEN_THRESHOLD})`); + break; + } + } + } + + if (largeContent && largeFieldName) { + // Write large content to file + const fileInfo = writeLargeContentToFile(largeContent); + + // Replace large field with file reference + entry[largeFieldName] = `[Content too large, saved to file: ${fileInfo.filename}]`; + + // Append modified entry to safe outputs + appendSafeOutput(entry); + + // Return file info to the agent + return { + content: [ + { + type: "text", + text: JSON.stringify(fileInfo), + }, + ], + }; + } + + // Normal case - no large content + appendSafeOutput(entry); + return { + content: [ + { + type: "text", + text: JSON.stringify({ result: "success" }), + }, + ], + }; + }; + + /** + * Handler for upload_asset tool + */ + const uploadAssetHandler = args => { + const branchName = process.env.GH_AW_ASSETS_BRANCH; + if (!branchName) throw new Error("GH_AW_ASSETS_BRANCH not set"); + + // Normalize the branch name to ensure it's a valid git branch name + const normalizedBranchName = normalizeBranchName(branchName); + + const { path: filePath } = args; + + // Validate file path is within allowed directories + const absolutePath = path.resolve(filePath); + const workspaceDir = process.env.GITHUB_WORKSPACE || process.cwd(); + const tmpDir = "/tmp"; + + const isInWorkspace = absolutePath.startsWith(path.resolve(workspaceDir)); + const isInTmp = absolutePath.startsWith(tmpDir); + + if (!isInWorkspace && !isInTmp) { + throw new Error(`File path must be within workspace directory (${workspaceDir}) or /tmp directory. ` + `Provided path: ${filePath} (resolved to: ${absolutePath})`); + } + + // Validate file exists + if (!fs.existsSync(filePath)) { + throw new Error(`File not found: ${filePath}`); + } + + // Get file stats + const stats = fs.statSync(filePath); + const sizeBytes = stats.size; + const sizeKB = Math.ceil(sizeBytes / 1024); + + // Check file size - read from environment variable if available + const maxSizeKB = process.env.GH_AW_ASSETS_MAX_SIZE_KB ? parseInt(process.env.GH_AW_ASSETS_MAX_SIZE_KB, 10) : 10240; // Default 10MB + if (sizeKB > maxSizeKB) { + throw new Error(`File size ${sizeKB} KB exceeds maximum allowed size ${maxSizeKB} KB`); + } + + // Check file extension - read from environment variable if available + const ext = path.extname(filePath).toLowerCase(); + const allowedExts = process.env.GH_AW_ASSETS_ALLOWED_EXTS + ? process.env.GH_AW_ASSETS_ALLOWED_EXTS.split(",").map(ext => ext.trim()) + : [ + // Default set as specified in problem statement + ".png", + ".jpg", + ".jpeg", + ]; + + if (!allowedExts.includes(ext)) { + throw new Error(`File extension '${ext}' is not allowed. Allowed extensions: ${allowedExts.join(", ")}`); + } + + // Create assets directory + const assetsDir = "/tmp/gh-aw/safeoutputs/assets"; + if (!fs.existsSync(assetsDir)) { + fs.mkdirSync(assetsDir, { recursive: true }); + } + + // Read file and compute hash + const fileContent = fs.readFileSync(filePath); + const sha = crypto.createHash("sha256").update(fileContent).digest("hex"); + + // Extract filename and extension + const fileName = path.basename(filePath); + const fileExt = path.extname(fileName).toLowerCase(); + + // Copy file to assets directory with original name + const targetPath = path.join(assetsDir, fileName); + fs.copyFileSync(filePath, targetPath); + + // Generate target filename as sha + extension (lowercased) + const targetFileName = (sha + fileExt).toLowerCase(); + + const githubServer = process.env.GITHUB_SERVER_URL || "https://github.com"; + const repo = process.env.GITHUB_REPOSITORY || "owner/repo"; + const url = `${githubServer.replace("github.com", "raw.githubusercontent.com")}/${repo}/${normalizedBranchName}/${targetFileName}`; + + // Create entry for safe outputs + const entry = { + type: "upload_asset", + path: filePath, + fileName: fileName, + sha: sha, + size: sizeBytes, + url: url, + targetFileName: targetFileName, + }; + + appendSafeOutput(entry); + + return { + content: [ + { + type: "text", + text: JSON.stringify({ result: url }), + }, + ], + }; + }; + + /** + * Handler for create_pull_request tool + * Resolves the current branch if branch is not provided or is the base branch + * Generates git patch for the changes (unless allow-empty is true) + */ + const createPullRequestHandler = args => { + const entry = { ...args, type: "create_pull_request" }; + const baseBranch = getBaseBranch(); + + // If branch is not provided, is empty, or equals the base branch, use the current branch from git + // This handles cases where the agent incorrectly passes the base branch instead of the working branch + if (!entry.branch || entry.branch.trim() === "" || entry.branch === baseBranch) { + const detectedBranch = getCurrentBranch(); + + if (entry.branch === baseBranch) { + server.debug(`Branch equals base branch (${baseBranch}), detecting actual working branch: ${detectedBranch}`); + } else { + server.debug(`Using current branch for create_pull_request: ${detectedBranch}`); + } + + entry.branch = detectedBranch; + } + + // Check if allow-empty is enabled in configuration + const allowEmpty = config.create_pull_request?.allow_empty === true; + + if (allowEmpty) { + server.debug(`allow-empty is enabled for create_pull_request - skipping patch generation`); + // Append the safe output entry without generating a patch + appendSafeOutput(entry); + return { + content: [ + { + type: "text", + text: JSON.stringify({ + result: "success", + message: "Pull request prepared (allow-empty mode - no patch generated)", + branch: entry.branch, + }), + }, + ], + }; + } + + // Generate git patch + server.debug(`Generating patch for create_pull_request with branch: ${entry.branch}`); + const patchResult = generateGitPatch(entry.branch); + + if (!patchResult.success) { + // Patch generation failed or patch is empty + const errorMsg = patchResult.error || "Failed to generate patch"; + server.debug(`Patch generation failed: ${errorMsg}`); + throw new Error(errorMsg); + } + + // prettier-ignore + server.debug(`Patch generated successfully: ${patchResult.patchPath} (${patchResult.patchSize} bytes, ${patchResult.patchLines} lines)`); + + appendSafeOutput(entry); + return { + content: [ + { + type: "text", + text: JSON.stringify({ + result: "success", + patch: { + path: patchResult.patchPath, + size: patchResult.patchSize, + lines: patchResult.patchLines, + }, + }), + }, + ], + }; + }; + + /** + * Handler for push_to_pull_request_branch tool + * Resolves the current branch if branch is not provided or is the base branch + * Generates git patch for the changes + */ + const pushToPullRequestBranchHandler = args => { + const entry = { ...args, type: "push_to_pull_request_branch" }; + const baseBranch = getBaseBranch(); + + // If branch is not provided, is empty, or equals the base branch, use the current branch from git + // This handles cases where the agent incorrectly passes the base branch instead of the working branch + if (!entry.branch || entry.branch.trim() === "" || entry.branch === baseBranch) { + const detectedBranch = getCurrentBranch(); + + if (entry.branch === baseBranch) { + server.debug(`Branch equals base branch (${baseBranch}), detecting actual working branch: ${detectedBranch}`); + } else { + server.debug(`Using current branch for push_to_pull_request_branch: ${detectedBranch}`); + } + + entry.branch = detectedBranch; + } + + // Generate git patch + server.debug(`Generating patch for push_to_pull_request_branch with branch: ${entry.branch}`); + const patchResult = generateGitPatch(entry.branch); + + if (!patchResult.success) { + // Patch generation failed or patch is empty + const errorMsg = patchResult.error || "Failed to generate patch"; + server.debug(`Patch generation failed: ${errorMsg}`); + throw new Error(errorMsg); + } + + // prettier-ignore + server.debug(`Patch generated successfully: ${patchResult.patchPath} (${patchResult.patchSize} bytes, ${patchResult.patchLines} lines)`); + + appendSafeOutput(entry); + return { + content: [ + { + type: "text", + text: JSON.stringify({ + result: "success", + patch: { + path: patchResult.patchPath, + size: patchResult.patchSize, + lines: patchResult.patchLines, + }, + }), + }, + ], + }; + }; + + return { + defaultHandler, + uploadAssetHandler, + createPullRequestHandler, + pushToPullRequestBranchHandler, + }; +} + +module.exports = { createHandlers }; diff --git a/pkg/workflow/js/safe_outputs_mcp_server.cjs b/pkg/workflow/js/safe_outputs_mcp_server.cjs new file mode 100644 index 0000000000..0abd29c73a --- /dev/null +++ b/pkg/workflow/js/safe_outputs_mcp_server.cjs @@ -0,0 +1,80 @@ +// @ts-check + +// Safe Outputs MCP Server Module +// +// This module provides a reusable MCP server for safe-outputs configuration. +// It uses the mcp_server_core module for JSON-RPC handling and tool registration. +// +// Usage: +// node safe_outputs_mcp_server.cjs +// +// Or as a module: +// const server = require("./safe_outputs_mcp_server.cjs"); +// server.startSafeOutputsServer(); + +const { createServer, registerTool, normalizeTool, start } = require("./mcp_server_core.cjs"); +const { createAppendFunction } = require("./safe_outputs_append.cjs"); +const { createHandlers } = require("./safe_outputs_handlers.cjs"); +const { attachHandlers, registerPredefinedTools, registerDynamicTools } = require("./safe_outputs_tools_loader.cjs"); +const { bootstrapSafeOutputsServer, cleanupConfigFile } = require("./safe_outputs_bootstrap.cjs"); + +/** + * Start the safe-outputs MCP server + * @param {Object} [options] - Additional options + * @param {string} [options.logDir] - Override log directory + * @param {boolean} [options.skipCleanup] - Skip deletion of config file (useful for testing) + */ +function startSafeOutputsServer(options = {}) { + // Server info for safe outputs MCP server + const SERVER_INFO = { name: "safeoutputs", version: "1.0.0" }; + + // Create the server instance with optional log directory + const MCP_LOG_DIR = options.logDir || process.env.GH_AW_MCP_LOG_DIR; + const server = createServer(SERVER_INFO, { logDir: MCP_LOG_DIR }); + + // Bootstrap: load configuration and tools using shared logic + const { config: safeOutputsConfig, outputFile, tools: ALL_TOOLS } = bootstrapSafeOutputsServer(server); + + // Create append function + const appendSafeOutput = createAppendFunction(outputFile); + + // Create handlers with configuration + const handlers = createHandlers(server, appendSafeOutput, safeOutputsConfig); + const { defaultHandler } = handlers; + + // Attach handlers to tools + const toolsWithHandlers = attachHandlers(ALL_TOOLS, handlers); + + server.debug(` output file: ${outputFile}`); + server.debug(` config: ${JSON.stringify(safeOutputsConfig)}`); + + // Register predefined tools that are enabled in configuration + registerPredefinedTools(server, toolsWithHandlers, safeOutputsConfig, registerTool, normalizeTool); + + // Add safe-jobs as dynamic tools + registerDynamicTools(server, toolsWithHandlers, safeOutputsConfig, outputFile, registerTool, normalizeTool); + + server.debug(` tools: ${Object.keys(server.tools).join(", ")}`); + if (!Object.keys(server.tools).length) throw new Error("No tools enabled in configuration"); + + // Note: We do NOT cleanup the config file here because it's needed by the ingestion + // phase (collect_ndjson_output.cjs) that runs after the MCP server completes. + // The config file only contains schema information (no secrets), so it's safe to leave. + + // Start the server with the default handler + start(server, { defaultHandler }); +} + +// If run directly, start the server +if (require.main === module) { + try { + startSafeOutputsServer(); + } catch (error) { + console.error(`Error starting safe-outputs server: ${error instanceof Error ? error.message : String(error)}`); + process.exit(1); + } +} + +module.exports = { + startSafeOutputsServer, +}; diff --git a/pkg/workflow/js/safe_outputs_tools_loader.cjs b/pkg/workflow/js/safe_outputs_tools_loader.cjs new file mode 100644 index 0000000000..2e9edb80f2 --- /dev/null +++ b/pkg/workflow/js/safe_outputs_tools_loader.cjs @@ -0,0 +1,164 @@ +// @ts-check + +const fs = require("fs"); + +/** + * Load tools from tools.json file + * @param {Object} server - The MCP server instance for logging + * @returns {Array} Array of tool definitions + */ +function loadTools(server) { + const toolsPath = process.env.GH_AW_SAFE_OUTPUTS_TOOLS_PATH || "/tmp/gh-aw/safeoutputs/tools.json"; + + server.debug(`Reading tools from file: ${toolsPath}`); + + if (!fs.existsSync(toolsPath)) { + server.debug(`Tools file does not exist at: ${toolsPath}`); + server.debug(`Using empty tools array`); + return []; + } + + try { + server.debug(`Tools file exists at: ${toolsPath}`); + const toolsFileContent = fs.readFileSync(toolsPath, "utf8"); + server.debug(`Tools file content length: ${toolsFileContent.length} characters`); + server.debug(`Tools file read successfully, attempting to parse JSON`); + const tools = JSON.parse(toolsFileContent); + server.debug(`Successfully parsed ${tools.length} tools from file`); + return tools; + } catch (error) { + server.debug(`Error reading tools file: ${error instanceof Error ? error.message : String(error)}`); + server.debug(`Falling back to empty tools array`); + return []; + } +} + +/** + * Attach handlers to tools + * @param {Array} tools - Array of tool definitions + * @param {Object} handlers - Object containing handler functions + * @returns {Array} Tools with handlers attached + */ +function attachHandlers(tools, handlers) { + const handlerMap = { + create_pull_request: handlers.createPullRequestHandler, + push_to_pull_request_branch: handlers.pushToPullRequestBranchHandler, + upload_asset: handlers.uploadAssetHandler, + }; + + tools.forEach(tool => { + const handler = handlerMap[tool.name]; + if (handler) { + tool.handler = handler; + } + }); + + return tools; +} + +/** + * Register predefined tools based on configuration + * @param {Object} server - The MCP server instance + * @param {Array} tools - Array of tool definitions + * @param {Object} config - Safe outputs configuration + * @param {Function} registerTool - Function to register a tool + * @param {Function} normalizeTool - Function to normalize tool names + */ +function registerPredefinedTools(server, tools, config, registerTool, normalizeTool) { + tools.forEach(tool => { + if (Object.keys(config).find(configKey => normalizeTool(configKey) === tool.name)) { + registerTool(server, tool); + } + }); +} + +/** + * Register dynamic safe-job tools based on configuration + * @param {Object} server - The MCP server instance + * @param {Array} tools - Array of predefined tool definitions + * @param {Object} config - Safe outputs configuration + * @param {string} outputFile - Path to the output file + * @param {Function} registerTool - Function to register a tool + * @param {Function} normalizeTool - Function to normalize tool names + */ +function registerDynamicTools(server, tools, config, outputFile, registerTool, normalizeTool) { + Object.keys(config).forEach(configKey => { + const normalizedKey = normalizeTool(configKey); + + // Skip if it's already a predefined tool + if (server.tools[normalizedKey] || tools.find(t => t.name === normalizedKey)) { + return; + } + + const jobConfig = config[configKey]; + + // Create a dynamic tool for this safe-job + const dynamicTool = { + name: normalizedKey, + description: jobConfig?.description ?? `Custom safe-job: ${configKey}`, + inputSchema: { + type: "object", + properties: {}, + additionalProperties: true, // Allow any properties for flexibility + }, + handler: args => { + // Create a generic safe-job output entry + const entry = { type: normalizedKey, ...args }; + + // Write the entry to the output file in JSONL format + // CRITICAL: Use JSON.stringify WITHOUT formatting parameters for JSONL format + // Each entry must be on a single line, followed by a newline character + fs.appendFileSync(outputFile, `${JSON.stringify(entry)}\n`); + + // Use output from safe-job config if available + const outputText = jobConfig?.output ?? `Safe-job '${configKey}' executed successfully with arguments: ${JSON.stringify(args)}`; + + return { + content: [{ type: "text", text: JSON.stringify({ result: outputText }) }], + }; + }, + }; + + // Add input schema based on job configuration if available + if (jobConfig?.inputs) { + dynamicTool.inputSchema.properties = {}; + dynamicTool.inputSchema.required = []; + + Object.keys(jobConfig.inputs).forEach(inputName => { + const inputDef = jobConfig.inputs[inputName]; + + // Convert GitHub Actions choice type to JSON Schema string type + // GitHub Actions uses "choice" type with "options" array + // JSON Schema requires "string" type with "enum" array + let jsonSchemaType = inputDef.type || "string"; + if (jsonSchemaType === "choice") { + jsonSchemaType = "string"; + } + + const propSchema = { + type: jsonSchemaType, + description: inputDef.description || `Input parameter: ${inputName}`, + }; + + if (Array.isArray(inputDef.options)) { + propSchema.enum = inputDef.options; + } + + dynamicTool.inputSchema.properties[inputName] = propSchema; + + if (inputDef.required) { + dynamicTool.inputSchema.required.push(inputName); + } + }); + } + + registerTool(server, dynamicTool); + }); +} + +module.exports = { + loadTools, + attachHandlers, + registerPredefinedTools, + registerDynamicTools, +}; diff --git a/pkg/workflow/js/sanitize_content.cjs b/pkg/workflow/js/sanitize_content.cjs new file mode 100644 index 0000000000..635ec6e4b6 --- /dev/null +++ b/pkg/workflow/js/sanitize_content.cjs @@ -0,0 +1,117 @@ +// @ts-check +/** + * Full sanitization utilities with mention filtering support + * This module provides the complete sanitization with selective mention filtering. + * For incoming text that doesn't need mention filtering, use sanitize_incoming_text.cjs instead. + */ + +const { + sanitizeContentCore, + getRedactedDomains, + clearRedactedDomains, + writeRedactedDomainsLog, + buildAllowedDomains, + sanitizeUrlProtocols, + sanitizeUrlDomains, + neutralizeCommands, + removeXmlComments, + convertXmlTags, + neutralizeBotTriggers, + applyTruncation, +} = require("./sanitize_content_core.cjs"); + +/** + * @typedef {Object} SanitizeOptions + * @property {number} [maxLength] - Maximum length of content (default: 524288) + * @property {string[]} [allowedAliases] - List of aliases (@mentions) that should not be neutralized + */ + +/** + * Sanitizes content for safe output in GitHub Actions with optional mention filtering + * @param {string} content - The content to sanitize + * @param {number | SanitizeOptions} [maxLengthOrOptions] - Maximum length of content (default: 524288) or options object + * @returns {string} The sanitized content + */ +function sanitizeContent(content, maxLengthOrOptions) { + // Handle both old signature (maxLength) and new signature (options object) + /** @type {number | undefined} */ + let maxLength; + /** @type {string[]} */ + let allowedAliasesLowercase = []; + + if (typeof maxLengthOrOptions === "number") { + maxLength = maxLengthOrOptions; + } else if (maxLengthOrOptions && typeof maxLengthOrOptions === "object") { + maxLength = maxLengthOrOptions.maxLength; + // Pre-process allowed aliases to lowercase for efficient comparison + allowedAliasesLowercase = (maxLengthOrOptions.allowedAliases || []).map(alias => alias.toLowerCase()); + } + + // If no allowed aliases specified, use core sanitization (which neutralizes all mentions) + if (allowedAliasesLowercase.length === 0) { + return sanitizeContentCore(content, maxLength); + } + + // If allowed aliases are specified, we need custom mention filtering + // We'll apply the same sanitization pipeline but with selective mention filtering + + if (!content || typeof content !== "string") { + return ""; + } + + // Build list of allowed domains (shared with core) + const allowedDomains = buildAllowedDomains(); + + let sanitized = content; + + // Remove ANSI escape sequences and control characters early + sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ""); + sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ""); + + // Neutralize commands at the start of text + sanitized = neutralizeCommands(sanitized); + + // Neutralize @mentions with selective filtering (custom logic for allowed aliases) + sanitized = neutralizeMentions(sanitized, allowedAliasesLowercase); + + // Remove XML comments + sanitized = removeXmlComments(sanitized); + + // Convert XML tags + sanitized = convertXmlTags(sanitized); + + // URI filtering (shared with core) + sanitized = sanitizeUrlProtocols(sanitized); + sanitized = sanitizeUrlDomains(sanitized, allowedDomains); + + // Apply truncation limits (shared with core) + sanitized = applyTruncation(sanitized, maxLength); + + // Neutralize bot triggers + sanitized = neutralizeBotTriggers(sanitized); + + return sanitized.trim(); + + /** + * Neutralize @mentions with selective filtering + * @param {string} s - The string to process + * @param {string[]} allowedLowercase - List of allowed aliases (lowercase) + * @returns {string} Processed string + */ + function neutralizeMentions(s, allowedLowercase) { + return s.replace(/(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, (_m, p1, p2) => { + // Check if this mention is in the allowed aliases list (case-insensitive) + const isAllowed = allowedLowercase.includes(p2.toLowerCase()); + if (isAllowed) { + return `${p1}@${p2}`; // Keep the original mention + } + // Log when a mention is escaped + if (typeof core !== "undefined" && core.info) { + core.info(`Escaped mention: @${p2} (not in allowed list)`); + } + return `${p1}\`@${p2}\``; // Neutralize the mention + }); + } +} + +module.exports = { sanitizeContent, getRedactedDomains, clearRedactedDomains, writeRedactedDomainsLog }; diff --git a/pkg/workflow/js/sanitize_content_core.cjs b/pkg/workflow/js/sanitize_content_core.cjs new file mode 100644 index 0000000000..ad24f1fa2b --- /dev/null +++ b/pkg/workflow/js/sanitize_content_core.cjs @@ -0,0 +1,431 @@ +// @ts-check +/** + * Core sanitization utilities without mention filtering + * This module provides the base sanitization functions that don't require + * mention resolution or filtering. It's designed to be imported by both + * sanitize_content.cjs (full version) and sanitize_incoming_text.cjs (minimal version). + */ + +/** + * Module-level set to collect redacted URL domains across sanitization calls. + * @type {string[]} + */ +const redactedDomains = []; + +/** + * Gets the list of redacted URL domains collected during sanitization. + * @returns {string[]} Array of redacted domain strings + */ +function getRedactedDomains() { + return [...redactedDomains]; +} + +/** + * Adds a domain to the redacted domains list + * @param {string} domain - Domain to add + */ +function addRedactedDomain(domain) { + redactedDomains.push(domain); +} + +/** + * Clears the list of redacted URL domains. + * Useful for testing or resetting state between operations. + */ +function clearRedactedDomains() { + redactedDomains.length = 0; +} + +/** + * Writes the collected redacted URL domains to a log file. + * Only creates the file if there are redacted domains. + * @param {string} [filePath] - Path to write the log file. Defaults to /tmp/gh-aw/redacted-urls.log + * @returns {string|null} The file path if written, null if no domains to write + */ +function writeRedactedDomainsLog(filePath) { + if (redactedDomains.length === 0) { + return null; + } + + const fs = require("fs"); + const path = require("path"); + const targetPath = filePath || "/tmp/gh-aw/redacted-urls.log"; + + // Ensure directory exists + const dir = path.dirname(targetPath); + if (!fs.existsSync(dir)) { + fs.mkdirSync(dir, { recursive: true }); + } + + // Write domains to file, one per line + fs.writeFileSync(targetPath, redactedDomains.join("\n") + "\n"); + + return targetPath; +} + +/** + * Extract domains from a URL and return an array of domain variations + * @param {string} url - The URL to extract domains from + * @returns {string[]} Array of domain variations + */ +function extractDomainsFromUrl(url) { + if (!url || typeof url !== "string") { + return []; + } + + try { + // Parse the URL + const urlObj = new URL(url); + const hostname = urlObj.hostname.toLowerCase(); + + // Return both the exact hostname and common variations + const domains = [hostname]; + + // For github.com, add api and raw content domain variations + if (hostname === "github.com") { + domains.push("api.github.com"); + domains.push("raw.githubusercontent.com"); + domains.push("*.githubusercontent.com"); + } + // For custom GitHub Enterprise domains, add api. prefix and raw content variations + else if (!hostname.startsWith("api.")) { + domains.push("api." + hostname); + // For GitHub Enterprise, raw content is typically served from raw.hostname + domains.push("raw." + hostname); + } + + return domains; + } catch (e) { + // Invalid URL, return empty array + return []; + } +} + +/** + * Build the list of allowed domains from environment variables and GitHub context + * @returns {string[]} Array of allowed domains + */ +function buildAllowedDomains() { + const allowedDomainsEnv = process.env.GH_AW_ALLOWED_DOMAINS; + const defaultAllowedDomains = ["github.com", "github.io", "githubusercontent.com", "githubassets.com", "github.dev", "codespaces.new"]; + + let allowedDomains = allowedDomainsEnv + ? allowedDomainsEnv + .split(",") + .map(d => d.trim()) + .filter(d => d) + : defaultAllowedDomains; + + // Extract and add GitHub domains from GitHub context URLs + const githubServerUrl = process.env.GITHUB_SERVER_URL; + const githubApiUrl = process.env.GITHUB_API_URL; + + if (githubServerUrl) { + const serverDomains = extractDomainsFromUrl(githubServerUrl); + allowedDomains = allowedDomains.concat(serverDomains); + } + + if (githubApiUrl) { + const apiDomains = extractDomainsFromUrl(githubApiUrl); + allowedDomains = allowedDomains.concat(apiDomains); + } + + // Remove duplicates + return [...new Set(allowedDomains)]; +} + +/** + * Sanitize URL protocols - replace non-https with (redacted) + * @param {string} s - The string to process + * @returns {string} The string with non-https protocols redacted + */ +function sanitizeUrlProtocols(s) { + // Match common non-https protocols + // This regex matches: protocol://domain or protocol:path or incomplete protocol:// + // Examples: http://, ftp://, file://, data:, javascript:, mailto:, tel:, ssh://, git:// + // The regex also matches incomplete protocols like "http://" or "ftp://" without a domain + // Note: No word boundary check to catch protocols even when preceded by word characters + return s.replace(/((?:http|ftp|file|ssh|git):\/\/([\w.-]*)(?:[^\s]*)|(?:data|javascript|vbscript|about|mailto|tel):[^\s]+)/gi, (match, _fullMatch, domain) => { + // Extract domain for http/ftp/file/ssh/git protocols + if (domain) { + const domainLower = domain.toLowerCase(); + const truncated = domainLower.length > 12 ? domainLower.substring(0, 12) + "..." : domainLower; + if (typeof core !== "undefined" && core.info) { + core.info(`Redacted URL: ${truncated}`); + } + if (typeof core !== "undefined" && core.debug) { + core.debug(`Redacted URL (full): ${match}`); + } + addRedactedDomain(domainLower); + } else { + // For other protocols (data:, javascript:, etc.), track the protocol itself + const protocolMatch = match.match(/^([^:]+):/); + if (protocolMatch) { + const protocol = protocolMatch[1] + ":"; + // Truncate the matched URL for logging (keep first 12 chars + "...") + const truncated = match.length > 12 ? match.substring(0, 12) + "..." : match; + if (typeof core !== "undefined" && core.info) { + core.info(`Redacted URL: ${truncated}`); + } + if (typeof core !== "undefined" && core.debug) { + core.debug(`Redacted URL (full): ${match}`); + } + addRedactedDomain(protocol); + } + } + return "(redacted)"; + }); +} + +/** + * Remove unknown domains + * @param {string} s - The string to process + * @param {string[]} allowed - List of allowed domains + * @returns {string} The string with unknown domains redacted + */ +function sanitizeUrlDomains(s, allowed) { + // Match HTTPS URLs with optional port and path + // This regex is designed to: + // 1. Match https:// URIs with explicit protocol + // 2. Capture the hostname/domain + // 3. Allow optional port (:8080) + // 4. Allow optional path and query string (but not trailing commas/periods) + // 5. Stop before another https:// URL in query params (using negative lookahead) + const httpsUrlRegex = /https:\/\/([\w.-]+(?::\d+)?)(\/(?:(?!https:\/\/)[^\s,])*)?/gi; + + return s.replace(httpsUrlRegex, (match, hostnameWithPort, pathPart) => { + // Extract just the hostname (remove port if present) + const hostname = hostnameWithPort.split(":")[0].toLowerCase(); + pathPart = pathPart || ""; + + // Check if domain is in the allowed list or is a subdomain of an allowed domain + const isAllowed = allowed.some(allowedDomain => { + const normalizedAllowed = allowedDomain.toLowerCase(); + + // Exact match + if (hostname === normalizedAllowed) { + return true; + } + + // Wildcard match (*.example.com matches subdomain.example.com) + if (normalizedAllowed.startsWith("*.")) { + const baseDomain = normalizedAllowed.substring(2); // Remove *. + return hostname.endsWith("." + baseDomain) || hostname === baseDomain; + } + + // Subdomain match (example.com matches subdomain.example.com) + return hostname.endsWith("." + normalizedAllowed); + }); + + if (isAllowed) { + return match; // Keep the full URL as-is + } else { + // Redact the domain but preserve the protocol and structure for debugging + const truncated = hostname.length > 12 ? hostname.substring(0, 12) + "..." : hostname; + if (typeof core !== "undefined" && core.info) { + core.info(`Redacted URL: ${truncated}`); + } + if (typeof core !== "undefined" && core.debug) { + core.debug(`Redacted URL (full): ${match}`); + } + addRedactedDomain(hostname); + return "(redacted)"; + } + }); +} + +/** + * Neutralizes commands at the start of text by wrapping them in backticks + * @param {string} s - The string to process + * @returns {string} The string with neutralized commands + */ +function neutralizeCommands(s) { + const commandName = process.env.GH_AW_COMMAND; + if (!commandName) { + return s; + } + + // Escape special regex characters in command name + const escapedCommand = commandName.replace(/[.*+?^${}()|[\]\\]/g, "\\$&"); + + // Neutralize /command at the start of text (with optional leading whitespace) + // Only match at the start of the string or after leading whitespace + return s.replace(new RegExp(`^(\\s*)/(${escapedCommand})\\b`, "i"), "$1`/$2`"); +} + +/** + * Neutralizes ALL @mentions by wrapping them in backticks + * This is the core version without any filtering + * @param {string} s - The string to process + * @returns {string} The string with neutralized mentions + */ +function neutralizeAllMentions(s) { + // Replace @name or @org/team outside code with `@name` + // No filtering - all mentions are neutralized + return s.replace(/(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, (m, p1, p2) => { + // Log when a mention is escaped to help debug issues + if (typeof core !== "undefined" && core.info) { + core.info(`Escaped mention: @${p2} (not in allowed list)`); + } + return `${p1}\`@${p2}\``; + }); +} + +/** + * Removes XML comments from content + * @param {string} s - The string to process + * @returns {string} The string with XML comments removed + */ +function removeXmlComments(s) { + // Remove and malformed + // Consolidated into single atomic regex to prevent intermediate state vulnerabilities + // The pattern and + // Apply repeatedly to handle nested/overlapping patterns that could reintroduce comment markers + let previous; + do { + previous = s; + s = s.replace(/`; +} + +/** + * Build the island end marker for replace-island mode + * @param {number} runId - Workflow run ID + * @returns {string} Island end marker + */ +function buildIslandEndMarker(runId) { + return ``; +} + +/** + * Find and extract island content from body + * @param {string} body - The body content to search + * @param {number} runId - Workflow run ID + * @returns {{found: boolean, startIndex: number, endIndex: number}} Island location info + */ +function findIsland(body, runId) { + const startMarker = buildIslandStartMarker(runId); + const endMarker = buildIslandEndMarker(runId); + + const startIndex = body.indexOf(startMarker); + if (startIndex === -1) { + return { found: false, startIndex: -1, endIndex: -1 }; + } + + const endIndex = body.indexOf(endMarker, startIndex); + if (endIndex === -1) { + return { found: false, startIndex: -1, endIndex: -1 }; + } + + return { found: true, startIndex, endIndex: endIndex + endMarker.length }; +} + +/** + * Update PR body with the specified operation + * @param {Object} params - Update parameters + * @param {string} params.currentBody - Current PR body content + * @param {string} params.newContent - New content to add/replace + * @param {string} params.operation - Operation type: "append", "prepend", "replace", or "replace-island" + * @param {string} params.workflowName - Name of the workflow + * @param {string} params.runUrl - URL of the workflow run + * @param {number} params.runId - Workflow run ID + * @returns {string} Updated body content + */ +function updatePRBody(params) { + const { currentBody, newContent, operation, workflowName, runUrl, runId } = params; + const aiFooter = buildAIFooter(workflowName, runUrl); + + if (operation === "replace") { + // Replace: just use the new content as-is + core.info("Operation: replace (full body replacement)"); + return newContent; + } + + if (operation === "replace-island") { + // Try to find existing island for this run ID + const island = findIsland(currentBody, runId); + + if (island.found) { + // Replace the island content + core.info(`Operation: replace-island (updating existing island for run ${runId})`); + const startMarker = buildIslandStartMarker(runId); + const endMarker = buildIslandEndMarker(runId); + const islandContent = `${startMarker}\n${newContent}${aiFooter}\n${endMarker}`; + + const before = currentBody.substring(0, island.startIndex); + const after = currentBody.substring(island.endIndex); + return before + islandContent + after; + } else { + // Island not found, fall back to append mode + core.info(`Operation: replace-island (island not found for run ${runId}, falling back to append)`); + const startMarker = buildIslandStartMarker(runId); + const endMarker = buildIslandEndMarker(runId); + const islandContent = `${startMarker}\n${newContent}${aiFooter}\n${endMarker}`; + const appendSection = `\n\n---\n\n${islandContent}`; + return currentBody + appendSection; + } + } + + if (operation === "prepend") { + // Prepend: add content, AI footer, and horizontal line at the start + core.info("Operation: prepend (add to start with separator)"); + const prependSection = `${newContent}${aiFooter}\n\n---\n\n`; + return prependSection + currentBody; + } + + // Default to append + core.info("Operation: append (add to end with separator)"); + const appendSection = `\n\n---\n\n${newContent}${aiFooter}`; + return currentBody + appendSection; +} + +module.exports = { + buildAIFooter, + buildIslandStartMarker, + buildIslandEndMarker, + findIsland, + updatePRBody, +}; diff --git a/pkg/workflow/js/update_project.cjs b/pkg/workflow/js/update_project.cjs new file mode 100644 index 0000000000..21939dd7ab --- /dev/null +++ b/pkg/workflow/js/update_project.cjs @@ -0,0 +1,417 @@ +const { loadAgentOutput } = require("./load_agent_output.cjs"); +function logGraphQLError(error, operation) { + (core.info(`GraphQL Error during: ${operation}`), core.info(`Message: ${error.message}`)); + const errorList = Array.isArray(error.errors) ? error.errors : [], + hasInsufficientScopes = errorList.some(e => e && "INSUFFICIENT_SCOPES" === e.type), + hasNotFound = errorList.some(e => e && "NOT_FOUND" === e.type); + (hasInsufficientScopes + ? core.info( + "This looks like a token permission problem for Projects v2. The GraphQL fields used by update_project require a token with Projects access (classic PAT: scope 'project'; fine-grained PAT: Organization permission 'Projects' and access to the org). Fix: set safe-outputs.update-project.github-token to a secret PAT that can access the target org project." + ) + : hasNotFound && + /projectV2\b/.test(error.message) && + core.info( + "GitHub returned NOT_FOUND for ProjectV2. This can mean either: (1) the project number is wrong for Projects v2, (2) the project is a classic Projects board (not Projects v2), or (3) the token does not have access to that org/user project." + ), + error.errors && + (core.info(`Errors array (${error.errors.length} error(s)):`), + error.errors.forEach((err, idx) => { + (core.info(` [${idx + 1}] ${err.message}`), + err.type && core.info(` Type: ${err.type}`), + err.path && core.info(` Path: ${JSON.stringify(err.path)}`), + err.locations && core.info(` Locations: ${JSON.stringify(err.locations)}`)); + })), + error.request && core.info(`Request: ${JSON.stringify(error.request, null, 2)}`), + error.data && core.info(`Response data: ${JSON.stringify(error.data, null, 2)}`)); +} +function parseProjectInput(projectUrl) { + if (!projectUrl || "string" != typeof projectUrl) throw new Error(`Invalid project input: expected string, got ${typeof projectUrl}. The "project" field is required and must be a full GitHub project URL.`); + const urlMatch = projectUrl.match(/github\.com\/(?:users|orgs)\/[^/]+\/projects\/(\d+)/); + if (!urlMatch) throw new Error(`Invalid project URL: "${projectUrl}". The "project" field must be a full GitHub project URL (e.g., https://github.com/orgs/myorg/projects/123).`); + return urlMatch[1]; +} +function parseProjectUrl(projectUrl) { + if (!projectUrl || "string" != typeof projectUrl) throw new Error(`Invalid project input: expected string, got ${typeof projectUrl}. The "project" field is required and must be a full GitHub project URL.`); + const match = projectUrl.match(/github\.com\/(users|orgs)\/([^/]+)\/projects\/(\d+)/); + if (!match) throw new Error(`Invalid project URL: "${projectUrl}". The "project" field must be a full GitHub project URL (e.g., https://github.com/orgs/myorg/projects/123).`); + return { scope: match[1], ownerLogin: match[2], projectNumber: match[3] }; +} +async function listAccessibleProjectsV2(projectInfo) { + const baseQuery = + "projectsV2(first: 100) {\n totalCount\n nodes {\n id\n number\n title\n closed\n url\n }\n edges {\n node {\n id\n number\n title\n closed\n url\n }\n }\n }"; + if ("orgs" === projectInfo.scope) { + const result = await github.graphql(`query($login: String!) {\n organization(login: $login) {\n ${baseQuery}\n }\n }`, { login: projectInfo.ownerLogin }), + conn = result && result.organization && result.organization.projectsV2, + rawNodes = conn && Array.isArray(conn.nodes) ? conn.nodes : [], + rawEdges = conn && Array.isArray(conn.edges) ? conn.edges : [], + nodeNodes = rawNodes.filter(Boolean), + edgeNodes = rawEdges.map(e => e && e.node).filter(Boolean), + unique = new Map(); + for (const n of [...nodeNodes, ...edgeNodes]) n && "string" == typeof n.id && unique.set(n.id, n); + return { + nodes: Array.from(unique.values()), + totalCount: conn && conn.totalCount, + diagnostics: { rawNodesCount: rawNodes.length, nullNodesCount: rawNodes.length - nodeNodes.length, rawEdgesCount: rawEdges.length, nullEdgeNodesCount: rawEdges.filter(e => !e || !e.node).length }, + }; + } + const result = await github.graphql(`query($login: String!) {\n user(login: $login) {\n ${baseQuery}\n }\n }`, { login: projectInfo.ownerLogin }), + conn = result && result.user && result.user.projectsV2, + rawNodes = conn && Array.isArray(conn.nodes) ? conn.nodes : [], + rawEdges = conn && Array.isArray(conn.edges) ? conn.edges : [], + nodeNodes = rawNodes.filter(Boolean), + edgeNodes = rawEdges.map(e => e && e.node).filter(Boolean), + unique = new Map(); + for (const n of [...nodeNodes, ...edgeNodes]) n && "string" == typeof n.id && unique.set(n.id, n); + return { + nodes: Array.from(unique.values()), + totalCount: conn && conn.totalCount, + diagnostics: { rawNodesCount: rawNodes.length, nullNodesCount: rawNodes.length - nodeNodes.length, rawEdgesCount: rawEdges.length, nullEdgeNodesCount: rawEdges.filter(e => !e || !e.node).length }, + }; +} +function summarizeProjectsV2(projects, limit = 20) { + if (!Array.isArray(projects) || 0 === projects.length) return "(none)"; + const normalized = projects + .filter(p => p && "number" == typeof p.number && "string" == typeof p.title) + .slice(0, limit) + .map(p => `#${p.number} ${p.closed ? "(closed) " : ""}${p.title}`); + return normalized.length > 0 ? normalized.join("; ") : "(none)"; +} +function summarizeEmptyProjectsV2List(list) { + const total = "number" == typeof list.totalCount ? list.totalCount : void 0, + d = list && list.diagnostics, + diag = d ? ` nodes=${d.rawNodesCount} (null=${d.nullNodesCount}), edges=${d.rawEdgesCount} (nullNode=${d.nullEdgeNodesCount})` : ""; + return "number" == typeof total && total > 0 + ? `(none; totalCount=${total} but returned 0 readable project nodes${diag}. This often indicates the token can see the org/user but lacks Projects v2 access, or the org enforces SSO and the token is not authorized.)` + : `(none${diag})`; +} +async function resolveProjectV2(projectInfo, projectNumberInt) { + try { + if ("orgs" === projectInfo.scope) { + const direct = await github.graphql( + "query($login: String!, $number: Int!) {\n organization(login: $login) {\n projectV2(number: $number) {\n id\n number\n title\n url\n }\n }\n }", + { login: projectInfo.ownerLogin, number: projectNumberInt } + ), + project = direct && direct.organization && direct.organization.projectV2; + if (project) return project; + } else { + const direct = await github.graphql( + "query($login: String!, $number: Int!) {\n user(login: $login) {\n projectV2(number: $number) {\n id\n number\n title\n url\n }\n }\n }", + { login: projectInfo.ownerLogin, number: projectNumberInt } + ), + project = direct && direct.user && direct.user.projectV2; + if (project) return project; + } + } catch (error) { + core.warning(`Direct projectV2(number) query failed; falling back to projectsV2 list search: ${error.message}`); + } + const list = await listAccessibleProjectsV2(projectInfo), + nodes = Array.isArray(list.nodes) ? list.nodes : [], + found = nodes.find(p => p && "number" == typeof p.number && p.number === projectNumberInt); + if (found) return found; + const summary = nodes.length > 0 ? summarizeProjectsV2(nodes) : summarizeEmptyProjectsV2List(list), + total = "number" == typeof list.totalCount ? ` (totalCount=${list.totalCount})` : "", + who = "orgs" === projectInfo.scope ? `org ${projectInfo.ownerLogin}` : `user ${projectInfo.ownerLogin}`; + throw new Error(`Project #${projectNumberInt} not found or not accessible for ${who}.${total} Accessible Projects v2: ${summary}`); +} +function generateCampaignId(projectUrl, projectNumber) { + const urlMatch = projectUrl.match(/github\.com\/(users|orgs)\/([^/]+)\/projects/); + return `${`${urlMatch ? urlMatch[2] : "project"}-project-${projectNumber}` + .toLowerCase() + .replace(/[^a-z0-9]+/g, "-") + .replace(/^-+|-+$/g, "") + .substring(0, 30)}-${Date.now().toString(36).substring(0, 8)}`; +} +async function updateProject(output) { + const { owner, repo } = context.repo, + projectInfo = parseProjectUrl(output.project), + projectNumberFromUrl = projectInfo.projectNumber, + campaignId = output.campaign_id; + try { + let repoResult; + (core.info(`Looking up project #${projectNumberFromUrl} from URL: ${output.project}`), core.info("[1/4] Fetching repository information...")); + try { + repoResult = await github.graphql( + "query($owner: String!, $repo: String!) {\n repository(owner: $owner, name: $repo) {\n id\n owner {\n id\n __typename\n }\n }\n }", + { owner, repo } + ); + } catch (error) { + throw (logGraphQLError(error, "Fetching repository information"), error); + } + const repositoryId = repoResult.repository.id, + ownerType = repoResult.repository.owner.__typename; + core.info(`✓ Repository: ${owner}/${repo} (${ownerType})`); + try { + const viewerResult = await github.graphql("query {\n viewer {\n login\n }\n }"); + viewerResult && viewerResult.viewer && viewerResult.viewer.login && core.info(`✓ Authenticated as: ${viewerResult.viewer.login}`); + } catch (viewerError) { + core.warning(`Could not resolve token identity (viewer.login): ${viewerError.message}`); + } + let projectId; + core.info(`[2/4] Resolving project from URL (scope=${projectInfo.scope}, login=${projectInfo.ownerLogin}, number=${projectNumberFromUrl})...`); + let resolvedProjectNumber = projectNumberFromUrl; + try { + const projectNumberInt = parseInt(projectNumberFromUrl, 10); + if (!Number.isFinite(projectNumberInt)) throw new Error(`Invalid project number parsed from URL: ${projectNumberFromUrl}`); + const project = await resolveProjectV2(projectInfo, projectNumberInt); + ((projectId = project.id), (resolvedProjectNumber = String(project.number)), core.info(`✓ Resolved project #${resolvedProjectNumber} (${projectInfo.ownerLogin}) (ID: ${projectId})`)); + } catch (error) { + throw (logGraphQLError(error, "Resolving project from URL"), error); + } + core.info("[3/4] Processing content (issue/PR/draft) if specified..."); + const hasContentNumber = void 0 !== output.content_number && null !== output.content_number, + hasIssue = void 0 !== output.issue && null !== output.issue, + hasPullRequest = void 0 !== output.pull_request && null !== output.pull_request, + values = []; + if ( + (hasContentNumber && values.push({ key: "content_number", value: output.content_number }), + hasIssue && values.push({ key: "issue", value: output.issue }), + hasPullRequest && values.push({ key: "pull_request", value: output.pull_request }), + values.length > 1) + ) { + const uniqueValues = [...new Set(values.map(v => String(v.value)))], + list = values.map(v => `${v.key}=${v.value}`).join(", "), + descriptor = uniqueValues.length > 1 ? "different values" : `same value "${uniqueValues[0]}"`; + core.warning(`Multiple content number fields (${descriptor}): ${list}. Using priority content_number > issue > pull_request.`); + } + (hasIssue && core.warning('Field "issue" deprecated; use "content_number" instead.'), hasPullRequest && core.warning('Field "pull_request" deprecated; use "content_number" instead.')); + + if ("draft_issue" === output.content_type) { + values.length > 0 && core.warning('content_number/issue/pull_request is ignored when content_type is "draft_issue".'); + const draftTitle = "string" == typeof output.draft_title ? output.draft_title.trim() : ""; + if (!draftTitle) throw new Error('Invalid draft_title. When content_type is "draft_issue", draft_title is required and must be a non-empty string.'); + const draftBody = "string" == typeof output.draft_body ? output.draft_body : void 0; + const itemId = ( + await github.graphql( + "mutation($projectId: ID!, $title: String!, $body: String) {\n addProjectV2DraftIssue(input: {\n projectId: $projectId,\n title: $title,\n body: $body\n }) {\n projectItem {\n id\n }\n }\n }", + { projectId, title: draftTitle, body: draftBody } + ) + ).addProjectV2DraftIssue.projectItem.id; + + const fieldsToUpdate = output.fields ? { ...output.fields } : {}; + if (Object.keys(fieldsToUpdate).length > 0) { + const projectFields = ( + await github.graphql( + "query($projectId: ID!) {\n node(id: $projectId) {\n ... on ProjectV2 {\n fields(first: 20) {\n nodes {\n ... on ProjectV2Field {\n id\n name\n dataType\n }\n ... on ProjectV2SingleSelectField {\n id\n name\n dataType\n options {\n id\n name\n color\n }\n }\n }\n }\n }\n }\n }", + { projectId } + ) + ).node.fields.nodes; + for (const [fieldName, fieldValue] of Object.entries(fieldsToUpdate)) { + const normalizedFieldName = fieldName + .split(/[\s_-]+/) + .map(word => word.charAt(0).toUpperCase() + word.slice(1).toLowerCase()) + .join(" "); + let valueToSet, + field = projectFields.find(f => f.name.toLowerCase() === normalizedFieldName.toLowerCase()); + if (!field) + if ("classification" === fieldName.toLowerCase() || ("string" == typeof fieldValue && fieldValue.includes("|"))) + try { + field = ( + await github.graphql( + "mutation($projectId: ID!, $name: String!, $dataType: ProjectV2CustomFieldType!) {\n createProjectV2Field(input: {\n projectId: $projectId,\n name: $name,\n dataType: $dataType\n }) {\n projectV2Field {\n ... on ProjectV2Field {\n id\n name\n }\n ... on ProjectV2SingleSelectField {\n id\n name\n options { id name }\n }\n }\n }\n }", + { projectId, name: normalizedFieldName, dataType: "TEXT" } + ) + ).createProjectV2Field.projectV2Field; + } catch (createError) { + core.warning(`Failed to create field "${fieldName}": ${createError.message}`); + continue; + } + else + try { + field = ( + await github.graphql( + "mutation($projectId: ID!, $name: String!, $dataType: ProjectV2CustomFieldType!, $options: [ProjectV2SingleSelectFieldOptionInput!]!) {\n createProjectV2Field(input: {\n projectId: $projectId,\n name: $name,\n dataType: $dataType,\n singleSelectOptions: $options\n }) {\n projectV2Field {\n ... on ProjectV2SingleSelectField {\n id\n name\n options { id name }\n }\n ... on ProjectV2Field {\n id\n name\n }\n }\n }\n }", + { projectId, name: normalizedFieldName, dataType: "SINGLE_SELECT", options: [{ name: String(fieldValue), description: "", color: "GRAY" }] } + ) + ).createProjectV2Field.projectV2Field; + } catch (createError) { + core.warning(`Failed to create field "${fieldName}": ${createError.message}`); + continue; + } + if (field.dataType === "DATE") valueToSet = { date: String(fieldValue) }; + else if (field.options) { + let option = field.options.find(o => o.name === fieldValue); + if (!option) + try { + const allOptions = [...field.options.map(o => ({ name: o.name, description: "", color: o.color || "GRAY" })), { name: String(fieldValue), description: "", color: "GRAY" }], + updatedField = ( + await github.graphql( + "mutation($fieldId: ID!, $fieldName: String!, $options: [ProjectV2SingleSelectFieldOptionInput!]!) {\n updateProjectV2Field(input: {\n fieldId: $fieldId,\n name: $fieldName,\n singleSelectOptions: $options\n }) {\n projectV2Field {\n ... on ProjectV2SingleSelectField {\n id\n options {\n id\n name\n }\n }\n }\n }\n }", + { fieldId: field.id, fieldName: field.name, options: allOptions } + ) + ).updateProjectV2Field.projectV2Field; + ((option = updatedField.options.find(o => o.name === fieldValue)), (field = updatedField)); + } catch (createError) { + core.warning(`Failed to create option "${fieldValue}": ${createError.message}`); + continue; + } + if (!option) { + core.warning(`Could not get option ID for "${fieldValue}" in field "${fieldName}"`); + continue; + } + valueToSet = { singleSelectOptionId: option.id }; + } else valueToSet = { text: String(fieldValue) }; + await github.graphql( + "mutation($projectId: ID!, $itemId: ID!, $fieldId: ID!, $value: ProjectV2FieldValue!) {\n updateProjectV2ItemFieldValue(input: {\n projectId: $projectId,\n itemId: $itemId,\n fieldId: $fieldId,\n value: $value\n }) {\n projectV2Item {\n id\n }\n }\n }", + { projectId, itemId, fieldId: field.id, value: valueToSet } + ); + } + } + + core.setOutput("item-id", itemId); + return; + } + let contentNumber = null; + if (hasContentNumber || hasIssue || hasPullRequest) { + const rawContentNumber = hasContentNumber ? output.content_number : hasIssue ? output.issue : output.pull_request, + sanitizedContentNumber = null == rawContentNumber ? "" : "number" == typeof rawContentNumber ? rawContentNumber.toString() : String(rawContentNumber).trim(); + if (sanitizedContentNumber) { + if (!/^\d+$/.test(sanitizedContentNumber)) throw new Error(`Invalid content number "${rawContentNumber}". Provide a positive integer.`); + contentNumber = Number.parseInt(sanitizedContentNumber, 10); + } else core.warning("Content number field provided but empty; skipping project item update."); + } + if (null !== contentNumber) { + const contentType = "pull_request" === output.content_type ? "PullRequest" : "issue" === output.content_type || output.issue ? "Issue" : "PullRequest", + contentQuery = + "Issue" === contentType + ? "query($owner: String!, $repo: String!, $number: Int!) {\n repository(owner: $owner, name: $repo) {\n issue(number: $number) {\n id\n createdAt\n closedAt\n }\n }\n }" + : "query($owner: String!, $repo: String!, $number: Int!) {\n repository(owner: $owner, name: $repo) {\n pullRequest(number: $number) {\n id\n createdAt\n closedAt\n }\n }\n }", + contentResult = await github.graphql(contentQuery, { owner, repo, number: contentNumber }), + contentData = "Issue" === contentType ? contentResult.repository.issue : contentResult.repository.pullRequest, + contentId = contentData.id, + createdAt = contentData.createdAt, + closedAt = contentData.closedAt, + existingItem = await (async function (projectId, contentId) { + let hasNextPage = !0, + endCursor = null; + for (; hasNextPage; ) { + const result = await github.graphql( + "query($projectId: ID!, $after: String) {\n node(id: $projectId) {\n ... on ProjectV2 {\n items(first: 100, after: $after) {\n nodes {\n id\n content {\n ... on Issue {\n id\n }\n ... on PullRequest {\n id\n }\n }\n }\n pageInfo {\n hasNextPage\n endCursor\n }\n }\n }\n }\n }", + { projectId, after: endCursor } + ), + found = result.node.items.nodes.find(item => item.content && item.content.id === contentId); + if (found) return found; + ((hasNextPage = result.node.items.pageInfo.hasNextPage), (endCursor = result.node.items.pageInfo.endCursor)); + } + return null; + })(projectId, contentId); + let itemId; + if (existingItem) ((itemId = existingItem.id), core.info("✓ Item already on board")); + else { + itemId = ( + await github.graphql( + "mutation($projectId: ID!, $contentId: ID!) {\n addProjectV2ItemById(input: {\n projectId: $projectId,\n contentId: $contentId\n }) {\n item {\n id\n }\n }\n }", + { projectId, contentId } + ) + ).addProjectV2ItemById.item.id; + if (campaignId) { + try { + await github.rest.issues.addLabels({ owner, repo, issue_number: contentNumber, labels: [`campaign:${campaignId}`] }); + } catch (labelError) { + core.warning(`Failed to add campaign label: ${labelError.message}`); + } + } + } + const fieldsToUpdate = output.fields ? { ...output.fields } : {}; + if (Object.keys(fieldsToUpdate).length > 0) { + const projectFields = ( + await github.graphql( + "query($projectId: ID!) {\n node(id: $projectId) {\n ... on ProjectV2 {\n fields(first: 20) {\n nodes {\n ... on ProjectV2Field {\n id\n name\n dataType\n }\n ... on ProjectV2SingleSelectField {\n id\n name\n dataType\n options {\n id\n name\n color\n }\n }\n }\n }\n }\n }\n }", + { projectId } + ) + ).node.fields.nodes; + for (const [fieldName, fieldValue] of Object.entries(fieldsToUpdate)) { + const normalizedFieldName = fieldName + .split(/[\s_-]+/) + .map(word => word.charAt(0).toUpperCase() + word.slice(1).toLowerCase()) + .join(" "); + let valueToSet, + field = projectFields.find(f => f.name.toLowerCase() === normalizedFieldName.toLowerCase()); + if (!field) + if ("classification" === fieldName.toLowerCase() || ("string" == typeof fieldValue && fieldValue.includes("|"))) + try { + field = ( + await github.graphql( + "mutation($projectId: ID!, $name: String!, $dataType: ProjectV2CustomFieldType!) {\n createProjectV2Field(input: {\n projectId: $projectId,\n name: $name,\n dataType: $dataType\n }) {\n projectV2Field {\n ... on ProjectV2Field {\n id\n name\n }\n ... on ProjectV2SingleSelectField {\n id\n name\n options { id name }\n }\n }\n }\n }", + { projectId, name: normalizedFieldName, dataType: "TEXT" } + ) + ).createProjectV2Field.projectV2Field; + } catch (createError) { + core.warning(`Failed to create field "${fieldName}": ${createError.message}`); + continue; + } + else + try { + field = ( + await github.graphql( + "mutation($projectId: ID!, $name: String!, $dataType: ProjectV2CustomFieldType!, $options: [ProjectV2SingleSelectFieldOptionInput!]!) {\n createProjectV2Field(input: {\n projectId: $projectId,\n name: $name,\n dataType: $dataType,\n singleSelectOptions: $options\n }) {\n projectV2Field {\n ... on ProjectV2SingleSelectField {\n id\n name\n options { id name }\n }\n ... on ProjectV2Field {\n id\n name\n }\n }\n }\n }", + { projectId, name: normalizedFieldName, dataType: "SINGLE_SELECT", options: [{ name: String(fieldValue), description: "", color: "GRAY" }] } + ) + ).createProjectV2Field.projectV2Field; + } catch (createError) { + core.warning(`Failed to create field "${fieldName}": ${createError.message}`); + continue; + } + // Check dataType first to properly handle DATE fields before checking for options + // This prevents date fields from being misidentified as single-select fields + if (field.dataType === "DATE") { + // Date fields use ProjectV2FieldValue input type with date property + // The date value must be in ISO 8601 format (YYYY-MM-DD) with no time component + // Unlike other field types that may require IDs, date fields accept the date string directly + valueToSet = { date: String(fieldValue) }; + } else if (field.options) { + let option = field.options.find(o => o.name === fieldValue); + if (!option) + try { + const allOptions = [...field.options.map(o => ({ name: o.name, description: "", color: o.color || "GRAY" })), { name: String(fieldValue), description: "", color: "GRAY" }], + updatedField = ( + await github.graphql( + "mutation($fieldId: ID!, $fieldName: String!, $options: [ProjectV2SingleSelectFieldOptionInput!]!) {\n updateProjectV2Field(input: {\n fieldId: $fieldId,\n name: $fieldName,\n singleSelectOptions: $options\n }) {\n projectV2Field {\n ... on ProjectV2SingleSelectField {\n id\n options {\n id\n name\n }\n }\n }\n }\n }", + { fieldId: field.id, fieldName: field.name, options: allOptions } + ) + ).updateProjectV2Field.projectV2Field; + ((option = updatedField.options.find(o => o.name === fieldValue)), (field = updatedField)); + } catch (createError) { + core.warning(`Failed to create option "${fieldValue}": ${createError.message}`); + continue; + } + if (!option) { + core.warning(`Could not get option ID for "${fieldValue}" in field "${fieldName}"`); + continue; + } + valueToSet = { singleSelectOptionId: option.id }; + } else valueToSet = { text: String(fieldValue) }; + await github.graphql( + "mutation($projectId: ID!, $itemId: ID!, $fieldId: ID!, $value: ProjectV2FieldValue!) {\n updateProjectV2ItemFieldValue(input: {\n projectId: $projectId,\n itemId: $itemId,\n fieldId: $fieldId,\n value: $value\n }) {\n projectV2Item {\n id\n }\n }\n }", + { projectId, itemId, fieldId: field.id, value: valueToSet } + ); + } + } + core.setOutput("item-id", itemId); + } + } catch (error) { + if (error.message && error.message.includes("does not have permission to create projects")) { + const usingCustomToken = !!process.env.GH_AW_PROJECT_GITHUB_TOKEN; + core.error( + `Failed to manage project: ${error.message}\n\nTroubleshooting:\n • Create the project manually at https://github.com/orgs/${owner}/projects/new.\n • Or supply a PAT (classic with project + repo scopes, or fine-grained with Projects: Read+Write) via GH_AW_PROJECT_GITHUB_TOKEN.\n • Or use a GitHub App with Projects: Read+Write permission.\n • Ensure the workflow grants projects: write.\n\n` + + (usingCustomToken ? "GH_AW_PROJECT_GITHUB_TOKEN is set but lacks access." : "Using default GITHUB_TOKEN - this cannot access Projects v2 API. You must configure GH_AW_PROJECT_GITHUB_TOKEN.") + ); + } else core.error(`Failed to manage project: ${error.message}`); + throw error; + } +} +async function main() { + const result = loadAgentOutput(); + if (!result.success) return; + const updateProjectItems = result.items.filter(item => "update_project" === item.type); + if (0 !== updateProjectItems.length) + for (let i = 0; i < updateProjectItems.length; i++) { + const output = updateProjectItems[i]; + try { + await updateProject(output); + } catch (error) { + (core.error(`Failed to process item ${i + 1}`), logGraphQLError(error, `Processing update_project item ${i + 1}`)); + } + } +} + +module.exports = { updateProject, parseProjectInput, generateCampaignId, main }; diff --git a/pkg/workflow/js/update_pull_request.cjs b/pkg/workflow/js/update_pull_request.cjs new file mode 100644 index 0000000000..79cbedb758 --- /dev/null +++ b/pkg/workflow/js/update_pull_request.cjs @@ -0,0 +1,83 @@ +// @ts-check +/// + +const { createUpdateHandler } = require("./update_runner.cjs"); +const { updatePRBody } = require("./update_pr_description_helpers.cjs"); +const { isPRContext, getPRNumber } = require("./update_context_helpers.cjs"); + +/** + * Execute the pull request update API call + * @param {any} github - GitHub API client + * @param {any} context - GitHub Actions context + * @param {number} prNumber - PR number to update + * @param {any} updateData - Data to update + * @returns {Promise} Updated pull request + */ +async function executePRUpdate(github, context, prNumber, updateData) { + // Handle body operation (append/prepend/replace/replace-island) + const operation = updateData._operation || "replace"; + const rawBody = updateData._rawBody; + + // Remove internal fields + const { _operation, _rawBody, ...apiData } = updateData; + + // If we have a body with operation, handle it + if (rawBody !== undefined && operation !== "replace") { + // Fetch current PR body for operations that need it + const { data: currentPR } = await github.rest.pulls.get({ + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: prNumber, + }); + const currentBody = currentPR.body || ""; + + // Get workflow run URL for AI attribution + const workflowName = process.env.GH_AW_WORKFLOW_NAME || "GitHub Agentic Workflow"; + const runUrl = `${context.serverUrl}/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}`; + + // Use helper to update body + apiData.body = updatePRBody({ + currentBody, + newContent: rawBody, + operation, + workflowName, + runUrl, + runId: context.runId, + }); + + core.info(`Will update body (length: ${apiData.body.length})`); + } else if (rawBody !== undefined) { + // Replace: just use the new content as-is (already in apiData.body) + core.info("Operation: replace (full body replacement)"); + } + + const { data: pr } = await github.rest.pulls.update({ + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: prNumber, + ...apiData, + }); + + return pr; +} + +// Create the handler using the factory +const main = createUpdateHandler({ + itemType: "update_pull_request", + displayName: "pull request", + displayNamePlural: "pull requests", + numberField: "pull_request_number", + outputNumberKey: "pull_request_number", + outputUrlKey: "pull_request_url", + entityName: "Pull Request", + entityPrefix: "PR", + targetLabel: "Target PR:", + currentTargetText: "Current pull request", + supportsStatus: false, + supportsOperation: true, + isValidContext: isPRContext, + getContextNumber: getPRNumber, + executeUpdate: executePRUpdate, +}); + +module.exports = { main }; diff --git a/pkg/workflow/js/update_release.cjs b/pkg/workflow/js/update_release.cjs new file mode 100644 index 0000000000..fbf939107d --- /dev/null +++ b/pkg/workflow/js/update_release.cjs @@ -0,0 +1,170 @@ +// @ts-check +/// + +const { loadAgentOutput } = require("./load_agent_output.cjs"); +const { generateStagedPreview } = require("./staged_preview.cjs"); + +async function main() { + // Check if we're in staged mode + const isStaged = process.env.GH_AW_SAFE_OUTPUTS_STAGED === "true"; + + const result = loadAgentOutput(); + if (!result.success) { + return; + } + + // Find all update-release items + const updateItems = result.items.filter(/** @param {any} item */ item => item.type === "update_release"); + if (updateItems.length === 0) { + core.info("No update-release items found in agent output"); + return; + } + + core.info(`Found ${updateItems.length} update-release item(s)`); + + // If in staged mode, emit step summary instead of updating releases + if (isStaged) { + await generateStagedPreview({ + title: "Update Releases", + description: "The following release updates would be applied if staged mode was disabled:", + items: updateItems, + renderItem: (item, index) => { + let content = `#### Release Update ${index + 1}\n`; + content += `**Tag:** ${item.tag || "(inferred from event context)"}\n`; + content += `**Operation:** ${item.operation}\n\n`; + content += `**Body Content:**\n${item.body}\n\n`; + return content; + }, + }); + return; + } + + // Get workflow run URL for AI attribution + const workflowName = process.env.GH_AW_WORKFLOW_NAME || "GitHub Agentic Workflow"; + const runUrl = `${context.serverUrl}/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}`; + + const updatedReleases = []; + + // Process each update item + for (let i = 0; i < updateItems.length; i++) { + const updateItem = updateItems[i]; + core.info(`Processing update-release item ${i + 1}/${updateItems.length}`); + + try { + // Infer tag from event context if not provided + let releaseTag = updateItem.tag; + if (!releaseTag) { + // Try to get tag from release event context + if (context.eventName === "release" && context.payload.release && context.payload.release.tag_name) { + releaseTag = context.payload.release.tag_name; + core.info(`Inferred release tag from event context: ${releaseTag}`); + } else if (context.eventName === "workflow_dispatch" && context.payload.inputs) { + // Try to extract from release_url input + const releaseUrl = context.payload.inputs.release_url; + if (releaseUrl) { + const urlMatch = releaseUrl.match(/github\.com\/[^\/]+\/[^\/]+\/releases\/tag\/([^\/\?#]+)/); + if (urlMatch && urlMatch[1]) { + releaseTag = decodeURIComponent(urlMatch[1]); + core.info(`Inferred release tag from release_url input: ${releaseTag}`); + } + } + // Try to fetch from release_id input + if (!releaseTag && context.payload.inputs.release_id) { + const releaseId = context.payload.inputs.release_id; + core.info(`Fetching release with ID: ${releaseId}`); + const { data: release } = await github.rest.repos.getRelease({ + owner: context.repo.owner, + repo: context.repo.repo, + release_id: parseInt(releaseId, 10), + }); + releaseTag = release.tag_name; + core.info(`Inferred release tag from release_id input: ${releaseTag}`); + } + } + + if (!releaseTag) { + core.error("No tag provided and unable to infer from event context"); + core.setFailed("Release tag is required but not provided and cannot be inferred from event context"); + return; + } + } + + // Get the release by tag + core.info(`Fetching release with tag: ${releaseTag}`); + const { data: release } = await github.rest.repos.getReleaseByTag({ + owner: context.repo.owner, + repo: context.repo.repo, + tag: releaseTag, + }); + + core.info(`Found release: ${release.name || release.tag_name} (ID: ${release.id})`); + + // Determine new body based on operation + let newBody; + if (updateItem.operation === "replace") { + // Replace: just use the new content + newBody = updateItem.body; + core.info("Operation: replace (full body replacement)"); + } else if (updateItem.operation === "prepend") { + // Prepend: add content, AI footer, and horizontal line at the start + const aiFooter = `\n\n> AI generated by [${workflowName}](${runUrl})`; + const prependSection = `${updateItem.body}${aiFooter}\n\n---\n\n`; + newBody = prependSection + (release.body || ""); + core.info("Operation: prepend (add to start with separator)"); + } else { + // Append: add horizontal line, content, and AI footer at the end + const aiFooter = `\n\n> AI generated by [${workflowName}](${runUrl})`; + const appendSection = `\n\n---\n\n${updateItem.body}${aiFooter}`; + newBody = (release.body || "") + appendSection; + core.info("Operation: append (add to end with separator)"); + } + + // Update the release + const { data: updatedRelease } = await github.rest.repos.updateRelease({ + owner: context.repo.owner, + repo: context.repo.repo, + release_id: release.id, + body: newBody, + }); + + core.info(`Successfully updated release: ${updatedRelease.html_url}`); + + updatedReleases.push({ + tag: releaseTag, + url: updatedRelease.html_url, + id: updatedRelease.id, + }); + + // Set outputs for the first release + if (i === 0) { + core.setOutput("release_id", updatedRelease.id); + core.setOutput("release_url", updatedRelease.html_url); + core.setOutput("release_tag", updatedRelease.tag_name); + } + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + const tagInfo = updateItem.tag || "inferred from context"; + core.error(`Failed to update release with tag ${tagInfo}: ${errorMessage}`); + + // Check for specific error cases + if (errorMessage.includes("Not Found")) { + core.error(`Release with tag '${tagInfo}' not found. Please ensure the tag exists.`); + } + + core.setFailed(`Failed to update release: ${errorMessage}`); + return; + } + } + + // Generate step summary + let summaryContent = `## ✅ Release Updates Complete\n\n`; + summaryContent += `Updated ${updatedReleases.length} release(s):\n\n`; + + for (const rel of updatedReleases) { + summaryContent += `- **${rel.tag}**: [View Release](${rel.url})\n`; + } + + await core.summary.addRaw(summaryContent).write(); +} + +module.exports = { main }; diff --git a/pkg/workflow/js/update_runner.cjs b/pkg/workflow/js/update_runner.cjs new file mode 100644 index 0000000000..d283e5a979 --- /dev/null +++ b/pkg/workflow/js/update_runner.cjs @@ -0,0 +1,427 @@ +// @ts-check +/// + +/** + * Shared update runner for safe-output scripts (update_issue, update_pull_request, etc.) + * + * This module depends on GitHub Actions environment globals provided by actions/github-script: + * - core: @actions/core module for logging and outputs + * - github: @octokit/rest instance for GitHub API calls + * - context: GitHub Actions context with event payload and repository info + * + * @module update_runner + */ + +const { loadAgentOutput } = require("./load_agent_output.cjs"); +const { generateStagedPreview } = require("./staged_preview.cjs"); +const { removeDuplicateTitleFromDescription } = require("./remove_duplicate_title.cjs"); + +/** + * @typedef {Object} UpdateRunnerConfig + * @property {string} itemType - Type of item in agent output (e.g., "update_issue", "update_pull_request") + * @property {string} displayName - Human-readable name (e.g., "issue", "pull request") + * @property {string} displayNamePlural - Human-readable plural name (e.g., "issues", "pull requests") + * @property {string} numberField - Field name for explicit number (e.g., "issue_number", "pull_request_number") + * @property {string} outputNumberKey - Output key for number (e.g., "issue_number", "pull_request_number") + * @property {string} outputUrlKey - Output key for URL (e.g., "issue_url", "pull_request_url") + * @property {(eventName: string, payload: any) => boolean} isValidContext - Function to check if context is valid + * @property {(payload: any) => number|undefined} getContextNumber - Function to get number from context payload + * @property {boolean} supportsStatus - Whether this type supports status updates + * @property {boolean} supportsOperation - Whether this type supports operation (append/prepend/replace) + * @property {(item: any, index: number) => string} renderStagedItem - Function to render item for staged preview + * @property {(github: any, context: any, targetNumber: number, updateData: any) => Promise} executeUpdate - Function to execute the update API call + * @property {(result: any) => string} getSummaryLine - Function to generate summary line for an updated item + */ + +/** + * Resolve the target number for an update operation + * @param {Object} params - Resolution parameters + * @param {string} params.updateTarget - Target configuration ("triggering", "*", or explicit number) + * @param {any} params.item - Update item with optional explicit number field + * @param {string} params.numberField - Field name for explicit number + * @param {boolean} params.isValidContext - Whether current context is valid + * @param {number|undefined} params.contextNumber - Number from triggering context + * @param {string} params.displayName - Display name for error messages + * @returns {{success: true, number: number} | {success: false, error: string}} + */ +function resolveTargetNumber(params) { + const { updateTarget, item, numberField, isValidContext, contextNumber, displayName } = params; + + if (updateTarget === "*") { + // For target "*", we need an explicit number from the update item + const explicitNumber = item[numberField]; + if (explicitNumber) { + const parsed = parseInt(explicitNumber, 10); + if (isNaN(parsed) || parsed <= 0) { + return { success: false, error: `Invalid ${numberField} specified: ${explicitNumber}` }; + } + return { success: true, number: parsed }; + } else { + return { success: false, error: `Target is "*" but no ${numberField} specified in update item` }; + } + } else if (updateTarget && updateTarget !== "triggering") { + // Explicit number specified in target + const parsed = parseInt(updateTarget, 10); + if (isNaN(parsed) || parsed <= 0) { + return { success: false, error: `Invalid ${displayName} number in target configuration: ${updateTarget}` }; + } + return { success: true, number: parsed }; + } else { + // Default behavior: use triggering context + if (isValidContext && contextNumber) { + return { success: true, number: contextNumber }; + } + return { success: false, error: `Could not determine ${displayName} number` }; + } +} + +/** + * Build update data based on allowed fields and provided values + * @param {Object} params - Build parameters + * @param {any} params.item - Update item with field values + * @param {boolean} params.canUpdateStatus - Whether status updates are allowed + * @param {boolean} params.canUpdateTitle - Whether title updates are allowed + * @param {boolean} params.canUpdateBody - Whether body updates are allowed + * @param {boolean} [params.canUpdateLabels] - Whether label updates are allowed + * @param {boolean} params.supportsStatus - Whether this type supports status + * @returns {{hasUpdates: boolean, updateData: any, logMessages: string[]}} + */ +function buildUpdateData(params) { + const { item, canUpdateStatus, canUpdateTitle, canUpdateBody, canUpdateLabels, supportsStatus } = params; + + /** @type {any} */ + const updateData = {}; + let hasUpdates = false; + const logMessages = []; + + // Handle status update (only for types that support it, like issues) + if (supportsStatus && canUpdateStatus && item.status !== undefined) { + if (item.status === "open" || item.status === "closed") { + updateData.state = item.status; + hasUpdates = true; + logMessages.push(`Will update status to: ${item.status}`); + } else { + logMessages.push(`Invalid status value: ${item.status}. Must be 'open' or 'closed'`); + } + } + + // Handle title update + let titleForDedup = null; + if (canUpdateTitle && item.title !== undefined) { + const trimmedTitle = typeof item.title === "string" ? item.title.trim() : ""; + if (trimmedTitle.length > 0) { + updateData.title = trimmedTitle; + titleForDedup = trimmedTitle; + hasUpdates = true; + logMessages.push(`Will update title to: ${trimmedTitle}`); + } else { + logMessages.push("Invalid title value: must be a non-empty string"); + } + } + + // Handle body update (with title deduplication) + if (canUpdateBody && item.body !== undefined) { + if (typeof item.body === "string") { + let processedBody = item.body; + + // If we're updating the title at the same time, remove duplicate title from body + if (titleForDedup) { + processedBody = removeDuplicateTitleFromDescription(titleForDedup, processedBody); + } + + updateData.body = processedBody; + hasUpdates = true; + logMessages.push(`Will update body (length: ${processedBody.length})`); + } else { + logMessages.push("Invalid body value: must be a string"); + } + } + + // Handle labels update + if (canUpdateLabels && item.labels !== undefined) { + if (Array.isArray(item.labels)) { + updateData.labels = item.labels; + hasUpdates = true; + logMessages.push(`Will update labels to: ${item.labels.join(", ")}`); + } else { + logMessages.push("Invalid labels value: must be an array"); + } + } + + return { hasUpdates, updateData, logMessages }; +} + +/** + * Run the update workflow with the provided configuration + * @param {UpdateRunnerConfig} config - Configuration for the update runner + * @returns {Promise} Array of updated items or undefined + */ +async function runUpdateWorkflow(config) { + const { itemType, displayName, displayNamePlural, numberField, outputNumberKey, outputUrlKey, isValidContext, getContextNumber, supportsStatus, supportsOperation, renderStagedItem, executeUpdate, getSummaryLine } = config; + + // Check if we're in staged mode + const isStaged = process.env.GH_AW_SAFE_OUTPUTS_STAGED === "true"; + + const result = loadAgentOutput(); + if (!result.success) { + return; + } + + // Find all update items + const updateItems = result.items.filter(/** @param {any} item */ item => item.type === itemType); + if (updateItems.length === 0) { + core.info(`No ${itemType} items found in agent output`); + return; + } + + core.info(`Found ${updateItems.length} ${itemType} item(s)`); + + // If in staged mode, emit step summary instead of updating + if (isStaged) { + await generateStagedPreview({ + title: `Update ${displayNamePlural.charAt(0).toUpperCase() + displayNamePlural.slice(1)}`, + description: `The following ${displayName} updates would be applied if staged mode was disabled:`, + items: updateItems, + renderItem: renderStagedItem, + }); + return; + } + + // Get the configuration from environment variables + const updateTarget = process.env.GH_AW_UPDATE_TARGET || "triggering"; + const canUpdateStatus = process.env.GH_AW_UPDATE_STATUS === "true"; + const canUpdateTitle = process.env.GH_AW_UPDATE_TITLE === "true"; + const canUpdateBody = process.env.GH_AW_UPDATE_BODY === "true"; + const canUpdateLabels = process.env.GH_AW_UPDATE_LABELS === "true"; + + core.info(`Update target configuration: ${updateTarget}`); + if (supportsStatus) { + core.info(`Can update status: ${canUpdateStatus}, title: ${canUpdateTitle}, body: ${canUpdateBody}, labels: ${canUpdateLabels}`); + } else { + core.info(`Can update title: ${canUpdateTitle}, body: ${canUpdateBody}, labels: ${canUpdateLabels}`); + } + + // Check context validity + const contextIsValid = isValidContext(context.eventName, context.payload); + const contextNumber = getContextNumber(context.payload); + + // Validate context based on target configuration + if (updateTarget === "triggering" && !contextIsValid) { + core.info(`Target is "triggering" but not running in ${displayName} context, skipping ${displayName} update`); + return; + } + + const updatedItems = []; + + // Process each update item + for (let i = 0; i < updateItems.length; i++) { + const updateItem = updateItems[i]; + core.info(`Processing ${itemType} item ${i + 1}/${updateItems.length}`); + + // Resolve target number + const targetResult = resolveTargetNumber({ + updateTarget, + item: updateItem, + numberField, + isValidContext: contextIsValid, + contextNumber, + displayName, + }); + + if (!targetResult.success) { + core.info(targetResult.error); + continue; + } + + const targetNumber = targetResult.number; + core.info(`Updating ${displayName} #${targetNumber}`); + + // Build update data + const { hasUpdates, updateData, logMessages } = buildUpdateData({ + item: updateItem, + canUpdateStatus, + canUpdateTitle, + canUpdateBody, + canUpdateLabels, + supportsStatus, + }); + + // Log all messages + for (const msg of logMessages) { + core.info(msg); + } + + // Handle body operation for types that support it (like PRs with append/prepend) + if (supportsOperation && canUpdateBody && updateItem.body !== undefined && typeof updateItem.body === "string") { + // The body was already added by buildUpdateData, but we need to handle operations + // This will be handled by the executeUpdate function for PR-specific logic + updateData._operation = updateItem.operation || "append"; + updateData._rawBody = updateItem.body; + } + + if (!hasUpdates) { + core.info("No valid updates to apply for this item"); + continue; + } + + try { + // Execute the update using the provided function + const updatedItem = await executeUpdate(github, context, targetNumber, updateData); + core.info(`Updated ${displayName} #${updatedItem.number}: ${updatedItem.html_url}`); + updatedItems.push(updatedItem); + + // Set output for the last updated item (for backward compatibility) + if (i === updateItems.length - 1) { + core.setOutput(outputNumberKey, updatedItem.number); + core.setOutput(outputUrlKey, updatedItem.html_url); + } + } catch (error) { + core.error(`✗ Failed to update ${displayName} #${targetNumber}: ${error instanceof Error ? error.message : String(error)}`); + throw error; + } + } + + // Write summary for all updated items + if (updatedItems.length > 0) { + let summaryContent = `\n\n## Updated ${displayNamePlural.charAt(0).toUpperCase() + displayNamePlural.slice(1)}\n`; + for (const item of updatedItems) { + summaryContent += getSummaryLine(item); + } + await core.summary.addRaw(summaryContent).write(); + } + + core.info(`Successfully updated ${updatedItems.length} ${displayName}(s)`); + return updatedItems; +} + +/** + * @typedef {Object} RenderStagedItemConfig + * @property {string} entityName - Display name for the entity (e.g., "Issue", "Pull Request") + * @property {string} numberField - Field name for the target number (e.g., "issue_number", "pull_request_number") + * @property {string} targetLabel - Label for the target (e.g., "Target Issue:", "Target PR:") + * @property {string} currentTargetText - Text when targeting current entity (e.g., "Current issue", "Current pull request") + * @property {boolean} [includeOperation=false] - Whether to include operation field for body updates + */ + +/** + * Create a render function for staged preview items + * @param {RenderStagedItemConfig} config - Configuration for the renderer + * @returns {(item: any, index: number) => string} Render function + */ +function createRenderStagedItem(config) { + const { entityName, numberField, targetLabel, currentTargetText, includeOperation = false } = config; + + return function renderStagedItem(item, index) { + let content = `#### ${entityName} Update ${index + 1}\n`; + if (item[numberField]) { + content += `**${targetLabel}** #${item[numberField]}\n\n`; + } else { + content += `**Target:** ${currentTargetText}\n\n`; + } + + if (item.title !== undefined) { + content += `**New Title:** ${item.title}\n\n`; + } + if (item.body !== undefined) { + if (includeOperation) { + const operation = item.operation || "append"; + content += `**Operation:** ${operation}\n`; + content += `**Body Content:**\n${item.body}\n\n`; + } else { + content += `**New Body:**\n${item.body}\n\n`; + } + } + if (item.status !== undefined) { + content += `**New Status:** ${item.status}\n\n`; + } + return content; + }; +} + +/** + * @typedef {Object} SummaryLineConfig + * @property {string} entityPrefix - Prefix for the summary line (e.g., "Issue", "PR") + */ + +/** + * Create a summary line generator function + * @param {SummaryLineConfig} config - Configuration for the summary generator + * @returns {(item: any) => string} Summary line generator function + */ +function createGetSummaryLine(config) { + const { entityPrefix } = config; + + return function getSummaryLine(item) { + return `- ${entityPrefix} #${item.number}: [${item.title}](${item.html_url})\n`; + }; +} + +/** + * @typedef {Object} UpdateHandlerConfig + * @property {string} itemType - Type of item in agent output (e.g., "update_issue") + * @property {string} displayName - Human-readable name (e.g., "issue") + * @property {string} displayNamePlural - Human-readable plural name (e.g., "issues") + * @property {string} numberField - Field name for explicit number (e.g., "issue_number") + * @property {string} outputNumberKey - Output key for number (e.g., "issue_number") + * @property {string} outputUrlKey - Output key for URL (e.g., "issue_url") + * @property {string} entityName - Display name for entity (e.g., "Issue", "Pull Request") + * @property {string} entityPrefix - Prefix for summary lines (e.g., "Issue", "PR") + * @property {string} targetLabel - Label for target in staged preview (e.g., "Target Issue:") + * @property {string} currentTargetText - Text for current target (e.g., "Current issue") + * @property {boolean} supportsStatus - Whether this type supports status updates + * @property {boolean} supportsOperation - Whether this type supports operation (append/prepend/replace) + * @property {(eventName: string, payload: any) => boolean} isValidContext - Function to check if context is valid + * @property {(payload: any) => number|undefined} getContextNumber - Function to get number from context payload + * @property {(github: any, context: any, targetNumber: number, updateData: any) => Promise} executeUpdate - Function to execute the update API call + */ + +/** + * Create an update handler from configuration + * This factory function eliminates boilerplate by generating all the + * render functions, summary line generators, and the main handler + * @param {UpdateHandlerConfig} config - Handler configuration + * @returns {() => Promise} Main handler function + */ +function createUpdateHandler(config) { + // Create render function for staged preview + const renderStagedItem = createRenderStagedItem({ + entityName: config.entityName, + numberField: config.numberField, + targetLabel: config.targetLabel, + currentTargetText: config.currentTargetText, + includeOperation: config.supportsOperation, + }); + + // Create summary line generator + const getSummaryLine = createGetSummaryLine({ + entityPrefix: config.entityPrefix, + }); + + // Return the main handler function + return async function main() { + return await runUpdateWorkflow({ + itemType: config.itemType, + displayName: config.displayName, + displayNamePlural: config.displayNamePlural, + numberField: config.numberField, + outputNumberKey: config.outputNumberKey, + outputUrlKey: config.outputUrlKey, + isValidContext: config.isValidContext, + getContextNumber: config.getContextNumber, + supportsStatus: config.supportsStatus, + supportsOperation: config.supportsOperation, + renderStagedItem, + executeUpdate: config.executeUpdate, + getSummaryLine, + }); + }; +} + +module.exports = { + runUpdateWorkflow, + resolveTargetNumber, + buildUpdateData, + createRenderStagedItem, + createGetSummaryLine, + createUpdateHandler, +}; diff --git a/pkg/workflow/js/upload_assets.cjs b/pkg/workflow/js/upload_assets.cjs new file mode 100644 index 0000000000..9eb258b935 --- /dev/null +++ b/pkg/workflow/js/upload_assets.cjs @@ -0,0 +1,195 @@ +// @ts-check +/// + +const fs = require("fs"); +const path = require("path"); +const crypto = require("crypto"); +const { loadAgentOutput } = require("./load_agent_output.cjs"); + +/** + * Normalizes a branch name to be a valid git branch name. + * + * IMPORTANT: Keep this function in sync with the normalizeBranchName function in safe_outputs_mcp_server.cjs + * + * Valid characters: alphanumeric (a-z, A-Z, 0-9), dash (-), underscore (_), forward slash (/), dot (.) + * Max length: 128 characters + * + * The normalization process: + * 1. Replaces invalid characters with a single dash + * 2. Collapses multiple consecutive dashes to a single dash + * 3. Removes leading and trailing dashes + * 4. Truncates to 128 characters + * 5. Removes trailing dashes after truncation + * 6. Converts to lowercase + * + * @param {string} branchName - The branch name to normalize + * @returns {string} The normalized branch name + */ +function normalizeBranchName(branchName) { + if (!branchName || typeof branchName !== "string" || branchName.trim() === "") { + return branchName; + } + + // Replace any sequence of invalid characters with a single dash + // Valid characters are: a-z, A-Z, 0-9, -, _, /, . + let normalized = branchName.replace(/[^a-zA-Z0-9\-_/.]+/g, "-"); + + // Collapse multiple consecutive dashes to a single dash + normalized = normalized.replace(/-+/g, "-"); + + // Remove leading and trailing dashes + normalized = normalized.replace(/^-+|-+$/g, ""); + + // Truncate to max 128 characters + if (normalized.length > 128) { + normalized = normalized.substring(0, 128); + } + + // Ensure it doesn't end with a dash after truncation + normalized = normalized.replace(/-+$/, ""); + + // Convert to lowercase + normalized = normalized.toLowerCase(); + + return normalized; +} + +async function main() { + // Check if we're in staged mode + const isStaged = process.env.GH_AW_SAFE_OUTPUTS_STAGED === "true"; + + // Get the branch name from environment variable (required) + const branchName = process.env.GH_AW_ASSETS_BRANCH; + if (!branchName || typeof branchName !== "string") { + core.setFailed("GH_AW_ASSETS_BRANCH environment variable is required but not set"); + return; + } + + // Normalize the branch name to ensure it's a valid git branch name + const normalizedBranchName = normalizeBranchName(branchName); + core.info(`Using assets branch: ${normalizedBranchName}`); + + const result = loadAgentOutput(); + if (!result.success) { + core.setOutput("upload_count", "0"); + core.setOutput("branch_name", normalizedBranchName); + return; + } + + // Find all upload-asset items + const uploadItems = result.items.filter(/** @param {any} item */ item => item.type === "upload_asset"); + + if (uploadItems.length === 0) { + core.info("No upload-asset items found in agent output"); + core.setOutput("upload_count", "0"); + core.setOutput("branch_name", normalizedBranchName); + return; + } + + core.info(`Found ${uploadItems.length} upload-asset item(s)`); + + let uploadCount = 0; + let hasChanges = false; + + try { + // Check if orphaned branch already exists, if not create it + try { + await exec.exec(`git rev-parse --verify origin/${normalizedBranchName}`); + await exec.exec(`git checkout -B ${normalizedBranchName} origin/${normalizedBranchName}`); + core.info(`Checked out existing branch from origin: ${normalizedBranchName}`); + } catch (originError) { + // Validate that branch starts with "assets/" prefix before creating orphaned branch + if (!normalizedBranchName.startsWith("assets/")) { + core.setFailed( + `Branch '${normalizedBranchName}' does not start with the required 'assets/' prefix. ` + + `Orphaned branches can only be automatically created under the 'assets/' prefix. ` + + `Please create the branch manually first, or use a branch name starting with 'assets/'.` + ); + return; + } + + // Branch doesn't exist on origin and has valid prefix, create orphaned branch + core.info(`Creating new orphaned branch: ${normalizedBranchName}`); + await exec.exec(`git checkout --orphan ${normalizedBranchName}`); + await exec.exec(`git rm -rf .`); + await exec.exec(`git clean -fdx`); + } + + // Process each asset + for (const asset of uploadItems) { + try { + const { fileName, sha, size, targetFileName } = asset; + + if (!fileName || !sha || !targetFileName) { + core.error(`Invalid asset entry missing required fields: ${JSON.stringify(asset)}`); + continue; + } + + // Check if file exists in artifacts + const assetSourcePath = path.join("/tmp/gh-aw/safeoutputs/assets", fileName); + if (!fs.existsSync(assetSourcePath)) { + core.warning(`Asset file not found: ${assetSourcePath}`); + continue; + } + + // Verify SHA matches + const fileContent = fs.readFileSync(assetSourcePath); + const computedSha = crypto.createHash("sha256").update(fileContent).digest("hex"); + + if (computedSha !== sha) { + core.warning(`SHA mismatch for ${fileName}: expected ${sha}, got ${computedSha}`); + continue; + } + + // Check if file already exists in the branch + if (fs.existsSync(targetFileName)) { + core.info(`Asset ${targetFileName} already exists, skipping`); + continue; + } + + // Copy file to branch with target filename + fs.copyFileSync(assetSourcePath, targetFileName); + + // Add to git + await exec.exec(`git add "${targetFileName}"`); + + uploadCount++; + hasChanges = true; + + core.info(`Added asset: ${targetFileName} (${size} bytes)`); + } catch (error) { + core.warning(`Failed to process asset ${asset.fileName}: ${error instanceof Error ? error.message : String(error)}`); + } + } + + // Commit and push if there are changes (skip if staged) + if (hasChanges) { + const commitMessage = `[skip-ci] Add ${uploadCount} asset(s)`; + await exec.exec(`git`, [`commit`, `-m`, commitMessage]); + if (isStaged) { + core.summary.addRaw("## Staged Asset Publication"); + } else { + await exec.exec(`git push origin ${normalizedBranchName}`); + core.summary.addRaw("## Assets").addRaw(`Successfully uploaded **${uploadCount}** assets to branch \`${normalizedBranchName}\``).addRaw(""); + core.info(`Successfully uploaded ${uploadCount} assets to branch ${normalizedBranchName}`); + } + + for (const asset of uploadItems) { + if (asset.fileName && asset.sha && asset.size && asset.url) { + core.summary.addRaw(`- [\`${asset.fileName}\`](${asset.url}) → \`${asset.targetFileName}\` (${asset.size} bytes)`); + } + } + core.summary.write(); + } else { + core.info("No new assets to upload"); + } + } catch (error) { + core.setFailed(`Failed to upload assets: ${error instanceof Error ? error.message : String(error)}`); + return; + } + + core.setOutput("upload_count", uploadCount.toString()); + core.setOutput("branch_name", normalizedBranchName); +} + +module.exports = { main }; diff --git a/pkg/workflow/js/validate_errors.cjs b/pkg/workflow/js/validate_errors.cjs new file mode 100644 index 0000000000..3d2e6fba01 --- /dev/null +++ b/pkg/workflow/js/validate_errors.cjs @@ -0,0 +1,349 @@ +// @ts-check +/// + +function main() { + const fs = require("fs"); + const path = require("path"); + + core.info("Starting validate_errors.cjs script"); + const startTime = Date.now(); + + try { + const logPath = process.env.GH_AW_AGENT_OUTPUT; + if (!logPath) { + throw new Error("GH_AW_AGENT_OUTPUT environment variable is required"); + } + + core.info(`Log path: ${logPath}`); + + if (!fs.existsSync(logPath)) { + core.info(`Log path not found: ${logPath}`); + core.info("No logs to validate - skipping error validation"); + return; + } + + // Get error patterns from environment variables + const patterns = getErrorPatternsFromEnv(); + if (patterns.length === 0) { + throw new Error("GH_AW_ERROR_PATTERNS environment variable is required and must contain at least one pattern"); + } + + core.info(`Loaded ${patterns.length} error patterns`); + core.info(`Patterns: ${JSON.stringify(patterns.map(p => ({ description: p.description, pattern: p.pattern })))}`); + + let content = ""; + + // Check if logPath is a directory or a file + const stat = fs.statSync(logPath); + if (stat.isDirectory()) { + // Read all log files from the directory and concatenate them + const files = fs.readdirSync(logPath); + const logFiles = files.filter(file => file.endsWith(".log") || file.endsWith(".txt")); + + if (logFiles.length === 0) { + core.info(`No log files found in directory: ${logPath}`); + return; + } + + core.info(`Found ${logFiles.length} log files in directory`); + + // Sort log files by name to ensure consistent ordering + logFiles.sort(); + + // Concatenate all log files + for (const file of logFiles) { + const filePath = path.join(logPath, file); + const fileContent = fs.readFileSync(filePath, "utf8"); + core.info(`Reading log file: ${file} (${fileContent.length} bytes)`); + content += fileContent; + // Add a newline between files if the previous file doesn't end with one + if (content.length > 0 && !content.endsWith("\n")) { + content += "\n"; + } + } + } else { + // Read the single log file + content = fs.readFileSync(logPath, "utf8"); + core.info(`Read single log file (${content.length} bytes)`); + } + + core.info(`Total log content size: ${content.length} bytes, ${content.split("\n").length} lines`); + + const hasErrors = validateErrors(content, patterns); + + const elapsedTime = Date.now() - startTime; + core.info(`Error validation completed in ${elapsedTime}ms`); + + if (hasErrors) { + core.error("Errors detected in agent logs - continuing workflow step (not failing for now)"); + //core.setFailed("Errors detected in agent logs - failing workflow step"); + } else { + core.info("Error validation completed successfully"); + } + } catch (error) { + console.debug(error); + core.error(`Error validating log: ${error instanceof Error ? error.message : String(error)}`); + } +} + +function getErrorPatternsFromEnv() { + const patternsEnv = process.env.GH_AW_ERROR_PATTERNS; + if (!patternsEnv) { + throw new Error("GH_AW_ERROR_PATTERNS environment variable is required"); + } + + try { + const patterns = JSON.parse(patternsEnv); + if (!Array.isArray(patterns)) { + throw new Error("GH_AW_ERROR_PATTERNS must be a JSON array"); + } + return patterns; + } catch (e) { + throw new Error(`Failed to parse GH_AW_ERROR_PATTERNS as JSON: ${e instanceof Error ? e.message : String(e)}`); + } +} + +/** + * Determine if a log line should be skipped during error validation. + * This prevents false positives from environment variable definitions and other metadata. + * @param {string} line - The log line to check + * @returns {boolean} - True if the line should be skipped + */ +function shouldSkipLine(line) { + // GitHub Actions timestamp format: YYYY-MM-DDTHH:MM:SS.MMMZ + const GITHUB_ACTIONS_TIMESTAMP = /^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+Z\s+/; + + // Skip GitHub Actions environment variable declarations + // Format: "2025-10-11T21:23:50.7459810Z GH_AW_ERROR_PATTERNS: [..." + if (new RegExp(GITHUB_ACTIONS_TIMESTAMP.source + "GH_AW_ERROR_PATTERNS:").test(line)) { + return true; + } + + // Skip lines that are showing environment variables in GitHub Actions format + // Format: " GH_AW_ERROR_PATTERNS: [..." + if (/^\s+GH_AW_ERROR_PATTERNS:\s*\[/.test(line)) { + return true; + } + + // Skip lines showing env: section in GitHub Actions logs + // Format: "2025-10-11T21:23:50.7453806Z env:" + if (new RegExp(GITHUB_ACTIONS_TIMESTAMP.source + "env:").test(line)) { + return true; + } + + // Skip Copilot CLI DEBUG messages + // Format: "2025-12-15T08:35:23.457Z [DEBUG] ..." + // These are diagnostic messages that may contain error patterns but are not actual errors + if (/^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z\s+\[DEBUG\]/.test(line)) { + return true; + } + + return false; +} + +/** + * @param {string} logContent + * @param {any[]} patterns + * @returns {boolean} + */ +function validateErrors(logContent, patterns) { + const lines = logContent.split("\n"); + let hasErrors = false; + + // Configuration for infinite loop detection and performance + const MAX_ITERATIONS_PER_LINE = 10000; // Maximum regex matches per line + const ITERATION_WARNING_THRESHOLD = 1000; // Warn if iterations exceed this + const MAX_TOTAL_ERRORS = 100; // Stop after finding this many errors (prevents excessive processing) + const MAX_LINE_LENGTH = 10000; // Skip lines longer than this (likely JSON payloads) + const TOP_SLOW_PATTERNS_COUNT = 5; // Number of slowest patterns to report + + core.info(`Starting error validation with ${patterns.length} patterns and ${lines.length} lines`); + + const validationStartTime = Date.now(); + let totalMatches = 0; + let patternStats = []; + + for (let patternIndex = 0; patternIndex < patterns.length; patternIndex++) { + const pattern = patterns[patternIndex]; + const patternStartTime = Date.now(); + let patternMatches = 0; + + let regex; + try { + regex = new RegExp(pattern.pattern, "g"); + core.info(`Pattern ${patternIndex + 1}/${patterns.length}: ${pattern.description || "Unknown"} - regex: ${pattern.pattern}`); + } catch (e) { + core.error(`invalid error regex pattern: ${pattern.pattern}`); + continue; + } + + for (let lineIndex = 0; lineIndex < lines.length; lineIndex++) { + const line = lines[lineIndex]; + + // Skip lines that are environment variable definitions from GitHub Actions logs + // These lines contain the error patterns themselves and create false positives + if (shouldSkipLine(line)) { + continue; + } + + // Skip very long lines that are likely JSON payloads or dumps + // These rarely contain actionable error messages and are expensive to process + if (line.length > MAX_LINE_LENGTH) { + continue; + } + + // Early termination if we've found too many errors + if (totalMatches >= MAX_TOTAL_ERRORS) { + core.warning(`Stopping error validation after finding ${totalMatches} matches (max: ${MAX_TOTAL_ERRORS})`); + break; + } + + let match; + let iterationCount = 0; + let lastIndex = -1; + + while ((match = regex.exec(line)) !== null) { + iterationCount++; + + // Detect potential infinite loop: regex.lastIndex not advancing + if (regex.lastIndex === lastIndex) { + core.error(`Infinite loop detected at line ${lineIndex + 1}! Pattern: ${pattern.pattern}, lastIndex stuck at ${lastIndex}`); + core.error(`Line content (truncated): ${truncateString(line, 200)}`); + break; // Exit the while loop to prevent hanging + } + lastIndex = regex.lastIndex; + + // Warn if iteration count is getting high + if (iterationCount === ITERATION_WARNING_THRESHOLD) { + core.warning(`High iteration count (${iterationCount}) on line ${lineIndex + 1} with pattern: ${pattern.description || pattern.pattern}`); + core.warning(`Line content (truncated): ${truncateString(line, 200)}`); + } + + // Hard limit to prevent actual infinite loops + if (iterationCount > MAX_ITERATIONS_PER_LINE) { + core.error(`Maximum iteration limit (${MAX_ITERATIONS_PER_LINE}) exceeded at line ${lineIndex + 1}! Pattern: ${pattern.pattern}`); + core.error(`Line content (truncated): ${truncateString(line, 200)}`); + core.error(`This likely indicates a problematic regex pattern. Skipping remaining matches on this line.`); + break; // Exit the while loop + } + + const level = extractLevel(match, pattern); + const message = extractMessage(match, pattern, line); + + const errorMessage = `Line ${lineIndex + 1}: ${message} (Pattern: ${pattern.description || "Unknown pattern"}, Raw log: ${truncateString(line.trim(), 120)})`; + + if (level.toLowerCase() === "error") { + core.error(errorMessage); + hasErrors = true; + } else { + core.warning(errorMessage); + } + + patternMatches++; + totalMatches++; + } + + // Log if we had a significant number of matches on a line + if (iterationCount > 100) { + core.info(`Line ${lineIndex + 1} had ${iterationCount} matches for pattern: ${pattern.description || pattern.pattern}`); + } + } + + // Track pattern performance + const patternElapsed = Date.now() - patternStartTime; + patternStats.push({ + description: pattern.description || "Unknown", + pattern: pattern.pattern.substring(0, 50) + (pattern.pattern.length > 50 ? "..." : ""), + matches: patternMatches, + timeMs: patternElapsed, + }); + + // Log slow patterns (> 5 seconds) + if (patternElapsed > 5000) { + core.warning(`Pattern "${pattern.description}" took ${patternElapsed}ms to process (${patternMatches} matches)`); + } + + // Early termination if we've found enough errors + if (totalMatches >= MAX_TOTAL_ERRORS) { + core.warning(`Stopping pattern processing after finding ${totalMatches} matches (max: ${MAX_TOTAL_ERRORS})`); + break; + } + } + + // Log performance summary + const validationElapsed = Date.now() - validationStartTime; + core.info(`Validation summary: ${totalMatches} total matches found in ${validationElapsed}ms`); + + // Log top slowest patterns + patternStats.sort((a, b) => b.timeMs - a.timeMs); + const topSlow = patternStats.slice(0, TOP_SLOW_PATTERNS_COUNT); + if (topSlow.length > 0 && topSlow[0].timeMs > 1000) { + core.info(`Top ${TOP_SLOW_PATTERNS_COUNT} slowest patterns:`); + topSlow.forEach((stat, idx) => { + core.info(` ${idx + 1}. "${stat.description}" - ${stat.timeMs}ms (${stat.matches} matches)`); + }); + } + + core.info(`Error validation completed. Errors found: ${hasErrors}`); + return hasErrors; +} + +/** + * @param {any} match + * @param {any} pattern + * @returns {string} + */ +function extractLevel(match, pattern) { + if (pattern.level_group && pattern.level_group > 0 && match[pattern.level_group]) { + return match[pattern.level_group]; + } + + // Try to infer level from the match content + const fullMatch = match[0]; + if (fullMatch.toLowerCase().includes("error")) { + return "error"; + } else if (fullMatch.toLowerCase().includes("warn")) { + return "warning"; + } + + return "unknown"; +} + +/** + * @param {any} match + * @param {any} pattern + * @param {any} fullLine + * @returns {string} + */ +function extractMessage(match, pattern, fullLine) { + if (pattern.message_group && pattern.message_group > 0 && match[pattern.message_group]) { + return match[pattern.message_group].trim(); + } + + // Fallback to the full match or line + return match[0] || fullLine.trim(); +} + +/** + * @param {any} str + * @param {any} maxLength + * @returns {string} + */ +function truncateString(str, maxLength) { + if (!str) return ""; + if (str.length <= maxLength) return str; + return str.substring(0, maxLength) + "..."; +} + +// Export for testing +if (typeof module !== "undefined" && module.exports) { + module.exports = { + main, + validateErrors, + extractLevel, + extractMessage, + getErrorPatternsFromEnv, + truncateString, + shouldSkipLine, + }; +} diff --git a/pkg/workflow/js/write_large_content_to_file.cjs b/pkg/workflow/js/write_large_content_to_file.cjs new file mode 100644 index 0000000000..89561cf276 --- /dev/null +++ b/pkg/workflow/js/write_large_content_to_file.cjs @@ -0,0 +1,44 @@ +// @ts-check +/// + +const fs = require("fs"); +const path = require("path"); +const crypto = require("crypto"); + +const { generateCompactSchema } = require("./generate_compact_schema.cjs"); + +/** + * Writes large content to a file and returns metadata + * @param {string} content - The content to write + * @returns {Object} Object with filename and description + */ +function writeLargeContentToFile(content) { + const logsDir = "/tmp/gh-aw/safeoutputs"; + + // Ensure directory exists + if (!fs.existsSync(logsDir)) { + fs.mkdirSync(logsDir, { recursive: true }); + } + + // Generate SHA256 hash of content + const hash = crypto.createHash("sha256").update(content).digest("hex"); + + // MCP tools return JSON, so always use .json extension + const filename = `${hash}.json`; + const filepath = path.join(logsDir, filename); + + // Write content to file + fs.writeFileSync(filepath, content, "utf8"); + + // Generate compact schema description for jq/agent + const description = generateCompactSchema(content); + + return { + filename: filename, + description: description, + }; +} + +module.exports = { + writeLargeContentToFile, +}; diff --git a/pkg/workflow/safe_outputs_mcp_bundler_integration_test.go b/pkg/workflow/safe_outputs_mcp_bundler_integration_test.go index 0c6143c5fa..dcb0b30d47 100644 --- a/pkg/workflow/safe_outputs_mcp_bundler_integration_test.go +++ b/pkg/workflow/safe_outputs_mcp_bundler_integration_test.go @@ -3,12 +3,12 @@ package workflow import ( -"testing" + "testing" ) // SKIPPED: Scripts now use require() pattern and are loaded at runtime from external files // TestSafeOutputsMCPBundlerIntegration tests that the safe-outputs workflow // correctly includes child_process imports in the generated .cjs files func TestSafeOutputsMCPBundlerIntegration(t *testing.T) { -t.Skip("Test skipped - safe-outputs MCP scripts now use require() pattern and are loaded at runtime from external files") + t.Skip("Test skipped - safe-outputs MCP scripts now use require() pattern and are loaded at runtime from external files") } diff --git a/pkg/workflow/trial_mode_test.go b/pkg/workflow/trial_mode_test.go index 4824f5f0e0..8496211028 100644 --- a/pkg/workflow/trial_mode_test.go +++ b/pkg/workflow/trial_mode_test.go @@ -162,28 +162,41 @@ This is a test workflow for trial mode compilation. agentJobContent := lockContent[agentJobStart:agentJobEnd] - // Check specifically that the checkout step in agent job has the token parameter + // Check specifically that the "Checkout repository" step has the token parameter + // Note: There are multiple checkout steps in the agent job (e.g., "Checkout actions folder"), + // but we specifically want to check the "Checkout repository" step lines := strings.Split(agentJobContent, "\n") foundCheckoutToken := false for i, line := range lines { - if strings.Contains(line, "actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd") { - // Check the next few lines for "with:" and "token:" + // Look for the "Checkout repository" step name + if strings.Contains(line, "- name:") && strings.Contains(line, "Checkout repository") { + // Now look for the checkout action in the next few lines for j := i + 1; j < len(lines) && j < i+10; j++ { - if strings.TrimSpace(lines[j]) == "with:" { - // Found "with:" section, check for token - for k := j + 1; k < len(lines) && k < j+5; k++ { - if strings.Contains(lines[k], "token:") && strings.Contains(lines[k], "${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN || secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}") { - foundCheckoutToken = true + if strings.Contains(lines[j], "actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd") { + // Check the next few lines for "with:" and "token:" + for k := j + 1; k < len(lines) && k < j+10; k++ { + if strings.TrimSpace(lines[k]) == "with:" { + // Found "with:" section, check for token + for m := k + 1; m < len(lines) && m < k+5; m++ { + if strings.Contains(lines[m], "token:") && strings.Contains(lines[m], "${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN || secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}") { + foundCheckoutToken = true + break + } + // If we hit another step or section, stop checking + if strings.HasPrefix(strings.TrimSpace(lines[m]), "- name:") { + break + } + } break } - // If we hit another step or section, stop checking + // If we hit another step, stop checking if strings.HasPrefix(strings.TrimSpace(lines[k]), "- name:") { break } } break } - // If we hit another step, stop checking + // If we hit another step, stop searching for the checkout action if strings.HasPrefix(strings.TrimSpace(lines[j]), "- name:") { break }