#
# ___ _ _
# / _ \ | | (_)
# | |_| | __ _ ___ _ __ | |_ _ ___
# | _ |/ _` |/ _ \ '_ \| __| |/ __|
# | | | | (_| | __/ | | | |_| | (__
# \_| |_/\__, |\___|_| |_|\__|_|\___|
# __/ |
# _ _ |___/
# | | | | / _| |
# | | | | ___ _ __ _ __| |_| | _____ ____
# | |/\| |/ _ \ '__| |/ /| _| |/ _ \ \ /\ / / ___|
# \ /\ / (_) | | | | ( | | | | (_) \ V V /\__ \
# \/ \/ \___/|_| |_|\_\|_| |_|\___/ \_/\_/ |___/
#
# This file was automatically generated by gh-aw (v0.56.2). DO NOT EDIT.
#
# To update this file, edit the corresponding .md file and run:
# gh aw compile
# Not all edits will cause changes to this file.
#
# For more information: https://github.github.com/gh-aw/introduction/overview/
#
# Investigates suspicious repository activity and maintains a single triage issue with auto-close for spam
#
# gh-aw-metadata: {"schema_version":"v2","frontmatter_hash":"a772707cdae9f7fe936bc50db3556e9e9bd484d96abf2096bbdaf35e505ebf95","compiler_version":"v0.56.2","strict":true}
name: "Bot Detection"
# NOTE: "on" is quoted because it is a YAML 1.1 boolean token; generic parsers
# would otherwise read the key as `true`.
"on":
  issues:
    types:
      - opened
      - edited
  pull_request:
    types:
      - opened
      - edited
      - synchronize
  schedule:
    - cron: "0 */6 * * *"
  workflow_dispatch:
permissions: {}
# One concurrency group per issue/PR (falling back to run_id) so runs for the
# same item cancel each other instead of stacking up.
concurrency:
  group: "gh-aw-${{ github.workflow }}-${{ github.event.issue.number || github.event.pull_request.number || github.run_id }}"
  cancel-in-progress: true
run-name: "Bot Detection"
jobs:
  # Gatekeeper job: sanitizes event text and builds the agent prompt.
  # Only runs when pre_activation approved the trigger and precompute decided
  # an action is required; PR events from forks are excluded.
  activation:
    needs:
      - pre_activation
      - precompute
    if: >
      (needs.pre_activation.outputs.activated == 'true') && ((needs.precompute.outputs.action != 'none') &&
      ((github.event_name != 'pull_request') || (github.event.pull_request.head.repo.id == github.repository_id)))
    runs-on: ubuntu-slim
    permissions:
      contents: read
    outputs:
      body: ${{ steps.sanitized.outputs.body }}
      comment_id: ""
      comment_repo: ""
      model: ${{ steps.generate_aw_info.outputs.model }}
      secret_verification_result: ${{ steps.validate-secret.outputs.verification_result }}
      text: ${{ steps.sanitized.outputs.text }}
      title: ${{ steps.sanitized.outputs.title }}
    steps:
      - name: Setup Scripts
        uses: github/gh-aw/actions/setup@f1073c5498ee46fec1530555a7c953445417c69b # v0.56.2
        with:
          destination: /opt/gh-aw/actions
      - name: Generate agentic run info
        id: generate_aw_info
        env:
          GH_AW_INFO_ENGINE_ID: "claude"
          GH_AW_INFO_ENGINE_NAME: "Claude Code"
          GH_AW_INFO_MODEL: ${{ vars.GH_AW_MODEL_AGENT_CLAUDE || '' }}
          GH_AW_INFO_VERSION: ""
          GH_AW_INFO_AGENT_VERSION: "latest"
          GH_AW_INFO_CLI_VERSION: "v0.56.2"
          GH_AW_INFO_WORKFLOW_NAME: "Bot Detection"
          GH_AW_INFO_EXPERIMENTAL: "false"
          GH_AW_INFO_SUPPORTS_TOOLS_ALLOWLIST: "true"
          GH_AW_INFO_STAGED: "false"
          GH_AW_INFO_ALLOWED_DOMAINS: '["defaults"]'
          GH_AW_INFO_FIREWALL_ENABLED: "true"
          GH_AW_INFO_AWF_VERSION: "v0.23.0"
          GH_AW_INFO_AWMG_VERSION: ""
          GH_AW_INFO_FIREWALL_TYPE: "squid"
          GH_AW_COMPILED_STRICT: "true"
        uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
        with:
          script: |
            const { main } = require('/opt/gh-aw/actions/generate_aw_info.cjs');
            await main(core, context);
      - name: Validate ANTHROPIC_API_KEY secret
        id: validate-secret
        run: /opt/gh-aw/actions/validate_multi_secret.sh ANTHROPIC_API_KEY 'Claude Code' https://github.github.com/gh-aw/reference/engines/#anthropic-claude-code
        env:
          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
      - name: Checkout .github and .agents folders
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          sparse-checkout: |
            .github
            .agents
          sparse-checkout-cone-mode: true
          fetch-depth: 1
          persist-credentials: false
      - name: Check workflow file timestamps
        uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
        env:
          GH_AW_WORKFLOW_FILE: "bot-detection.lock.yml"
        with:
          script: |
            const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs');
            setupGlobals(core, github, context, exec, io);
            const { main } = require('/opt/gh-aw/actions/check_workflow_timestamp_api.cjs');
            await main();
      - name: Compute current body text
        id: sanitized
        uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
        with:
          script: |
            const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs');
            setupGlobals(core, github, context, exec, io);
            const { main } = require('/opt/gh-aw/actions/compute_text.cjs');
            await main();
      # Assembles the prompt file from the shared system prompts plus the
      # workflow-specific markdown; heredoc content is kept flush so the
      # written prompt text is byte-identical after YAML dedenting.
      - name: Create prompt with built-in context
        env:
          GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
          GH_AW_SAFE_OUTPUTS: ${{ env.GH_AW_SAFE_OUTPUTS }}
          GH_AW_GITHUB_ACTOR: ${{ github.actor }}
          GH_AW_GITHUB_EVENT_COMMENT_ID: ${{ github.event.comment.id }}
          GH_AW_GITHUB_EVENT_DISCUSSION_NUMBER: ${{ github.event.discussion.number }}
          GH_AW_GITHUB_EVENT_ISSUE_NUMBER: ${{ github.event.issue.number }}
          GH_AW_GITHUB_EVENT_PULL_REQUEST_NUMBER: ${{ github.event.pull_request.number }}
          GH_AW_GITHUB_REPOSITORY: ${{ github.repository }}
          GH_AW_GITHUB_RUN_ID: ${{ github.run_id }}
          GH_AW_GITHUB_WORKSPACE: ${{ github.workspace }}
          GH_AW_NEEDS_PRECOMPUTE_OUTPUTS_ACTION: ${{ needs.precompute.outputs.action }}
          GH_AW_NEEDS_PRECOMPUTE_OUTPUTS_ISSUE_BODY: ${{ needs.precompute.outputs.issue_body }}
          GH_AW_NEEDS_PRECOMPUTE_OUTPUTS_ISSUE_NUMBER: ${{ needs.precompute.outputs.issue_number }}
          GH_AW_NEEDS_PRECOMPUTE_OUTPUTS_ISSUE_TITLE: ${{ needs.precompute.outputs.issue_title }}
        run: |
          bash /opt/gh-aw/actions/create_prompt_first.sh
          {
          cat << 'GH_AW_PROMPT_EOF'
          <system>
          GH_AW_PROMPT_EOF
          cat "/opt/gh-aw/prompts/xpia.md"
          cat "/opt/gh-aw/prompts/temp_folder_prompt.md"
          cat "/opt/gh-aw/prompts/markdown.md"
          cat "/opt/gh-aw/prompts/safe_outputs_prompt.md"
          cat << 'GH_AW_PROMPT_EOF'
          <safe-output-tools>
          Tools: create_issue, close_issue, update_issue, close_pull_request, missing_tool, missing_data, noop
          </safe-output-tools>
          <github-context>
          The following GitHub context information is available for this workflow:
          {{#if __GH_AW_GITHUB_ACTOR__ }}
          - **actor**: __GH_AW_GITHUB_ACTOR__
          {{/if}}
          {{#if __GH_AW_GITHUB_REPOSITORY__ }}
          - **repository**: __GH_AW_GITHUB_REPOSITORY__
          {{/if}}
          {{#if __GH_AW_GITHUB_WORKSPACE__ }}
          - **workspace**: __GH_AW_GITHUB_WORKSPACE__
          {{/if}}
          {{#if __GH_AW_GITHUB_EVENT_ISSUE_NUMBER__ }}
          - **issue-number**: #__GH_AW_GITHUB_EVENT_ISSUE_NUMBER__
          {{/if}}
          {{#if __GH_AW_GITHUB_EVENT_DISCUSSION_NUMBER__ }}
          - **discussion-number**: #__GH_AW_GITHUB_EVENT_DISCUSSION_NUMBER__
          {{/if}}
          {{#if __GH_AW_GITHUB_EVENT_PULL_REQUEST_NUMBER__ }}
          - **pull-request-number**: #__GH_AW_GITHUB_EVENT_PULL_REQUEST_NUMBER__
          {{/if}}
          {{#if __GH_AW_GITHUB_EVENT_COMMENT_ID__ }}
          - **comment-id**: __GH_AW_GITHUB_EVENT_COMMENT_ID__
          {{/if}}
          {{#if __GH_AW_GITHUB_RUN_ID__ }}
          - **workflow-run-id**: __GH_AW_GITHUB_RUN_ID__
          {{/if}}
          </github-context>
          GH_AW_PROMPT_EOF
          cat << 'GH_AW_PROMPT_EOF'
          </system>
          GH_AW_PROMPT_EOF
          cat << 'GH_AW_PROMPT_EOF'
          {{#runtime-import .github/workflows/bot-detection.md}}
          GH_AW_PROMPT_EOF
          } > "$GH_AW_PROMPT"
      - name: Interpolate variables and render templates
        uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
        env:
          GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
          GH_AW_GITHUB_REPOSITORY: ${{ github.repository }}
          GH_AW_NEEDS_PRECOMPUTE_OUTPUTS_ACTION: ${{ needs.precompute.outputs.action }}
          GH_AW_NEEDS_PRECOMPUTE_OUTPUTS_ISSUE_BODY: ${{ needs.precompute.outputs.issue_body }}
          GH_AW_NEEDS_PRECOMPUTE_OUTPUTS_ISSUE_NUMBER: ${{ needs.precompute.outputs.issue_number }}
          GH_AW_NEEDS_PRECOMPUTE_OUTPUTS_ISSUE_TITLE: ${{ needs.precompute.outputs.issue_title }}
        with:
          script: |
            const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs');
            setupGlobals(core, github, context, exec, io);
            const { main } = require('/opt/gh-aw/actions/interpolate_prompt.cjs');
            await main();
      - name: Substitute placeholders
        uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
        env:
          GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
          GH_AW_GITHUB_ACTOR: ${{ github.actor }}
          GH_AW_GITHUB_EVENT_COMMENT_ID: ${{ github.event.comment.id }}
          GH_AW_GITHUB_EVENT_DISCUSSION_NUMBER: ${{ github.event.discussion.number }}
          GH_AW_GITHUB_EVENT_ISSUE_NUMBER: ${{ github.event.issue.number }}
          GH_AW_GITHUB_EVENT_PULL_REQUEST_NUMBER: ${{ github.event.pull_request.number }}
          GH_AW_GITHUB_REPOSITORY: ${{ github.repository }}
          GH_AW_GITHUB_RUN_ID: ${{ github.run_id }}
          GH_AW_GITHUB_WORKSPACE: ${{ github.workspace }}
          GH_AW_NEEDS_PRECOMPUTE_OUTPUTS_ACTION: ${{ needs.precompute.outputs.action }}
          GH_AW_NEEDS_PRECOMPUTE_OUTPUTS_ISSUE_BODY: ${{ needs.precompute.outputs.issue_body }}
          GH_AW_NEEDS_PRECOMPUTE_OUTPUTS_ISSUE_NUMBER: ${{ needs.precompute.outputs.issue_number }}
          GH_AW_NEEDS_PRECOMPUTE_OUTPUTS_ISSUE_TITLE: ${{ needs.precompute.outputs.issue_title }}
          GH_AW_NEEDS_PRE_ACTIVATION_OUTPUTS_ACTIVATED: ${{ needs.pre_activation.outputs.activated }}
        with:
          script: |
            const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs');
            setupGlobals(core, github, context, exec, io);
            const substitutePlaceholders = require('/opt/gh-aw/actions/substitute_placeholders.cjs');
            // Call the substitution function
            return await substitutePlaceholders({
              file: process.env.GH_AW_PROMPT,
              substitutions: {
                GH_AW_GITHUB_ACTOR: process.env.GH_AW_GITHUB_ACTOR,
                GH_AW_GITHUB_EVENT_COMMENT_ID: process.env.GH_AW_GITHUB_EVENT_COMMENT_ID,
                GH_AW_GITHUB_EVENT_DISCUSSION_NUMBER: process.env.GH_AW_GITHUB_EVENT_DISCUSSION_NUMBER,
                GH_AW_GITHUB_EVENT_ISSUE_NUMBER: process.env.GH_AW_GITHUB_EVENT_ISSUE_NUMBER,
                GH_AW_GITHUB_EVENT_PULL_REQUEST_NUMBER: process.env.GH_AW_GITHUB_EVENT_PULL_REQUEST_NUMBER,
                GH_AW_GITHUB_REPOSITORY: process.env.GH_AW_GITHUB_REPOSITORY,
                GH_AW_GITHUB_RUN_ID: process.env.GH_AW_GITHUB_RUN_ID,
                GH_AW_GITHUB_WORKSPACE: process.env.GH_AW_GITHUB_WORKSPACE,
                GH_AW_NEEDS_PRECOMPUTE_OUTPUTS_ACTION: process.env.GH_AW_NEEDS_PRECOMPUTE_OUTPUTS_ACTION,
                GH_AW_NEEDS_PRECOMPUTE_OUTPUTS_ISSUE_BODY: process.env.GH_AW_NEEDS_PRECOMPUTE_OUTPUTS_ISSUE_BODY,
                GH_AW_NEEDS_PRECOMPUTE_OUTPUTS_ISSUE_NUMBER: process.env.GH_AW_NEEDS_PRECOMPUTE_OUTPUTS_ISSUE_NUMBER,
                GH_AW_NEEDS_PRECOMPUTE_OUTPUTS_ISSUE_TITLE: process.env.GH_AW_NEEDS_PRECOMPUTE_OUTPUTS_ISSUE_TITLE,
                GH_AW_NEEDS_PRE_ACTIVATION_OUTPUTS_ACTIVATED: process.env.GH_AW_NEEDS_PRE_ACTIVATION_OUTPUTS_ACTIVATED
              }
            });
      - name: Validate prompt placeholders
        env:
          GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
        run: bash /opt/gh-aw/actions/validate_prompt_placeholders.sh
      - name: Print prompt
        env:
          GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
        run: bash /opt/gh-aw/actions/print_prompt_summary.sh
      - name: Upload activation artifact
        if: success()
        uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
        with:
          name: activation
          path: |
            /tmp/gh-aw/aw_info.json
            /tmp/gh-aw/aw-prompts/prompt.txt
          retention-days: 1
agent:
needs:
- activation
- precompute
if: >
(needs.precompute.outputs.action != 'none') && ((github.event_name != 'pull_request') || (github.event.pull_request.head.repo.id == github.repository_id))
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
pull-requests: read
env:
DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
GH_AW_ASSETS_ALLOWED_EXTS: ""
GH_AW_ASSETS_BRANCH: ""
GH_AW_ASSETS_MAX_SIZE_KB: 0
GH_AW_MCP_LOG_DIR: /tmp/gh-aw/mcp-logs/safeoutputs
GH_AW_SAFE_OUTPUTS: /opt/gh-aw/safeoutputs/outputs.jsonl
GH_AW_SAFE_OUTPUTS_CONFIG_PATH: /opt/gh-aw/safeoutputs/config.json
GH_AW_SAFE_OUTPUTS_TOOLS_PATH: /opt/gh-aw/safeoutputs/tools.json
GH_AW_WORKFLOW_ID_SANITIZED: botdetection
outputs:
checkout_pr_success: ${{ steps.checkout-pr.outputs.checkout_pr_success || 'true' }}
has_patch: ${{ steps.collect_output.outputs.has_patch }}
model: ${{ needs.activation.outputs.model }}
output: ${{ steps.collect_output.outputs.output }}
output_types: ${{ steps.collect_output.outputs.output_types }}
steps:
- name: Setup Scripts
uses: github/gh-aw/actions/setup@f1073c5498ee46fec1530555a7c953445417c69b # v0.56.2
with:
destination: /opt/gh-aw/actions
- name: Checkout repository
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- name: Create gh-aw temp directory
run: bash /opt/gh-aw/actions/create_gh_aw_tmp_dir.sh
- name: Configure Git credentials
env:
REPO_NAME: ${{ github.repository }}
SERVER_URL: ${{ github.server_url }}
run: |
git config --global user.email "github-actions[bot]@users.noreply.github.com"
git config --global user.name "github-actions[bot]"
git config --global am.keepcr true
# Re-authenticate git with GitHub token
SERVER_URL_STRIPPED="${SERVER_URL#https://}"
git remote set-url origin "https://x-access-token:${{ github.token }}@${SERVER_URL_STRIPPED}/${REPO_NAME}.git"
echo "Git configured with standard GitHub Actions identity"
- name: Checkout PR branch
id: checkout-pr
if: |
(github.event.pull_request) || (github.event.issue.pull_request)
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
env:
GH_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN || secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
with:
github-token: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN || secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
script: |
const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs');
setupGlobals(core, github, context, exec, io);
const { main } = require('/opt/gh-aw/actions/checkout_pr_branch.cjs');
await main();
- name: Setup Node.js
uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6.3.0
with:
node-version: '24'
package-manager-cache: false
- name: Install awf binary
run: bash /opt/gh-aw/actions/install_awf_binary.sh v0.23.0
- name: Install Claude Code CLI
run: npm install -g @anthropic-ai/claude-code@latest
- name: Determine automatic lockdown mode for GitHub MCP Server
id: determine-automatic-lockdown
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
env:
GH_AW_GITHUB_TOKEN: ${{ secrets.GH_AW_GITHUB_TOKEN }}
GH_AW_GITHUB_MCP_SERVER_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN }}
with:
script: |
const determineAutomaticLockdown = require('/opt/gh-aw/actions/determine_automatic_lockdown.cjs');
await determineAutomaticLockdown(github, context, core);
- name: Download container images
run: bash /opt/gh-aw/actions/download_docker_images.sh ghcr.io/github/gh-aw-firewall/agent:0.23.0 ghcr.io/github/gh-aw-firewall/api-proxy:0.23.0 ghcr.io/github/gh-aw-firewall/squid:0.23.0 ghcr.io/github/gh-aw-mcpg:v0.1.8 ghcr.io/github/github-mcp-server:v0.32.0 node:lts-alpine
- name: Write Safe Outputs Config
run: |
mkdir -p /opt/gh-aw/safeoutputs
mkdir -p /tmp/gh-aw/safeoutputs
mkdir -p /tmp/gh-aw/mcp-logs/safeoutputs
cat > /opt/gh-aw/safeoutputs/config.json << 'GH_AW_SAFE_OUTPUTS_CONFIG_EOF'
{"close_issue":{"max":10,"target":"*"},"create_issue":{"max":1},"mentions":{"allowed":["pelikhan"]},"missing_data":{},"missing_tool":{},"noop":{"max":1},"update_issue":{"max":1}}
GH_AW_SAFE_OUTPUTS_CONFIG_EOF
cat > /opt/gh-aw/safeoutputs/tools.json << 'GH_AW_SAFE_OUTPUTS_TOOLS_EOF'
[
{
"description": "Create a new GitHub issue for tracking bugs, feature requests, or tasks. Use this for actionable work items that need assignment, labeling, and status tracking. For reports, announcements, or status updates that don't require task tracking, use create_discussion instead. CONSTRAINTS: Maximum 1 issue(s) can be created. Labels [\"security\" \"bot-detection\"] will be automatically added.",
"inputSchema": {
"additionalProperties": false,
"properties": {
"body": {
"description": "Detailed issue description in Markdown. Do NOT repeat the title as a heading since it already appears as the issue's h1. Include context, reproduction steps, or acceptance criteria as appropriate.",
"type": "string"
},
"integrity": {
"description": "Trustworthiness level of the message source (e.g., \"low\", \"medium\", \"high\").",
"type": "string"
},
"labels": {
"description": "Labels to categorize the issue (e.g., 'bug', 'enhancement'). Labels must exist in the repository.",
"items": {
"type": "string"
},
"type": "array"
},
"parent": {
"description": "Parent issue number for creating sub-issues. This is the numeric ID from the GitHub URL (e.g., 42 in github.com/owner/repo/issues/42). Can also be a temporary_id (e.g., 'aw_abc123', 'aw_Test123') from a previously created issue in the same workflow run.",
"type": [
"number",
"string"
]
},
"secrecy": {
"description": "Confidentiality level of the message content (e.g., \"public\", \"internal\", \"private\").",
"type": "string"
},
"temporary_id": {
"description": "Unique temporary identifier for referencing this issue before it's created. Format: 'aw_' followed by 3 to 12 alphanumeric characters (e.g., 'aw_abc1', 'aw_Test123'). Use '#aw_ID' in body text to reference other issues by their temporary_id; these are replaced with actual issue numbers after creation.",
"pattern": "^aw_[A-Za-z0-9]{3,12}$",
"type": "string"
},
"title": {
"description": "Concise issue title summarizing the bug, feature, or task. The title appears as the main heading, so keep it brief and descriptive.",
"type": "string"
}
},
"required": [
"title",
"body"
],
"type": "object"
},
"name": "create_issue"
},
{
"description": "Close a GitHub issue with a closing comment. You can and should always add a comment when closing an issue to explain the action or provide context. This tool is ONLY for closing issues - use update_issue if you need to change the title, body, labels, or other metadata without closing. Use close_issue when work is complete, the issue is no longer relevant, or it's a duplicate. The closing comment should explain the resolution or reason for closing. If the issue is already closed, a comment will still be posted. CONSTRAINTS: Maximum 10 issue(s) can be closed. Target: *.",
"inputSchema": {
"additionalProperties": false,
"properties": {
"body": {
"description": "Closing comment explaining why the issue is being closed and summarizing any resolution, workaround, or conclusion.",
"type": "string"
},
"integrity": {
"description": "Trustworthiness level of the message source (e.g., \"low\", \"medium\", \"high\").",
"type": "string"
},
"issue_number": {
"description": "Issue number to close. This is the numeric ID from the GitHub URL (e.g., 901 in github.com/owner/repo/issues/901). If omitted, closes the issue that triggered this workflow (requires an issue event trigger).",
"type": [
"number",
"string"
]
},
"secrecy": {
"description": "Confidentiality level of the message content (e.g., \"public\", \"internal\", \"private\").",
"type": "string"
}
},
"required": [
"body"
],
"type": "object"
},
"name": "close_issue"
},
{
"description": "Close a pull request WITHOUT merging, adding a closing comment. You can and should always add a comment when closing a PR to explain the action or provide context. Use this for PRs that should be abandoned, superseded, or closed for other reasons. The closing comment should explain why the PR is being closed. This does NOT merge the changes. If the PR is already closed, a comment will still be posted. CONSTRAINTS: Maximum 10 pull request(s) can be closed. Target: *.",
"inputSchema": {
"additionalProperties": false,
"properties": {
"body": {
"description": "Closing comment explaining why the PR is being closed without merging (e.g., superseded by another PR, no longer needed, approach rejected).",
"type": "string"
},
"integrity": {
"description": "Trustworthiness level of the message source (e.g., \"low\", \"medium\", \"high\").",
"type": "string"
},
"pull_request_number": {
"description": "Pull request number to close. This is the numeric ID from the GitHub URL (e.g., 432 in github.com/owner/repo/pull/432). If omitted, closes the PR that triggered this workflow (requires a pull_request event trigger).",
"type": [
"number",
"string"
]
},
"secrecy": {
"description": "Confidentiality level of the message content (e.g., \"public\", \"internal\", \"private\").",
"type": "string"
}
},
"required": [
"body"
],
"type": "object"
},
"name": "close_pull_request"
},
{
"description": "Update an existing GitHub issue's title, body, labels, assignees, or milestone WITHOUT closing it. This tool is primarily for editing issue metadata and content. While it supports changing status between 'open' and 'closed', use close_issue instead when you want to close an issue with a closing comment. Body updates support replacing, appending to, prepending content, or updating a per-run \"island\" section. CONSTRAINTS: Maximum 1 issue(s) can be updated. Target: *. Body updates are allowed.",
"inputSchema": {
"additionalProperties": false,
"properties": {
"assignees": {
"description": "Replace the issue assignees with this list of GitHub usernames (e.g., ['octocat', 'mona']).",
"items": {
"type": "string"
},
"type": "array"
},
"body": {
"description": "Issue body content in Markdown. For 'replace', this becomes the entire body. For 'append'/'prepend', this content is added with a separator and an attribution footer. For 'replace-island', only the run-specific section is updated.",
"type": "string"
},
"integrity": {
"description": "Trustworthiness level of the message source (e.g., \"low\", \"medium\", \"high\").",
"type": "string"
},
"issue_number": {
"description": "Issue number to update. This is the numeric ID from the GitHub URL (e.g., 789 in github.com/owner/repo/issues/789). Required when the workflow target is '*' (any issue).",
"type": [
"number",
"string"
]
},
"labels": {
"description": "Replace the issue labels with this list (e.g., ['bug', 'tracking:foo']). Labels must exist in the repository.",
"items": {
"type": "string"
},
"type": "array"
},
"milestone": {
"description": "Milestone number to assign (e.g., 1). Use null to clear.",
"type": [
"number",
"string"
]
},
"operation": {
"description": "How to update the issue body: 'append' (default - add to end with separator), 'prepend' (add to start with separator), 'replace' (overwrite entire body), or 'replace-island' (update a run-specific section).",
"enum": [
"replace",
"append",
"prepend",
"replace-island"
],
"type": "string"
},
"secrecy": {
"description": "Confidentiality level of the message content (e.g., \"public\", \"internal\", \"private\").",
"type": "string"
},
"status": {
"description": "New issue status: 'open' to reopen a closed issue, 'closed' to close an open issue.",
"enum": [
"open",
"closed"
],
"type": "string"
},
"title": {
"description": "New issue title to replace the existing title.",
"type": "string"
}
},
"type": "object"
},
"name": "update_issue"
},
{
"description": "Report that a tool or capability needed to complete the task is not available, or share any information you deem important about missing functionality or limitations. Use this when you cannot accomplish what was requested because the required functionality is missing or access is restricted.",
"inputSchema": {
"additionalProperties": false,
"properties": {
"alternatives": {
"description": "Any workarounds, manual steps, or alternative approaches the user could take (max 256 characters).",
"type": "string"
},
"integrity": {
"description": "Trustworthiness level of the message source (e.g., \"low\", \"medium\", \"high\").",
"type": "string"
},
"reason": {
"description": "Explanation of why this tool is needed or what information you want to share about the limitation (max 256 characters).",
"type": "string"
},
"secrecy": {
"description": "Confidentiality level of the message content (e.g., \"public\", \"internal\", \"private\").",
"type": "string"
},
"tool": {
"description": "Optional: Name or description of the missing tool or capability (max 128 characters). Be specific about what functionality is needed.",
"type": "string"
}
},
"required": [
"reason"
],
"type": "object"
},
"name": "missing_tool"
},
{
"description": "Log a transparency message when no significant actions are needed. Use this to confirm workflow completion and provide visibility when analysis is complete but no changes or outputs are required (e.g., 'No issues found', 'All checks passed'). This ensures the workflow produces human-visible output even when no other actions are taken.",
"inputSchema": {
"additionalProperties": false,
"properties": {
"integrity": {
"description": "Trustworthiness level of the message source (e.g., \"low\", \"medium\", \"high\").",
"type": "string"
},
"message": {
"description": "Status or completion message to log. Should explain what was analyzed and the outcome (e.g., 'Code review complete - no issues found', 'Analysis complete - all tests passing').",
"type": "string"
},
"secrecy": {
"description": "Confidentiality level of the message content (e.g., \"public\", \"internal\", \"private\").",
"type": "string"
}
},
"required": [
"message"
],
"type": "object"
},
"name": "noop"
},
{
"description": "Report that data or information needed to complete the task is not available. Use this when you cannot accomplish what was requested because required data, context, or information is missing.",
"inputSchema": {
"additionalProperties": false,
"properties": {
"alternatives": {
"description": "Any workarounds, manual steps, or alternative approaches the user could take (max 256 characters).",
"type": "string"
},
"context": {
"description": "Additional context about the missing data or where it should come from (max 256 characters).",
"type": "string"
},
"data_type": {
"description": "Type or description of the missing data or information (max 128 characters). Be specific about what data is needed.",
"type": "string"
},
"integrity": {
"description": "Trustworthiness level of the message source (e.g., \"low\", \"medium\", \"high\").",
"type": "string"
},
"reason": {
"description": "Explanation of why this data is needed to complete the task (max 256 characters).",
"type": "string"
},
"secrecy": {
"description": "Confidentiality level of the message content (e.g., \"public\", \"internal\", \"private\").",
"type": "string"
}
},
"required": [],
"type": "object"
},
"name": "missing_data"
}
]
GH_AW_SAFE_OUTPUTS_TOOLS_EOF
cat > /opt/gh-aw/safeoutputs/validation.json << 'GH_AW_SAFE_OUTPUTS_VALIDATION_EOF'
{
"close_issue": {
"defaultMax": 1,
"fields": {
"body": {
"required": true,
"type": "string",
"sanitize": true,
"maxLength": 65000
},
"issue_number": {
"optionalPositiveInteger": true
},
"repo": {
"type": "string",
"maxLength": 256
}
}
},
"create_issue": {
"defaultMax": 1,
"fields": {
"body": {
"required": true,
"type": "string",
"sanitize": true,
"maxLength": 65000
},
"labels": {
"type": "array",
"itemType": "string",
"itemSanitize": true,
"itemMaxLength": 128
},
"parent": {
"issueOrPRNumber": true
},
"repo": {
"type": "string",
"maxLength": 256
},
"temporary_id": {
"type": "string"
},
"title": {
"required": true,
"type": "string",
"sanitize": true,
"maxLength": 128
}
}
},
"missing_data": {
"defaultMax": 20,
"fields": {
"alternatives": {
"type": "string",
"sanitize": true,
"maxLength": 256
},
"context": {
"type": "string",
"sanitize": true,
"maxLength": 256
},
"data_type": {
"type": "string",
"sanitize": true,
"maxLength": 128
},
"reason": {
"type": "string",
"sanitize": true,
"maxLength": 256
}
}
},
"missing_tool": {
"defaultMax": 20,
"fields": {
"alternatives": {
"type": "string",
"sanitize": true,
"maxLength": 512
},
"reason": {
"required": true,
"type": "string",
"sanitize": true,
"maxLength": 256
},
"tool": {
"type": "string",
"sanitize": true,
"maxLength": 128
}
}
},
"noop": {
"defaultMax": 1,
"fields": {
"message": {
"required": true,
"type": "string",
"sanitize": true,
"maxLength": 65000
}
}
},
"update_issue": {
"defaultMax": 1,
"fields": {
"assignees": {
"type": "array",
"itemType": "string",
"itemSanitize": true,
"itemMaxLength": 39
},
"body": {
"type": "string",
"sanitize": true,
"maxLength": 65000
},
"issue_number": {
"issueOrPRNumber": true
},
"labels": {
"type": "array",
"itemType": "string",
"itemSanitize": true,
"itemMaxLength": 128
},
"milestone": {
"optionalPositiveInteger": true
},
"operation": {
"type": "string",
"enum": [
"replace",
"append",
"prepend",
"replace-island"
]
},
"repo": {
"type": "string",
"maxLength": 256
},
"status": {
"type": "string",
"enum": [
"open",
"closed"
]
},
"title": {
"type": "string",
"sanitize": true,
"maxLength": 128
}
},
"customValidation": "requiresOneOf:status,title,body"
}
}
GH_AW_SAFE_OUTPUTS_VALIDATION_EOF
- name: Generate Safe Outputs MCP Server Config
id: safe-outputs-config
run: |
# Generate a secure random API key (360 bits of entropy, 40+ chars)
# Mask immediately to prevent timing vulnerabilities
API_KEY=$(openssl rand -base64 45 | tr -d '/+=')
echo "::add-mask::${API_KEY}"
PORT=3001
# Set outputs for next steps
{
echo "safe_outputs_api_key=${API_KEY}"
echo "safe_outputs_port=${PORT}"
} >> "$GITHUB_OUTPUT"
echo "Safe Outputs MCP server will run on port ${PORT}"
- name: Start Safe Outputs MCP HTTP Server
id: safe-outputs-start
env:
DEBUG: '*'
GH_AW_SAFE_OUTPUTS_PORT: ${{ steps.safe-outputs-config.outputs.safe_outputs_port }}
GH_AW_SAFE_OUTPUTS_API_KEY: ${{ steps.safe-outputs-config.outputs.safe_outputs_api_key }}
GH_AW_SAFE_OUTPUTS_TOOLS_PATH: /opt/gh-aw/safeoutputs/tools.json
GH_AW_SAFE_OUTPUTS_CONFIG_PATH: /opt/gh-aw/safeoutputs/config.json
GH_AW_MCP_LOG_DIR: /tmp/gh-aw/mcp-logs/safeoutputs
run: |
# Environment variables are set above to prevent template injection
export DEBUG
export GH_AW_SAFE_OUTPUTS_PORT
export GH_AW_SAFE_OUTPUTS_API_KEY
export GH_AW_SAFE_OUTPUTS_TOOLS_PATH
export GH_AW_SAFE_OUTPUTS_CONFIG_PATH
export GH_AW_MCP_LOG_DIR
bash /opt/gh-aw/actions/start_safe_outputs_server.sh
- name: Start MCP Gateway
id: start-mcp-gateway
env:
GH_AW_SAFE_OUTPUTS: ${{ env.GH_AW_SAFE_OUTPUTS }}
GH_AW_SAFE_OUTPUTS_API_KEY: ${{ steps.safe-outputs-start.outputs.api_key }}
GH_AW_SAFE_OUTPUTS_PORT: ${{ steps.safe-outputs-start.outputs.port }}
GITHUB_MCP_LOCKDOWN: ${{ steps.determine-automatic-lockdown.outputs.lockdown == 'true' && '1' || '0' }}
GITHUB_MCP_SERVER_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN || secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
run: |
set -eo pipefail
mkdir -p /tmp/gh-aw/mcp-config
# Export gateway environment variables for MCP config and gateway script
export MCP_GATEWAY_PORT="80"
export MCP_GATEWAY_DOMAIN="host.docker.internal"
MCP_GATEWAY_API_KEY=$(openssl rand -base64 45 | tr -d '/+=')
echo "::add-mask::${MCP_GATEWAY_API_KEY}"
export MCP_GATEWAY_API_KEY
export MCP_GATEWAY_PAYLOAD_DIR="/tmp/gh-aw/mcp-payloads"
mkdir -p "${MCP_GATEWAY_PAYLOAD_DIR}"
export MCP_GATEWAY_PAYLOAD_SIZE_THRESHOLD="524288"
export DEBUG="*"
export GH_AW_ENGINE="claude"
export MCP_GATEWAY_DOCKER_COMMAND='docker run -i --rm --network host -v /var/run/docker.sock:/var/run/docker.sock -e MCP_GATEWAY_PORT -e MCP_GATEWAY_DOMAIN -e MCP_GATEWAY_API_KEY -e MCP_GATEWAY_PAYLOAD_DIR -e MCP_GATEWAY_PAYLOAD_SIZE_THRESHOLD -e DEBUG -e MCP_GATEWAY_LOG_DIR -e GH_AW_MCP_LOG_DIR -e GH_AW_SAFE_OUTPUTS -e GH_AW_SAFE_OUTPUTS_CONFIG_PATH -e GH_AW_SAFE_OUTPUTS_TOOLS_PATH -e GH_AW_ASSETS_BRANCH -e GH_AW_ASSETS_MAX_SIZE_KB -e GH_AW_ASSETS_ALLOWED_EXTS -e DEFAULT_BRANCH -e GITHUB_MCP_SERVER_TOKEN -e GITHUB_MCP_LOCKDOWN -e GITHUB_REPOSITORY -e GITHUB_SERVER_URL -e GITHUB_SHA -e GITHUB_WORKSPACE -e GITHUB_TOKEN -e GITHUB_RUN_ID -e GITHUB_RUN_NUMBER -e GITHUB_RUN_ATTEMPT -e GITHUB_JOB -e GITHUB_ACTION -e GITHUB_EVENT_NAME -e GITHUB_EVENT_PATH -e GITHUB_ACTOR -e GITHUB_ACTOR_ID -e GITHUB_TRIGGERING_ACTOR -e GITHUB_WORKFLOW -e GITHUB_WORKFLOW_REF -e GITHUB_WORKFLOW_SHA -e GITHUB_REF -e GITHUB_REF_NAME -e GITHUB_REF_TYPE -e GITHUB_HEAD_REF -e GITHUB_BASE_REF -e GH_AW_SAFE_OUTPUTS_PORT -e GH_AW_SAFE_OUTPUTS_API_KEY -v /tmp/gh-aw/mcp-payloads:/tmp/gh-aw/mcp-payloads:rw -v /opt:/opt:ro -v /tmp:/tmp:rw -v '"${GITHUB_WORKSPACE}"':'"${GITHUB_WORKSPACE}"':rw ghcr.io/github/gh-aw-mcpg:v0.1.8'
cat << GH_AW_MCP_CONFIG_EOF | bash /opt/gh-aw/actions/start_mcp_gateway.sh
{
"mcpServers": {
"github": {
"container": "ghcr.io/github/github-mcp-server:v0.32.0",
"env": {
"GITHUB_LOCKDOWN_MODE": "$GITHUB_MCP_LOCKDOWN",
"GITHUB_PERSONAL_ACCESS_TOKEN": "$GITHUB_MCP_SERVER_TOKEN",
"GITHUB_READ_ONLY": "1",
"GITHUB_TOOLSETS": "context,repos,issues,pull_requests"
}
},
"safeoutputs": {
"type": "http",
"url": "http://host.docker.internal:$GH_AW_SAFE_OUTPUTS_PORT",
"headers": {
"Authorization": "$GH_AW_SAFE_OUTPUTS_API_KEY"
}
}
},
"gateway": {
"port": $MCP_GATEWAY_PORT,
"domain": "${MCP_GATEWAY_DOMAIN}",
"apiKey": "${MCP_GATEWAY_API_KEY}",
"payloadDir": "${MCP_GATEWAY_PAYLOAD_DIR}"
}
}
GH_AW_MCP_CONFIG_EOF
- name: Download activation artifact
uses: actions/download-artifact@70fc10c6e5e1ce46ad2ea6f2b72d43f7d47b13c3 # v8
with:
name: activation
path: /tmp/gh-aw
- name: Clean git credentials
run: bash /opt/gh-aw/actions/clean_git_credentials.sh
- name: Execute Claude Code CLI
id: agentic_execution
# Allowed tools (sorted):
# - Bash
# - BashOutput
# - Edit
# - ExitPlanMode
# - Glob
# - Grep
# - KillBash
# - LS
# - MultiEdit
# - NotebookEdit
# - NotebookRead
# - Read
# - Task
# - TodoWrite
# - Write
# - mcp__github__download_workflow_run_artifact
# - mcp__github__get_code_scanning_alert
# - mcp__github__get_commit
# - mcp__github__get_dependabot_alert
# - mcp__github__get_discussion
# - mcp__github__get_discussion_comments
# - mcp__github__get_file_contents
# - mcp__github__get_job_logs
# - mcp__github__get_label
# - mcp__github__get_latest_release
# - mcp__github__get_me
# - mcp__github__get_notification_details
# - mcp__github__get_pull_request
# - mcp__github__get_pull_request_comments
# - mcp__github__get_pull_request_diff
# - mcp__github__get_pull_request_files
# - mcp__github__get_pull_request_review_comments
# - mcp__github__get_pull_request_reviews
# - mcp__github__get_pull_request_status
# - mcp__github__get_release_by_tag
# - mcp__github__get_secret_scanning_alert
# - mcp__github__get_tag
# - mcp__github__get_workflow_run
# - mcp__github__get_workflow_run_logs
# - mcp__github__get_workflow_run_usage
# - mcp__github__issue_read
# - mcp__github__list_branches
# - mcp__github__list_code_scanning_alerts
# - mcp__github__list_commits
# - mcp__github__list_dependabot_alerts
# - mcp__github__list_discussion_categories
# - mcp__github__list_discussions
# - mcp__github__list_issue_types
# - mcp__github__list_issues
# - mcp__github__list_label
# - mcp__github__list_notifications
# - mcp__github__list_pull_requests
# - mcp__github__list_releases
# - mcp__github__list_secret_scanning_alerts
# - mcp__github__list_starred_repositories
# - mcp__github__list_tags
# - mcp__github__list_workflow_jobs
# - mcp__github__list_workflow_run_artifacts
# - mcp__github__list_workflow_runs
# - mcp__github__list_workflows
# - mcp__github__pull_request_read
# - mcp__github__search_code
# - mcp__github__search_issues
# - mcp__github__search_orgs
# - mcp__github__search_pull_requests
# - mcp__github__search_repositories
# - mcp__github__search_users
timeout-minutes: 10
run: |
set -o pipefail
touch /tmp/gh-aw/agent-step-summary.md
# shellcheck disable=SC1003
sudo -E awf --tty --env-all --container-workdir "${GITHUB_WORKSPACE}" --allow-domains "*.githubusercontent.com,anthropic.com,api.anthropic.com,api.github.com,api.snapcraft.io,archive.ubuntu.com,azure.archive.ubuntu.com,cdn.playwright.dev,codeload.github.com,crl.geotrust.com,crl.globalsign.com,crl.identrust.com,crl.sectigo.com,crl.thawte.com,crl.usertrust.com,crl.verisign.com,crl3.digicert.com,crl4.digicert.com,crls.ssl.com,files.pythonhosted.org,ghcr.io,github-cloud.githubusercontent.com,github-cloud.s3.amazonaws.com,github.com,host.docker.internal,json-schema.org,json.schemastore.org,keyserver.ubuntu.com,lfs.github.com,objects.githubusercontent.com,ocsp.digicert.com,ocsp.geotrust.com,ocsp.globalsign.com,ocsp.identrust.com,ocsp.sectigo.com,ocsp.ssl.com,ocsp.thawte.com,ocsp.usertrust.com,ocsp.verisign.com,packagecloud.io,packages.cloud.google.com,packages.microsoft.com,playwright.download.prss.microsoft.com,ppa.launchpad.net,pypi.org,raw.githubusercontent.com,registry.npmjs.org,s.symcb.com,s.symcd.com,security.ubuntu.com,sentry.io,statsig.anthropic.com,ts-crl.ws.symantec.com,ts-ocsp.ws.symantec.com" --log-level info --proxy-logs-dir /tmp/gh-aw/sandbox/firewall/logs --enable-host-access --image-tag 0.23.0 --skip-pull --enable-api-proxy \
-- /bin/bash -c 'export PATH="$(find /opt/hostedtoolcache -maxdepth 4 -type d -name bin 2>/dev/null | tr '\''\n'\'' '\'':'\'')$PATH"; [ -n "$GOROOT" ] && export PATH="$GOROOT/bin:$PATH" || true && claude --print --disable-slash-commands --no-chrome --mcp-config /tmp/gh-aw/mcp-config/mcp-servers.json --allowed-tools Bash,BashOutput,Edit,ExitPlanMode,Glob,Grep,KillBash,LS,MultiEdit,NotebookEdit,NotebookRead,Read,Task,TodoWrite,Write,mcp__github__download_workflow_run_artifact,mcp__github__get_code_scanning_alert,mcp__github__get_commit,mcp__github__get_dependabot_alert,mcp__github__get_discussion,mcp__github__get_discussion_comments,mcp__github__get_file_contents,mcp__github__get_job_logs,mcp__github__get_label,mcp__github__get_latest_release,mcp__github__get_me,mcp__github__get_notification_details,mcp__github__get_pull_request,mcp__github__get_pull_request_comments,mcp__github__get_pull_request_diff,mcp__github__get_pull_request_files,mcp__github__get_pull_request_review_comments,mcp__github__get_pull_request_reviews,mcp__github__get_pull_request_status,mcp__github__get_release_by_tag,mcp__github__get_secret_scanning_alert,mcp__github__get_tag,mcp__github__get_workflow_run,mcp__github__get_workflow_run_logs,mcp__github__get_workflow_run_usage,mcp__github__issue_read,mcp__github__list_branches,mcp__github__list_code_scanning_alerts,mcp__github__list_commits,mcp__github__list_dependabot_alerts,mcp__github__list_discussion_categories,mcp__github__list_discussions,mcp__github__list_issue_types,mcp__github__list_issues,mcp__github__list_label,mcp__github__list_notifications,mcp__github__list_pull_requests,mcp__github__list_releases,mcp__github__list_secret_scanning_alerts,mcp__github__list_starred_repositories,mcp__github__list_tags,mcp__github__list_workflow_jobs,mcp__github__list_workflow_run_artifacts,mcp__github__list_workflow_runs,mcp__github__list_workflows,mcp__github__pull_request_read,mcp__github__search_code,mcp__github__search_issues,mcp__github__search_orgs,mcp__github__search_pull_requests,mcp__github__search_repositories,mcp__github__search_users --debug-file /tmp/gh-aw/agent-stdio.log --verbose --permission-mode bypassPermissions --output-format stream-json "$(cat /tmp/gh-aw/aw-prompts/prompt.txt)"${GH_AW_MODEL_AGENT_CLAUDE:+ --model "$GH_AW_MODEL_AGENT_CLAUDE"}' 2>&1 | tee -a /tmp/gh-aw/agent-stdio.log
env:
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
BASH_DEFAULT_TIMEOUT_MS: 60000
BASH_MAX_TIMEOUT_MS: 60000
DISABLE_BUG_COMMAND: 1
DISABLE_ERROR_REPORTING: 1
DISABLE_TELEMETRY: 1
GH_AW_MCP_CONFIG: /tmp/gh-aw/mcp-config/mcp-servers.json
GH_AW_MODEL_AGENT_CLAUDE: ${{ vars.GH_AW_MODEL_AGENT_CLAUDE || '' }}
GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
GH_AW_SAFE_OUTPUTS: ${{ env.GH_AW_SAFE_OUTPUTS }}
GITHUB_STEP_SUMMARY: /tmp/gh-aw/agent-step-summary.md
GITHUB_WORKSPACE: ${{ github.workspace }}
GIT_AUTHOR_EMAIL: github-actions[bot]@users.noreply.github.com
GIT_AUTHOR_NAME: github-actions[bot]
GIT_COMMITTER_EMAIL: github-actions[bot]@users.noreply.github.com
GIT_COMMITTER_NAME: github-actions[bot]
MCP_TIMEOUT: 120000
MCP_TOOL_TIMEOUT: 60000
- name: Configure Git credentials
env:
REPO_NAME: ${{ github.repository }}
SERVER_URL: ${{ github.server_url }}
run: |
git config --global user.email "github-actions[bot]@users.noreply.github.com"
git config --global user.name "github-actions[bot]"
git config --global am.keepcr true
# Re-authenticate git with GitHub token
SERVER_URL_STRIPPED="${SERVER_URL#https://}"
git remote set-url origin "https://x-access-token:${{ github.token }}@${SERVER_URL_STRIPPED}/${REPO_NAME}.git"
echo "Git configured with standard GitHub Actions identity"
- name: Stop MCP Gateway
if: always()
continue-on-error: true
env:
MCP_GATEWAY_PORT: ${{ steps.start-mcp-gateway.outputs.gateway-port }}
MCP_GATEWAY_API_KEY: ${{ steps.start-mcp-gateway.outputs.gateway-api-key }}
GATEWAY_PID: ${{ steps.start-mcp-gateway.outputs.gateway-pid }}
run: |
bash /opt/gh-aw/actions/stop_mcp_gateway.sh "$GATEWAY_PID"
- name: Redact secrets in logs
if: always()
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
with:
script: |
const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs');
setupGlobals(core, github, context, exec, io);
const { main } = require('/opt/gh-aw/actions/redact_secrets.cjs');
await main();
env:
GH_AW_SECRET_NAMES: 'ANTHROPIC_API_KEY,GH_AW_GITHUB_MCP_SERVER_TOKEN,GH_AW_GITHUB_TOKEN,GITHUB_TOKEN'
SECRET_ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
SECRET_GH_AW_GITHUB_MCP_SERVER_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN }}
SECRET_GH_AW_GITHUB_TOKEN: ${{ secrets.GH_AW_GITHUB_TOKEN }}
SECRET_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Append agent step summary
if: always()
run: bash /opt/gh-aw/actions/append_agent_step_summary.sh
- name: Upload Safe Outputs
if: always()
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
with:
name: safe-output
path: ${{ env.GH_AW_SAFE_OUTPUTS }}
if-no-files-found: warn
- name: Ingest agent output
id: collect_output
if: always()
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
env:
GH_AW_SAFE_OUTPUTS: ${{ env.GH_AW_SAFE_OUTPUTS }}
GH_AW_ALLOWED_DOMAINS: "*.githubusercontent.com,anthropic.com,api.anthropic.com,api.github.com,api.snapcraft.io,archive.ubuntu.com,azure.archive.ubuntu.com,cdn.playwright.dev,codeload.github.com,crl.geotrust.com,crl.globalsign.com,crl.identrust.com,crl.sectigo.com,crl.thawte.com,crl.usertrust.com,crl.verisign.com,crl3.digicert.com,crl4.digicert.com,crls.ssl.com,files.pythonhosted.org,ghcr.io,github-cloud.githubusercontent.com,github-cloud.s3.amazonaws.com,github.com,host.docker.internal,json-schema.org,json.schemastore.org,keyserver.ubuntu.com,lfs.github.com,objects.githubusercontent.com,ocsp.digicert.com,ocsp.geotrust.com,ocsp.globalsign.com,ocsp.identrust.com,ocsp.sectigo.com,ocsp.ssl.com,ocsp.thawte.com,ocsp.usertrust.com,ocsp.verisign.com,packagecloud.io,packages.cloud.google.com,packages.microsoft.com,playwright.download.prss.microsoft.com,ppa.launchpad.net,pypi.org,raw.githubusercontent.com,registry.npmjs.org,s.symcb.com,s.symcd.com,security.ubuntu.com,sentry.io,statsig.anthropic.com,ts-crl.ws.symantec.com,ts-ocsp.ws.symantec.com"
GITHUB_SERVER_URL: ${{ github.server_url }}
GITHUB_API_URL: ${{ github.api_url }}
with:
script: |
const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs');
setupGlobals(core, github, context, exec, io);
const { main } = require('/opt/gh-aw/actions/collect_ndjson_output.cjs');
await main();
- name: Upload sanitized agent output
if: always() && env.GH_AW_AGENT_OUTPUT
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
with:
name: agent-output
path: ${{ env.GH_AW_AGENT_OUTPUT }}
if-no-files-found: warn
- name: Parse agent logs for step summary
if: always()
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
env:
GH_AW_AGENT_OUTPUT: /tmp/gh-aw/agent-stdio.log
with:
script: |
const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs');
setupGlobals(core, github, context, exec, io);
const { main } = require('/opt/gh-aw/actions/parse_claude_log.cjs');
await main();
- name: Parse MCP Gateway logs for step summary
if: always()
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
with:
script: |
const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs');
setupGlobals(core, github, context, exec, io);
const { main } = require('/opt/gh-aw/actions/parse_mcp_gateway_log.cjs');
await main();
- name: Print firewall logs
if: always()
continue-on-error: true
env:
AWF_LOGS_DIR: /tmp/gh-aw/sandbox/firewall/logs
run: |
# Fix permissions on firewall logs so they can be uploaded as artifacts
# AWF runs with sudo, creating files owned by root
sudo chmod -R a+r /tmp/gh-aw/sandbox/firewall/logs 2>/dev/null || true
# Only run awf logs summary if awf command exists (it may not be installed if workflow failed before install step)
if command -v awf &> /dev/null; then
awf logs summary | tee -a "$GITHUB_STEP_SUMMARY"
else
echo 'AWF binary not installed, skipping firewall log summary'
fi
- name: Upload agent artifacts
if: always()
continue-on-error: true
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
with:
name: agent-artifacts
path: |
/tmp/gh-aw/aw-prompts/prompt.txt
/tmp/gh-aw/mcp-logs/
/tmp/gh-aw/sandbox/firewall/logs/
/tmp/gh-aw/agent-stdio.log
/tmp/gh-aw/agent/
if-no-files-found: ignore
# Post-run job: summarizes the agent run, records any tools the agent reported
# missing, and files failure / no-op notices. Runs whenever the agent job was
# not skipped (including failures, via always()).
conclusion:
  needs:
    - activation
    - agent
    - safe_outputs
  if: (always()) && (needs.agent.result != 'skipped')
  runs-on: ubuntu-slim
  permissions:
    contents: read
    issues: write
    pull-requests: write
  concurrency:
    # Serialize conclusion runs for this workflow so issue updates don't race.
    group: "gh-aw-conclusion-bot-detection"
    cancel-in-progress: false
  outputs:
    noop_message: ${{ steps.noop.outputs.noop_message }}
    tools_reported: ${{ steps.missing_tool.outputs.tools_reported }}
    total_count: ${{ steps.missing_tool.outputs.total_count }}
  steps:
    - name: Setup Scripts
      uses: github/gh-aw/actions/setup@f1073c5498ee46fec1530555a7c953445417c69b # v0.56.2
      with:
        destination: /opt/gh-aw/actions
    - name: Download agent output artifact
      id: download-agent-output
      # The artifact may not exist if the agent failed early; tolerate that.
      continue-on-error: true
      uses: actions/download-artifact@70fc10c6e5e1ce46ad2ea6f2b72d43f7d47b13c3 # v8
      with:
        name: agent-output
        path: /tmp/gh-aw/safeoutputs/
    - name: Setup agent output environment variable
      # Only exported when the download above succeeded.
      if: steps.download-agent-output.outcome == 'success'
      run: |
        mkdir -p /tmp/gh-aw/safeoutputs/
        find "/tmp/gh-aw/safeoutputs/" -type f -print
        echo "GH_AW_AGENT_OUTPUT=/tmp/gh-aw/safeoutputs/agent_output.json" >> "$GITHUB_ENV"
    - name: Process No-Op Messages
      id: noop
      uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
      env:
        GH_AW_AGENT_OUTPUT: ${{ env.GH_AW_AGENT_OUTPUT }}
        GH_AW_NOOP_MAX: "1"
        GH_AW_WORKFLOW_NAME: "Bot Detection"
      with:
        github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
        script: |
          const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs');
          setupGlobals(core, github, context, exec, io);
          const { main } = require('/opt/gh-aw/actions/noop.cjs');
          await main();
    - name: Record Missing Tool
      id: missing_tool
      uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
      env:
        GH_AW_AGENT_OUTPUT: ${{ env.GH_AW_AGENT_OUTPUT }}
        GH_AW_WORKFLOW_NAME: "Bot Detection"
      with:
        github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
        script: |
          const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs');
          setupGlobals(core, github, context, exec, io);
          const { main } = require('/opt/gh-aw/actions/missing_tool.cjs');
          await main();
    - name: Handle Agent Failure
      id: handle_agent_failure
      uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
      env:
        GH_AW_AGENT_OUTPUT: ${{ env.GH_AW_AGENT_OUTPUT }}
        GH_AW_WORKFLOW_NAME: "Bot Detection"
        GH_AW_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
        GH_AW_AGENT_CONCLUSION: ${{ needs.agent.result }}
        GH_AW_WORKFLOW_ID: "bot-detection"
        GH_AW_SECRET_VERIFICATION_RESULT: ${{ needs.activation.outputs.secret_verification_result }}
        GH_AW_CHECKOUT_PR_SUCCESS: ${{ needs.agent.outputs.checkout_pr_success }}
        GH_AW_GROUP_REPORTS: "false"
        GH_AW_TIMEOUT_MINUTES: "10"
      with:
        github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
        script: |
          const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs');
          setupGlobals(core, github, context, exec, io);
          const { main } = require('/opt/gh-aw/actions/handle_agent_failure.cjs');
          await main();
    - name: Handle No-Op Message
      id: handle_noop_message
      uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
      env:
        GH_AW_AGENT_OUTPUT: ${{ env.GH_AW_AGENT_OUTPUT }}
        GH_AW_WORKFLOW_NAME: "Bot Detection"
        GH_AW_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
        GH_AW_AGENT_CONCLUSION: ${{ needs.agent.result }}
        GH_AW_NOOP_MESSAGE: ${{ steps.noop.outputs.noop_message }}
        GH_AW_NOOP_REPORT_AS_ISSUE: "true"
      with:
        github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
        script: |
          const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs');
          setupGlobals(core, github, context, exec, io);
          const { main } = require('/opt/gh-aw/actions/handle_noop_message.cjs');
          await main();
# Gate job: the workflow only activates when the triggering actor holds one of
# the roles listed in GH_AW_REQUIRED_ROLES (checked by check_membership.cjs).
pre_activation:
  runs-on: ubuntu-slim
  outputs:
    # 'true' only when the membership check confirms the actor's role.
    activated: ${{ steps.check_membership.outputs.is_team_member == 'true' }}
    matched_command: ''
  steps:
    - name: Setup Scripts
      uses: github/gh-aw/actions/setup@f1073c5498ee46fec1530555a7c953445417c69b # v0.56.2
      with:
        destination: /opt/gh-aw/actions
    - name: Check team membership for workflow
      id: check_membership
      uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
      env:
        # Repository roles accepted as "team member" by check_membership.cjs.
        GH_AW_REQUIRED_ROLES: admin,maintainer,write
      with:
        github-token: ${{ secrets.GITHUB_TOKEN }}
        script: |
          const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs');
          setupGlobals(core, github, context, exec, io);
          const { main } = require('/opt/gh-aw/actions/check_membership.cjs');
          await main();
precompute:
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
issues: write
pull-requests: write
outputs:
action: ${{ steps.precompute.outputs.action }}
issue_body: ${{ steps.precompute.outputs.issue_body }}
issue_number: ${{ steps.precompute.outputs.issue_number }}
issue_title: ${{ steps.precompute.outputs.issue_title }}
spam_items: ${{ steps.precompute.outputs.spam_items }}
steps:
- name: Precompute deterministic findings
id: precompute
uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7
with:
github-token: ${{ secrets.GH_AW_BOT_DETECTION_TOKEN || secrets.GITHUB_TOKEN }}
script: |
const { owner, repo } = context.repo;
const HOURS_BACK = 6;
const ISSUE_TITLE = "🔎 Activity Signals: Review Queue";
const MIN_ACCOUNT_AGE_DAYS = 14;
const MAX_PR = 50;
const MAX_COMMENT_EXAMPLES = 10;
const MAX_TOUCHED_FILES = 10;
const ALLOWED_DOMAINS = new Set([
// GitHub docs + blog
"docs.github.com",
"github.blog",
// Marketplace + package registries
"marketplace.visualstudio.com",
"npmjs.com",
"pkg.go.dev",
// Language vendor sites
"golang.org",
"go.dev",
"nodejs.org",
"pypi.org",
"crates.io",
"python.org",
"rust-lang.org",
// Amplihack-specific trusted domains
"anthropic.com",
"claude.ai",
"docs.anthropic.com",
]);
const ALLOWED_ACCOUNTS = new Set([
// Bots and service accounts
"github-actions[bot]",
"dependabot[bot]",
"renovate[bot]",
"copilot",
"copilot-swe-agent",
]);
const TRUSTED_ORGS = [owner];
const MEMBER_ACCOUNTS = new Set();
// Read an environment variable expected to hold a JSON array.
// Returns [] when the variable is unset, is malformed JSON, or does not
// parse to an actual array — callers never have to handle errors.
function parseJsonList(envName) {
  const raw = process.env[envName];
  if (!raw) return [];
  try {
    const value = JSON.parse(raw);
    if (Array.isArray(value)) return value;
  } catch {
    // Malformed JSON — fall through to the empty default.
  }
  return [];
}
// Normalize any Date-parsable value to its canonical ISO-8601 UTC string.
const toISO = d => new Date(d).toISOString();
// Build a short canonical fingerprint of free text for near-duplicate
// detection: URLs stripped, case-folded, whitespace collapsed to single
// spaces, trimmed, and capped at 240 characters.
function normalizeForDup(s) {
  const noUrls = (s || "").toString().replace(/https?:\/\/\S+/g, "");
  const collapsed = noUrls.toLowerCase().replace(/\s+/g, " ").trim();
  return collapsed.slice(0, 240);
}
// Collect the lowercase hostname of every http(s) URL found in `text`.
// Candidates that fail URL parsing are silently dropped; duplicates are kept.
function extractDomains(text) {
  const candidates = text.match(/https?:\/\/[^\s)\]]+/g) || [];
  const hosts = [];
  for (const candidate of candidates) {
    try {
      hosts.push(new URL(candidate).hostname.toLowerCase());
    } catch {
      // Not a well-formed URL — skip it.
    }
  }
  return hosts;
}
// A host counts as "external" when it is truthy, not a GitHub-owned domain,
// and not on the configured ALLOWED_DOMAINS allowlist.
function isExternalDomain(host) {
  const githubHosts = new Set([
    "github.com",
    "raw.githubusercontent.com",
    "avatars.githubusercontent.com",
    "api.github.com",
  ]);
  // Short-circuits exactly like the original: a falsy host is returned as-is.
  return host && !githubHosts.has(host) && !ALLOWED_DOMAINS.has(host);
}
// True when `login` (compared case-insensitively) is a known bot/service
// account or was collected into MEMBER_ACCOUNTS (collaborators,
// contributors, trusted-org members).
function isAllowedAccount(login) {
  const key = String(login || "").toLowerCase();
  return ALLOWED_ACCOUNTS.has(key) || MEMBER_ACCOUNTS.has(key);
}
// Record every repository collaborator (lowercased login) in MEMBER_ACCOUNTS
// so their activity is excluded from scoring. Lookup failures are non-fatal:
// scoring simply proceeds without the collaborator allowlist.
async function loadMemberAccounts() {
  try {
    const collaborators = await github.paginate(github.rest.repos.listCollaborators, {
      owner,
      repo,
      per_page: 100,
    });
    collaborators
      .map(collaborator => collaborator?.login)
      .filter(Boolean)
      .forEach(login => MEMBER_ACCOUNTS.add(String(login).toLowerCase()));
  } catch {
    // Best-effort: continue without the member allowlist.
  }
}
// Add every past contributor (lowercased login) to MEMBER_ACCOUNTS so known
// contributors are not flagged. Lookup failures are swallowed: the scan
// simply runs without the contributor allowlist.
async function loadContributorAccounts() {
  try {
    const contributors = await github.paginate(github.rest.repos.listContributors, {
      owner,
      repo,
      per_page: 100,
    });
    contributors.forEach(contributor => {
      const login = contributor?.login;
      if (login) MEMBER_ACCOUNTS.add(String(login).toLowerCase());
    });
  } catch {
    // Best-effort: continue without the contributor allowlist.
  }
}
// Add members of every TRUSTED_ORGS organization (lowercased login) to
// MEMBER_ACCOUNTS. A failed lookup for one org skips that org only.
async function loadOrgMembers() {
  for (const org of TRUSTED_ORGS) {
    let members;
    try {
      members = await github.paginate(github.rest.orgs.listMembers, {
        org,
        per_page: 100,
      });
    } catch {
      // Best-effort: this org contributes nothing to the allowlist.
      continue;
    }
    for (const member of members) {
      const login = member?.login;
      if (login) MEMBER_ACCOUNTS.add(String(login).toLowerCase());
    }
  }
}
// Known URL-shortener hosts; links through these hide their real destination,
// which is a spam signal.
function isShortener(host) {
  return ["bit.ly", "tinyurl.com", "t.co", "is.gd", "goo.gl"].includes(host);
}
// True when `urlStr` points at a downloadable binary/archive (by path
// extension) hosted on a domain that is neither GitHub-owned nor
// allowlisted. Malformed URLs are never flagged.
function isNonGitHubBinaryUrl(urlStr) {
  const binaryExtensions = [".exe", ".msi", ".pkg", ".dmg", ".zip", ".tar.gz"];
  try {
    const parsed = new URL(urlStr);
    if (!isExternalDomain(parsed.hostname.toLowerCase())) return false;
    const path = parsed.pathname.toLowerCase();
    return binaryExtensions.some(ext => path.endsWith(ext));
  } catch {
    return false;
  }
}
// Anchor the detection window to this workflow run's creation time so that
// re-runs of the same run analyze the identical interval (determinism).
async function getRunCreatedAt() {
  const { data } = await github.rest.actions.getWorkflowRun({
    owner,
    repo,
    run_id: context.runId,
  });
  return new Date(data.created_at);
}
const end = await getRunCreatedAt();
const start = new Date(end.getTime() - HOURS_BACK * 60 * 60 * 1000);
for (const domain of parseJsonList("BOT_DETECTION_ALLOWED_DOMAINS")) {
if (domain) ALLOWED_DOMAINS.add(String(domain).toLowerCase());
}
await loadMemberAccounts();
await loadContributorAccounts();
await loadOrgMembers();
// Search issues + PRs updated in window (API requires is:issue or is:pull-request)
const qBase = `repo:${owner}/${repo} updated:>=${toISO(start)}`;
const rawItems = [];
for (const scope of ["is:issue", "is:pull-request"]) {
const search = await github.rest.search.issuesAndPullRequests({
q: `${qBase} ${scope}`,
per_page: 100,
sort: "updated",
order: "desc",
});
rawItems.push(...(search.data.items || []));
}
const seen = new Set();
let skippedNoLogin = 0;
let skippedAllowed = 0;
const skippedAllowedLogins = new Set();
const MAX_LOGGED_SKIPPED = 10;
const items = rawItems
.filter(i => new Date(i.updated_at) >= start && new Date(i.updated_at) <= end)
.map(i => ({
number: i.number,
title: i.title || "",
body: i.body || "",
url: i.html_url,
created_at: i.created_at,
updated_at: i.updated_at,
is_pr: Boolean(i.pull_request),
author: i.user?.login || "",
state: i.state || "open",
}))
.filter(i => {
if (seen.has(i.url)) return false;
seen.add(i.url);
return true;
});
// Deterministic ordering for any downstream processing
items.sort((a, b) => {
const at = a.updated_at.localeCompare(b.updated_at);
if (at !== 0) return at;
const an = a.number - b.number;
if (an !== 0) return an;
return a.url.localeCompare(b.url);
});
// Collect per-author signals
const perAuthor = new Map();
const domainAccounts = new Map(); // domain -> Set(logins)
const userCreatedAt = new Map();
const spamItems = []; // Track items that should be auto-closed
// Cache the account-creation Date for `login` in userCreatedAt. Stores null
// when the user lookup fails so the failed login is not retried. Idempotent:
// a login already in the cache is never re-fetched.
async function ensureUserCreatedAt(login) {
  if (!login || userCreatedAt.has(login)) return;
  let created = null;
  try {
    const { data } = await github.rest.users.getByUsername({ username: login });
    created = new Date(data.created_at);
  } catch {
    // Lookup failed — leave `created` as null.
  }
  userCreatedAt.set(login, created);
}
// Lazily create (and return) the per-author signal accumulator for `login`.
// The accumulator collects counts, boolean flags, and bounded sample lists
// that later feed the risk score.
function ensureAuthor(login) {
  const existing = perAuthor.get(login);
  if (existing) return existing;
  const fresh = {
    login,
    itemCount: 0,
    prCount: 0,
    issueCount: 0,
    commentCount: 0,
    reviewCount: 0,
    accountAgeDays: null, // filled in during scoring when the user lookup succeeded
    externalDomains: new Set(),
    hasShortener: false,
    hasNonGitHubBinary: false,
    touchesWorkflows: false,
    touchesCI: false,
    touchesDeps: false,
    dupTexts: new Map(), // normalized text -> occurrence count
    exampleItems: [], // sampled issues/PRs authored (with titles)
    touchedFiles: new Set(),
    examples: [], // sampled URLs (items, comments, reviews)
  };
  perAuthor.set(login, fresh);
  return fresh;
}
for (const it of items) {
const login = it.author;
if (!login) {
skippedNoLogin += 1;
continue;
}
if (isAllowedAccount(login)) {
skippedAllowed += 1;
if (skippedAllowedLogins.size < MAX_LOGGED_SKIPPED) {
skippedAllowedLogins.add(login);
}
continue;
}
const s = ensureAuthor(login);
await ensureUserCreatedAt(login);
s.itemCount += 1;
if (it.is_pr) s.prCount += 1;
else s.issueCount += 1;
if (s.exampleItems.length < 5) {
s.exampleItems.push({
title: it.title || "",
url: it.url,
is_pr: it.is_pr,
number: it.number,
});
}
if (s.examples.length < 5) {
s.examples.push({ url: it.url, is_pr: it.is_pr, number: it.number });
}
const text = `${it.title}\n\n${it.body}`;
const domains = extractDomains(text);
for (const host of domains) {
if (!host) continue;
if (isExternalDomain(host)) {
s.externalDomains.add(host);
if (!domainAccounts.has(host)) domainAccounts.set(host, new Set());
domainAccounts.get(host).add(login);
}
if (isShortener(host)) s.hasShortener = true;
}
// Non-GitHub binary/download links
const urlRe = /https?:\/\/[^\s)\]]+/g;
const urlMatches = (text.match(urlRe) || []);
for (const u of urlMatches) {
if (isNonGitHubBinaryUrl(u)) {
s.hasNonGitHubBinary = true;
}
}
// Duplicate-ish content detection (within items we fetched)
const norm = normalizeForDup(text);
if (norm) {
s.dupTexts.set(norm, (s.dupTexts.get(norm) || 0) + 1);
}
}
// PR comments + reviews (deterministic and bounded)
const prItems = items.filter(i => i.is_pr).slice(0, MAX_PR);
for (const it of prItems) {
const login = it.author;
if (login) {
if (isAllowedAccount(login)) continue;
await ensureUserCreatedAt(login);
}
let issueComments = [];
try {
let total = 0;
issueComments = await github.paginate(
github.rest.issues.listComments,
{
owner,
repo,
issue_number: it.number,
per_page: 100,
},
(response, done) => {
const remaining = 500 - total;
if (remaining <= 0) {
done();
return [];
}
if (total + response.data.length >= 500) {
total = 500;
done();
return response.data.slice(0, remaining);
}
total += response.data.length;
return response.data;
}
);
} catch {
// ignore
}
let reviewComments = [];
try {
let total = 0;
reviewComments = await github.paginate(
github.rest.pulls.listReviewComments,
{
owner,
repo,
pull_number: it.number,
per_page: 100,
},
(response, done) => {
const remaining = 500 - total;
if (remaining <= 0) {
done();
return [];
}
if (total + response.data.length >= 500) {
total = 500;
done();
return response.data.slice(0, remaining);
}
total += response.data.length;
return response.data;
}
);
} catch {
// ignore
}
let reviews = [];
try {
let total = 0;
reviews = await github.paginate(
github.rest.pulls.listReviews,
{
owner,
repo,
pull_number: it.number,
per_page: 100,
},
(response, done) => {
const remaining = 500 - total;
if (remaining <= 0) {
done();
return [];
}
if (total + response.data.length >= 500) {
total = 500;
done();
return response.data.slice(0, remaining);
}
total += response.data.length;
return response.data;
}
);
} catch {
// ignore
}
const commentCandidates = [...issueComments, ...reviewComments]
.filter(c => c?.created_at)
.filter(c => new Date(c.created_at) >= start && new Date(c.created_at) <= end)
.sort((a, b) => a.created_at.localeCompare(b.created_at));
for (const c of commentCandidates) {
const commenter = c.user?.login || "";
if (!commenter) continue;
if (isAllowedAccount(commenter)) continue;
await ensureUserCreatedAt(commenter);
const s = ensureAuthor(commenter);
s.commentCount += 1;
if (s.examples.length < MAX_COMMENT_EXAMPLES) {
s.examples.push({ url: c.html_url, is_pr: true, number: it.number });
}
}
const reviewCandidates = reviews
.map(r => ({
user: r.user,
submitted_at: r.submitted_at || r.submittedAt,
url: r.html_url || `${it.url}#pullrequestreview-${r.id}`,
}))
.filter(r => r.submitted_at)
.filter(r => new Date(r.submitted_at) >= start && new Date(r.submitted_at) <= end)
.sort((a, b) => a.submitted_at.localeCompare(b.submitted_at));
for (const r of reviewCandidates) {
const reviewer = r.user?.login || "";
if (!reviewer) continue;
if (isAllowedAccount(reviewer)) continue;
await ensureUserCreatedAt(reviewer);
const s = ensureAuthor(reviewer);
s.reviewCount += 1;
if (s.examples.length < MAX_COMMENT_EXAMPLES) {
s.examples.push({ url: r.url, is_pr: true, number: it.number });
}
}
}
// PR file touches (sensitive paths) - deterministic and bounded.
// For each sampled PR, list changed files (capped at 500) and flag authors
// who touch workflows/actions, CI/build scripts, or dependency manifests.
for (const it of prItems) {
  const login = it.author;
  if (!login) continue;
  if (isAllowedAccount(login)) continue;
  const s = ensureAuthor(login);
  try {
    let total = 0;
    const files = await github.paginate(
      github.rest.pulls.listFiles,
      {
        owner,
        repo,
        pull_number: it.number,
        per_page: 100,
      },
      // Pagination mapper: stop after 500 files, truncating the last page.
      (response, done) => {
        const remaining = 500 - total;
        if (remaining <= 0) {
          done();
          return [];
        }
        if (total + response.data.length >= 500) {
          total = 500;
          done();
          return response.data.slice(0, remaining);
        }
        total += response.data.length;
        return response.data;
      }
    );
    const filenames = files.map(f => f.filename);
    for (const fn of filenames) {
      // Keep only a small, bounded sample of touched paths for the report.
      if (s.touchedFiles.size < MAX_TOUCHED_FILES) s.touchedFiles.add(fn);
      if (fn.startsWith(".github/workflows/") || fn.startsWith(".github/actions/")) s.touchesWorkflows = true;
      if (fn === "Dockerfile" || fn === "Makefile" || fn.startsWith("scripts/") || fn.startsWith("actions/")) s.touchesCI = true;
      if (
        fn === "package.json" ||
        fn === "package-lock.json" ||
        fn === "pnpm-lock.yaml" ||
        fn === "yarn.lock" ||
        fn === "go.mod" ||
        fn === "go.sum" ||
        fn.startsWith("requirements") ||
        fn === "Cargo.toml" ||
        fn === "Cargo.lock"
      ) {
        s.touchesDeps = true;
      }
    }
  } catch {
    // Bare catch for consistency with the rest of this script (the error
    // binding was unused). If file listing fails, do not infer anything.
  }
}
// Score + severity
// Turn each per-author stats record into a scored account summary. The score
// is an additive sum of fixed weights, capped at 100; severity bands are
// derived from the score. All ordering below is deterministic so repeated
// runs over the same window produce identical reports.
const accounts = Array.from(perAuthor.values()).map(s => {
// Account age in whole days relative to the window end, when the user's
// creation date was resolved earlier; otherwise accountAgeDays stays null.
if (userCreatedAt.has(s.login)) {
const createdAt = userCreatedAt.get(s.login);
if (createdAt) {
const now = new Date(end);
s.accountAgeDays = Math.max(0, Math.floor((now - createdAt) / (24 * 60 * 60 * 1000)));
}
}
let score = 0;
const extDomains = Array.from(s.externalDomains);
// External-domain weight: +3 per distinct domain, capped at +9.
score += Math.min(9, extDomains.length * 3);
if (s.hasShortener) score += 8;
if (s.hasNonGitHubBinary) score += 10;
// Touching workflows/CI/deps is weighted highest — supply-chain risk.
if (s.touchesWorkflows) score += 15;
if (s.touchesCI) score += 10;
if (s.touchesDeps) score += 6;
if (s.itemCount >= 5) score += 6;
if (s.accountAgeDays !== null && s.accountAgeDays < MIN_ACCOUNT_AGE_DAYS) score += 8;
// Duplicate-text signal: the same text posted 3+ times by this author.
let hasDup3 = false;
for (const [, c] of s.dupTexts) {
if (c >= 3) {
hasDup3 = true;
break;
}
}
if (hasDup3) score += 8;
score = Math.min(100, score);
// Severity bands: >=20 High, >=10 Medium, >=1 Low, else None.
let severity = "None";
if (score >= 20) severity = "High";
else if (score >= 10) severity = "Medium";
else if (score >= 1) severity = "Low";
// Deterministic signal summary
const signals = [];
if (extDomains.length > 0) signals.push(`external_domains=${extDomains.length}`);
if (s.hasShortener) signals.push("shortener");
if (s.hasNonGitHubBinary) signals.push("non_github_binary_link");
if (s.touchesWorkflows) signals.push("touches_workflows");
if (s.touchesCI) signals.push("touches_ci_or_scripts");
if (s.touchesDeps) signals.push("touches_dependencies");
if (s.itemCount >= 5) signals.push(`burst_items=${s.itemCount}`);
if (hasDup3) signals.push("dup_text>=3");
if (s.commentCount > 0) signals.push(`comments=${s.commentCount}`);
if (s.reviewCount > 0) signals.push(`reviews=${s.reviewCount}`);
if (s.accountAgeDays !== null && s.accountAgeDays < MIN_ACCOUNT_AGE_DAYS) {
signals.push(`new_account=${s.accountAgeDays}d`);
}
// Shape consumed by the logging, auto-close, and markdown-rendering code
// below; domain and file lists are sorted for stable output.
return {
login: s.login,
risk_score: score,
severity,
signals,
external_domains: extDomains.sort((a, b) => a.localeCompare(b)),
pr_count: s.prCount,
issue_count: s.issueCount,
comment_count: s.commentCount,
review_count: s.reviewCount,
example_items: s.exampleItems,
touched_files: Array.from(s.touchedFiles).sort((a, b) => a.localeCompare(b)),
examples: s.examples,
};
});
// Stable sorting
// Highest risk first; ties broken alphabetically by login for determinism.
accounts.sort((a, b) => {
if (b.risk_score !== a.risk_score) return b.risk_score - a.risk_score;
return a.login.localeCompare(b.login);
});
// Flag definite spam for auto-close: every still-open example item authored
// by an account whose risk score reached the auto-close threshold (>= 30).
const AUTO_CLOSE_SCORE = 30;
for (const acc of accounts) {
  if (acc.risk_score < AUTO_CLOSE_SCORE) continue;
  for (const ex of acc.example_items) {
    const match = items.find(entry => entry.number === ex.number);
    if (match && match.state === "open") {
      spamItems.push({
        number: match.number,
        is_pr: match.is_pr,
        login: acc.login,
        score: acc.risk_score,
      });
    }
  }
}
// Aggregate external domains shared across authors and order them for a
// stable report: most-shared domains first, ties broken alphabetically.
const domains = [];
for (const [domain, logins] of domainAccounts.entries()) {
  domains.push({
    domain,
    count: logins.size,
    accounts: Array.from(logins).sort((a, b) => a.localeCompare(b)),
  });
}
domains.sort((a, b) => (b.count !== a.count ? b.count - a.count : a.domain.localeCompare(b.domain)));
// Highest severity seen (accounts are already sorted by descending score).
const topSeverity = accounts.find(a => a.severity !== "None")?.severity || "None";
// Calculate metrics for observability and decision logic
let highRiskAccounts = 0;
for (const a of accounts) if (a.risk_score >= 10) highRiskAccounts += 1;
let multiAccountDomains = 0;
for (const d of domains) if (d.count >= 2) multiAccountDomains += 1;
const hasFindings = highRiskAccounts > 0 || multiAccountDomains > 0;
// Log analysis summary for observability
// Pre-render human-readable labels; each falls back to "none" when empty.
const skippedNames = Array.from(skippedAllowedLogins).sort((a, b) => a.localeCompare(b));
const skippedLabel = skippedNames.length > 0 ? skippedNames.map(n => `@${n}`).join(", ") : "none";
const analyzedNames = accounts.slice(0, 10).map(a => `@${a.login}`).join(", ") || "none";
const domainSamples = domains.slice(0, 10).map(d => d.domain).join(", ") || "none";
// High-level run summary: window, volumes, skips, and the go/no-go decision.
core.info("Summary:");
core.info(`- Window: ${toISO(start)} -> ${toISO(end)}`);
core.info(`- Items: raw=${rawItems.length}, in_window+dedup=${items.length}`);
core.info(`- PRs scanned: ${prItems.length} (max ${MAX_PR})`);
core.info(`- Skipped (no login): ${skippedNoLogin}`);
core.info(`- Skipped (allowlisted): ${skippedAllowed} [${skippedLabel}]`);
core.info(`- Accounts analyzed: ${accounts.length} [${analyzedNames}]`);
core.info(`- Risk >= 10: ${highRiskAccounts}`);
core.info(`- Spam items to auto-close: ${spamItems.length}`);
core.info(`- External domains: total=${domains.length}, shared>=2=${multiAccountDomains} [${domainSamples}]`);
core.info(`- Decision: has_findings=${hasFindings} (will ${hasFindings ? "run" : "skip"} agent job)`);
// Detailed per-domain and per-account breakdown for the workflow log.
core.info("Detailed report:");
if (domains.length === 0) {
core.info("- Domains: none");
} else {
core.info("- Domains:");
for (const d of domains) {
const logins = d.accounts.map(login => `@${login}`).join(", ") || "none";
core.info(` - ${d.domain}: accounts=${d.count} [${logins}]`);
}
}
if (accounts.length === 0) {
core.info("- Accounts: none");
} else {
core.info("- Accounts:");
for (const a of accounts) {
const signalsText = a.signals.join(", ") || "none";
const domainsText = (a.external_domains || []).join(", ") || "none";
const touchedText = (a.touched_files || []).join(", ") || "none";
core.info(` - @${a.login}: score=${a.risk_score}, severity=${a.severity}, signals=[${signalsText}]`);
core.info(` - activity: pr=${a.pr_count || 0}, issue=${a.issue_count || 0}, comment=${a.comment_count || 0}, review=${a.review_count || 0}`);
core.info(` - external_domains: ${domainsText}`);
core.info(` - touched_files: ${touchedText}`);
if (a.example_items && a.example_items.length > 0) {
const itemLines = a.example_items
.map(item => {
const label = item.is_pr ? `PR #${item.number}` : `Issue #${item.number}`;
const title = item.title ? ` "${item.title}"` : "";
return `${label}${title}`;
})
.join("; ");
core.info(` - examples: ${itemLines}`);
}
if (a.examples && a.examples.length > 0) {
core.info(" - evidence:");
for (const ex of a.examples) {
core.info(` - ${ex.url}`);
}
}
}
}
// Find existing triage issue (exact title match).
// Paginate the open-issue listing so the triage issue is found even when the
// repository has more than 100 open issues; a single-page lookup could miss
// it and cause a duplicate triage issue to be created instead of an update.
let existingIssueNumber = "";
try {
  const openIssues = await github.paginate(github.rest.issues.listForRepo, {
    owner,
    repo,
    state: "open",
    per_page: 100,
  });
  const existing = (openIssues || []).find(i => (i.title || "") === ISSUE_TITLE);
  if (existing?.number) existingIssueNumber = String(existing.number);
} catch (e) {
  // Best-effort lookup: on API failure fall through with no existing issue,
  // which at worst creates a fresh triage issue rather than failing the run.
}
// Render deterministic markdown body
// Builds the triage-issue body from the computed report data. When
// includeMention is true (issue creation), a leading @pelikhan mention line is
// added so the maintainer is notified once; updates omit it. Output is capped:
// 10 auto-closed items, 20 domains, 25 accounts per severity, 5 evidence links.
function renderBody(includeMention) {
const lines = [];
if (includeMention) lines.push("@pelikhan", "");
lines.push(
`**Window:** ${toISO(start)} → ${toISO(end)}`,
`**Assessment:** ${topSeverity}`,
""
);
// No findings: short-circuit with a one-line report.
if (!hasFindings) {
lines.push("No meaningful suspicious activity detected in this window.");
return lines.join("\n");
}
if (spamItems.length > 0) {
lines.push("## Auto-Closed Spam Items", "");
lines.push("The following items were automatically closed due to high spam scores (>= 30):", "");
for (const spam of spamItems.slice(0, 10)) {
const label = spam.is_pr ? "PR" : "Issue";
lines.push(`- ${label} #${spam.number} by @${spam.login} (score=${spam.score})`);
}
lines.push("");
}
// Domain table: up to 20 domains, at most 5 logins shown per domain.
if (domains.length > 0) {
lines.push("## Domains (external)", "", "| Domain | Accounts | Logins |", "| --- | ---: | --- |");
for (const d of domains.slice(0, 20)) {
const maxLogins = 5;
const shown = d.accounts.slice(0, maxLogins).map(login => `@${login}`);
const overflow = d.accounts.length > maxLogins ? ` +${d.accounts.length - maxLogins} more` : "";
lines.push(`| ${d.domain} | ${d.count} | ${shown.join(", ")}${overflow} |`);
}
lines.push("");
}
const high = accounts.filter(a => a.severity === "High");
const med = accounts.filter(a => a.severity === "Medium");
const low = accounts.filter(a => a.severity === "Low");
// Appends one "## <title>" section listing up to 25 accounts from arr, each
// with signals, change/activity summaries, and a collapsible evidence list.
function renderAccounts(title, arr) {
if (arr.length === 0) return;
lines.push(`## ${title}`, "");
for (const a of arr.slice(0, 25)) {
const sig = a.signals.join(", ");
lines.push(`- @${a.login} — score=${a.risk_score} — ${sig}`);
const changeParts = [];
if (a.example_items && a.example_items.length > 0) {
const itemSamples = a.example_items.slice(0, 2).map(item => {
const label = item.is_pr ? `PR #${item.number}` : `Issue #${item.number}`;
const title = item.title ? ` "${item.title}"` : "";
return `${label}${title}`;
});
changeParts.push(itemSamples.join("; "));
}
if (a.touched_files && a.touched_files.length > 0) {
const files = a.touched_files.slice(0, 6).join(", ");
changeParts.push(`files: ${files}`);
}
if (changeParts.length > 0) {
lines.push(` - Change summary: ${changeParts.join("; ")}`);
}
lines.push(
` - Activity summary: ${a.pr_count || 0} PR, ${a.issue_count || 0} issue, ${a.comment_count || 0} comment, ${a.review_count || 0} review`
);
// Evidence links go in a <details> block to keep the issue body compact.
if (a.examples && a.examples.length > 0) {
lines.push(" <details><summary>Evidence</summary>", "");
for (const ex of a.examples.slice(0, 5)) {
lines.push(` - ${ex.url}`);
}
if (a.examples.length > 5) {
lines.push(` - ... and ${a.examples.length - 5} more`);
}
lines.push("", " </details>");
}
}
lines.push("");
}
renderAccounts("Accounts (High)", high);
renderAccounts("Accounts (Medium)", med);
renderAccounts("Accounts (Low)", low);
lines.push("## Notes", "", "- This report is computed deterministically from GitHub Search + PR file listings + PR comments/reviews within the window.", "- Items with spam scores >= 30 are automatically closed.");
return lines.join("\n");
}
// Decide whether to create a fresh triage issue, update the existing one, or
// do nothing; the mention is only included when a new issue is being created.
let action = "none";
let issueBody = "";
let issueNumber = "";
if (hasFindings && existingIssueNumber) {
  action = "update";
  issueNumber = existingIssueNumber;
  issueBody = renderBody(false);
} else if (hasFindings) {
  action = "create";
  issueBody = renderBody(true);
}
// Expose the decision and report data to downstream workflow jobs.
const stepOutputs = {
  action,
  issue_number: issueNumber,
  issue_title: ISSUE_TITLE,
  issue_body: issueBody,
  spam_items: JSON.stringify(spamItems),
};
for (const [key, value] of Object.entries(stepOutputs)) {
  core.setOutput(key, value);
}
# Post-agent job: downloads the agent's declared safe outputs and applies them
# (close issue/PR, create/update the triage issue) with write permissions that
# the agent job itself never holds.
safe_outputs:
needs: agent
# Run even after upstream failures, but not if the agent was skipped entirely.
if: (!cancelled()) && (needs.agent.result != 'skipped')
runs-on: ubuntu-slim
permissions:
contents: read
issues: write
pull-requests: write
timeout-minutes: 15
env:
GH_AW_CALLER_WORKFLOW_ID: "${{ github.repository }}/bot-detection"
GH_AW_ENGINE_ID: "claude"
GH_AW_WORKFLOW_ID: "bot-detection"
GH_AW_WORKFLOW_NAME: "Bot Detection"
# Outputs re-exported from the safe-output processor for downstream jobs.
outputs:
code_push_failure_count: ${{ steps.process_safe_outputs.outputs.code_push_failure_count }}
code_push_failure_errors: ${{ steps.process_safe_outputs.outputs.code_push_failure_errors }}
create_discussion_error_count: ${{ steps.process_safe_outputs.outputs.create_discussion_error_count }}
create_discussion_errors: ${{ steps.process_safe_outputs.outputs.create_discussion_errors }}
created_issue_number: ${{ steps.process_safe_outputs.outputs.created_issue_number }}
created_issue_url: ${{ steps.process_safe_outputs.outputs.created_issue_url }}
process_safe_outputs_processed_count: ${{ steps.process_safe_outputs.outputs.processed_count }}
process_safe_outputs_temporary_id_map: ${{ steps.process_safe_outputs.outputs.temporary_id_map }}
steps:
# Install the pinned gh-aw helper scripts used by the github-script step below.
- name: Setup Scripts
uses: github/gh-aw/actions/setup@f1073c5498ee46fec1530555a7c953445417c69b # v0.56.2
with:
destination: /opt/gh-aw/actions
# Best-effort download: the agent may not have produced an output artifact.
- name: Download agent output artifact
id: download-agent-output
continue-on-error: true
uses: actions/download-artifact@70fc10c6e5e1ce46ad2ea6f2b72d43f7d47b13c3 # v8
with:
name: agent-output
path: /tmp/gh-aw/safeoutputs/
- name: Setup agent output environment variable
if: steps.download-agent-output.outcome == 'success'
run: |
mkdir -p /tmp/gh-aw/safeoutputs/
find "/tmp/gh-aw/safeoutputs/" -type f -print
echo "GH_AW_AGENT_OUTPUT=/tmp/gh-aw/safeoutputs/agent_output.json" >> "$GITHUB_ENV"
# Apply the agent's safe outputs under the handler config (caps per output
# type, required labels) using the gh-aw handler manager.
- name: Process Safe Outputs
id: process_safe_outputs
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
env:
GH_AW_AGENT_OUTPUT: ${{ env.GH_AW_AGENT_OUTPUT }}
GH_AW_ALLOWED_DOMAINS: "*.githubusercontent.com,anthropic.com,api.anthropic.com,api.github.com,api.snapcraft.io,archive.ubuntu.com,azure.archive.ubuntu.com,cdn.playwright.dev,codeload.github.com,crl.geotrust.com,crl.globalsign.com,crl.identrust.com,crl.sectigo.com,crl.thawte.com,crl.usertrust.com,crl.verisign.com,crl3.digicert.com,crl4.digicert.com,crls.ssl.com,files.pythonhosted.org,ghcr.io,github-cloud.githubusercontent.com,github-cloud.s3.amazonaws.com,github.com,host.docker.internal,json-schema.org,json.schemastore.org,keyserver.ubuntu.com,lfs.github.com,objects.githubusercontent.com,ocsp.digicert.com,ocsp.geotrust.com,ocsp.globalsign.com,ocsp.identrust.com,ocsp.sectigo.com,ocsp.ssl.com,ocsp.thawte.com,ocsp.usertrust.com,ocsp.verisign.com,packagecloud.io,packages.cloud.google.com,packages.microsoft.com,playwright.download.prss.microsoft.com,ppa.launchpad.net,pypi.org,raw.githubusercontent.com,registry.npmjs.org,s.symcb.com,s.symcd.com,security.ubuntu.com,sentry.io,statsig.anthropic.com,ts-crl.ws.symantec.com,ts-ocsp.ws.symantec.com"
GITHUB_SERVER_URL: ${{ github.server_url }}
GITHUB_API_URL: ${{ github.api_url }}
GH_AW_SAFE_OUTPUTS_HANDLER_CONFIG: "{\"close_issue\":{\"max\":10,\"target\":\"*\"},\"close_pull_request\":{\"max\":10,\"target\":\"*\"},\"create_issue\":{\"labels\":[\"security\",\"bot-detection\"],\"max\":1},\"missing_data\":{},\"missing_tool\":{},\"update_issue\":{\"allow_body\":true,\"max\":1,\"target\":\"*\"}}"
with:
# Prefer a dedicated PAT when configured; fall back to the workflow token.
github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
script: |
const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs');
setupGlobals(core, github, context, exec, io);
const { main } = require('/opt/gh-aw/actions/safe_output_handler_manager.cjs');
await main();
# Always upload the manifest of applied outputs for auditability.
- name: Upload safe output items manifest
if: always()
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
with:
name: safe-output-items
path: /tmp/safe-output-items.jsonl
if-no-files-found: warn