diff --git a/.changeset/patch-clean-safe-outputs-loader.md b/.changeset/patch-clean-safe-outputs-loader.md
new file mode 100644
index 0000000000..54e80ffa9f
--- /dev/null
+++ b/.changeset/patch-clean-safe-outputs-loader.md
@@ -0,0 +1,9 @@
+---
+"gh-aw": patch
+---
+
+Clean and modernize `pkg/workflow/js/safe_outputs_tools_loader.cjs` by refactoring
+internal functions (`loadTools`, `attachHandlers`, `registerDynamicTools`) to use
+modern JavaScript patterns (optional chaining, nullish coalescing, handler map)
+and reduce nesting and complexity. No behavioral changes.
+
diff --git a/.github/workflows/agent-performance-analyzer.lock.yml b/.github/workflows/agent-performance-analyzer.lock.yml
index ae1b269671..7e9b16b455 100644
--- a/.github/workflows/agent-performance-analyzer.lock.yml
+++ b/.github/workflows/agent-performance-analyzer.lock.yml
@@ -7315,10 +7315,7 @@ jobs:
             if (!entry.isFile()) {
               continue;
             }
-            const relPath = path.posix
-              .relative(rootDir, absPath)
-              .split(path.sep)
-              .join("/");
+            const relPath = path.posix.relative(rootDir, absPath).split(path.sep).join("/");
             const stats = fs.statSync(absPath);
             result.push({ relPath, absPath, size: stats.size });
           }
@@ -7338,9 +7335,7 @@ jobs:
           const isCampaignMode = Boolean(campaignId);
           if (!fs.existsSync(sourceMemoryPath)) {
             if (isCampaignMode) {
-              core.setFailed(
-                `Campaign repo-memory is enabled but no campaign state was written. Expected to find cursor and metrics under: ${sourceMemoryPath}/${campaignId}/`
-              );
+              core.setFailed(`Campaign repo-memory is enabled but no campaign state was written. Expected to find cursor and metrics under: ${sourceMemoryPath}/${campaignId}/`);
               return;
             }
             core.info(`Memory directory not found in artifact: ${sourceMemoryPath}`);
@@ -7377,12 +7372,7 @@ jobs:
           let filesToCopy = [];
           try {
             const files = listFilesRecursively(sourceMemoryPath);
-            const patterns = fileGlobFilter
-              ? fileGlobFilter
-                  .split(/\s+/)
-                  .filter(Boolean)
-                  .map(globToRegExp)
-              : [];
+            const patterns = fileGlobFilter ?
fileGlobFilter.split(/\s+/).filter(Boolean).map(globToRegExp) : []; if (isCampaignMode) { const expectedCursorRel = `${campaignId}/cursor.json`; const cursorFile = files.find(f => f.relPath === expectedCursorRel); diff --git a/.github/workflows/audit-workflows.lock.yml b/.github/workflows/audit-workflows.lock.yml index 613411c356..10fe81b71c 100644 --- a/.github/workflows/audit-workflows.lock.yml +++ b/.github/workflows/audit-workflows.lock.yml @@ -171,7 +171,7 @@ jobs: mkdir -p /tmp/gh-aw/sandbox/agent/logs echo "Created /tmp/gh-aw/agent directory for agentic workflow temporary files" - name: Set up Go - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 + uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: cache: true go-version-file: go.mod @@ -193,7 +193,7 @@ jobs: pip install --user --quiet numpy pandas matplotlib seaborn scipy - if: always() name: Upload charts - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: trending-charts @@ -201,7 +201,7 @@ jobs: retention-days: 30 - if: always() name: Upload source and data - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: trending-source-and-data diff --git a/.github/workflows/campaign-manager.lock.yml b/.github/workflows/campaign-manager.lock.yml index b0371e9295..9da17aab65 100644 --- a/.github/workflows/campaign-manager.lock.yml +++ b/.github/workflows/campaign-manager.lock.yml @@ -7150,10 +7150,7 @@ jobs: if (!entry.isFile()) { continue; } - const relPath = path.posix - .relative(rootDir, absPath) - .split(path.sep) - .join("/"); + const relPath = path.posix.relative(rootDir, absPath).split(path.sep).join("/"); const stats = fs.statSync(absPath); result.push({ relPath, absPath, size: stats.size }); } @@ -7173,9 +7170,7 @@ jobs: const isCampaignMode = Boolean(campaignId); if (!fs.existsSync(sourceMemoryPath)) { if (isCampaignMode) { - core.setFailed( - `Campaign repo-memory is enabled but no campaign state was written. Expected to find cursor and metrics under: ${sourceMemoryPath}/${campaignId}/` - ); + core.setFailed(`Campaign repo-memory is enabled but no campaign state was written. Expected to find cursor and metrics under: ${sourceMemoryPath}/${campaignId}/`); return; } core.info(`Memory directory not found in artifact: ${sourceMemoryPath}`); @@ -7212,12 +7207,7 @@ jobs: let filesToCopy = []; try { const files = listFilesRecursively(sourceMemoryPath); - const patterns = fileGlobFilter - ? fileGlobFilter - .split(/\s+/) - .filter(Boolean) - .map(globToRegExp) - : []; + const patterns = fileGlobFilter ? 
fileGlobFilter.split(/\s+/).filter(Boolean).map(globToRegExp) : []; if (isCampaignMode) { const expectedCursorRel = `${campaignId}/cursor.json`; const cursorFile = files.find(f => f.relPath === expectedCursorRel); diff --git a/.github/workflows/copilot-pr-nlp-analysis.lock.yml b/.github/workflows/copilot-pr-nlp-analysis.lock.yml index 4976cefe6d..e8122b1dcc 100644 --- a/.github/workflows/copilot-pr-nlp-analysis.lock.yml +++ b/.github/workflows/copilot-pr-nlp-analysis.lock.yml @@ -177,7 +177,7 @@ jobs: run: "pip install --user --quiet numpy pandas matplotlib seaborn scipy\n\n# Verify installations\npython3 -c \"import numpy; print(f'NumPy {numpy.__version__} installed')\"\npython3 -c \"import pandas; print(f'Pandas {pandas.__version__} installed')\"\npython3 -c \"import matplotlib; print(f'Matplotlib {matplotlib.__version__} installed')\"\npython3 -c \"import seaborn; print(f'Seaborn {seaborn.__version__} installed')\"\npython3 -c \"import scipy; print(f'SciPy {scipy.__version__} installed')\"\n\necho \"All scientific libraries installed successfully\"\n" - if: always() name: Upload generated charts - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: data-charts @@ -185,7 +185,7 @@ jobs: retention-days: 30 - if: always() name: Upload source files and data - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: python-source-and-data diff --git a/.github/workflows/copilot-session-insights.lock.yml b/.github/workflows/copilot-session-insights.lock.yml index cf9cf6b332..0fe64cdb80 100644 --- a/.github/workflows/copilot-session-insights.lock.yml +++ b/.github/workflows/copilot-session-insights.lock.yml @@ -184,7 +184,7 @@ jobs: run: "pip install --user --quiet numpy pandas matplotlib seaborn scipy\n\n# Verify installations\npython3 -c \"import numpy; print(f'NumPy {numpy.__version__} installed')\"\npython3 -c \"import pandas; print(f'Pandas {pandas.__version__} installed')\"\npython3 -c \"import matplotlib; print(f'Matplotlib {matplotlib.__version__} installed')\"\npython3 -c \"import seaborn; print(f'Seaborn {seaborn.__version__} installed')\"\npython3 -c \"import scipy; print(f'SciPy {scipy.__version__} installed')\"\n\necho \"All scientific libraries installed successfully\"\n" - if: always() name: Upload generated charts - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: data-charts @@ -192,7 +192,7 @@ jobs: retention-days: 30 - if: always() name: Upload source files and data - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: python-source-and-data diff --git a/.github/workflows/daily-code-metrics.lock.yml b/.github/workflows/daily-code-metrics.lock.yml index 73df188341..29c6a2d864 100644 --- a/.github/workflows/daily-code-metrics.lock.yml +++ b/.github/workflows/daily-code-metrics.lock.yml @@ -170,7 +170,7 @@ jobs: pip install --user --quiet numpy pandas matplotlib seaborn scipy - if: always() name: Upload charts - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: 
actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: trending-charts @@ -178,7 +178,7 @@ jobs: retention-days: 30 - if: always() name: Upload source and data - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: trending-source-and-data diff --git a/.github/workflows/daily-copilot-token-report.lock.yml b/.github/workflows/daily-copilot-token-report.lock.yml index 3cf3f0e433..326ab27b67 100644 --- a/.github/workflows/daily-copilot-token-report.lock.yml +++ b/.github/workflows/daily-copilot-token-report.lock.yml @@ -173,7 +173,7 @@ jobs: run: "pip install --user --quiet numpy pandas matplotlib seaborn scipy\n\n# Verify installations\npython3 -c \"import numpy; print(f'NumPy {numpy.__version__} installed')\"\npython3 -c \"import pandas; print(f'Pandas {pandas.__version__} installed')\"\npython3 -c \"import matplotlib; print(f'Matplotlib {matplotlib.__version__} installed')\"\npython3 -c \"import seaborn; print(f'Seaborn {seaborn.__version__} installed')\"\npython3 -c \"import scipy; print(f'SciPy {scipy.__version__} installed')\"\n\necho \"All scientific libraries installed successfully\"\n" - if: always() name: Upload generated charts - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: data-charts @@ -181,7 +181,7 @@ jobs: retention-days: 30 - if: always() name: Upload source files and data - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: python-source-and-data diff --git a/.github/workflows/daily-file-diet.lock.yml b/.github/workflows/daily-file-diet.lock.yml index 68dbb63857..b5258a97cc 100644 --- a/.github/workflows/daily-file-diet.lock.yml +++ b/.github/workflows/daily-file-diet.lock.yml @@ -189,7 +189,7 @@ jobs: run: "pip install --user --quiet numpy pandas matplotlib seaborn scipy\n\n# Verify installations\npython3 -c \"import numpy; print(f'NumPy {numpy.__version__} installed')\"\npython3 -c \"import pandas; print(f'Pandas {pandas.__version__} installed')\"\npython3 -c \"import matplotlib; print(f'Matplotlib {matplotlib.__version__} installed')\"\npython3 -c \"import seaborn; print(f'Seaborn {seaborn.__version__} installed')\"\npython3 -c \"import scipy; print(f'SciPy {scipy.__version__} installed')\"\n\necho \"All scientific libraries installed successfully\"\n" - if: always() name: Upload generated charts - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: data-charts @@ -197,7 +197,7 @@ jobs: retention-days: 30 - if: always() name: Upload source files and data - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: python-source-and-data @@ -7741,10 +7741,7 @@ jobs: if (!entry.isFile()) { continue; } - const relPath = path.posix - .relative(rootDir, absPath) - .split(path.sep) - .join("/"); + const relPath = path.posix.relative(rootDir, absPath).split(path.sep).join("/"); const stats = fs.statSync(absPath); result.push({ 
relPath, absPath, size: stats.size }); } @@ -7764,9 +7761,7 @@ jobs: const isCampaignMode = Boolean(campaignId); if (!fs.existsSync(sourceMemoryPath)) { if (isCampaignMode) { - core.setFailed( - `Campaign repo-memory is enabled but no campaign state was written. Expected to find cursor and metrics under: ${sourceMemoryPath}/${campaignId}/` - ); + core.setFailed(`Campaign repo-memory is enabled but no campaign state was written. Expected to find cursor and metrics under: ${sourceMemoryPath}/${campaignId}/`); return; } core.info(`Memory directory not found in artifact: ${sourceMemoryPath}`); @@ -7803,12 +7798,7 @@ jobs: let filesToCopy = []; try { const files = listFilesRecursively(sourceMemoryPath); - const patterns = fileGlobFilter - ? fileGlobFilter - .split(/\s+/) - .filter(Boolean) - .map(globToRegExp) - : []; + const patterns = fileGlobFilter ? fileGlobFilter.split(/\s+/).filter(Boolean).map(globToRegExp) : []; if (isCampaignMode) { const expectedCursorRel = `${campaignId}/cursor.json`; const cursorFile = files.find(f => f.relPath === expectedCursorRel); diff --git a/.github/workflows/daily-firewall-report.lock.yml b/.github/workflows/daily-firewall-report.lock.yml index 47b7fb7bed..406a2f82b8 100644 --- a/.github/workflows/daily-firewall-report.lock.yml +++ b/.github/workflows/daily-firewall-report.lock.yml @@ -170,7 +170,7 @@ jobs: mkdir -p /tmp/gh-aw/sandbox/agent/logs echo "Created /tmp/gh-aw/agent directory for agentic workflow temporary files" - name: Set up Go - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 + uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: cache: true go-version-file: go.mod @@ -190,7 +190,7 @@ jobs: pip install --user --quiet numpy pandas matplotlib seaborn scipy - if: always() name: Upload charts - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: trending-charts @@ -198,7 +198,7 @@ jobs: retention-days: 30 - if: always() name: Upload source and data - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: trending-source-and-data @@ -7043,10 +7043,7 @@ jobs: if (!entry.isFile()) { continue; } - const relPath = path.posix - .relative(rootDir, absPath) - .split(path.sep) - .join("/"); + const relPath = path.posix.relative(rootDir, absPath).split(path.sep).join("/"); const stats = fs.statSync(absPath); result.push({ relPath, absPath, size: stats.size }); } @@ -7066,9 +7063,7 @@ jobs: const isCampaignMode = Boolean(campaignId); if (!fs.existsSync(sourceMemoryPath)) { if (isCampaignMode) { - core.setFailed( - `Campaign repo-memory is enabled but no campaign state was written. Expected to find cursor and metrics under: ${sourceMemoryPath}/${campaignId}/` - ); + core.setFailed(`Campaign repo-memory is enabled but no campaign state was written. Expected to find cursor and metrics under: ${sourceMemoryPath}/${campaignId}/`); return; } core.info(`Memory directory not found in artifact: ${sourceMemoryPath}`); @@ -7105,12 +7100,7 @@ jobs: let filesToCopy = []; try { const files = listFilesRecursively(sourceMemoryPath); - const patterns = fileGlobFilter - ? fileGlobFilter - .split(/\s+/) - .filter(Boolean) - .map(globToRegExp) - : []; + const patterns = fileGlobFilter ? 
fileGlobFilter.split(/\s+/).filter(Boolean).map(globToRegExp) : []; if (isCampaignMode) { const expectedCursorRel = `${campaignId}/cursor.json`; const cursorFile = files.find(f => f.relPath === expectedCursorRel); diff --git a/.github/workflows/daily-issues-report.lock.yml b/.github/workflows/daily-issues-report.lock.yml index c9bd314b75..a26cdbb13a 100644 --- a/.github/workflows/daily-issues-report.lock.yml +++ b/.github/workflows/daily-issues-report.lock.yml @@ -187,7 +187,7 @@ jobs: run: "pip install --user --quiet numpy pandas matplotlib seaborn scipy\n\n# Verify installations\npython3 -c \"import numpy; print(f'NumPy {numpy.__version__} installed')\"\npython3 -c \"import pandas; print(f'Pandas {pandas.__version__} installed')\"\npython3 -c \"import matplotlib; print(f'Matplotlib {matplotlib.__version__} installed')\"\npython3 -c \"import seaborn; print(f'Seaborn {seaborn.__version__} installed')\"\npython3 -c \"import scipy; print(f'SciPy {scipy.__version__} installed')\"\n\necho \"All scientific libraries installed successfully\"\n" - if: always() name: Upload generated charts - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: data-charts @@ -195,7 +195,7 @@ jobs: retention-days: 30 - if: always() name: Upload source files and data - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: python-source-and-data diff --git a/.github/workflows/daily-news.lock.yml b/.github/workflows/daily-news.lock.yml index 4160cddd9e..8eac6cb34e 100644 --- a/.github/workflows/daily-news.lock.yml +++ b/.github/workflows/daily-news.lock.yml @@ -179,7 +179,7 @@ jobs: run: "pip install --user --quiet numpy pandas matplotlib seaborn scipy\n\n# Verify installations\npython3 -c \"import numpy; print(f'NumPy {numpy.__version__} installed')\"\npython3 -c \"import pandas; print(f'Pandas {pandas.__version__} installed')\"\npython3 -c \"import matplotlib; print(f'Matplotlib {matplotlib.__version__} installed')\"\npython3 -c \"import seaborn; print(f'Seaborn {seaborn.__version__} installed')\"\npython3 -c \"import scipy; print(f'SciPy {scipy.__version__} installed')\"\n\necho \"All scientific libraries installed successfully\"\n" - if: always() name: Upload generated charts - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: data-charts @@ -187,7 +187,7 @@ jobs: retention-days: 30 - if: always() name: Upload source files and data - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: python-source-and-data diff --git a/.github/workflows/daily-performance-summary.lock.yml b/.github/workflows/daily-performance-summary.lock.yml index beded83902..1241567c76 100644 --- a/.github/workflows/daily-performance-summary.lock.yml +++ b/.github/workflows/daily-performance-summary.lock.yml @@ -176,7 +176,7 @@ jobs: pip install --user --quiet numpy pandas matplotlib seaborn scipy - if: always() name: Upload charts - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 
with: if-no-files-found: warn name: trending-charts @@ -184,7 +184,7 @@ jobs: retention-days: 30 - if: always() name: Upload source and data - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: trending-source-and-data diff --git a/.github/workflows/daily-repo-chronicle.lock.yml b/.github/workflows/daily-repo-chronicle.lock.yml index eb8b4f79f8..290216ebbd 100644 --- a/.github/workflows/daily-repo-chronicle.lock.yml +++ b/.github/workflows/daily-repo-chronicle.lock.yml @@ -174,7 +174,7 @@ jobs: run: "pip install --user --quiet numpy pandas matplotlib seaborn scipy\n\n# Verify installations\npython3 -c \"import numpy; print(f'NumPy {numpy.__version__} installed')\"\npython3 -c \"import pandas; print(f'Pandas {pandas.__version__} installed')\"\npython3 -c \"import matplotlib; print(f'Matplotlib {matplotlib.__version__} installed')\"\npython3 -c \"import seaborn; print(f'Seaborn {seaborn.__version__} installed')\"\npython3 -c \"import scipy; print(f'SciPy {scipy.__version__} installed')\"\n\necho \"All scientific libraries installed successfully\"\n" - if: always() name: Upload generated charts - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: data-charts @@ -182,7 +182,7 @@ jobs: retention-days: 30 - if: always() name: Upload source files and data - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: python-source-and-data diff --git a/.github/workflows/deep-report.lock.yml b/.github/workflows/deep-report.lock.yml index 47ab1b888d..affc0ea1f5 100644 --- a/.github/workflows/deep-report.lock.yml +++ b/.github/workflows/deep-report.lock.yml @@ -179,7 +179,7 @@ jobs: name: Fetch weekly issues data run: "# Create output directories\nmkdir -p /tmp/gh-aw/weekly-issues-data\nmkdir -p /tmp/gh-aw/cache-memory\n\n# Get today's date for cache identification\nTODAY=$(date '+%Y-%m-%d')\nCACHE_DIR=\"/tmp/gh-aw/cache-memory\"\n\n# Check if cached data exists from today\nif [ -f \"$CACHE_DIR/weekly-issues-${TODAY}.json\" ] && [ -s \"$CACHE_DIR/weekly-issues-${TODAY}.json\" ]; then\n echo \"✓ Found cached weekly issues data from ${TODAY}\"\n cp \"$CACHE_DIR/weekly-issues-${TODAY}.json\" /tmp/gh-aw/weekly-issues-data/issues.json\n \n # Regenerate schema if missing\n if [ ! 
-f \"$CACHE_DIR/weekly-issues-${TODAY}-schema.json\" ]; then\n /tmp/gh-aw/jqschema.sh < /tmp/gh-aw/weekly-issues-data/issues.json > \"$CACHE_DIR/weekly-issues-${TODAY}-schema.json\"\n fi\n cp \"$CACHE_DIR/weekly-issues-${TODAY}-schema.json\" /tmp/gh-aw/weekly-issues-data/issues-schema.json\n \n echo \"Using cached data from ${TODAY}\"\n echo \"Total issues in cache: $(jq 'length' /tmp/gh-aw/weekly-issues-data/issues.json)\"\nelse\n echo \"⬇ Downloading fresh weekly issues data...\"\n \n # Calculate date 7 days ago (cross-platform: GNU date first, BSD fallback)\n DATE_7_DAYS_AGO=$(date -d '7 days ago' '+%Y-%m-%d' 2>/dev/null || date -v-7d '+%Y-%m-%d')\n \n echo \"Fetching issues created or updated since ${DATE_7_DAYS_AGO}...\"\n \n # Fetch issues from the last 7 days using gh CLI\n # Using --search with updated filter to get recent activity\n gh issue list --repo ${{ github.repository }} \\\n --search \"updated:>=${DATE_7_DAYS_AGO}\" \\\n --state all \\\n --json number,title,author,createdAt,state,url,body,labels,updatedAt,closedAt,milestone,assignees,comments \\\n --limit 500 \\\n > /tmp/gh-aw/weekly-issues-data/issues.json\n\n # Generate schema for reference\n /tmp/gh-aw/jqschema.sh < /tmp/gh-aw/weekly-issues-data/issues.json > /tmp/gh-aw/weekly-issues-data/issues-schema.json\n\n # Store in cache with today's date\n cp /tmp/gh-aw/weekly-issues-data/issues.json \"$CACHE_DIR/weekly-issues-${TODAY}.json\"\n cp /tmp/gh-aw/weekly-issues-data/issues-schema.json \"$CACHE_DIR/weekly-issues-${TODAY}-schema.json\"\n\n echo \"✓ Weekly issues data saved to cache: weekly-issues-${TODAY}.json\"\n echo \"Total issues found: $(jq 'length' /tmp/gh-aw/weekly-issues-data/issues.json)\"\nfi\n\n# Always ensure data is available at expected locations for backward compatibility\necho \"Weekly issues data available at: /tmp/gh-aw/weekly-issues-data/issues.json\"\necho \"Schema available at: /tmp/gh-aw/weekly-issues-data/issues-schema.json\"" - name: Set up Go - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 + uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: cache: true go-version-file: go.mod @@ -7085,10 +7085,7 @@ jobs: if (!entry.isFile()) { continue; } - const relPath = path.posix - .relative(rootDir, absPath) - .split(path.sep) - .join("/"); + const relPath = path.posix.relative(rootDir, absPath).split(path.sep).join("/"); const stats = fs.statSync(absPath); result.push({ relPath, absPath, size: stats.size }); } @@ -7108,9 +7105,7 @@ jobs: const isCampaignMode = Boolean(campaignId); if (!fs.existsSync(sourceMemoryPath)) { if (isCampaignMode) { - core.setFailed( - `Campaign repo-memory is enabled but no campaign state was written. Expected to find cursor and metrics under: ${sourceMemoryPath}/${campaignId}/` - ); + core.setFailed(`Campaign repo-memory is enabled but no campaign state was written. Expected to find cursor and metrics under: ${sourceMemoryPath}/${campaignId}/`); return; } core.info(`Memory directory not found in artifact: ${sourceMemoryPath}`); @@ -7147,12 +7142,7 @@ jobs: let filesToCopy = []; try { const files = listFilesRecursively(sourceMemoryPath); - const patterns = fileGlobFilter - ? fileGlobFilter - .split(/\s+/) - .filter(Boolean) - .map(globToRegExp) - : []; + const patterns = fileGlobFilter ? 
fileGlobFilter.split(/\s+/).filter(Boolean).map(globToRegExp) : []; if (isCampaignMode) { const expectedCursorRel = `${campaignId}/cursor.json`; const cursorFile = files.find(f => f.relPath === expectedCursorRel); diff --git a/.github/workflows/dev-hawk.lock.yml b/.github/workflows/dev-hawk.lock.yml index 42c2142c3a..d2614f3480 100644 --- a/.github/workflows/dev-hawk.lock.yml +++ b/.github/workflows/dev-hawk.lock.yml @@ -174,7 +174,7 @@ jobs: mkdir -p /tmp/gh-aw/sandbox/agent/logs echo "Created /tmp/gh-aw/agent directory for agentic workflow temporary files" - name: Set up Go - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 + uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: cache: true go-version-file: go.mod diff --git a/.github/workflows/github-mcp-structural-analysis.lock.yml b/.github/workflows/github-mcp-structural-analysis.lock.yml index ac9f9f6943..0bc8dc83de 100644 --- a/.github/workflows/github-mcp-structural-analysis.lock.yml +++ b/.github/workflows/github-mcp-structural-analysis.lock.yml @@ -175,7 +175,7 @@ jobs: run: "pip install --user --quiet numpy pandas matplotlib seaborn scipy\n\n# Verify installations\npython3 -c \"import numpy; print(f'NumPy {numpy.__version__} installed')\"\npython3 -c \"import pandas; print(f'Pandas {pandas.__version__} installed')\"\npython3 -c \"import matplotlib; print(f'Matplotlib {matplotlib.__version__} installed')\"\npython3 -c \"import seaborn; print(f'Seaborn {seaborn.__version__} installed')\"\npython3 -c \"import scipy; print(f'SciPy {scipy.__version__} installed')\"\n\necho \"All scientific libraries installed successfully\"\n" - if: always() name: Upload generated charts - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: data-charts @@ -183,7 +183,7 @@ jobs: retention-days: 30 - if: always() name: Upload source files and data - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: python-source-and-data diff --git a/.github/workflows/go-pattern-detector.lock.yml b/.github/workflows/go-pattern-detector.lock.yml index 5444f90f6a..cc741976f9 100644 --- a/.github/workflows/go-pattern-detector.lock.yml +++ b/.github/workflows/go-pattern-detector.lock.yml @@ -5391,7 +5391,7 @@ jobs: found_patterns: ${{ steps.detect.outputs.found_patterns }} steps: - name: Checkout repository - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: persist-credentials: false - name: Install ast-grep diff --git a/.github/workflows/human-ai-collaboration.lock.yml b/.github/workflows/human-ai-collaboration.lock.yml index ddfebdac3a..2d9da5d099 100644 --- a/.github/workflows/human-ai-collaboration.lock.yml +++ b/.github/workflows/human-ai-collaboration.lock.yml @@ -7049,10 +7049,7 @@ jobs: if (!entry.isFile()) { continue; } - const relPath = path.posix - .relative(rootDir, absPath) - .split(path.sep) - .join("/"); + const relPath = path.posix.relative(rootDir, absPath).split(path.sep).join("/"); const stats = fs.statSync(absPath); result.push({ relPath, absPath, size: stats.size }); } @@ -7072,9 +7069,7 @@ jobs: const isCampaignMode = Boolean(campaignId); if (!fs.existsSync(sourceMemoryPath)) { if (isCampaignMode) { - core.setFailed( - 
`Campaign repo-memory is enabled but no campaign state was written. Expected to find cursor and metrics under: ${sourceMemoryPath}/${campaignId}/` - ); + core.setFailed(`Campaign repo-memory is enabled but no campaign state was written. Expected to find cursor and metrics under: ${sourceMemoryPath}/${campaignId}/`); return; } core.info(`Memory directory not found in artifact: ${sourceMemoryPath}`); @@ -7111,12 +7106,7 @@ jobs: let filesToCopy = []; try { const files = listFilesRecursively(sourceMemoryPath); - const patterns = fileGlobFilter - ? fileGlobFilter - .split(/\s+/) - .filter(Boolean) - .map(globToRegExp) - : []; + const patterns = fileGlobFilter ? fileGlobFilter.split(/\s+/).filter(Boolean).map(globToRegExp) : []; if (isCampaignMode) { const expectedCursorRel = `${campaignId}/cursor.json`; const cursorFile = files.find(f => f.relPath === expectedCursorRel); diff --git a/.github/workflows/incident-response.lock.yml b/.github/workflows/incident-response.lock.yml index f9d95806fe..885eb8cb64 100644 --- a/.github/workflows/incident-response.lock.yml +++ b/.github/workflows/incident-response.lock.yml @@ -7207,10 +7207,7 @@ jobs: if (!entry.isFile()) { continue; } - const relPath = path.posix - .relative(rootDir, absPath) - .split(path.sep) - .join("/"); + const relPath = path.posix.relative(rootDir, absPath).split(path.sep).join("/"); const stats = fs.statSync(absPath); result.push({ relPath, absPath, size: stats.size }); } @@ -7230,9 +7227,7 @@ jobs: const isCampaignMode = Boolean(campaignId); if (!fs.existsSync(sourceMemoryPath)) { if (isCampaignMode) { - core.setFailed( - `Campaign repo-memory is enabled but no campaign state was written. Expected to find cursor and metrics under: ${sourceMemoryPath}/${campaignId}/` - ); + core.setFailed(`Campaign repo-memory is enabled but no campaign state was written. Expected to find cursor and metrics under: ${sourceMemoryPath}/${campaignId}/`); return; } core.info(`Memory directory not found in artifact: ${sourceMemoryPath}`); @@ -7269,12 +7264,7 @@ jobs: let filesToCopy = []; try { const files = listFilesRecursively(sourceMemoryPath); - const patterns = fileGlobFilter - ? fileGlobFilter - .split(/\s+/) - .filter(Boolean) - .map(globToRegExp) - : []; + const patterns = fileGlobFilter ? 
fileGlobFilter.split(/\s+/).filter(Boolean).map(globToRegExp) : []; if (isCampaignMode) { const expectedCursorRel = `${campaignId}/cursor.json`; const cursorFile = files.find(f => f.relPath === expectedCursorRel); diff --git a/.github/workflows/intelligence.lock.yml b/.github/workflows/intelligence.lock.yml index 499b3eb7ea..ea0f0fa70e 100644 --- a/.github/workflows/intelligence.lock.yml +++ b/.github/workflows/intelligence.lock.yml @@ -182,7 +182,7 @@ jobs: run: "pip install --user --quiet numpy pandas matplotlib seaborn scipy\n\n# Verify installations\npython3 -c \"import numpy; print(f'NumPy {numpy.__version__} installed')\"\npython3 -c \"import pandas; print(f'Pandas {pandas.__version__} installed')\"\npython3 -c \"import matplotlib; print(f'Matplotlib {matplotlib.__version__} installed')\"\npython3 -c \"import seaborn; print(f'Seaborn {seaborn.__version__} installed')\"\npython3 -c \"import scipy; print(f'SciPy {scipy.__version__} installed')\"\n\necho \"All scientific libraries installed successfully\"\n" - if: always() name: Upload generated charts - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: data-charts @@ -190,7 +190,7 @@ jobs: retention-days: 30 - if: always() name: Upload source files and data - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: python-source-and-data @@ -7745,10 +7745,7 @@ jobs: if (!entry.isFile()) { continue; } - const relPath = path.posix - .relative(rootDir, absPath) - .split(path.sep) - .join("/"); + const relPath = path.posix.relative(rootDir, absPath).split(path.sep).join("/"); const stats = fs.statSync(absPath); result.push({ relPath, absPath, size: stats.size }); } @@ -7768,9 +7765,7 @@ jobs: const isCampaignMode = Boolean(campaignId); if (!fs.existsSync(sourceMemoryPath)) { if (isCampaignMode) { - core.setFailed( - `Campaign repo-memory is enabled but no campaign state was written. Expected to find cursor and metrics under: ${sourceMemoryPath}/${campaignId}/` - ); + core.setFailed(`Campaign repo-memory is enabled but no campaign state was written. Expected to find cursor and metrics under: ${sourceMemoryPath}/${campaignId}/`); return; } core.info(`Memory directory not found in artifact: ${sourceMemoryPath}`); @@ -7807,12 +7802,7 @@ jobs: let filesToCopy = []; try { const files = listFilesRecursively(sourceMemoryPath); - const patterns = fileGlobFilter - ? fileGlobFilter - .split(/\s+/) - .filter(Boolean) - .map(globToRegExp) - : []; + const patterns = fileGlobFilter ? 
fileGlobFilter.split(/\s+/).filter(Boolean).map(globToRegExp) : []; if (isCampaignMode) { const expectedCursorRel = `${campaignId}/cursor.json`; const cursorFile = files.find(f => f.relPath === expectedCursorRel); diff --git a/.github/workflows/issue-classifier.lock.yml b/.github/workflows/issue-classifier.lock.yml index b4cd8aa7b8..0a8d23036c 100644 --- a/.github/workflows/issue-classifier.lock.yml +++ b/.github/workflows/issue-classifier.lock.yml @@ -3006,7 +3006,7 @@ jobs: path: /tmp/gh-aw/aw_info.json if-no-files-found: warn - name: Run AI Inference - uses: actions/ai-inference@334892bb203895caaed82ec52d23c1ed9385151e # v1 + uses: actions/ai-inference@334892bb203895caaed82ec52d23c1ed9385151e # v2.0.4 env: GH_AW_MCP_CONFIG: /tmp/gh-aw/mcp-config/mcp-servers.json GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt diff --git a/.github/workflows/issue-monster.lock.yml b/.github/workflows/issue-monster.lock.yml index 7c9cf8d63c..d3ab4fc6cf 100644 --- a/.github/workflows/issue-monster.lock.yml +++ b/.github/workflows/issue-monster.lock.yml @@ -27,7 +27,7 @@ name: "Issue Monster" - cron: "49 */1 * * *" # Friendly format: every 1h (scattered) # skip-if-match: # Skip-if-match processed as search check in pre-activation job - # max: 5 + # max: 9 # query: is:pr is:open is:draft author:app/copilot-swe-agent workflow_dispatch: @@ -6715,7 +6715,7 @@ jobs: env: GH_AW_SKIP_QUERY: "is:pr is:open is:draft author:app/copilot-swe-agent" GH_AW_WORKFLOW_NAME: "Issue Monster" - GH_AW_SKIP_MAX_MATCHES: "5" + GH_AW_SKIP_MAX_MATCHES: "9" with: script: | async function main() { diff --git a/.github/workflows/issue-monster.md b/.github/workflows/issue-monster.md index f66b0dbace..fb902f6257 100644 --- a/.github/workflows/issue-monster.md +++ b/.github/workflows/issue-monster.md @@ -6,7 +6,7 @@ on: schedule: every 1h skip-if-match: query: "is:pr is:open is:draft author:app/copilot-swe-agent" - max: 5 + max: 9 permissions: contents: read diff --git a/.github/workflows/org-health-report.lock.yml b/.github/workflows/org-health-report.lock.yml index 8984388e8b..9051469030 100644 --- a/.github/workflows/org-health-report.lock.yml +++ b/.github/workflows/org-health-report.lock.yml @@ -176,7 +176,7 @@ jobs: run: "pip install --user --quiet numpy pandas matplotlib seaborn scipy\n\n# Verify installations\npython3 -c \"import numpy; print(f'NumPy {numpy.__version__} installed')\"\npython3 -c \"import pandas; print(f'Pandas {pandas.__version__} installed')\"\npython3 -c \"import matplotlib; print(f'Matplotlib {matplotlib.__version__} installed')\"\npython3 -c \"import seaborn; print(f'Seaborn {seaborn.__version__} installed')\"\npython3 -c \"import scipy; print(f'SciPy {scipy.__version__} installed')\"\n\necho \"All scientific libraries installed successfully\"\n" - if: always() name: Upload generated charts - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: data-charts @@ -184,7 +184,7 @@ jobs: retention-days: 30 - if: always() name: Upload source files and data - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: python-source-and-data diff --git a/.github/workflows/org-wide-rollout.lock.yml b/.github/workflows/org-wide-rollout.lock.yml index 2caeaf2922..b9215b65d7 100644 --- a/.github/workflows/org-wide-rollout.lock.yml +++ 
b/.github/workflows/org-wide-rollout.lock.yml @@ -7235,10 +7235,7 @@ jobs: if (!entry.isFile()) { continue; } - const relPath = path.posix - .relative(rootDir, absPath) - .split(path.sep) - .join("/"); + const relPath = path.posix.relative(rootDir, absPath).split(path.sep).join("/"); const stats = fs.statSync(absPath); result.push({ relPath, absPath, size: stats.size }); } @@ -7258,9 +7255,7 @@ jobs: const isCampaignMode = Boolean(campaignId); if (!fs.existsSync(sourceMemoryPath)) { if (isCampaignMode) { - core.setFailed( - `Campaign repo-memory is enabled but no campaign state was written. Expected to find cursor and metrics under: ${sourceMemoryPath}/${campaignId}/` - ); + core.setFailed(`Campaign repo-memory is enabled but no campaign state was written. Expected to find cursor and metrics under: ${sourceMemoryPath}/${campaignId}/`); return; } core.info(`Memory directory not found in artifact: ${sourceMemoryPath}`); @@ -7297,12 +7292,7 @@ jobs: let filesToCopy = []; try { const files = listFilesRecursively(sourceMemoryPath); - const patterns = fileGlobFilter - ? fileGlobFilter - .split(/\s+/) - .filter(Boolean) - .map(globToRegExp) - : []; + const patterns = fileGlobFilter ? fileGlobFilter.split(/\s+/).filter(Boolean).map(globToRegExp) : []; if (isCampaignMode) { const expectedCursorRel = `${campaignId}/cursor.json`; const cursorFile = files.find(f => f.relPath === expectedCursorRel); diff --git a/.github/workflows/portfolio-analyst.lock.yml b/.github/workflows/portfolio-analyst.lock.yml index 3065956dc8..d7aa6c8aa9 100644 --- a/.github/workflows/portfolio-analyst.lock.yml +++ b/.github/workflows/portfolio-analyst.lock.yml @@ -171,7 +171,7 @@ jobs: mkdir -p /tmp/gh-aw/sandbox/agent/logs echo "Created /tmp/gh-aw/agent directory for agentic workflow temporary files" - name: Set up Go - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 + uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: cache: true go-version-file: go.mod @@ -193,7 +193,7 @@ jobs: pip install --user --quiet numpy pandas matplotlib seaborn scipy - if: always() name: Upload charts - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: trending-charts @@ -201,7 +201,7 @@ jobs: retention-days: 30 - if: always() name: Upload source and data - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: trending-source-and-data diff --git a/.github/workflows/prompt-clustering-analysis.lock.yml b/.github/workflows/prompt-clustering-analysis.lock.yml index 7b01986091..f0a07ad2d6 100644 --- a/.github/workflows/prompt-clustering-analysis.lock.yml +++ b/.github/workflows/prompt-clustering-analysis.lock.yml @@ -173,7 +173,7 @@ jobs: - name: Set up jq utilities directory run: "mkdir -p /tmp/gh-aw\ncat > /tmp/gh-aw/jqschema.sh << 'EOF'\n#!/usr/bin/env bash\n# jqschema.sh\njq -c '\ndef walk(f):\n . as $in |\n if type == \"object\" then\n reduce keys[] as $k ({}; . 
+ {($k): ($in[$k] | walk(f))})\n elif type == \"array\" then\n if length == 0 then [] else [.[0] | walk(f)] end\n else\n type\n end;\nwalk(.)\n'\nEOF\nchmod +x /tmp/gh-aw/jqschema.sh" - name: Set up Go - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 + uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: cache: true go-version-file: go.mod @@ -198,7 +198,7 @@ jobs: pip install --user --quiet numpy pandas matplotlib seaborn scipy - if: always() name: Upload charts - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: trending-charts @@ -206,7 +206,7 @@ jobs: retention-days: 30 - if: always() name: Upload source and data - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: trending-source-and-data diff --git a/.github/workflows/python-data-charts.lock.yml b/.github/workflows/python-data-charts.lock.yml index 9016dda29d..4e2d4ea778 100644 --- a/.github/workflows/python-data-charts.lock.yml +++ b/.github/workflows/python-data-charts.lock.yml @@ -172,7 +172,7 @@ jobs: run: "pip install --user --quiet numpy pandas matplotlib seaborn scipy\n\n# Verify installations\npython3 -c \"import numpy; print(f'NumPy {numpy.__version__} installed')\"\npython3 -c \"import pandas; print(f'Pandas {pandas.__version__} installed')\"\npython3 -c \"import matplotlib; print(f'Matplotlib {matplotlib.__version__} installed')\"\npython3 -c \"import seaborn; print(f'Seaborn {seaborn.__version__} installed')\"\npython3 -c \"import scipy; print(f'SciPy {scipy.__version__} installed')\"\n\necho \"All scientific libraries installed successfully\"\n" - if: always() name: Upload generated charts - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: data-charts @@ -180,7 +180,7 @@ jobs: retention-days: 30 - if: always() name: Upload source files and data - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: python-source-and-data diff --git a/.github/workflows/release.lock.yml b/.github/workflows/release.lock.yml index 4fcd2b0742..532c8ef4e6 100644 --- a/.github/workflows/release.lock.yml +++ b/.github/workflows/release.lock.yml @@ -6500,28 +6500,28 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 - name: Set up Go - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 + uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: cache: false go-version-file: go.mod - name: Download Go modules run: go mod download - name: Generate SBOM (SPDX format) - uses: anchore/sbom-action@43a17d6e7add2b5535efe4dcae9952337c479a93 # v0.20.10 + uses: anchore/sbom-action@43a17d6e7add2b5535efe4dcae9952337c479a93 # v0.20.11 with: artifact-name: sbom.spdx.json format: spdx-json output-file: sbom.spdx.json - name: Generate SBOM (CycloneDX format) - uses: anchore/sbom-action@43a17d6e7add2b5535efe4dcae9952337c479a93 # v0.20.10 + uses: 
anchore/sbom-action@43a17d6e7add2b5535efe4dcae9952337c479a93 # v0.20.11 with: artifact-name: sbom.cdx.json format: cyclonedx-json output-file: sbom.cdx.json - name: Upload SBOM artifacts - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: sbom-artifacts path: | @@ -6700,12 +6700,12 @@ jobs: release_tag: ${{ steps.get_release.outputs.release_tag }} steps: - name: Checkout - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: fetch-depth: 0 persist-credentials: false - name: Release with gh-extension-precompile - uses: cli/gh-extension-precompile@9e2237c30f869ad3bcaed6a4be2cd43564dd421b # v2 + uses: cli/gh-extension-precompile@9e2237c30f869ad3bcaed6a4be2cd43564dd421b # v2.1.0 with: build_script_override: scripts/build-release.sh go_version_file: go.mod diff --git a/.github/workflows/safe-output-health.lock.yml b/.github/workflows/safe-output-health.lock.yml index 5b82eb490c..83b39a201c 100644 --- a/.github/workflows/safe-output-health.lock.yml +++ b/.github/workflows/safe-output-health.lock.yml @@ -167,7 +167,7 @@ jobs: mkdir -p /tmp/gh-aw/sandbox/agent/logs echo "Created /tmp/gh-aw/agent directory for agentic workflow temporary files" - name: Set up Go - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 + uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: cache: true go-version-file: go.mod diff --git a/.github/workflows/security-compliance.lock.yml b/.github/workflows/security-compliance.lock.yml index e89c1d0413..dd23815057 100644 --- a/.github/workflows/security-compliance.lock.yml +++ b/.github/workflows/security-compliance.lock.yml @@ -6869,10 +6869,7 @@ jobs: if (!entry.isFile()) { continue; } - const relPath = path.posix - .relative(rootDir, absPath) - .split(path.sep) - .join("/"); + const relPath = path.posix.relative(rootDir, absPath).split(path.sep).join("/"); const stats = fs.statSync(absPath); result.push({ relPath, absPath, size: stats.size }); } @@ -6892,9 +6889,7 @@ jobs: const isCampaignMode = Boolean(campaignId); if (!fs.existsSync(sourceMemoryPath)) { if (isCampaignMode) { - core.setFailed( - `Campaign repo-memory is enabled but no campaign state was written. Expected to find cursor and metrics under: ${sourceMemoryPath}/${campaignId}/` - ); + core.setFailed(`Campaign repo-memory is enabled but no campaign state was written. Expected to find cursor and metrics under: ${sourceMemoryPath}/${campaignId}/`); return; } core.info(`Memory directory not found in artifact: ${sourceMemoryPath}`); @@ -6931,12 +6926,7 @@ jobs: let filesToCopy = []; try { const files = listFilesRecursively(sourceMemoryPath); - const patterns = fileGlobFilter - ? fileGlobFilter - .split(/\s+/) - .filter(Boolean) - .map(globToRegExp) - : []; + const patterns = fileGlobFilter ? 
fileGlobFilter.split(/\s+/).filter(Boolean).map(globToRegExp) : []; if (isCampaignMode) { const expectedCursorRel = `${campaignId}/cursor.json`; const cursorFile = files.find(f => f.relPath === expectedCursorRel); diff --git a/.github/workflows/slide-deck-maintainer.lock.yml b/.github/workflows/slide-deck-maintainer.lock.yml index 16a1bceb66..c0b7d7d7ed 100644 --- a/.github/workflows/slide-deck-maintainer.lock.yml +++ b/.github/workflows/slide-deck-maintainer.lock.yml @@ -167,7 +167,7 @@ jobs: mkdir -p /tmp/gh-aw/sandbox/agent/logs echo "Created /tmp/gh-aw/agent directory for agentic workflow temporary files" - name: Set up Node.js - uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6 + uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0 with: cache: npm cache-dependency-path: docs/package-lock.json diff --git a/.github/workflows/smoke-copilot-playwright.lock.yml b/.github/workflows/smoke-copilot-playwright.lock.yml index 71c6933c71..9e4948c85a 100644 --- a/.github/workflows/smoke-copilot-playwright.lock.yml +++ b/.github/workflows/smoke-copilot-playwright.lock.yml @@ -7635,7 +7635,7 @@ jobs: run: "echo \"📋 Collecting Playwright MCP logs...\"\n\n# Create logs directory\nmkdir -p /tmp/gh-aw/playwright-debug-logs\n\n# Copy any playwright logs from the MCP logs directory\nif [ -d \"/tmp/gh-aw/mcp-logs/playwright\" ]; then\n echo \"Found Playwright MCP logs directory\"\n cp -r /tmp/gh-aw/mcp-logs/playwright/* /tmp/gh-aw/playwright-debug-logs/ 2>/dev/null || true\n ls -la /tmp/gh-aw/playwright-debug-logs/\nelse\n echo \"No Playwright MCP logs directory found at /tmp/gh-aw/mcp-logs/playwright\"\nfi\n\n# List all trace files if any\necho \"Looking for trace files...\"\nfind /tmp -name \"*.zip\" -o -name \"trace*\" 2>/dev/null | head -20 || true\n\n# Show docker container logs if any containers are still running\necho \"Checking for running Docker containers...\"\ndocker ps -a --format \"table {{.Names}}\\t{{.Status}}\\t{{.Image}}\" 2>/dev/null || true\n" - if: always() name: Upload Playwright Debug Logs - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: ignore name: playwright-debug-logs-${{ github.run_id }} diff --git a/.github/workflows/smoke-detector.lock.yml b/.github/workflows/smoke-detector.lock.yml index da99b69e2a..599b02c984 100644 --- a/.github/workflows/smoke-detector.lock.yml +++ b/.github/workflows/smoke-detector.lock.yml @@ -595,7 +595,7 @@ jobs: mkdir -p /tmp/gh-aw/sandbox/agent/logs echo "Created /tmp/gh-aw/agent directory for agentic workflow temporary files" - name: Set up Go - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 + uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: cache: true go-version-file: go.mod diff --git a/.github/workflows/spec-kit-execute.lock.yml b/.github/workflows/spec-kit-execute.lock.yml index 3e59413efd..6c8db3d2bc 100644 --- a/.github/workflows/spec-kit-execute.lock.yml +++ b/.github/workflows/spec-kit-execute.lock.yml @@ -6991,10 +6991,7 @@ jobs: if (!entry.isFile()) { continue; } - const relPath = path.posix - .relative(rootDir, absPath) - .split(path.sep) - .join("/"); + const relPath = path.posix.relative(rootDir, absPath).split(path.sep).join("/"); const stats = fs.statSync(absPath); result.push({ relPath, absPath, size: stats.size }); } @@ -7014,9 +7011,7 @@ jobs: const isCampaignMode = Boolean(campaignId); if 
(!fs.existsSync(sourceMemoryPath)) { if (isCampaignMode) { - core.setFailed( - `Campaign repo-memory is enabled but no campaign state was written. Expected to find cursor and metrics under: ${sourceMemoryPath}/${campaignId}/` - ); + core.setFailed(`Campaign repo-memory is enabled but no campaign state was written. Expected to find cursor and metrics under: ${sourceMemoryPath}/${campaignId}/`); return; } core.info(`Memory directory not found in artifact: ${sourceMemoryPath}`); @@ -7053,12 +7048,7 @@ jobs: let filesToCopy = []; try { const files = listFilesRecursively(sourceMemoryPath); - const patterns = fileGlobFilter - ? fileGlobFilter - .split(/\s+/) - .filter(Boolean) - .map(globToRegExp) - : []; + const patterns = fileGlobFilter ? fileGlobFilter.split(/\s+/).filter(Boolean).map(globToRegExp) : []; if (isCampaignMode) { const expectedCursorRel = `${campaignId}/cursor.json`; const cursorFile = files.find(f => f.relPath === expectedCursorRel); diff --git a/.github/workflows/spec-kit-executor.lock.yml b/.github/workflows/spec-kit-executor.lock.yml index d1df9a5830..0b80f44c0f 100644 --- a/.github/workflows/spec-kit-executor.lock.yml +++ b/.github/workflows/spec-kit-executor.lock.yml @@ -6866,10 +6866,7 @@ jobs: if (!entry.isFile()) { continue; } - const relPath = path.posix - .relative(rootDir, absPath) - .split(path.sep) - .join("/"); + const relPath = path.posix.relative(rootDir, absPath).split(path.sep).join("/"); const stats = fs.statSync(absPath); result.push({ relPath, absPath, size: stats.size }); } @@ -6889,9 +6886,7 @@ jobs: const isCampaignMode = Boolean(campaignId); if (!fs.existsSync(sourceMemoryPath)) { if (isCampaignMode) { - core.setFailed( - `Campaign repo-memory is enabled but no campaign state was written. Expected to find cursor and metrics under: ${sourceMemoryPath}/${campaignId}/` - ); + core.setFailed(`Campaign repo-memory is enabled but no campaign state was written. Expected to find cursor and metrics under: ${sourceMemoryPath}/${campaignId}/`); return; } core.info(`Memory directory not found in artifact: ${sourceMemoryPath}`); @@ -6928,12 +6923,7 @@ jobs: let filesToCopy = []; try { const files = listFilesRecursively(sourceMemoryPath); - const patterns = fileGlobFilter - ? fileGlobFilter - .split(/\s+/) - .filter(Boolean) - .map(globToRegExp) - : []; + const patterns = fileGlobFilter ? 
fileGlobFilter.split(/\s+/).filter(Boolean).map(globToRegExp) : []; if (isCampaignMode) { const expectedCursorRel = `${campaignId}/cursor.json`; const cursorFile = files.find(f => f.relPath === expectedCursorRel); diff --git a/.github/workflows/stale-repo-identifier.lock.yml b/.github/workflows/stale-repo-identifier.lock.yml index cafa2d4a1c..9826f0b420 100644 --- a/.github/workflows/stale-repo-identifier.lock.yml +++ b/.github/workflows/stale-repo-identifier.lock.yml @@ -184,7 +184,7 @@ jobs: run: "pip install --user --quiet numpy pandas matplotlib seaborn scipy\n\n# Verify installations\npython3 -c \"import numpy; print(f'NumPy {numpy.__version__} installed')\"\npython3 -c \"import pandas; print(f'Pandas {pandas.__version__} installed')\"\npython3 -c \"import matplotlib; print(f'Matplotlib {matplotlib.__version__} installed')\"\npython3 -c \"import seaborn; print(f'Seaborn {seaborn.__version__} installed')\"\npython3 -c \"import scipy; print(f'SciPy {scipy.__version__} installed')\"\n\necho \"All scientific libraries installed successfully\"\n" - if: always() name: Upload generated charts - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: data-charts @@ -192,7 +192,7 @@ jobs: retention-days: 30 - if: always() name: Upload source files and data - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: python-source-and-data @@ -208,7 +208,7 @@ jobs: pip install --user --quiet numpy pandas matplotlib seaborn scipy - if: always() name: Upload charts - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: trending-charts @@ -216,7 +216,7 @@ jobs: retention-days: 30 - if: always() name: Upload source and data - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: trending-source-and-data diff --git a/.github/workflows/static-analysis-report.lock.yml b/.github/workflows/static-analysis-report.lock.yml index 01e0ea8511..bc200661d9 100644 --- a/.github/workflows/static-analysis-report.lock.yml +++ b/.github/workflows/static-analysis-report.lock.yml @@ -166,7 +166,7 @@ jobs: mkdir -p /tmp/gh-aw/sandbox/agent/logs echo "Created /tmp/gh-aw/agent directory for agentic workflow temporary files" - name: Set up Go - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 + uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: cache: true go-version-file: go.mod diff --git a/.github/workflows/super-linter.lock.yml b/.github/workflows/super-linter.lock.yml index d5a2f30d5c..0dd7aa278a 100644 --- a/.github/workflows/super-linter.lock.yml +++ b/.github/workflows/super-linter.lock.yml @@ -166,7 +166,7 @@ jobs: mkdir -p /tmp/gh-aw/sandbox/agent/logs echo "Created /tmp/gh-aw/agent directory for agentic workflow temporary files" - name: Download super-linter log - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: super-linter-log path: /tmp/gh-aw/ @@ -7547,13 +7547,13 @@ jobs: steps: - name: 
Checkout Code - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: fetch-depth: 0 persist-credentials: false - name: Super-linter id: super-linter - uses: super-linter/super-linter@47984f49b4e87383eed97890fe2dca6063bbd9c3 # v8.2.1 + uses: super-linter/super-linter@47984f49b4e87383eed97890fe2dca6063bbd9c3 # v8.3.1 env: CREATE_LOG_FILE: "true" DEFAULT_BRANCH: main @@ -7575,7 +7575,7 @@ jobs: fi - name: Upload super-linter log if: always() - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: super-linter-log path: super-linter.log diff --git a/.github/workflows/tidy.lock.yml b/.github/workflows/tidy.lock.yml index f3dcc5ef32..840170e2de 100644 --- a/.github/workflows/tidy.lock.yml +++ b/.github/workflows/tidy.lock.yml @@ -577,13 +577,13 @@ jobs: mkdir -p /tmp/gh-aw/sandbox/agent/logs echo "Created /tmp/gh-aw/agent directory for agentic workflow temporary files" - name: Set up Node.js - uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6 + uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0 with: cache: npm cache-dependency-path: pkg/workflow/js/package-lock.json node-version: "24" - name: Set up Go - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 + uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: cache: true go-version-file: go.mod diff --git a/.github/workflows/weekly-issue-summary.lock.yml b/.github/workflows/weekly-issue-summary.lock.yml index 17588260c5..0551f3ad7c 100644 --- a/.github/workflows/weekly-issue-summary.lock.yml +++ b/.github/workflows/weekly-issue-summary.lock.yml @@ -167,7 +167,7 @@ jobs: run: "pip install --user --quiet numpy pandas matplotlib seaborn scipy\n\n# Verify installations\npython3 -c \"import numpy; print(f'NumPy {numpy.__version__} installed')\"\npython3 -c \"import pandas; print(f'Pandas {pandas.__version__} installed')\"\npython3 -c \"import matplotlib; print(f'Matplotlib {matplotlib.__version__} installed')\"\npython3 -c \"import seaborn; print(f'Seaborn {seaborn.__version__} installed')\"\npython3 -c \"import scipy; print(f'SciPy {scipy.__version__} installed')\"\n\necho \"All scientific libraries installed successfully\"\n" - if: always() name: Upload generated charts - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: data-charts @@ -175,7 +175,7 @@ jobs: retention-days: 30 - if: always() name: Upload source files and data - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: if-no-files-found: warn name: python-source-and-data diff --git a/.github/workflows/workflow-health-manager.lock.yml b/.github/workflows/workflow-health-manager.lock.yml index 01a845fd74..1ab832f79a 100644 --- a/.github/workflows/workflow-health-manager.lock.yml +++ b/.github/workflows/workflow-health-manager.lock.yml @@ -7168,10 +7168,7 @@ jobs: if (!entry.isFile()) { continue; } - const relPath = path.posix - .relative(rootDir, absPath) - .split(path.sep) - .join("/"); + const relPath = path.posix.relative(rootDir, absPath).split(path.sep).join("/"); const stats = fs.statSync(absPath); result.push({ relPath, absPath, size: 
stats.size }); } @@ -7191,9 +7188,7 @@ jobs: const isCampaignMode = Boolean(campaignId); if (!fs.existsSync(sourceMemoryPath)) { if (isCampaignMode) { - core.setFailed( - `Campaign repo-memory is enabled but no campaign state was written. Expected to find cursor and metrics under: ${sourceMemoryPath}/${campaignId}/` - ); + core.setFailed(`Campaign repo-memory is enabled but no campaign state was written. Expected to find cursor and metrics under: ${sourceMemoryPath}/${campaignId}/`); return; } core.info(`Memory directory not found in artifact: ${sourceMemoryPath}`); @@ -7230,12 +7225,7 @@ jobs: let filesToCopy = []; try { const files = listFilesRecursively(sourceMemoryPath); - const patterns = fileGlobFilter - ? fileGlobFilter - .split(/\s+/) - .filter(Boolean) - .map(globToRegExp) - : []; + const patterns = fileGlobFilter ? fileGlobFilter.split(/\s+/).filter(Boolean).map(globToRegExp) : []; if (isCampaignMode) { const expectedCursorRel = `${campaignId}/cursor.json`; const cursorFile = files.find(f => f.relPath === expectedCursorRel); diff --git a/docs/src/content/docs/reference/memory.md b/docs/src/content/docs/reference/memory.md index 69d5291b00..f1d6f2ec27 100644 --- a/docs/src/content/docs/reference/memory.md +++ b/docs/src/content/docs/reference/memory.md @@ -5,15 +5,11 @@ sidebar: order: 1500 --- -Agentic workflows can maintain persistent memory through GitHub Issues/Discussions/files, **cache-memory** (GitHub Actions cache with 7-day retention), or **repo-memory** (Git branches with unlimited retention). - -This guide covers cache-memory and repo-memory configuration. +Agentic workflows maintain persistent memory through **cache-memory** (GitHub Actions cache, 7-day retention) or **repo-memory** (Git branches, unlimited retention). ## Cache Memory -Enables persistent file storage across workflow runs using GitHub Actions cache. When enabled, the compiler automatically sets up the cache directory, restore/save operations, and progressive fallback keys. - -Storage locations: `/tmp/gh-aw/cache-memory/` (default) or `/tmp/gh-aw/cache-memory-{id}/` (additional caches). +Provides persistent file storage across workflow runs via GitHub Actions cache. The compiler automatically configures the cache directory, restore/save operations, and progressive fallback keys at `/tmp/gh-aw/cache-memory/` (default) or `/tmp/gh-aw/cache-memory-{id}/` (additional caches). ## Enabling Cache Memory @@ -24,11 +20,7 @@ tools: --- ``` -Uses default key `memory-${{ github.workflow }}-${{ github.run_id }}` and stores files at `/tmp/gh-aw/cache-memory/`. - -## Using the Cache Folder - -Store and retrieve information using standard file operations. Organize files as JSON/YAML (structured data), text files (notes/logs), or subdirectories. +Stores files at `/tmp/gh-aw/cache-memory/` using default key `memory-${{ github.workflow }}-${{ github.run_id }}`. Use standard file operations to store/retrieve JSON/YAML, text files, or subdirectories. ## Advanced Configuration @@ -37,12 +29,10 @@ Store and retrieve information using standard file operations. Organize files as tools: cache-memory: key: custom-memory-${{ github.workflow }}-${{ github.run_id }} - retention-days: 30 # 1-90 days, defaults to repo setting + retention-days: 30 # 1-90 days, extends access beyond cache expiration --- ``` -The `retention-days` controls artifact retention, providing access beyond the 7-day cache expiration. 
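
Several lock-file hunks above collapse the `fileGlobFilter.split(/\s+/).filter(Boolean).map(globToRegExp)` chain onto one line; the same whitespace-separated glob list also drives the repo-memory `file-glob` validation documented below. A minimal Go sketch of that filtering, assuming globs support only `*` and `?` wildcards — the real `globToRegExp` helper lives in the generated JavaScript and may handle more cases (e.g. path separators):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// globToRegexp converts a simple glob (only * and ? wildcards) into an
// anchored regular expression. Illustrative only: the lock files' own
// globToRegExp may differ, e.g. in whether * crosses "/" boundaries.
func globToRegexp(glob string) *regexp.Regexp {
	var b strings.Builder
	b.WriteString("^")
	for _, r := range glob {
		switch r {
		case '*':
			b.WriteString(".*")
		case '?':
			b.WriteString(".")
		default:
			b.WriteString(regexp.QuoteMeta(string(r)))
		}
	}
	b.WriteString("$")
	return regexp.MustCompile(b.String())
}

func main() {
	// Whitespace-separated filter, as in the file-glob setting.
	filter := "*.md *.json"
	var patterns []*regexp.Regexp
	for _, g := range strings.Fields(filter) { // Fields already drops empty tokens
		patterns = append(patterns, globToRegexp(g))
	}
	for _, rel := range []string{"cursor.json", "notes/log.txt"} {
		matched := false
		for _, p := range patterns {
			if p.MatchString(rel) {
				matched = true
				break
			}
		}
		fmt.Printf("%s matched=%v\n", rel, matched)
	}
}
```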
- ## Multiple Cache Configurations ```aw wrap @@ -58,12 +48,10 @@ tools: --- ``` -Each cache mounts at `/tmp/gh-aw/cache-memory/` (default) or `/tmp/gh-aw/cache-memory-{id}/` (others). The `id` field is required and determines the folder name. If `key` is omitted, defaults to `memory-{id}-${{ github.workflow }}-${{ github.run_id }}`. +Mounts at `/tmp/gh-aw/cache-memory/` (default) or `/tmp/gh-aw/cache-memory-{id}/`. The `id` determines folder name; `key` defaults to `memory-{id}-${{ github.workflow }}-${{ github.run_id }}`. ## Cache Merging from Shared Workflows -Import cache-memory configurations from shared workflow files: - ```aw wrap --- imports: @@ -73,43 +61,37 @@ tools: --- ``` -Merge rules: **Single to Single** (local overrides imported), **Single to Multiple** (local converted to array and merged), **Multiple to Multiple** (merged by `id`, local takes precedence). +Merge rules: **Single→Single** (local overrides), **Single→Multiple** (local converts to array), **Multiple→Multiple** (merge by `id`, local wins). -## Cache Behavior and GitHub Actions Integration +## Cache Behavior -Uses GitHub Actions cache with 7-day retention, 10GB per repository limit, and LRU eviction. With `retention-days`, cache data uploads as artifacts (1-90 days) for long-term access. +GitHub Actions cache: 7-day retention, 10GB per repo, LRU eviction. Add `retention-days` to upload artifacts (1-90 days) for extended access. -Caches are accessible across branches with unique per-run keys. Custom keys automatically append `-${{ github.run_id }}`. Progressive restore keys split on dashes (e.g., `custom-memory-project-v1-${{ github.run_id }}` tries `custom-memory-project-v1-`, `custom-memory-project-`, `custom-memory-`, `custom-`). +Caches accessible across branches with unique per-run keys. Custom keys auto-append `-${{ github.run_id }}`. Progressive restore splits on dashes: `custom-memory-project-v1-${{ github.run_id }}` tries `custom-memory-project-v1-`, `custom-memory-project-`, `custom-memory-`, `custom-`. ## Best Practices -Organize files with descriptive names and directories. Use hierarchical cache keys like `project-${{ github.repository_owner }}-${{ github.workflow }}`. Choose appropriate scope (workflow-specific by default, or repository/user-wide by including identifiers in keys). Monitor growth and respect the 10GB limit. +Use descriptive file/directory names, hierarchical cache keys (`project-${{ github.repository_owner }}-${{ github.workflow }}`), and appropriate scope (workflow-specific default or repository/user-wide). Monitor growth within 10GB limit. ## Troubleshooting -**Files Not Persisting**: Check cache key consistency and workflow logs for restore/save messages. - -**File Access Issues**: Create subdirectories before use, verify permissions, use absolute paths. - -**Cache Size Issues**: Track growth, clear periodically, or use time-based keys for auto-expiration. +**Files not persisting**: Check cache key consistency and logs for restore/save messages. +**File access issues**: Create subdirectories first, verify permissions, use absolute paths. +**Cache size issues**: Track growth, clear periodically, or use time-based keys for auto-expiration. -## Security Considerations +## Security -Avoid storing sensitive data in cache files. Cache follows repository permissions and logs access in workflow logs. Files use standard runner permissions in temporary directories. 
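
The progressive-restore behavior described above — `custom-memory-project-v1-${{ github.run_id }}` falling back to `custom-memory-project-v1-`, then `custom-memory-project-`, `custom-memory-`, `custom-` — comes from splitting the key on dashes. A sketch of that derivation under the stated behavior; the function name is illustrative, not the compiler's actual helper:

```go
package main

import (
	"fmt"
	"strings"
)

// restoreKeys derives progressively shorter fallback keys by trimming
// dash-delimited segments from the right, keeping the trailing dash.
func restoreKeys(key string) []string {
	parts := strings.Split(key, "-")
	var keys []string
	for n := len(parts) - 1; n >= 1; n-- {
		keys = append(keys, strings.Join(parts[:n], "-")+"-")
	}
	return keys
}

func main() {
	// "123" stands in for the appended ${{ github.run_id }}.
	fmt.Println(restoreKeys("custom-memory-project-v1-123"))
	// [custom-memory-project-v1- custom-memory-project- custom-memory- custom-]
}
```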
- -With [threat detection](/gh-aw/reference/safe-outputs/#threat-detection) enabled, cache updates defer until validation completes: restored via `actions/cache/restore`, modified by agent, uploaded as artifacts, validated, then saved via `actions/cache/save` only if detection succeeds. Without threat detection, updates occur automatically via standard cache post-action. +Don't store sensitive data. Cache follows repository permissions, logs access. With [threat detection](/gh-aw/reference/safe-outputs/#threat-detection), cache saves only after validation succeeds (restore→modify→upload artifact→validate→save). ## Examples -Basic usage with `cache-memory: true`, project-specific with custom keys, or multiple caches with different retention. See [Grumpy Code Reviewer](https://github.com/githubnext/gh-aw/blob/main/.github/workflows/grumpy-reviewer.md) for tracking PR review history. +See [Grumpy Code Reviewer](https://github.com/githubnext/gh-aw/blob/main/.github/workflows/grumpy-reviewer.md) for tracking PR review history. --- # Repo Memory -Enables persistent file storage using Git branches with unlimited retention. When enabled, the compiler automatically clones/creates the branch, provides file access at `/tmp/gh-aw/repo-memory-{id}/memory/{id}/`, commits and pushes changes, and handles merge conflicts (your changes win). - -Default: branch `memory/default` at `/tmp/gh-aw/repo-memory-default/memory/default/`. +Persistent file storage via Git branches with unlimited retention. The compiler auto-configures branch cloning/creation, file access at `/tmp/gh-aw/repo-memory-{id}/memory/{id}/`, commits/pushes, and merge conflict resolution (your changes win). ## Enabling Repo Memory @@ -120,7 +102,7 @@ tools: --- ``` -Creates branch `memory/default` with file access at `/tmp/gh-aw/repo-memory-default/memory/default/`. Files are automatically committed and pushed after workflow completion. +Creates branch `memory/default` at `/tmp/gh-aw/repo-memory-default/memory/default/`. Files auto-commit/push after workflow completion. ## Advanced Configuration @@ -129,17 +111,15 @@ Creates branch `memory/default` with file access at `/tmp/gh-aw/repo-memory-defa tools: repo-memory: branch-name: memory/custom-agent - description: "Long-term insights and patterns" + description: "Long-term insights" file-glob: ["*.md", "*.json"] - max-file-size: 1048576 # 1MB, default 10KB + max-file-size: 1048576 # 1MB (default 10KB) max-file-count: 50 # default 100 target-repo: "owner/repository" - create-orphan: true # default true + create-orphan: true # default --- ``` -Options: `branch-name` (default `memory/default`), `description`, `file-glob`, `max-file-size`, `max-file-count`, `target-repo`, `create-orphan`. - ## Multiple Repo Memory Configurations ```aw wrap @@ -155,48 +135,42 @@ tools: --- ``` -Each mounts at `/tmp/gh-aw/repo-memory-{id}/memory/{id}/`. The `id` field is required and determines folder/branch names. If `branch-name` is omitted, defaults to `memory/{id}`. +Mounts at `/tmp/gh-aw/repo-memory-{id}/memory/{id}/`. Required `id` determines folder/branch names; `branch-name` defaults to `memory/{id}`. -## Behavior and Git Integration +## Behavior -Branches are auto-created as orphans (if `create-orphan: true`, default) or cloned with `--depth 1`. Changes commit automatically after validation (against `file-glob`, `max-file-size`, `max-file-count`), pull with `-X ours` (your changes win conflicts), and push. Push occurs only if changes detected and threat detection passes (if configured). 
Automatically adds `contents: write` permission. +Branches auto-create as orphans (default) or clone with `--depth 1`. Changes auto-commit after validation (`file-glob`, `max-file-size`, `max-file-count`), pull with `-X ours` (your changes win), and push when changes detected and threat detection passes. Auto-adds `contents: write` permission. ## Best Practices -Organize files with descriptive names and directories. Use hierarchical branch names (`memory/default`, `memory/insights`). Choose scope (workflow-specific default, shared across workflows, or cross-repository with `target-repo`). Set constraints (`file-glob`, `max-file-size`, `max-file-count`) to prevent abuse. Monitor branch size and clean periodically. +Use descriptive names, hierarchical branches (`memory/insights`), appropriate scope (workflow-specific, shared, or `target-repo` for cross-repository), and constraints to prevent abuse. Monitor branch size, clean periodically. -## Comparing Cache Memory vs Repo Memory +## Comparison | Feature | Cache Memory | Repo Memory | |---------|--------------|-------------| -| **Storage** | GitHub Actions Cache | Git Branches | -| **Retention** | 7 days | Unlimited | -| **Size Limit** | 10GB/repo | Repository limits | -| **Version Control** | No | Yes | -| **Performance** | Fast | Slower | -| **Best For** | Temporary/sessions | Long-term/history | - -Choose cache for fast temporary storage, repo for permanent version-controlled storage. +| Storage | GitHub Actions Cache | Git Branches | +| Retention | 7 days | Unlimited | +| Size Limit | 10GB/repo | Repository limits | +| Version Control | No | Yes | +| Performance | Fast | Slower | +| Best For | Temporary/sessions | Long-term/history | ## Troubleshooting -**Branch Not Created**: Ensure `create-orphan: true` or create branch manually. - -**Permission Denied**: `contents: write` is auto-added by compiler. - -**File Validation Failures**: Verify files match `file-glob`, are under `max-file-size` (10KB default), and within `max-file-count` (100 default). - -**Changes Not Persisting**: Check correct directory (`/tmp/gh-aw/repo-memory-{id}/memory/{id}/`), successful workflow completion, and push errors in logs. - -**Merge Conflicts**: Uses `-X ours`, your changes always win. Read before writing to preserve previous data. +**Branch not created**: Ensure `create-orphan: true` or create manually. +**Permission denied**: Compiler auto-adds `contents: write`. +**Validation failures**: Match `file-glob`, stay under `max-file-size` (10KB default) and `max-file-count` (100 default). +**Changes not persisting**: Check directory path, workflow completion, push errors in logs. +**Merge conflicts**: Uses `-X ours` (your changes win). Read before writing to preserve data. -## Security Considerations +## Security -Memory branches follow repository permissions. Use private repos for sensitive data. Avoid storing secrets. Use `file-glob`, `max-file-size`, and `max-file-count` to restrict files. Consider branch protection rules for production. Use `target-repo` to isolate memory. +Memory branches follow repository permissions. Use private repos for sensitive data, avoid storing secrets, set constraints (`file-glob`, `max-file-size`, `max-file-count`), consider branch protection, use `target-repo` to isolate. ## Examples -Basic usage with `repo-memory: true`, custom branches with constraints, multiple memory locations by ID, or cross-repository with `target-repo`. 
See [Deep Report](https://github.com/githubnext/gh-aw/blob/main/.github/workflows/deep-report.md) and [Daily Firewall Report](https://github.com/githubnext/gh-aw/blob/main/.github/workflows/daily-firewall-report.md) for tracking long-term insights and historical security data. +See [Deep Report](https://github.com/githubnext/gh-aw/blob/main/.github/workflows/deep-report.md) and [Daily Firewall Report](https://github.com/githubnext/gh-aw/blob/main/.github/workflows/daily-firewall-report.md) for long-term insights and historical data tracking. --- diff --git a/pkg/cli/interactive.go b/pkg/cli/interactive.go index 00903a12df..fc11dd295f 100644 --- a/pkg/cli/interactive.go +++ b/pkg/cli/interactive.go @@ -1,11 +1,9 @@ package cli import ( - "errors" "fmt" "os" "path/filepath" - "regexp" "slices" "strings" @@ -17,9 +15,6 @@ import ( var interactiveLog = logger.New("cli:interactive") -// workflowNameRegex validates workflow names contain only alphanumeric characters, hyphens, and underscores -var workflowNameRegex = regexp.MustCompile(`^[a-zA-Z0-9_-]+$`) - // commonWorkflowNames contains common workflow name patterns for autocomplete suggestions var commonWorkflowNames = []string{ "issue-triage", @@ -34,12 +29,6 @@ var commonWorkflowNames = []string{ "documentation-check", } -// isValidWorkflowName checks if the provided workflow name contains only valid characters. -// Returns false for empty strings (which should be checked separately for a more specific error message). -func isValidWorkflowName(name string) bool { - return workflowNameRegex.MatchString(name) -} - // isAccessibleMode detects if accessibility mode should be enabled based on environment variables func isAccessibleMode() bool { return os.Getenv("TERM") == "dumb" || os.Getenv("NO_COLOR") != "" @@ -128,15 +117,7 @@ func (b *InteractiveWorkflowBuilder) promptForWorkflowName() error { Description("Enter a descriptive name for your workflow (e.g., 'issue-triage', 'code-review-helper')"). Suggestions(commonWorkflowNames). Value(&b.WorkflowName). 
- Validate(func(s string) error { - if s == "" { - return errors.New("workflow name cannot be empty") - } - if !isValidWorkflowName(s) { - return errors.New("workflow name must contain only alphanumeric characters, hyphens, and underscores") - } - return nil - }), + Validate(ValidateWorkflowName), ), ).WithAccessible(isAccessibleMode()) diff --git a/pkg/cli/interactive_test.go b/pkg/cli/interactive_test.go index aa369a0c59..d736d720e1 100644 --- a/pkg/cli/interactive_test.go +++ b/pkg/cli/interactive_test.go @@ -6,69 +6,70 @@ import ( "testing" ) -func TestIsValidWorkflowName(t *testing.T) { +func TestValidateWorkflowName_Integration(t *testing.T) { tests := []struct { - name string - input string - expected bool + name string + input string + expectError bool }{ { - name: "valid simple name", - input: "my-workflow", - expected: true, + name: "valid simple name", + input: "my-workflow", + expectError: false, }, { - name: "valid with underscores", - input: "my_workflow", - expected: true, + name: "valid with underscores", + input: "my_workflow", + expectError: false, }, { - name: "valid alphanumeric", - input: "workflow123", - expected: true, + name: "valid alphanumeric", + input: "workflow123", + expectError: false, }, { - name: "valid mixed", - input: "my-workflow_v2", - expected: true, + name: "valid mixed", + input: "my-workflow_v2", + expectError: false, }, { - name: "invalid with spaces", - input: "my workflow", - expected: false, + name: "invalid with spaces", + input: "my workflow", + expectError: true, }, { - name: "invalid with special chars", - input: "my@workflow!", - expected: false, + name: "invalid with special chars", + input: "my@workflow!", + expectError: true, }, { - name: "invalid with dots", - input: "my.workflow", - expected: false, + name: "invalid with dots", + input: "my.workflow", + expectError: true, }, { - name: "invalid with slashes", - input: "my/workflow", - expected: false, + name: "invalid with slashes", + input: "my/workflow", + expectError: true, }, { - name: "empty string", - input: "", - expected: false, + name: "empty string", + input: "", + expectError: true, }, { - name: "valid uppercase", - input: "MyWorkflow", - expected: true, + name: "valid uppercase", + input: "MyWorkflow", + expectError: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - result := isValidWorkflowName(tt.input) - if result != tt.expected { - t.Errorf("isValidWorkflowName(%q) = %v, want %v", tt.input, result, tt.expected) + err := ValidateWorkflowName(tt.input) + hasError := err != nil + if hasError != tt.expectError { + t.Errorf("ValidateWorkflowName(%q) error = %v, expectError %v", tt.input, err, tt.expectError) } }) } @@ -81,8 +82,8 @@ func TestCommonWorkflowNamesAreValid(t *testing.T) { } for _, name := range commonWorkflowNames { - if !isValidWorkflowName(name) { - t.Errorf("commonWorkflowNames contains invalid workflow name: %q", name) + if err := ValidateWorkflowName(name); err != nil { + t.Errorf("commonWorkflowNames contains invalid workflow name: %q (error: %v)", name, err) } } } diff --git a/pkg/cli/validators.go b/pkg/cli/validators.go new file mode 100644 index 0000000000..12dc5e5d16 --- /dev/null +++ b/pkg/cli/validators.go @@ -0,0 +1,21 @@ +package cli + +import ( + "errors" + "regexp" +) + +// workflowNameRegex validates workflow names contain only alphanumeric characters, hyphens, and underscores +var workflowNameRegex = regexp.MustCompile(`^[a-zA-Z0-9_-]+$`) + +// ValidateWorkflowName checks if the provided workflow name is valid. 
+// It ensures the name is not empty and contains only alphanumeric characters, hyphens, and underscores. +func ValidateWorkflowName(s string) error { + if s == "" { + return errors.New("workflow name cannot be empty") + } + if !workflowNameRegex.MatchString(s) { + return errors.New("workflow name must contain only alphanumeric characters, hyphens, and underscores") + } + return nil +} diff --git a/pkg/cli/validators_test.go b/pkg/cli/validators_test.go new file mode 100644 index 0000000000..5ce4555190 --- /dev/null +++ b/pkg/cli/validators_test.go @@ -0,0 +1,247 @@ +package cli + +import ( + "strings" + "testing" +) + +func TestValidateWorkflowName(t *testing.T) { + tests := []struct { + name string + input string + expectError bool + errorMsg string + }{ + { + name: "valid simple name", + input: "my-workflow", + expectError: false, + }, + { + name: "valid with underscores", + input: "my_workflow", + expectError: false, + }, + { + name: "valid alphanumeric", + input: "workflow123", + expectError: false, + }, + { + name: "valid mixed", + input: "my-workflow_v2", + expectError: false, + }, + { + name: "valid uppercase", + input: "MyWorkflow", + expectError: false, + }, + { + name: "valid all hyphens and underscores", + input: "my-workflow_test-123", + expectError: false, + }, + { + name: "empty string", + input: "", + expectError: true, + errorMsg: "workflow name cannot be empty", + }, + { + name: "invalid with spaces", + input: "my workflow", + expectError: true, + errorMsg: "workflow name must contain only alphanumeric characters, hyphens, and underscores", + }, + { + name: "invalid with special chars", + input: "my@workflow!", + expectError: true, + errorMsg: "workflow name must contain only alphanumeric characters, hyphens, and underscores", + }, + { + name: "invalid with dots", + input: "my.workflow", + expectError: true, + errorMsg: "workflow name must contain only alphanumeric characters, hyphens, and underscores", + }, + { + name: "invalid with slashes", + input: "my/workflow", + expectError: true, + errorMsg: "workflow name must contain only alphanumeric characters, hyphens, and underscores", + }, + { + name: "invalid with parentheses", + input: "my(workflow)", + expectError: true, + errorMsg: "workflow name must contain only alphanumeric characters, hyphens, and underscores", + }, + { + name: "invalid with brackets", + input: "my[workflow]", + expectError: true, + errorMsg: "workflow name must contain only alphanumeric characters, hyphens, and underscores", + }, + { + name: "invalid with dollar sign", + input: "my$workflow", + expectError: true, + errorMsg: "workflow name must contain only alphanumeric characters, hyphens, and underscores", + }, + { + name: "invalid with percent sign", + input: "my%workflow", + expectError: true, + errorMsg: "workflow name must contain only alphanumeric characters, hyphens, and underscores", + }, + { + name: "invalid with hash", + input: "my#workflow", + expectError: true, + errorMsg: "workflow name must contain only alphanumeric characters, hyphens, and underscores", + }, + { + name: "invalid with asterisk", + input: "my*workflow", + expectError: true, + errorMsg: "workflow name must contain only alphanumeric characters, hyphens, and underscores", + }, + { + name: "invalid with ampersand", + input: "my&workflow", + expectError: true, + errorMsg: "workflow name must contain only alphanumeric characters, hyphens, and underscores", + }, + { + name: "invalid with plus", + input: "my+workflow", + expectError: true, + errorMsg: "workflow name must 
contain only alphanumeric characters, hyphens, and underscores", + }, + { + name: "invalid with equals", + input: "my=workflow", + expectError: true, + errorMsg: "workflow name must contain only alphanumeric characters, hyphens, and underscores", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := ValidateWorkflowName(tt.input) + + if tt.expectError { + if err == nil { + t.Errorf("ValidateWorkflowName(%q) expected error but got nil", tt.input) + return + } + if !strings.Contains(err.Error(), tt.errorMsg) { + t.Errorf("ValidateWorkflowName(%q) error = %q, want error containing %q", tt.input, err.Error(), tt.errorMsg) + } + } else { + if err != nil { + t.Errorf("ValidateWorkflowName(%q) unexpected error: %v", tt.input, err) + } + } + }) + } +} + +func TestValidateWorkflowName_EdgeCases(t *testing.T) { + tests := []struct { + name string + input string + expectError bool + }{ + { + name: "single character", + input: "a", + expectError: false, + }, + { + name: "single number", + input: "1", + expectError: false, + }, + { + name: "single hyphen", + input: "-", + expectError: false, + }, + { + name: "single underscore", + input: "_", + expectError: false, + }, + { + name: "very long valid name", + input: strings.Repeat("a", 100), + expectError: false, + }, + { + name: "starts with hyphen", + input: "-workflow", + expectError: false, + }, + { + name: "ends with hyphen", + input: "workflow-", + expectError: false, + }, + { + name: "starts with underscore", + input: "_workflow", + expectError: false, + }, + { + name: "ends with underscore", + input: "workflow_", + expectError: false, + }, + { + name: "starts with number", + input: "123workflow", + expectError: false, + }, + { + name: "multiple consecutive hyphens", + input: "my--workflow", + expectError: false, + }, + { + name: "multiple consecutive underscores", + input: "my__workflow", + expectError: false, + }, + { + name: "tab character", + input: "my\tworkflow", + expectError: true, + }, + { + name: "newline character", + input: "my\nworkflow", + expectError: true, + }, + { + name: "carriage return", + input: "my\rworkflow", + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := ValidateWorkflowName(tt.input) + + if tt.expectError && err == nil { + t.Errorf("ValidateWorkflowName(%q) expected error but got nil", tt.input) + } + if !tt.expectError && err != nil { + t.Errorf("ValidateWorkflowName(%q) unexpected error: %v", tt.input, err) + } + }) + } +} diff --git a/pkg/workflow/compiler_safe_outputs_consolidated.go b/pkg/workflow/compiler_safe_outputs_core.go similarity index 50% rename from pkg/workflow/compiler_safe_outputs_consolidated.go rename to pkg/workflow/compiler_safe_outputs_core.go index 3cbbabe23e..c351100952 100644 --- a/pkg/workflow/compiler_safe_outputs_consolidated.go +++ b/pkg/workflow/compiler_safe_outputs_core.go @@ -672,697 +672,3 @@ func buildDetectionSuccessCondition() ConditionNode { BuildStringLiteral("true"), ) } - -// === Step Config Builders === -// These functions build the SafeOutputStepConfig for each safe output type - -func (c *Compiler) buildCreateIssueStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { - cfg := data.SafeOutputs.CreateIssues - - var customEnvVars []string - customEnvVars = append(customEnvVars, buildTitlePrefixEnvVar("GH_AW_ISSUE_TITLE_PREFIX", cfg.TitlePrefix)...) - customEnvVars = append(customEnvVars, buildLabelsEnvVar("GH_AW_ISSUE_LABELS", cfg.Labels)...) 
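
Stepping back to the `pkg/cli` extraction above: `ValidateWorkflowName` is now exported so the `huh` form (`Validate(ValidateWorkflowName)`) and any other caller share a single rule. A small standalone sketch of its contract, with the regexp and error messages taken directly from the new `validators.go`:

```go
package main

import (
	"errors"
	"fmt"
	"regexp"
)

var workflowNameRegex = regexp.MustCompile(`^[a-zA-Z0-9_-]+$`)

// ValidateWorkflowName reproduces the exported validator from
// pkg/cli/validators.go for illustration: empty names and names with
// characters outside [a-zA-Z0-9_-] are rejected.
func ValidateWorkflowName(s string) error {
	if s == "" {
		return errors.New("workflow name cannot be empty")
	}
	if !workflowNameRegex.MatchString(s) {
		return errors.New("workflow name must contain only alphanumeric characters, hyphens, and underscores")
	}
	return nil
}

func main() {
	for _, name := range []string{"issue-triage", "my workflow", ""} {
		if err := ValidateWorkflowName(name); err != nil {
			fmt.Printf("%q invalid: %v\n", name, err)
			continue
		}
		fmt.Printf("%q ok\n", name)
	}
}
```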
- customEnvVars = append(customEnvVars, buildLabelsEnvVar("GH_AW_ISSUE_ALLOWED_LABELS", cfg.AllowedLabels)...) - customEnvVars = append(customEnvVars, buildAllowedReposEnvVar("GH_AW_ALLOWED_REPOS", cfg.AllowedRepos)...) - if cfg.Expires > 0 { - customEnvVars = append(customEnvVars, fmt.Sprintf(" GH_AW_ISSUE_EXPIRES: \"%d\"\n", cfg.Expires)) - } - customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, cfg.TargetRepoSlug)...) - - condition := BuildSafeOutputType("create_issue") - - return SafeOutputStepConfig{ - StepName: "Create Issue", - StepID: "create_issue", - ScriptName: "create_issue", - Script: getCreateIssueScript(), - CustomEnvVars: customEnvVars, - Condition: condition, - Token: cfg.GitHubToken, - } -} - -func (c *Compiler) buildCreateDiscussionStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { - cfg := data.SafeOutputs.CreateDiscussions - - var customEnvVars []string - customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, "")...) - - condition := BuildSafeOutputType("create_discussion") - - return SafeOutputStepConfig{ - StepName: "Create Discussion", - StepID: "create_discussion", - ScriptName: "create_discussion", - Script: getCreateDiscussionScript(), - CustomEnvVars: customEnvVars, - Condition: condition, - Token: cfg.GitHubToken, - } -} - -func (c *Compiler) buildCreatePullRequestStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { - cfg := data.SafeOutputs.CreatePullRequests - - var customEnvVars []string - // Pass the base branch from GitHub context (required by create_pull_request.cjs) - // Note: GH_AW_WORKFLOW_ID is now set at the job level and inherited by all steps - customEnvVars = append(customEnvVars, " GH_AW_BASE_BRANCH: ${{ github.ref_name }}\n") - customEnvVars = append(customEnvVars, buildTitlePrefixEnvVar("GH_AW_PR_TITLE_PREFIX", cfg.TitlePrefix)...) - customEnvVars = append(customEnvVars, buildLabelsEnvVar("GH_AW_PR_LABELS", cfg.Labels)...) - customEnvVars = append(customEnvVars, buildLabelsEnvVar("GH_AW_PR_ALLOWED_LABELS", cfg.AllowedLabels)...) 
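
The builders in this hunk accumulate `CustomEnvVars` as pre-indented YAML lines via helpers like `buildLabelsEnvVar` and `buildTitlePrefixEnvVar`. A hedged sketch of what such a helper plausibly emits; the real helpers live elsewhere in `pkg/workflow`, and their exact indentation and value encoding are not shown in this diff:

```go
package main

import (
	"fmt"
	"strings"
)

// buildLabelsEnvVarSketch renders a step-level env entry as an indented
// YAML line. Hypothetical: the real buildLabelsEnvVar may encode values
// differently (e.g. as JSON) and uses the project's own indentation.
func buildLabelsEnvVarSketch(name string, labels []string) []string {
	if len(labels) == 0 {
		return nil // omit the variable entirely when unset
	}
	return []string{fmt.Sprintf("          %s: %q\n", name, strings.Join(labels, ","))}
}

func main() {
	var envVars []string
	envVars = append(envVars, buildLabelsEnvVarSketch("GH_AW_PR_LABELS", []string{"bug", "automation"})...)
	envVars = append(envVars, buildLabelsEnvVarSketch("GH_AW_PR_ALLOWED_LABELS", nil)...)
	fmt.Print(strings.Join(envVars, ""))
}
```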
- // Add draft setting - always set with default to true for backwards compatibility - draftValue := true // Default value - if cfg.Draft != nil { - draftValue = *cfg.Draft - } - customEnvVars = append(customEnvVars, fmt.Sprintf(" GH_AW_PR_DRAFT: %q\n", fmt.Sprintf("%t", draftValue))) - // Add if-no-changes setting - always set with default to "warn" - ifNoChanges := cfg.IfNoChanges - if ifNoChanges == "" { - ifNoChanges = "warn" // Default value - } - customEnvVars = append(customEnvVars, fmt.Sprintf(" GH_AW_PR_IF_NO_CHANGES: %q\n", ifNoChanges)) - // Add allow-empty setting - customEnvVars = append(customEnvVars, fmt.Sprintf(" GH_AW_PR_ALLOW_EMPTY: %q\n", fmt.Sprintf("%t", cfg.AllowEmpty))) - // Add max patch size setting - maxPatchSize := 1024 // default 1024 KB - if data.SafeOutputs.MaximumPatchSize > 0 { - maxPatchSize = data.SafeOutputs.MaximumPatchSize - } - customEnvVars = append(customEnvVars, fmt.Sprintf(" GH_AW_MAX_PATCH_SIZE: %d\n", maxPatchSize)) - // Add activation comment information if available (for updating the comment with PR link) - if data.AIReaction != "" && data.AIReaction != "none" { - customEnvVars = append(customEnvVars, fmt.Sprintf(" GH_AW_COMMENT_ID: ${{ needs.%s.outputs.comment_id }}\n", constants.ActivationJobName)) - customEnvVars = append(customEnvVars, fmt.Sprintf(" GH_AW_COMMENT_REPO: ${{ needs.%s.outputs.comment_repo }}\n", constants.ActivationJobName)) - } - // Add expires value if set (only for same-repo PRs - when target-repo is not set) - if cfg.Expires > 0 && cfg.TargetRepoSlug == "" { - customEnvVars = append(customEnvVars, fmt.Sprintf(" GH_AW_PR_EXPIRES: \"%d\"\n", cfg.Expires)) - } - customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, cfg.TargetRepoSlug)...) - - condition := BuildSafeOutputType("create_pull_request") - - // Build pre-steps for checkout and git config - preSteps := c.buildCreatePullRequestPreStepsConsolidated(data, cfg, condition) - - return SafeOutputStepConfig{ - StepName: "Create Pull Request", - StepID: "create_pull_request", - ScriptName: "create_pull_request", - Script: getCreatePullRequestScript(), - CustomEnvVars: customEnvVars, - Condition: condition, - Token: cfg.GitHubToken, - PreSteps: preSteps, - } -} - -func (c *Compiler) buildAddCommentStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool, createIssueEnabled, createDiscussionEnabled, createPullRequestEnabled bool) SafeOutputStepConfig { - cfg := data.SafeOutputs.AddComments - - var customEnvVars []string - if cfg.Target != "" { - customEnvVars = append(customEnvVars, fmt.Sprintf(" GH_AW_COMMENT_TARGET: %q\n", cfg.Target)) - } - if cfg.Discussion != nil && *cfg.Discussion { - customEnvVars = append(customEnvVars, " GITHUB_AW_COMMENT_DISCUSSION: \"true\"\n") - } - if cfg.HideOlderComments { - customEnvVars = append(customEnvVars, " GH_AW_HIDE_OLDER_COMMENTS: \"true\"\n") - } - - // Reference outputs from earlier steps in the same job - if createIssueEnabled { - customEnvVars = append(customEnvVars, " GH_AW_CREATED_ISSUE_URL: ${{ steps.create_issue.outputs.issue_url }}\n") - customEnvVars = append(customEnvVars, " GH_AW_CREATED_ISSUE_NUMBER: ${{ steps.create_issue.outputs.issue_number }}\n") - customEnvVars = append(customEnvVars, " GH_AW_TEMPORARY_ID_MAP: ${{ steps.create_issue.outputs.temporary_id_map }}\n") - } - if createDiscussionEnabled { - customEnvVars = append(customEnvVars, " GH_AW_CREATED_DISCUSSION_URL: ${{ steps.create_discussion.outputs.discussion_url }}\n") - customEnvVars = append(customEnvVars, " 
GH_AW_CREATED_DISCUSSION_NUMBER: ${{ steps.create_discussion.outputs.discussion_number }}\n") - } - if createPullRequestEnabled { - customEnvVars = append(customEnvVars, " GH_AW_CREATED_PULL_REQUEST_URL: ${{ steps.create_pull_request.outputs.pull_request_url }}\n") - customEnvVars = append(customEnvVars, " GH_AW_CREATED_PULL_REQUEST_NUMBER: ${{ steps.create_pull_request.outputs.pull_request_number }}\n") - } - customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, cfg.TargetRepoSlug)...) - - condition := BuildSafeOutputType("add_comment") - - return SafeOutputStepConfig{ - StepName: "Add Comment", - StepID: "add_comment", - ScriptName: "add_comment", - Script: getAddCommentScript(), - CustomEnvVars: customEnvVars, - Condition: condition, - Token: cfg.GitHubToken, - } -} - -func (c *Compiler) buildCloseDiscussionStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { - cfg := data.SafeOutputs.CloseDiscussions - - var customEnvVars []string - customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, "")...) - - condition := BuildSafeOutputType("close_discussion") - - return SafeOutputStepConfig{ - StepName: "Close Discussion", - StepID: "close_discussion", - ScriptName: "close_discussion", - Script: getCloseDiscussionScript(), - CustomEnvVars: customEnvVars, - Condition: condition, - Token: cfg.GitHubToken, - } -} - -func (c *Compiler) buildCloseIssueStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { - cfg := data.SafeOutputs.CloseIssues - - var customEnvVars []string - customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, "")...) - - condition := BuildSafeOutputType("close_issue") - - return SafeOutputStepConfig{ - StepName: "Close Issue", - StepID: "close_issue", - ScriptName: "close_issue", - Script: getCloseIssueScript(), - CustomEnvVars: customEnvVars, - Condition: condition, - Token: cfg.GitHubToken, - } -} - -func (c *Compiler) buildClosePullRequestStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { - cfg := data.SafeOutputs.ClosePullRequests - - var customEnvVars []string - customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, "")...) - - condition := BuildSafeOutputType("close_pull_request") - - return SafeOutputStepConfig{ - StepName: "Close Pull Request", - StepID: "close_pull_request", - ScriptName: "close_pull_request", - Script: getClosePullRequestScript(), - CustomEnvVars: customEnvVars, - Condition: condition, - Token: cfg.GitHubToken, - } -} - -func (c *Compiler) buildCreatePRReviewCommentStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { - cfg := data.SafeOutputs.CreatePullRequestReviewComments - - var customEnvVars []string - // Add side configuration - if cfg.Side != "" { - customEnvVars = append(customEnvVars, fmt.Sprintf(" GH_AW_PR_REVIEW_COMMENT_SIDE: %q\n", cfg.Side)) - } - // Add target configuration - if cfg.Target != "" { - customEnvVars = append(customEnvVars, fmt.Sprintf(" GH_AW_PR_REVIEW_COMMENT_TARGET: %q\n", cfg.Target)) - } - customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, "")...) 
- - condition := BuildSafeOutputType("create_pull_request_review_comment") - - return SafeOutputStepConfig{ - StepName: "Create PR Review Comment", - StepID: "create_pr_review_comment", - ScriptName: "create_pr_review_comment", - Script: getCreatePRReviewCommentScript(), - CustomEnvVars: customEnvVars, - Condition: condition, - Token: cfg.GitHubToken, - } -} - -func (c *Compiler) buildCreateCodeScanningAlertStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool, workflowFilename string) SafeOutputStepConfig { - cfg := data.SafeOutputs.CreateCodeScanningAlerts - - var customEnvVars []string - customEnvVars = append(customEnvVars, fmt.Sprintf(" GH_AW_WORKFLOW_FILENAME: %q\n", workflowFilename)) - customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, "")...) - - condition := BuildSafeOutputType("create_code_scanning_alert") - - return SafeOutputStepConfig{ - StepName: "Create Code Scanning Alert", - StepID: "create_code_scanning_alert", - ScriptName: "create_code_scanning_alert", - Script: getCreateCodeScanningAlertScript(), - CustomEnvVars: customEnvVars, - Condition: condition, - Token: cfg.GitHubToken, - } -} - -func (c *Compiler) buildAddLabelsStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { - cfg := data.SafeOutputs.AddLabels - - var customEnvVars []string - customEnvVars = append(customEnvVars, buildLabelsEnvVar("GH_AW_LABELS_ALLOWED", cfg.Allowed)...) - if cfg.Max > 0 { - customEnvVars = append(customEnvVars, fmt.Sprintf(" GH_AW_LABELS_MAX_COUNT: %d\n", cfg.Max)) - } - if cfg.Target != "" { - customEnvVars = append(customEnvVars, fmt.Sprintf(" GH_AW_LABELS_TARGET: %q\n", cfg.Target)) - } - customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, cfg.TargetRepoSlug)...) - - condition := BuildSafeOutputType("add_labels") - - return SafeOutputStepConfig{ - StepName: "Add Labels", - StepID: "add_labels", - ScriptName: "add_labels", - Script: getAddLabelsScript(), - CustomEnvVars: customEnvVars, - Condition: condition, - Token: cfg.GitHubToken, - } -} - -func (c *Compiler) buildAddReviewerStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { - cfg := data.SafeOutputs.AddReviewer - - var customEnvVars []string - customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, "")...) - - condition := BuildSafeOutputType("add_reviewer") - - return SafeOutputStepConfig{ - StepName: "Add Reviewer", - StepID: "add_reviewer", - ScriptName: "add_reviewer", - Script: getAddReviewerScript(), - CustomEnvVars: customEnvVars, - Condition: condition, - Token: cfg.GitHubToken, - } -} - -func (c *Compiler) buildAssignMilestoneStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { - cfg := data.SafeOutputs.AssignMilestone - - var customEnvVars []string - customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, "")...) 
- - condition := BuildSafeOutputType("assign_milestone") - - return SafeOutputStepConfig{ - StepName: "Assign Milestone", - StepID: "assign_milestone", - ScriptName: "assign_milestone", - Script: getAssignMilestoneScript(), - CustomEnvVars: customEnvVars, - Condition: condition, - Token: cfg.GitHubToken, - } -} - -func (c *Compiler) buildAssignToAgentStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { - cfg := data.SafeOutputs.AssignToAgent - - var customEnvVars []string - customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, "")...) - - condition := BuildSafeOutputType("assign_to_agent") - - return SafeOutputStepConfig{ - StepName: "Assign To Agent", - StepID: "assign_to_agent", - ScriptName: "assign_to_agent", - Script: getAssignToAgentScript(), - CustomEnvVars: customEnvVars, - Condition: condition, - Token: cfg.GitHubToken, - UseAgentToken: true, - } -} - -func (c *Compiler) buildAssignToUserStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { - cfg := data.SafeOutputs.AssignToUser - - var customEnvVars []string - customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, "")...) - - condition := BuildSafeOutputType("assign_to_user") - - return SafeOutputStepConfig{ - StepName: "Assign To User", - StepID: "assign_to_user", - ScriptName: "assign_to_user", - Script: getAssignToUserScript(), - CustomEnvVars: customEnvVars, - Condition: condition, - Token: cfg.GitHubToken, - } -} - -func (c *Compiler) buildUpdateIssueStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { - cfg := data.SafeOutputs.UpdateIssues - - var customEnvVars []string - customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, cfg.TargetRepoSlug)...) - - condition := BuildSafeOutputType("update_issue") - - return SafeOutputStepConfig{ - StepName: "Update Issue", - StepID: "update_issue", - ScriptName: "update_issue", - Script: getUpdateIssueScript(), - CustomEnvVars: customEnvVars, - Condition: condition, - Token: cfg.GitHubToken, - } -} - -func (c *Compiler) buildUpdatePullRequestStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { - cfg := data.SafeOutputs.UpdatePullRequests - - var customEnvVars []string - customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, cfg.TargetRepoSlug)...) - - condition := BuildSafeOutputType("update_pull_request") - - return SafeOutputStepConfig{ - StepName: "Update Pull Request", - StepID: "update_pull_request", - ScriptName: "update_pull_request", - Script: getUpdatePullRequestScript(), - CustomEnvVars: customEnvVars, - Condition: condition, - Token: cfg.GitHubToken, - } -} - -func (c *Compiler) buildUpdateDiscussionStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { - cfg := data.SafeOutputs.UpdateDiscussions - - var customEnvVars []string - customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, cfg.TargetRepoSlug)...) 
- - // Add target environment variable if set - if cfg.Target != "" { - customEnvVars = append(customEnvVars, fmt.Sprintf(" GH_AW_UPDATE_TARGET: %q\n", cfg.Target)) - } - - // Add field update flags - presence of pointer indicates field can be updated - if cfg.Title != nil { - customEnvVars = append(customEnvVars, " GH_AW_UPDATE_TITLE: \"true\"\n") - } - if cfg.Body != nil { - customEnvVars = append(customEnvVars, " GH_AW_UPDATE_BODY: \"true\"\n") - } - if cfg.Labels != nil { - customEnvVars = append(customEnvVars, " GH_AW_UPDATE_LABELS: \"true\"\n") - } - - condition := BuildSafeOutputType("update_discussion") - - return SafeOutputStepConfig{ - StepName: "Update Discussion", - StepID: "update_discussion", - ScriptName: "update_discussion", - Script: getUpdateDiscussionScript(), - CustomEnvVars: customEnvVars, - Condition: condition, - Token: cfg.GitHubToken, - } -} - -func (c *Compiler) buildPushToPullRequestBranchStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { - cfg := data.SafeOutputs.PushToPullRequestBranch - - var customEnvVars []string - // Add target config if set - if cfg.Target != "" { - customEnvVars = append(customEnvVars, fmt.Sprintf(" GH_AW_PUSH_TARGET: %q\n", cfg.Target)) - } - // Add if-no-changes config if set - if cfg.IfNoChanges != "" { - customEnvVars = append(customEnvVars, fmt.Sprintf(" GH_AW_PUSH_IF_NO_CHANGES: %q\n", cfg.IfNoChanges)) - } - // Add title prefix if set (using same env var as create-pull-request) - customEnvVars = append(customEnvVars, buildTitlePrefixEnvVar("GH_AW_PR_TITLE_PREFIX", cfg.TitlePrefix)...) - // Add labels if set - customEnvVars = append(customEnvVars, buildLabelsEnvVar("GH_AW_PR_LABELS", cfg.Labels)...) - // Add commit title suffix if set - if cfg.CommitTitleSuffix != "" { - customEnvVars = append(customEnvVars, fmt.Sprintf(" GH_AW_COMMIT_TITLE_SUFFIX: %q\n", cfg.CommitTitleSuffix)) - } - // Add max patch size setting - maxPatchSize := 1024 // default 1024 KB - if data.SafeOutputs.MaximumPatchSize > 0 { - maxPatchSize = data.SafeOutputs.MaximumPatchSize - } - customEnvVars = append(customEnvVars, fmt.Sprintf(" GH_AW_MAX_PATCH_SIZE: %d\n", maxPatchSize)) - customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, "")...) - - condition := BuildSafeOutputType("push_to_pull_request_branch") - - // Build pre-steps for checkout and git config - preSteps := c.buildPushToPullRequestBranchPreStepsConsolidated(data, cfg, condition) - - return SafeOutputStepConfig{ - StepName: "Push To Pull Request Branch", - StepID: "push_to_pull_request_branch", - ScriptName: "push_to_pull_request_branch", - Script: getPushToPullRequestBranchScript(), - CustomEnvVars: customEnvVars, - Condition: condition, - Token: cfg.GitHubToken, - PreSteps: preSteps, - } -} - -func (c *Compiler) buildUploadAssetsStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { - cfg := data.SafeOutputs.UploadAssets - - var customEnvVars []string - customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, "")...) 
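
The `buildUpdateDiscussionStepConfig` body above uses pointer fields to distinguish "not configured" from "configured empty": a `GH_AW_UPDATE_*` flag is emitted only when the pointer is non-nil. A minimal sketch of that idiom, with illustrative type and field names:

```go
package main

import "fmt"

// updateConfigSketch mirrors the shape of the update-discussion config:
// nil means "field not mentioned in frontmatter"; non-nil means
// "this field may be updated", even if the value itself is empty.
type updateConfigSketch struct {
	Title *string
	Body  *string
}

func flags(cfg updateConfigSketch) []string {
	var out []string
	if cfg.Title != nil {
		out = append(out, `GH_AW_UPDATE_TITLE: "true"`)
	}
	if cfg.Body != nil {
		out = append(out, `GH_AW_UPDATE_BODY: "true"`)
	}
	return out
}

func main() {
	title := "" // an empty value still counts as "present"
	fmt.Println(flags(updateConfigSketch{Title: &title})) // [GH_AW_UPDATE_TITLE: "true"]
	fmt.Println(flags(updateConfigSketch{}))              // []
}
```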
- - condition := BuildSafeOutputType("upload_asset") - - return SafeOutputStepConfig{ - StepName: "Upload Assets", - StepID: "upload_assets", - ScriptName: "upload_assets", - Script: getUploadAssetsScript(), - CustomEnvVars: customEnvVars, - Condition: condition, - Token: cfg.GitHubToken, - } -} - -func (c *Compiler) buildUpdateReleaseStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { - cfg := data.SafeOutputs.UpdateRelease - - var customEnvVars []string - customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, "")...) - - condition := BuildSafeOutputType("update_release") - - return SafeOutputStepConfig{ - StepName: "Update Release", - StepID: "update_release", - ScriptName: "update_release", - Script: getUpdateReleaseScript(), - CustomEnvVars: customEnvVars, - Condition: condition, - Token: cfg.GitHubToken, - } -} - -func (c *Compiler) buildLinkSubIssueStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool, createIssueEnabled bool) SafeOutputStepConfig { - cfg := data.SafeOutputs.LinkSubIssue - - var customEnvVars []string - if createIssueEnabled { - customEnvVars = append(customEnvVars, " GH_AW_CREATED_ISSUE_NUMBER: ${{ steps.create_issue.outputs.issue_number }}\n") - customEnvVars = append(customEnvVars, " GH_AW_TEMPORARY_ID_MAP: ${{ steps.create_issue.outputs.temporary_id_map }}\n") - } - customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, "")...) - - condition := BuildSafeOutputType("link_sub_issue") - - return SafeOutputStepConfig{ - StepName: "Link Sub Issue", - StepID: "link_sub_issue", - ScriptName: "link_sub_issue", - Script: getLinkSubIssueScript(), - CustomEnvVars: customEnvVars, - Condition: condition, - Token: cfg.GitHubToken, - } -} - -func (c *Compiler) buildHideCommentStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { - cfg := data.SafeOutputs.HideComment - - var customEnvVars []string - customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, "")...) - - condition := BuildSafeOutputType("hide_comment") - - return SafeOutputStepConfig{ - StepName: "Hide Comment", - StepID: "hide_comment", - ScriptName: "hide_comment", - Script: getHideCommentScript(), - CustomEnvVars: customEnvVars, - Condition: condition, - Token: cfg.GitHubToken, - } -} - -func (c *Compiler) buildCreateAgentTaskStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { - cfg := data.SafeOutputs.CreateAgentTasks - - var customEnvVars []string - customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, "")...) - - condition := BuildSafeOutputType("create_agent_task") - - return SafeOutputStepConfig{ - StepName: "Create Agent Task", - StepID: "create_agent_task", - Script: createAgentTaskScript, - CustomEnvVars: customEnvVars, - Condition: condition, - Token: cfg.GitHubToken, - UseCopilotToken: true, - } -} - -func (c *Compiler) buildUpdateProjectStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { - cfg := data.SafeOutputs.UpdateProjects - - var customEnvVars []string - customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, "")...) 
- - condition := BuildSafeOutputType("update_project") - - return SafeOutputStepConfig{ - StepName: "Update Project", - StepID: "update_project", - ScriptName: "update_project", - Script: getUpdateProjectScript(), - CustomEnvVars: customEnvVars, - Condition: condition, - Token: cfg.GitHubToken, - } -} - -// buildCreatePullRequestPreSteps builds the pre-steps for create-pull-request -func (c *Compiler) buildCreatePullRequestPreStepsConsolidated(data *WorkflowData, cfg *CreatePullRequestsConfig, condition ConditionNode) []string { - var preSteps []string - - // Determine which token to use for checkout - // If an app is configured, use the app token; otherwise use the default github.token - var checkoutToken string - var gitRemoteToken string - if data.SafeOutputs.App != nil { - checkoutToken = "${{ steps.app-token.outputs.token }}" - gitRemoteToken = "${{ steps.app-token.outputs.token }}" - } else { - checkoutToken = "${{ github.token }}" - gitRemoteToken = "${{ github.token }}" - } - - // Step 1: Checkout repository with conditional execution - preSteps = append(preSteps, " - name: Checkout repository\n") - // Add the condition to only checkout if create_pull_request will run - preSteps = append(preSteps, fmt.Sprintf(" if: %s\n", condition.Render())) - preSteps = append(preSteps, fmt.Sprintf(" uses: %s\n", GetActionPin("actions/checkout"))) - preSteps = append(preSteps, " with:\n") - preSteps = append(preSteps, fmt.Sprintf(" token: %s\n", checkoutToken)) - preSteps = append(preSteps, " persist-credentials: false\n") - preSteps = append(preSteps, " fetch-depth: 1\n") - if c.trialMode { - if c.trialLogicalRepoSlug != "" { - preSteps = append(preSteps, fmt.Sprintf(" repository: %s\n", c.trialLogicalRepoSlug)) - } - } - - // Step 2: Configure Git credentials with conditional execution - gitConfigSteps := []string{ - " - name: Configure Git credentials\n", - fmt.Sprintf(" if: %s\n", condition.Render()), - " env:\n", - " REPO_NAME: ${{ github.repository }}\n", - " SERVER_URL: ${{ github.server_url }}\n", - " run: |\n", - " git config --global user.email \"github-actions[bot]@users.noreply.github.com\"\n", - " git config --global user.name \"github-actions[bot]\"\n", - " # Re-authenticate git with GitHub token\n", - " SERVER_URL_STRIPPED=\"${SERVER_URL#https://}\"\n", - fmt.Sprintf(" git remote set-url origin \"https://x-access-token:%s@${SERVER_URL_STRIPPED}/${REPO_NAME}.git\"\n", gitRemoteToken), - " echo \"Git configured with standard GitHub Actions identity\"\n", - } - preSteps = append(preSteps, gitConfigSteps...) 
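
The pre-step builder above gates both the checkout step and the git-config step on the same `condition.Render()` string, so neither runs unless a `create_pull_request` output actually exists. A sketch of that contract with a stand-in condition type; the real `ConditionNode` and the expression produced by `BuildSafeOutputType` live in `pkg/workflow` and are assumed here, not quoted:

```go
package main

import "fmt"

// conditionNode is a stand-in for pkg/workflow's ConditionNode: anything
// that renders itself as a GitHub Actions `if:` expression.
type conditionNode interface {
	Render() string
}

// outputTypeCondition approximates BuildSafeOutputType; the expression
// below is illustrative, not the compiler's actual one.
type outputTypeCondition struct{ outputType string }

func (c outputTypeCondition) Render() string {
	return fmt.Sprintf("contains(needs.agent.outputs.output_types, %q)", c.outputType)
}

func main() {
	var cond conditionNode = outputTypeCondition{outputType: "create_pull_request"}
	// Every pre-step repeats the same guard so none of them run
	// when the step they support is skipped.
	fmt.Printf("      - name: Checkout repository\n        if: %s\n", cond.Render())
	fmt.Printf("      - name: Configure Git credentials\n        if: %s\n", cond.Render())
}
```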
- - return preSteps -} - -// buildPushToPullRequestBranchPreSteps builds the pre-steps for push-to-pull-request-branch -func (c *Compiler) buildPushToPullRequestBranchPreStepsConsolidated(data *WorkflowData, cfg *PushToPullRequestBranchConfig, condition ConditionNode) []string { - var preSteps []string - - // Determine which token to use for checkout - // If an app is configured, use the app token; otherwise use the default github.token - var checkoutToken string - var gitRemoteToken string - if data.SafeOutputs.App != nil { - checkoutToken = "${{ steps.app-token.outputs.token }}" - gitRemoteToken = "${{ steps.app-token.outputs.token }}" - } else { - checkoutToken = "${{ github.token }}" - gitRemoteToken = "${{ github.token }}" - } - - // Step 1: Checkout repository with conditional execution - preSteps = append(preSteps, " - name: Checkout repository\n") - // Add the condition to only checkout if push_to_pull_request_branch will run - preSteps = append(preSteps, fmt.Sprintf(" if: %s\n", condition.Render())) - preSteps = append(preSteps, fmt.Sprintf(" uses: %s\n", GetActionPin("actions/checkout"))) - preSteps = append(preSteps, " with:\n") - preSteps = append(preSteps, fmt.Sprintf(" token: %s\n", checkoutToken)) - preSteps = append(preSteps, " persist-credentials: false\n") - preSteps = append(preSteps, " fetch-depth: 1\n") - if c.trialMode { - if c.trialLogicalRepoSlug != "" { - preSteps = append(preSteps, fmt.Sprintf(" repository: %s\n", c.trialLogicalRepoSlug)) - } - } - - // Step 2: Configure Git credentials with conditional execution - gitConfigSteps := []string{ - " - name: Configure Git credentials\n", - fmt.Sprintf(" if: %s\n", condition.Render()), - " env:\n", - " REPO_NAME: ${{ github.repository }}\n", - " SERVER_URL: ${{ github.server_url }}\n", - " run: |\n", - " git config --global user.email \"github-actions[bot]@users.noreply.github.com\"\n", - " git config --global user.name \"github-actions[bot]\"\n", - " # Re-authenticate git with GitHub token\n", - " SERVER_URL_STRIPPED=\"${SERVER_URL#https://}\"\n", - fmt.Sprintf(" git remote set-url origin \"https://x-access-token:%s@${SERVER_URL_STRIPPED}/${REPO_NAME}.git\"\n", gitRemoteToken), - " echo \"Git configured with standard GitHub Actions identity\"\n", - } - preSteps = append(preSteps, gitConfigSteps...) - - return preSteps -} diff --git a/pkg/workflow/compiler_safe_outputs_discussions.go b/pkg/workflow/compiler_safe_outputs_discussions.go new file mode 100644 index 0000000000..78463125f2 --- /dev/null +++ b/pkg/workflow/compiler_safe_outputs_discussions.go @@ -0,0 +1,79 @@ +package workflow + +import "fmt" + +// buildCreateDiscussionStepConfig builds the configuration for creating a discussion +func (c *Compiler) buildCreateDiscussionStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { + cfg := data.SafeOutputs.CreateDiscussions + + var customEnvVars []string + customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, "")...) 
+ + condition := BuildSafeOutputType("create_discussion") + + return SafeOutputStepConfig{ + StepName: "Create Discussion", + StepID: "create_discussion", + ScriptName: "create_discussion", + Script: getCreateDiscussionScript(), + CustomEnvVars: customEnvVars, + Condition: condition, + Token: cfg.GitHubToken, + } +} + +// buildCloseDiscussionStepConfig builds the configuration for closing a discussion +func (c *Compiler) buildCloseDiscussionStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { + cfg := data.SafeOutputs.CloseDiscussions + + var customEnvVars []string + customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, "")...) + + condition := BuildSafeOutputType("close_discussion") + + return SafeOutputStepConfig{ + StepName: "Close Discussion", + StepID: "close_discussion", + ScriptName: "close_discussion", + Script: getCloseDiscussionScript(), + CustomEnvVars: customEnvVars, + Condition: condition, + Token: cfg.GitHubToken, + } +} + +// buildUpdateDiscussionStepConfig builds the configuration for updating a discussion +func (c *Compiler) buildUpdateDiscussionStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { + cfg := data.SafeOutputs.UpdateDiscussions + + var customEnvVars []string + customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, cfg.TargetRepoSlug)...) + + // Add target environment variable if set + if cfg.Target != "" { + customEnvVars = append(customEnvVars, fmt.Sprintf(" GH_AW_UPDATE_TARGET: %q\n", cfg.Target)) + } + + // Add field update flags - presence of pointer indicates field can be updated + if cfg.Title != nil { + customEnvVars = append(customEnvVars, " GH_AW_UPDATE_TITLE: \"true\"\n") + } + if cfg.Body != nil { + customEnvVars = append(customEnvVars, " GH_AW_UPDATE_BODY: \"true\"\n") + } + if cfg.Labels != nil { + customEnvVars = append(customEnvVars, " GH_AW_UPDATE_LABELS: \"true\"\n") + } + + condition := BuildSafeOutputType("update_discussion") + + return SafeOutputStepConfig{ + StepName: "Update Discussion", + StepID: "update_discussion", + ScriptName: "update_discussion", + Script: getUpdateDiscussionScript(), + CustomEnvVars: customEnvVars, + Condition: condition, + Token: cfg.GitHubToken, + } +} diff --git a/pkg/workflow/compiler_safe_outputs_issues.go b/pkg/workflow/compiler_safe_outputs_issues.go new file mode 100644 index 0000000000..ddeeaffcdd --- /dev/null +++ b/pkg/workflow/compiler_safe_outputs_issues.go @@ -0,0 +1,94 @@ +package workflow + +import "fmt" + +// buildCreateIssueStepConfig builds the configuration for creating an issue +func (c *Compiler) buildCreateIssueStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { + cfg := data.SafeOutputs.CreateIssues + + var customEnvVars []string + customEnvVars = append(customEnvVars, buildTitlePrefixEnvVar("GH_AW_ISSUE_TITLE_PREFIX", cfg.TitlePrefix)...) + customEnvVars = append(customEnvVars, buildLabelsEnvVar("GH_AW_ISSUE_LABELS", cfg.Labels)...) + customEnvVars = append(customEnvVars, buildLabelsEnvVar("GH_AW_ISSUE_ALLOWED_LABELS", cfg.AllowedLabels)...) + customEnvVars = append(customEnvVars, buildAllowedReposEnvVar("GH_AW_ALLOWED_REPOS", cfg.AllowedRepos)...) + if cfg.Expires > 0 { + customEnvVars = append(customEnvVars, fmt.Sprintf(" GH_AW_ISSUE_EXPIRES: \"%d\"\n", cfg.Expires)) + } + customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, cfg.TargetRepoSlug)...) 
+ + condition := BuildSafeOutputType("create_issue") + + return SafeOutputStepConfig{ + StepName: "Create Issue", + StepID: "create_issue", + ScriptName: "create_issue", + Script: getCreateIssueScript(), + CustomEnvVars: customEnvVars, + Condition: condition, + Token: cfg.GitHubToken, + } +} + +// buildCloseIssueStepConfig builds the configuration for closing an issue +func (c *Compiler) buildCloseIssueStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { + cfg := data.SafeOutputs.CloseIssues + + var customEnvVars []string + customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, "")...) + + condition := BuildSafeOutputType("close_issue") + + return SafeOutputStepConfig{ + StepName: "Close Issue", + StepID: "close_issue", + ScriptName: "close_issue", + Script: getCloseIssueScript(), + CustomEnvVars: customEnvVars, + Condition: condition, + Token: cfg.GitHubToken, + } +} + +// buildUpdateIssueStepConfig builds the configuration for updating an issue +func (c *Compiler) buildUpdateIssueStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { + cfg := data.SafeOutputs.UpdateIssues + + var customEnvVars []string + customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, cfg.TargetRepoSlug)...) + + condition := BuildSafeOutputType("update_issue") + + return SafeOutputStepConfig{ + StepName: "Update Issue", + StepID: "update_issue", + ScriptName: "update_issue", + Script: getUpdateIssueScript(), + CustomEnvVars: customEnvVars, + Condition: condition, + Token: cfg.GitHubToken, + } +} + +// buildLinkSubIssueStepConfig builds the configuration for linking a sub-issue +func (c *Compiler) buildLinkSubIssueStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool, createIssueEnabled bool) SafeOutputStepConfig { + cfg := data.SafeOutputs.LinkSubIssue + + var customEnvVars []string + if createIssueEnabled { + customEnvVars = append(customEnvVars, " GH_AW_CREATED_ISSUE_NUMBER: ${{ steps.create_issue.outputs.issue_number }}\n") + customEnvVars = append(customEnvVars, " GH_AW_TEMPORARY_ID_MAP: ${{ steps.create_issue.outputs.temporary_id_map }}\n") + } + customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, "")...) 
+ + condition := BuildSafeOutputType("link_sub_issue") + + return SafeOutputStepConfig{ + StepName: "Link Sub Issue", + StepID: "link_sub_issue", + ScriptName: "link_sub_issue", + Script: getLinkSubIssueScript(), + CustomEnvVars: customEnvVars, + Condition: condition, + Token: cfg.GitHubToken, + } +} diff --git a/pkg/workflow/compiler_safe_outputs_prs.go b/pkg/workflow/compiler_safe_outputs_prs.go new file mode 100644 index 0000000000..a257569cbf --- /dev/null +++ b/pkg/workflow/compiler_safe_outputs_prs.go @@ -0,0 +1,304 @@ +package workflow + +import ( + "fmt" + + "github.com/githubnext/gh-aw/pkg/constants" +) + +// buildCreatePullRequestStepConfig builds the configuration for creating a pull request +func (c *Compiler) buildCreatePullRequestStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { + cfg := data.SafeOutputs.CreatePullRequests + + var customEnvVars []string + // Pass the base branch from GitHub context (required by create_pull_request.cjs) + // Note: GH_AW_WORKFLOW_ID is now set at the job level and inherited by all steps + customEnvVars = append(customEnvVars, " GH_AW_BASE_BRANCH: ${{ github.ref_name }}\n") + customEnvVars = append(customEnvVars, buildTitlePrefixEnvVar("GH_AW_PR_TITLE_PREFIX", cfg.TitlePrefix)...) + customEnvVars = append(customEnvVars, buildLabelsEnvVar("GH_AW_PR_LABELS", cfg.Labels)...) + customEnvVars = append(customEnvVars, buildLabelsEnvVar("GH_AW_PR_ALLOWED_LABELS", cfg.AllowedLabels)...) + // Add draft setting - always set with default to true for backwards compatibility + draftValue := true // Default value + if cfg.Draft != nil { + draftValue = *cfg.Draft + } + customEnvVars = append(customEnvVars, fmt.Sprintf(" GH_AW_PR_DRAFT: %q\n", fmt.Sprintf("%t", draftValue))) + // Add if-no-changes setting - always set with default to "warn" + ifNoChanges := cfg.IfNoChanges + if ifNoChanges == "" { + ifNoChanges = "warn" // Default value + } + customEnvVars = append(customEnvVars, fmt.Sprintf(" GH_AW_PR_IF_NO_CHANGES: %q\n", ifNoChanges)) + // Add allow-empty setting + customEnvVars = append(customEnvVars, fmt.Sprintf(" GH_AW_PR_ALLOW_EMPTY: %q\n", fmt.Sprintf("%t", cfg.AllowEmpty))) + // Add max patch size setting + maxPatchSize := 1024 // default 1024 KB + if data.SafeOutputs.MaximumPatchSize > 0 { + maxPatchSize = data.SafeOutputs.MaximumPatchSize + } + customEnvVars = append(customEnvVars, fmt.Sprintf(" GH_AW_MAX_PATCH_SIZE: %d\n", maxPatchSize)) + // Add activation comment information if available (for updating the comment with PR link) + if data.AIReaction != "" && data.AIReaction != "none" { + customEnvVars = append(customEnvVars, fmt.Sprintf(" GH_AW_COMMENT_ID: ${{ needs.%s.outputs.comment_id }}\n", constants.ActivationJobName)) + customEnvVars = append(customEnvVars, fmt.Sprintf(" GH_AW_COMMENT_REPO: ${{ needs.%s.outputs.comment_repo }}\n", constants.ActivationJobName)) + } + // Add expires value if set (only for same-repo PRs - when target-repo is not set) + if cfg.Expires > 0 && cfg.TargetRepoSlug == "" { + customEnvVars = append(customEnvVars, fmt.Sprintf(" GH_AW_PR_EXPIRES: \"%d\"\n", cfg.Expires)) + } + customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, cfg.TargetRepoSlug)...) 
+ + condition := BuildSafeOutputType("create_pull_request") + + // Build pre-steps for checkout and git config + preSteps := c.buildCreatePullRequestPreStepsConsolidated(data, cfg, condition) + + return SafeOutputStepConfig{ + StepName: "Create Pull Request", + StepID: "create_pull_request", + ScriptName: "create_pull_request", + Script: getCreatePullRequestScript(), + CustomEnvVars: customEnvVars, + Condition: condition, + Token: cfg.GitHubToken, + PreSteps: preSteps, + } +} + +// buildUpdatePullRequestStepConfig builds the configuration for updating a pull request +func (c *Compiler) buildUpdatePullRequestStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { + cfg := data.SafeOutputs.UpdatePullRequests + + var customEnvVars []string + customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, cfg.TargetRepoSlug)...) + + condition := BuildSafeOutputType("update_pull_request") + + return SafeOutputStepConfig{ + StepName: "Update Pull Request", + StepID: "update_pull_request", + ScriptName: "update_pull_request", + Script: getUpdatePullRequestScript(), + CustomEnvVars: customEnvVars, + Condition: condition, + Token: cfg.GitHubToken, + } +} + +// buildClosePullRequestStepConfig builds the configuration for closing a pull request +func (c *Compiler) buildClosePullRequestStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { + cfg := data.SafeOutputs.ClosePullRequests + + var customEnvVars []string + customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, "")...) + + condition := BuildSafeOutputType("close_pull_request") + + return SafeOutputStepConfig{ + StepName: "Close Pull Request", + StepID: "close_pull_request", + ScriptName: "close_pull_request", + Script: getClosePullRequestScript(), + CustomEnvVars: customEnvVars, + Condition: condition, + Token: cfg.GitHubToken, + } +} + +// buildCreatePRReviewCommentStepConfig builds the configuration for creating a PR review comment +func (c *Compiler) buildCreatePRReviewCommentStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { + cfg := data.SafeOutputs.CreatePullRequestReviewComments + + var customEnvVars []string + // Add side configuration + if cfg.Side != "" { + customEnvVars = append(customEnvVars, fmt.Sprintf(" GH_AW_PR_REVIEW_COMMENT_SIDE: %q\n", cfg.Side)) + } + // Add target configuration + if cfg.Target != "" { + customEnvVars = append(customEnvVars, fmt.Sprintf(" GH_AW_PR_REVIEW_COMMENT_TARGET: %q\n", cfg.Target)) + } + customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, "")...) 
+ + condition := BuildSafeOutputType("create_pull_request_review_comment") + + return SafeOutputStepConfig{ + StepName: "Create PR Review Comment", + StepID: "create_pr_review_comment", + ScriptName: "create_pr_review_comment", + Script: getCreatePRReviewCommentScript(), + CustomEnvVars: customEnvVars, + Condition: condition, + Token: cfg.GitHubToken, + } +} + +// buildPushToPullRequestBranchStepConfig builds the configuration for pushing to a pull request branch +func (c *Compiler) buildPushToPullRequestBranchStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { + cfg := data.SafeOutputs.PushToPullRequestBranch + + var customEnvVars []string + // Add target config if set + if cfg.Target != "" { + customEnvVars = append(customEnvVars, fmt.Sprintf(" GH_AW_PUSH_TARGET: %q\n", cfg.Target)) + } + // Add if-no-changes config if set + if cfg.IfNoChanges != "" { + customEnvVars = append(customEnvVars, fmt.Sprintf(" GH_AW_PUSH_IF_NO_CHANGES: %q\n", cfg.IfNoChanges)) + } + // Add title prefix if set (using same env var as create-pull-request) + customEnvVars = append(customEnvVars, buildTitlePrefixEnvVar("GH_AW_PR_TITLE_PREFIX", cfg.TitlePrefix)...) + // Add labels if set + customEnvVars = append(customEnvVars, buildLabelsEnvVar("GH_AW_PR_LABELS", cfg.Labels)...) + // Add commit title suffix if set + if cfg.CommitTitleSuffix != "" { + customEnvVars = append(customEnvVars, fmt.Sprintf(" GH_AW_COMMIT_TITLE_SUFFIX: %q\n", cfg.CommitTitleSuffix)) + } + // Add max patch size setting + maxPatchSize := 1024 // default 1024 KB + if data.SafeOutputs.MaximumPatchSize > 0 { + maxPatchSize = data.SafeOutputs.MaximumPatchSize + } + customEnvVars = append(customEnvVars, fmt.Sprintf(" GH_AW_MAX_PATCH_SIZE: %d\n", maxPatchSize)) + customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, "")...) + + condition := BuildSafeOutputType("push_to_pull_request_branch") + + // Build pre-steps for checkout and git config + preSteps := c.buildPushToPullRequestBranchPreStepsConsolidated(data, cfg, condition) + + return SafeOutputStepConfig{ + StepName: "Push To Pull Request Branch", + StepID: "push_to_pull_request_branch", + ScriptName: "push_to_pull_request_branch", + Script: getPushToPullRequestBranchScript(), + CustomEnvVars: customEnvVars, + Condition: condition, + Token: cfg.GitHubToken, + PreSteps: preSteps, + } +} + +// buildAddReviewerStepConfig builds the configuration for adding a reviewer +func (c *Compiler) buildAddReviewerStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { + cfg := data.SafeOutputs.AddReviewer + + var customEnvVars []string + customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, "")...) 
+ + condition := BuildSafeOutputType("add_reviewer") + + return SafeOutputStepConfig{ + StepName: "Add Reviewer", + StepID: "add_reviewer", + ScriptName: "add_reviewer", + Script: getAddReviewerScript(), + CustomEnvVars: customEnvVars, + Condition: condition, + Token: cfg.GitHubToken, + } +} + +// buildCreatePullRequestPreStepsConsolidated builds the pre-steps for create-pull-request +// in the consolidated safe outputs job +func (c *Compiler) buildCreatePullRequestPreStepsConsolidated(data *WorkflowData, cfg *CreatePullRequestsConfig, condition ConditionNode) []string { + var preSteps []string + + // Determine which token to use for checkout + // If an app is configured, use the app token; otherwise use the default github.token + var checkoutToken string + var gitRemoteToken string + if data.SafeOutputs.App != nil { + checkoutToken = "${{ steps.app-token.outputs.token }}" + gitRemoteToken = "${{ steps.app-token.outputs.token }}" + } else { + checkoutToken = "${{ github.token }}" + gitRemoteToken = "${{ github.token }}" + } + + // Step 1: Checkout repository with conditional execution + preSteps = append(preSteps, " - name: Checkout repository\n") + // Add the condition to only checkout if create_pull_request will run + preSteps = append(preSteps, fmt.Sprintf(" if: %s\n", condition.Render())) + preSteps = append(preSteps, fmt.Sprintf(" uses: %s\n", GetActionPin("actions/checkout"))) + preSteps = append(preSteps, " with:\n") + preSteps = append(preSteps, fmt.Sprintf(" token: %s\n", checkoutToken)) + preSteps = append(preSteps, " persist-credentials: false\n") + preSteps = append(preSteps, " fetch-depth: 1\n") + if c.trialMode { + if c.trialLogicalRepoSlug != "" { + preSteps = append(preSteps, fmt.Sprintf(" repository: %s\n", c.trialLogicalRepoSlug)) + } + } + + // Step 2: Configure Git credentials with conditional execution + gitConfigSteps := []string{ + " - name: Configure Git credentials\n", + fmt.Sprintf(" if: %s\n", condition.Render()), + " env:\n", + " REPO_NAME: ${{ github.repository }}\n", + " SERVER_URL: ${{ github.server_url }}\n", + " run: |\n", + " git config --global user.email \"github-actions[bot]@users.noreply.github.com\"\n", + " git config --global user.name \"github-actions[bot]\"\n", + " # Re-authenticate git with GitHub token\n", + " SERVER_URL_STRIPPED=\"${SERVER_URL#https://}\"\n", + fmt.Sprintf(" git remote set-url origin \"https://x-access-token:%s@${SERVER_URL_STRIPPED}/${REPO_NAME}.git\"\n", gitRemoteToken), + " echo \"Git configured with standard GitHub Actions identity\"\n", + } + preSteps = append(preSteps, gitConfigSteps...) 
+
+	return preSteps
+}
+
+// buildPushToPullRequestBranchPreStepsConsolidated builds the pre-steps for push-to-pull-request-branch
+// in the consolidated safe outputs job
+func (c *Compiler) buildPushToPullRequestBranchPreStepsConsolidated(data *WorkflowData, cfg *PushToPullRequestBranchConfig, condition ConditionNode) []string {
+	var preSteps []string
+
+	// Determine which token to use for checkout
+	// If an app is configured, use the app token; otherwise use the default github.token
+	var checkoutToken string
+	var gitRemoteToken string
+	if data.SafeOutputs.App != nil {
+		checkoutToken = "${{ steps.app-token.outputs.token }}"
+		gitRemoteToken = "${{ steps.app-token.outputs.token }}"
+	} else {
+		checkoutToken = "${{ github.token }}"
+		gitRemoteToken = "${{ github.token }}"
+	}
+
+	// Step 1: Checkout repository with conditional execution
+	preSteps = append(preSteps, " - name: Checkout repository\n")
+	// Add the condition to only checkout if push_to_pull_request_branch will run
+	preSteps = append(preSteps, fmt.Sprintf(" if: %s\n", condition.Render()))
+	preSteps = append(preSteps, fmt.Sprintf(" uses: %s\n", GetActionPin("actions/checkout")))
+	preSteps = append(preSteps, " with:\n")
+	preSteps = append(preSteps, fmt.Sprintf(" token: %s\n", checkoutToken))
+	preSteps = append(preSteps, " persist-credentials: false\n")
+	preSteps = append(preSteps, " fetch-depth: 1\n")
+	if c.trialMode {
+		if c.trialLogicalRepoSlug != "" {
+			preSteps = append(preSteps, fmt.Sprintf(" repository: %s\n", c.trialLogicalRepoSlug))
+		}
+	}
+
+	// Step 2: Configure Git credentials with conditional execution
+	gitConfigSteps := []string{
+		" - name: Configure Git credentials\n",
+		fmt.Sprintf(" if: %s\n", condition.Render()),
+		" env:\n",
+		" REPO_NAME: ${{ github.repository }}\n",
+		" SERVER_URL: ${{ github.server_url }}\n",
+		" run: |\n",
+		" git config --global user.email \"github-actions[bot]@users.noreply.github.com\"\n",
+		" git config --global user.name \"github-actions[bot]\"\n",
+		" # Re-authenticate git with GitHub token\n",
+		" SERVER_URL_STRIPPED=\"${SERVER_URL#https://}\"\n",
+		fmt.Sprintf(" git remote set-url origin \"https://x-access-token:%s@${SERVER_URL_STRIPPED}/${REPO_NAME}.git\"\n", gitRemoteToken),
+		" echo \"Git configured with standard GitHub Actions identity\"\n",
+	}
+	preSteps = append(preSteps, gitConfigSteps...)
+
+	return preSteps
+}
diff --git a/pkg/workflow/compiler_safe_outputs_shared.go b/pkg/workflow/compiler_safe_outputs_shared.go
new file mode 100644
index 0000000000..3651364d72
--- /dev/null
+++ b/pkg/workflow/compiler_safe_outputs_shared.go
@@ -0,0 +1,115 @@
+package workflow
+
+import "fmt"
+
+// buildAddCommentStepConfig builds the configuration for adding a comment
+// This works across multiple entity types (issues, PRs, discussions)
+func (c *Compiler) buildAddCommentStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool, createIssueEnabled, createDiscussionEnabled, createPullRequestEnabled bool) SafeOutputStepConfig {
+	cfg := data.SafeOutputs.AddComments
+
+	var customEnvVars []string
+	if cfg.Target != "" {
+		customEnvVars = append(customEnvVars, fmt.Sprintf(" GH_AW_COMMENT_TARGET: %q\n", cfg.Target))
+	}
+	if cfg.Discussion != nil && *cfg.Discussion {
+		customEnvVars = append(customEnvVars, " GITHUB_AW_COMMENT_DISCUSSION: \"true\"\n")
+	}
+	if cfg.HideOlderComments {
+		customEnvVars = append(customEnvVars, " GH_AW_HIDE_OLDER_COMMENTS: \"true\"\n")
+	}
+
+	// Reference outputs from earlier steps in the same job
+	if createIssueEnabled {
+		customEnvVars = append(customEnvVars, " GH_AW_CREATED_ISSUE_URL: ${{ steps.create_issue.outputs.issue_url }}\n")
+		customEnvVars = append(customEnvVars, " GH_AW_CREATED_ISSUE_NUMBER: ${{ steps.create_issue.outputs.issue_number }}\n")
+		customEnvVars = append(customEnvVars, " GH_AW_TEMPORARY_ID_MAP: ${{ steps.create_issue.outputs.temporary_id_map }}\n")
+	}
+	if createDiscussionEnabled {
+		customEnvVars = append(customEnvVars, " GH_AW_CREATED_DISCUSSION_URL: ${{ steps.create_discussion.outputs.discussion_url }}\n")
+		customEnvVars = append(customEnvVars, " GH_AW_CREATED_DISCUSSION_NUMBER: ${{ steps.create_discussion.outputs.discussion_number }}\n")
+	}
+	if createPullRequestEnabled {
+		customEnvVars = append(customEnvVars, " GH_AW_CREATED_PULL_REQUEST_URL: ${{ steps.create_pull_request.outputs.pull_request_url }}\n")
+		customEnvVars = append(customEnvVars, " GH_AW_CREATED_PULL_REQUEST_NUMBER: ${{ steps.create_pull_request.outputs.pull_request_number }}\n")
+	}
+	customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, cfg.TargetRepoSlug)...)
+
+	condition := BuildSafeOutputType("add_comment")
+
+	return SafeOutputStepConfig{
+		StepName:      "Add Comment",
+		StepID:        "add_comment",
+		ScriptName:    "add_comment",
+		Script:        getAddCommentScript(),
+		CustomEnvVars: customEnvVars,
+		Condition:     condition,
+		Token:         cfg.GitHubToken,
+	}
+}
+
+// buildAddLabelsStepConfig builds the configuration for adding labels
+func (c *Compiler) buildAddLabelsStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig {
+	cfg := data.SafeOutputs.AddLabels
+
+	var customEnvVars []string
+	customEnvVars = append(customEnvVars, buildLabelsEnvVar("GH_AW_LABELS_ALLOWED", cfg.Allowed)...)
+	if cfg.Max > 0 {
+		customEnvVars = append(customEnvVars, fmt.Sprintf(" GH_AW_LABELS_MAX_COUNT: %d\n", cfg.Max))
+	}
+	if cfg.Target != "" {
+		customEnvVars = append(customEnvVars, fmt.Sprintf(" GH_AW_LABELS_TARGET: %q\n", cfg.Target))
+	}
+	customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, cfg.TargetRepoSlug)...)
+ + condition := BuildSafeOutputType("add_labels") + + return SafeOutputStepConfig{ + StepName: "Add Labels", + StepID: "add_labels", + ScriptName: "add_labels", + Script: getAddLabelsScript(), + CustomEnvVars: customEnvVars, + Condition: condition, + Token: cfg.GitHubToken, + } +} + +// buildHideCommentStepConfig builds the configuration for hiding a comment +func (c *Compiler) buildHideCommentStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { + cfg := data.SafeOutputs.HideComment + + var customEnvVars []string + customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, "")...) + + condition := BuildSafeOutputType("hide_comment") + + return SafeOutputStepConfig{ + StepName: "Hide Comment", + StepID: "hide_comment", + ScriptName: "hide_comment", + Script: getHideCommentScript(), + CustomEnvVars: customEnvVars, + Condition: condition, + Token: cfg.GitHubToken, + } +} + +// buildUploadAssetsStepConfig builds the configuration for uploading assets +func (c *Compiler) buildUploadAssetsStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { + cfg := data.SafeOutputs.UploadAssets + + var customEnvVars []string + customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, "")...) + + condition := BuildSafeOutputType("upload_asset") + + return SafeOutputStepConfig{ + StepName: "Upload Assets", + StepID: "upload_assets", + ScriptName: "upload_assets", + Script: getUploadAssetsScript(), + CustomEnvVars: customEnvVars, + Condition: condition, + Token: cfg.GitHubToken, + } +} diff --git a/pkg/workflow/compiler_safe_outputs_specialized.go b/pkg/workflow/compiler_safe_outputs_specialized.go new file mode 100644 index 0000000000..dd0e2a0de8 --- /dev/null +++ b/pkg/workflow/compiler_safe_outputs_specialized.go @@ -0,0 +1,145 @@ +package workflow + +import "fmt" + +// buildCreateCodeScanningAlertStepConfig builds the configuration for creating a code scanning alert +func (c *Compiler) buildCreateCodeScanningAlertStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool, workflowFilename string) SafeOutputStepConfig { + cfg := data.SafeOutputs.CreateCodeScanningAlerts + + var customEnvVars []string + customEnvVars = append(customEnvVars, fmt.Sprintf(" GH_AW_WORKFLOW_FILENAME: %q\n", workflowFilename)) + customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, "")...) + + condition := BuildSafeOutputType("create_code_scanning_alert") + + return SafeOutputStepConfig{ + StepName: "Create Code Scanning Alert", + StepID: "create_code_scanning_alert", + ScriptName: "create_code_scanning_alert", + Script: getCreateCodeScanningAlertScript(), + CustomEnvVars: customEnvVars, + Condition: condition, + Token: cfg.GitHubToken, + } +} + +// buildAssignMilestoneStepConfig builds the configuration for assigning a milestone +func (c *Compiler) buildAssignMilestoneStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { + cfg := data.SafeOutputs.AssignMilestone + + var customEnvVars []string + customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, "")...) 
+ + condition := BuildSafeOutputType("assign_milestone") + + return SafeOutputStepConfig{ + StepName: "Assign Milestone", + StepID: "assign_milestone", + ScriptName: "assign_milestone", + Script: getAssignMilestoneScript(), + CustomEnvVars: customEnvVars, + Condition: condition, + Token: cfg.GitHubToken, + } +} + +// buildAssignToAgentStepConfig builds the configuration for assigning to an agent +func (c *Compiler) buildAssignToAgentStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { + cfg := data.SafeOutputs.AssignToAgent + + var customEnvVars []string + customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, "")...) + + condition := BuildSafeOutputType("assign_to_agent") + + return SafeOutputStepConfig{ + StepName: "Assign To Agent", + StepID: "assign_to_agent", + ScriptName: "assign_to_agent", + Script: getAssignToAgentScript(), + CustomEnvVars: customEnvVars, + Condition: condition, + Token: cfg.GitHubToken, + UseAgentToken: true, + } +} + +// buildAssignToUserStepConfig builds the configuration for assigning to a user +func (c *Compiler) buildAssignToUserStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { + cfg := data.SafeOutputs.AssignToUser + + var customEnvVars []string + customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, "")...) + + condition := BuildSafeOutputType("assign_to_user") + + return SafeOutputStepConfig{ + StepName: "Assign To User", + StepID: "assign_to_user", + ScriptName: "assign_to_user", + Script: getAssignToUserScript(), + CustomEnvVars: customEnvVars, + Condition: condition, + Token: cfg.GitHubToken, + } +} + +// buildUpdateReleaseStepConfig builds the configuration for updating a release +func (c *Compiler) buildUpdateReleaseStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { + cfg := data.SafeOutputs.UpdateRelease + + var customEnvVars []string + customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, "")...) + + condition := BuildSafeOutputType("update_release") + + return SafeOutputStepConfig{ + StepName: "Update Release", + StepID: "update_release", + ScriptName: "update_release", + Script: getUpdateReleaseScript(), + CustomEnvVars: customEnvVars, + Condition: condition, + Token: cfg.GitHubToken, + } +} + +// buildCreateAgentTaskStepConfig builds the configuration for creating an agent task +func (c *Compiler) buildCreateAgentTaskStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { + cfg := data.SafeOutputs.CreateAgentTasks + + var customEnvVars []string + customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, "")...) + + condition := BuildSafeOutputType("create_agent_task") + + return SafeOutputStepConfig{ + StepName: "Create Agent Task", + StepID: "create_agent_task", + Script: createAgentTaskScript, + CustomEnvVars: customEnvVars, + Condition: condition, + Token: cfg.GitHubToken, + UseCopilotToken: true, + } +} + +// buildUpdateProjectStepConfig builds the configuration for updating a project +func (c *Compiler) buildUpdateProjectStepConfig(data *WorkflowData, mainJobName string, threatDetectionEnabled bool) SafeOutputStepConfig { + cfg := data.SafeOutputs.UpdateProjects + + var customEnvVars []string + customEnvVars = append(customEnvVars, c.buildStepLevelSafeOutputEnvVars(data, "")...) 
+ + condition := BuildSafeOutputType("update_project") + + return SafeOutputStepConfig{ + StepName: "Update Project", + StepID: "update_project", + ScriptName: "update_project", + Script: getUpdateProjectScript(), + CustomEnvVars: customEnvVars, + Condition: condition, + Token: cfg.GitHubToken, + } +} diff --git a/pkg/workflow/js/safe_outputs_tools_loader.cjs b/pkg/workflow/js/safe_outputs_tools_loader.cjs index 69eb19e315..88c1975e94 100644 --- a/pkg/workflow/js/safe_outputs_tools_loader.cjs +++ b/pkg/workflow/js/safe_outputs_tools_loader.cjs @@ -9,30 +9,28 @@ const fs = require("fs"); */ function loadTools(server) { const toolsPath = process.env.GH_AW_SAFE_OUTPUTS_TOOLS_PATH || "/tmp/gh-aw/safeoutputs/tools.json"; - let ALL_TOOLS = []; - + server.debug(`Reading tools from file: ${toolsPath}`); + if (!fs.existsSync(toolsPath)) { + server.debug(`Tools file does not exist at: ${toolsPath}`); + server.debug(`Using empty tools array`); + return []; + } + try { - if (fs.existsSync(toolsPath)) { - server.debug(`Tools file exists at: ${toolsPath}`); - const toolsFileContent = fs.readFileSync(toolsPath, "utf8"); - server.debug(`Tools file content length: ${toolsFileContent.length} characters`); - server.debug(`Tools file read successfully, attempting to parse JSON`); - ALL_TOOLS = JSON.parse(toolsFileContent); - server.debug(`Successfully parsed ${ALL_TOOLS.length} tools from file`); - } else { - server.debug(`Tools file does not exist at: ${toolsPath}`); - server.debug(`Using empty tools array`); - ALL_TOOLS = []; - } + server.debug(`Tools file exists at: ${toolsPath}`); + const toolsFileContent = fs.readFileSync(toolsPath, "utf8"); + server.debug(`Tools file content length: ${toolsFileContent.length} characters`); + server.debug(`Tools file read successfully, attempting to parse JSON`); + const tools = JSON.parse(toolsFileContent); + server.debug(`Successfully parsed ${tools.length} tools from file`); + return tools; } catch (error) { server.debug(`Error reading tools file: ${error instanceof Error ? 
error.message : String(error)}`); server.debug(`Falling back to empty tools array`); - ALL_TOOLS = []; + return []; } - - return ALL_TOOLS; } /** @@ -42,15 +40,19 @@ function loadTools(server) { * @returns {Array} Tools with handlers attached */ function attachHandlers(tools, handlers) { + const handlerMap = { + create_pull_request: handlers.createPullRequestHandler, + push_to_pull_request_branch: handlers.pushToPullRequestBranchHandler, + upload_asset: handlers.uploadAssetHandler, + }; + tools.forEach(tool => { - if (tool.name === "create_pull_request") { - tool.handler = handlers.createPullRequestHandler; - } else if (tool.name === "push_to_pull_request_branch") { - tool.handler = handlers.pushToPullRequestBranchHandler; - } else if (tool.name === "upload_asset") { - tool.handler = handlers.uploadAssetHandler; + const handler = handlerMap[tool.name]; + if (handler) { + tool.handler = handler; } }); + return tools; } @@ -84,76 +86,64 @@ function registerDynamicTools(server, tools, config, outputFile, registerTool, n const normalizedKey = normalizeTool(configKey); // Skip if it's already a predefined tool - if (server.tools[normalizedKey]) { + if (server.tools[normalizedKey] || tools.find(t => t.name === normalizedKey)) { return; } - // Check if this is a safe-job (not in ALL_TOOLS) - if (!tools.find(t => t.name === normalizedKey)) { - const jobConfig = config[configKey]; - - // Create a dynamic tool for this safe-job - const dynamicTool = { - name: normalizedKey, - description: jobConfig && jobConfig.description ? jobConfig.description : `Custom safe-job: ${configKey}`, - inputSchema: { - type: "object", - properties: {}, - additionalProperties: true, // Allow any properties for flexibility - }, - handler: args => { - // Create a generic safe-job output entry - const entry = { - type: normalizedKey, - ...args, - }; - - // Write the entry to the output file in JSONL format - // CRITICAL: Use JSON.stringify WITHOUT formatting parameters for JSONL format - // Each entry must be on a single line, followed by a newline character - const entryJSON = JSON.stringify(entry); - fs.appendFileSync(outputFile, entryJSON + "\n"); - - // Use output from safe-job config if available - const outputText = jobConfig && jobConfig.output ? jobConfig.output : `Safe-job '${configKey}' executed successfully with arguments: ${JSON.stringify(args)}`; - - return { - content: [ - { - type: "text", - text: JSON.stringify({ result: outputText }), - }, - ], - }; - }, - }; - - // Add input schema based on job configuration if available - if (jobConfig && jobConfig.inputs) { - dynamicTool.inputSchema.properties = {}; - dynamicTool.inputSchema.required = []; - - Object.keys(jobConfig.inputs).forEach(inputName => { - const inputDef = jobConfig.inputs[inputName]; - const propSchema = { - type: inputDef.type || "string", - description: inputDef.description || `Input parameter: ${inputName}`, - }; - - if (inputDef.options && Array.isArray(inputDef.options)) { - propSchema.enum = inputDef.options; - } - - dynamicTool.inputSchema.properties[inputName] = propSchema; - - if (inputDef.required) { - dynamicTool.inputSchema.required.push(inputName); - } - }); - } - - registerTool(server, dynamicTool); + const jobConfig = config[configKey]; + + // Create a dynamic tool for this safe-job + const dynamicTool = { + name: normalizedKey, + description: jobConfig?.description ?? 
`Custom safe-job: ${configKey}`, + inputSchema: { + type: "object", + properties: {}, + additionalProperties: true, // Allow any properties for flexibility + }, + handler: args => { + // Create a generic safe-job output entry + const entry = { type: normalizedKey, ...args }; + + // Write the entry to the output file in JSONL format + // CRITICAL: Use JSON.stringify WITHOUT formatting parameters for JSONL format + // Each entry must be on a single line, followed by a newline character + fs.appendFileSync(outputFile, `${JSON.stringify(entry)}\n`); + + // Use output from safe-job config if available + const outputText = jobConfig?.output ?? `Safe-job '${configKey}' executed successfully with arguments: ${JSON.stringify(args)}`; + + return { + content: [{ type: "text", text: JSON.stringify({ result: outputText }) }], + }; + }, + }; + + // Add input schema based on job configuration if available + if (jobConfig?.inputs) { + dynamicTool.inputSchema.properties = {}; + dynamicTool.inputSchema.required = []; + + Object.keys(jobConfig.inputs).forEach(inputName => { + const inputDef = jobConfig.inputs[inputName]; + const propSchema = { + type: inputDef.type || "string", + description: inputDef.description || `Input parameter: ${inputName}`, + }; + + if (Array.isArray(inputDef.options)) { + propSchema.enum = inputDef.options; + } + + dynamicTool.inputSchema.properties[inputName] = propSchema; + + if (inputDef.required) { + dynamicTool.inputSchema.required.push(inputName); + } + }); } + + registerTool(server, dynamicTool); }); } diff --git a/pkg/workflow/secrets_validation.go b/pkg/workflow/secrets_validation.go index 28ebdc4116..d098510040 100644 --- a/pkg/workflow/secrets_validation.go +++ b/pkg/workflow/secrets_validation.go @@ -19,7 +19,11 @@ var secretsExpressionPattern = regexp.MustCompile(`^\$\{\{\s*secrets\.[A-Za-z_][ func validateSecretsExpression(key, value string) error { if !secretsExpressionPattern.MatchString(value) { secretsValidationLog.Printf("Invalid secret expression detected") - return fmt.Errorf("jobs.secrets.%s must be a GitHub Actions expression with secrets reference (e.g., '${{ secrets.MY_SECRET }}' or '${{ secrets.SECRET1 || secrets.SECRET2 }}')", key) + // Note: We intentionally do NOT include the key name in the error message to avoid + // logging sensitive information (secret key names) that could expose details about + // the organization's security infrastructure. The key name is available in the + // calling context for debugging purposes if needed. 
+ return fmt.Errorf("invalid secrets expression: must be a GitHub Actions expression with secrets reference (e.g., '${{ secrets.MY_SECRET }}' or '${{ secrets.SECRET1 || secrets.SECRET2 }}')") } secretsValidationLog.Printf("Valid secret expression validated") return nil diff --git a/pkg/workflow/secrets_validation_test.go b/pkg/workflow/secrets_validation_test.go index b273fa6198..589a25515f 100644 --- a/pkg/workflow/secrets_validation_test.go +++ b/pkg/workflow/secrets_validation_test.go @@ -64,7 +64,7 @@ func TestSecretsExpressionPattern(t *testing.T) { } // TestValidateSecretsExpressionErrorMessages tests that error messages are descriptive -// but do NOT include sensitive values to prevent clear-text logging +// but do NOT include sensitive values OR KEY NAMES to prevent clear-text logging func TestValidateSecretsExpressionErrorMessages(t *testing.T) { tests := []struct { name string @@ -74,24 +74,25 @@ func TestValidateSecretsExpressionErrorMessages(t *testing.T) { notExpectedInErrs []string }{ { - name: "plaintext does NOT show value in error", + name: "plaintext does NOT show value OR key name in error", key: "token", value: "plaintext", - expectedInErrs: []string{"jobs.secrets.token"}, - notExpectedInErrs: []string{"plaintext"}, + expectedInErrs: []string{"invalid secrets expression", "must be a GitHub Actions expression"}, + notExpectedInErrs: []string{"plaintext", "token"}, }, { - name: "env context does NOT show value in error", + name: "env context does NOT show value OR key name in error", key: "api_key", value: "${{ env.TOKEN }}", - expectedInErrs: []string{"jobs.secrets.api_key"}, - notExpectedInErrs: []string{"${{ env.TOKEN }}"}, + expectedInErrs: []string{"invalid secrets expression"}, + notExpectedInErrs: []string{"${{ env.TOKEN }}", "api_key"}, }, { - name: "key name in error", - key: "database_password", - value: "hardcoded", - expectedInErrs: []string{"jobs.secrets.database_password"}, + name: "key name NOT in error (security fix)", + key: "database_password", + value: "hardcoded", + expectedInErrs: []string{"invalid secrets expression"}, + notExpectedInErrs: []string{"database_password"}, }, { name: "example format in error", @@ -106,11 +107,11 @@ func TestValidateSecretsExpressionErrorMessages(t *testing.T) { expectedInErrs: []string{"${{ secrets.SECRET1 || secrets.SECRET2 }}"}, }, { - name: "mixed context error does NOT show value", - key: "token", + name: "mixed context error does NOT show value OR key name", + key: "deploy_token", value: "${{ secrets.TOKEN || env.FALLBACK }}", - expectedInErrs: []string{"jobs.secrets.token"}, - notExpectedInErrs: []string{"${{ secrets.TOKEN || env.FALLBACK }}"}, + expectedInErrs: []string{"invalid secrets expression"}, + notExpectedInErrs: []string{"${{ secrets.TOKEN || env.FALLBACK }}", "deploy_token"}, }, } @@ -164,9 +165,14 @@ func TestValidateSecretsExpressionWithDifferentKeys(t *testing.T) { if err == nil { t.Errorf("Expected error for invalid value with key %q, got nil", key) } - // Error message should include the key name (if not empty) - if key != "" && !strings.Contains(err.Error(), "jobs.secrets."+key) { - t.Errorf("Expected error to contain key name %q, got: %s", key, err.Error()) + // Security fix: Error message should NOT include the key name to prevent + // logging sensitive information about the organization's security infrastructure + if key != "" && strings.Contains(err.Error(), key) { + t.Errorf("Error should NOT contain sensitive key name %q, but got: %s", key, err.Error()) + } + // Error should still be 
descriptive + if !strings.Contains(err.Error(), "invalid secrets expression") { + t.Errorf("Error should contain descriptive message, got: %s", err.Error()) } }) }