
Agent Performance Analyzer - Meta-Orchestrator #157

Triggered via schedule: January 3, 2026 04:56
Status: Failure
Total duration: 31m 17s
Artifacts: 5
Jobs
pre_activation (6s)
activation (7s)
detection
push_repo_memory
safe_outputs
conclusion (8s)

Annotations

16 errors, 14 warnings, and 40 notices
agent
Line 41518 (pattern: Generic ERROR messages): matched the error-handling text of a bash tool call that creates /home/runner/work/gh-aw/gh-aw/collect_workflow_stats.py via a heredoc and marks it executable (description: "Create workflow statistics collection script"). The raw log is a truncated, JSON-escaped copy of that command.
agent
Line 41415 (pattern: Generic ERROR messages): matched the same error-handling text in a file-write tool call that saves /home/runner/work/gh-aw/gh-aw/collect_workflow_stats.py. The raw log is a truncated, JSON-escaped copy of the file contents.
agent
Line 25619 (pattern: Generic ERROR messages): matched exception-handling output in a python3 heredoc that probes the GitHub API with requests (description: "Test GitHub API access for PRs"). The raw log is truncated and JSON-escaped.
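The truncated heredoc above only shows its imports and exception handling. A minimal sketch of what such a probe could look like follows; the endpoint, token handling, and query parameters are assumptions, not taken from the log.

# Hedged sketch of the API probe flagged above. Only requests, a 14-day window,
# and the try/except printing "Exception: {e}" are visible in the truncated log;
# everything else below is assumed for illustration.
import os
from datetime import datetime, timedelta, timezone

import requests

since = (datetime.now(timezone.utc) - timedelta(days=14)).strftime("%Y-%m-%dT%H:%M:%SZ")
url = "https://api.github.com/repos/githubnext/gh-aw/pulls"
headers = {"Authorization": f"Bearer {os.environ.get('GITHUB_TOKEN', '')}"}

try:
    response = requests.get(url, headers=headers,
                            params={"state": "all", "per_page": 50}, timeout=30)
    if response.status_code != 200:
        print(f"Error: {response.status_code} {response.text}")
    else:
        # created_at strings share the same format as `since`, so a string
        # comparison is enough for the date cutoff.
        recent = [p for p in response.json() if p["created_at"] >= since]
        print(f"{len(recent)} PRs created since {since}")
except Exception as e:
    print(f"Exception: {e}")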
agent
Line 22530 (pattern: Generic ERROR messages): matched exception-handling output in a python3 heredoc that shells out to the gh CLI via subprocess (description: "Fetch recent PRs from gh-aw repo"). The raw log is truncated and JSON-escaped.
agent
Line 18993 (pattern: Generic ERROR messages): matched error-handling output in another python3 heredoc probing the GitHub API with requests (description: "Test GitHub API access"). The raw log is truncated and JSON-escaped.
agent
Line 17358 (pattern: Generic ERROR messages): matched the error-handling preamble of a statistics script for githubnext/gh-aw. The script lists .lock.yml workflows with `gh workflow list`, fetches up to 100 runs per workflow with `gh run list --json conclusion,duration,status,createdAt`, filters them to the past 14 days, computes successful/failed counts, success rate, and average duration (milliseconds converted to seconds), and assembles a metadata/workflows JSON payload. The escaped script body is cut off mid-output in the raw log.
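The statistics step described above can be reconstructed as a minimal sketch. Field names (createdAt, conclusion, duration) and the millisecond duration unit are copied from the truncated log text and are not verified against the actual `gh run list --json` schema.

# Sketch of the calculate_stats() step from the truncated log above; field
# names and the duration unit (assumed ms) come from the log, not the gh docs.
from datetime import datetime, timedelta, timezone


def calculate_stats(runs, start_date):
    """Success/failure counts, success rate, and average duration in seconds."""
    filtered = [
        r for r in runs
        if datetime.fromisoformat(r["createdAt"].replace("Z", "+00:00")) >= start_date
    ]
    total = len(filtered)
    if total == 0:
        return {"total_runs": 0, "successful_runs": 0, "failed_runs": 0,
                "success_rate": 0.0, "average_duration_seconds": 0.0}
    successes = sum(1 for r in filtered if r.get("conclusion") == "success")
    failures = sum(1 for r in filtered if r.get("conclusion") == "failure")
    durations = [r["duration"] for r in filtered if r.get("duration")]  # assumed ms
    avg_seconds = sum(durations) / len(durations) / 1000.0 if durations else 0.0
    return {"total_runs": total, "successful_runs": successes, "failed_runs": failures,
            "success_rate": round(successes / total * 100, 2),
            "average_duration_seconds": round(avg_seconds, 2)}


start = datetime.now(timezone.utc) - timedelta(days=14)
print(calculate_stats(
    [{"createdAt": "2026-01-02T00:00:00Z", "conclusion": "success", "duration": 90000}],
    start))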
agent
Line 14713 (pattern: Generic ERROR messages): matched a near-identical variant of the statistics script that requests `durationMs` instead of `duration`, sorts workflows by total runs, and builds the same metadata/workflows payload. Also truncated in the raw log.
agent
Line 10647 (pattern: Generic ERROR messages): matched a third variant that filters .lock.yml workflows, limits run history to 100 per workflow, computes the same per-workflow statistics over the 14-day window, and writes the results to /tmp/gh-aw/agent. Truncated in the raw log.
agent
Line 10020 (pattern: Generic ERROR messages): matched error handling in a python3 heredoc run from the repository root (description: "Collect PR data from gh-aw repo"), again driving the gh CLI via subprocess. Truncated and JSON-escaped in the raw log.
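The "Fetch recent PRs" and "Collect PR data" heredocs flagged above share the same shape: drive the gh CLI from Python via subprocess and parse its JSON output. A hedged sketch follows; the subprocess/json imports and the exception handling appear in the truncated logs, while the exact gh subcommand and field list here are illustrative.

# Sketch of a subprocess-based PR fetch; the field list is illustrative, not
# copied from the truncated heredoc.
import json
import subprocess
import sys


def fetch_recent_prs(repo="githubnext/gh-aw", limit=50):
    cmd = ["gh", "pr", "list", "--repo", repo, "--state", "all",
           "--limit", str(limit), "--json", "number,title,state,createdAt"]
    try:
        result = subprocess.run(cmd, capture_output=True, text=True, check=True)
        return json.loads(result.stdout)
    except subprocess.CalledProcessError as e:
        print(f"Exception: {e.stderr}", file=sys.stderr)
        return []


if __name__ == "__main__":
    prs = fetch_recent_prs()
    print(f"Fetched {len(prs)} PRs")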
agent
Line 111: MCP error -32000: Connection closed (Pattern: Generic ERROR messages, Raw log: 2026-01-03T04:57:00.209Z [ERROR] Failed to start MCP client for agentic_workflows: McpError: MCP error -32000: Connectio...)
agent
Error: EACCES: permission denied, open '/tmp/gh-aw/sandbox/firewall/logs/access.log'
agent
An error has occurred while creating the zip file for upload
agent
The action 'Execute GitHub Copilot CLI' has timed out after 30 minutes.
conclusion
Error reading agent output file: ENOENT: no such file or directory, open '/tmp/gh-aw/safeoutputs/agent_output.json'
conclusion
Error reading agent output file: ENOENT: no such file or directory, open '/tmp/gh-aw/safeoutputs/agent_output.json'
conclusion
Unable to download artifact(s): Artifact not found for name: agent-output. Please ensure that your artifact is not expired and the artifact was uploaded using a compatible version of toolkit/upload-artifact. For more information, visit the GitHub Artifacts FAQ: https://github.com/actions/toolkit/blob/main/packages/artifact/docs/faq.md
agent
Line 41740: flagged text "Failed to fetch runs for workflow {workflow_id}: {e}" from the error-handling path of a typed statistics script. This variant derives per-run duration from run_started_at/updated_at timestamps, computes per-workflow and summary success rates for the .lock.yml workflows in githubnext/gh-aw over a 14-day window, and writes workflow_runs.json to the repository working directory. The escaped script is cut off in the raw log.
agent
Line 41518: the same flagged text and the same truncated script as the previous warning.
agent
Line 41415: the same flagged text and the same truncated script as the previous two warnings.
agent
Line 37616: flagged text "Error fetching runs for workflow {workflow_id}: {e}" from a further variant that filters runs to a date window (filter_runs_by_date), computes success/failure counts, success rate, and average duration from durationMs, and reports the actual date range of the runs (get_date_range). Truncated in the raw log.
agent
Line 37485: the same flagged text and truncated script as the previous warning, JSON-escaped.
agent
Line 35112: the same flagged text and truncated script as above.
agent
Line 34803: the same flagged text and truncated script as above.
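The filter_runs_by_date() helper named in this group of warnings can also be sketched from the visible fragments; the createdAt field and the 'Z' to '+00:00' conversion appear verbatim in the log, the rest is a reconstruction.

# Sketch of filter_runs_by_date(): keep only runs whose createdAt falls inside
# a 14-day window ending now (UTC).
from datetime import datetime, timedelta, timezone


def filter_runs_by_date(runs, start_date, end_date):
    kept = []
    for run in runs:
        created = datetime.fromisoformat(run["createdAt"].replace("Z", "+00:00"))
        if start_date <= created <= end_date:
            kept.append(run)
    return kept


end = datetime.now(timezone.utc)
start = end - timedelta(days=14)
sample = [{"createdAt": (end - timedelta(days=3)).strftime("%Y-%m-%dT%H:%M:%SZ")}]
print(len(filter_runs_by_date(sample, start, end)))  # 1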
agent
Line 17358 (pattern: Generic WARNING messages): flagged "Failed to fetch runs for {workflow_name}" in a heredoc that writes /tmp/collect_stats.py; the script saves its metadata/workflows payload to /tmp/gh-aw/agent/workflow_runs.json. Truncated and JSON-escaped in the raw log.
agent
Line 14713 (pattern: Generic WARNING messages): flagged "Failed to fetch runs for workflow {workflow_id}" in a heredoc that writes and immediately runs /tmp/collect_stats.py (the durationMs variant), saving results to /tmp/gh-aw/agent/workflow_runs.json. Truncated and JSON-escaped in the raw log.
agent
Line 10647 (pattern: Generic WARNING messages): flagged "Could not fetch runs for {workflow_name}" in a heredoc that writes and runs /tmp/collect_workflow_data.py, saving per-workflow statistics and metadata to /tmp/gh-aw/agent/workflow_runs.json. Truncated and JSON-escaped in the raw log.
agent
Failed to process file /tmp/gh-aw/sandbox/firewall/logs/cache.log: EACCES: permission denied, open '/tmp/gh-aw/sandbox/firewall/logs/cache.log'
agent
Failed to process file /tmp/gh-aw/sandbox/firewall/logs/access.log: EACCES: permission denied, open '/tmp/gh-aw/sandbox/firewall/logs/access.log'
agent
Optional runtime import file not found: .github/shared-instructions.md
agent
GitHub MCP lockdown mode enabled for public repository. This prevents the GitHub token from accessing private repositories.
pre_activation, activation, agent, conclusion
Each of these four jobs emitted the same ten setup notices:
Copying activation files to /tmp/gh-aw/actions
Created directory: /tmp/gh-aw/actions
Found 263 files in /home/runner/work/gh-aw/gh-aw/actions/setup/js
Successfully copied 273 files to /tmp/gh-aw/actions
Copying prompt markdown files to /tmp/gh-aw/prompts
Successfully copied 5 prompt files to /tmp/gh-aw/prompts
Copying safe-inputs files to /tmp/gh-aw/safe-inputs
Successfully copied 16 safe-inputs files to /tmp/gh-aw/safe-inputs
Copying safe-outputs files to /tmp/gh-aw/safeoutputs
Successfully copied 29 safe-outputs files to /tmp/gh-aw/safeoutputs

Artifacts

Produced during runtime
Name | Size | Digest
agent-stdio.log | 11.7 KB | sha256:26e37da37b37e67a47d45ec46f216468bef6af3339087fd52485f8f6ce5c302c
aw-info | 565 Bytes | sha256:540456b1702f1d28c0e6e152ed819669c66f8dfb90dacd3a8cb3c4c9c572a104
mcp-logs | 2.88 KB | sha256:dde30d23cf539643592cafcdd2c65895d1f99a81a950d7e065f7e4197c7473c0
prompt | 8 KB | sha256:6fc905effc57296d31153cb0df320034ea390c99a045a3d213a8f0cb0438074f
safe-output | 440 Bytes | sha256:c07214e16cc4d72a2a6b1bfcf7fe2dd4e550372da33de2cba4f9d7922c3dbeae