diff --git a/.claude/skills/pm-architect/SKILL.md b/.claude/skills/pm-architect/SKILL.md index 240293635..6b5480872 100644 --- a/.claude/skills/pm-architect/SKILL.md +++ b/.claude/skills/pm-architect/SKILL.md @@ -1,6 +1,8 @@ --- name: pm-architect description: Expert project manager orchestrating backlog-curator, work-delegator, workstream-coordinator, and roadmap-strategist sub-skills. Coordinates complex software projects through delegation and strategic oversight. Activates when managing projects, coordinating work, or tracking overall progress. +explicit_triggers: + - /top5 --- # PM Architect Skill (Orchestrator) @@ -18,6 +20,8 @@ Activate when the user: - Wants to organize multiple projects or features - Needs help with project planning or execution - Says "I'm losing track" or "What should I work on?" +- Asks "What are the top priorities?" or invokes `/top5` +- Wants a quick daily standup or status overview ## Sub-Skills @@ -69,6 +73,16 @@ Sequential: work-delegator creates package, then workstream-coordinator tracks i Create .pm/ structure, invoke roadmap-strategist for roadmap generation. +### Pattern 5: Top 5 Priorities (`/top5`) + +Run `scripts/generate_top5.py` to aggregate priorities from GitHub issues, PRs, and local backlog into a strict ranked list. Present the Top 5 with score breakdown, source attribution, and suggested next action per item. + +Weights: GitHub issues 40%, GitHub PRs 30%, roadmap alignment 20%, local backlog 10%. + +### Pattern 6: Daily Standup + +Run `scripts/generate_daily_status.py` to produce a cross-project status report. Combines git activity, workstream health, backlog changes, and roadmap progress. + ## Philosophy Alignment - **Ruthless Simplicity**: Thin orchestrator (< 200 lines), complexity in sub-skills @@ -77,7 +91,12 @@ Create .pm/ structure, invoke roadmap-strategist for roadmap generation. ## Scripts -Orchestrator owns `scripts/manage_state.py` for basic operations. 
+Orchestrator owns these scripts: +- `scripts/manage_state.py` — Basic .pm/ state operations (init, add, update, list) +- `scripts/generate_top5.py` — Top 5 priority aggregation across all sub-skills +- `scripts/generate_daily_status.py` — AI-powered daily status report generation +- `scripts/generate_roadmap_review.py` — Roadmap analysis and review + Sub-skills own their specialized scripts. ## Success Criteria diff --git a/.claude/skills/pm-architect/scripts/generate_top5.py b/.claude/skills/pm-architect/scripts/generate_top5.py new file mode 100644 index 000000000..829d3b0b1 --- /dev/null +++ b/.claude/skills/pm-architect/scripts/generate_top5.py @@ -0,0 +1,584 @@ +#!/usr/bin/env python3 +"""Aggregate priorities across GitHub accounts into a strict Top 5 ranked list. + +Queries GitHub issues and PRs across configured accounts/repos, scores them +by priority labels, staleness, blocking status, and roadmap alignment. + +Falls back to .pm/ YAML state if GitHub is unavailable or for enrichment. + +Usage: + python generate_top5.py [--project-root PATH] [--sources PATH] + +Returns JSON with top 5 priorities. 
+""" + +import argparse +import json +import subprocess +import sys +from datetime import UTC, datetime +from pathlib import Path +from typing import Any + +import yaml + + +# Aggregation weights +WEIGHT_ISSUES = 0.40 +WEIGHT_PRS = 0.30 +WEIGHT_ROADMAP = 0.20 +WEIGHT_LOCAL = 0.10 # .pm/ overrides + +TOP_N = 5 + +# Label-to-priority mapping +PRIORITY_LABELS = { + "critical": 1.0, + "priority:critical": 1.0, + "high": 0.9, + "priority:high": 0.9, + "bug": 0.8, + "medium": 0.6, + "priority:medium": 0.6, + "enhancement": 0.5, + "feature": 0.5, + "low": 0.3, + "priority:low": 0.3, +} + + +def load_yaml(path: Path) -> dict[str, Any]: + """Load YAML file safely.""" + if not path.exists(): + return {} + with open(path) as f: + return yaml.safe_load(f) or {} + + +def load_sources(sources_path: Path) -> list[dict]: + """Load GitHub source configuration.""" + data = load_yaml(sources_path) + return data.get("github", []) + + +def run_gh(args: list[str], account: str | None = None) -> str | None: + """Run a gh CLI command, optionally switching account first. + + Returns stdout on success, None on failure. 
+ """ + if account: + switch = subprocess.run( + ["gh", "auth", "switch", "--user", account], + capture_output=True, text=True, timeout=10, + ) + if switch.returncode != 0: + return None + + try: + result = subprocess.run( + ["gh"] + args, + capture_output=True, text=True, timeout=30, + ) + if result.returncode != 0: + return None + return result.stdout.strip() + except (subprocess.TimeoutExpired, FileNotFoundError): + return None + + +def get_current_gh_account() -> str | None: + """Get the currently active gh account.""" + try: + result = subprocess.run( + ["gh", "api", "user", "--jq", ".login"], + capture_output=True, text=True, timeout=10, + ) + if result.returncode == 0 and result.stdout.strip(): + return result.stdout.strip() + except (subprocess.TimeoutExpired, FileNotFoundError): + pass + return None + + +def fetch_github_issues(account: str, repos: list[str]) -> list[dict]: + """Fetch open issues for an account's repos from GitHub.""" + candidates = [] + + # Use search API to get all issues at once + repo_qualifiers = " ".join(f"repo:{r}" if "/" in r else f"repo:{account}/{r}" for r in repos) + query = f"is:open is:issue {repo_qualifiers}" + + jq_filter = ( + '.items[] | {' + 'repo: (.repository_url | split("/") | .[-2:] | join("/")),' + 'title: .title,' + 'labels: [.labels[].name],' + 'created: .created_at,' + 'updated: .updated_at,' + 'number: .number,' + 'comments: .comments' + '}' + ) + + output = run_gh( + ["api", "search/issues", "--method", "GET", + "-f", f"q={query}", "-f", "per_page=50", + "--jq", jq_filter], + account=account, + ) + + if not output: + return [] + + now = datetime.now(UTC) + + for line in output.strip().splitlines(): + try: + item = json.loads(line) + except json.JSONDecodeError: + continue + + # Score by labels + labels = [lbl.lower() for lbl in item.get("labels", [])] + priority_score = 0.5 # default + for label in labels: + if label in PRIORITY_LABELS: + priority_score = max(priority_score, PRIORITY_LABELS[label]) + + # 
Staleness boost: older updated = needs attention + try: + updated = datetime.fromisoformat(item["updated"].replace("Z", "+00:00")) + days_stale = (now - updated).total_seconds() / 86400 + except (ValueError, KeyError): + days_stale = 0 + + staleness_score = min(days_stale / 14.0, 1.0) # Max at 2 weeks + + # Comment activity: more comments = more discussion = potentially blocked + comments = item.get("comments", 0) + activity_score = min(comments / 10.0, 1.0) + + raw_score = (priority_score * 0.50 + staleness_score * 0.30 + activity_score * 0.20) * 100 + + # Rationale + reasons = [] + if priority_score >= 0.8: + reasons.append(f"labeled {', '.join(lbl for lbl in labels if lbl in PRIORITY_LABELS)}") + if days_stale > 7: + reasons.append(f"stale {days_stale:.0f}d") + if comments > 3: + reasons.append(f"{comments} comments") + if not reasons: + reasons.append("open issue") + + repo = item.get("repo", "") + candidates.append({ + "title": item["title"], + "source": "github_issue", + "raw_score": round(raw_score, 1), + "score_breakdown": { + "label_priority": round(priority_score, 2), + "staleness": round(staleness_score, 2), + "activity": round(activity_score, 2), + }, + "rationale": ", ".join(reasons), + "item_id": f"{repo}#{item['number']}", + "priority": "HIGH" if priority_score >= 0.8 else "MEDIUM" if priority_score >= 0.5 else "LOW", + "repo": repo, + "account": account, + "url": f"https://github.com/{repo}/issues/{item['number']}", + "labels": item.get("labels", []), + "created": item.get("created", ""), + "updated": item.get("updated", ""), + "days_stale": round(days_stale, 1), + "comments": comments, + }) + + return candidates + + +def fetch_github_prs(account: str, repos: list[str]) -> list[dict]: + """Fetch open PRs for an account's repos from GitHub.""" + candidates = [] + + repo_qualifiers = " ".join(f"repo:{r}" if "/" in r else f"repo:{account}/{r}" for r in repos) + query = f"is:open is:pr {repo_qualifiers}" + + jq_filter = ( + '.items[] | {' + 'repo: 
(.repository_url | split("/") | .[-2:] | join("/")),' + 'title: .title,' + 'labels: [.labels[].name],' + 'created: .created_at,' + 'updated: .updated_at,' + 'number: .number,' + 'draft: .draft,' + 'comments: .comments' + '}' + ) + + output = run_gh( + ["api", "search/issues", "--method", "GET", + "-f", f"q={query}", "-f", "per_page=50", + "--jq", jq_filter], + account=account, + ) + + if not output: + return [] + + now = datetime.now(UTC) + + for line in output.strip().splitlines(): + try: + item = json.loads(line) + except json.JSONDecodeError: + continue + + is_draft = item.get("draft", False) + + # PRs waiting for review are higher priority than drafts + base_score = 0.4 if is_draft else 0.7 + + # Labels boost + labels = [lbl.lower() for lbl in item.get("labels", [])] + for label in labels: + if label in PRIORITY_LABELS: + base_score = max(base_score, PRIORITY_LABELS[label]) + + # Staleness: PRs waiting for review get more urgent over time + try: + updated = datetime.fromisoformat(item["updated"].replace("Z", "+00:00")) + days_stale = (now - updated).total_seconds() / 86400 + except (ValueError, KeyError): + days_stale = 0 + + staleness_score = min(days_stale / 7.0, 1.0) # PRs stale faster (1 week max) + + raw_score = (base_score * 0.60 + staleness_score * 0.40) * 100 + + reasons = [] + if is_draft: + reasons.append("draft PR") + else: + reasons.append("awaiting review") + if days_stale > 3: + reasons.append(f"stale {days_stale:.0f}d") + if labels: + relevant = [lbl for lbl in labels if lbl in PRIORITY_LABELS] + if relevant: + reasons.append(f"labeled {', '.join(relevant)}") + + repo = item.get("repo", "") + candidates.append({ + "title": item["title"], + "source": "github_pr", + "raw_score": round(raw_score, 1), + "score_breakdown": { + "base_priority": round(base_score, 2), + "staleness": round(staleness_score, 2), + }, + "rationale": ", ".join(reasons), + "item_id": f"{repo}#{item['number']}", + "priority": "HIGH" if base_score >= 0.8 else "MEDIUM", + "repo": 
repo, + "account": account, + "url": f"https://github.com/{repo}/pull/{item['number']}", + "labels": item.get("labels", []), + "created": item.get("created", ""), + "updated": item.get("updated", ""), + "days_stale": round(days_stale, 1), + "is_draft": is_draft, + }) + + return candidates + + +def load_local_overrides(pm_dir: Path) -> list[dict]: + """Load manually-added items from .pm/backlog for local enrichment.""" + backlog_data = load_yaml(pm_dir / "backlog" / "items.yaml") + items = backlog_data.get("items", []) + ready_items = [item for item in items if item.get("status") == "READY"] + + candidates = [] + priority_map = {"HIGH": 1.0, "MEDIUM": 0.6, "LOW": 0.3} + + for item in ready_items: + priority = item.get("priority", "MEDIUM") + priority_score = priority_map.get(priority, 0.5) + hours = item.get("estimated_hours", 4) + ease_score = 1.0 if hours < 2 else 0.6 if hours <= 6 else 0.3 + + raw_score = (priority_score * 0.60 + ease_score * 0.40) * 100 + + reasons = [] + if priority == "HIGH": + reasons.append("HIGH priority") + if hours < 2: + reasons.append("quick win") + if not reasons: + reasons.append("local backlog item") + + candidates.append({ + "title": item.get("title", item["id"]), + "source": "local", + "raw_score": round(raw_score, 1), + "rationale": ", ".join(reasons), + "item_id": item["id"], + "priority": priority, + }) + + return candidates + + +def extract_roadmap_goals(pm_dir: Path) -> list[str]: + """Extract strategic goals from roadmap markdown.""" + roadmap_path = pm_dir / "roadmap.md" + if not roadmap_path.exists(): + return [] + + text = roadmap_path.read_text() + goals = [] + + for line in text.splitlines(): + line = line.strip() + if line.startswith("## ") or line.startswith("### "): + goals.append(line.lstrip("#").strip()) + elif line.startswith("- "): + goals.append(line.removeprefix("- ").strip()) + elif line.startswith("* "): + goals.append(line.removeprefix("* ").strip()) + + return goals + + +def 
score_roadmap_alignment(candidate: dict, goals: list[str]) -> float: + """Score how well a candidate aligns with roadmap goals. Returns 0.0-1.0.""" + if not goals: + return 0.5 + + title_lower = candidate["title"].lower() + max_alignment = 0.0 + + for goal in goals: + goal_words = set(goal.lower().split()) + goal_words -= {"the", "a", "an", "and", "or", "to", "for", "in", "of", "is", "with"} + if not goal_words: + continue + + matching = sum(1 for word in goal_words if word in title_lower) + alignment = matching / len(goal_words) if goal_words else 0.0 + max_alignment = max(max_alignment, alignment) + + return min(max_alignment, 1.0) + + +def suggest_action(candidate: dict) -> str: + """Suggest a concrete next action for a candidate.""" + source = candidate["source"] + days_stale = candidate.get("days_stale", 0) + labels = candidate.get("labels", []) + + if source == "github_pr": + if candidate.get("is_draft"): + return "Finish draft or close if abandoned" + if days_stale > 14: + return "Merge, close, or rebase — stale >2 weeks" + if days_stale > 7: + return "Review and merge or request changes" + return "Review PR" + elif source == "github_issue": + if any(lbl in ("critical", "priority:critical") for lbl in labels): + return "Fix immediately — critical severity" + if any(lbl in ("bug",) for lbl in labels): + return "Investigate and fix bug" + if days_stale > 30: + return "Triage: still relevant? Close or reprioritize" + return "Work on issue or delegate" + elif source == "local": + return "Pick up from local backlog" + return "Review" + + +def aggregate_and_rank( + issues: list[dict], + prs: list[dict], + local: list[dict], + goals: list[str], + top_n: int = TOP_N, +) -> tuple[list[dict], list[dict]]: + """Aggregate candidates from all sources and rank by weighted score. + + Returns (top_n items, next 5 near-misses). 
+ """ + scored = [] + + source_weights = { + "github_issue": WEIGHT_ISSUES, + "github_pr": WEIGHT_PRS, + "local": WEIGHT_LOCAL, + } + + all_candidates = issues + prs + local + + for candidate in all_candidates: + source = candidate["source"] + source_weight = source_weights.get(source, 0.25) + raw = candidate["raw_score"] + + alignment = score_roadmap_alignment(candidate, goals) + final_score = (source_weight * raw) + (WEIGHT_ROADMAP * alignment * 100) + + entry = { + "title": candidate["title"], + "source": candidate["source"], + "score": round(final_score, 1), + "raw_score": candidate["raw_score"], + "source_weight": source_weight, + "rationale": candidate["rationale"], + "item_id": candidate.get("item_id", ""), + "priority": candidate.get("priority", "MEDIUM"), + "alignment": round(alignment, 2), + "action": suggest_action(candidate), + } + # Preserve all metadata from the candidate + for key in ("url", "repo", "account", "labels", "created", "updated", + "days_stale", "comments", "is_draft", "score_breakdown"): + if key in candidate: + entry[key] = candidate[key] + + scored.append(entry) + + priority_order = {"HIGH": 0, "MEDIUM": 1, "LOW": 2} + scored.sort(key=lambda x: (-x["score"], priority_order.get(x["priority"], 1))) + + top = scored[:top_n] + for i, item in enumerate(top): + item["rank"] = i + 1 + + near_misses = scored[top_n:top_n + 5] + for i, item in enumerate(near_misses): + item["rank"] = top_n + i + 1 + + return top, near_misses + + +def build_repo_summary(all_candidates: list[dict]) -> dict: + """Build a per-repo, per-account summary of open work.""" + repos: dict[str, dict] = {} + accounts: dict[str, dict] = {} + + for c in all_candidates: + repo = c.get("repo", "local") + account = c.get("account", "local") + + if repo not in repos: + repos[repo] = {"issues": 0, "prs": 0, "high_priority": 0} + if account not in accounts: + accounts[account] = {"issues": 0, "prs": 0, "repos": set()} + + if c["source"] == "github_issue": + repos[repo]["issues"] += 
1 + accounts[account]["issues"] += 1 + elif c["source"] == "github_pr": + repos[repo]["prs"] += 1 + accounts[account]["prs"] += 1 + + if c.get("priority") == "HIGH": + repos[repo]["high_priority"] += 1 + + accounts[account]["repos"].add(repo) + + # Convert sets to lists for JSON serialization + for a in accounts.values(): + a["repos"] = sorted(a["repos"]) + + # Sort repos by total open items descending + sorted_repos = dict(sorted(repos.items(), key=lambda x: -(x[1]["issues"] + x[1]["prs"]))) + + return {"by_repo": sorted_repos, "by_account": accounts} + + +def generate_top5(project_root: Path, sources_path: Path | None = None) -> dict: + """Generate the Top 5 priority list from GitHub + local state.""" + pm_dir = project_root / ".pm" + + if sources_path is None: + sources_path = pm_dir / "sources.yaml" + + # Load GitHub sources config + sources = load_sources(sources_path) + + # Remember original account to restore after + original_account = get_current_gh_account() + + # Fetch from GitHub + all_issues = [] + all_prs = [] + accounts_queried = [] + + for source in sources: + account = source.get("account", "") + repos = source.get("repos", []) + if not account or not repos: + continue + + accounts_queried.append(account) + all_issues.extend(fetch_github_issues(account, repos)) + all_prs.extend(fetch_github_prs(account, repos)) + + # Restore original account + if original_account and accounts_queried: + run_gh(["auth", "switch", "--user", original_account]) + + # Load local overrides + local = [] + if pm_dir.exists(): + local = load_local_overrides(pm_dir) + + # Load roadmap goals + goals = extract_roadmap_goals(pm_dir) if pm_dir.exists() else [] + + # Aggregate and rank + all_candidates = all_issues + all_prs + local + top5, near_misses = aggregate_and_rank(all_issues, all_prs, local, goals) + summary = build_repo_summary(all_candidates) + + return { + "top5": top5, + "near_misses": near_misses, + "summary": summary, + "sources": { + "github_issues": 
len(all_issues), + "github_prs": len(all_prs), + "local_items": len(local), + "roadmap_goals": len(goals), + "accounts": accounts_queried, + }, + "total_candidates": len(all_candidates), + } + + +def main(): + """Main entry point.""" + parser = argparse.ArgumentParser(description="Generate Top 5 priorities from GitHub + local state") + parser.add_argument( + "--project-root", type=Path, default=Path.cwd(), help="Project root directory" + ) + parser.add_argument( + "--sources", type=Path, default=None, help="Path to sources.yaml (default: .pm/sources.yaml)" + ) + + args = parser.parse_args() + + try: + result = generate_top5(args.project_root, args.sources) + print(json.dumps(result, indent=2)) + return 0 + except Exception as e: + print(json.dumps({"error": str(e)}), file=sys.stderr) + return 1 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/.claude/skills/pm-architect/scripts/tests/agentic/test-top5-error-handling.yaml b/.claude/skills/pm-architect/scripts/tests/agentic/test-top5-error-handling.yaml new file mode 100644 index 000000000..1e281b6e1 --- /dev/null +++ b/.claude/skills/pm-architect/scripts/tests/agentic/test-top5-error-handling.yaml @@ -0,0 +1,88 @@ +# Outside-in test for /top5 error handling +# Validates generate_top5.py handles failure modes gracefully: +# invalid sources, missing gh CLI, malformed YAML. + +scenario: + name: "Top 5 Priorities - Error Handling" + description: | + Verifies that generate_top5.py degrades gracefully when GitHub is + unreachable, sources.yaml is malformed, or the project root is invalid. + The script should always return valid JSON, never crash. 
+ type: cli + level: 2 + tags: [cli, error-handling, top5, pm-architect, resilience] + + prerequisites: + - "Python 3.11+ is available" + - "PyYAML is installed" + + steps: + # Test 1: Non-existent project root (no .pm/ dir) + - action: launch + target: "python" + args: + - ".claude/skills/pm-architect/scripts/generate_top5.py" + - "--project-root" + - "/tmp/top5-test-nonexistent-path-12345" + description: "Run with non-existent project root" + timeout: 15s + + - action: verify_output + contains: '"top5"' + description: "Still returns valid JSON with top5 key" + + - action: verify_output + contains: '"total_candidates": 0' + description: "Reports zero candidates when no sources available" + + - action: verify_exit_code + expected: 0 + description: "Exits cleanly even with missing project root" + + # Test 2: Malformed sources.yaml + - action: launch + target: "bash" + args: + - "-c" + - | + TMPDIR=$(mktemp -d) && + mkdir -p "$TMPDIR/.pm" && + echo "not: [valid: yaml: {{{{" > "$TMPDIR/.pm/sources.yaml" && + python .claude/skills/pm-architect/scripts/generate_top5.py --project-root "$TMPDIR" --sources "$TMPDIR/.pm/sources.yaml" + description: "Run with malformed sources.yaml" + timeout: 15s + + # Script should handle YAML parse errors (may return error JSON or empty results) + - action: verify_exit_code + expected_one_of: [0, 1] + description: "Exits with 0 or 1, never crashes with traceback" + + # Test 3: Empty sources.yaml (valid but no accounts) + - action: launch + target: "bash" + args: + - "-c" + - | + TMPDIR=$(mktemp -d) && + mkdir -p "$TMPDIR/.pm" && + echo "github: []" > "$TMPDIR/.pm/sources.yaml" && + python .claude/skills/pm-architect/scripts/generate_top5.py --project-root "$TMPDIR" --sources "$TMPDIR/.pm/sources.yaml" + description: "Run with empty sources list" + timeout: 15s + + - action: verify_output + contains: '"top5"' + description: "Returns valid JSON with empty top5" + + - action: verify_output + contains: '"total_candidates": 0' + description: 
"Reports zero candidates" + + - action: verify_exit_code + expected: 0 + description: "Exits cleanly with empty sources" + + cleanup: + - action: stop_application + force: true + description: "Ensure all processes are terminated" diff --git a/.claude/skills/pm-architect/scripts/tests/agentic/test-top5-local-overrides.yaml b/.claude/skills/pm-architect/scripts/tests/agentic/test-top5-local-overrides.yaml new file mode 100644 index 000000000..3d763c94b --- /dev/null +++ b/.claude/skills/pm-architect/scripts/tests/agentic/test-top5-local-overrides.yaml @@ -0,0 +1,97 @@ +# Outside-in test for /top5 local backlog integration +# Validates generate_top5.py incorporates .pm/backlog items and roadmap goals +# into the priority ranking alongside (or instead of) GitHub data. + +scenario: + name: "Top 5 Priorities - Local Overrides and Roadmap Alignment" + description: | + Verifies that generate_top5.py reads .pm/backlog/items.yaml for local + priorities and .pm/roadmap.md for strategic alignment scoring. + Tests the full aggregation pipeline without requiring GitHub access. 
+ type: cli + level: 2 + tags: [cli, integration, top5, pm-architect, local, roadmap] + + prerequisites: + - "Python 3.11+ is available" + - "PyYAML is installed" + + steps: + # Setup local .pm/ state with backlog items and roadmap + - action: launch + target: "bash" + args: + - "-c" + - | + TMPDIR=$(mktemp -d) && + mkdir -p "$TMPDIR/.pm/backlog" && + cat > "$TMPDIR/.pm/backlog/items.yaml" << 'BACKLOG' + items: + - id: "local-001" + title: "Fix authentication timeout bug" + status: "READY" + priority: "HIGH" + estimated_hours: 1 + - id: "local-002" + title: "Add dashboard metrics" + status: "READY" + priority: "MEDIUM" + estimated_hours: 4 + - id: "local-003" + title: "Refactor logging module" + status: "IN_PROGRESS" + priority: "LOW" + estimated_hours: 8 + BACKLOG + cat > "$TMPDIR/.pm/roadmap.md" << 'ROADMAP' + ## Q1 Goals + ### Improve authentication reliability + - Fix timeout and retry logic + ### Add observability dashboard + - Metrics and monitoring + ROADMAP + cat > "$TMPDIR/.pm/sources.yaml" << 'SOURCES' + github: [] + SOURCES + python .claude/skills/pm-architect/scripts/generate_top5.py --project-root "$TMPDIR" --sources "$TMPDIR/.pm/sources.yaml" + description: "Run with local backlog items and roadmap goals, no GitHub sources" + timeout: 15s + + # Verify local items appear in output + - action: verify_output + contains: "Fix authentication timeout bug" + timeout: 5s + description: "HIGH priority READY item appears in results" + + - action: verify_output + contains: "Add dashboard metrics" + description: "MEDIUM priority READY item appears in results" + + # IN_PROGRESS items should NOT appear (only READY items are loaded) + - action: verify_output + not_contains: "Refactor logging module" + description: "IN_PROGRESS item is excluded (only READY items loaded)" + + # Verify source attribution + - action: verify_output + contains: '"source": "local"' + description: "Items attributed to local source" + + # Verify roadmap goals were loaded + - action: 
verify_output + contains: '"roadmap_goals"' + description: "Roadmap goals count present in sources" + + # Verify alignment scoring (auth bug should align with roadmap goal) + - action: verify_output + matches: '"alignment":\\s*[0-9]' + description: "Items have alignment scores" + + - action: verify_exit_code + expected: 0 + description: "Script exits cleanly" + + cleanup: + - action: stop_application + force: true + description: "Ensure process is terminated" diff --git a/.claude/skills/pm-architect/scripts/tests/agentic/test-top5-ranking.yaml b/.claude/skills/pm-architect/scripts/tests/agentic/test-top5-ranking.yaml new file mode 100644 index 000000000..4fac284f8 --- /dev/null +++ b/.claude/skills/pm-architect/scripts/tests/agentic/test-top5-ranking.yaml @@ -0,0 +1,104 @@ +# Outside-in test for /top5 ranking correctness +# Validates that output is strictly ranked by score descending, +# limited to 5 items, and each item has a rank field 1-5. + +scenario: + name: "Top 5 Priorities - Ranking and Limit Enforcement" + description: | + Verifies that generate_top5.py returns exactly TOP_N (5) items, + ranked by descending score, with rank fields 1 through 5. + Uses local backlog with >5 items to verify the limit. 
+ type: cli + level: 2 + tags: [cli, integration, top5, pm-architect, ranking] + + prerequisites: + - "Python 3.11+ is available" + - "PyYAML is installed" + + steps: + # Setup: 7 local items to verify only top 5 are returned + - action: launch + target: "bash" + args: + - "-c" + - | + TMPDIR=$(mktemp -d) && + mkdir -p "$TMPDIR/.pm/backlog" && + cat > "$TMPDIR/.pm/backlog/items.yaml" << 'BACKLOG' + items: + - id: "item-1" + title: "Critical security fix" + status: "READY" + priority: "HIGH" + estimated_hours: 1 + - id: "item-2" + title: "API rate limiting" + status: "READY" + priority: "HIGH" + estimated_hours: 2 + - id: "item-3" + title: "Database migration" + status: "READY" + priority: "MEDIUM" + estimated_hours: 4 + - id: "item-4" + title: "Update documentation" + status: "READY" + priority: "MEDIUM" + estimated_hours: 6 + - id: "item-5" + title: "Add unit tests" + status: "READY" + priority: "MEDIUM" + estimated_hours: 3 + - id: "item-6" + title: "Refactor config loader" + status: "READY" + priority: "LOW" + estimated_hours: 8 + - id: "item-7" + title: "Add logging headers" + status: "READY" + priority: "LOW" + estimated_hours: 10 + BACKLOG + cat > "$TMPDIR/.pm/sources.yaml" << 'SOURCES' + github: [] + SOURCES + python .claude/skills/pm-architect/scripts/generate_top5.py --project-root "$TMPDIR" --sources "$TMPDIR/.pm/sources.yaml" + description: "Run with 7 local items to verify top-5 limit" + timeout: 15s + + # Verify rank fields 1-5 exist + - action: verify_output + contains: '"rank": 1' + timeout: 5s + description: "First ranked item present" + + - action: verify_output + contains: '"rank": 5' + description: "Fifth ranked item present" + + # Verify rank 6 and 7 are NOT in output (limit enforced) + - action: verify_output + not_contains: '"rank": 6' + description: "No sixth rank (limit to 5)" + + - action: verify_output + not_contains: '"rank": 7' + description: "No seventh rank (limit to 5)" + + # Verify total_candidates reflects all 7 items considered + - 
action: verify_output + contains: '"total_candidates": 7' + description: "Total candidates count includes all 7 items" + + - action: verify_exit_code + expected: 0 + description: "Script exits cleanly" + + cleanup: + - action: stop_application + force: true + description: "Ensure process is terminated" diff --git a/.claude/skills/pm-architect/scripts/tests/agentic/test-top5-smoke.yaml b/.claude/skills/pm-architect/scripts/tests/agentic/test-top5-smoke.yaml new file mode 100644 index 000000000..d3d8ee24c --- /dev/null +++ b/.claude/skills/pm-architect/scripts/tests/agentic/test-top5-smoke.yaml @@ -0,0 +1,52 @@ +# Outside-in smoke test for /top5 priority aggregation +# Validates the generate_top5.py CLI produces valid JSON output +# with the expected structure from a user's perspective. + +scenario: + name: "Top 5 Priorities - Smoke Test" + description: | + Verifies that generate_top5.py runs successfully, produces valid JSON, + and contains the expected top-level keys (top5, sources, total_candidates). + Uses an empty project root so no GitHub calls are made. 
+ type: cli + level: 1 + tags: [cli, smoke, top5, pm-architect] + + prerequisites: + - "Python 3.11+ is available" + - "PyYAML is installed" + + steps: + # Run with empty project root (no .pm/ dir, no sources.yaml) + - action: launch + target: "python" + args: + - "scripts/generate_top5.py" + - "--project-root" + - "/tmp/top5-test-empty" + working_directory: ".claude/skills/pm-architect" + description: "Run generate_top5.py with empty project root" + timeout: 15s + + # Verify valid JSON output with expected keys + - action: verify_output + contains: '"top5"' + timeout: 5s + description: "Output contains top5 key" + + - action: verify_output + contains: '"sources"' + description: "Output contains sources key" + + - action: verify_output + contains: '"total_candidates"' + description: "Output contains total_candidates key" + + - action: verify_exit_code + expected: 0 + description: "Script exits cleanly with code 0" + + cleanup: + - action: stop_application + force: true + description: "Ensure process is terminated" diff --git a/.claude/skills/pm-architect/scripts/tests/agentic/test-top5-with-sources.yaml b/.claude/skills/pm-architect/scripts/tests/agentic/test-top5-with-sources.yaml new file mode 100644 index 000000000..a76e52d4a --- /dev/null +++ b/.claude/skills/pm-architect/scripts/tests/agentic/test-top5-with-sources.yaml @@ -0,0 +1,83 @@ +# Outside-in test for /top5 with configured sources +# Validates generate_top5.py queries GitHub when sources.yaml is provided, +# produces ranked output with score breakdown and source attribution. + +scenario: + name: "Top 5 Priorities - GitHub Source Aggregation" + description: | + Verifies that generate_top5.py correctly reads a sources.yaml config, + queries GitHub for issues and PRs, aggregates scores, and returns + a ranked list with proper source attribution and metadata. + Requires gh CLI authenticated with at least one account. 
+ type: cli + level: 2 + tags: [cli, integration, top5, pm-architect, github] + + prerequisites: + - "Python 3.11+ is available" + - "PyYAML is installed" + - "gh CLI is authenticated" + - "Network access to GitHub API" + + environment: + variables: + GH_PAGER: "" + + steps: + # Setup: create a minimal sources.yaml pointing to a known public repo + - action: launch + target: "bash" + args: + - "-c" + - | + TMPDIR=$(mktemp -d) && + mkdir -p "$TMPDIR/.pm" && + cat > "$TMPDIR/.pm/sources.yaml" << 'SOURCES' + github: + - account: rysweet + repos: + - amplihack + SOURCES + python .claude/skills/pm-architect/scripts/generate_top5.py --project-root "$TMPDIR" --sources "$TMPDIR/.pm/sources.yaml" + description: "Run generate_top5.py with sources pointing to amplihack repo" + timeout: 45s + + # Verify JSON structure + - action: verify_output + contains: '"top5"' + timeout: 5s + description: "Output has top5 array" + + - action: verify_output + contains: '"github_issues"' + description: "Sources breakdown includes github_issues count" + + - action: verify_output + contains: '"github_prs"' + description: "Sources breakdown includes github_prs count" + + - action: verify_output + contains: '"accounts"' + description: "Sources breakdown includes accounts queried" + + # Verify ranked items have required fields + - action: verify_output + matches: '"score":\\s*[0-9]' + description: "Items have numeric scores" + + - action: verify_output + matches: '"source":\\s*"github_(issue|pr)"' + description: "Items have source attribution" + + - action: verify_output + matches: '"rationale":' + description: "Items include rationale text" + + - action: verify_exit_code + expected: 0 + description: "Script exits cleanly" + + cleanup: + - action: stop_application + force: true + description: "Ensure process is terminated" diff --git a/.claude/skills/pm-architect/scripts/tests/conftest.py b/.claude/skills/pm-architect/scripts/tests/conftest.py index 40af58e5a..448aa9983 100644 --- 
a/.claude/skills/pm-architect/scripts/tests/conftest.py +++ b/.claude/skills/pm-architect/scripts/tests/conftest.py @@ -5,6 +5,7 @@ from unittest.mock import MagicMock import pytest +import yaml @pytest.fixture @@ -131,3 +132,129 @@ def sample_daily_status_output() -> str: 1. Prioritize design review for API refactoring 2. Address technical debt in authentication system """ + + +# --- Top 5 Priority Aggregation Fixtures --- + + +@pytest.fixture +def pm_dir(tmp_path: Path) -> Path: + """Create .pm/ directory structure with sample data.""" + pm = tmp_path / ".pm" + (pm / "backlog").mkdir(parents=True) + (pm / "workstreams").mkdir(parents=True) + (pm / "delegations").mkdir(parents=True) + return pm + + +@pytest.fixture +def sample_backlog_items() -> dict: + """Sample backlog items YAML data.""" + return { + "items": [ + { + "id": "BL-001", + "title": "Fix authentication bug", + "description": "Auth tokens expire prematurely", + "priority": "HIGH", + "estimated_hours": 2, + "status": "READY", + "tags": ["auth", "bug"], + "dependencies": [], + }, + { + "id": "BL-002", + "title": "Implement config parser", + "description": "Parse YAML and JSON config files", + "priority": "MEDIUM", + "estimated_hours": 4, + "status": "READY", + "tags": ["config", "core"], + "dependencies": [], + }, + { + "id": "BL-003", + "title": "Add logging framework", + "description": "Structured logging with JSON output", + "priority": "LOW", + "estimated_hours": 8, + "status": "READY", + "tags": ["infrastructure"], + "dependencies": ["BL-002"], + }, + { + "id": "BL-004", + "title": "Write API documentation", + "description": "Document all REST endpoints", + "priority": "MEDIUM", + "estimated_hours": 3, + "status": "READY", + "tags": ["docs"], + "dependencies": [], + }, + { + "id": "BL-005", + "title": "Database migration tool", + "description": "Automated schema migrations", + "priority": "HIGH", + "estimated_hours": 6, + "status": "READY", + "tags": ["database", "core"], + "dependencies": 
["BL-002"], + }, + { + "id": "BL-006", + "title": "Refactor test suite", + "description": "Improve test performance and coverage", + "priority": "MEDIUM", + "estimated_hours": 1, + "status": "IN_PROGRESS", + "tags": ["test"], + "dependencies": [], + }, + ] + } + + +@pytest.fixture +def populated_pm(pm_dir, sample_backlog_items): + """Create fully populated .pm/ directory.""" + with open(pm_dir / "backlog" / "items.yaml", "w") as f: + yaml.dump(sample_backlog_items, f) + + ws_data = { + "id": "ws-1", + "backlog_id": "BL-006", + "title": "Test Suite Refactor", + "agent": "builder", + "status": "RUNNING", + "last_activity": "2020-01-01T00:00:00Z", + } + with open(pm_dir / "workstreams" / "ws-1.yaml", "w") as f: + yaml.dump(ws_data, f) + + deleg_data = { + "id": "DEL-001", + "title": "Implement caching layer", + "status": "READY", + "backlog_id": "BL-002", + } + with open(pm_dir / "delegations" / "del-001.yaml", "w") as f: + yaml.dump(deleg_data, f) + + roadmap = """# Project Roadmap + +## Q1 Goals + +### Core Infrastructure +- Implement config parser +- Database migration tool +- Logging framework + +### Quality +- Test coverage above 80% +- API documentation complete +""" + (pm_dir / "roadmap.md").write_text(roadmap) + + return pm_dir diff --git a/.claude/skills/pm-architect/scripts/tests/test_generate_top5.py b/.claude/skills/pm-architect/scripts/tests/test_generate_top5.py new file mode 100644 index 000000000..8928e6a21 --- /dev/null +++ b/.claude/skills/pm-architect/scripts/tests/test_generate_top5.py @@ -0,0 +1,420 @@ +"""Tests for generate_top5.py - GitHub-native priority aggregation.""" + +import json +import sys +from pathlib import Path +from unittest.mock import patch + +import pytest +import yaml + +sys.path.insert(0, str(Path(__file__).parent.parent)) +from generate_top5 import ( + PRIORITY_LABELS, + aggregate_and_rank, + build_repo_summary, + extract_roadmap_goals, + fetch_github_issues, + fetch_github_prs, + generate_top5, + load_local_overrides, + 
load_sources, + score_roadmap_alignment, + suggest_action, +) + + +class TestLoadSources: + """Tests for sources.yaml loading.""" + + def test_no_sources_file(self, project_root): + """Returns empty list when sources.yaml doesn't exist.""" + result = load_sources(project_root / "sources.yaml") + assert result == [] + + def test_loads_github_sources(self, tmp_path): + """Parses sources.yaml correctly.""" + sources = { + "github": [ + {"account": "rysweet", "repos": ["amplihack", "azlin"]}, + {"account": "rysweet_microsoft", "repos": ["cloud-ecosystem-security/SedanDelivery"]}, + ] + } + path = tmp_path / "sources.yaml" + with open(path, "w") as f: + yaml.dump(sources, f) + + result = load_sources(path) + assert len(result) == 2 + assert result[0]["account"] == "rysweet" + assert result[1]["repos"] == ["cloud-ecosystem-security/SedanDelivery"] + + +class TestFetchGithubIssues: + """Tests for GitHub issue fetching (mocked).""" + + def test_returns_empty_on_gh_failure(self): + """Returns empty list when gh CLI fails.""" + with patch("generate_top5.run_gh", return_value=None): + result = fetch_github_issues("rysweet", ["amplihack"]) + assert result == [] + + def test_parses_issue_data(self): + """Correctly parses gh API JSON output with full metadata.""" + mock_output = json.dumps({ + "repo": "rysweet/amplihack", + "title": "Fix auth bug", + "labels": ["bug", "high"], + "created": "2026-03-01T00:00:00Z", + "updated": "2026-03-07T00:00:00Z", + "number": 123, + "comments": 5, + }) + with patch("generate_top5.run_gh", return_value=mock_output): + result = fetch_github_issues("rysweet", ["amplihack"]) + assert len(result) == 1 + item = result[0] + assert item["source"] == "github_issue" + assert item["priority"] == "HIGH" + assert item["url"] == "https://github.com/rysweet/amplihack/issues/123" + assert item["account"] == "rysweet" + assert item["labels"] == ["bug", "high"] + assert item["comments"] == 5 + assert "score_breakdown" in item + assert "label_priority" in 
item["score_breakdown"] + + def test_priority_from_labels(self): + """Labels correctly map to priority scores.""" + mock_output = json.dumps({ + "repo": "r/a", "title": "Critical issue", + "labels": ["critical"], "created": "2026-03-07T00:00:00Z", + "updated": "2026-03-07T00:00:00Z", "number": 1, "comments": 0, + }) + with patch("generate_top5.run_gh", return_value=mock_output): + result = fetch_github_issues("r", ["a"]) + assert result[0]["priority"] == "HIGH" + assert result[0]["score_breakdown"]["label_priority"] == 1.0 + + def test_staleness_boosts_score(self): + """Older issues score higher due to staleness.""" + fresh = json.dumps({ + "repo": "r/a", "title": "Fresh", "labels": [], + "created": "2026-03-07T00:00:00Z", "updated": "2026-03-07T00:00:00Z", + "number": 1, "comments": 0, + }) + stale = json.dumps({ + "repo": "r/a", "title": "Stale", "labels": [], + "created": "2026-01-01T00:00:00Z", "updated": "2026-01-01T00:00:00Z", + "number": 2, "comments": 0, + }) + with patch("generate_top5.run_gh", return_value=f"{fresh}\n{stale}"): + result = fetch_github_issues("r", ["a"]) + stale_item = next(c for c in result if "Stale" in c["title"]) + fresh_item = next(c for c in result if "Fresh" in c["title"]) + assert stale_item["raw_score"] > fresh_item["raw_score"] + assert stale_item["days_stale"] > fresh_item["days_stale"] + + +class TestFetchGithubPrs: + """Tests for GitHub PR fetching (mocked).""" + + def test_returns_empty_on_failure(self): + """Returns empty list when gh CLI fails.""" + with patch("generate_top5.run_gh", return_value=None): + result = fetch_github_prs("rysweet", ["amplihack"]) + assert result == [] + + def test_draft_pr_scores_lower(self): + """Draft PRs score lower than non-drafts.""" + draft = json.dumps({ + "repo": "r/a", "title": "Draft PR", "labels": [], + "created": "2026-03-07T00:00:00Z", "updated": "2026-03-07T00:00:00Z", + "number": 1, "draft": True, "comments": 0, + }) + ready = json.dumps({ + "repo": "r/a", "title": "Ready PR", 
"labels": [], + "created": "2026-03-07T00:00:00Z", "updated": "2026-03-07T00:00:00Z", + "number": 2, "draft": False, "comments": 0, + }) + with patch("generate_top5.run_gh", return_value=f"{draft}\n{ready}"): + result = fetch_github_prs("r", ["a"]) + draft_item = next(c for c in result if "Draft" in c["title"]) + ready_item = next(c for c in result if "Ready" in c["title"]) + assert ready_item["raw_score"] > draft_item["raw_score"] + assert draft_item["is_draft"] is True + assert ready_item["is_draft"] is False + + def test_pr_has_url_and_metadata(self): + """PRs include correct GitHub URL and metadata.""" + mock = json.dumps({ + "repo": "rysweet/amplihack", "title": "Fix stuff", "labels": ["bug"], + "created": "2026-03-07T00:00:00Z", "updated": "2026-03-07T00:00:00Z", + "number": 42, "draft": False, "comments": 0, + }) + with patch("generate_top5.run_gh", return_value=mock): + result = fetch_github_prs("rysweet", ["amplihack"]) + assert result[0]["url"] == "https://github.com/rysweet/amplihack/pull/42" + assert result[0]["account"] == "rysweet" + assert result[0]["labels"] == ["bug"] + + +class TestLoadLocalOverrides: + """Tests for local .pm/ backlog loading.""" + + def test_no_pm_dir(self, project_root): + """Returns empty when .pm doesn't exist.""" + result = load_local_overrides(project_root / ".pm") + assert result == [] + + def test_loads_ready_items(self, pm_dir): + """Loads READY items from backlog.""" + items = { + "items": [ + {"id": "BL-001", "title": "Task A", "status": "READY", "priority": "HIGH", "estimated_hours": 1}, + {"id": "BL-002", "title": "Task B", "status": "DONE", "priority": "HIGH"}, + ] + } + with open(pm_dir / "backlog" / "items.yaml", "w") as f: + yaml.dump(items, f) + + result = load_local_overrides(pm_dir) + assert len(result) == 1 + assert result[0]["source"] == "local" + assert result[0]["item_id"] == "BL-001" + + +class TestExtractRoadmapGoals: + """Tests for roadmap goal extraction.""" + + def test_no_roadmap(self, project_root): 
+ """Returns empty when no roadmap exists.""" + result = extract_roadmap_goals(project_root / ".pm") + assert result == [] + + def test_extracts_goals(self, populated_pm): + """Extracts goals from roadmap markdown.""" + goals = extract_roadmap_goals(populated_pm) + assert len(goals) > 0 + assert any("config" in g.lower() for g in goals) + + +class TestScoreRoadmapAlignment: + """Tests for roadmap alignment scoring.""" + + def test_no_goals_returns_neutral(self): + assert score_roadmap_alignment({"title": "X", "source": "github_issue"}, []) == 0.5 + + def test_matching_title_scores_high(self): + score = score_roadmap_alignment( + {"title": "Implement config parser", "source": "github_issue"}, + ["config parser implementation"], + ) + assert score > 0.0 + + def test_unrelated_title_scores_zero(self): + score = score_roadmap_alignment( + {"title": "Fix authentication bug", "source": "github_issue"}, + ["database migration tool"], + ) + assert score == 0.0 + + +class TestSuggestAction: + """Tests for action suggestion logic.""" + + def test_critical_issue(self): + action = suggest_action({"source": "github_issue", "labels": ["critical"]}) + assert "immediately" in action.lower() + + def test_bug_issue(self): + action = suggest_action({"source": "github_issue", "labels": ["bug"], "days_stale": 1}) + assert "bug" in action.lower() + + def test_stale_pr(self): + action = suggest_action({"source": "github_pr", "is_draft": False, "days_stale": 20}) + assert "stale" in action.lower() or "merge" in action.lower() + + def test_draft_pr(self): + action = suggest_action({"source": "github_pr", "is_draft": True, "days_stale": 1}) + assert "draft" in action.lower() + + def test_local_item(self): + action = suggest_action({"source": "local"}) + assert "backlog" in action.lower() + + +class TestAggregateAndRank: + """Tests for the core aggregation and ranking logic.""" + + def test_empty_input(self): + top, near = aggregate_and_rank([], [], [], []) + assert top == [] + assert near 
== [] + + def test_returns_max_5(self): + candidates = [ + {"title": f"Item {i}", "source": "github_issue", "raw_score": float(100 - i), + "rationale": "test", "item_id": f"#{i}", "priority": "MEDIUM"} + for i in range(10) + ] + top, near = aggregate_and_rank(candidates, [], [], []) + assert len(top) == 5 + assert len(near) == 5 + + def test_ranked_in_order(self): + issues = [ + {"title": "Low", "source": "github_issue", "raw_score": 30.0, + "rationale": "test", "item_id": "#1", "priority": "LOW"}, + {"title": "High", "source": "github_issue", "raw_score": 90.0, + "rationale": "test", "item_id": "#2", "priority": "HIGH"}, + ] + top, _ = aggregate_and_rank(issues, [], [], []) + assert top[0]["title"] == "High" + assert top[1]["title"] == "Low" + + def test_mixed_sources(self): + issues = [{"title": "Issue", "source": "github_issue", "raw_score": 80.0, + "rationale": "test", "item_id": "#1", "priority": "HIGH"}] + prs = [{"title": "PR", "source": "github_pr", "raw_score": 80.0, + "rationale": "test", "item_id": "#2", "priority": "HIGH", + "url": "https://github.com/r/a/pull/2", "repo": "r/a"}] + local = [{"title": "Local", "source": "local", "raw_score": 80.0, + "rationale": "test", "item_id": "BL-1", "priority": "MEDIUM"}] + top, _ = aggregate_and_rank(issues, prs, local, []) + assert top[0]["source"] == "github_issue" + assert top[1]["source"] == "github_pr" + assert top[2]["source"] == "local" + + def test_roadmap_alignment_boosts_score(self): + issues = [ + {"title": "Implement config parser", "source": "github_issue", "raw_score": 50.0, + "rationale": "test", "item_id": "#1", "priority": "MEDIUM"}, + {"title": "Fix random thing", "source": "github_issue", "raw_score": 50.0, + "rationale": "test", "item_id": "#2", "priority": "MEDIUM"}, + ] + top, _ = aggregate_and_rank(issues, [], [], ["config parser implementation"]) + config_item = next(r for r in top if "config" in r["title"].lower()) + other_item = next(r for r in top if "random" in r["title"].lower()) + 
assert config_item["score"] > other_item["score"] + + def test_tiebreak_by_priority(self): + issues = [ + {"title": "Low priority", "source": "github_issue", "raw_score": 50.0, + "rationale": "test", "item_id": "#1", "priority": "LOW"}, + {"title": "High priority", "source": "github_issue", "raw_score": 50.0, + "rationale": "test", "item_id": "#2", "priority": "HIGH"}, + ] + top, _ = aggregate_and_rank(issues, [], [], []) + assert top[0]["priority"] == "HIGH" + assert top[1]["priority"] == "LOW" + + def test_preserves_url_and_repo(self): + prs = [{"title": "PR", "source": "github_pr", "raw_score": 80.0, + "rationale": "test", "item_id": "#1", "priority": "MEDIUM", + "url": "https://github.com/r/a/pull/1", "repo": "r/a"}] + top, _ = aggregate_and_rank([], prs, [], []) + assert top[0]["url"] == "https://github.com/r/a/pull/1" + assert top[0]["repo"] == "r/a" + + def test_items_have_action(self): + issues = [{"title": "Bug", "source": "github_issue", "raw_score": 80.0, + "rationale": "test", "item_id": "#1", "priority": "HIGH", + "labels": ["bug"], "days_stale": 5}] + top, _ = aggregate_and_rank(issues, [], [], []) + assert "action" in top[0] + assert len(top[0]["action"]) > 0 + + def test_near_misses_returned(self): + candidates = [ + {"title": f"Item {i}", "source": "github_issue", "raw_score": float(100 - i), + "rationale": "test", "item_id": f"#{i}", "priority": "MEDIUM"} + for i in range(8) + ] + top, near = aggregate_and_rank(candidates, [], [], []) + assert len(top) == 5 + assert len(near) == 3 + assert near[0]["rank"] == 6 + + +class TestBuildRepoSummary: + """Tests for per-repo summary generation.""" + + def test_empty_candidates(self): + result = build_repo_summary([]) + assert result["by_repo"] == {} + assert result["by_account"] == {} + + def test_counts_by_repo(self): + candidates = [ + {"source": "github_issue", "repo": "r/a", "account": "x", "priority": "HIGH"}, + {"source": "github_issue", "repo": "r/a", "account": "x", "priority": "MEDIUM"}, + 
{"source": "github_pr", "repo": "r/a", "account": "x", "priority": "MEDIUM"}, + {"source": "github_issue", "repo": "r/b", "account": "x", "priority": "HIGH"}, + ] + result = build_repo_summary(candidates) + assert result["by_repo"]["r/a"]["issues"] == 2 + assert result["by_repo"]["r/a"]["prs"] == 1 + assert result["by_repo"]["r/a"]["high_priority"] == 1 + assert result["by_repo"]["r/b"]["issues"] == 1 + + def test_counts_by_account(self): + candidates = [ + {"source": "github_issue", "repo": "r/a", "account": "alice", "priority": "HIGH"}, + {"source": "github_pr", "repo": "r/b", "account": "bob", "priority": "MEDIUM"}, + ] + result = build_repo_summary(candidates) + assert result["by_account"]["alice"]["issues"] == 1 + assert result["by_account"]["bob"]["prs"] == 1 + assert "r/a" in result["by_account"]["alice"]["repos"] + + +class TestGenerateTop5: + """Tests for the main generate_top5 function.""" + + def test_no_sources_no_pm(self, project_root): + with patch("generate_top5.get_current_gh_account", return_value="rysweet"): + result = generate_top5(project_root) + assert result["top5"] == [] + assert result["near_misses"] == [] + assert result["total_candidates"] == 0 + + def test_github_failure_falls_back_to_local(self, populated_pm): + sources = {"github": [{"account": "test", "repos": ["test/repo"]}]} + sources_path = populated_pm / "sources.yaml" + with open(sources_path, "w") as f: + yaml.dump(sources, f) + + with patch("generate_top5.run_gh", return_value=None), \ + patch("generate_top5.get_current_gh_account", return_value="test"): + result = generate_top5(populated_pm.parent, sources_path) + assert result["sources"]["local_items"] > 0 + assert result["sources"]["github_issues"] == 0 + + def test_output_has_summary(self): + with patch("generate_top5.get_current_gh_account", return_value="test"), \ + patch("generate_top5.load_sources", return_value=[]): + result = generate_top5(Path("/nonexistent")) + assert "summary" in result + assert "near_misses" in 
result + + def test_items_have_required_fields(self): + issues = [{"title": "Test", "source": "github_issue", "raw_score": 80.0, + "rationale": "test", "item_id": "#1", "priority": "HIGH", + "url": "https://github.com/r/a/issues/1", "repo": "r/a"}] + top, _ = aggregate_and_rank(issues, [], [], []) + required = {"rank", "title", "source", "score", "rationale", "priority", "action", "alignment"} + for item in top: + assert required.issubset(item.keys()), f"Missing: {required - item.keys()}" + + +class TestPriorityLabels: + """Tests for label-to-priority mapping.""" + + def test_critical_is_highest(self): + assert PRIORITY_LABELS["critical"] == 1.0 + + def test_bug_is_high(self): + assert PRIORITY_LABELS["bug"] == 0.8 + + def test_enhancement_is_medium(self): + assert PRIORITY_LABELS["enhancement"] == 0.5 diff --git a/.gitignore b/.gitignore index ed46056ee..c7914c6f2 100644 --- a/.gitignore +++ b/.gitignore @@ -31,6 +31,7 @@ htmlcov/ **/.metadata/versions.json # Project specific +.pm/ .test_state.json .requirements_extraction_state.json test_requirements*.md diff --git a/amplifier-bundle/skills/pm-architect/SKILL.md b/amplifier-bundle/skills/pm-architect/SKILL.md new file mode 100644 index 000000000..6b5480872 --- /dev/null +++ b/amplifier-bundle/skills/pm-architect/SKILL.md @@ -0,0 +1,104 @@ +--- +name: pm-architect +description: Expert project manager orchestrating backlog-curator, work-delegator, workstream-coordinator, and roadmap-strategist sub-skills. Coordinates complex software projects through delegation and strategic oversight. Activates when managing projects, coordinating work, or tracking overall progress. +explicit_triggers: + - /top5 +--- + +# PM Architect Skill (Orchestrator) + +## Role + +You are the project manager orchestrating four specialized sub-skills to coordinate software development projects. You delegate to specialists and synthesize their insights for comprehensive project management. 
+ +## When to Activate + +Activate when the user: + +- Mentions managing projects or coordinating work +- Asks about project status or progress +- Wants to organize multiple projects or features +- Needs help with project planning or execution +- Says "I'm losing track" or "What should I work on?" +- Asks "What are the top priorities?" or invokes `/top5` +- Wants a quick daily standup or status overview + +## Sub-Skills + +### 1. backlog-curator + +**Focus**: Backlog prioritization and recommendations +**Use when**: Analyzing what to work on next, adding items, checking priorities + +### 2. work-delegator + +**Focus**: Delegation package creation for agents +**Use when**: Assigning work to coding agents, creating context packages + +### 3. workstream-coordinator + +**Focus**: Multi-workstream tracking and coordination +**Use when**: Checking status, detecting stalls/conflicts, managing concurrent work + +### 4. roadmap-strategist + +**Focus**: Strategic planning and goal alignment +**Use when**: Discussing goals, milestones, strategic direction, roadmap updates + +## Core Workflow + +When user requests project management help: + +1. **Understand intent**: Determine which sub-skill(s) to invoke +2. **Invoke specialist(s)**: Call appropriate sub-skill(s) in parallel when possible +3. **Synthesize results**: Combine insights from sub-skills +4. **Present cohesively**: Deliver unified response to user +5. **Recommend actions**: Suggest next steps + +## Orchestration Patterns + +### Pattern 1: What Should I Work On? + +Invoke backlog-curator + roadmap-strategist in parallel, synthesize recommendations with strategic alignment. + +### Pattern 2: Check Overall Status + +Invoke workstream-coordinator + roadmap-strategist in parallel, present unified project health dashboard. + +### Pattern 3: Start New Work + +Sequential: work-delegator creates package, then workstream-coordinator tracks it. 
+ +### Pattern 4: Initialize PM + +Create .pm/ structure, invoke roadmap-strategist for roadmap generation. + +### Pattern 5: Top 5 Priorities (`/top5`) + +Run `scripts/generate_top5.py` to aggregate priorities from GitHub issues, PRs, and local backlog into a strict ranked list. Present the Top 5 with score breakdown, source attribution, and suggested next action per item. + +Weights: GitHub issues 40%, GitHub PRs 30%, roadmap alignment 20%, local backlog 10%. + +### Pattern 6: Daily Standup + +Run `scripts/generate_daily_status.py` to produce a cross-project status report. Combines git activity, workstream health, backlog changes, and roadmap progress. + +## Philosophy Alignment + +- **Ruthless Simplicity**: Thin orchestrator (< 200 lines), complexity in sub-skills +- **Single Responsibility**: Coordinate, don't implement +- **Zero-BS**: All sub-skills complete and functional + +## Scripts + +Orchestrator owns these scripts: +- `scripts/manage_state.py` — Basic .pm/ state operations (init, add, update, list) +- `scripts/generate_top5.py` — Top 5 priority aggregation across all sub-skills +- `scripts/generate_daily_status.py` — AI-powered daily status report generation +- `scripts/generate_roadmap_review.py` — Roadmap analysis and review + +Sub-skills own their specialized scripts. + +## Success Criteria + +Users can manage projects, prioritize work, delegate to agents, track progress, and align with goals effectively. diff --git a/amplifier-bundle/skills/pm-architect/scripts/generate_top5.py b/amplifier-bundle/skills/pm-architect/scripts/generate_top5.py new file mode 100644 index 000000000..829d3b0b1 --- /dev/null +++ b/amplifier-bundle/skills/pm-architect/scripts/generate_top5.py @@ -0,0 +1,584 @@ +#!/usr/bin/env python3 +"""Aggregate priorities across GitHub accounts into a strict Top 5 ranked list. + +Queries GitHub issues and PRs across configured accounts/repos, scores them +by priority labels, staleness, blocking status, and roadmap alignment. 
+ +Falls back to .pm/ YAML state if GitHub is unavailable or for enrichment. + +Usage: + python generate_top5.py [--project-root PATH] [--sources PATH] + +Returns JSON with top 5 priorities. +""" + +import argparse +import json +import subprocess +import sys +from datetime import UTC, datetime +from pathlib import Path +from typing import Any + +import yaml + + +# Aggregation weights +WEIGHT_ISSUES = 0.40 +WEIGHT_PRS = 0.30 +WEIGHT_ROADMAP = 0.20 +WEIGHT_LOCAL = 0.10 # .pm/ overrides + +TOP_N = 5 + +# Label-to-priority mapping +PRIORITY_LABELS = { + "critical": 1.0, + "priority:critical": 1.0, + "high": 0.9, + "priority:high": 0.9, + "bug": 0.8, + "medium": 0.6, + "priority:medium": 0.6, + "enhancement": 0.5, + "feature": 0.5, + "low": 0.3, + "priority:low": 0.3, +} + + +def load_yaml(path: Path) -> dict[str, Any]: + """Load YAML file safely.""" + if not path.exists(): + return {} + with open(path) as f: + return yaml.safe_load(f) or {} + + +def load_sources(sources_path: Path) -> list[dict]: + """Load GitHub source configuration.""" + data = load_yaml(sources_path) + return data.get("github", []) + + +def run_gh(args: list[str], account: str | None = None) -> str | None: + """Run a gh CLI command, optionally switching account first. + + Returns stdout on success, None on failure. 
+ """ + if account: + switch = subprocess.run( + ["gh", "auth", "switch", "--user", account], + capture_output=True, text=True, timeout=10, + ) + if switch.returncode != 0: + return None + + try: + result = subprocess.run( + ["gh"] + args, + capture_output=True, text=True, timeout=30, + ) + if result.returncode != 0: + return None + return result.stdout.strip() + except (subprocess.TimeoutExpired, FileNotFoundError): + return None + + +def get_current_gh_account() -> str | None: + """Get the currently active gh account.""" + try: + result = subprocess.run( + ["gh", "api", "user", "--jq", ".login"], + capture_output=True, text=True, timeout=10, + ) + if result.returncode == 0 and result.stdout.strip(): + return result.stdout.strip() + except (subprocess.TimeoutExpired, FileNotFoundError): + pass + return None + + +def fetch_github_issues(account: str, repos: list[str]) -> list[dict]: + """Fetch open issues for an account's repos from GitHub.""" + candidates = [] + + # Use search API to get all issues at once + repo_qualifiers = " ".join(f"repo:{r}" if "/" in r else f"repo:{account}/{r}" for r in repos) + query = f"is:open is:issue {repo_qualifiers}" + + jq_filter = ( + '.items[] | {' + 'repo: (.repository_url | split("/") | .[-2:] | join("/")),' + 'title: .title,' + 'labels: [.labels[].name],' + 'created: .created_at,' + 'updated: .updated_at,' + 'number: .number,' + 'comments: .comments' + '}' + ) + + output = run_gh( + ["api", "search/issues", "--method", "GET", + "-f", f"q={query}", "-f", "per_page=50", + "--jq", jq_filter], + account=account, + ) + + if not output: + return [] + + now = datetime.now(UTC) + + for line in output.strip().splitlines(): + try: + item = json.loads(line) + except json.JSONDecodeError: + continue + + # Score by labels + labels = [lbl.lower() for lbl in item.get("labels", [])] + priority_score = 0.5 # default + for label in labels: + if label in PRIORITY_LABELS: + priority_score = max(priority_score, PRIORITY_LABELS[label]) + + # 
Staleness boost: older updated = needs attention + try: + updated = datetime.fromisoformat(item["updated"].replace("Z", "+00:00")) + days_stale = (now - updated).total_seconds() / 86400 + except (ValueError, KeyError): + days_stale = 0 + + staleness_score = min(days_stale / 14.0, 1.0) # Max at 2 weeks + + # Comment activity: more comments = more discussion = potentially blocked + comments = item.get("comments", 0) + activity_score = min(comments / 10.0, 1.0) + + raw_score = (priority_score * 0.50 + staleness_score * 0.30 + activity_score * 0.20) * 100 + + # Rationale + reasons = [] + if priority_score >= 0.8: + reasons.append(f"labeled {', '.join(lbl for lbl in labels if lbl in PRIORITY_LABELS)}") + if days_stale > 7: + reasons.append(f"stale {days_stale:.0f}d") + if comments > 3: + reasons.append(f"{comments} comments") + if not reasons: + reasons.append("open issue") + + repo = item.get("repo", "") + candidates.append({ + "title": item["title"], + "source": "github_issue", + "raw_score": round(raw_score, 1), + "score_breakdown": { + "label_priority": round(priority_score, 2), + "staleness": round(staleness_score, 2), + "activity": round(activity_score, 2), + }, + "rationale": ", ".join(reasons), + "item_id": f"{repo}#{item['number']}", + "priority": "HIGH" if priority_score >= 0.8 else "MEDIUM" if priority_score >= 0.5 else "LOW", + "repo": repo, + "account": account, + "url": f"https://github.com/{repo}/issues/{item['number']}", + "labels": item.get("labels", []), + "created": item.get("created", ""), + "updated": item.get("updated", ""), + "days_stale": round(days_stale, 1), + "comments": comments, + }) + + return candidates + + +def fetch_github_prs(account: str, repos: list[str]) -> list[dict]: + """Fetch open PRs for an account's repos from GitHub.""" + candidates = [] + + repo_qualifiers = " ".join(f"repo:{r}" if "/" in r else f"repo:{account}/{r}" for r in repos) + query = f"is:open is:pr {repo_qualifiers}" + + jq_filter = ( + '.items[] | {' + 'repo: 
(.repository_url | split("/") | .[-2:] | join("/")),' + 'title: .title,' + 'labels: [.labels[].name],' + 'created: .created_at,' + 'updated: .updated_at,' + 'number: .number,' + 'draft: .draft,' + 'comments: .comments' + '}' + ) + + output = run_gh( + ["api", "search/issues", "--method", "GET", + "-f", f"q={query}", "-f", "per_page=50", + "--jq", jq_filter], + account=account, + ) + + if not output: + return [] + + now = datetime.now(UTC) + + for line in output.strip().splitlines(): + try: + item = json.loads(line) + except json.JSONDecodeError: + continue + + is_draft = item.get("draft", False) + + # PRs waiting for review are higher priority than drafts + base_score = 0.4 if is_draft else 0.7 + + # Labels boost + labels = [lbl.lower() for lbl in item.get("labels", [])] + for label in labels: + if label in PRIORITY_LABELS: + base_score = max(base_score, PRIORITY_LABELS[label]) + + # Staleness: PRs waiting for review get more urgent over time + try: + updated = datetime.fromisoformat(item["updated"].replace("Z", "+00:00")) + days_stale = (now - updated).total_seconds() / 86400 + except (ValueError, KeyError): + days_stale = 0 + + staleness_score = min(days_stale / 7.0, 1.0) # PRs stale faster (1 week max) + + raw_score = (base_score * 0.60 + staleness_score * 0.40) * 100 + + reasons = [] + if is_draft: + reasons.append("draft PR") + else: + reasons.append("awaiting review") + if days_stale > 3: + reasons.append(f"stale {days_stale:.0f}d") + if labels: + relevant = [lbl for lbl in labels if lbl in PRIORITY_LABELS] + if relevant: + reasons.append(f"labeled {', '.join(relevant)}") + + repo = item.get("repo", "") + candidates.append({ + "title": item["title"], + "source": "github_pr", + "raw_score": round(raw_score, 1), + "score_breakdown": { + "base_priority": round(base_score, 2), + "staleness": round(staleness_score, 2), + }, + "rationale": ", ".join(reasons), + "item_id": f"{repo}#{item['number']}", + "priority": "HIGH" if base_score >= 0.8 else "MEDIUM", + "repo": 
repo, + "account": account, + "url": f"https://github.com/{repo}/pull/{item['number']}", + "labels": item.get("labels", []), + "created": item.get("created", ""), + "updated": item.get("updated", ""), + "days_stale": round(days_stale, 1), + "is_draft": is_draft, + }) + + return candidates + + +def load_local_overrides(pm_dir: Path) -> list[dict]: + """Load manually-added items from .pm/backlog for local enrichment.""" + backlog_data = load_yaml(pm_dir / "backlog" / "items.yaml") + items = backlog_data.get("items", []) + ready_items = [item for item in items if item.get("status") == "READY"] + + candidates = [] + priority_map = {"HIGH": 1.0, "MEDIUM": 0.6, "LOW": 0.3} + + for item in ready_items: + priority = item.get("priority", "MEDIUM") + priority_score = priority_map.get(priority, 0.5) + hours = item.get("estimated_hours", 4) + ease_score = 1.0 if hours < 2 else 0.6 if hours <= 6 else 0.3 + + raw_score = (priority_score * 0.60 + ease_score * 0.40) * 100 + + reasons = [] + if priority == "HIGH": + reasons.append("HIGH priority") + if hours < 2: + reasons.append("quick win") + if not reasons: + reasons.append("local backlog item") + + candidates.append({ + "title": item.get("title", item["id"]), + "source": "local", + "raw_score": round(raw_score, 1), + "rationale": ", ".join(reasons), + "item_id": item["id"], + "priority": priority, + }) + + return candidates + + +def extract_roadmap_goals(pm_dir: Path) -> list[str]: + """Extract strategic goals from roadmap markdown.""" + roadmap_path = pm_dir / "roadmap.md" + if not roadmap_path.exists(): + return [] + + text = roadmap_path.read_text() + goals = [] + + for line in text.splitlines(): + line = line.strip() + if line.startswith("## ") or line.startswith("### "): + goals.append(line.lstrip("#").strip()) + elif line.startswith("- "): + goals.append(line.removeprefix("- ").strip()) + elif line.startswith("* "): + goals.append(line.removeprefix("* ").strip()) + + return goals + + +def 
score_roadmap_alignment(candidate: dict, goals: list[str]) -> float:
+    """Score how well a candidate aligns with roadmap goals. Returns 0.0-1.0.
+
+    Bag-of-words overlap: each goal is tokenized, stopwords dropped, and the
+    fraction of goal words found as substrings of the candidate title is
+    taken; the best-matching goal wins.  NOTE(review): substring matching can
+    produce false positives (e.g. "api" inside "rapid") — confirm acceptable.
+    """
+    if not goals:
+        # No roadmap available: neutral midpoint so ranking still works.
+        return 0.5
+
+    title_lower = candidate["title"].lower()
+    max_alignment = 0.0
+
+    for goal in goals:
+        goal_words = set(goal.lower().split())
+        # Drop stopwords so they don't inflate the match ratio.
+        goal_words -= {"the", "a", "an", "and", "or", "to", "for", "in", "of", "is", "with"}
+        if not goal_words:
+            continue
+
+        # Fraction of (non-stopword) goal words present in the title.
+        # (The "else 0.0" branch is unreachable: empty goal_words was skipped above.)
+        matching = sum(1 for word in goal_words if word in title_lower)
+        alignment = matching / len(goal_words) if goal_words else 0.0
+        max_alignment = max(max_alignment, alignment)
+
+    return min(max_alignment, 1.0)
+
+
+def suggest_action(candidate: dict) -> str:
+    """Suggest a concrete next action for a candidate.
+
+    Branches on the candidate's source (github_pr / github_issue / local)
+    and escalates the wording with staleness and label severity.
+    """
+    source = candidate["source"]
+    days_stale = candidate.get("days_stale", 0)
+    labels = candidate.get("labels", [])
+
+    if source == "github_pr":
+        if candidate.get("is_draft"):
+            return "Finish draft or close if abandoned"
+        if days_stale > 14:
+            return "Merge, close, or rebase — stale >2 weeks"
+        if days_stale > 7:
+            return "Review and merge or request changes"
+        return "Review PR"
+    elif source == "github_issue":
+        # NOTE(review): these membership tests assume lowercase label names;
+        # the fetchers appear to store labels with original casing — confirm.
+        if any(lbl in ("critical", "priority:critical") for lbl in labels):
+            return "Fix immediately — critical severity"
+        if any(lbl in ("bug",) for lbl in labels):
+            return "Investigate and fix bug"
+        if days_stale > 30:
+            return "Triage: still relevant? Close or reprioritize"
+        return "Work on issue or delegate"
+    elif source == "local":
+        return "Pick up from local backlog"
+    return "Review"
+
+
+def aggregate_and_rank(
+    issues: list[dict],
+    prs: list[dict],
+    local: list[dict],
+    goals: list[str],
+    top_n: int = TOP_N,
+) -> tuple[list[dict], list[dict]]:
+    """Aggregate candidates from all sources and rank by weighted score.
+
+    Each candidate's final score is its source-weighted raw_score plus a
+    roadmap-alignment bonus (alignment * WEIGHT_ROADMAP * 100).
+
+    Returns (top_n items, next 5 near-misses).
+    """
+    scored = []
+
+    source_weights = {
+        "github_issue": WEIGHT_ISSUES,
+        "github_pr": WEIGHT_PRS,
+        "local": WEIGHT_LOCAL,
+    }
+
+    all_candidates = issues + prs + local
+
+    for candidate in all_candidates:
+        source = candidate["source"]
+        # Unknown sources get a neutral 0.25 weight.
+        source_weight = source_weights.get(source, 0.25)
+        raw = candidate["raw_score"]
+
+        alignment = score_roadmap_alignment(candidate, goals)
+        final_score = (source_weight * raw) + (WEIGHT_ROADMAP * alignment * 100)
+
+        entry = {
+            "title": candidate["title"],
+            "source": candidate["source"],
+            "score": round(final_score, 1),
+            "raw_score": candidate["raw_score"],
+            "source_weight": source_weight,
+            "rationale": candidate["rationale"],
+            "item_id": candidate.get("item_id", ""),
+            "priority": candidate.get("priority", "MEDIUM"),
+            "alignment": round(alignment, 2),
+            "action": suggest_action(candidate),
+        }
+        # Preserve all metadata from the candidate
+        for key in ("url", "repo", "account", "labels", "created", "updated",
+                    "days_stale", "comments", "is_draft", "score_breakdown"):
+            if key in candidate:
+                entry[key] = candidate[key]
+
+        scored.append(entry)
+
+    # Sort by score descending; ties broken HIGH before MEDIUM before LOW.
+    priority_order = {"HIGH": 0, "MEDIUM": 1, "LOW": 2}
+    scored.sort(key=lambda x: (-x["score"], priority_order.get(x["priority"], 1)))
+
+    top = scored[:top_n]
+    for i, item in enumerate(top):
+        item["rank"] = i + 1
+
+    # The next five items get continuing rank numbers (top_n+1 ...).
+    near_misses = scored[top_n:top_n + 5]
+    for i, item in enumerate(near_misses):
+        item["rank"] = top_n + i + 1
+
+    return top, near_misses
+
+
+def build_repo_summary(all_candidates: list[dict]) -> dict:
+    """Build a per-repo, per-account summary of open work.
+
+    Candidates without repo/account metadata (local backlog items) are
+    grouped under the "local" bucket.  Returns {"by_repo": ..., "by_account": ...}
+    with repos ordered by total open issues+PRs descending.
+    """
+    repos: dict[str, dict] = {}
+    accounts: dict[str, dict] = {}
+
+    for c in all_candidates:
+        repo = c.get("repo", "local")
+        account = c.get("account", "local")
+
+        if repo not in repos:
+            repos[repo] = {"issues": 0, "prs": 0, "high_priority": 0}
+        if account not in accounts:
+            accounts[account] = {"issues": 0, "prs": 0, "repos": set()}
+
+        if c["source"] == "github_issue":
+            repos[repo]["issues"] += 1
+            accounts[account]["issues"] += 1
+        elif c["source"] == "github_pr":
+            repos[repo]["prs"] += 1
+            accounts[account]["prs"] += 1
+
+        if c.get("priority") == "HIGH":
+            repos[repo]["high_priority"] += 1
+
+        accounts[account]["repos"].add(repo)
+
+    # Convert sets to lists for JSON serialization
+    for a in accounts.values():
+        a["repos"] = sorted(a["repos"])
+
+    # Sort repos by total open items descending
+    sorted_repos = dict(sorted(repos.items(), key=lambda x: -(x[1]["issues"] + x[1]["prs"])))
+
+    return {"by_repo": sorted_repos, "by_account": accounts}
+
+
+def generate_top5(project_root: Path, sources_path: Path | None = None) -> dict:
+    """Generate the Top 5 priority list from GitHub + local state.
+
+    Reads sources.yaml for GitHub accounts/repos, fetches open issues and
+    PRs per account, merges in READY items from .pm/backlog, scores roadmap
+    alignment, and returns a JSON-serializable dict with top5, near_misses,
+    summary, and per-source counts.
+    """
+    pm_dir = project_root / ".pm"
+
+    if sources_path is None:
+        sources_path = pm_dir / "sources.yaml"
+
+    # Load GitHub sources config
+    sources = load_sources(sources_path)
+
+    # Remember original account to restore after
+    original_account = get_current_gh_account()
+
+    # Fetch from GitHub
+    all_issues = []
+    all_prs = []
+    accounts_queried = []
+
+    for source in sources:
+        account = source.get("account", "")
+        repos = source.get("repos", [])
+        # Skip malformed source entries rather than failing the whole run.
+        if not account or not repos:
+            continue
+
+        accounts_queried.append(account)
+        all_issues.extend(fetch_github_issues(account, repos))
+        all_prs.extend(fetch_github_prs(account, repos))
+
+    # Restore original account (only if we actually queried any account and
+    # get_current_gh_account returned something truthy).
+    if original_account and accounts_queried:
+        run_gh(["auth", "switch", "--user", original_account])
+
+    # Load local overrides
+    local = []
+    if pm_dir.exists():
+        local = load_local_overrides(pm_dir)
+
+    # Load roadmap goals
+    goals = extract_roadmap_goals(pm_dir) if pm_dir.exists() else []
+
+    # Aggregate and rank
+    all_candidates = all_issues + all_prs + local
+    top5, near_misses = aggregate_and_rank(all_issues, all_prs, local, goals)
+    summary = build_repo_summary(all_candidates)
+
+    return {
+        "top5": top5,
+        "near_misses": near_misses,
+        "summary": summary,
+        "sources": {
+            "github_issues": 
len(all_issues),
+            "github_prs": len(all_prs),
+            "local_items": len(local),
+            "roadmap_goals": len(goals),
+            "accounts": accounts_queried,
+        },
+        "total_candidates": len(all_candidates),
+    }
+
+
+def main():
+    """CLI entry point: parse args, print result JSON to stdout.
+
+    Returns 0 on success; on any failure prints {"error": ...} to stderr
+    and returns 1 (exit status is set via sys.exit below, so the script
+    never surfaces a raw traceback to the caller).
+    """
+    parser = argparse.ArgumentParser(description="Generate Top 5 priorities from GitHub + local state")
+    parser.add_argument(
+        "--project-root", type=Path, default=Path.cwd(), help="Project root directory"
+    )
+    parser.add_argument(
+        "--sources", type=Path, default=None, help="Path to sources.yaml (default: .pm/sources.yaml)"
+    )
+
+    args = parser.parse_args()
+
+    try:
+        result = generate_top5(args.project_root, args.sources)
+        print(json.dumps(result, indent=2))
+        return 0
+    except Exception as e:
+        # Broad catch is deliberate at this top-level boundary: always emit
+        # machine-readable JSON rather than a traceback.
+        print(json.dumps({"error": str(e)}), file=sys.stderr)
+        return 1
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/amplifier-bundle/skills/pm-architect/scripts/tests/agentic/test-top5-error-handling.yaml b/amplifier-bundle/skills/pm-architect/scripts/tests/agentic/test-top5-error-handling.yaml
new file mode 100644
index 000000000..1e281b6e1
--- /dev/null
+++ b/amplifier-bundle/skills/pm-architect/scripts/tests/agentic/test-top5-error-handling.yaml
@@ -0,0 +1,88 @@
+# Outside-in test for /top5 error handling
+# Validates generate_top5.py handles failure modes gracefully:
+# invalid sources, missing gh CLI, malformed YAML.
+
+scenario:
+  name: "Top 5 Priorities - Error Handling"
+  description: |
+    Verifies that generate_top5.py degrades gracefully when GitHub is
+    unreachable, sources.yaml is malformed, or the project root is invalid.
+    The script should always return valid JSON, never crash.
+ type: cli + level: 2 + tags: [cli, error-handling, top5, pm-architect, resilience] + + prerequisites: + - "Python 3.11+ is available" + - "PyYAML is installed" + + steps: + # Test 1: Non-existent project root (no .pm/ dir) + - action: launch + target: "python" + args: + - ".claude/skills/pm-architect/scripts/generate_top5.py" + - "--project-root" + - "/tmp/top5-test-nonexistent-path-12345" + description: "Run with non-existent project root" + timeout: 15s + + - action: verify_output + contains: '"top5"' + description: "Still returns valid JSON with top5 key" + + - action: verify_output + contains: '"total_candidates": 0' + description: "Reports zero candidates when no sources available" + + - action: verify_exit_code + expected: 0 + description: "Exits cleanly even with missing project root" + + # Test 2: Malformed sources.yaml + - action: launch + target: "bash" + args: + - "-c" + - | + TMPDIR=$(mktemp -d) && + mkdir -p "$TMPDIR/.pm" && + echo "not: [valid: yaml: {{{{" > "$TMPDIR/.pm/sources.yaml" && + python .claude/skills/pm-architect/scripts/generate_top5.py --project-root "$TMPDIR" --sources "$TMPDIR/.pm/sources.yaml" + description: "Run with malformed sources.yaml" + timeout: 15s + + # Script should handle YAML parse errors (may return error JSON or empty results) + - action: verify_exit_code + expected_one_of: [0, 1] + description: "Exits with 0 or 1, never crashes with traceback" + + # Test 3: Empty sources.yaml (valid but no accounts) + - action: launch + target: "bash" + args: + - "-c" + - | + TMPDIR=$(mktemp -d) && + mkdir -p "$TMPDIR/.pm" && + echo "github: []" > "$TMPDIR/.pm/sources.yaml" && + python .claude/skills/pm-architect/scripts/generate_top5.py --project-root "$TMPDIR" --sources "$TMPDIR/.pm/sources.yaml" + description: "Run with empty sources list" + timeout: 15s + + - action: verify_output + contains: '"top5"' + description: "Returns valid JSON with empty top5" + + - action: verify_output + contains: '"total_candidates": 0' + description: 
"Reports zero candidates" + + - action: verify_exit_code + expected: 0 + description: "Exits cleanly with empty sources" + + cleanup: + - action: stop_application + force: true + description: "Ensure all processes are terminated" diff --git a/amplifier-bundle/skills/pm-architect/scripts/tests/agentic/test-top5-local-overrides.yaml b/amplifier-bundle/skills/pm-architect/scripts/tests/agentic/test-top5-local-overrides.yaml new file mode 100644 index 000000000..3d763c94b --- /dev/null +++ b/amplifier-bundle/skills/pm-architect/scripts/tests/agentic/test-top5-local-overrides.yaml @@ -0,0 +1,97 @@ +# Outside-in test for /top5 local backlog integration +# Validates generate_top5.py incorporates .pm/backlog items and roadmap goals +# into the priority ranking alongside (or instead of) GitHub data. + +scenario: + name: "Top 5 Priorities - Local Overrides and Roadmap Alignment" + description: | + Verifies that generate_top5.py reads .pm/backlog/items.yaml for local + priorities and .pm/roadmap.md for strategic alignment scoring. + Tests the full aggregation pipeline without requiring GitHub access. 
+ type: cli + level: 2 + tags: [cli, integration, top5, pm-architect, local, roadmap] + + prerequisites: + - "Python 3.11+ is available" + - "PyYAML is installed" + + steps: + # Setup local .pm/ state with backlog items and roadmap + - action: launch + target: "bash" + args: + - "-c" + - | + TMPDIR=$(mktemp -d) && + mkdir -p "$TMPDIR/.pm/backlog" && + cat > "$TMPDIR/.pm/backlog/items.yaml" << 'BACKLOG' + items: + - id: "local-001" + title: "Fix authentication timeout bug" + status: "READY" + priority: "HIGH" + estimated_hours: 1 + - id: "local-002" + title: "Add dashboard metrics" + status: "READY" + priority: "MEDIUM" + estimated_hours: 4 + - id: "local-003" + title: "Refactor logging module" + status: "IN_PROGRESS" + priority: "LOW" + estimated_hours: 8 + BACKLOG + cat > "$TMPDIR/.pm/roadmap.md" << 'ROADMAP' + ## Q1 Goals + ### Improve authentication reliability + - Fix timeout and retry logic + ### Add observability dashboard + - Metrics and monitoring + ROADMAP + cat > "$TMPDIR/.pm/sources.yaml" << 'SOURCES' + github: [] + SOURCES + python .claude/skills/pm-architect/scripts/generate_top5.py --project-root "$TMPDIR" --sources "$TMPDIR/.pm/sources.yaml" + description: "Run with local backlog items and roadmap goals, no GitHub sources" + timeout: 15s + + # Verify local items appear in output + - action: verify_output + contains: "Fix authentication timeout bug" + timeout: 5s + description: "HIGH priority READY item appears in results" + + - action: verify_output + contains: "Add dashboard metrics" + description: "MEDIUM priority READY item appears in results" + + # IN_PROGRESS items should NOT appear (only READY items are loaded) + - action: verify_output + not_contains: "Refactor logging module" + description: "IN_PROGRESS item is excluded (only READY items loaded)" + + # Verify source attribution + - action: verify_output + contains: '"source": "local"' + description: "Items attributed to local source" + + # Verify roadmap goals were loaded + - action: 
verify_output + contains: '"roadmap_goals"' + description: "Roadmap goals count present in sources" + + # Verify alignment scoring (auth bug should align with roadmap goal) + - action: verify_output + matches: '"alignment":\\s*[0-9]' + description: "Items have alignment scores" + + - action: verify_exit_code + expected: 0 + description: "Script exits cleanly" + + cleanup: + - action: stop_application + force: true + description: "Ensure process is terminated" diff --git a/amplifier-bundle/skills/pm-architect/scripts/tests/agentic/test-top5-ranking.yaml b/amplifier-bundle/skills/pm-architect/scripts/tests/agentic/test-top5-ranking.yaml new file mode 100644 index 000000000..4fac284f8 --- /dev/null +++ b/amplifier-bundle/skills/pm-architect/scripts/tests/agentic/test-top5-ranking.yaml @@ -0,0 +1,104 @@ +# Outside-in test for /top5 ranking correctness +# Validates that output is strictly ranked by score descending, +# limited to 5 items, and each item has a rank field 1-5. + +scenario: + name: "Top 5 Priorities - Ranking and Limit Enforcement" + description: | + Verifies that generate_top5.py returns exactly TOP_N (5) items, + ranked by descending score, with rank fields 1 through 5. + Uses local backlog with >5 items to verify the limit. 
+ type: cli + level: 2 + tags: [cli, integration, top5, pm-architect, ranking] + + prerequisites: + - "Python 3.11+ is available" + - "PyYAML is installed" + + steps: + # Setup: 7 local items to verify only top 5 are returned + - action: launch + target: "bash" + args: + - "-c" + - | + TMPDIR=$(mktemp -d) && + mkdir -p "$TMPDIR/.pm/backlog" && + cat > "$TMPDIR/.pm/backlog/items.yaml" << 'BACKLOG' + items: + - id: "item-1" + title: "Critical security fix" + status: "READY" + priority: "HIGH" + estimated_hours: 1 + - id: "item-2" + title: "API rate limiting" + status: "READY" + priority: "HIGH" + estimated_hours: 2 + - id: "item-3" + title: "Database migration" + status: "READY" + priority: "MEDIUM" + estimated_hours: 4 + - id: "item-4" + title: "Update documentation" + status: "READY" + priority: "MEDIUM" + estimated_hours: 6 + - id: "item-5" + title: "Add unit tests" + status: "READY" + priority: "MEDIUM" + estimated_hours: 3 + - id: "item-6" + title: "Refactor config loader" + status: "READY" + priority: "LOW" + estimated_hours: 8 + - id: "item-7" + title: "Add logging headers" + status: "READY" + priority: "LOW" + estimated_hours: 10 + BACKLOG + cat > "$TMPDIR/.pm/sources.yaml" << 'SOURCES' + github: [] + SOURCES + python .claude/skills/pm-architect/scripts/generate_top5.py --project-root "$TMPDIR" --sources "$TMPDIR/.pm/sources.yaml" + description: "Run with 7 local items to verify top-5 limit" + timeout: 15s + + # Verify rank fields 1-5 exist + - action: verify_output + contains: '"rank": 1' + timeout: 5s + description: "First ranked item present" + + - action: verify_output + contains: '"rank": 5' + description: "Fifth ranked item present" + + # Verify rank 6 and 7 are NOT in output (limit enforced) + - action: verify_output + not_contains: '"rank": 6' + description: "No sixth rank (limit to 5)" + + - action: verify_output + not_contains: '"rank": 7' + description: "No seventh rank (limit to 5)" + + # Verify total_candidates reflects all 7 items considered + - 
action: verify_output + contains: '"total_candidates": 7' + description: "Total candidates count includes all 7 items" + + - action: verify_exit_code + expected: 0 + description: "Script exits cleanly" + + cleanup: + - action: stop_application + force: true + description: "Ensure process is terminated" diff --git a/amplifier-bundle/skills/pm-architect/scripts/tests/agentic/test-top5-smoke.yaml b/amplifier-bundle/skills/pm-architect/scripts/tests/agentic/test-top5-smoke.yaml new file mode 100644 index 000000000..d3d8ee24c --- /dev/null +++ b/amplifier-bundle/skills/pm-architect/scripts/tests/agentic/test-top5-smoke.yaml @@ -0,0 +1,52 @@ +# Outside-in smoke test for /top5 priority aggregation +# Validates the generate_top5.py CLI produces valid JSON output +# with the expected structure from a user's perspective. + +scenario: + name: "Top 5 Priorities - Smoke Test" + description: | + Verifies that generate_top5.py runs successfully, produces valid JSON, + and contains the expected top-level keys (top5, sources, total_candidates). + Uses an empty project root so no GitHub calls are made. 
+ type: cli + level: 1 + tags: [cli, smoke, top5, pm-architect] + + prerequisites: + - "Python 3.11+ is available" + - "PyYAML is installed" + + steps: + # Run with empty project root (no .pm/ dir, no sources.yaml) + - action: launch + target: "python" + args: + - "scripts/generate_top5.py" + - "--project-root" + - "/tmp/top5-test-empty" + working_directory: ".claude/skills/pm-architect" + description: "Run generate_top5.py with empty project root" + timeout: 15s + + # Verify valid JSON output with expected keys + - action: verify_output + contains: '"top5"' + timeout: 5s + description: "Output contains top5 key" + + - action: verify_output + contains: '"sources"' + description: "Output contains sources key" + + - action: verify_output + contains: '"total_candidates"' + description: "Output contains total_candidates key" + + - action: verify_exit_code + expected: 0 + description: "Script exits cleanly with code 0" + + cleanup: + - action: stop_application + force: true + description: "Ensure process is terminated" diff --git a/amplifier-bundle/skills/pm-architect/scripts/tests/agentic/test-top5-with-sources.yaml b/amplifier-bundle/skills/pm-architect/scripts/tests/agentic/test-top5-with-sources.yaml new file mode 100644 index 000000000..a76e52d4a --- /dev/null +++ b/amplifier-bundle/skills/pm-architect/scripts/tests/agentic/test-top5-with-sources.yaml @@ -0,0 +1,83 @@ +# Outside-in test for /top5 with configured sources +# Validates generate_top5.py queries GitHub when sources.yaml is provided, +# produces ranked output with score breakdown and source attribution. + +scenario: + name: "Top 5 Priorities - GitHub Source Aggregation" + description: | + Verifies that generate_top5.py correctly reads a sources.yaml config, + queries GitHub for issues and PRs, aggregates scores, and returns + a ranked list with proper source attribution and metadata. + Requires gh CLI authenticated with at least one account. 
+ type: cli + level: 2 + tags: [cli, integration, top5, pm-architect, github] + + prerequisites: + - "Python 3.11+ is available" + - "PyYAML is installed" + - "gh CLI is authenticated" + - "Network access to GitHub API" + + environment: + variables: + GH_PAGER: "" + + steps: + # Setup: create a minimal sources.yaml pointing to a known public repo + - action: launch + target: "bash" + args: + - "-c" + - | + TMPDIR=$(mktemp -d) && + mkdir -p "$TMPDIR/.pm" && + cat > "$TMPDIR/.pm/sources.yaml" << 'SOURCES' + github: + - account: rysweet + repos: + - amplihack + SOURCES + python .claude/skills/pm-architect/scripts/generate_top5.py --project-root "$TMPDIR" --sources "$TMPDIR/.pm/sources.yaml" + description: "Run generate_top5.py with sources pointing to amplihack repo" + timeout: 45s + + # Verify JSON structure + - action: verify_output + contains: '"top5"' + timeout: 5s + description: "Output has top5 array" + + - action: verify_output + contains: '"github_issues"' + description: "Sources breakdown includes github_issues count" + + - action: verify_output + contains: '"github_prs"' + description: "Sources breakdown includes github_prs count" + + - action: verify_output + contains: '"accounts"' + description: "Sources breakdown includes accounts queried" + + # Verify ranked items have required fields + - action: verify_output + matches: '"score":\\s*[0-9]' + description: "Items have numeric scores" + + - action: verify_output + matches: '"source":\\s*"github_(issue|pr)"' + description: "Items have source attribution" + + - action: verify_output + matches: '"rationale":' + description: "Items include rationale text" + + - action: verify_exit_code + expected: 0 + description: "Script exits cleanly" + + cleanup: + - action: stop_application + force: true + description: "Ensure process is terminated" diff --git a/amplifier-bundle/skills/pm-architect/scripts/tests/conftest.py b/amplifier-bundle/skills/pm-architect/scripts/tests/conftest.py index 40af58e5a..448aa9983 100644 
--- a/amplifier-bundle/skills/pm-architect/scripts/tests/conftest.py +++ b/amplifier-bundle/skills/pm-architect/scripts/tests/conftest.py @@ -5,6 +5,7 @@ from unittest.mock import MagicMock import pytest +import yaml @pytest.fixture @@ -131,3 +132,129 @@ def sample_daily_status_output() -> str: 1. Prioritize design review for API refactoring 2. Address technical debt in authentication system """ + + +# --- Top 5 Priority Aggregation Fixtures --- + + +@pytest.fixture +def pm_dir(tmp_path: Path) -> Path: + """Create .pm/ directory structure with sample data.""" + pm = tmp_path / ".pm" + (pm / "backlog").mkdir(parents=True) + (pm / "workstreams").mkdir(parents=True) + (pm / "delegations").mkdir(parents=True) + return pm + + +@pytest.fixture +def sample_backlog_items() -> dict: + """Sample backlog items YAML data.""" + return { + "items": [ + { + "id": "BL-001", + "title": "Fix authentication bug", + "description": "Auth tokens expire prematurely", + "priority": "HIGH", + "estimated_hours": 2, + "status": "READY", + "tags": ["auth", "bug"], + "dependencies": [], + }, + { + "id": "BL-002", + "title": "Implement config parser", + "description": "Parse YAML and JSON config files", + "priority": "MEDIUM", + "estimated_hours": 4, + "status": "READY", + "tags": ["config", "core"], + "dependencies": [], + }, + { + "id": "BL-003", + "title": "Add logging framework", + "description": "Structured logging with JSON output", + "priority": "LOW", + "estimated_hours": 8, + "status": "READY", + "tags": ["infrastructure"], + "dependencies": ["BL-002"], + }, + { + "id": "BL-004", + "title": "Write API documentation", + "description": "Document all REST endpoints", + "priority": "MEDIUM", + "estimated_hours": 3, + "status": "READY", + "tags": ["docs"], + "dependencies": [], + }, + { + "id": "BL-005", + "title": "Database migration tool", + "description": "Automated schema migrations", + "priority": "HIGH", + "estimated_hours": 6, + "status": "READY", + "tags": ["database", "core"], + 
"dependencies": ["BL-002"], + }, + { + "id": "BL-006", + "title": "Refactor test suite", + "description": "Improve test performance and coverage", + "priority": "MEDIUM", + "estimated_hours": 1, + "status": "IN_PROGRESS", + "tags": ["test"], + "dependencies": [], + }, + ] + } + + +@pytest.fixture +def populated_pm(pm_dir, sample_backlog_items): + """Create fully populated .pm/ directory.""" + with open(pm_dir / "backlog" / "items.yaml", "w") as f: + yaml.dump(sample_backlog_items, f) + + ws_data = { + "id": "ws-1", + "backlog_id": "BL-006", + "title": "Test Suite Refactor", + "agent": "builder", + "status": "RUNNING", + "last_activity": "2020-01-01T00:00:00Z", + } + with open(pm_dir / "workstreams" / "ws-1.yaml", "w") as f: + yaml.dump(ws_data, f) + + deleg_data = { + "id": "DEL-001", + "title": "Implement caching layer", + "status": "READY", + "backlog_id": "BL-002", + } + with open(pm_dir / "delegations" / "del-001.yaml", "w") as f: + yaml.dump(deleg_data, f) + + roadmap = """# Project Roadmap + +## Q1 Goals + +### Core Infrastructure +- Implement config parser +- Database migration tool +- Logging framework + +### Quality +- Test coverage above 80% +- API documentation complete +""" + (pm_dir / "roadmap.md").write_text(roadmap) + + return pm_dir diff --git a/amplifier-bundle/skills/pm-architect/scripts/tests/test_generate_top5.py b/amplifier-bundle/skills/pm-architect/scripts/tests/test_generate_top5.py new file mode 100644 index 000000000..8928e6a21 --- /dev/null +++ b/amplifier-bundle/skills/pm-architect/scripts/tests/test_generate_top5.py @@ -0,0 +1,420 @@ +"""Tests for generate_top5.py - GitHub-native priority aggregation.""" + +import json +import sys +from pathlib import Path +from unittest.mock import patch + +import pytest +import yaml + +sys.path.insert(0, str(Path(__file__).parent.parent)) +from generate_top5 import ( + PRIORITY_LABELS, + aggregate_and_rank, + build_repo_summary, + extract_roadmap_goals, + fetch_github_issues, + fetch_github_prs, + 
generate_top5, + load_local_overrides, + load_sources, + score_roadmap_alignment, + suggest_action, +) + + +class TestLoadSources: + """Tests for sources.yaml loading.""" + + def test_no_sources_file(self, project_root): + """Returns empty list when sources.yaml doesn't exist.""" + result = load_sources(project_root / "sources.yaml") + assert result == [] + + def test_loads_github_sources(self, tmp_path): + """Parses sources.yaml correctly.""" + sources = { + "github": [ + {"account": "rysweet", "repos": ["amplihack", "azlin"]}, + {"account": "rysweet_microsoft", "repos": ["cloud-ecosystem-security/SedanDelivery"]}, + ] + } + path = tmp_path / "sources.yaml" + with open(path, "w") as f: + yaml.dump(sources, f) + + result = load_sources(path) + assert len(result) == 2 + assert result[0]["account"] == "rysweet" + assert result[1]["repos"] == ["cloud-ecosystem-security/SedanDelivery"] + + +class TestFetchGithubIssues: + """Tests for GitHub issue fetching (mocked).""" + + def test_returns_empty_on_gh_failure(self): + """Returns empty list when gh CLI fails.""" + with patch("generate_top5.run_gh", return_value=None): + result = fetch_github_issues("rysweet", ["amplihack"]) + assert result == [] + + def test_parses_issue_data(self): + """Correctly parses gh API JSON output with full metadata.""" + mock_output = json.dumps({ + "repo": "rysweet/amplihack", + "title": "Fix auth bug", + "labels": ["bug", "high"], + "created": "2026-03-01T00:00:00Z", + "updated": "2026-03-07T00:00:00Z", + "number": 123, + "comments": 5, + }) + with patch("generate_top5.run_gh", return_value=mock_output): + result = fetch_github_issues("rysweet", ["amplihack"]) + assert len(result) == 1 + item = result[0] + assert item["source"] == "github_issue" + assert item["priority"] == "HIGH" + assert item["url"] == "https://github.com/rysweet/amplihack/issues/123" + assert item["account"] == "rysweet" + assert item["labels"] == ["bug", "high"] + assert item["comments"] == 5 + assert "score_breakdown" in 
item + assert "label_priority" in item["score_breakdown"] + + def test_priority_from_labels(self): + """Labels correctly map to priority scores.""" + mock_output = json.dumps({ + "repo": "r/a", "title": "Critical issue", + "labels": ["critical"], "created": "2026-03-07T00:00:00Z", + "updated": "2026-03-07T00:00:00Z", "number": 1, "comments": 0, + }) + with patch("generate_top5.run_gh", return_value=mock_output): + result = fetch_github_issues("r", ["a"]) + assert result[0]["priority"] == "HIGH" + assert result[0]["score_breakdown"]["label_priority"] == 1.0 + + def test_staleness_boosts_score(self): + """Older issues score higher due to staleness.""" + fresh = json.dumps({ + "repo": "r/a", "title": "Fresh", "labels": [], + "created": "2026-03-07T00:00:00Z", "updated": "2026-03-07T00:00:00Z", + "number": 1, "comments": 0, + }) + stale = json.dumps({ + "repo": "r/a", "title": "Stale", "labels": [], + "created": "2026-01-01T00:00:00Z", "updated": "2026-01-01T00:00:00Z", + "number": 2, "comments": 0, + }) + with patch("generate_top5.run_gh", return_value=f"{fresh}\n{stale}"): + result = fetch_github_issues("r", ["a"]) + stale_item = next(c for c in result if "Stale" in c["title"]) + fresh_item = next(c for c in result if "Fresh" in c["title"]) + assert stale_item["raw_score"] > fresh_item["raw_score"] + assert stale_item["days_stale"] > fresh_item["days_stale"] + + +class TestFetchGithubPrs: + """Tests for GitHub PR fetching (mocked).""" + + def test_returns_empty_on_failure(self): + """Returns empty list when gh CLI fails.""" + with patch("generate_top5.run_gh", return_value=None): + result = fetch_github_prs("rysweet", ["amplihack"]) + assert result == [] + + def test_draft_pr_scores_lower(self): + """Draft PRs score lower than non-drafts.""" + draft = json.dumps({ + "repo": "r/a", "title": "Draft PR", "labels": [], + "created": "2026-03-07T00:00:00Z", "updated": "2026-03-07T00:00:00Z", + "number": 1, "draft": True, "comments": 0, + }) + ready = json.dumps({ + "repo": 
"r/a", "title": "Ready PR", "labels": [], + "created": "2026-03-07T00:00:00Z", "updated": "2026-03-07T00:00:00Z", + "number": 2, "draft": False, "comments": 0, + }) + with patch("generate_top5.run_gh", return_value=f"{draft}\n{ready}"): + result = fetch_github_prs("r", ["a"]) + draft_item = next(c for c in result if "Draft" in c["title"]) + ready_item = next(c for c in result if "Ready" in c["title"]) + assert ready_item["raw_score"] > draft_item["raw_score"] + assert draft_item["is_draft"] is True + assert ready_item["is_draft"] is False + + def test_pr_has_url_and_metadata(self): + """PRs include correct GitHub URL and metadata.""" + mock = json.dumps({ + "repo": "rysweet/amplihack", "title": "Fix stuff", "labels": ["bug"], + "created": "2026-03-07T00:00:00Z", "updated": "2026-03-07T00:00:00Z", + "number": 42, "draft": False, "comments": 0, + }) + with patch("generate_top5.run_gh", return_value=mock): + result = fetch_github_prs("rysweet", ["amplihack"]) + assert result[0]["url"] == "https://github.com/rysweet/amplihack/pull/42" + assert result[0]["account"] == "rysweet" + assert result[0]["labels"] == ["bug"] + + +class TestLoadLocalOverrides: + """Tests for local .pm/ backlog loading.""" + + def test_no_pm_dir(self, project_root): + """Returns empty when .pm doesn't exist.""" + result = load_local_overrides(project_root / ".pm") + assert result == [] + + def test_loads_ready_items(self, pm_dir): + """Loads READY items from backlog.""" + items = { + "items": [ + {"id": "BL-001", "title": "Task A", "status": "READY", "priority": "HIGH", "estimated_hours": 1}, + {"id": "BL-002", "title": "Task B", "status": "DONE", "priority": "HIGH"}, + ] + } + with open(pm_dir / "backlog" / "items.yaml", "w") as f: + yaml.dump(items, f) + + result = load_local_overrides(pm_dir) + assert len(result) == 1 + assert result[0]["source"] == "local" + assert result[0]["item_id"] == "BL-001" + + +class TestExtractRoadmapGoals: + """Tests for roadmap goal extraction.""" + + def 
test_no_roadmap(self, project_root): + """Returns empty when no roadmap exists.""" + result = extract_roadmap_goals(project_root / ".pm") + assert result == [] + + def test_extracts_goals(self, populated_pm): + """Extracts goals from roadmap markdown.""" + goals = extract_roadmap_goals(populated_pm) + assert len(goals) > 0 + assert any("config" in g.lower() for g in goals) + + +class TestScoreRoadmapAlignment: + """Tests for roadmap alignment scoring.""" + + def test_no_goals_returns_neutral(self): + assert score_roadmap_alignment({"title": "X", "source": "github_issue"}, []) == 0.5 + + def test_matching_title_scores_high(self): + score = score_roadmap_alignment( + {"title": "Implement config parser", "source": "github_issue"}, + ["config parser implementation"], + ) + assert score > 0.0 + + def test_unrelated_title_scores_zero(self): + score = score_roadmap_alignment( + {"title": "Fix authentication bug", "source": "github_issue"}, + ["database migration tool"], + ) + assert score == 0.0 + + +class TestSuggestAction: + """Tests for action suggestion logic.""" + + def test_critical_issue(self): + action = suggest_action({"source": "github_issue", "labels": ["critical"]}) + assert "immediately" in action.lower() + + def test_bug_issue(self): + action = suggest_action({"source": "github_issue", "labels": ["bug"], "days_stale": 1}) + assert "bug" in action.lower() + + def test_stale_pr(self): + action = suggest_action({"source": "github_pr", "is_draft": False, "days_stale": 20}) + assert "stale" in action.lower() or "merge" in action.lower() + + def test_draft_pr(self): + action = suggest_action({"source": "github_pr", "is_draft": True, "days_stale": 1}) + assert "draft" in action.lower() + + def test_local_item(self): + action = suggest_action({"source": "local"}) + assert "backlog" in action.lower() + + +class TestAggregateAndRank: + """Tests for the core aggregation and ranking logic.""" + + def test_empty_input(self): + top, near = aggregate_and_rank([], [], [], 
[]) + assert top == [] + assert near == [] + + def test_returns_max_5(self): + candidates = [ + {"title": f"Item {i}", "source": "github_issue", "raw_score": float(100 - i), + "rationale": "test", "item_id": f"#{i}", "priority": "MEDIUM"} + for i in range(10) + ] + top, near = aggregate_and_rank(candidates, [], [], []) + assert len(top) == 5 + assert len(near) == 5 + + def test_ranked_in_order(self): + issues = [ + {"title": "Low", "source": "github_issue", "raw_score": 30.0, + "rationale": "test", "item_id": "#1", "priority": "LOW"}, + {"title": "High", "source": "github_issue", "raw_score": 90.0, + "rationale": "test", "item_id": "#2", "priority": "HIGH"}, + ] + top, _ = aggregate_and_rank(issues, [], [], []) + assert top[0]["title"] == "High" + assert top[1]["title"] == "Low" + + def test_mixed_sources(self): + issues = [{"title": "Issue", "source": "github_issue", "raw_score": 80.0, + "rationale": "test", "item_id": "#1", "priority": "HIGH"}] + prs = [{"title": "PR", "source": "github_pr", "raw_score": 80.0, + "rationale": "test", "item_id": "#2", "priority": "HIGH", + "url": "https://github.com/r/a/pull/2", "repo": "r/a"}] + local = [{"title": "Local", "source": "local", "raw_score": 80.0, + "rationale": "test", "item_id": "BL-1", "priority": "MEDIUM"}] + top, _ = aggregate_and_rank(issues, prs, local, []) + assert top[0]["source"] == "github_issue" + assert top[1]["source"] == "github_pr" + assert top[2]["source"] == "local" + + def test_roadmap_alignment_boosts_score(self): + issues = [ + {"title": "Implement config parser", "source": "github_issue", "raw_score": 50.0, + "rationale": "test", "item_id": "#1", "priority": "MEDIUM"}, + {"title": "Fix random thing", "source": "github_issue", "raw_score": 50.0, + "rationale": "test", "item_id": "#2", "priority": "MEDIUM"}, + ] + top, _ = aggregate_and_rank(issues, [], [], ["config parser implementation"]) + config_item = next(r for r in top if "config" in r["title"].lower()) + other_item = next(r for r in top if 
"random" in r["title"].lower()) + assert config_item["score"] > other_item["score"] + + def test_tiebreak_by_priority(self): + issues = [ + {"title": "Low priority", "source": "github_issue", "raw_score": 50.0, + "rationale": "test", "item_id": "#1", "priority": "LOW"}, + {"title": "High priority", "source": "github_issue", "raw_score": 50.0, + "rationale": "test", "item_id": "#2", "priority": "HIGH"}, + ] + top, _ = aggregate_and_rank(issues, [], [], []) + assert top[0]["priority"] == "HIGH" + assert top[1]["priority"] == "LOW" + + def test_preserves_url_and_repo(self): + prs = [{"title": "PR", "source": "github_pr", "raw_score": 80.0, + "rationale": "test", "item_id": "#1", "priority": "MEDIUM", + "url": "https://github.com/r/a/pull/1", "repo": "r/a"}] + top, _ = aggregate_and_rank([], prs, [], []) + assert top[0]["url"] == "https://github.com/r/a/pull/1" + assert top[0]["repo"] == "r/a" + + def test_items_have_action(self): + issues = [{"title": "Bug", "source": "github_issue", "raw_score": 80.0, + "rationale": "test", "item_id": "#1", "priority": "HIGH", + "labels": ["bug"], "days_stale": 5}] + top, _ = aggregate_and_rank(issues, [], [], []) + assert "action" in top[0] + assert len(top[0]["action"]) > 0 + + def test_near_misses_returned(self): + candidates = [ + {"title": f"Item {i}", "source": "github_issue", "raw_score": float(100 - i), + "rationale": "test", "item_id": f"#{i}", "priority": "MEDIUM"} + for i in range(8) + ] + top, near = aggregate_and_rank(candidates, [], [], []) + assert len(top) == 5 + assert len(near) == 3 + assert near[0]["rank"] == 6 + + +class TestBuildRepoSummary: + """Tests for per-repo summary generation.""" + + def test_empty_candidates(self): + result = build_repo_summary([]) + assert result["by_repo"] == {} + assert result["by_account"] == {} + + def test_counts_by_repo(self): + candidates = [ + {"source": "github_issue", "repo": "r/a", "account": "x", "priority": "HIGH"}, + {"source": "github_issue", "repo": "r/a", "account": 
"x", "priority": "MEDIUM"}, + {"source": "github_pr", "repo": "r/a", "account": "x", "priority": "MEDIUM"}, + {"source": "github_issue", "repo": "r/b", "account": "x", "priority": "HIGH"}, + ] + result = build_repo_summary(candidates) + assert result["by_repo"]["r/a"]["issues"] == 2 + assert result["by_repo"]["r/a"]["prs"] == 1 + assert result["by_repo"]["r/a"]["high_priority"] == 1 + assert result["by_repo"]["r/b"]["issues"] == 1 + + def test_counts_by_account(self): + candidates = [ + {"source": "github_issue", "repo": "r/a", "account": "alice", "priority": "HIGH"}, + {"source": "github_pr", "repo": "r/b", "account": "bob", "priority": "MEDIUM"}, + ] + result = build_repo_summary(candidates) + assert result["by_account"]["alice"]["issues"] == 1 + assert result["by_account"]["bob"]["prs"] == 1 + assert "r/a" in result["by_account"]["alice"]["repos"] + + +class TestGenerateTop5: + """Tests for the main generate_top5 function.""" + + def test_no_sources_no_pm(self, project_root): + with patch("generate_top5.get_current_gh_account", return_value="rysweet"): + result = generate_top5(project_root) + assert result["top5"] == [] + assert result["near_misses"] == [] + assert result["total_candidates"] == 0 + + def test_github_failure_falls_back_to_local(self, populated_pm): + sources = {"github": [{"account": "test", "repos": ["test/repo"]}]} + sources_path = populated_pm / "sources.yaml" + with open(sources_path, "w") as f: + yaml.dump(sources, f) + + with patch("generate_top5.run_gh", return_value=None), \ + patch("generate_top5.get_current_gh_account", return_value="test"): + result = generate_top5(populated_pm.parent, sources_path) + assert result["sources"]["local_items"] > 0 + assert result["sources"]["github_issues"] == 0 + + def test_output_has_summary(self): + with patch("generate_top5.get_current_gh_account", return_value="test"), \ + patch("generate_top5.load_sources", return_value=[]): + result = generate_top5(Path("/nonexistent")) + assert "summary" in result 
+ assert "near_misses" in result + + def test_items_have_required_fields(self): + issues = [{"title": "Test", "source": "github_issue", "raw_score": 80.0, + "rationale": "test", "item_id": "#1", "priority": "HIGH", + "url": "https://github.com/r/a/issues/1", "repo": "r/a"}] + top, _ = aggregate_and_rank(issues, [], [], []) + required = {"rank", "title", "source", "score", "rationale", "priority", "action", "alignment"} + for item in top: + assert required.issubset(item.keys()), f"Missing: {required - item.keys()}" + + +class TestPriorityLabels: + """Tests for label-to-priority mapping.""" + + def test_critical_is_highest(self): + assert PRIORITY_LABELS["critical"] == 1.0 + + def test_bug_is_high(self): + assert PRIORITY_LABELS["bug"] == 0.8 + + def test_enhancement_is_medium(self): + assert PRIORITY_LABELS["enhancement"] == 0.5 diff --git a/docs/claude/skills/pm-architect/SKILL.md b/docs/claude/skills/pm-architect/SKILL.md new file mode 100644 index 000000000..6b5480872 --- /dev/null +++ b/docs/claude/skills/pm-architect/SKILL.md @@ -0,0 +1,104 @@ +--- +name: pm-architect +description: Expert project manager orchestrating backlog-curator, work-delegator, workstream-coordinator, and roadmap-strategist sub-skills. Coordinates complex software projects through delegation and strategic oversight. Activates when managing projects, coordinating work, or tracking overall progress. +explicit_triggers: + - /top5 +--- + +# PM Architect Skill (Orchestrator) + +## Role + +You are the project manager orchestrating four specialized sub-skills to coordinate software development projects. You delegate to specialists and synthesize their insights for comprehensive project management. + +## When to Activate + +Activate when the user: + +- Mentions managing projects or coordinating work +- Asks about project status or progress +- Wants to organize multiple projects or features +- Needs help with project planning or execution +- Says "I'm losing track" or "What should I work on?" 
+- Asks "What are the top priorities?" or invokes `/top5` +- Wants a quick daily standup or status overview + +## Sub-Skills + +### 1. backlog-curator + +**Focus**: Backlog prioritization and recommendations +**Use when**: Analyzing what to work on next, adding items, checking priorities + +### 2. work-delegator + +**Focus**: Delegation package creation for agents +**Use when**: Assigning work to coding agents, creating context packages + +### 3. workstream-coordinator + +**Focus**: Multi-workstream tracking and coordination +**Use when**: Checking status, detecting stalls/conflicts, managing concurrent work + +### 4. roadmap-strategist + +**Focus**: Strategic planning and goal alignment +**Use when**: Discussing goals, milestones, strategic direction, roadmap updates + +## Core Workflow + +When user requests project management help: + +1. **Understand intent**: Determine which sub-skill(s) to invoke +2. **Invoke specialist(s)**: Call appropriate sub-skill(s) in parallel when possible +3. **Synthesize results**: Combine insights from sub-skills +4. **Present cohesively**: Deliver unified response to user +5. **Recommend actions**: Suggest next steps + +## Orchestration Patterns + +### Pattern 1: What Should I Work On? + +Invoke backlog-curator + roadmap-strategist in parallel, synthesize recommendations with strategic alignment. + +### Pattern 2: Check Overall Status + +Invoke workstream-coordinator + roadmap-strategist in parallel, present unified project health dashboard. + +### Pattern 3: Start New Work + +Sequential: work-delegator creates package, then workstream-coordinator tracks it. + +### Pattern 4: Initialize PM + +Create .pm/ structure, invoke roadmap-strategist for roadmap generation. + +### Pattern 5: Top 5 Priorities (`/top5`) + +Run `scripts/generate_top5.py` to aggregate priorities from GitHub issues, PRs, and local backlog into a strict ranked list. Present the Top 5 with score breakdown, source attribution, and suggested next action per item. 
+ +Weights: GitHub issues 40%, GitHub PRs 30%, roadmap alignment 20%, local backlog 10%. + +### Pattern 6: Daily Standup + +Run `scripts/generate_daily_status.py` to produce a cross-project status report. Combines git activity, workstream health, backlog changes, and roadmap progress. + +## Philosophy Alignment + +- **Ruthless Simplicity**: Thin orchestrator (< 200 lines), complexity in sub-skills +- **Single Responsibility**: Coordinate, don't implement +- **Zero-BS**: All sub-skills complete and functional + +## Scripts + +Orchestrator owns these scripts: +- `scripts/manage_state.py` — Basic .pm/ state operations (init, add, update, list) +- `scripts/generate_top5.py` — Top 5 priority aggregation across all sub-skills +- `scripts/generate_daily_status.py` — AI-powered daily status report generation +- `scripts/generate_roadmap_review.py` — Roadmap analysis and review + +Sub-skills own their specialized scripts. + +## Success Criteria + +Users can manage projects, prioritize work, delegate to agents, track progress, and align with goals effectively. diff --git a/docs/claude/skills/pm-architect/scripts/generate_top5.py b/docs/claude/skills/pm-architect/scripts/generate_top5.py new file mode 100644 index 000000000..829d3b0b1 --- /dev/null +++ b/docs/claude/skills/pm-architect/scripts/generate_top5.py @@ -0,0 +1,584 @@ +#!/usr/bin/env python3 +"""Aggregate priorities across GitHub accounts into a strict Top 5 ranked list. + +Queries GitHub issues and PRs across configured accounts/repos, scores them +by priority labels, staleness, blocking status, and roadmap alignment. + +Falls back to .pm/ YAML state if GitHub is unavailable or for enrichment. + +Usage: + python generate_top5.py [--project-root PATH] [--sources PATH] + +Returns JSON with top 5 priorities. 
+""" + +import argparse +import json +import subprocess +import sys +from datetime import UTC, datetime +from pathlib import Path +from typing import Any + +import yaml + + +# Aggregation weights +WEIGHT_ISSUES = 0.40 +WEIGHT_PRS = 0.30 +WEIGHT_ROADMAP = 0.20 +WEIGHT_LOCAL = 0.10 # .pm/ overrides + +TOP_N = 5 + +# Label-to-priority mapping +PRIORITY_LABELS = { + "critical": 1.0, + "priority:critical": 1.0, + "high": 0.9, + "priority:high": 0.9, + "bug": 0.8, + "medium": 0.6, + "priority:medium": 0.6, + "enhancement": 0.5, + "feature": 0.5, + "low": 0.3, + "priority:low": 0.3, +} + + +def load_yaml(path: Path) -> dict[str, Any]: + """Load YAML file safely.""" + if not path.exists(): + return {} + with open(path) as f: + return yaml.safe_load(f) or {} + + +def load_sources(sources_path: Path) -> list[dict]: + """Load GitHub source configuration.""" + data = load_yaml(sources_path) + return data.get("github", []) + + +def run_gh(args: list[str], account: str | None = None) -> str | None: + """Run a gh CLI command, optionally switching account first. + + Returns stdout on success, None on failure. 
+ """ + if account: + switch = subprocess.run( + ["gh", "auth", "switch", "--user", account], + capture_output=True, text=True, timeout=10, + ) + if switch.returncode != 0: + return None + + try: + result = subprocess.run( + ["gh"] + args, + capture_output=True, text=True, timeout=30, + ) + if result.returncode != 0: + return None + return result.stdout.strip() + except (subprocess.TimeoutExpired, FileNotFoundError): + return None + + +def get_current_gh_account() -> str | None: + """Get the currently active gh account.""" + try: + result = subprocess.run( + ["gh", "api", "user", "--jq", ".login"], + capture_output=True, text=True, timeout=10, + ) + if result.returncode == 0 and result.stdout.strip(): + return result.stdout.strip() + except (subprocess.TimeoutExpired, FileNotFoundError): + pass + return None + + +def fetch_github_issues(account: str, repos: list[str]) -> list[dict]: + """Fetch open issues for an account's repos from GitHub.""" + candidates = [] + + # Use search API to get all issues at once + repo_qualifiers = " ".join(f"repo:{r}" if "/" in r else f"repo:{account}/{r}" for r in repos) + query = f"is:open is:issue {repo_qualifiers}" + + jq_filter = ( + '.items[] | {' + 'repo: (.repository_url | split("/") | .[-2:] | join("/")),' + 'title: .title,' + 'labels: [.labels[].name],' + 'created: .created_at,' + 'updated: .updated_at,' + 'number: .number,' + 'comments: .comments' + '}' + ) + + output = run_gh( + ["api", "search/issues", "--method", "GET", + "-f", f"q={query}", "-f", "per_page=50", + "--jq", jq_filter], + account=account, + ) + + if not output: + return [] + + now = datetime.now(UTC) + + for line in output.strip().splitlines(): + try: + item = json.loads(line) + except json.JSONDecodeError: + continue + + # Score by labels + labels = [lbl.lower() for lbl in item.get("labels", [])] + priority_score = 0.5 # default + for label in labels: + if label in PRIORITY_LABELS: + priority_score = max(priority_score, PRIORITY_LABELS[label]) + + # 
Staleness boost: older updated = needs attention + try: + updated = datetime.fromisoformat(item["updated"].replace("Z", "+00:00")) + days_stale = (now - updated).total_seconds() / 86400 + except (ValueError, KeyError): + days_stale = 0 + + staleness_score = min(days_stale / 14.0, 1.0) # Max at 2 weeks + + # Comment activity: more comments = more discussion = potentially blocked + comments = item.get("comments", 0) + activity_score = min(comments / 10.0, 1.0) + + raw_score = (priority_score * 0.50 + staleness_score * 0.30 + activity_score * 0.20) * 100 + + # Rationale + reasons = [] + if priority_score >= 0.8: + reasons.append(f"labeled {', '.join(lbl for lbl in labels if lbl in PRIORITY_LABELS)}") + if days_stale > 7: + reasons.append(f"stale {days_stale:.0f}d") + if comments > 3: + reasons.append(f"{comments} comments") + if not reasons: + reasons.append("open issue") + + repo = item.get("repo", "") + candidates.append({ + "title": item["title"], + "source": "github_issue", + "raw_score": round(raw_score, 1), + "score_breakdown": { + "label_priority": round(priority_score, 2), + "staleness": round(staleness_score, 2), + "activity": round(activity_score, 2), + }, + "rationale": ", ".join(reasons), + "item_id": f"{repo}#{item['number']}", + "priority": "HIGH" if priority_score >= 0.8 else "MEDIUM" if priority_score >= 0.5 else "LOW", + "repo": repo, + "account": account, + "url": f"https://github.com/{repo}/issues/{item['number']}", + "labels": item.get("labels", []), + "created": item.get("created", ""), + "updated": item.get("updated", ""), + "days_stale": round(days_stale, 1), + "comments": comments, + }) + + return candidates + + +def fetch_github_prs(account: str, repos: list[str]) -> list[dict]: + """Fetch open PRs for an account's repos from GitHub.""" + candidates = [] + + repo_qualifiers = " ".join(f"repo:{r}" if "/" in r else f"repo:{account}/{r}" for r in repos) + query = f"is:open is:pr {repo_qualifiers}" + + jq_filter = ( + '.items[] | {' + 'repo: 
(.repository_url | split("/") | .[-2:] | join("/")),' + 'title: .title,' + 'labels: [.labels[].name],' + 'created: .created_at,' + 'updated: .updated_at,' + 'number: .number,' + 'draft: .draft,' + 'comments: .comments' + '}' + ) + + output = run_gh( + ["api", "search/issues", "--method", "GET", + "-f", f"q={query}", "-f", "per_page=50", + "--jq", jq_filter], + account=account, + ) + + if not output: + return [] + + now = datetime.now(UTC) + + for line in output.strip().splitlines(): + try: + item = json.loads(line) + except json.JSONDecodeError: + continue + + is_draft = item.get("draft", False) + + # PRs waiting for review are higher priority than drafts + base_score = 0.4 if is_draft else 0.7 + + # Labels boost + labels = [lbl.lower() for lbl in item.get("labels", [])] + for label in labels: + if label in PRIORITY_LABELS: + base_score = max(base_score, PRIORITY_LABELS[label]) + + # Staleness: PRs waiting for review get more urgent over time + try: + updated = datetime.fromisoformat(item["updated"].replace("Z", "+00:00")) + days_stale = (now - updated).total_seconds() / 86400 + except (ValueError, KeyError): + days_stale = 0 + + staleness_score = min(days_stale / 7.0, 1.0) # PRs stale faster (1 week max) + + raw_score = (base_score * 0.60 + staleness_score * 0.40) * 100 + + reasons = [] + if is_draft: + reasons.append("draft PR") + else: + reasons.append("awaiting review") + if days_stale > 3: + reasons.append(f"stale {days_stale:.0f}d") + if labels: + relevant = [lbl for lbl in labels if lbl in PRIORITY_LABELS] + if relevant: + reasons.append(f"labeled {', '.join(relevant)}") + + repo = item.get("repo", "") + candidates.append({ + "title": item["title"], + "source": "github_pr", + "raw_score": round(raw_score, 1), + "score_breakdown": { + "base_priority": round(base_score, 2), + "staleness": round(staleness_score, 2), + }, + "rationale": ", ".join(reasons), + "item_id": f"{repo}#{item['number']}", + "priority": "HIGH" if base_score >= 0.8 else "MEDIUM", + "repo": 
repo, + "account": account, + "url": f"https://github.com/{repo}/pull/{item['number']}", + "labels": item.get("labels", []), + "created": item.get("created", ""), + "updated": item.get("updated", ""), + "days_stale": round(days_stale, 1), + "is_draft": is_draft, + }) + + return candidates + + +def load_local_overrides(pm_dir: Path) -> list[dict]: + """Load manually-added items from .pm/backlog for local enrichment.""" + backlog_data = load_yaml(pm_dir / "backlog" / "items.yaml") + items = backlog_data.get("items", []) + ready_items = [item for item in items if item.get("status") == "READY"] + + candidates = [] + priority_map = {"HIGH": 1.0, "MEDIUM": 0.6, "LOW": 0.3} + + for item in ready_items: + priority = item.get("priority", "MEDIUM") + priority_score = priority_map.get(priority, 0.5) + hours = item.get("estimated_hours", 4) + ease_score = 1.0 if hours < 2 else 0.6 if hours <= 6 else 0.3 + + raw_score = (priority_score * 0.60 + ease_score * 0.40) * 100 + + reasons = [] + if priority == "HIGH": + reasons.append("HIGH priority") + if hours < 2: + reasons.append("quick win") + if not reasons: + reasons.append("local backlog item") + + candidates.append({ + "title": item.get("title", item["id"]), + "source": "local", + "raw_score": round(raw_score, 1), + "rationale": ", ".join(reasons), + "item_id": item["id"], + "priority": priority, + }) + + return candidates + + +def extract_roadmap_goals(pm_dir: Path) -> list[str]: + """Extract strategic goals from roadmap markdown.""" + roadmap_path = pm_dir / "roadmap.md" + if not roadmap_path.exists(): + return [] + + text = roadmap_path.read_text() + goals = [] + + for line in text.splitlines(): + line = line.strip() + if line.startswith("## ") or line.startswith("### "): + goals.append(line.lstrip("#").strip()) + elif line.startswith("- "): + goals.append(line.removeprefix("- ").strip()) + elif line.startswith("* "): + goals.append(line.removeprefix("* ").strip()) + + return goals + + +def 
score_roadmap_alignment(candidate: dict, goals: list[str]) -> float: + """Score how well a candidate aligns with roadmap goals. Returns 0.0-1.0.""" + if not goals: + return 0.5 + + title_lower = candidate["title"].lower() + max_alignment = 0.0 + + for goal in goals: + goal_words = set(goal.lower().split()) + goal_words -= {"the", "a", "an", "and", "or", "to", "for", "in", "of", "is", "with"} + if not goal_words: + continue + + matching = sum(1 for word in goal_words if word in title_lower) + alignment = matching / len(goal_words) if goal_words else 0.0 + max_alignment = max(max_alignment, alignment) + + return min(max_alignment, 1.0) + + +def suggest_action(candidate: dict) -> str: + """Suggest a concrete next action for a candidate.""" + source = candidate["source"] + days_stale = candidate.get("days_stale", 0) + labels = candidate.get("labels", []) + + if source == "github_pr": + if candidate.get("is_draft"): + return "Finish draft or close if abandoned" + if days_stale > 14: + return "Merge, close, or rebase — stale >2 weeks" + if days_stale > 7: + return "Review and merge or request changes" + return "Review PR" + elif source == "github_issue": + if any(lbl in ("critical", "priority:critical") for lbl in labels): + return "Fix immediately — critical severity" + if any(lbl in ("bug",) for lbl in labels): + return "Investigate and fix bug" + if days_stale > 30: + return "Triage: still relevant? Close or reprioritize" + return "Work on issue or delegate" + elif source == "local": + return "Pick up from local backlog" + return "Review" + + +def aggregate_and_rank( + issues: list[dict], + prs: list[dict], + local: list[dict], + goals: list[str], + top_n: int = TOP_N, +) -> tuple[list[dict], list[dict]]: + """Aggregate candidates from all sources and rank by weighted score. + + Returns (top_n items, next 5 near-misses). 
+ """ + scored = [] + + source_weights = { + "github_issue": WEIGHT_ISSUES, + "github_pr": WEIGHT_PRS, + "local": WEIGHT_LOCAL, + } + + all_candidates = issues + prs + local + + for candidate in all_candidates: + source = candidate["source"] + source_weight = source_weights.get(source, 0.25) + raw = candidate["raw_score"] + + alignment = score_roadmap_alignment(candidate, goals) + final_score = (source_weight * raw) + (WEIGHT_ROADMAP * alignment * 100) + + entry = { + "title": candidate["title"], + "source": candidate["source"], + "score": round(final_score, 1), + "raw_score": candidate["raw_score"], + "source_weight": source_weight, + "rationale": candidate["rationale"], + "item_id": candidate.get("item_id", ""), + "priority": candidate.get("priority", "MEDIUM"), + "alignment": round(alignment, 2), + "action": suggest_action(candidate), + } + # Preserve all metadata from the candidate + for key in ("url", "repo", "account", "labels", "created", "updated", + "days_stale", "comments", "is_draft", "score_breakdown"): + if key in candidate: + entry[key] = candidate[key] + + scored.append(entry) + + priority_order = {"HIGH": 0, "MEDIUM": 1, "LOW": 2} + scored.sort(key=lambda x: (-x["score"], priority_order.get(x["priority"], 1))) + + top = scored[:top_n] + for i, item in enumerate(top): + item["rank"] = i + 1 + + near_misses = scored[top_n:top_n + 5] + for i, item in enumerate(near_misses): + item["rank"] = top_n + i + 1 + + return top, near_misses + + +def build_repo_summary(all_candidates: list[dict]) -> dict: + """Build a per-repo, per-account summary of open work.""" + repos: dict[str, dict] = {} + accounts: dict[str, dict] = {} + + for c in all_candidates: + repo = c.get("repo", "local") + account = c.get("account", "local") + + if repo not in repos: + repos[repo] = {"issues": 0, "prs": 0, "high_priority": 0} + if account not in accounts: + accounts[account] = {"issues": 0, "prs": 0, "repos": set()} + + if c["source"] == "github_issue": + repos[repo]["issues"] += 
1 + accounts[account]["issues"] += 1 + elif c["source"] == "github_pr": + repos[repo]["prs"] += 1 + accounts[account]["prs"] += 1 + + if c.get("priority") == "HIGH": + repos[repo]["high_priority"] += 1 + + accounts[account]["repos"].add(repo) + + # Convert sets to lists for JSON serialization + for a in accounts.values(): + a["repos"] = sorted(a["repos"]) + + # Sort repos by total open items descending + sorted_repos = dict(sorted(repos.items(), key=lambda x: -(x[1]["issues"] + x[1]["prs"]))) + + return {"by_repo": sorted_repos, "by_account": accounts} + + +def generate_top5(project_root: Path, sources_path: Path | None = None) -> dict: + """Generate the Top 5 priority list from GitHub + local state.""" + pm_dir = project_root / ".pm" + + if sources_path is None: + sources_path = pm_dir / "sources.yaml" + + # Load GitHub sources config + sources = load_sources(sources_path) + + # Remember original account to restore after + original_account = get_current_gh_account() + + # Fetch from GitHub + all_issues = [] + all_prs = [] + accounts_queried = [] + + for source in sources: + account = source.get("account", "") + repos = source.get("repos", []) + if not account or not repos: + continue + + accounts_queried.append(account) + all_issues.extend(fetch_github_issues(account, repos)) + all_prs.extend(fetch_github_prs(account, repos)) + + # Restore original account + if original_account and accounts_queried: + run_gh(["auth", "switch", "--user", original_account]) + + # Load local overrides + local = [] + if pm_dir.exists(): + local = load_local_overrides(pm_dir) + + # Load roadmap goals + goals = extract_roadmap_goals(pm_dir) if pm_dir.exists() else [] + + # Aggregate and rank + all_candidates = all_issues + all_prs + local + top5, near_misses = aggregate_and_rank(all_issues, all_prs, local, goals) + summary = build_repo_summary(all_candidates) + + return { + "top5": top5, + "near_misses": near_misses, + "summary": summary, + "sources": { + "github_issues": 
len(all_issues), + "github_prs": len(all_prs), + "local_items": len(local), + "roadmap_goals": len(goals), + "accounts": accounts_queried, + }, + "total_candidates": len(all_candidates), + } + + +def main(): + """Main entry point.""" + parser = argparse.ArgumentParser(description="Generate Top 5 priorities from GitHub + local state") + parser.add_argument( + "--project-root", type=Path, default=Path.cwd(), help="Project root directory" + ) + parser.add_argument( + "--sources", type=Path, default=None, help="Path to sources.yaml (default: .pm/sources.yaml)" + ) + + args = parser.parse_args() + + try: + result = generate_top5(args.project_root, args.sources) + print(json.dumps(result, indent=2)) + return 0 + except Exception as e: + print(json.dumps({"error": str(e)}), file=sys.stderr) + return 1 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/docs/claude/skills/pm-architect/scripts/tests/agentic/test-top5-error-handling.yaml b/docs/claude/skills/pm-architect/scripts/tests/agentic/test-top5-error-handling.yaml new file mode 100644 index 000000000..1e281b6e1 --- /dev/null +++ b/docs/claude/skills/pm-architect/scripts/tests/agentic/test-top5-error-handling.yaml @@ -0,0 +1,88 @@ +# Outside-in test for /top5 error handling +# Validates generate_top5.py handles failure modes gracefully: +# invalid sources, missing gh CLI, malformed YAML. + +scenario: + name: "Top 5 Priorities - Error Handling" + description: | + Verifies that generate_top5.py degrades gracefully when GitHub is + unreachable, sources.yaml is malformed, or the project root is invalid. + The script should always return valid JSON, never crash. 
+ type: cli + level: 2 + tags: [cli, error-handling, top5, pm-architect, resilience] + + prerequisites: + - "Python 3.11+ is available" + - "PyYAML is installed" + + steps: + # Test 1: Non-existent project root (no .pm/ dir) + - action: launch + target: "python" + args: + - ".claude/skills/pm-architect/scripts/generate_top5.py" + - "--project-root" + - "/tmp/top5-test-nonexistent-path-12345" + description: "Run with non-existent project root" + timeout: 15s + + - action: verify_output + contains: '"top5"' + description: "Still returns valid JSON with top5 key" + + - action: verify_output + contains: '"total_candidates": 0' + description: "Reports zero candidates when no sources available" + + - action: verify_exit_code + expected: 0 + description: "Exits cleanly even with missing project root" + + # Test 2: Malformed sources.yaml + - action: launch + target: "bash" + args: + - "-c" + - | + TMPDIR=$(mktemp -d) && + mkdir -p "$TMPDIR/.pm" && + echo "not: [valid: yaml: {{{{" > "$TMPDIR/.pm/sources.yaml" && + python .claude/skills/pm-architect/scripts/generate_top5.py --project-root "$TMPDIR" --sources "$TMPDIR/.pm/sources.yaml" + description: "Run with malformed sources.yaml" + timeout: 15s + + # Script should handle YAML parse errors (may return error JSON or empty results) + - action: verify_exit_code + expected_one_of: [0, 1] + description: "Exits with 0 or 1, never crashes with traceback" + + # Test 3: Empty sources.yaml (valid but no accounts) + - action: launch + target: "bash" + args: + - "-c" + - | + TMPDIR=$(mktemp -d) && + mkdir -p "$TMPDIR/.pm" && + echo "github: []" > "$TMPDIR/.pm/sources.yaml" && + python .claude/skills/pm-architect/scripts/generate_top5.py --project-root "$TMPDIR" --sources "$TMPDIR/.pm/sources.yaml" + description: "Run with empty sources list" + timeout: 15s + + - action: verify_output + contains: '"top5"' + description: "Returns valid JSON with empty top5" + + - action: verify_output + contains: '"total_candidates": 0' + description: 
"Reports zero candidates" + + - action: verify_exit_code + expected: 0 + description: "Exits cleanly with empty sources" + + cleanup: + - action: stop_application + force: true + description: "Ensure all processes are terminated" diff --git a/docs/claude/skills/pm-architect/scripts/tests/agentic/test-top5-local-overrides.yaml b/docs/claude/skills/pm-architect/scripts/tests/agentic/test-top5-local-overrides.yaml new file mode 100644 index 000000000..3d763c94b --- /dev/null +++ b/docs/claude/skills/pm-architect/scripts/tests/agentic/test-top5-local-overrides.yaml @@ -0,0 +1,97 @@ +# Outside-in test for /top5 local backlog integration +# Validates generate_top5.py incorporates .pm/backlog items and roadmap goals +# into the priority ranking alongside (or instead of) GitHub data. + +scenario: + name: "Top 5 Priorities - Local Overrides and Roadmap Alignment" + description: | + Verifies that generate_top5.py reads .pm/backlog/items.yaml for local + priorities and .pm/roadmap.md for strategic alignment scoring. + Tests the full aggregation pipeline without requiring GitHub access. 
+ type: cli + level: 2 + tags: [cli, integration, top5, pm-architect, local, roadmap] + + prerequisites: + - "Python 3.11+ is available" + - "PyYAML is installed" + + steps: + # Setup local .pm/ state with backlog items and roadmap + - action: launch + target: "bash" + args: + - "-c" + - | + TMPDIR=$(mktemp -d) && + mkdir -p "$TMPDIR/.pm/backlog" && + cat > "$TMPDIR/.pm/backlog/items.yaml" << 'BACKLOG' + items: + - id: "local-001" + title: "Fix authentication timeout bug" + status: "READY" + priority: "HIGH" + estimated_hours: 1 + - id: "local-002" + title: "Add dashboard metrics" + status: "READY" + priority: "MEDIUM" + estimated_hours: 4 + - id: "local-003" + title: "Refactor logging module" + status: "IN_PROGRESS" + priority: "LOW" + estimated_hours: 8 + BACKLOG + cat > "$TMPDIR/.pm/roadmap.md" << 'ROADMAP' + ## Q1 Goals + ### Improve authentication reliability + - Fix timeout and retry logic + ### Add observability dashboard + - Metrics and monitoring + ROADMAP + cat > "$TMPDIR/.pm/sources.yaml" << 'SOURCES' + github: [] + SOURCES + python .claude/skills/pm-architect/scripts/generate_top5.py --project-root "$TMPDIR" --sources "$TMPDIR/.pm/sources.yaml" + description: "Run with local backlog items and roadmap goals, no GitHub sources" + timeout: 15s + + # Verify local items appear in output + - action: verify_output + contains: "Fix authentication timeout bug" + timeout: 5s + description: "HIGH priority READY item appears in results" + + - action: verify_output + contains: "Add dashboard metrics" + description: "MEDIUM priority READY item appears in results" + + # IN_PROGRESS items should NOT appear (only READY items are loaded) + - action: verify_output + not_contains: "Refactor logging module" + description: "IN_PROGRESS item is excluded (only READY items loaded)" + + # Verify source attribution + - action: verify_output + contains: '"source": "local"' + description: "Items attributed to local source" + + # Verify roadmap goals were loaded + - action: 
verify_output + contains: '"roadmap_goals"' + description: "Roadmap goals count present in sources" + + # Verify alignment scoring (auth bug should align with roadmap goal) + - action: verify_output + matches: '"alignment":\s*[0-9]' + description: "Items have alignment scores" + + - action: verify_exit_code + expected: 0 + description: "Script exits cleanly" + + cleanup: + - action: stop_application + force: true + description: "Ensure process is terminated" diff --git a/docs/claude/skills/pm-architect/scripts/tests/agentic/test-top5-ranking.yaml b/docs/claude/skills/pm-architect/scripts/tests/agentic/test-top5-ranking.yaml new file mode 100644 index 000000000..4fac284f8 --- /dev/null +++ b/docs/claude/skills/pm-architect/scripts/tests/agentic/test-top5-ranking.yaml @@ -0,0 +1,104 @@ +# Outside-in test for /top5 ranking correctness +# Validates that output is strictly ranked by score descending, +# limited to 5 items, and each item has a rank field 1-5. + +scenario: + name: "Top 5 Priorities - Ranking and Limit Enforcement" + description: | + Verifies that generate_top5.py returns exactly TOP_N (5) items, + ranked by descending score, with rank fields 1 through 5. + Uses local backlog with >5 items to verify the limit. 
+ type: cli + level: 2 + tags: [cli, integration, top5, pm-architect, ranking] + + prerequisites: + - "Python 3.11+ is available" + - "PyYAML is installed" + + steps: + # Setup: 7 local items to verify only top 5 are returned + - action: launch + target: "bash" + args: + - "-c" + - | + TMPDIR=$(mktemp -d) && + mkdir -p "$TMPDIR/.pm/backlog" && + cat > "$TMPDIR/.pm/backlog/items.yaml" << 'BACKLOG' + items: + - id: "item-1" + title: "Critical security fix" + status: "READY" + priority: "HIGH" + estimated_hours: 1 + - id: "item-2" + title: "API rate limiting" + status: "READY" + priority: "HIGH" + estimated_hours: 2 + - id: "item-3" + title: "Database migration" + status: "READY" + priority: "MEDIUM" + estimated_hours: 4 + - id: "item-4" + title: "Update documentation" + status: "READY" + priority: "MEDIUM" + estimated_hours: 6 + - id: "item-5" + title: "Add unit tests" + status: "READY" + priority: "MEDIUM" + estimated_hours: 3 + - id: "item-6" + title: "Refactor config loader" + status: "READY" + priority: "LOW" + estimated_hours: 8 + - id: "item-7" + title: "Add logging headers" + status: "READY" + priority: "LOW" + estimated_hours: 10 + BACKLOG + cat > "$TMPDIR/.pm/sources.yaml" << 'SOURCES' + github: [] + SOURCES + python .claude/skills/pm-architect/scripts/generate_top5.py --project-root "$TMPDIR" --sources "$TMPDIR/.pm/sources.yaml" + description: "Run with 7 local items to verify top-5 limit" + timeout: 15s + + # Verify rank fields 1-5 exist + - action: verify_output + contains: '"rank": 1' + timeout: 5s + description: "First ranked item present" + + - action: verify_output + contains: '"rank": 5' + description: "Fifth ranked item present" + + # Verify rank 6 and 7 are NOT in output (limit enforced) + - action: verify_output + not_contains: '"rank": 6' + description: "No sixth rank (limit to 5)" + + - action: verify_output + not_contains: '"rank": 7' + description: "No seventh rank (limit to 5)" + + # Verify total_candidates reflects all 7 items considered + - 
action: verify_output + contains: '"total_candidates": 7' + description: "Total candidates count includes all 7 items" + + - action: verify_exit_code + expected: 0 + description: "Script exits cleanly" + + cleanup: + - action: stop_application + force: true + description: "Ensure process is terminated" diff --git a/docs/claude/skills/pm-architect/scripts/tests/agentic/test-top5-smoke.yaml b/docs/claude/skills/pm-architect/scripts/tests/agentic/test-top5-smoke.yaml new file mode 100644 index 000000000..d3d8ee24c --- /dev/null +++ b/docs/claude/skills/pm-architect/scripts/tests/agentic/test-top5-smoke.yaml @@ -0,0 +1,52 @@ +# Outside-in smoke test for /top5 priority aggregation +# Validates the generate_top5.py CLI produces valid JSON output +# with the expected structure from a user's perspective. + +scenario: + name: "Top 5 Priorities - Smoke Test" + description: | + Verifies that generate_top5.py runs successfully, produces valid JSON, + and contains the expected top-level keys (top5, sources, total_candidates). + Uses an empty project root so no GitHub calls are made. 
+ type: cli + level: 1 + tags: [cli, smoke, top5, pm-architect] + + prerequisites: + - "Python 3.11+ is available" + - "PyYAML is installed" + + steps: + # Run with empty project root (no .pm/ dir, no sources.yaml) + - action: launch + target: "python" + args: + - "scripts/generate_top5.py" + - "--project-root" + - "/tmp/top5-test-empty" + working_directory: ".claude/skills/pm-architect" + description: "Run generate_top5.py with empty project root" + timeout: 15s + + # Verify valid JSON output with expected keys + - action: verify_output + contains: '"top5"' + timeout: 5s + description: "Output contains top5 key" + + - action: verify_output + contains: '"sources"' + description: "Output contains sources key" + + - action: verify_output + contains: '"total_candidates"' + description: "Output contains total_candidates key" + + - action: verify_exit_code + expected: 0 + description: "Script exits cleanly with code 0" + + cleanup: + - action: stop_application + force: true + description: "Ensure process is terminated" diff --git a/docs/claude/skills/pm-architect/scripts/tests/agentic/test-top5-with-sources.yaml b/docs/claude/skills/pm-architect/scripts/tests/agentic/test-top5-with-sources.yaml new file mode 100644 index 000000000..a76e52d4a --- /dev/null +++ b/docs/claude/skills/pm-architect/scripts/tests/agentic/test-top5-with-sources.yaml @@ -0,0 +1,83 @@ +# Outside-in test for /top5 with configured sources +# Validates generate_top5.py queries GitHub when sources.yaml is provided, +# produces ranked output with score breakdown and source attribution. + +scenario: + name: "Top 5 Priorities - GitHub Source Aggregation" + description: | + Verifies that generate_top5.py correctly reads a sources.yaml config, + queries GitHub for issues and PRs, aggregates scores, and returns + a ranked list with proper source attribution and metadata. + Requires gh CLI authenticated with at least one account. 
+ type: cli + level: 2 + tags: [cli, integration, top5, pm-architect, github] + + prerequisites: + - "Python 3.11+ is available" + - "PyYAML is installed" + - "gh CLI is authenticated" + - "Network access to GitHub API" + + environment: + variables: + GH_PAGER: "" + + steps: + # Setup: create a minimal sources.yaml pointing to a known public repo + - action: launch + target: "bash" + args: + - "-c" + - | + TMPDIR=$(mktemp -d) && + mkdir -p "$TMPDIR/.pm" && + cat > "$TMPDIR/.pm/sources.yaml" << 'SOURCES' + github: + - account: rysweet + repos: + - amplihack + SOURCES + python .claude/skills/pm-architect/scripts/generate_top5.py --project-root "$TMPDIR" --sources "$TMPDIR/.pm/sources.yaml" + description: "Run generate_top5.py with sources pointing to amplihack repo" + timeout: 45s + + # Verify JSON structure + - action: verify_output + contains: '"top5"' + timeout: 5s + description: "Output has top5 array" + + - action: verify_output + contains: '"github_issues"' + description: "Sources breakdown includes github_issues count" + + - action: verify_output + contains: '"github_prs"' + description: "Sources breakdown includes github_prs count" + + - action: verify_output + contains: '"accounts"' + description: "Sources breakdown includes accounts queried" + + # Verify ranked items have required fields + - action: verify_output + matches: '"score":\s*[0-9]' + description: "Items have numeric scores" + + - action: verify_output + matches: '"source":\s*"github_(issue|pr)"' + description: "Items have source attribution" + + - action: verify_output + matches: '"rationale":' + description: "Items include rationale text" + + - action: verify_exit_code + expected: 0 + description: "Script exits cleanly" + + cleanup: + - action: stop_application + force: true + description: "Ensure process is terminated" diff --git a/docs/claude/skills/pm-architect/scripts/tests/conftest.py b/docs/claude/skills/pm-architect/scripts/tests/conftest.py index 40af58e5a..448aa9983 100644 --- 
a/docs/claude/skills/pm-architect/scripts/tests/conftest.py +++ b/docs/claude/skills/pm-architect/scripts/tests/conftest.py @@ -5,6 +5,7 @@ from unittest.mock import MagicMock import pytest +import yaml @pytest.fixture @@ -131,3 +132,129 @@ def sample_daily_status_output() -> str: 1. Prioritize design review for API refactoring 2. Address technical debt in authentication system """ + + +# --- Top 5 Priority Aggregation Fixtures --- + + +@pytest.fixture +def pm_dir(tmp_path: Path) -> Path: + """Create .pm/ directory structure with sample data.""" + pm = tmp_path / ".pm" + (pm / "backlog").mkdir(parents=True) + (pm / "workstreams").mkdir(parents=True) + (pm / "delegations").mkdir(parents=True) + return pm + + +@pytest.fixture +def sample_backlog_items() -> dict: + """Sample backlog items YAML data.""" + return { + "items": [ + { + "id": "BL-001", + "title": "Fix authentication bug", + "description": "Auth tokens expire prematurely", + "priority": "HIGH", + "estimated_hours": 2, + "status": "READY", + "tags": ["auth", "bug"], + "dependencies": [], + }, + { + "id": "BL-002", + "title": "Implement config parser", + "description": "Parse YAML and JSON config files", + "priority": "MEDIUM", + "estimated_hours": 4, + "status": "READY", + "tags": ["config", "core"], + "dependencies": [], + }, + { + "id": "BL-003", + "title": "Add logging framework", + "description": "Structured logging with JSON output", + "priority": "LOW", + "estimated_hours": 8, + "status": "READY", + "tags": ["infrastructure"], + "dependencies": ["BL-002"], + }, + { + "id": "BL-004", + "title": "Write API documentation", + "description": "Document all REST endpoints", + "priority": "MEDIUM", + "estimated_hours": 3, + "status": "READY", + "tags": ["docs"], + "dependencies": [], + }, + { + "id": "BL-005", + "title": "Database migration tool", + "description": "Automated schema migrations", + "priority": "HIGH", + "estimated_hours": 6, + "status": "READY", + "tags": ["database", "core"], + "dependencies": 
["BL-002"], + }, + { + "id": "BL-006", + "title": "Refactor test suite", + "description": "Improve test performance and coverage", + "priority": "MEDIUM", + "estimated_hours": 1, + "status": "IN_PROGRESS", + "tags": ["test"], + "dependencies": [], + }, + ] + } + + +@pytest.fixture +def populated_pm(pm_dir, sample_backlog_items): + """Create fully populated .pm/ directory.""" + with open(pm_dir / "backlog" / "items.yaml", "w") as f: + yaml.dump(sample_backlog_items, f) + + ws_data = { + "id": "ws-1", + "backlog_id": "BL-006", + "title": "Test Suite Refactor", + "agent": "builder", + "status": "RUNNING", + "last_activity": "2020-01-01T00:00:00Z", + } + with open(pm_dir / "workstreams" / "ws-1.yaml", "w") as f: + yaml.dump(ws_data, f) + + deleg_data = { + "id": "DEL-001", + "title": "Implement caching layer", + "status": "READY", + "backlog_id": "BL-002", + } + with open(pm_dir / "delegations" / "del-001.yaml", "w") as f: + yaml.dump(deleg_data, f) + + roadmap = """# Project Roadmap + +## Q1 Goals + +### Core Infrastructure +- Implement config parser +- Database migration tool +- Logging framework + +### Quality +- Test coverage above 80% +- API documentation complete +""" + (pm_dir / "roadmap.md").write_text(roadmap) + + return pm_dir diff --git a/docs/claude/skills/pm-architect/scripts/tests/test_generate_top5.py b/docs/claude/skills/pm-architect/scripts/tests/test_generate_top5.py new file mode 100644 index 000000000..8928e6a21 --- /dev/null +++ b/docs/claude/skills/pm-architect/scripts/tests/test_generate_top5.py @@ -0,0 +1,420 @@ +"""Tests for generate_top5.py - GitHub-native priority aggregation.""" + +import json +import sys +from pathlib import Path +from unittest.mock import patch + +import pytest +import yaml + +sys.path.insert(0, str(Path(__file__).parent.parent)) +from generate_top5 import ( + PRIORITY_LABELS, + aggregate_and_rank, + build_repo_summary, + extract_roadmap_goals, + fetch_github_issues, + fetch_github_prs, + generate_top5, + 
load_local_overrides, + load_sources, + score_roadmap_alignment, + suggest_action, +) + + +class TestLoadSources: + """Tests for sources.yaml loading.""" + + def test_no_sources_file(self, project_root): + """Returns empty list when sources.yaml doesn't exist.""" + result = load_sources(project_root / "sources.yaml") + assert result == [] + + def test_loads_github_sources(self, tmp_path): + """Parses sources.yaml correctly.""" + sources = { + "github": [ + {"account": "rysweet", "repos": ["amplihack", "azlin"]}, + {"account": "rysweet_microsoft", "repos": ["cloud-ecosystem-security/SedanDelivery"]}, + ] + } + path = tmp_path / "sources.yaml" + with open(path, "w") as f: + yaml.dump(sources, f) + + result = load_sources(path) + assert len(result) == 2 + assert result[0]["account"] == "rysweet" + assert result[1]["repos"] == ["cloud-ecosystem-security/SedanDelivery"] + + +class TestFetchGithubIssues: + """Tests for GitHub issue fetching (mocked).""" + + def test_returns_empty_on_gh_failure(self): + """Returns empty list when gh CLI fails.""" + with patch("generate_top5.run_gh", return_value=None): + result = fetch_github_issues("rysweet", ["amplihack"]) + assert result == [] + + def test_parses_issue_data(self): + """Correctly parses gh API JSON output with full metadata.""" + mock_output = json.dumps({ + "repo": "rysweet/amplihack", + "title": "Fix auth bug", + "labels": ["bug", "high"], + "created": "2026-03-01T00:00:00Z", + "updated": "2026-03-07T00:00:00Z", + "number": 123, + "comments": 5, + }) + with patch("generate_top5.run_gh", return_value=mock_output): + result = fetch_github_issues("rysweet", ["amplihack"]) + assert len(result) == 1 + item = result[0] + assert item["source"] == "github_issue" + assert item["priority"] == "HIGH" + assert item["url"] == "https://github.com/rysweet/amplihack/issues/123" + assert item["account"] == "rysweet" + assert item["labels"] == ["bug", "high"] + assert item["comments"] == 5 + assert "score_breakdown" in item + assert 
"label_priority" in item["score_breakdown"] + + def test_priority_from_labels(self): + """Labels correctly map to priority scores.""" + mock_output = json.dumps({ + "repo": "r/a", "title": "Critical issue", + "labels": ["critical"], "created": "2026-03-07T00:00:00Z", + "updated": "2026-03-07T00:00:00Z", "number": 1, "comments": 0, + }) + with patch("generate_top5.run_gh", return_value=mock_output): + result = fetch_github_issues("r", ["a"]) + assert result[0]["priority"] == "HIGH" + assert result[0]["score_breakdown"]["label_priority"] == 1.0 + + def test_staleness_boosts_score(self): + """Older issues score higher due to staleness.""" + fresh = json.dumps({ + "repo": "r/a", "title": "Fresh", "labels": [], + "created": "2026-03-07T00:00:00Z", "updated": "2026-03-07T00:00:00Z", + "number": 1, "comments": 0, + }) + stale = json.dumps({ + "repo": "r/a", "title": "Stale", "labels": [], + "created": "2026-01-01T00:00:00Z", "updated": "2026-01-01T00:00:00Z", + "number": 2, "comments": 0, + }) + with patch("generate_top5.run_gh", return_value=f"{fresh}\n{stale}"): + result = fetch_github_issues("r", ["a"]) + stale_item = next(c for c in result if "Stale" in c["title"]) + fresh_item = next(c for c in result if "Fresh" in c["title"]) + assert stale_item["raw_score"] > fresh_item["raw_score"] + assert stale_item["days_stale"] > fresh_item["days_stale"] + + +class TestFetchGithubPrs: + """Tests for GitHub PR fetching (mocked).""" + + def test_returns_empty_on_failure(self): + """Returns empty list when gh CLI fails.""" + with patch("generate_top5.run_gh", return_value=None): + result = fetch_github_prs("rysweet", ["amplihack"]) + assert result == [] + + def test_draft_pr_scores_lower(self): + """Draft PRs score lower than non-drafts.""" + draft = json.dumps({ + "repo": "r/a", "title": "Draft PR", "labels": [], + "created": "2026-03-07T00:00:00Z", "updated": "2026-03-07T00:00:00Z", + "number": 1, "draft": True, "comments": 0, + }) + ready = json.dumps({ + "repo": "r/a", 
"title": "Ready PR", "labels": [], + "created": "2026-03-07T00:00:00Z", "updated": "2026-03-07T00:00:00Z", + "number": 2, "draft": False, "comments": 0, + }) + with patch("generate_top5.run_gh", return_value=f"{draft}\n{ready}"): + result = fetch_github_prs("r", ["a"]) + draft_item = next(c for c in result if "Draft" in c["title"]) + ready_item = next(c for c in result if "Ready" in c["title"]) + assert ready_item["raw_score"] > draft_item["raw_score"] + assert draft_item["is_draft"] is True + assert ready_item["is_draft"] is False + + def test_pr_has_url_and_metadata(self): + """PRs include correct GitHub URL and metadata.""" + mock = json.dumps({ + "repo": "rysweet/amplihack", "title": "Fix stuff", "labels": ["bug"], + "created": "2026-03-07T00:00:00Z", "updated": "2026-03-07T00:00:00Z", + "number": 42, "draft": False, "comments": 0, + }) + with patch("generate_top5.run_gh", return_value=mock): + result = fetch_github_prs("rysweet", ["amplihack"]) + assert result[0]["url"] == "https://github.com/rysweet/amplihack/pull/42" + assert result[0]["account"] == "rysweet" + assert result[0]["labels"] == ["bug"] + + +class TestLoadLocalOverrides: + """Tests for local .pm/ backlog loading.""" + + def test_no_pm_dir(self, project_root): + """Returns empty when .pm doesn't exist.""" + result = load_local_overrides(project_root / ".pm") + assert result == [] + + def test_loads_ready_items(self, pm_dir): + """Loads READY items from backlog.""" + items = { + "items": [ + {"id": "BL-001", "title": "Task A", "status": "READY", "priority": "HIGH", "estimated_hours": 1}, + {"id": "BL-002", "title": "Task B", "status": "DONE", "priority": "HIGH"}, + ] + } + with open(pm_dir / "backlog" / "items.yaml", "w") as f: + yaml.dump(items, f) + + result = load_local_overrides(pm_dir) + assert len(result) == 1 + assert result[0]["source"] == "local" + assert result[0]["item_id"] == "BL-001" + + +class TestExtractRoadmapGoals: + """Tests for roadmap goal extraction.""" + + def 
test_no_roadmap(self, project_root): + """Returns empty when no roadmap exists.""" + result = extract_roadmap_goals(project_root / ".pm") + assert result == [] + + def test_extracts_goals(self, populated_pm): + """Extracts goals from roadmap markdown.""" + goals = extract_roadmap_goals(populated_pm) + assert len(goals) > 0 + assert any("config" in g.lower() for g in goals) + + +class TestScoreRoadmapAlignment: + """Tests for roadmap alignment scoring.""" + + def test_no_goals_returns_neutral(self): + assert score_roadmap_alignment({"title": "X", "source": "github_issue"}, []) == 0.5 + + def test_matching_title_scores_high(self): + score = score_roadmap_alignment( + {"title": "Implement config parser", "source": "github_issue"}, + ["config parser implementation"], + ) + assert score > 0.0 + + def test_unrelated_title_scores_zero(self): + score = score_roadmap_alignment( + {"title": "Fix authentication bug", "source": "github_issue"}, + ["database migration tool"], + ) + assert score == 0.0 + + +class TestSuggestAction: + """Tests for action suggestion logic.""" + + def test_critical_issue(self): + action = suggest_action({"source": "github_issue", "labels": ["critical"]}) + assert "immediately" in action.lower() + + def test_bug_issue(self): + action = suggest_action({"source": "github_issue", "labels": ["bug"], "days_stale": 1}) + assert "bug" in action.lower() + + def test_stale_pr(self): + action = suggest_action({"source": "github_pr", "is_draft": False, "days_stale": 20}) + assert "stale" in action.lower() or "merge" in action.lower() + + def test_draft_pr(self): + action = suggest_action({"source": "github_pr", "is_draft": True, "days_stale": 1}) + assert "draft" in action.lower() + + def test_local_item(self): + action = suggest_action({"source": "local"}) + assert "backlog" in action.lower() + + +class TestAggregateAndRank: + """Tests for the core aggregation and ranking logic.""" + + def test_empty_input(self): + top, near = aggregate_and_rank([], [], [], 
[]) + assert top == [] + assert near == [] + + def test_returns_max_5(self): + candidates = [ + {"title": f"Item {i}", "source": "github_issue", "raw_score": float(100 - i), + "rationale": "test", "item_id": f"#{i}", "priority": "MEDIUM"} + for i in range(10) + ] + top, near = aggregate_and_rank(candidates, [], [], []) + assert len(top) == 5 + assert len(near) == 5 + + def test_ranked_in_order(self): + issues = [ + {"title": "Low", "source": "github_issue", "raw_score": 30.0, + "rationale": "test", "item_id": "#1", "priority": "LOW"}, + {"title": "High", "source": "github_issue", "raw_score": 90.0, + "rationale": "test", "item_id": "#2", "priority": "HIGH"}, + ] + top, _ = aggregate_and_rank(issues, [], [], []) + assert top[0]["title"] == "High" + assert top[1]["title"] == "Low" + + def test_mixed_sources(self): + issues = [{"title": "Issue", "source": "github_issue", "raw_score": 80.0, + "rationale": "test", "item_id": "#1", "priority": "HIGH"}] + prs = [{"title": "PR", "source": "github_pr", "raw_score": 80.0, + "rationale": "test", "item_id": "#2", "priority": "HIGH", + "url": "https://github.com/r/a/pull/2", "repo": "r/a"}] + local = [{"title": "Local", "source": "local", "raw_score": 80.0, + "rationale": "test", "item_id": "BL-1", "priority": "MEDIUM"}] + top, _ = aggregate_and_rank(issues, prs, local, []) + assert top[0]["source"] == "github_issue" + assert top[1]["source"] == "github_pr" + assert top[2]["source"] == "local" + + def test_roadmap_alignment_boosts_score(self): + issues = [ + {"title": "Implement config parser", "source": "github_issue", "raw_score": 50.0, + "rationale": "test", "item_id": "#1", "priority": "MEDIUM"}, + {"title": "Fix random thing", "source": "github_issue", "raw_score": 50.0, + "rationale": "test", "item_id": "#2", "priority": "MEDIUM"}, + ] + top, _ = aggregate_and_rank(issues, [], [], ["config parser implementation"]) + config_item = next(r for r in top if "config" in r["title"].lower()) + other_item = next(r for r in top if 
"random" in r["title"].lower()) + assert config_item["score"] > other_item["score"] + + def test_tiebreak_by_priority(self): + issues = [ + {"title": "Low priority", "source": "github_issue", "raw_score": 50.0, + "rationale": "test", "item_id": "#1", "priority": "LOW"}, + {"title": "High priority", "source": "github_issue", "raw_score": 50.0, + "rationale": "test", "item_id": "#2", "priority": "HIGH"}, + ] + top, _ = aggregate_and_rank(issues, [], [], []) + assert top[0]["priority"] == "HIGH" + assert top[1]["priority"] == "LOW" + + def test_preserves_url_and_repo(self): + prs = [{"title": "PR", "source": "github_pr", "raw_score": 80.0, + "rationale": "test", "item_id": "#1", "priority": "MEDIUM", + "url": "https://github.com/r/a/pull/1", "repo": "r/a"}] + top, _ = aggregate_and_rank([], prs, [], []) + assert top[0]["url"] == "https://github.com/r/a/pull/1" + assert top[0]["repo"] == "r/a" + + def test_items_have_action(self): + issues = [{"title": "Bug", "source": "github_issue", "raw_score": 80.0, + "rationale": "test", "item_id": "#1", "priority": "HIGH", + "labels": ["bug"], "days_stale": 5}] + top, _ = aggregate_and_rank(issues, [], [], []) + assert "action" in top[0] + assert len(top[0]["action"]) > 0 + + def test_near_misses_returned(self): + candidates = [ + {"title": f"Item {i}", "source": "github_issue", "raw_score": float(100 - i), + "rationale": "test", "item_id": f"#{i}", "priority": "MEDIUM"} + for i in range(8) + ] + top, near = aggregate_and_rank(candidates, [], [], []) + assert len(top) == 5 + assert len(near) == 3 + assert near[0]["rank"] == 6 + + +class TestBuildRepoSummary: + """Tests for per-repo summary generation.""" + + def test_empty_candidates(self): + result = build_repo_summary([]) + assert result["by_repo"] == {} + assert result["by_account"] == {} + + def test_counts_by_repo(self): + candidates = [ + {"source": "github_issue", "repo": "r/a", "account": "x", "priority": "HIGH"}, + {"source": "github_issue", "repo": "r/a", "account": 
"x", "priority": "MEDIUM"}, + {"source": "github_pr", "repo": "r/a", "account": "x", "priority": "MEDIUM"}, + {"source": "github_issue", "repo": "r/b", "account": "x", "priority": "HIGH"}, + ] + result = build_repo_summary(candidates) + assert result["by_repo"]["r/a"]["issues"] == 2 + assert result["by_repo"]["r/a"]["prs"] == 1 + assert result["by_repo"]["r/a"]["high_priority"] == 1 + assert result["by_repo"]["r/b"]["issues"] == 1 + + def test_counts_by_account(self): + candidates = [ + {"source": "github_issue", "repo": "r/a", "account": "alice", "priority": "HIGH"}, + {"source": "github_pr", "repo": "r/b", "account": "bob", "priority": "MEDIUM"}, + ] + result = build_repo_summary(candidates) + assert result["by_account"]["alice"]["issues"] == 1 + assert result["by_account"]["bob"]["prs"] == 1 + assert "r/a" in result["by_account"]["alice"]["repos"] + + +class TestGenerateTop5: + """Tests for the main generate_top5 function.""" + + def test_no_sources_no_pm(self, project_root): + with patch("generate_top5.get_current_gh_account", return_value="rysweet"): + result = generate_top5(project_root) + assert result["top5"] == [] + assert result["near_misses"] == [] + assert result["total_candidates"] == 0 + + def test_github_failure_falls_back_to_local(self, populated_pm): + sources = {"github": [{"account": "test", "repos": ["test/repo"]}]} + sources_path = populated_pm / "sources.yaml" + with open(sources_path, "w") as f: + yaml.dump(sources, f) + + with patch("generate_top5.run_gh", return_value=None), \ + patch("generate_top5.get_current_gh_account", return_value="test"): + result = generate_top5(populated_pm.parent, sources_path) + assert result["sources"]["local_items"] > 0 + assert result["sources"]["github_issues"] == 0 + + def test_output_has_summary(self): + with patch("generate_top5.get_current_gh_account", return_value="test"), \ + patch("generate_top5.load_sources", return_value=[]): + result = generate_top5(Path("/nonexistent")) + assert "summary" in result 
+ assert "near_misses" in result + + def test_items_have_required_fields(self): + issues = [{"title": "Test", "source": "github_issue", "raw_score": 80.0, + "rationale": "test", "item_id": "#1", "priority": "HIGH", + "url": "https://github.com/r/a/issues/1", "repo": "r/a"}] + top, _ = aggregate_and_rank(issues, [], [], []) + required = {"rank", "title", "source", "score", "rationale", "priority", "action", "alignment"} + for item in top: + assert required.issubset(item.keys()), f"Missing: {required - item.keys()}" + + +class TestPriorityLabels: + """Tests for label-to-priority mapping.""" + + def test_critical_is_highest(self): + assert PRIORITY_LABELS["critical"] == 1.0 + + def test_bug_is_high(self): + assert PRIORITY_LABELS["bug"] == 0.8 + + def test_enhancement_is_medium(self): + assert PRIORITY_LABELS["enhancement"] == 0.5 diff --git a/pyproject.toml b/pyproject.toml index 2f85e6867..62668afe7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ backend-path = ["."] [project] name = "amplihack" -version = "0.5.119" +version = "0.5.120" description = "Amplifier bundle for agentic coding with comprehensive skills, recipes, and workflows" requires-python = ">=3.11" dependencies = [