Skip to content

Commit bea7976

Browse files
committed
refactor(linear): reorganize Linear metrics into separate modules
This commit refactors the Linear metrics functionality into separate modules for better organization and maintainability. The changes include: 1. Moving Linear-related code to a new 'linear' directory 2. Splitting metrics calculation and display into separate files 3. Creating a new metrics model for Linear data 4. Updating imports in the review command to use the new structure 5. Adding the new 'linear/__pycache__' directory to .gitignore 6. Incrementing the package version to 0.1.23 These changes improve code organization, separation of concerns, and make the Linear metrics functionality more modular and easier to maintain.
1 parent 818e822 commit bea7976

11 files changed

Lines changed: 926 additions & 461 deletions

File tree

.gitignore

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,3 +7,4 @@ config.py
77
wellcode_cli.egg-info
88
wellcode_cli/github/__pycache__
99
wellcode_cli/commands/__pycache__
10+
wellcode_cli/linear/__pycache__

wellcode_cli/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
__version__ = "0.1.22"
1+
__version__ = "0.1.23"

wellcode_cli/commands/review.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,8 @@
88
from ..github.github_metrics import get_github_metrics
99
from ..github.github_display import display_github_metrics
1010
from ..github.github_format_ai import format_ai_response, get_ai_analysis
11-
from ..linear_metrics import get_linear_metrics, display_linear_metrics
11+
from ..linear.linear_metrics import get_linear_metrics
12+
from ..linear.linear_display import display_linear_metrics
1213
from ..split_metrics import get_split_metrics, display_split_metrics
1314
from ..utils import save_analysis_data
1415
from .config import load_config,config
@@ -94,8 +95,7 @@ def review(start_date, end_date, user, team):
9495
if config_data.get('ANTHROPIC_API_KEY'):
9596
try:
9697
status.update("Generating AI analysis...")
97-
analysis_result = get_ai_analysis(all_metrics)
98-
console.print("\n[bold cyan]AI Analysis:[/]")
98+
analysis_result = get_ai_analysis(all_metrics)
9999
format_ai_response(analysis_result)
100100
except InternalServerError as e:
101101
if "overloaded_error" in str(e):

wellcode_cli/github/github_format_ai.py

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -26,8 +26,6 @@ def format_ai_response(response):
2626
if not sections:
2727
sections = [('general', para.strip()) for para in analysis_content.split('\n') if para.strip()]
2828

29-
# Create a panel for the entire analysis
30-
console.print("\n[bold green]AI Analysis[/]")
3129

3230
for section, content in sections:
3331
# Convert section name to title case and replace underscores with spaces
@@ -85,7 +83,6 @@ def get_ai_analysis(all_metrics):
8583
# GitHub metrics
8684
if 'github' in all_metrics:
8785
github_data = all_metrics['github']
88-
print("GitHub data type:", type(github_data))
8986
metrics_json = json.dumps(github_data, cls=MetricsJSONEncoder, indent=2, default=str)
9087
metrics_summary = {'github': json.loads(metrics_json)}
9188

wellcode_cli/github/github_metrics.py

Lines changed: 19 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -135,23 +135,36 @@ def process_repository(repo, org_metrics: OrganizationMetrics, start_date, end_d
135135
"""Process a single repository's metrics"""
136136
repo_metrics = org_metrics.get_or_create_repository(repo.name, repo.default_branch)
137137

138+
# Initialize last_updated to start_date
139+
repo_metrics.last_updated = start_date
140+
138141
pulls = repo.get_pulls(state='all')
139142
for pr in pulls:
140143
try:
141144
pr_created = ensure_datetime(pr.created_at)
142145
if not (start_date <= pr_created <= end_date):
143146
continue
144147

148+
# Update repository timestamp with PR creation date
149+
if pr_created > repo_metrics.last_updated:
150+
repo_metrics.last_updated = pr_created
151+
152+
# If PR is merged, also check merge date
153+
if pr.merged:
154+
merge_date = ensure_datetime(pr.merged_at)
155+
if merge_date > repo_metrics.last_updated and merge_date <= end_date:
156+
repo_metrics.last_updated = merge_date
157+
145158
process_pr(pr, repo_metrics, org_metrics)
146159
except Exception as e:
147160
logging.warning(f"Error processing PR {pr.number}: {str(e)}")
148161
continue
149-
150-
# Update repository timestamp after processing
151-
repo_metrics.update_timestamp()
152162

153163
def process_pr(pr, repo_metrics: RepositoryMetrics, org_metrics: OrganizationMetrics):
154164
"""Process a single pull request with complete metrics tracking"""
165+
pr_timestamp = ensure_datetime(pr.created_at)
166+
repo_metrics.update_timestamp(pr_timestamp)
167+
155168
try:
156169
# Add PR context
157170
logging.debug(f"Processing PR #{pr.number} - '{pr.title}' by {pr.user.login if pr.user else 'unknown'}")
@@ -319,6 +332,9 @@ def process_pr(pr, repo_metrics: RepositoryMetrics, org_metrics: OrganizationMet
319332
if reviewer_team:
320333
repo_metrics.teams_involved.add(reviewer_team)
321334

335+
merge_timestamp = ensure_datetime(pr.merged_at)
336+
repo_metrics.update_timestamp(merge_timestamp)
337+
322338
except Exception as e:
323339
logging.error(f"Error processing PR {pr.number}: {str(e)}", exc_info=True)
324340

wellcode_cli/github/models/metrics.py

Lines changed: 13 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
from dataclasses import dataclass, field
2-
from typing import Dict, List, Set
2+
from typing import Dict, List, Set, Optional
33
from collections import defaultdict
44
from datetime import datetime, timezone
55
import statistics
@@ -303,26 +303,31 @@ class UserMetrics(BaseMetrics):
303303
class RepositoryMetrics(BaseMetrics):
304304
name: str
305305
default_branch: str = "main"
306-
prs_created: int = 0
307-
prs_merged: int = 0
308-
prs_merged_to_main: int = 0
309-
review_metrics: ReviewMetrics = field(default_factory=ReviewMetrics)
310306
code_metrics: CodeMetrics = field(default_factory=CodeMetrics)
307+
review_metrics: ReviewMetrics = field(default_factory=ReviewMetrics)
311308
time_metrics: TimeMetrics = field(default_factory=TimeMetrics)
312309
collaboration_metrics: CollaborationMetrics = field(default_factory=CollaborationMetrics)
313310
bottleneck_metrics: BottleneckMetrics = field(default_factory=BottleneckMetrics)
314311
contributors: Set[str] = field(default_factory=set)
315312
teams_involved: Set[str] = field(default_factory=set)
316-
last_updated: datetime = field(default_factory=datetime.now)
313+
prs_created: int = 0
314+
prs_merged: int = 0
315+
prs_merged_to_main: int = 0
316+
last_updated: Optional[datetime] = None
317317

318318
def update_teams(self, author_team: str, reviewer_team: str):
319319
if author_team:
320320
self.teams_involved.add(author_team)
321321
if reviewer_team:
322322
self.teams_involved.add(reviewer_team)
323323

324-
def update_timestamp(self):
325-
self.last_updated = datetime.now(timezone.utc)
324+
def update_timestamp(self, timestamp: Optional[datetime] = None):
325+
"""Update the last activity timestamp"""
326+
if timestamp is None:
327+
timestamp = datetime.now(timezone.utc)
328+
329+
if self.last_updated is None or timestamp > self.last_updated:
330+
self.last_updated = timestamp
326331

327332
@dataclass
328333
class OrganizationMetrics(BaseMetrics):
Lines changed: 155 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,155 @@
1+
from rich.console import Console
2+
from rich.panel import Panel
3+
from rich.box import ROUNDED
4+
from datetime import datetime, timezone
5+
import statistics
6+
7+
console = Console()
8+
9+
def format_time(hours: float) -> str:
    """Render a duration given in hours as a human-friendly string.

    Durations of a day or more are shown in days (one decimal), an hour
    or more in hours (one decimal), and anything shorter in whole minutes.
    """
    if hours >= 24:
        return f"{round(hours / 24, 1)} days"
    if hours >= 1:
        return f"{round(hours, 1)} hours"
    return f"{int(hours * 60)} minutes"
18+
19+
def display_linear_metrics(org_metrics):
    """Display Linear metrics with a modern UI using Rich components."""
    # Header panel: organization name and report timestamp.
    generated_at = datetime.now(timezone.utc)
    header = [
        "[bold cyan]Linear Engineering Analytics[/]",
        f"[dim]Organization: {org_metrics.name}[/]",
        f"[dim]Report Generated: {generated_at.strftime('%Y-%m-%d %H:%M')} UTC[/]",
    ]
    console.print(Panel("\n".join(header), box=ROUNDED, style="cyan"))

    # 1. Core issue metrics with a traffic-light health indicator.
    created = org_metrics.issues.total_created
    completed = org_metrics.issues.total_completed
    completion_rate = (completed / created * 100) if created > 0 else 0
    health = "🟢" if completion_rate > 80 else "🟡" if completion_rate > 60 else "🔴"
    issue_flow = [
        f"{health} [bold green]Issues Created:[/] {created}",
        f"[bold yellow]Issues Completed:[/] {completed} ({completion_rate:.1f}% completion rate)",
        f"[bold red]Bugs Created:[/] {org_metrics.issues.bugs_created}",
        f"[bold blue]Features Created:[/] {org_metrics.issues.features_created}",
    ]
    console.print(Panel("\n".join(issue_flow), title="[bold]Issue Flow", box=ROUNDED))

    # 2. Time metrics — averages hoisted before formatting.
    cycle = org_metrics.cycle_time
    avg_cycle = statistics.mean(cycle.cycle_times) if cycle.cycle_times else 0
    avg_to_start = statistics.mean(cycle.time_to_start) if cycle.time_to_start else 0
    avg_in_progress = statistics.mean(cycle.time_in_progress) if cycle.time_in_progress else 0
    cycle_health = "🟢" if avg_cycle < 24 else "🟡" if avg_cycle < 72 else "🔴"
    time_lines = [
        f"{cycle_health} [bold]Cycle Time:[/] {format_time(avg_cycle)}",
        f"[bold]Time to Start:[/] {format_time(avg_to_start)}",
        f"[bold]Time in Progress:[/] {format_time(avg_in_progress)}",
    ]
    console.print(Panel("\n".join(time_lines), title="[bold blue]Time Metrics", box=ROUNDED))

    # 3. Estimation accuracy — only rendered when any estimates exist.
    est = org_metrics.estimation
    if est.total_estimated > 0:
        accuracy = est.accurate_estimates / est.total_estimated * 100
        accuracy_health = "🟢" if accuracy > 80 else "🟡" if accuracy > 60 else "🔴"
        avg_variance = statistics.mean(est.estimation_variance) if est.estimation_variance else 0
        est_lines = [
            f"{accuracy_health} [bold]Estimation Accuracy:[/] {accuracy:.1f}%",
            f"[bold green]Accurate Estimates:[/] {est.accurate_estimates}",
            f"[bold red]Underestimates:[/] {est.underestimates}",
            f"[bold yellow]Overestimates:[/] {est.overestimates}",
            f"[bold]Average Variance:[/] {avg_variance:.1f} hours",
        ]
        console.print(Panel("\n".join(est_lines), title="[bold yellow]Estimation Health", box=ROUNDED))

    # 4. Team performance — one sub-panel per team, separated by blank lines.
    if org_metrics.teams:
        team_panels = []
        for team_name, team in org_metrics.teams.items():
            team_rate = (team.issues_completed / team.issues_created * 100) if team.issues_created > 0 else 0
            team_health = "🟢" if team_rate > 80 else "🟡" if team_rate > 60 else "🔴"
            team_panels.append(
                f"{team_health} [bold cyan]{team_name}[/]\n"
                f"Issues: {team.issues_created} created, {team.issues_completed} completed ({team_rate:.1f}%)\n"
                f"Cycle Time: {format_time(team.avg_cycle_time)}\n"
                f"Estimation Accuracy: {team.estimation_accuracy:.1f}%"
            )
        console.print(Panel("\n\n".join(team_panels), title="[bold green]Team Performance", box=ROUNDED))

    # 5. Project health — progress drives the indicator, issues drive the rate.
    if org_metrics.projects:
        project_panels = []
        for _, project in org_metrics.projects.items():
            progress_dot = "🟢" if project.progress >= 80 else "🟡" if project.progress >= 50 else "🔴"
            project_rate = (project.completed_issues / project.total_issues * 100) if project.total_issues > 0 else 0
            project_panels.append(
                f"{progress_dot} [bold cyan]{project.name}[/]\n"
                f"Progress: {project.progress:.1f}%\n"
                f"Issues: {project.total_issues} total, {project.completed_issues} completed ({project_rate:.1f}%)\n"
                f"Bugs: {project.bugs_count} | Features: {project.features_count}\n"
                f"Teams Involved: {len(project.teams_involved)}"
            )
        console.print(Panel("\n\n".join(project_panels), title="[bold magenta]Project Health", box=ROUNDED))

    # 6. Label distribution.
    display_label_summary(org_metrics.label_counts)
121+
122+
def display_label_summary(label_counts):
    """Display a visual (bar-chart style) summary of issue labels.

    Args:
        label_counts: mapping of label name -> number of issues carrying
            that label. Nothing is printed when the mapping is empty.
    """
    if not label_counts:
        return

    # Sort labels by count in descending order.
    sorted_labels = sorted(label_counts.items(), key=lambda x: x[1], reverse=True)

    # Scale bars relative to the most frequent label.
    max_count = max(count for _, count in sorted_labels)
    max_bar_length = 40  # maximum length of the bar in characters

    label_lines = []
    for label, count in sorted_labels:
        # Guard against a zero (or negative) maximum so an all-zero
        # distribution doesn't raise ZeroDivisionError.
        if max_count > 0:
            bar_length = int((count / max_count) * max_bar_length)
        else:
            bar_length = 0
        bar = "█" * bar_length

        # Choose color based on label name; unknown labels fall back to blue.
        lowered = label.lower()
        if "feature" in lowered:
            color = "green"
        elif "bug" in lowered:
            color = "red"
        elif "improvement" in lowered:
            color = "yellow"
        else:
            color = "blue"

        label_lines.append(
            f"[{color}]{label:<20}[/] {bar} ({count})"
        )

    console.print(Panel(
        "\n".join(label_lines),
        title="[bold cyan]Label Distribution",
        box=ROUNDED
    ))

0 commit comments

Comments (0)