Skip to content

Commit

Permalink
Adds mentor counting to json output and adds missing config.
Browse files Browse the repository at this point in the history
This adds mentor counting output to the JSON format. In addition, this change
makes the maximum number of comments to evaluate configurable, as well as the
cutoff for heavily involved mentors.
  • Loading branch information
MaineC committed Mar 28, 2024
1 parent e5f7987 commit 9133bca
Show file tree
Hide file tree
Showing 7 changed files with 62 additions and 14 deletions.
3 changes: 3 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -119,6 +119,9 @@ Below are the allowed configuration options:
| `IGNORE_USERS` | False | | A comma separated list of users to ignore when calculating metrics. (ie. `IGNORE_USERS: 'user1,user2'`). To ignore bots, append `[bot]` to the user (ie. `IGNORE_USERS: 'github-actions[bot]'`) |
| `ENABLE_MENTOR_COUNT` | False | False | If set to 'TRUE', count the number of comments users left on discussions, issues, and PRs, and display the number of active mentors |
| `MIN_MENTOR_COMMENTS` | False | 10 | Minimum number of comments to count as a mentor |
| `MAX_COMMENTS_EVAL` | False | 20 | Maximum number of comments per thread to evaluate for mentor stats |
| `HEAVILY_INVOLVED_CUTOFF` | False | 3 | Cutoff after which a mentor's comments in one issue are no longer counted toward their total score |

## Further Documentation

- [Example workflows](./docs/example-workflows.md)
Expand Down
14 changes: 12 additions & 2 deletions config.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,12 +30,16 @@ class EnvVars:
hide_label_metrics (str): If set, the label metrics are hidden in the output
enable_mentor_count (str): If set to TRUE, compute number of mentors
min_mentor_comments (str): If set, defines the minimum number of comments for mentors
max_comments_eval (str): If set, defines the maximum number of comments to look at for mentor evaluation
heavily_involved_cutoff (str): If set, defines the cutoff after which heavily involved commenters in
one thread are no longer counted.
"""
def __init__(self, search_query: str, gh_token: str, labels_to_measure: List[str], ignore_user: List[str],
github_server_url: str, hide_author: str, hide_time_to_first_response: str,
hide_time_to_close: str, hide_time_to_answer: str,
hide_label_metrics: str, enable_mentor_count: str,
min_mentor_comments: str):
min_mentor_comments: str, max_comments_eval: str,
heavily_involved_cutoff: str):
self.search_query = search_query
self.gh_token = gh_token
self.labels_to_measure = labels_to_measure
Expand All @@ -48,6 +52,8 @@ def __init__(self, search_query: str, gh_token: str, labels_to_measure: List[str
self.hide_label_metrics = hide_label_metrics
self.enable_mentor_count = enable_mentor_count
self.min_mentor_comments = min_mentor_comments
self.max_comments_eval = max_comments_eval
self.heavily_involved_cutoff = heavily_involved_cutoff


def get_env_vars() -> EnvVars:
Expand Down Expand Up @@ -86,6 +92,8 @@ def get_env_vars() -> EnvVars:
hide_label_metrics = os.getenv("HIDE_LABEL_METRICS")
enable_mentor_count = os.getenv("ENABLE_MENTOR_COUNT", "FALSE")
min_mentor_comments = os.getenv("MIN_MENTOR_COMMENTS", "10")
max_comments_eval = os.getenv("MAX_COMMENTS_EVAL", "20")
heavily_involved_cutoff = os.getenv("HEAVILY_INVOLVED_CUTOFF", "3")

return EnvVars(
search_query,
Expand All @@ -99,5 +107,7 @@ def get_env_vars() -> EnvVars:
hide_time_to_answer,
hide_label_metrics,
enable_mentor_count,
min_mentor_comments
min_mentor_comments,
max_comments_eval,
heavily_involved_cutoff
)
18 changes: 12 additions & 6 deletions issue_metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -120,6 +120,8 @@ def get_per_issue_metrics(
discussions: bool = False,
labels: Union[List[str], None] = None,
ignore_users: List[str] = None,
max_comments_to_eval: int = 20,
heavily_involved: int = 3
) -> tuple[List, int, int]:
"""
Calculate the metrics for each issue/pr/discussion in a list provided.
Expand Down Expand Up @@ -158,8 +160,8 @@ def get_per_issue_metrics(
None, issue, ignore_users
)
issue_with_metrics.mentor_activity = count_comments_per_user(
None, issue, ignore_users
# TODO review arguments max_comments_to_eval, heavily_involved
None, issue, ignore_users, None, None,
max_comments_to_eval, heavily_involved
)
issue_with_metrics.time_to_answer = measure_time_to_answer(issue)
if issue["closedAt"]:
Expand Down Expand Up @@ -188,9 +190,9 @@ def get_per_issue_metrics(
issue, None, pull_request, ready_for_review_at, ignore_users
)
issue_with_metrics.mentor_activity = count_comments_per_user(
issue, None, pull_request, ready_for_review_at, ignore_users
issue, None, pull_request, ready_for_review_at, ignore_users,
max_comments_to_eval, heavily_involved
)
# TODO review arguments max_comments_to_eval, heavily_involved
if labels:
issue_with_metrics.label_metrics = get_label_metrics(issue, labels)
if issue.state == "closed": # type: ignore
Expand Down Expand Up @@ -256,9 +258,11 @@ def main():
search_query = env_vars.search_query
token = env_vars.gh_token
ignore_users = env_vars.ignore_users
enable_mentor_count = env_vars.enable_mentor_count

enable_mentor_count = env_vars.enable_mentor_count
min_mentor_count = int(env_vars.min_mentor_comments)
max_comments_eval = int(env_vars.max_comments_eval)
heavily_involved_cutoff = int(env_vars.heavily_involved_cutoff)

# Get the repository owner and name from the search query
owner = get_owner(search_query)
Expand Down Expand Up @@ -298,6 +302,8 @@ def main():
discussions="type:discussions" in search_query,
labels=labels,
ignore_users=ignore_users,
max_comments_to_eval=max_comments_eval,
heavily_involved=heavily_involved_cutoff,
)

stats_time_to_first_response = get_stats_time_to_first_response(
Expand All @@ -311,7 +317,7 @@ def main():

num_mentor_count = 0
if enable_mentor_count == "TRUE":
num_mentor_count = get_mentor_count(issues_with_metrics, min_mentor_comments)
num_mentor_count = get_mentor_count(issues_with_metrics, min_mentor_count)

# Get stats describing the time in label for each label and store it in a dictionary
# where the key is the label and the value is the average time
Expand Down
3 changes: 3 additions & 0 deletions json_writer.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@ def write_to_json(
stats_time_in_labels: Union[dict[str, dict[str, timedelta]], None],
num_issues_opened: Union[int, None],
num_issues_closed: Union[int, None],
num_mentor_count: Union[int, None],
search_query: str,
) -> str:
"""
Expand All @@ -43,6 +44,7 @@ def write_to_json(
"average_time_to_answer": "1 day, 0:00:00",
"num_items_opened": 2,
"num_items_closed": 1,
"num_mentor_count": 5,
"total_item_count": 2,
"issues": [
{
Expand Down Expand Up @@ -129,6 +131,7 @@ def write_to_json(
"90_percentile_time_in_labels": p90_time_in_labels,
"num_items_opened": num_issues_opened,
"num_items_closed": num_issues_closed,
"num_mentor_count": num_mentor_count,
"total_item_count": len(issues_with_metrics),
}

Expand Down
22 changes: 20 additions & 2 deletions most_active_mentors.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@

from classes import IssueWithMetrics


def count_comments_per_user(
issue: Union[github3.issues.Issue, None], # type: ignore
discussion: Union[dict, None] = None,
Expand Down Expand Up @@ -112,6 +113,23 @@ def count_comments_per_user(
else:
mentor_count[review_comment.user.login] = 1

if discussion and len(discussion["comments"]["nodes"]) > 0:
for comment in discussion["comments"]["nodes"]:
if ignore_comment(
comment.user,
comment.user,
ignore_users,
comment.submitted_at,
comment.ready_for_review_at
):
continue

# increase the number of comments left by current user by 1
if comment.user.login in mentor_count:
mentor_count[comment.user.login] += 1
else:
mentor_count[comment.user.login] = 1

return mentor_count


Expand Down Expand Up @@ -153,8 +171,8 @@ def get_mentor_count(
"""

mentor_count = Counter({})
for issueWithMetrics in issues_with_metrics:
current_counter = Counter(issueWithMetrics.mentor_activity)
for issue_with_metrics in issues_with_metrics:
current_counter = Counter(issue_with_metrics.mentor_activity)
mentor_count = mentor_count + current_counter

active_mentor_count = 0
Expand Down
6 changes: 6 additions & 0 deletions test_json_writer.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,7 @@ def test_write_to_json(self):
}
num_issues_opened = 2
num_issues_closed = 1
num_mentor_count = 5

expected_output = {
"average_time_to_first_response": "2 days, 12:00:00",
Expand All @@ -76,6 +77,7 @@ def test_write_to_json(self):
"90_percentile_time_in_labels": {"bug": "1 day, 16:24:12"},
"num_items_opened": 2,
"num_items_closed": 1,
"num_mentor_count": 5,
"total_item_count": 2,
"issues": [
{
Expand Down Expand Up @@ -110,6 +112,7 @@ def test_write_to_json(self):
stats_time_in_labels=stats_time_in_labels,
num_issues_opened=num_issues_opened,
num_issues_closed=num_issues_closed,
num_mentor_count=num_mentor_count,
search_query="is:issue repo:owner/repo",
),
json.dumps(expected_output),
Expand Down Expand Up @@ -148,6 +151,7 @@ def test_write_to_json_with_no_response(self):
}
num_issues_opened = 2
num_issues_closed = 0
num_mentor_count = 5

expected_output = {
"average_time_to_first_response": "None",
Expand All @@ -164,6 +168,7 @@ def test_write_to_json_with_no_response(self):
"90_percentile_time_in_labels": {},
"num_items_opened": 2,
"num_items_closed": 0,
"num_mentor_count": 5,
"total_item_count": 2,
"issues": [
{
Expand Down Expand Up @@ -198,6 +203,7 @@ def test_write_to_json_with_no_response(self):
stats_time_in_labels=stats_time_in_labels,
num_issues_opened=num_issues_opened,
num_issues_closed=num_issues_closed,
num_mentor_count=num_mentor_count,
search_query="is:issue repo:owner/repo",
),
json.dumps(expected_output),
Expand Down
10 changes: 6 additions & 4 deletions test_most_active_mentors.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,10 +61,12 @@ def test_get_mentor_count(self):

# Create mock data
issues_with_metrics = [
IssueWithMetrics("Issue 1", "https://github.com/user/repo/issues/1",
"alice", None, mentor_activity=mentor_activity),
IssueWithMetrics("Issue 2", "https://github.com/user/repo/issues/2",
"bob", None, mentor_activity=mentor_activity),
IssueWithMetrics(
"Issue 1", "https://github.com/user/repo/issues/1",
"alice", None, mentor_activity=mentor_activity),
IssueWithMetrics(
"Issue 2", "https://github.com/user/repo/issues/2",
"bob", None, mentor_activity=mentor_activity),
]

# Call the function and check the result
Expand Down

0 comments on commit 9133bca

Please sign in to comment.