Skip to content

Commit

Permalink
Merge pull request #453 from dimagi/hy/reports-issue
Browse files Browse the repository at this point in the history
Fix negative average time to convert in the funnel performance report.
  • Loading branch information
hemant10yadav authored Jan 9, 2025
2 parents b272ace + 944785e commit 6b92e4f
Show file tree
Hide file tree
Showing 3 changed files with 60 additions and 20 deletions.
23 changes: 15 additions & 8 deletions commcare_connect/program/helpers.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
from datetime import timedelta

from django.db.models import (
Avg,
Case,
Expand All @@ -12,7 +14,7 @@
Value,
When,
)
from django.db.models.functions import Cast, Round
from django.db.models.functions import Cast, Coalesce, Round

from commcare_connect.opportunity.models import UserVisit, VisitValidationStatus
from commcare_connect.program.models import ManagedOpportunity, Program
Expand Down Expand Up @@ -50,7 +52,7 @@ def get_annotated_managed_opportunity(program: Program):
.annotate(
workers_invited=Count("opportunityaccess", distinct=True),
workers_passing_assessment=Count(
"opportunityaccess__assessment",
"opportunityaccess",
filter=Q(
opportunityaccess__assessment__passed=True,
),
Expand All @@ -62,12 +64,17 @@ def get_annotated_managed_opportunity(program: Program):
distinct=True,
),
percentage_conversion=calculate_safe_percentage("workers_starting_delivery", "workers_invited"),
average_time_to_convert=Avg(
ExpressionWrapper(
Subquery(earliest_visits) - F("opportunityaccess__invited_date"), output_field=DurationField()
average_time_to_convert=Coalesce(
Avg(
ExpressionWrapper(
earliest_visits - F("opportunityaccess__invited_date"),
output_field=DurationField(),
),
filter=FILTER_FOR_VALID_VISIT_DATE
& Q(opportunityaccess__invited_date__lte=Subquery(earliest_visits)),
distinct=True,
),
filter=FILTER_FOR_VALID_VISIT_DATE,
distinct=True,
Value(timedelta(seconds=0)),
),
)
)
Expand Down Expand Up @@ -111,7 +118,7 @@ def get_delivery_performance_report(program: Program, start_date, end_date):
distinct=True,
filter=date_filter & Q(opportunityaccess__uservisit__completed_work__isnull=False),
),
deliveries_per_day_per_worker=Case(
deliveries_per_worker=Case(
When(active_workers=0, then=Value(0)),
default=Round(F("total_payment_since_start_date") / F("active_workers"), 2),
output_field=FloatField(),
Expand Down
4 changes: 2 additions & 2 deletions commcare_connect/program/tables.py
Original file line number Diff line number Diff line change
Expand Up @@ -285,7 +285,7 @@ class DeliveryPerformanceTable(tables.Table):
start_date = tables.DateColumn()
total_workers_starting_delivery = tables.Column(verbose_name=_("Workers Starting Delivery"))
active_workers = tables.Column(verbose_name=_("Active Workers"))
deliveries_per_day_per_worker = tables.Column(verbose_name=_("Deliveries per Day per Worker"))
deliveries_per_worker = tables.Column(verbose_name=_("Deliveries per Worker"))
records_flagged_percentage = tables.Column(verbose_name=_("% Records flagged"))

class Meta:
Expand All @@ -297,7 +297,7 @@ class Meta:
"start_date",
"total_workers_starting_delivery",
"active_workers",
"deliveries_per_day_per_worker",
"deliveries_per_worker",
"records_flagged_percentage",
)
orderable = False
Expand Down
53 changes: 43 additions & 10 deletions commcare_connect/program/tests/test_helpers.py
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ class TestGetAnnotatedManagedOpportunity(BaseManagedOpportunityTest):
66.67,
timedelta(days=1),
),
("empty_scenario", [], [], 0, 0, 0, 0.0, None),
("empty_scenario", [], [], 0, 0, 0, 0.0, timedelta(days=0)),
("multiple_visits_scenario", [VisitValidationStatus.pending], [True], 1, 1, 1, 100.0, timedelta(days=1)),
(
"excluded_statuses",
Expand All @@ -79,7 +79,7 @@ class TestGetAnnotatedManagedOpportunity(BaseManagedOpportunityTest):
2,
0,
0.0,
None,
timedelta(days=0),
),
(
"failed_assessments",
Expand Down Expand Up @@ -121,16 +121,14 @@ def test_scenarios(
opps = get_annotated_managed_opportunity(self.program)
assert len(opps) == 1
annotated_opp = opps[0]
print(scenario, annotated_opp.average_time_to_convert, expected_avg_time_to_convert)
assert annotated_opp.workers_invited == expected_invited
assert annotated_opp.workers_passing_assessment == expected_passing
assert annotated_opp.workers_starting_delivery == expected_delivery
assert pytest.approx(annotated_opp.percentage_conversion, 0.01) == expected_conversion

if expected_avg_time_to_convert:
diff = abs(annotated_opp.average_time_to_convert - expected_avg_time_to_convert)
assert diff < timedelta(minutes=1)
else:
assert annotated_opp.average_time_to_convert is None
diff = abs(annotated_opp.average_time_to_convert - expected_avg_time_to_convert)
assert diff < timedelta(minutes=1)


@pytest.mark.django_db
Expand All @@ -141,7 +139,7 @@ class TestDeliveryPerformanceReport(BaseManagedOpportunityTest):
@pytest.mark.parametrize(
"scenario, visit_statuses, visit_date, flagged_statuses, expected_active_workers, "
"expected_total_workers, expected_records_flagged_percentage,"
"total_payment_units_with_flags,total_payment_since_start_date, delivery_per_day_per_worker",
"total_payment_units_with_flags,total_payment_since_start_date, deliveries_per_worker",
[
(
"basic_scenario",
Expand Down Expand Up @@ -226,7 +224,7 @@ def test_delivery_performance_report_scenarios(
expected_records_flagged_percentage,
total_payment_units_with_flags,
total_payment_since_start_date,
delivery_per_day_per_worker,
deliveries_per_worker,
):
for i, visit_status in enumerate(visit_statuses):
self.create_user_with_visit(
Expand All @@ -246,4 +244,39 @@ def test_delivery_performance_report_scenarios(
assert opps[0].records_flagged_percentage == expected_records_flagged_percentage
assert opps[0].total_payment_units_with_flags == total_payment_units_with_flags
assert opps[0].total_payment_since_start_date == total_payment_since_start_date
assert opps[0].deliveries_per_day_per_worker == delivery_per_day_per_worker
assert opps[0].deliveries_per_worker == deliveries_per_worker


@pytest.mark.django_db
def test_average_time_to_convert_for_negative_values():
    """Workers whose earliest visit predates their invited_date must not drag
    ``average_time_to_convert`` negative: such rows are filtered out of the
    average, so only positive (visit after invite) durations are counted.
    """
    program = ProgramFactory.create()
    nm_org = OrganizationFactory.create()
    opp = ManagedOpportunityFactory.create(program=program, organization=nm_org)
    today = now()

    # Durations the annotation is expected to average (positive ones only).
    valid_durations = []

    for r in range(4):
        user = UserFactory.create()
        access = OpportunityAccessFactory.create(opportunity=opp, user=user, invited_date=now())
        AssessmentFactory.create(opportunity=opp, user=user, opportunity_access=access, passed=True)

        if r % 2 == 0:
            # Visit BEFORE the invite: would produce a negative duration and
            # must be excluded by the annotation's invited_date <= visit filter.
            visit_date = today - timedelta(days=1)
        else:
            # Visit AFTER the invite: positive duration, counted in the average.
            visit_date = today + timedelta(days=4)
            duration = visit_date - access.invited_date
            valid_durations.append(duration)

        UserVisitFactory.create(
            user=user,
            opportunity=opp,
            opportunity_access=access,
            visit_date=visit_date,
            status=VisitValidationStatus.approved,
        )

    opp = get_annotated_managed_opportunity(program)[0]

    # sum() with a timedelta() start value keeps the arithmetic in timedeltas.
    expected_avg = sum(valid_durations, timedelta()) / len(valid_durations)
    assert opp.average_time_to_convert == expected_avg

0 comments on commit 6b92e4f

Please sign in to comment.