11import random
22import time
3+ from dataclasses import asdict
34from datetime import UTC , datetime
45from typing import Any
56
1819from sentry .utils import metrics
1920from sentry .utils .exceptions import quiet_redis_noise
2021from sentry .utils .locking import UnableToAcquireLock
22+ from sentry .workflow_engine .buffer .batch_client import DelayedWorkflowClient
2123from sentry .workflow_engine .models import DataConditionGroup , Detector
2224from sentry .workflow_engine .tasks .utils import (
2325 EventNotFoundError ,
2426 build_workflow_event_data_from_event ,
2527)
26- from sentry .workflow_engine .types import WorkflowEventData
28+ from sentry .workflow_engine .types import WorkflowEventData , WorkflowNotProcessable
2729from sentry .workflow_engine .utils import log_context , scopedstats
2830
2931logger = log_context .get_logger (__name__ )
3032
3133
34+ # TODO - this seems like it should live in processors.workflow,
35+ # Then we can create a `process_new_issue` or `process_event` kind
36+ # of method. That will wrap a new `process_actions_with_logs`
37+ # (and `process_actions`) method. Allowing us to have a much cleaner
38+ # interface here, and between those processor methods.
def process_workflows_with_logs(
    batch_client: DelayedWorkflowClient,
    event_data: WorkflowEventData,
    event_start_time: datetime,
    detector: Detector | None = None,
) -> None:
    """
    Logging wrapper around ``process_workflows``.

    Runs ``process_workflows`` with the given arguments and, when the
    evaluation comes back as a ``WorkflowNotProcessable``, emits one info
    log per issue so we can determine why a workflow did or did not
    trigger.
    """
    # Imported lazily; importing processors.workflow at module import time
    # would presumably create a cycle — TODO confirm.
    from sentry.workflow_engine.processors.workflow import process_workflows

    result = process_workflows(batch_client, event_data, event_start_time, detector)

    if not isinstance(result, WorkflowNotProcessable):
        # TODO - Log the triggered workflows
        return

    logger.info("process_workflows.evaluation", extra=asdict(result))
61+
62+
3263@instrumented_task (
3364 name = "sentry.workflow_engine.tasks.process_workflow_activity" ,
3465 namespace = namespaces .workflow_engine_tasks ,
@@ -44,8 +75,6 @@ def process_workflow_activity(activity_id: int, group_id: int, detector_id: int)
4475 The task will get the Activity from the database, create a WorkflowEventData object,
4576 and then process the data in `process_workflows`.
4677 """
47- from sentry .workflow_engine .buffer .batch_client import DelayedWorkflowClient
48- from sentry .workflow_engine .processors .workflow import process_workflows
4978
5079 with transaction .atomic (router .db_for_write (Detector )):
5180 try :
@@ -69,9 +98,10 @@ def process_workflow_activity(activity_id: int, group_id: int, detector_id: int)
6998 )
7099 with quiet_redis_noise ():
71100 batch_client = DelayedWorkflowClient ()
72- process_workflows (
101+ process_workflows_with_logs (
73102 batch_client , event_data , event_start_time = activity .datetime , detector = detector
74103 )
104+
75105 metrics .incr (
76106 "workflow_engine.tasks.process_workflows.activity_update.executed" ,
77107 tags = {"activity_type" : activity .type , "detector_type" : detector .type },
@@ -103,11 +133,9 @@ def process_workflows_event(
103133 start_timestamp_seconds : float | None = None ,
104134 ** kwargs : dict [str , Any ],
105135) -> None :
106- from sentry .workflow_engine .buffer .batch_client import DelayedWorkflowClient
107- from sentry .workflow_engine .processors .workflow import process_workflows
108-
109136 recorder = scopedstats .Recorder ()
110137 start_time = time .time ()
138+
111139 with recorder .record ():
112140 try :
113141 event_data = build_workflow_event_data_from_event (
@@ -131,7 +159,7 @@ def process_workflows_event(
131159 )
132160 with quiet_redis_noise ():
133161 batch_client = DelayedWorkflowClient ()
134- process_workflows (batch_client , event_data , event_start_time = event_start_time )
162+ process_workflows_with_logs (batch_client , event_data , event_start_time = event_start_time )
135163 duration = time .time () - start_time
136164 is_slow = duration > 1.0
137165 # We want full coverage for particularly slow cases, plus a random sampling.
@@ -158,7 +186,6 @@ def schedule_delayed_workflows(**kwargs: Any) -> None:
158186 """
159187 Schedule delayed workflow buffers in a batch.
160188 """
161- from sentry .workflow_engine .buffer .batch_client import DelayedWorkflowClient
162189 from sentry .workflow_engine .processors .schedule import process_buffered_workflows
163190
164191 lock_name = "schedule_delayed_workflows"
0 commit comments