 import datetime

-from treeherder.model.models import Group, GroupStatus, Job, Push
+from treeherder.model.models import Group, GroupStatus, Job, Push, TextLogError
+
+
+def classify(jobs_to_classify, jobs_to_unclassify):
+    # TODO: consider job.result=(busted, exception)
+    if jobs_to_classify:
+        target_jobs = Job.objects.filter(
+            id__in=jobs_to_classify, result="testfailed", failure_classification_id__in=[1, 6]
+        )
+        if target_jobs:
+            target_jobs.update(failure_classification_id=8)
+
+    if jobs_to_unclassify:
+        # TODO: query text_log_error for new_failure and use 6 if previously set
+        new_jobs = (
+            TextLogError.objects.filter(
+                job__id__in=jobs_to_unclassify, new_failure=True, job__failure_classification_id=8
+            )
+            .values("job__id")
+            .distinct()
+        )
+        jobs_to_newfailure = [j["job__id"] for j in new_jobs]
+        jobs_to_regular_failure = list(set(jobs_to_unclassify) - set(jobs_to_newfailure))
+
+        # classification_id: 6 == new failure needs classification, 1 == not classified
+        if jobs_to_newfailure:
+            target_jobs = Job.objects.filter(id__in=jobs_to_newfailure, result="testfailed")
+            if target_jobs:
+                target_jobs.update(failure_classification_id=6)
+        if jobs_to_regular_failure:
+            target_jobs = Job.objects.filter(id__in=jobs_to_regular_failure, result="testfailed")
+            if target_jobs:
+                target_jobs.update(failure_classification_id=1)


 def _check_and_mark_infra(current_job, job_ids, push_ids):
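A minimal usage sketch of the new `classify()` helper (not part of the patch; the job IDs are made up for illustration):

```python
# Hypothetical job IDs, for illustration only.
# Jobs 101 and 102 are marked intermittent (fc_id=8) if they are currently
# "testfailed" with fc_id 1 or 6; job 203 is reverted to fc_id=6 when it has a
# new_failure TextLogError, otherwise back to fc_id=1.
classify(jobs_to_classify=[101, 102], jobs_to_unclassify=[203])
```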
@@ -42,7 +74,7 @@ def _check_and_mark_infra(current_job, job_ids, push_ids):

     # ignore previous classified, we are looking for NEW extra jobs
     if len([ej for ej in extra_jobs if ej["failure_classification_id"] != 8]) == 0:
-        return
+        return [], []

     # ensure 50% 'success' rate
     # success here means the task ran and produced groups | is success
@@ -52,20 +84,24 @@ def _check_and_mark_infra(current_job, job_ids, push_ids):
         if job["id"] not in job_ids and job["result"] != "success":
             extra_failed.append(job)

+    jobs_to_classify = []
+    jobs_to_unclassify = []
+
     # look for failure rate > 50% and exit early
     if len(extra_failed) / len(extra_jobs) > 0.5:
         # as failure rate > 50%, if any jobs are fc_id=8 classify as fc_id=1
         for job in extra_failed:
             if job["failure_classification_id"] == 8:
-                Job.objects.filter(id=job["id"]).update(failure_classification_id=1)
-        return
+                jobs_to_unclassify.append(job["id"])

     # any extra_jobs will be failures without groups (infra/timeout/etc.)
     # theoretically there could be many jobs here
     # mark extra_jobs as `intermittent_needs_classification`
     for job in extra_failed:
         if job["failure_classification_id"] not in [4, 8]:
-            Job.objects.filter(id=job["id"]).update(failure_classification_id=8)
+            jobs_to_classify.append(job["id"])
+
+    return jobs_to_classify, jobs_to_unclassify


 def check_and_mark_intermittent(job_id):
@@ -86,7 +122,7 @@ def check_and_mark_intermittent(job_id):
     # get list of pushes, find the current push and recent pushes
     idlist = (
         Push.objects.filter(repository__id=current_job.repository.id, time__gte=start_date)
-        .values("id")
+        .values_list("id", flat=True)
         .order_by("-id")
     )
     counter = -1
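An aside on the `.values_list("id", flat=True)` change above: `.values("id")` yields a queryset of dicts, whereas `values_list(..., flat=True)` yields the bare IDs, presumably so downstream code can work with the push IDs directly. A quick sketch of the difference (standard Django ORM behaviour; `start_date` is the variable from the surrounding function):

```python
# .values("id")                 -> [{"id": 42}, {"id": 41}, ...]
# .values_list("id", flat=True) -> [42, 41, ...]
as_dicts = Push.objects.filter(time__gte=start_date).values("id").order_by("-id")
as_ints = Push.objects.filter(time__gte=start_date).values_list("id", flat=True).order_by("-id")
```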
@@ -135,7 +171,8 @@ def check_and_mark_intermittent(job_id):
     # If no groups, look for infra
     distinct_job_ids = list(set([f["job_logs__job__id"] for f in all_groups]))
     if len(distinct_job_ids) == 1:
-        return _check_and_mark_infra(current_job, distinct_job_ids, ids)
+        to_classify, to_unclassify = _check_and_mark_infra(current_job, distinct_job_ids, ids)
+        return classify(to_classify, to_unclassify)

     mappings = {}
     job_classifications = {}
@@ -151,6 +188,7 @@ def check_and_mark_intermittent(job_id):
             # we have a variant
             continue

+        # TODO: consider storing a list of job.id that are fc_id=8
         # store job:fc_id so we can reference what needs changed
         if item["job_logs__job__id"] not in job_classifications:
             job_classifications[item["job_logs__job__id"]] = item[
@@ -181,18 +219,14 @@ def check_and_mark_intermittent(job_id):
     current_changed_groups = {}
     for group in mappings.get(current_job.push.id, {}).get("groups", []):
         all_data = []
-        current_data = []
+        current_data = [
+            mappings[current_job.push.id]["groups"][group][j]
+            for j in mappings[current_job.push.id]["groups"][group]
+        ]
         for id in mappings.keys():
             all_data.extend(
                 [mappings[id]["groups"][group][j] for j in mappings[id]["groups"].get(group, {})]
             )
-            if id == current_job.push.id:
-                current_data.extend(
-                    [
-                        mappings[id]["groups"][group][j]
-                        for j in mappings[id]["groups"].get(group, {})
-                    ]
-                )

         # if new data changes results, update
         pass_rate = len([s for s in all_data if s == GroupStatus.OK]) / len(all_data)
@@ -203,9 +237,9 @@ def check_and_mark_intermittent(job_id):
             current_changed_groups[group] = True

     # all changed_groups need to be evaluated on previous 'failed' jobs to ensure all groups in that task are 'passing'
+    jobs_to_classify = []  # mark as fcid=8 (known intermittent)
+    jobs_to_unclassify = []  # previously parked as fcid=8, new failing data, now fcid=1
     for id in mappings.keys():
-        jobs_to_classify = []  # mark as fcid=8 (known intermittent)
-        jobs_to_unclassify = []  # previously parked as fcid=8, new failing data, now fcid=1
         for job in mappings[id]["jobs"]:
             all_green = True
             current_all_green = True
@@ -229,19 +263,7 @@ def check_and_mark_intermittent(job_id):
             elif job_classifications[job] == 8:
                 jobs_to_unclassify.append(job)

-        # TODO: consider job.result=(busted, exception)
-        for job in jobs_to_classify:
-            target_job = Job.objects.filter(
-                id=job, result="testfailed", failure_classification_id__in=[1, 6]
-            )
-            if target_job:
-                target_job.update(failure_classification_id=8)
-
-        for job in jobs_to_unclassify:
-            target_job = Job.objects.filter(
-                id=job, result="testfailed", failure_classification_id=8
-            )
-            if target_job:
-                target_job.update(failure_classification_id=1)
-
-    return _check_and_mark_infra(current_job, distinct_job_ids, ids)
+    to_classify, to_unclassify = _check_and_mark_infra(current_job, distinct_job_ids, ids)
+    jobs_to_classify.extend(to_classify)
+    jobs_to_unclassify.extend(to_unclassify)
+    return classify(jobs_to_classify, jobs_to_unclassify)
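For reference, the failure_classification_id values this patch moves jobs between, with labels paraphrased from the patch's own comments (not the authoritative FailureClassification table):

```python
# Labels paraphrased from the comments in this patch, not queried from the database.
FAILURE_CLASSIFICATION_LABELS = {
    1: "not classified",
    6: "new failure needs classification",
    8: "intermittent needs classification",
}
```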