1 file changed (+2, -9 lines)

@@ -1338,14 +1338,8 @@ static int computeDefragCycleUs(void) {
 
         /* Also adjust for any accumulated overage. */
         dutyCycleUs -= defrag.timeproc_overage_us;
+        if (dutyCycleUs < 0) dutyCycleUs = 0;
         defrag.timeproc_overage_us = 0;
-
-        if (dutyCycleUs < server.active_defrag_cycle_us) {
-            /* We never reduce our cycle time, that would increase overhead. Instead, we track this
-             * as part of the overage, and increase wait time between cycles. */
-            defrag.timeproc_overage_us = server.active_defrag_cycle_us - dutyCycleUs;
-            dutyCycleUs = server.active_defrag_cycle_us;
-        }
     }
     return dutyCycleUs;
 }
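
The effect of this first hunk: when accumulated overage exceeds the computed budget, the duty cycle is now clamped to zero instead of being padded back up to server.active_defrag_cycle_us and re-recorded as overage. Below is a minimal standalone sketch of that post-change behavior; the DefragState struct, computeCycleBudgetUs name, and budgetUs parameter are illustrative stand-ins, not the real server definitions.

/* Minimal sketch of the post-change duty-cycle clamp. The struct and
 * names here are illustrative stand-ins, not the real definitions. */
#include <stdio.h>

typedef struct {
    long timeproc_overage_us; /* CPU time spent beyond the intended budget */
} DefragState;

static DefragState defrag = {0};

/* Returns how many microseconds the next defrag cycle may consume.
 * 'budgetUs' stands in for the value computed earlier in
 * computeDefragCycleUs from the configured CPU percentage. */
static int computeCycleBudgetUs(int budgetUs) {
    int dutyCycleUs = budgetUs;

    /* Subtract any accumulated overage from the previous cycle. */
    dutyCycleUs -= defrag.timeproc_overage_us;
    /* New behavior: a large overage can only shrink the cycle to zero;
     * it is no longer rolled forward by padding the cycle back up to a
     * configured minimum. */
    if (dutyCycleUs < 0) dutyCycleUs = 0;
    defrag.timeproc_overage_us = 0;

    return dutyCycleUs;
}

int main(void) {
    defrag.timeproc_overage_us = 700;
    printf("%d\n", computeCycleBudgetUs(500)); /* prints 0, not a padded minimum */
    return 0;
}
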
@@ -1354,8 +1348,7 @@ static int computeDefragCycleUs(void) {
  * computeDefragCycleUs computation. */
 static int computeDelayMs(monotime intendedEndtime) {
     defrag.timeproc_end_time = getMonotonicUs();
-    long overage = defrag.timeproc_end_time - intendedEndtime;
-    defrag.timeproc_overage_us += overage; /* track over/under desired CPU */
+    defrag.timeproc_overage_us = defrag.timeproc_end_time - intendedEndtime;
     /* Allow negative overage (underage) to count against existing overage, but don't allow
      * underage (from short stages) to be accumulated. */
     if (defrag.timeproc_overage_us < 0) defrag.timeproc_overage_us = 0;
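
The second hunk assigns the measured overage outright rather than adding it to the previous value, since computeDefragCycleUs now consumes and resets it every cycle. Below is a hedged sketch of that hand-off across two timer invocations; monotime as a plain uint64_t, the fake clock, and computeDelayMsSketch are simplified assumptions for illustration, not the real implementation.

/* Sketch of the overage hand-off at the end of a defrag timer cycle.
 * 'monotime', the clock, and the return value are simplified stand-ins. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t monotime;

static struct {
    monotime timeproc_end_time;
    long timeproc_overage_us;
} defrag;

/* Fake monotonic clock for the example. */
static monotime fakeNowUs;
static monotime getNowUs(void) { return fakeNowUs; }

/* After a defrag cycle finishes, record how far past the intended end time
 * it ran. Post-change: the overage is assigned, not accumulated, and
 * underage (finishing early) is never carried forward. */
static int computeDelayMsSketch(monotime intendedEndTime) {
    defrag.timeproc_end_time = getNowUs();
    defrag.timeproc_overage_us = (long)defrag.timeproc_end_time - (long)intendedEndTime;
    if (defrag.timeproc_overage_us < 0) defrag.timeproc_overage_us = 0;
    return 1; /* the real code derives the next timer delay here */
}

int main(void) {
    fakeNowUs = 10300;
    computeDelayMsSketch(10000);                 /* ran 300us over budget */
    printf("%ld\n", defrag.timeproc_overage_us); /* 300 */

    fakeNowUs = 10900;
    computeDelayMsSketch(11000);                 /* finished 100us early */
    printf("%ld\n", defrag.timeproc_overage_us); /* 0: underage not kept */
    return 0;
}

The recorded overage is then subtracted from the next cycle's budget in computeDefragCycleUs (first hunk), which keeps the timer close to its intended CPU budget without ever crediting underage from short stages.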