@@ -239,6 +239,47 @@ do { \
 	__ret;								\
 })
 
+/*
+ * Some kfuncs are allowed only on the tasks that are subjects of the
+ * in-progress scx_ops operation for, e.g., locking guarantees. To enforce such
+ * restrictions, the following SCX_CALL_OP_*() variants should be used when
+ * invoking scx_ops operations that take task arguments. These can only be used
+ * for non-nesting operations due to the way the tasks are tracked.
+ *
+ * kfuncs which can only operate on such tasks can in turn use
+ * scx_kf_allowed_on_arg_tasks() to test whether the invocation is allowed on
+ * the specific task.
+ */
+#define SCX_CALL_OP_TASK(mask, op, task, args...)			\
+do {									\
+	BUILD_BUG_ON(mask & ~__SCX_KF_TERMINAL);			\
+	current->scx.kf_tasks[0] = task;				\
+	SCX_CALL_OP(mask, op, task, ##args);				\
+	current->scx.kf_tasks[0] = NULL;				\
+} while (0)
+
+#define SCX_CALL_OP_TASK_RET(mask, op, task, args...)			\
+({									\
+	__typeof__(scx_ops.op(task, ##args)) __ret;			\
+	BUILD_BUG_ON(mask & ~__SCX_KF_TERMINAL);			\
+	current->scx.kf_tasks[0] = task;				\
+	__ret = SCX_CALL_OP_RET(mask, op, task, ##args);		\
+	current->scx.kf_tasks[0] = NULL;				\
+	__ret;								\
+})
+
+#define SCX_CALL_OP_2TASKS_RET(mask, op, task0, task1, args...)	\
+({									\
+	__typeof__(scx_ops.op(task0, task1, ##args)) __ret;		\
+	BUILD_BUG_ON(mask & ~__SCX_KF_TERMINAL);			\
+	current->scx.kf_tasks[0] = task0;				\
+	current->scx.kf_tasks[1] = task1;				\
+	__ret = SCX_CALL_OP_RET(mask, op, task0, task1, ##args);	\
+	current->scx.kf_tasks[0] = NULL;				\
+	current->scx.kf_tasks[1] = NULL;				\
+	__ret;								\
+})
+
 /* @mask is constant, always inline to cull unnecessary branches */
 static __always_inline bool scx_kf_allowed(u32 mask)
 {
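Note (illustrative expansion, not part of the patch): given the macro definitions added above, a call site such as the one introduced in do_enqueue_task() below expands roughly as follows; this is what opens the kf_tasks[] tracking window for the duration of the callback.

	/* hand-expanded sketch of SCX_CALL_OP_TASK(SCX_KF_ENQUEUE, enqueue, p, enq_flags) */
	do {
		/* compile-time check: the mask must be a terminal (non-nesting) one */
		BUILD_BUG_ON(SCX_KF_ENQUEUE & ~__SCX_KF_TERMINAL);
		/* record @p so restricted kfuncs can verify their task argument */
		current->scx.kf_tasks[0] = p;
		SCX_CALL_OP(SCX_KF_ENQUEUE, enqueue, p, enq_flags);
		/* clear the tracking slot once the callback returns */
		current->scx.kf_tasks[0] = NULL;
	} while (0);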
@@ -269,6 +310,22 @@ static __always_inline bool scx_kf_allowed(u32 mask)
 	return true;
 }
 
+/* see SCX_CALL_OP_TASK() */
+static __always_inline bool scx_kf_allowed_on_arg_tasks(u32 mask,
+							struct task_struct *p)
+{
+	if (!scx_kf_allowed(__SCX_KF_RQ_LOCKED))
+		return false;
+
+	if (unlikely((p != current->scx.kf_tasks[0] &&
+		      p != current->scx.kf_tasks[1]))) {
+		scx_ops_error("called on a task not being operated on");
+		return false;
+	}
+
+	return true;
+}
+
 /**
  * scx_task_iter_init - Initialize a task iterator
  * @iter: iterator to init
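Example (not part of the diff): a minimal sketch of how a kfunc restricted to the tracked tasks might guard itself with scx_kf_allowed_on_arg_tasks(). The kfunc name, return value, and body here are hypothetical; the real kfuncs that use this helper are added elsewhere.

	/* hypothetical kfunc: only valid on a task currently being operated on */
	s32 scx_bpf_example_task_kfunc(struct task_struct *p)
	{
		/*
		 * @p must match one of current->scx.kf_tasks[], which is only
		 * populated while an SCX_CALL_OP_TASK*() invocation is in flight.
		 */
		if (!scx_kf_allowed_on_arg_tasks(__SCX_KF_RQ_LOCKED, p))
			return -EPERM;

		/* safe to inspect @p here; the caller holds the relevant locks */
		return p->scx.weight;
	}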
@@ -706,7 +763,7 @@ static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags,
 	WARN_ON_ONCE(*ddsp_taskp);
 	*ddsp_taskp = p;
 
-	SCX_CALL_OP(SCX_KF_ENQUEUE, enqueue, p, enq_flags);
+	SCX_CALL_OP_TASK(SCX_KF_ENQUEUE, enqueue, p, enq_flags);
 
 	/*
 	 * If not directly dispatched, QUEUEING isn't clear yet and dispatch or
@@ -778,7 +835,7 @@ static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags
 	add_nr_running(rq, 1);
 
 	if (SCX_HAS_OP(runnable))
-		SCX_CALL_OP(SCX_KF_REST, runnable, p, enq_flags);
+		SCX_CALL_OP_TASK(SCX_KF_REST, runnable, p, enq_flags);
 
 	do_enqueue_task(rq, p, enq_flags, sticky_cpu);
 }
@@ -803,7 +860,7 @@ static void ops_dequeue(struct task_struct *p, u64 deq_flags)
 		BUG();
 	case SCX_OPSS_QUEUED:
 		if (SCX_HAS_OP(dequeue))
-			SCX_CALL_OP(SCX_KF_REST, dequeue, p, deq_flags);
+			SCX_CALL_OP_TASK(SCX_KF_REST, dequeue, p, deq_flags);
 
 		if (atomic64_try_cmpxchg(&p->scx.ops_state, &opss,
 					 SCX_OPSS_NONE))
@@ -854,11 +911,11 @@ static void dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags
 	 */
 	if (SCX_HAS_OP(stopping) && task_current(rq, p)) {
 		update_curr_scx(rq);
-		SCX_CALL_OP(SCX_KF_REST, stopping, p, false);
+		SCX_CALL_OP_TASK(SCX_KF_REST, stopping, p, false);
 	}
 
 	if (SCX_HAS_OP(quiescent))
-		SCX_CALL_OP(SCX_KF_REST, quiescent, p, deq_flags);
+		SCX_CALL_OP_TASK(SCX_KF_REST, quiescent, p, deq_flags);
 
 	if (deq_flags & SCX_DEQ_SLEEP)
 		p->scx.flags |= SCX_TASK_DEQD_FOR_SLEEP;
@@ -877,7 +934,7 @@ static void yield_task_scx(struct rq *rq)
 	struct task_struct *p = rq->curr;
 
 	if (SCX_HAS_OP(yield))
-		SCX_CALL_OP_RET(SCX_KF_REST, yield, p, NULL);
+		SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, yield, p, NULL);
 	else
 		p->scx.slice = 0;
 }
@@ -887,7 +944,7 @@ static bool yield_to_task_scx(struct rq *rq, struct task_struct *to)
 	struct task_struct *from = rq->curr;
 
 	if (SCX_HAS_OP(yield))
-		return SCX_CALL_OP_RET(SCX_KF_REST, yield, from, to);
+		return SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, yield, from, to);
 	else
 		return false;
 }
@@ -1398,7 +1455,7 @@ static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first)
 
 	/* see dequeue_task_scx() on why we skip when !QUEUED */
 	if (SCX_HAS_OP(running) && (p->scx.flags & SCX_TASK_QUEUED))
-		SCX_CALL_OP(SCX_KF_REST, running, p);
+		SCX_CALL_OP_TASK(SCX_KF_REST, running, p);
 
 	watchdog_unwatch_task(p, true);
 
@@ -1454,7 +1511,7 @@ static void put_prev_task_scx(struct rq *rq, struct task_struct *p)
 
 	/* see dequeue_task_scx() on why we skip when !QUEUED */
 	if (SCX_HAS_OP(stopping) && (p->scx.flags & SCX_TASK_QUEUED))
-		SCX_CALL_OP(SCX_KF_REST, stopping, p, true);
+		SCX_CALL_OP_TASK(SCX_KF_REST, stopping, p, true);
 
 	/*
 	 * If we're being called from put_prev_task_balance(), balance_scx() may
@@ -1617,8 +1674,8 @@ static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flag
 	if (SCX_HAS_OP(select_cpu)) {
 		s32 cpu;
 
-		cpu = SCX_CALL_OP_RET(SCX_KF_REST, select_cpu, p, prev_cpu,
-				      wake_flags);
+		cpu = SCX_CALL_OP_TASK_RET(SCX_KF_REST, select_cpu, p, prev_cpu,
+					   wake_flags);
 		if (ops_cpu_valid(cpu)) {
 			return cpu;
 		} else {
@@ -1644,8 +1701,8 @@ static void set_cpus_allowed_scx(struct task_struct *p,
 	 * designation pointless. Cast it away when calling the operation.
 	 */
 	if (SCX_HAS_OP(set_cpumask))
-		SCX_CALL_OP(SCX_KF_REST, set_cpumask, p,
-			    (struct cpumask *)p->cpus_ptr);
+		SCX_CALL_OP_TASK(SCX_KF_REST, set_cpumask, p,
+				 (struct cpumask *)p->cpus_ptr);
 }
 
 static void reset_idle_masks(void)
@@ -1806,7 +1863,7 @@ static void scx_ops_enable_task(struct task_struct *p)
 
 	if (SCX_HAS_OP(enable)) {
 		struct scx_enable_args args = { };
-		SCX_CALL_OP(SCX_KF_REST, enable, p, &args);
+		SCX_CALL_OP_TASK(SCX_KF_REST, enable, p, &args);
 	}
 	p->scx.flags &= ~SCX_TASK_OPS_PREPPED;
 	p->scx.flags |= SCX_TASK_OPS_ENABLED;
@@ -1845,7 +1902,7 @@ static void refresh_scx_weight(struct task_struct *p)
 
 	p->scx.weight = sched_weight_to_cgroup(weight);
 	if (SCX_HAS_OP(set_weight))
-		SCX_CALL_OP(SCX_KF_REST, set_weight, p, p->scx.weight);
+		SCX_CALL_OP_TASK(SCX_KF_REST, set_weight, p, p->scx.weight);
 }
 
 void scx_pre_fork(struct task_struct *p)
@@ -1936,8 +1993,8 @@ static void switching_to_scx(struct rq *rq, struct task_struct *p)
 	 * different scheduler class. Keep the BPF scheduler up-to-date.
 	 */
 	if (SCX_HAS_OP(set_cpumask))
-		SCX_CALL_OP(SCX_KF_REST, set_cpumask, p,
-			    (struct cpumask *)p->cpus_ptr);
+		SCX_CALL_OP_TASK(SCX_KF_REST, set_cpumask, p,
+				 (struct cpumask *)p->cpus_ptr);
 }
 
 static void check_preempt_curr_scx(struct rq *rq, struct task_struct *p, int wake_flags) {}