@@ -302,16 +302,18 @@ static void cgrp_enqueued(struct cgroup *cgrp, struct fcg_cgrp_ctx *cgc)
 	bpf_spin_unlock(&cgv_tree_lock);
 }
 
-void BPF_STRUCT_OPS(fcg_enqueue, struct task_struct *p, u64 enq_flags)
+s32 BPF_STRUCT_OPS(fcg_select_cpu, struct task_struct *p, s32 prev_cpu, u64 wake_flags)
 {
 	struct fcg_task_ctx *taskc;
-	struct cgroup *cgrp;
-	struct fcg_cgrp_ctx *cgc;
+	bool is_idle = false;
+	s32 cpu;
+
+	cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
 
 	taskc = bpf_task_storage_get(&task_ctx, p, 0, 0);
 	if (!taskc) {
 		scx_bpf_error("task_ctx lookup failed");
-		return;
+		return cpu;
 	}
 
 	/*
@@ -321,7 +323,7 @@ void BPF_STRUCT_OPS(fcg_enqueue, struct task_struct *p, u64 enq_flags)
 	 * affinities so that we don't have to worry about per-cgroup dq's
 	 * containing tasks that can't be executed from some CPUs.
 	 */
-	if ((enq_flags & SCX_ENQ_LOCAL) || p->nr_cpus_allowed != nr_cpus) {
+	if (is_idle || p->nr_cpus_allowed != nr_cpus) {
 		/*
 		 * Tell fcg_stopping() that this bypassed the regular scheduling
 		 * path and should be force charged to the cgroup. 0 is used to
@@ -338,14 +340,28 @@ void BPF_STRUCT_OPS(fcg_enqueue, struct task_struct *p, u64 enq_flags)
 		 * implement per-cgroup fallback dq's instead so that we have
 		 * more control over when tasks with custom cpumask get issued.
 		 */
-		if ((enq_flags & SCX_ENQ_LOCAL) ||
+		if (is_idle ||
 		    (p->nr_cpus_allowed == 1 && (p->flags & PF_KTHREAD))) {
 			stat_inc(FCG_STAT_LOCAL);
-			scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, enq_flags);
+			scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 		} else {
 			stat_inc(FCG_STAT_GLOBAL);
-			scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
+			scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);
 		}
+	}
+
+	return cpu;
+}
+
+void BPF_STRUCT_OPS(fcg_enqueue, struct task_struct *p, u64 enq_flags)
+{
+	struct fcg_task_ctx *taskc;
+	struct cgroup *cgrp;
+	struct fcg_cgrp_ctx *cgc;
+
+	taskc = bpf_task_storage_get(&task_ctx, p, 0, 0);
+	if (!taskc) {
+		scx_bpf_error("task_ctx lookup failed");
 		return;
 	}
 
@@ -756,8 +772,8 @@ void BPF_STRUCT_OPS(fcg_dispatch, s32 cpu, struct task_struct *prev)
 	}
 }
 
-s32 BPF_STRUCT_OPS(fcg_prep_enable, struct task_struct *p,
-		   struct scx_enable_args *args)
+s32 BPF_STRUCT_OPS(fcg_init_task, struct task_struct *p,
+		   struct scx_init_task_args *args)
 {
 	struct fcg_task_ctx *taskc;
 	struct fcg_cgrp_ctx *cgc;
@@ -893,13 +909,14 @@ void BPF_STRUCT_OPS(fcg_exit, struct scx_exit_info *ei)
 
 SEC(".struct_ops.link")
 struct sched_ext_ops flatcg_ops = {
+	.select_cpu = (void *)fcg_select_cpu,
 	.enqueue = (void *)fcg_enqueue,
 	.dispatch = (void *)fcg_dispatch,
 	.runnable = (void *)fcg_runnable,
 	.running = (void *)fcg_running,
 	.stopping = (void *)fcg_stopping,
 	.quiescent = (void *)fcg_quiescent,
-	.prep_enable = (void *)fcg_prep_enable,
+	.init_task = (void *)fcg_init_task,
 	.cgroup_set_weight = (void *)fcg_cgroup_set_weight,
 	.cgroup_init = (void *)fcg_cgroup_init,
 	.cgroup_exit = (void *)fcg_cgroup_exit,