Commit 4ff504a

Merge pull request #75 from sched-ext/htejun
scx: Build fix after kernel update
2 parents: 0ed47cd + 552b75a

File tree

11 files changed: +138560 -76 lines changed


scheds/c/scx_flatcg.bpf.c

Lines changed: 28 additions & 11 deletions

@@ -302,16 +302,18 @@ static void cgrp_enqueued(struct cgroup *cgrp, struct fcg_cgrp_ctx *cgc)
 	bpf_spin_unlock(&cgv_tree_lock);
 }

-void BPF_STRUCT_OPS(fcg_enqueue, struct task_struct *p, u64 enq_flags)
+s32 BPF_STRUCT_OPS(fcg_select_cpu, struct task_struct *p, s32 prev_cpu, u64 wake_flags)
 {
 	struct fcg_task_ctx *taskc;
-	struct cgroup *cgrp;
-	struct fcg_cgrp_ctx *cgc;
+	bool is_idle = false;
+	s32 cpu;
+
+	cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);

 	taskc = bpf_task_storage_get(&task_ctx, p, 0, 0);
 	if (!taskc) {
 		scx_bpf_error("task_ctx lookup failed");
-		return;
+		return cpu;
 	}

 	/*
@@ -321,7 +323,7 @@ void BPF_STRUCT_OPS(fcg_enqueue, struct task_struct *p, u64 enq_flags)
 	 * affinities so that we don't have to worry about per-cgroup dq's
 	 * containing tasks that can't be executed from some CPUs.
 	 */
-	if ((enq_flags & SCX_ENQ_LOCAL) || p->nr_cpus_allowed != nr_cpus) {
+	if (is_idle || p->nr_cpus_allowed != nr_cpus) {
 		/*
 		 * Tell fcg_stopping() that this bypassed the regular scheduling
 		 * path and should be force charged to the cgroup. 0 is used to
@@ -338,14 +340,28 @@ void BPF_STRUCT_OPS(fcg_enqueue, struct task_struct *p, u64 enq_flags)
 		 * implement per-cgroup fallback dq's instead so that we have
 		 * more control over when tasks with custom cpumask get issued.
 		 */
-		if ((enq_flags & SCX_ENQ_LOCAL) ||
+		if (is_idle ||
 		    (p->nr_cpus_allowed == 1 && (p->flags & PF_KTHREAD))) {
 			stat_inc(FCG_STAT_LOCAL);
-			scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, enq_flags);
+			scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 		} else {
 			stat_inc(FCG_STAT_GLOBAL);
-			scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
+			scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);
 		}
+	}
+
+	return cpu;
+}
+
+void BPF_STRUCT_OPS(fcg_enqueue, struct task_struct *p, u64 enq_flags)
+{
+	struct fcg_task_ctx *taskc;
+	struct cgroup *cgrp;
+	struct fcg_cgrp_ctx *cgc;
+
+	taskc = bpf_task_storage_get(&task_ctx, p, 0, 0);
+	if (!taskc) {
+		scx_bpf_error("task_ctx lookup failed");
 		return;
 	}

@@ -756,8 +772,8 @@ void BPF_STRUCT_OPS(fcg_dispatch, s32 cpu, struct task_struct *prev)
 	}
 }

-s32 BPF_STRUCT_OPS(fcg_prep_enable, struct task_struct *p,
-		   struct scx_enable_args *args)
+s32 BPF_STRUCT_OPS(fcg_init_task, struct task_struct *p,
+		   struct scx_init_task_args *args)
 {
 	struct fcg_task_ctx *taskc;
 	struct fcg_cgrp_ctx *cgc;
@@ -893,13 +909,14 @@ void BPF_STRUCT_OPS(fcg_exit, struct scx_exit_info *ei)

 SEC(".struct_ops.link")
 struct sched_ext_ops flatcg_ops = {
+	.select_cpu		= (void *)fcg_select_cpu,
 	.enqueue		= (void *)fcg_enqueue,
 	.dispatch		= (void *)fcg_dispatch,
 	.runnable		= (void *)fcg_runnable,
 	.running		= (void *)fcg_running,
 	.stopping		= (void *)fcg_stopping,
 	.quiescent		= (void *)fcg_quiescent,
-	.prep_enable		= (void *)fcg_prep_enable,
+	.init_task		= (void *)fcg_init_task,
 	.cgroup_set_weight	= (void *)fcg_cgroup_set_weight,
 	.cgroup_init		= (void *)fcg_cgroup_init,
 	.cgroup_exit		= (void *)fcg_cgroup_exit,
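The same migration recurs in the other schedulers in this commit: idle-CPU selection and the direct local dispatch that used to hinge on the SCX_ENQ_LOCAL enqueue flag now live in an ops.select_cpu() callback built around the new scx_bpf_select_cpu_dfl() kfunc. Below is a minimal sketch of that callback, condensed from the diffs in this commit and assuming the repo's scx/common.bpf.h header; the example_ name is illustrative and not part of the tree.

/* Sketch only: mirrors the select_cpu pattern introduced in this commit. */
#include <scx/common.bpf.h>

s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
		   s32 prev_cpu, u64 wake_flags)
{
	bool is_idle = false;
	s32 cpu;

	/* Built-in idle-CPU selection; is_idle is set when an idle CPU was found. */
	cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
	if (is_idle)
		/* Dispatching here replaces the old SCX_ENQ_LOCAL path in enqueue(). */
		scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);

	return cpu;
}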

scheds/c/scx_nest.bpf.c

Lines changed: 5 additions & 21 deletions

@@ -92,9 +92,6 @@ struct task_ctx {
 	 * if the task should attach to the core that it will execute on next.
 	 */
 	s32 prev_cpu;
-
-	/* Dispatch directly to local_dsq */
-	bool force_local;
 };

 struct {
@@ -231,8 +228,6 @@ s32 BPF_STRUCT_OPS(nest_select_cpu, struct task_struct *p, s32 prev_cpu,
 		return -ENOENT;
 	}

-	// Unset below if we can't find a core to migrate to.
-	tctx->force_local = true;
 	tctx->prev_cpu = prev_cpu;

 	bpf_cpumask_and(p_mask, p->cpus_ptr, cast_mask(primary));
@@ -337,7 +332,6 @@ s32 BPF_STRUCT_OPS(nest_select_cpu, struct task_struct *p, s32 prev_cpu,
 	}

 	bpf_rcu_read_unlock();
-	tctx->force_local = false;
 	return prev_cpu;

 promote_to_primary:
@@ -374,30 +368,21 @@ s32 BPF_STRUCT_OPS(nest_select_cpu, struct task_struct *p, s32 prev_cpu,
 	}
 	bpf_rcu_read_unlock();
 	update_attached(tctx, prev_cpu, cpu);
+	scx_bpf_dispatch(p, SCX_DSQ_LOCAL, slice_ns, 0);
 	return cpu;
 }

 void BPF_STRUCT_OPS(nest_enqueue, struct task_struct *p, u64 enq_flags)
 {
 	struct task_ctx *tctx;
 	u64 vtime = p->scx.dsq_vtime;
-	s32 cpu = bpf_get_smp_processor_id();

 	tctx = bpf_task_storage_get(&task_ctx_stor, p, 0, 0);
 	if (!tctx) {
 		scx_bpf_error("Unable to find task ctx");
 		return;
 	}

-	if (tctx->force_local || (enq_flags & SCX_ENQ_LOCAL)) {
-		tctx->force_local = false;
-		if (enq_flags & SCX_ENQ_LOCAL)
-			update_attached(tctx, tctx->prev_cpu, cpu);
-
-		scx_bpf_dispatch(p, SCX_DSQ_LOCAL, slice_ns, enq_flags);
-		return;
-	}
-
 	/*
 	 * Limit the amount of budget that an idling task can accumulate
 	 * to one slice.
@@ -495,8 +480,8 @@ void BPF_STRUCT_OPS(nest_stopping, struct task_struct *p, bool runnable)
 	p->scx.dsq_vtime += (slice_ns - p->scx.slice) * 100 / p->scx.weight;
 }

-s32 BPF_STRUCT_OPS(nest_prep_enable, struct task_struct *p,
-		   struct scx_enable_args *args)
+s32 BPF_STRUCT_OPS(nest_init_task, struct task_struct *p,
+		   struct scx_init_task_args *args)
 {
 	struct task_ctx *tctx;
 	struct bpf_cpumask *cpumask;
@@ -524,8 +509,7 @@ s32 BPF_STRUCT_OPS(nest_prep_enable, struct task_struct *p,
 	return 0;
 }

-void BPF_STRUCT_OPS(nest_enable, struct task_struct *p,
-		    struct scx_enable_args *args)
+void BPF_STRUCT_OPS(nest_enable, struct task_struct *p)
 {
 	p->scx.dsq_vtime = vtime_now;
 }
@@ -682,7 +666,7 @@ struct sched_ext_ops nest_ops = {
 	.dispatch		= (void *)nest_dispatch,
 	.running		= (void *)nest_running,
 	.stopping		= (void *)nest_stopping,
-	.prep_enable		= (void *)nest_prep_enable,
+	.init_task		= (void *)nest_init_task,
 	.enable			= (void *)nest_enable,
 	.init			= (void *)nest_init,
 	.exit			= (void *)nest_exit,

scheds/c/scx_qmap.bpf.c

Lines changed: 3 additions & 3 deletions

@@ -354,8 +354,8 @@ void BPF_STRUCT_OPS(qmap_cpu_release, s32 cpu, struct scx_cpu_release_args *args
 	__sync_fetch_and_add(&nr_reenqueued, cnt);
 }

-s32 BPF_STRUCT_OPS(qmap_prep_enable, struct task_struct *p,
-		   struct scx_enable_args *args)
+s32 BPF_STRUCT_OPS(qmap_init_task, struct task_struct *p,
+		   struct scx_init_task_args *args)
 {
 	if (p->tgid == disallow_tgid)
 		p->scx.disallow = true;
@@ -391,7 +391,7 @@ struct sched_ext_ops qmap_ops = {
 	.dispatch		= (void *)qmap_dispatch,
 	.core_sched_before	= (void *)qmap_core_sched_before,
 	.cpu_release		= (void *)qmap_cpu_release,
-	.prep_enable		= (void *)qmap_prep_enable,
+	.init_task		= (void *)qmap_init_task,
 	.init			= (void *)qmap_init,
 	.exit			= (void *)qmap_exit,
 	.flags			= SCX_OPS_ENQ_LAST,

scheds/c/scx_simple.bpf.c

Lines changed: 14 additions & 11 deletions

@@ -51,19 +51,22 @@ static inline bool vtime_before(u64 a, u64 b)
 	return (s64)(a - b) < 0;
 }

-void BPF_STRUCT_OPS(simple_enqueue, struct task_struct *p, u64 enq_flags)
+s32 BPF_STRUCT_OPS(simple_select_cpu, struct task_struct *p, s32 prev_cpu, u64 wake_flags)
 {
-	/*
-	 * If scx_select_cpu_dfl() is setting %SCX_ENQ_LOCAL, it indicates that
-	 * running @p on its CPU directly shouldn't affect fairness. Just queue
-	 * it on the local FIFO.
-	 */
-	if (enq_flags & SCX_ENQ_LOCAL) {
+	bool is_idle = false;
+	s32 cpu;
+
+	cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
+	if (is_idle) {
 		stat_inc(0);	/* count local queueing */
-		scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, enq_flags);
-		return;
+		scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 	}

+	return cpu;
+}
+
+void BPF_STRUCT_OPS(simple_enqueue, struct task_struct *p, u64 enq_flags)
+{
 	stat_inc(1);	/* count global queueing */

 	if (fifo_sched) {
@@ -120,8 +123,7 @@ void BPF_STRUCT_OPS(simple_stopping, struct task_struct *p, bool runnable)
 	p->scx.dsq_vtime += (SCX_SLICE_DFL - p->scx.slice) * 100 / p->scx.weight;
 }

-void BPF_STRUCT_OPS(simple_enable, struct task_struct *p,
-		    struct scx_enable_args *args)
+void BPF_STRUCT_OPS(simple_enable, struct task_struct *p)
 {
 	p->scx.dsq_vtime = vtime_now;
 }
@@ -141,6 +143,7 @@ void BPF_STRUCT_OPS(simple_exit, struct scx_exit_info *ei)

 SEC(".struct_ops.link")
 struct sched_ext_ops simple_ops = {
+	.select_cpu		= (void *)simple_select_cpu,
 	.enqueue		= (void *)simple_enqueue,
 	.dispatch		= (void *)simple_dispatch,
 	.running		= (void *)simple_running,

scheds/c/scx_userland.bpf.c

Lines changed: 3 additions & 3 deletions

@@ -300,8 +300,8 @@ void BPF_STRUCT_OPS(userland_update_idle, s32 cpu, bool idle)
 	}
 }

-s32 BPF_STRUCT_OPS(userland_prep_enable, struct task_struct *p,
-		   struct scx_enable_args *args)
+s32 BPF_STRUCT_OPS(userland_init_task, struct task_struct *p,
+		   struct scx_init_task_args *args)
 {
 	if (bpf_task_storage_get(&task_ctx_stor, p, 0,
 				 BPF_LOCAL_STORAGE_GET_F_CREATE))
@@ -340,7 +340,7 @@ struct sched_ext_ops userland_ops = {
 	.enqueue		= (void *)userland_enqueue,
 	.dispatch		= (void *)userland_dispatch,
 	.update_idle		= (void *)userland_update_idle,
-	.prep_enable		= (void *)userland_prep_enable,
+	.init_task		= (void *)userland_init_task,
 	.init			= (void *)userland_init,
 	.exit			= (void *)userland_exit,
 	.flags			= SCX_OPS_ENQ_LAST | SCX_OPS_KEEP_BUILTIN_IDLE,

scheds/include/scx/common.bpf.h

Lines changed: 1 addition & 0 deletions

@@ -68,6 +68,7 @@ const struct cpumask *scx_bpf_get_idle_cpumask(void) __ksym;
 const struct cpumask *scx_bpf_get_idle_smtmask(void) __ksym;
 void scx_bpf_put_idle_cpumask(const struct cpumask *cpumask) __ksym;
 void scx_bpf_destroy_dsq(u64 dsq_id) __ksym;
+s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, bool *is_idle) __ksym;
 bool scx_bpf_task_running(const struct task_struct *p) __ksym;
 s32 scx_bpf_task_cpu(const struct task_struct *p) __ksym;
 struct cgroup *scx_bpf_task_cgroup(struct task_struct *p) __ksym;
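Besides the new kfunc declaration above, the commit tracks two callback renames visible in every scheduler diff: ops.prep_enable (taking struct scx_enable_args) becomes ops.init_task (taking struct scx_init_task_args), and ops.enable loses its args parameter. A partial wiring sketch under those assumptions follows; the example_ names are illustrative and the ops table omits the other callbacks a real scheduler would define.

/* Sketch only: the renamed per-task init/enable callbacks after the kernel update. */
#include <scx/common.bpf.h>

s32 BPF_STRUCT_OPS(example_init_task, struct task_struct *p,
		   struct scx_init_task_args *args)
{
	/* Per-task setup that previously lived in a .prep_enable callback. */
	return 0;
}

void BPF_STRUCT_OPS(example_enable, struct task_struct *p)
{
	/* .enable no longer receives a struct scx_enable_args pointer. */
}

SEC(".struct_ops.link")
struct sched_ext_ops example_ops = {
	.init_task	= (void *)example_init_task,
	.enable		= (void *)example_enable,
	/* .select_cpu, .enqueue, .dispatch, etc. omitted from this sketch. */
	.name		= "example",
};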
