* [PATCH sched_ext/for-7.1] selftests/sched_ext: Update scx_bpf_dsq_move_to_local() in kselftests
@ 2026-03-14 6:51 Andrea Righi
2026-03-14 8:45 ` Tejun Heo
0 siblings, 1 reply; 2+ messages in thread
From: Andrea Righi @ 2026-03-14 6:51 UTC (permalink / raw)
To: Tejun Heo, David Vernet, Changwoo Min; +Cc: sched-ext, linux-kernel
After commit 860683763ebf ("sched_ext: Add enq_flags to
scx_bpf_dsq_move_to_local()") some of the kselftests are failing to
build:
exit.bpf.c:44:34: error: too few arguments provided to function-like macro invocation
44 | scx_bpf_dsq_move_to_local(DSQ_ID);
Update the kselftests, adding the new argument to
scx_bpf_dsq_move_to_local().
Fixes: 860683763ebf ("sched_ext: Add enq_flags to scx_bpf_dsq_move_to_local()")
Signed-off-by: Andrea Righi <arighi@nvidia.com>
---
tools/testing/selftests/sched_ext/dequeue.bpf.c | 2 +-
tools/testing/selftests/sched_ext/exit.bpf.c | 2 +-
tools/testing/selftests/sched_ext/maximal.bpf.c | 2 +-
tools/testing/selftests/sched_ext/numa.bpf.c | 2 +-
tools/testing/selftests/sched_ext/peek_dsq.bpf.c | 8 ++++----
tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c | 2 +-
6 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/tools/testing/selftests/sched_ext/dequeue.bpf.c b/tools/testing/selftests/sched_ext/dequeue.bpf.c
index 597b88563d7de..624e2ccb06884 100644
--- a/tools/testing/selftests/sched_ext/dequeue.bpf.c
+++ b/tools/testing/selftests/sched_ext/dequeue.bpf.c
@@ -342,7 +342,7 @@ void BPF_STRUCT_OPS(dequeue_dispatch, s32 cpu, struct task_struct *prev)
bpf_task_release(p);
} else {
- scx_bpf_dsq_move_to_local(SHARED_DSQ);
+ scx_bpf_dsq_move_to_local(SHARED_DSQ, 0);
}
}
diff --git a/tools/testing/selftests/sched_ext/exit.bpf.c b/tools/testing/selftests/sched_ext/exit.bpf.c
index 4bc36182d3ffc..2e848820a44bb 100644
--- a/tools/testing/selftests/sched_ext/exit.bpf.c
+++ b/tools/testing/selftests/sched_ext/exit.bpf.c
@@ -41,7 +41,7 @@ void BPF_STRUCT_OPS(exit_dispatch, s32 cpu, struct task_struct *p)
if (exit_point == EXIT_DISPATCH)
EXIT_CLEANLY();
- scx_bpf_dsq_move_to_local(DSQ_ID);
+ scx_bpf_dsq_move_to_local(DSQ_ID, 0);
}
void BPF_STRUCT_OPS(exit_enable, struct task_struct *p)
diff --git a/tools/testing/selftests/sched_ext/maximal.bpf.c b/tools/testing/selftests/sched_ext/maximal.bpf.c
index 01cf4f3da4e09..a3aabeb82e6be 100644
--- a/tools/testing/selftests/sched_ext/maximal.bpf.c
+++ b/tools/testing/selftests/sched_ext/maximal.bpf.c
@@ -30,7 +30,7 @@ void BPF_STRUCT_OPS(maximal_dequeue, struct task_struct *p, u64 deq_flags)
void BPF_STRUCT_OPS(maximal_dispatch, s32 cpu, struct task_struct *prev)
{
- scx_bpf_dsq_move_to_local(DSQ_ID);
+ scx_bpf_dsq_move_to_local(DSQ_ID, 0);
}
void BPF_STRUCT_OPS(maximal_runnable, struct task_struct *p, u64 enq_flags)
diff --git a/tools/testing/selftests/sched_ext/numa.bpf.c b/tools/testing/selftests/sched_ext/numa.bpf.c
index a79d86ed54a1b..78cc49a7f9a67 100644
--- a/tools/testing/selftests/sched_ext/numa.bpf.c
+++ b/tools/testing/selftests/sched_ext/numa.bpf.c
@@ -68,7 +68,7 @@ void BPF_STRUCT_OPS(numa_dispatch, s32 cpu, struct task_struct *prev)
{
int node = __COMPAT_scx_bpf_cpu_node(cpu);
- scx_bpf_dsq_move_to_local(node);
+ scx_bpf_dsq_move_to_local(node, 0);
}
s32 BPF_STRUCT_OPS_SLEEPABLE(numa_init)
diff --git a/tools/testing/selftests/sched_ext/peek_dsq.bpf.c b/tools/testing/selftests/sched_ext/peek_dsq.bpf.c
index 784f2f6c1af9b..96e3a336a8a60 100644
--- a/tools/testing/selftests/sched_ext/peek_dsq.bpf.c
+++ b/tools/testing/selftests/sched_ext/peek_dsq.bpf.c
@@ -95,7 +95,7 @@ static int scan_dsq_pool(void)
record_peek_result(task->pid);
/* Try to move this task to local */
- if (!moved && scx_bpf_dsq_move_to_local(dsq_id) == 0) {
+ if (!moved && scx_bpf_dsq_move_to_local(dsq_id, 0) == 0) {
moved = 1;
break;
}
@@ -156,19 +156,19 @@ void BPF_STRUCT_OPS(peek_dsq_dispatch, s32 cpu, struct task_struct *prev)
dsq_peek_result2_pid = peek_result ? peek_result->pid : -1;
/* Now consume the task since we've peeked at it */
- scx_bpf_dsq_move_to_local(test_dsq_id);
+ scx_bpf_dsq_move_to_local(test_dsq_id, 0);
/* Mark phase 1 as complete */
phase1_complete = 1;
bpf_printk("Phase 1 complete, starting phase 2 stress testing");
} else if (!phase1_complete) {
/* Still in phase 1, use real DSQ */
- scx_bpf_dsq_move_to_local(real_dsq_id);
+ scx_bpf_dsq_move_to_local(real_dsq_id, 0);
} else {
/* Phase 2: Scan all DSQs in the pool and try to move a task */
if (!scan_dsq_pool()) {
/* No tasks found in DSQ pool, fall back to real DSQ */
- scx_bpf_dsq_move_to_local(real_dsq_id);
+ scx_bpf_dsq_move_to_local(real_dsq_id, 0);
}
}
}
diff --git a/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
index bfcb96cd4954b..7aa5dc6bfb936 100644
--- a/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
@@ -53,7 +53,7 @@ s32 BPF_STRUCT_OPS(select_cpu_vtime_select_cpu, struct task_struct *p,
void BPF_STRUCT_OPS(select_cpu_vtime_dispatch, s32 cpu, struct task_struct *p)
{
- if (scx_bpf_dsq_move_to_local(VTIME_DSQ))
+ if (scx_bpf_dsq_move_to_local(VTIME_DSQ, 0))
consumed = true;
}
--
2.53.0
^ permalink raw reply related [flat|nested] 2+ messages in thread
* Re: [PATCH sched_ext/for-7.1] selftests/sched_ext: Update scx_bpf_dsq_move_to_local() in kselftests
2026-03-14 6:51 [PATCH sched_ext/for-7.1] selftests/sched_ext: Update scx_bpf_dsq_move_to_local() in kselftests Andrea Righi
@ 2026-03-14 8:45 ` Tejun Heo
0 siblings, 0 replies; 2+ messages in thread
From: Tejun Heo @ 2026-03-14 8:45 UTC (permalink / raw)
To: Andrea Righi, David Vernet, Changwoo Min
Cc: sched-ext, linux-kernel, Emil Tsalapatis
Hello,
Applied to sched_ext/for-7.1.
Thanks.
--
tejun
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2026-03-14 8:45 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-03-14 6:51 [PATCH sched_ext/for-7.1] selftests/sched_ext: Update scx_bpf_dsq_move_to_local() in kselftests Andrea Righi
2026-03-14 8:45 ` Tejun Heo
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox