* [PATCH 0/3] sched_ext: lockless peek operation for DSQs
@ 2025-10-02 2:57 Ryan Newton
2025-10-02 2:57 ` [PATCH 1/3] sched_ext: Add " Ryan Newton
` (2 more replies)
0 siblings, 3 replies; 6+ messages in thread
From: Ryan Newton @ 2025-10-02 2:57 UTC (permalink / raw)
To: linux-kernel; +Cc: tj, arighi, newton, Ryan Newton
This allows sched_ext schedulers an inexpensive operation to peek
at the first element in a queue (DSQ), without creating an iterator
and acquiring the lock on that queue.
Note that manual testing has thus far included a modified version of the
example qmap scheduler that exercises peek, as well as a modified
LAVD (from the SCX repo) that exercises peek. The attached test
passes >1000 stress tests when run in concurrent VMs, and when run
sequentially on the host kernel. Presently, tested on the below
workstation and server processors.
- AMD Ryzen Threadripper PRO 7975WX 32-Cores
- AMD EPYC 9D64 88-Core Processor
Initial experiments indicate a substantial speedup (on schbench) when
running an SCX scheduler with per-cpu DSQs and peeking each queue to
retrieve the task with the minimum vruntime across all the CPUs.
Ryan Newton (3):
sched_ext: Add lockless peek operation for DSQs
sched_ext: optimize first_task update logic
sched_ext: Add a selftest for scx_bpf_dsq_peek
include/linux/sched/ext.h | 1 +
kernel/sched/ext.c | 55 ++++-
tools/sched_ext/include/scx/common.bpf.h | 1 +
tools/sched_ext/include/scx/compat.bpf.h | 19 ++
tools/testing/selftests/sched_ext/Makefile | 1 +
.../selftests/sched_ext/peek_dsq.bpf.c | 133 +++++++++++++
tools/testing/selftests/sched_ext/peek_dsq.c | 188 ++++++++++++++++++
7 files changed, 396 insertions(+), 2 deletions(-)
create mode 100644 tools/testing/selftests/sched_ext/peek_dsq.bpf.c
create mode 100644 tools/testing/selftests/sched_ext/peek_dsq.c
--
2.51.0
^ permalink raw reply [flat|nested] 6+ messages in thread
* [PATCH 1/3] sched_ext: Add lockless peek operation for DSQs
2025-10-02 2:57 [PATCH 0/3] sched_ext: lockless peek operation for DSQs Ryan Newton
@ 2025-10-02 2:57 ` Ryan Newton
2025-10-02 7:13 ` Andrea Righi
2025-10-02 2:57 ` [PATCH 2/3] sched_ext: optimize first_task update logic Ryan Newton
2025-10-02 2:57 ` [PATCH 3/3] sched_ext: Add a selftest for scx_bpf_dsq_peek Ryan Newton
2 siblings, 1 reply; 6+ messages in thread
From: Ryan Newton @ 2025-10-02 2:57 UTC (permalink / raw)
To: linux-kernel; +Cc: tj, arighi, newton
From: Ryan Newton <newton@meta.com>
The builtin DSQ queue data structures are meant to be used by a wide
range of different sched_ext schedulers with different demands on these
data structures. They might be per-cpu with low-contention, or
high-contention shared queues. Unfortunately, DSQs have a coarse-grained
lock around the whole data structure. Without going all the way to a
lock-free, more scalable implementation, a small step we can take to
reduce lock contention is to allow a lockless, small-fixed-cost peek at
the head of the queue.
This change allows certain custom SCX schedulers to cheaply peek at
queues, e.g. during load balancing, before locking them. But it
represents a few extra memory operations to update the pointer each
time the DSQ is modified, including a memory barrier on ARM so the write
appears correctly ordered.
This commit adds a first_task pointer field which is updated
atomically when the DSQ is modified, and allows any thread to peek at
the head of the queue without holding the lock.
Signed-off-by: Ryan Newton <newton@meta.com>
---
include/linux/sched/ext.h | 1 +
kernel/sched/ext.c | 37 ++++++++++++++++++++++++
tools/sched_ext/include/scx/common.bpf.h | 1 +
tools/sched_ext/include/scx/compat.bpf.h | 19 ++++++++++++
4 files changed, 58 insertions(+)
diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h
index d82b7a9b0658..81478d4ae782 100644
--- a/include/linux/sched/ext.h
+++ b/include/linux/sched/ext.h
@@ -58,6 +58,7 @@ enum scx_dsq_id_flags {
*/
struct scx_dispatch_q {
raw_spinlock_t lock;
+ struct task_struct __rcu *first_task; /* lockless peek at head */
struct list_head list; /* tasks in dispatch order */
struct rb_root priq; /* used to order by p->scx.dsq_vtime */
u32 nr;
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 2b0e88206d07..fd0121c03311 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -885,6 +885,15 @@ static void refill_task_slice_dfl(struct scx_sched *sch, struct task_struct *p)
__scx_add_event(sch, SCX_EV_REFILL_SLICE_DFL, 1);
}
+/* while holding dsq->lock */
+static void dsq_update_first_task(struct scx_dispatch_q *dsq)
+{
+ struct task_struct *first_task;
+
+ first_task = nldsq_next_task(dsq, NULL, false);
+ rcu_assign_pointer(dsq->first_task, first_task);
+}
+
static void dispatch_enqueue(struct scx_sched *sch, struct scx_dispatch_q *dsq,
struct task_struct *p, u64 enq_flags)
{
@@ -959,6 +968,9 @@ static void dispatch_enqueue(struct scx_sched *sch, struct scx_dispatch_q *dsq,
list_add_tail(&p->scx.dsq_list.node, &dsq->list);
}
+ /* even the add_tail code path may have changed the first element */
+ dsq_update_first_task(dsq);
+
/* seq records the order tasks are queued, used by BPF DSQ iterator */
dsq->seq++;
p->scx.dsq_seq = dsq->seq;
@@ -1013,6 +1025,7 @@ static void task_unlink_from_dsq(struct task_struct *p,
list_del_init(&p->scx.dsq_list.node);
dsq_mod_nr(dsq, -1);
+ dsq_update_first_task(dsq);
}
static void dispatch_dequeue(struct rq *rq, struct task_struct *p)
@@ -6084,6 +6097,29 @@ __bpf_kfunc void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it)
kit->dsq = NULL;
}
+/**
+ * scx_bpf_dsq_peek - Lockless peek at the first element.
+ * @dsq_id: DSQ to examine.
+ *
+ * Read the first element in the DSQ. This is semantically equivalent to using
+ * the DSQ iterator, but is lockfree.
+ *
+ * Returns the pointer, or uses ERR_PTR() to encode an error as the pointer.
+ */
+__bpf_kfunc struct task_struct *scx_bpf_dsq_peek(u64 dsq_id)
+{
+ struct scx_sched *sch;
+ struct scx_dispatch_q *dsq;
+
+ /* KF_RCU_PROTECTED means no need to guard(rcu)() */
+ sch = rcu_dereference(scx_root);
+
+ if (unlikely(!sch))
+ return ERR_PTR(-ENODEV);
+ dsq = find_user_dsq(sch, dsq_id);
+ return rcu_dereference(dsq->first_task);
+}
+
__bpf_kfunc_end_defs();
static s32 __bstr_format(struct scx_sched *sch, u64 *data_buf, char *line_buf,
@@ -6641,6 +6677,7 @@ BTF_KFUNCS_START(scx_kfunc_ids_any)
BTF_ID_FLAGS(func, scx_bpf_kick_cpu)
BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued)
BTF_ID_FLAGS(func, scx_bpf_destroy_dsq)
+BTF_ID_FLAGS(func, scx_bpf_dsq_peek, KF_RCU_PROTECTED | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_scx_dsq_new, KF_ITER_NEW | KF_RCU_PROTECTED)
BTF_ID_FLAGS(func, bpf_iter_scx_dsq_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_scx_dsq_destroy, KF_ITER_DESTROY)
diff --git a/tools/sched_ext/include/scx/common.bpf.h b/tools/sched_ext/include/scx/common.bpf.h
index 06e2551033cb..fbf3e7f9526c 100644
--- a/tools/sched_ext/include/scx/common.bpf.h
+++ b/tools/sched_ext/include/scx/common.bpf.h
@@ -75,6 +75,7 @@ u32 scx_bpf_reenqueue_local(void) __ksym;
void scx_bpf_kick_cpu(s32 cpu, u64 flags) __ksym;
s32 scx_bpf_dsq_nr_queued(u64 dsq_id) __ksym;
void scx_bpf_destroy_dsq(u64 dsq_id) __ksym;
+struct task_struct *scx_bpf_dsq_peek(u64 dsq_id) __ksym __weak;
int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id, u64 flags) __ksym __weak;
struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it) __ksym __weak;
void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it) __ksym __weak;
diff --git a/tools/sched_ext/include/scx/compat.bpf.h b/tools/sched_ext/include/scx/compat.bpf.h
index dd9144624dc9..0af1922d66a8 100644
--- a/tools/sched_ext/include/scx/compat.bpf.h
+++ b/tools/sched_ext/include/scx/compat.bpf.h
@@ -130,6 +130,25 @@ int bpf_cpumask_populate(struct cpumask *dst, void *src, size_t src__sz) __ksym
false; \
})
+
+/*
+ * v6.19: Introduce lockless peek API for user DSQs.
+ *
+ * Preserve the following macro until v6.20.
+ */
+static inline struct task_struct *__COMPAT_scx_bpf_dsq_peek(u64 dsq_id)
+{
+ struct task_struct *p = NULL;
+ struct bpf_iter_scx_dsq it;
+
+ if (bpf_ksym_exists(scx_bpf_dsq_peek))
+ return scx_bpf_dsq_peek(dsq_id);
+ if (!bpf_iter_scx_dsq_new(&it, dsq_id, 0))
+ p = bpf_iter_scx_dsq_next(&it);
+ bpf_iter_scx_dsq_destroy(&it);
+ return p;
+}
+
/**
* __COMPAT_is_enq_cpu_selected - Test if SCX_ENQ_CPU_SELECTED is on
* in a compatible way. We will preserve this __COMPAT helper until v6.16.
--
2.51.0
^ permalink raw reply related [flat|nested] 6+ messages in thread
* [PATCH 2/3] sched_ext: optimize first_task update logic
2025-10-02 2:57 [PATCH 0/3] sched_ext: lockless peek operation for DSQs Ryan Newton
2025-10-02 2:57 ` [PATCH 1/3] sched_ext: Add " Ryan Newton
@ 2025-10-02 2:57 ` Ryan Newton
2025-10-02 7:27 ` Andrea Righi
2025-10-02 2:57 ` [PATCH 3/3] sched_ext: Add a selftest for scx_bpf_dsq_peek Ryan Newton
2 siblings, 1 reply; 6+ messages in thread
From: Ryan Newton @ 2025-10-02 2:57 UTC (permalink / raw)
To: linux-kernel; +Cc: tj, arighi, newton
From: Ryan Newton <newton@meta.com>
This is a follow-on optimization to the prior commit which added a
lockless peek operation on DSQs. That implementation is correct and
simple, but elides several optimizations.
Previously, we read the first_task using the same slowpath, irrespective
of where we enqueue the task. With this change, we instead base the
update on what we know about the calling context. On both insert and
removal we can break down whether the change (1) definitely, (2) never,
or (3) sometimes changes first task. In some cases we know what the new
first task will be, and can set it more directly.
Signed-off-by: Ryan Newton <newton@meta.com>
---
kernel/sched/ext.c | 26 ++++++++++++++++++++------
1 file changed, 20 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index fd0121c03311..1cb10aa9913a 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -953,8 +953,11 @@ static void dispatch_enqueue(struct scx_sched *sch, struct scx_dispatch_q *dsq,
container_of(rbp, struct task_struct,
scx.dsq_priq);
list_add(&p->scx.dsq_list.node, &prev->scx.dsq_list.node);
+ /* first task unchanged - no update needed */
} else {
list_add(&p->scx.dsq_list.node, &dsq->list);
+ /* new task is at head - use fastpath */
+ rcu_assign_pointer(dsq->first_task, p);
}
} else {
/* a FIFO DSQ shouldn't be using PRIQ enqueuing */
@@ -962,15 +965,20 @@ static void dispatch_enqueue(struct scx_sched *sch, struct scx_dispatch_q *dsq,
scx_error(sch, "DSQ ID 0x%016llx already had PRIQ-enqueued tasks",
dsq->id);
- if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
+ if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT)) {
list_add(&p->scx.dsq_list.node, &dsq->list);
- else
+ /* new task inserted at head - use fastpath */
+ rcu_assign_pointer(dsq->first_task, p);
+ } else {
+ bool was_empty;
+
+ was_empty = list_empty(&dsq->list);
list_add_tail(&p->scx.dsq_list.node, &dsq->list);
+ if (was_empty)
+ rcu_assign_pointer(dsq->first_task, p);
+ }
}
- /* even the add_tail code path may have changed the first element */
- dsq_update_first_task(dsq);
-
/* seq records the order tasks are queued, used by BPF DSQ iterator */
dsq->seq++;
p->scx.dsq_seq = dsq->seq;
@@ -1023,9 +1031,15 @@ static void task_unlink_from_dsq(struct task_struct *p,
p->scx.dsq_flags &= ~SCX_TASK_DSQ_ON_PRIQ;
}
+ if (dsq->first_task == p) {
+ if (dsq->id & SCX_DSQ_FLAG_BUILTIN)
+ rcu_assign_pointer(dsq->first_task,
+ list_next_entry(p, scx.dsq_list.node));
+ else
+ dsq_update_first_task(dsq);
+ }
list_del_init(&p->scx.dsq_list.node);
dsq_mod_nr(dsq, -1);
- dsq_update_first_task(dsq);
}
static void dispatch_dequeue(struct rq *rq, struct task_struct *p)
--
2.51.0
^ permalink raw reply related [flat|nested] 6+ messages in thread
* [PATCH 3/3] sched_ext: Add a selftest for scx_bpf_dsq_peek
2025-10-02 2:57 [PATCH 0/3] sched_ext: lockless peek operation for DSQs Ryan Newton
2025-10-02 2:57 ` [PATCH 1/3] sched_ext: Add " Ryan Newton
2025-10-02 2:57 ` [PATCH 2/3] sched_ext: optimize first_task update logic Ryan Newton
@ 2025-10-02 2:57 ` Ryan Newton
2 siblings, 0 replies; 6+ messages in thread
From: Ryan Newton @ 2025-10-02 2:57 UTC (permalink / raw)
To: linux-kernel; +Cc: tj, arighi, newton
From: Ryan Newton <newton@meta.com>
This is the most basic unit test: make sure an empty queue peeks as
empty, and when we put one element in the queue, make sure peek returns
that element.
However, even this simple test is a little complicated by the different
behavior of scx_bpf_dsq_insert in different calling contexts:
- insert is for direct dispatch in enqueue
- insert is delayed when called from select_cpu
In this case we split the insert and the peek that verifies the
result between enqueue/dispatch.
Note: An alternative would be to call `scx_bpf_dsq_move_to_local` on an
empty queue, which in turn calls `flush_dispatch_buf`, in order to flush
the buffered insert. Unfortunately, this is not viable within the
enqueue path, as it attempts a voluntary context switch within an RCU
read-side critical section.
Signed-off-by: Ryan Newton <newton@meta.com>
---
tools/testing/selftests/sched_ext/Makefile | 1 +
.../selftests/sched_ext/peek_dsq.bpf.c | 133 +++++++++++++
tools/testing/selftests/sched_ext/peek_dsq.c | 188 ++++++++++++++++++
3 files changed, 322 insertions(+)
create mode 100644 tools/testing/selftests/sched_ext/peek_dsq.bpf.c
create mode 100644 tools/testing/selftests/sched_ext/peek_dsq.c
diff --git a/tools/testing/selftests/sched_ext/Makefile b/tools/testing/selftests/sched_ext/Makefile
index 9d9d6b4c38b0..5fe45f9c5f8f 100644
--- a/tools/testing/selftests/sched_ext/Makefile
+++ b/tools/testing/selftests/sched_ext/Makefile
@@ -174,6 +174,7 @@ auto-test-targets := \
minimal \
numa \
allowed_cpus \
+ peek_dsq \
prog_run \
reload_loop \
select_cpu_dfl \
diff --git a/tools/testing/selftests/sched_ext/peek_dsq.bpf.c b/tools/testing/selftests/sched_ext/peek_dsq.bpf.c
new file mode 100644
index 000000000000..6bbd98799503
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/peek_dsq.bpf.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * A BPF program for testing DSQ operations including create, destroy,
+ * and peek operations. Uses a hybrid approach:
+ * - Syscall program for DSQ lifecycle (create/destroy)
+ * - Struct ops scheduler for task insertion/dequeue testing
+ *
+ * Copyright (c) 2025 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2025 Ryan Newton <ryan.newton@alum.mit.edu>
+ */
+
+#include <scx/common.bpf.h>
+#include <scx/compat.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
+/* Global variables to store test results */
+int dsq_create_result = -1;
+int dsq_destroy_result = -1;
+int dsq_peek_result1 = -1;
+long dsq_inserted_pid = -1;
+int insert_test_cpu = -1; /* Set to the cpu that performs the test */
+long dsq_peek_result2 = -1;
+long dsq_peek_result2_pid = -1;
+long dsq_peek_result2_expected = -1;
+int test_dsq_id = 1234; /* Use a simple ID like create_dsq example */
+int real_dsq_id = 1235; /* DSQ for normal operation */
+int enqueue_count = -1;
+int dispatch_count = -1;
+int debug_ksym_exists = -1;
+
+
+/* Test if we're actually using the native or compat version */
+int check_dsq_insert_ksym(void)
+{
+ return bpf_ksym_exists(scx_bpf_dsq_insert) ? 1 : 0;
+}
+
+int check_dsq_peek_ksym(void)
+{
+ return bpf_ksym_exists(scx_bpf_dsq_peek) ? 1 : 0;
+}
+
+/* Struct_ops scheduler for testing DSQ peek operations */
+void BPF_STRUCT_OPS(peek_dsq_enqueue, struct task_struct *p, u64 enq_flags)
+{
+ struct task_struct *peek_result;
+ int last_insert_test_cpu, cpu;
+
+ enqueue_count++;
+ cpu = bpf_get_smp_processor_id();
+ last_insert_test_cpu = __sync_val_compare_and_swap(
+ &insert_test_cpu, -1, cpu);
+
+ /* On the first task, just do the empty DSQ test and insert into test DSQ */
+ if (last_insert_test_cpu == -1) {
+ bpf_printk("peek_dsq_enqueue beginning peek test on cpu %d\n", cpu);
+
+ /* Test 1: Peek empty DSQ - should return NULL */
+ peek_result = __COMPAT_scx_bpf_dsq_peek(test_dsq_id);
+ dsq_peek_result1 = (long)peek_result; /* Should be 0 (NULL) */
+
+ /* Test 2: Insert task into test DSQ for testing in dispatch callback */
+ dsq_inserted_pid = p->pid;
+ scx_bpf_dsq_insert(p, test_dsq_id, 0, enq_flags);
+ dsq_peek_result2_expected = (long)p; /* Expected the task we just inserted */
+ } else
+ scx_bpf_dsq_insert(p, real_dsq_id, 0, enq_flags);
+}
+
+void BPF_STRUCT_OPS(peek_dsq_dispatch, s32 cpu, struct task_struct *prev)
+{
+ dispatch_count++;
+ /* Complete the peek test if we inserted a task but haven't tested peek yet */
+ if (insert_test_cpu == cpu && dsq_peek_result2 == -1) {
+ struct task_struct *peek_result;
+
+ bpf_printk("peek_dsq_dispatch completing second half of peek test on cpu %d\n",
+ cpu);
+
+ /* Test 3: Peek DSQ after insert - should return the task we inserted */
+ peek_result = __COMPAT_scx_bpf_dsq_peek(test_dsq_id);
+ /* Store the PID of the peeked task for comparison */
+ dsq_peek_result2 = (long)peek_result;
+ dsq_peek_result2_pid = peek_result ? peek_result->pid : -1;
+
+ /* Now consume the task since we've peeked at it */
+ scx_bpf_dsq_move_to_local(test_dsq_id);
+ } else
+ scx_bpf_dsq_move_to_local(real_dsq_id);
+}
+
+s32 BPF_STRUCT_OPS_SLEEPABLE(peek_dsq_init)
+{
+ s32 err;
+
+ /* Always set debug values so we can see which version we're using */
+ debug_ksym_exists = check_dsq_peek_ksym();
+
+ /* Initialize state first */
+ insert_test_cpu = -1;
+ enqueue_count = 0;
+ dsq_create_result = 0; /* Reset to 0 before attempting */
+
+ /* Create a DSQ */
+ err = scx_bpf_create_dsq(test_dsq_id, -1);
+ if (!err)
+ err = scx_bpf_create_dsq(real_dsq_id, -1);
+ if (err) {
+ dsq_create_result = err;
+ scx_bpf_error("Failed to create DSQ %d: %d", test_dsq_id, err);
+ return err;
+ }
+
+ dsq_create_result = 1; /* Success */
+
+ return 0;
+}
+
+void BPF_STRUCT_OPS(peek_dsq_exit, struct scx_exit_info *ei)
+{
+ scx_bpf_destroy_dsq(test_dsq_id);
+ dsq_destroy_result = 1;
+}
+
+SEC(".struct_ops.link")
+struct sched_ext_ops peek_dsq_ops = {
+ .enqueue = (void *)peek_dsq_enqueue,
+ .dispatch = (void *)peek_dsq_dispatch,
+ .init = (void *)peek_dsq_init,
+ .exit = (void *)peek_dsq_exit,
+ .name = "peek_dsq",
+};
diff --git a/tools/testing/selftests/sched_ext/peek_dsq.c b/tools/testing/selftests/sched_ext/peek_dsq.c
new file mode 100644
index 000000000000..ba9e03c2bd49
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/peek_dsq.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test for DSQ operations including create, destroy, and peek operations.
+ *
+ * Copyright (c) 2025 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2025 Ryan Newton <ryan.newton@alum.mit.edu>
+ */
+#include <bpf/bpf.h>
+#include <scx/common.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <string.h>
+#include <sched.h>
+#include "peek_dsq.bpf.skel.h"
+#include "scx_test.h"
+
+static bool workload_running = true;
+static pthread_t workload_thread;
+
+/**
+ * Background workload thread that sleeps and wakes rapidly to exercise
+ * the scheduler's enqueue operations and ensure DSQ operations get tested.
+ */
+static void *workload_thread_fn(void *arg)
+{
+ while (workload_running) {
+ /* Sleep for a very short time to trigger scheduler activity */
+ usleep(1000); /* 1ms sleep */
+ /* Yield to ensure we go through the scheduler */
+ sched_yield();
+ }
+ return NULL;
+}
+
+static enum scx_test_status setup(void **ctx)
+{
+ struct peek_dsq *skel;
+
+ skel = peek_dsq__open();
+ SCX_FAIL_IF(!skel, "Failed to open");
+ SCX_ENUM_INIT(skel);
+ SCX_FAIL_IF(peek_dsq__load(skel), "Failed to load skel");
+
+ *ctx = skel;
+
+ return SCX_TEST_PASS;
+}
+
+static enum scx_test_status run(void *ctx)
+{
+ struct peek_dsq *skel = ctx;
+ bool failed = false;
+ int seconds = 2;
+ int err;
+
+ /* Enable the scheduler to test DSQ operations */
+ printf("Enabling scheduler to test DSQ insert operations...\n");
+
+ struct bpf_link *link =
+ bpf_map__attach_struct_ops(skel->maps.peek_dsq_ops);
+
+ if (!link) {
+ SCX_ERR("Failed to attach struct_ops");
+ return SCX_TEST_FAIL;
+ }
+
+ /* Start background workload thread to exercise the scheduler */
+ printf("Starting background workload thread...\n");
+ workload_running = true;
+ err = pthread_create(&workload_thread, NULL, workload_thread_fn, NULL);
+ if (err) {
+ SCX_ERR("Failed to create workload thread: %s", strerror(err));
+ bpf_link__destroy(link);
+ return SCX_TEST_FAIL;
+ }
+
+ printf("Waiting for enqueue events.\n");
+ sleep(2);
+ while (skel->data->enqueue_count <= 0) {
+ printf(".");
+ fflush(stdout);
+ sleep(1);
+ seconds++;
+ if (seconds >= 30) {
+ printf("\n✗ Timeout waiting for enqueue events\n");
+ /* Stop workload thread and cleanup */
+ workload_running = false;
+ pthread_join(workload_thread, NULL);
+ bpf_link__destroy(link);
+ return SCX_TEST_FAIL;
+ }
+ }
+
+ workload_running = false;
+ err = pthread_join(workload_thread, NULL);
+ if (err) {
+ SCX_ERR("Failed to join workload thread: %s", strerror(err));
+ bpf_link__destroy(link);
+ return SCX_TEST_FAIL;
+ }
+ printf("Background workload thread stopped.\n");
+
+ /* Detach the scheduler */
+ bpf_link__destroy(link);
+
+ /* Check if DSQ creation succeeded */
+ if (skel->data->dsq_create_result != 1) {
+ printf("✗ DSQ create failed: got %d, expected 1\n",
+ skel->data->dsq_create_result);
+ failed = true;
+ } else {
+ printf("✓ DSQ create succeeded\n");
+ }
+
+ printf("Enqueue/dispatch count over %d seconds: %d / %d\n", seconds,
+ skel->data->enqueue_count, skel->data->dispatch_count);
+ printf("Debug: ksym_exists=%d\n",
+ skel->data->debug_ksym_exists);
+
+ /* Check DSQ insert result */
+ printf("DSQ insert test done on cpu: %d\n", skel->data->insert_test_cpu);
+ if (skel->data->insert_test_cpu != -1)
+ printf("✓ DSQ insert succeeded !\n");
+ else {
+ printf("✗ DSQ insert failed or not attempted\n");
+ failed = true;
+ }
+
+ /* Check DSQ peek results */
+ printf(" DSQ peek result 1 (before insert): %d\n",
+ skel->data->dsq_peek_result1);
+ if (skel->data->dsq_peek_result1 == 0)
+ printf("✓ DSQ peek verification succeeded - peek returned NULL!\n");
+ else {
+ printf("✗ DSQ peek verification failed\n");
+ failed = true;
+ }
+
+ printf(" DSQ peek result 2 (after insert): %ld\n",
+ skel->data->dsq_peek_result2);
+ printf(" DSQ peek result 2, expected: %ld\n",
+ skel->data->dsq_peek_result2_expected);
+ if (skel->data->dsq_peek_result2 ==
+ skel->data->dsq_peek_result2_expected)
+ printf("✓ DSQ peek verification succeeded - peek returned the inserted task!\n");
+ else {
+ printf("✗ DSQ peek verification failed\n");
+ failed = true;
+ }
+
+ printf(" Inserted test task -> pid: %ld\n", skel->data->dsq_inserted_pid);
+ printf(" DSQ peek result 2 -> pid: %ld\n", skel->data->dsq_peek_result2_pid);
+
+ if (skel->data->dsq_destroy_result != 1) {
+ printf("✗ DSQ destroy failed: got %d, expected 1\n",
+ skel->data->dsq_destroy_result);
+ failed = true;
+ }
+
+ if (failed)
+ return SCX_TEST_FAIL;
+ else
+ return SCX_TEST_PASS;
+}
+
+static void cleanup(void *ctx)
+{
+ struct peek_dsq *skel = ctx;
+
+ /* Ensure workload thread is stopped */
+ if (workload_running) {
+ workload_running = false;
+ pthread_join(workload_thread, NULL);
+ }
+
+ peek_dsq__destroy(skel);
+}
+
+struct scx_test peek_dsq = {
+ .name = "peek_dsq",
+ .description =
+ "Test DSQ create/destroy operations and future peek functionality",
+ .setup = setup,
+ .run = run,
+ .cleanup = cleanup,
+};
+REGISTER_SCX_TEST(&peek_dsq)
--
2.51.0
^ permalink raw reply related [flat|nested] 6+ messages in thread
* Re: [PATCH 1/3] sched_ext: Add lockless peek operation for DSQs
2025-10-02 2:57 ` [PATCH 1/3] sched_ext: Add " Ryan Newton
@ 2025-10-02 7:13 ` Andrea Righi
0 siblings, 0 replies; 6+ messages in thread
From: Andrea Righi @ 2025-10-02 7:13 UTC (permalink / raw)
To: Ryan Newton; +Cc: linux-kernel, tj, newton
Hi Ryan,
On Wed, Oct 01, 2025 at 10:57:19PM -0400, Ryan Newton wrote:
> From: Ryan Newton <newton@meta.com>
>
> The builtin DSQ queue data structures are meant to be used by a wide
> range of different sched_ext schedulers with different demands on these
> data structures. They might be per-cpu with low-contention, or
> high-contention shared queues. Unfortunately, DSQs have a coarse-grained
> lock around the whole data structure. Without going all the way to a
> lock-free, more scalable implementation, a small step we can take to
> reduce lock contention is to allow a lockless, small-fixed-cost peek at
> the head of the queue.
>
> This change allows certain custom SCX schedulers to cheaply peek at
> queues, e.g. during load balancing, before locking them. But it
> represents a few extra memory operations to update the pointer each
> time the DSQ is modified, including a memory barrier on ARM so the write
> appears correctly ordered.
>
> This commit adds a first_task pointer field which is updated
> atomically when the DSQ is modified, and allows any thread to peek at
> the head of the queue without holding the lock.
>
> Signed-off-by: Ryan Newton <newton@meta.com>
> ---
> include/linux/sched/ext.h | 1 +
> kernel/sched/ext.c | 37 ++++++++++++++++++++++++
> tools/sched_ext/include/scx/common.bpf.h | 1 +
> tools/sched_ext/include/scx/compat.bpf.h | 19 ++++++++++++
> 4 files changed, 58 insertions(+)
>
> diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h
> index d82b7a9b0658..81478d4ae782 100644
> --- a/include/linux/sched/ext.h
> +++ b/include/linux/sched/ext.h
> @@ -58,6 +58,7 @@ enum scx_dsq_id_flags {
> */
> struct scx_dispatch_q {
> raw_spinlock_t lock;
> + struct task_struct __rcu *first_task; /* lockless peek at head */
> struct list_head list; /* tasks in dispatch order */
> struct rb_root priq; /* used to order by p->scx.dsq_vtime */
> u32 nr;
> diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
> index 2b0e88206d07..fd0121c03311 100644
> --- a/kernel/sched/ext.c
> +++ b/kernel/sched/ext.c
> @@ -885,6 +885,15 @@ static void refill_task_slice_dfl(struct scx_sched *sch, struct task_struct *p)
> __scx_add_event(sch, SCX_EV_REFILL_SLICE_DFL, 1);
> }
>
> +/* while holding dsq->lock */
> +static void dsq_update_first_task(struct scx_dispatch_q *dsq)
> +{
> + struct task_struct *first_task;
> +
> + first_task = nldsq_next_task(dsq, NULL, false);
This requires holding dsq->lock, but...
> + rcu_assign_pointer(dsq->first_task, first_task);
> +}
> +
> static void dispatch_enqueue(struct scx_sched *sch, struct scx_dispatch_q *dsq,
> struct task_struct *p, u64 enq_flags)
> {
> @@ -959,6 +968,9 @@ static void dispatch_enqueue(struct scx_sched *sch, struct scx_dispatch_q *dsq,
> list_add_tail(&p->scx.dsq_list.node, &dsq->list);
> }
>
> + /* even the add_tail code path may have changed the first element */
> + dsq_update_first_task(dsq);
...we're not holding dsq->lock here when dsq->id == SCX_DSQ_LOCAL.
This seems to be fixed in PATCH 2/3, so I'd suggest squashing that change
into this one to avoid triggering lockdep warnings during bisect.
Moreover, I think splitting them doesn't add much value for the review, so
merging both makes more sense to me.
> +
> /* seq records the order tasks are queued, used by BPF DSQ iterator */
> dsq->seq++;
> p->scx.dsq_seq = dsq->seq;
> @@ -1013,6 +1025,7 @@ static void task_unlink_from_dsq(struct task_struct *p,
>
> list_del_init(&p->scx.dsq_list.node);
> dsq_mod_nr(dsq, -1);
> + dsq_update_first_task(dsq);
> }
>
> static void dispatch_dequeue(struct rq *rq, struct task_struct *p)
> @@ -6084,6 +6097,29 @@ __bpf_kfunc void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it)
> kit->dsq = NULL;
> }
>
> +/**
> + * scx_bpf_dsq_peek - Lockless peek at the first element.
> + * @dsq_id: DSQ to examine.
> + *
> + * Read the first element in the DSQ. This is semantically equivalent to using
> + * the DSQ iterator, but is lockfree.
> + *
> + * Returns the pointer, or uses ERR_PTR() to encode an error as the pointer.
> + */
> +__bpf_kfunc struct task_struct *scx_bpf_dsq_peek(u64 dsq_id)
> +{
> + struct scx_sched *sch;
> + struct scx_dispatch_q *dsq;
> +
> + /* KF_RCU_PROTECTED means no need to guard(rcu)() */
I think this comment can be dropped, the meaning should already be clear
from the KF_RCU_PROTECTED annotation.
> + sch = rcu_dereference(scx_root);
> +
> + if (unlikely(!sch))
> + return ERR_PTR(-ENODEV);
I'm wondering if we should just return NULL here, to simplify error
handling in the caller. In this way we just need to check for (p != NULL),
instead of (p != NULL) && !IS_ERR(p).
> + dsq = find_user_dsq(sch, dsq_id);
Hm.. let's do something like this:
if (unlikely(!dsq)) {
scx_error(sch, "non-existent DSQ ID 0x%016llx", dsq_id);
return NULL;
}
or bad things can happen. :)
This also implies that scx_bpf_dsq_peek() works only with user DSQs and
will always return NULL for built-in DSQs.
So, what about adding also:
if (unlikely((dsq_id & SCX_DSQ_FLAG_BUILTIN))) {
scx_error(sch, "invalid DSQ ID 0x%016llx (only user DSQs allowed)", dsq_id);
return NULL;
}
I think this would be a reasonable requirement. Typically once a task is
queued to a built-in DSQ (e.g., SCX_DSQ_LOCAL[_ON]), it is effectively
considered dispatched, so peeking at its state is rarely needed.
This would also allow us to get rid of updating the first_task pointer
with built-in DSQs, saving some memory ops in the hot paths.
> + return rcu_dereference(dsq->first_task);
> +}
> +
> __bpf_kfunc_end_defs();
>
> static s32 __bstr_format(struct scx_sched *sch, u64 *data_buf, char *line_buf,
> @@ -6641,6 +6677,7 @@ BTF_KFUNCS_START(scx_kfunc_ids_any)
> BTF_ID_FLAGS(func, scx_bpf_kick_cpu)
> BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued)
> BTF_ID_FLAGS(func, scx_bpf_destroy_dsq)
> +BTF_ID_FLAGS(func, scx_bpf_dsq_peek, KF_RCU_PROTECTED | KF_RET_NULL)
> BTF_ID_FLAGS(func, bpf_iter_scx_dsq_new, KF_ITER_NEW | KF_RCU_PROTECTED)
> BTF_ID_FLAGS(func, bpf_iter_scx_dsq_next, KF_ITER_NEXT | KF_RET_NULL)
> BTF_ID_FLAGS(func, bpf_iter_scx_dsq_destroy, KF_ITER_DESTROY)
> diff --git a/tools/sched_ext/include/scx/common.bpf.h b/tools/sched_ext/include/scx/common.bpf.h
> index 06e2551033cb..fbf3e7f9526c 100644
> --- a/tools/sched_ext/include/scx/common.bpf.h
> +++ b/tools/sched_ext/include/scx/common.bpf.h
> @@ -75,6 +75,7 @@ u32 scx_bpf_reenqueue_local(void) __ksym;
> void scx_bpf_kick_cpu(s32 cpu, u64 flags) __ksym;
> s32 scx_bpf_dsq_nr_queued(u64 dsq_id) __ksym;
> void scx_bpf_destroy_dsq(u64 dsq_id) __ksym;
> +struct task_struct *scx_bpf_dsq_peek(u64 dsq_id) __ksym __weak;
> int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id, u64 flags) __ksym __weak;
> struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it) __ksym __weak;
> void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it) __ksym __weak;
> diff --git a/tools/sched_ext/include/scx/compat.bpf.h b/tools/sched_ext/include/scx/compat.bpf.h
> index dd9144624dc9..0af1922d66a8 100644
> --- a/tools/sched_ext/include/scx/compat.bpf.h
> +++ b/tools/sched_ext/include/scx/compat.bpf.h
> @@ -130,6 +130,25 @@ int bpf_cpumask_populate(struct cpumask *dst, void *src, size_t src__sz) __ksym
> false; \
> })
>
> +
> +/*
> + * v6.19: Introduce lockless peek API for user DSQs.
> + *
> + * Preserve the following macro until v6.20.
Usually we keep the __COMPAT_*() helpers for 2 major kernel versions, maybe
let's bump this to v6.21.
> + */
> +static inline struct task_struct *__COMPAT_scx_bpf_dsq_peek(u64 dsq_id)
> +{
> + struct task_struct *p = NULL;
> + struct bpf_iter_scx_dsq it;
> +
> + if (bpf_ksym_exists(scx_bpf_dsq_peek))
> + return scx_bpf_dsq_peek(dsq_id);
> + if (!bpf_iter_scx_dsq_new(&it, dsq_id, 0))
> + p = bpf_iter_scx_dsq_next(&it);
> + bpf_iter_scx_dsq_destroy(&it);
> + return p;
> +}
> +
> /**
> * __COMPAT_is_enq_cpu_selected - Test if SCX_ENQ_CPU_SELECTED is on
> * in a compatible way. We will preserve this __COMPAT helper until v6.16.
> --
> 2.51.0
>
Thanks,
-Andrea
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH 2/3] sched_ext: optimize first_task update logic
2025-10-02 2:57 ` [PATCH 2/3] sched_ext: optimize first_task update logic Ryan Newton
@ 2025-10-02 7:27 ` Andrea Righi
0 siblings, 0 replies; 6+ messages in thread
From: Andrea Righi @ 2025-10-02 7:27 UTC (permalink / raw)
To: Ryan Newton; +Cc: linux-kernel, tj, newton
On Wed, Oct 01, 2025 at 10:57:20PM -0400, Ryan Newton wrote:
> From: Ryan Newton <newton@meta.com>
>
> This is a follow-on optimization to the prior commit which added a
> lockless peek operation on DSQs. That implementation is correct and
> simple, but elides several optimizations.
>
> Previously, we read the first_task using the same slowpath, irrespective
> of where we enqueue the task. With this change, we instead base the
> update on what we know about the calling context. On both insert and
> removal we can break down whether the change (1) definitely, (2) never,
> or (3) sometimes changes first task. In some cases we know what the new
> first task will be, and can set it more directly.
>
> Signed-off-by: Ryan Newton <newton@meta.com>
> ---
> kernel/sched/ext.c | 26 ++++++++++++++++++++------
> 1 file changed, 20 insertions(+), 6 deletions(-)
>
> diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
> index fd0121c03311..1cb10aa9913a 100644
> --- a/kernel/sched/ext.c
> +++ b/kernel/sched/ext.c
> @@ -953,8 +953,11 @@ static void dispatch_enqueue(struct scx_sched *sch, struct scx_dispatch_q *dsq,
> container_of(rbp, struct task_struct,
> scx.dsq_priq);
> list_add(&p->scx.dsq_list.node, &prev->scx.dsq_list.node);
> + /* first task unchanged - no update needed */
> } else {
> list_add(&p->scx.dsq_list.node, &dsq->list);
> + /* new task is at head - use fastpath */
> + rcu_assign_pointer(dsq->first_task, p);
> }
> } else {
> /* a FIFO DSQ shouldn't be using PRIQ enqueuing */
> @@ -962,15 +965,20 @@ static void dispatch_enqueue(struct scx_sched *sch, struct scx_dispatch_q *dsq,
> scx_error(sch, "DSQ ID 0x%016llx already had PRIQ-enqueued tasks",
> dsq->id);
>
> - if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
> + if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT)) {
> list_add(&p->scx.dsq_list.node, &dsq->list);
> - else
> + /* new task inserted at head - use fastpath */
> + rcu_assign_pointer(dsq->first_task, p);
> + } else {
> + bool was_empty;
> +
> + was_empty = list_empty(&dsq->list);
> list_add_tail(&p->scx.dsq_list.node, &dsq->list);
> + if (was_empty)
> + rcu_assign_pointer(dsq->first_task, p);
> + }
> }
>
> - /* even the add_tail code path may have changed the first element */
> - dsq_update_first_task(dsq);
> -
> /* seq records the order tasks are queued, used by BPF DSQ iterator */
> dsq->seq++;
> p->scx.dsq_seq = dsq->seq;
> @@ -1023,9 +1031,15 @@ static void task_unlink_from_dsq(struct task_struct *p,
> p->scx.dsq_flags &= ~SCX_TASK_DSQ_ON_PRIQ;
> }
>
> + if (dsq->first_task == p) {
> + if (dsq->id & SCX_DSQ_FLAG_BUILTIN)
> + rcu_assign_pointer(dsq->first_task,
> + list_next_entry(p, scx.dsq_list.node));
nit: no need to split in two lines, it should fit in the 100 characters per
line limit.
> + else
> + dsq_update_first_task(dsq);
> + }
However, from my comment in PATCH 1/3, if we allow to use
scx_bpf_dsq_peek() only with user DSQs this would become:
if (!(dsq->id & SCX_DSQ_FLAG_BUILTIN) && dsq->first_task == p)
dsq_update_first_task(dsq);
> list_del_init(&p->scx.dsq_list.node);
> dsq_mod_nr(dsq, -1);
> - dsq_update_first_task(dsq);
> }
>
> static void dispatch_dequeue(struct rq *rq, struct task_struct *p)
> --
> 2.51.0
>
Thanks,
-Andrea
^ permalink raw reply [flat|nested] 6+ messages in thread
end of thread, other threads:[~2025-10-02 7:27 UTC | newest]
Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2025-10-02 2:57 [PATCH 0/3] sched_ext: lockless peek operation for DSQs Ryan Newton
2025-10-02 2:57 ` [PATCH 1/3] sched_ext: Add " Ryan Newton
2025-10-02 7:13 ` Andrea Righi
2025-10-02 2:57 ` [PATCH 2/3] sched_ext: optimize first_task update logic Ryan Newton
2025-10-02 7:27 ` Andrea Righi
2025-10-02 2:57 ` [PATCH 3/3] sched_ext: Add a selftest for scx_bpf_dsq_peek Ryan Newton
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox