public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH v2] sched_ext: Fix stale direct dispatch state in ddsp_dsq_id
@ 2026-04-02  8:57 Andrea Righi
  2026-04-02 20:10 ` Tejun Heo
  0 siblings, 1 reply; 3+ messages in thread
From: Andrea Righi @ 2026-04-02  8:57 UTC (permalink / raw)
  To: Tejun Heo, David Vernet, Changwoo Min
  Cc: Daniel Hodges, Patrick Somaru, sched-ext, linux-kernel

@p->scx.ddsp_dsq_id can be left set (non-SCX_DSQ_INVALID) triggering a
spurious warning in mark_direct_dispatch() when the next wakeup's
ops.select_cpu() calls scx_bpf_dsq_insert(), such as:

 WARNING: kernel/sched/ext.c:1273 at scx_dsq_insert_commit+0xcd/0x140

The root cause is that ddsp_dsq_id was only cleared in dispatch_enqueue(),
which is not reached in all paths that consume or cancel a direct dispatch
verdict. Instead, clear it at the right places:

 - direct_dispatch(): cache the direct dispatch state in local variables
   and clear it before dispatch_enqueue() on the synchronous path. For
   the deferred path, the direct dispatch state must remain set until
   process_ddsp_deferred_locals() consumes it.

 - process_ddsp_deferred_locals(): cache the dispatch state in local
   variables and clear it before calling dispatch_to_local_dsq(), which
   may migrate the task to another rq.

 - do_enqueue_task(): clear the dispatch state on the enqueue path
   (local/global/bypass fallbacks), where the direct dispatch verdict is
   ignored.

 - dequeue_task_scx(): clear the dispatch state after dispatch_dequeue()
   to handle both the deferred dispatch cancellation and the holding_cpu
   race, covering all cases where a pending direct dispatch is
   cancelled.

 - scx_disable_task(): clear the direct dispatch state when
   transitioning a task out of the current scheduler. Waking tasks may
   have had the direct dispatch state set by the outgoing scheduler's
   ops.select_cpu() and then been queued on a wake_list via
   ttwu_queue_wakelist(), when SCX_OPS_ALLOW_QUEUED_WAKEUP is set. Such
   tasks are not on the runqueue and are not iterated by scx_bypass(),
   so their direct dispatch state won't be cleared. Without this clear,
   when the new scheduler calls scx_enable_task() for these tasks, any
   subsequent ops.select_cpu() call that tries to direct dispatch the
   task will trigger the WARN_ON_ONCE() in mark_direct_dispatch().

Fixes: 5b26f7b920f76 ("sched_ext: Allow SCX_DSQ_LOCAL_ON for direct dispatches")
Cc: stable@vger.kernel.org # v6.12+
Cc: Daniel Hodges <hodgesd@meta.com>
Cc: Patrick Somaru <patsomaru@meta.com>
Signed-off-by: Andrea Righi <arighi@nvidia.com>
---
Changes in v2:
 - Move clearing of the direct dispatch state out of dispatch_enqueue() to the
   appropriate places (Tejun Heo)
 - Do not unconditionally clear the direct dispatch state in scx_enable_task()
 - Link to v1: https://lore.kernel.org/all/20260401215619.1188194-1-arighi@nvidia.com

 kernel/sched/ext.c | 44 +++++++++++++++++++++++++++-----------------
 1 file changed, 27 insertions(+), 17 deletions(-)

diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index d5bdcdb3f7004..71bb83fccd60d 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -1109,15 +1109,6 @@ static void dispatch_enqueue(struct scx_sched *sch, struct scx_dispatch_q *dsq,
 	dsq_mod_nr(dsq, 1);
 	p->scx.dsq = dsq;
 
-	/*
-	 * scx.ddsp_dsq_id and scx.ddsp_enq_flags are only relevant on the
-	 * direct dispatch path, but we clear them here because the direct
-	 * dispatch verdict may be overridden on the enqueue path during e.g.
-	 * bypass.
-	 */
-	p->scx.ddsp_dsq_id = SCX_DSQ_INVALID;
-	p->scx.ddsp_enq_flags = 0;
-
 	/*
 	 * We're transitioning out of QUEUEING or DISPATCHING. store_release to
 	 * match waiters' load_acquire.
@@ -1283,16 +1274,23 @@ static void mark_direct_dispatch(struct scx_sched *sch,
 	p->scx.ddsp_enq_flags = enq_flags;
 }
 
+static inline void clear_direct_dispatch(struct task_struct *p)
+{
+	p->scx.ddsp_dsq_id = SCX_DSQ_INVALID;
+	p->scx.ddsp_enq_flags = 0;
+}
+
 static void direct_dispatch(struct scx_sched *sch, struct task_struct *p,
 			    u64 enq_flags)
 {
 	struct rq *rq = task_rq(p);
-	struct scx_dispatch_q *dsq =
-		find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, p);
+	u64 dsq_id = p->scx.ddsp_dsq_id;
+	u64 ddsp_enq_flags = p->scx.ddsp_enq_flags | enq_flags;
+	struct scx_dispatch_q *dsq;
 
 	touch_core_sched_dispatch(rq, p);
 
-	p->scx.ddsp_enq_flags |= enq_flags;
+	dsq = find_dsq_for_dispatch(sch, rq, dsq_id, p);
 
 	/*
 	 * We are in the enqueue path with @rq locked and pinned, and thus can't
@@ -1303,6 +1301,12 @@ static void direct_dispatch(struct scx_sched *sch, struct task_struct *p,
 	if (dsq->id == SCX_DSQ_LOCAL && dsq != &rq->scx.local_dsq) {
 		unsigned long opss;
 
+		/*
+		 * Update the direct dispatch state and keep it until
+		 * process_ddsp_deferred_locals() consumes it.
+		 */
+		p->scx.ddsp_enq_flags = ddsp_enq_flags;
+
 		opss = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_STATE_MASK;
 
 		switch (opss & SCX_OPSS_STATE_MASK) {
@@ -1329,8 +1333,8 @@ static void direct_dispatch(struct scx_sched *sch, struct task_struct *p,
 		return;
 	}
 
-	dispatch_enqueue(sch, dsq, p,
-			 p->scx.ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS);
+	clear_direct_dispatch(p);
+	dispatch_enqueue(sch, dsq, p, ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS);
 }
 
 static bool scx_rq_online(struct rq *rq)
@@ -1439,6 +1443,7 @@ static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags,
 	 */
 	touch_core_sched(rq, p);
 	refill_task_slice_dfl(sch, p);
+	clear_direct_dispatch(p);
 	dispatch_enqueue(sch, dsq, p, enq_flags);
 }
 
@@ -1610,6 +1615,7 @@ static bool dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags
 	sub_nr_running(rq, 1);
 
 	dispatch_dequeue(rq, p);
+	clear_direct_dispatch(p);
 	return true;
 }
 
@@ -2293,13 +2299,15 @@ static void process_ddsp_deferred_locals(struct rq *rq)
 				struct task_struct, scx.dsq_list.node))) {
 		struct scx_sched *sch = scx_root;
 		struct scx_dispatch_q *dsq;
+		u64 dsq_id = p->scx.ddsp_dsq_id;
+		u64 enq_flags = p->scx.ddsp_enq_flags;
 
 		list_del_init(&p->scx.dsq_list.node);
+		clear_direct_dispatch(p);
 
-		dsq = find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, p);
+		dsq = find_dsq_for_dispatch(sch, rq, dsq_id, p);
 		if (!WARN_ON_ONCE(dsq->id != SCX_DSQ_LOCAL))
-			dispatch_to_local_dsq(sch, rq, dsq, p,
-					      p->scx.ddsp_enq_flags);
+			dispatch_to_local_dsq(sch, rq, dsq, p, enq_flags);
 	}
 }
 
@@ -3147,6 +3155,8 @@ static bool task_dead_and_done(struct task_struct *p)
 
 	lockdep_assert_rq_held(rq);
 
+	clear_direct_dispatch(p);
+
 	/*
 	 * In do_task_dead(), a dying task sets %TASK_DEAD with preemption
 	 * disabled and __schedule(). If @p has %TASK_DEAD set and off CPU, @p
-- 
2.53.0


^ permalink raw reply related	[flat|nested] 3+ messages in thread

* Re: [PATCH v2] sched_ext: Fix stale direct dispatch state in ddsp_dsq_id
  2026-04-02  8:57 [PATCH v2] sched_ext: Fix stale direct dispatch state in ddsp_dsq_id Andrea Righi
@ 2026-04-02 20:10 ` Tejun Heo
  2026-04-02 21:04   ` Andrea Righi
  0 siblings, 1 reply; 3+ messages in thread
From: Tejun Heo @ 2026-04-02 20:10 UTC (permalink / raw)
  To: Andrea Righi
  Cc: David Vernet, Changwoo Min, Daniel Hodges, Patrick Somaru,
	sched-ext, linux-kernel

Hello, Andrea.

On Thu, Apr 02, 2026 at 10:57:43AM +0200, Andrea Righi wrote:
> @p->scx.ddsp_dsq_id can be left set (non-SCX_DSQ_INVALID) triggering a
> spurious warning in mark_direct_dispatch() when the next wakeup's
> ops.select_cpu() calls scx_bpf_dsq_insert(), such as:
> 
>  WARNING: kernel/sched/ext.c:1273 at scx_dsq_insert_commit+0xcd/0x140
> 
> The root cause is that ddsp_dsq_id was only cleared in dispatch_enqueue(),
> which is not reached in all paths that consume or cancel a direct dispatch
> verdict. Instead, clear it at the right places:
> 
>  - direct_dispatch(): cache the direct dispatch state in local variables
>    and clear it before dispatch_enqueue() on the synchronous path. For
>    the deferred path, the direct dispatch state must remain set until
>    process_ddsp_deferred_locals() consumes them.
> 
>  - process_ddsp_deferred_locals(): cache the dispatch state in local
>    variables and clear it before calling dispatch_to_local_dsq(), which
>    may migrate the task to another rq.
> 
>  - do_enqueue_task(): clear the dispatch state on the enqueue path
>    (local/global/bypass fallbacks), where the direct dispatch verdict is
>    ignored.
> 
>  - dequeue_task_scx(): clear the dispatch state after dispatch_dequeue()
>    to handle both the deferred dispatch cancellation and the holding_cpu
>    race, covering all cases where a pending direct dispatch is
>    cancelled.
> 
>  - scx_disable_task(): clear the direct dispatch state when
>    transitioning a task out of the current scheduler. Waking tasks may
>    have had the direct dispatch state set by the outgoing scheduler's
>    ops.select_cpu() and then been queued on a wake_list via
>    ttwu_queue_wakelist(), when SCX_OPS_ALLOW_QUEUED_WAKEUP is set. Such
>    tasks are not on the runqueue and are not iterated by scx_bypass(),
>    so their direct dispatch state won't be cleared. Without this clear,
>    when the new scheduler calls scx_enable_task() for these tasks, any
>    subsequent ops.select_cpu() call that tries to direct dispatch the
>    task will trigger the WARN_ON_ONCE() in mark_direct_dispatch().

Can you add an abbreviated version of the above as a function comment on
clear_direct_dispatch()?

>  static void direct_dispatch(struct scx_sched *sch, struct task_struct *p,
>  			    u64 enq_flags)
>  {
...
> @@ -1303,6 +1301,12 @@ static void direct_dispatch(struct scx_sched *sch, struct task_struct *p,
>  	if (dsq->id == SCX_DSQ_LOCAL && dsq != &rq->scx.local_dsq) {
>  		unsigned long opss;
>  
> +		/*
> +		 * Update the direct dispatch state and keep it until
> +		 * process_ddsp_deferred_locals() consumes it.
> +		 */
> +		p->scx.ddsp_enq_flags = ddsp_enq_flags;

I know I suggested it but this looks kinda odd. How about we keep the
original p->scx.ddsp_enq_flags |= enq_flags above and then do

...

Cache enq_flags here?

> +	clear_direct_dispatch(p);
> +	dispatch_enqueue(sch, dsq, p, ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS);
>  }
>  
>  static bool scx_rq_online(struct rq *rq)
...
> @@ -3147,6 +3155,8 @@ static bool task_dead_and_done(struct task_struct *p)
>  
>  	lockdep_assert_rq_held(rq);
>  
> +	clear_direct_dispatch(p);

This is task_dead_and_done(), not scx_disable_task(). Is this intended?

Thanks.

-- 
tejun

^ permalink raw reply	[flat|nested] 3+ messages in thread

* Re: [PATCH v2] sched_ext: Fix stale direct dispatch state in ddsp_dsq_id
  2026-04-02 20:10 ` Tejun Heo
@ 2026-04-02 21:04   ` Andrea Righi
  0 siblings, 0 replies; 3+ messages in thread
From: Andrea Righi @ 2026-04-02 21:04 UTC (permalink / raw)
  To: Tejun Heo
  Cc: David Vernet, Changwoo Min, Daniel Hodges, Patrick Somaru,
	sched-ext, linux-kernel

Hi Tejun,

On Thu, Apr 02, 2026 at 10:10:20AM -1000, Tejun Heo wrote:
> Hello, Andrea.
> 
> On Thu, Apr 02, 2026 at 10:57:43AM +0200, Andrea Righi wrote:
> > @p->scx.ddsp_dsq_id can be left set (non-SCX_DSQ_INVALID) triggering a
> > spurious warning in mark_direct_dispatch() when the next wakeup's
> > ops.select_cpu() calls scx_bpf_dsq_insert(), such as:
> > 
> >  WARNING: kernel/sched/ext.c:1273 at scx_dsq_insert_commit+0xcd/0x140
> > 
> > The root cause is that ddsp_dsq_id was only cleared in dispatch_enqueue(),
> > which is not reached in all paths that consume or cancel a direct dispatch
> > verdict. Instead, clear it at the right places:
> > 
> >  - direct_dispatch(): cache the direct dispatch state in local variables
> >    and clear it before dispatch_enqueue() on the synchronous path. For
> >    the deferred path, the direct dispatch state must remain set until
> >    process_ddsp_deferred_locals() consumes them.
> > 
> >  - process_ddsp_deferred_locals(): cache the dispatch state in local
> >    variables and clear it before calling dispatch_to_local_dsq(), which
> >    may migrate the task to another rq.
> > 
> >  - do_enqueue_task(): clear the dispatch state on the enqueue path
> >    (local/global/bypass fallbacks), where the direct dispatch verdict is
> >    ignored.
> > 
> >  - dequeue_task_scx(): clear the dispatch state after dispatch_dequeue()
> >    to handle both the deferred dispatch cancellation and the holding_cpu
> >    race, covering all cases where a pending direct dispatch is
> >    cancelled.
> > 
> >  - scx_disable_task(): clear the direct dispatch state when
> >    transitioning a task out of the current scheduler. Waking tasks may
> >    have had the direct dispatch state set by the outgoing scheduler's
> >    ops.select_cpu() and then been queued on a wake_list via
> >    ttwu_queue_wakelist(), when SCX_OPS_ALLOW_QUEUED_WAKEUP is set. Such
> >    tasks are not on the runqueue and are not iterated by scx_bypass(),
> >    so their direct dispatch state won't be cleared. Without this clear,
> >    when the new scheduler calls scx_enable_task() for these tasks, any
> >    subsequent ops.select_cpu() call that tries to direct dispatch the
> >    task will trigger the WARN_ON_ONCE() in mark_direct_dispatch().
> 
> Can you add an abbreviated version of the above as a function comment on
> clear_direct_dispatch()?

Ack.

> 
> >  static void direct_dispatch(struct scx_sched *sch, struct task_struct *p,
> >  			    u64 enq_flags)
> >  {
> ...
> > @@ -1303,6 +1301,12 @@ static void direct_dispatch(struct scx_sched *sch, struct task_struct *p,
> >  	if (dsq->id == SCX_DSQ_LOCAL && dsq != &rq->scx.local_dsq) {
> >  		unsigned long opss;
> >  
> > +		/*
> > +		 * Update the direct dispatch state and keep it until
> > +		 * process_ddsp_deferred_locals() consumes it.
> > +		 */
> > +		p->scx.ddsp_enq_flags = ddsp_enq_flags;
> 
> I know I suggested it but this looks kinda odd. How about we keep the
> original p->scx.ddsp_enq_flags |= enq_flags above and then do
> 
> ...
> 
> Cache enq_flags here?

Ack.

> 
> > +	clear_direct_dispatch(p);
> > +	dispatch_enqueue(sch, dsq, p, ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS);
> >  }
> >  
> >  static bool scx_rq_online(struct rq *rq)
> ...
> > @@ -3147,6 +3155,8 @@ static bool task_dead_and_done(struct task_struct *p)
> >  
> >  	lockdep_assert_rq_held(rq);
> >  
> > +	clear_direct_dispatch(p);
> 
> This is task_dead_and_done(), not scx_disable_task(). Is this intended?

No, I messed up. My initial patch was based on for-7.1, I backported to
for-7.0-fixes and this chunk was applied to the wrong function. Thanks for
catching it!

I'll send a new version.

-Andrea

^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2026-04-02 21:04 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2026-04-02  8:57 [PATCH v2] sched_ext: Fix stale direct dispatch state in ddsp_dsq_id Andrea Righi
2026-04-02 20:10 ` Tejun Heo
2026-04-02 21:04   ` Andrea Righi

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox