The Linux Kernel Mailing List
[PATCH sched_ext/for-7.2] sched_ext: Add scx_task_iter_relock() and use it in scx_root_enable_workfn()
From: Tejun Heo @ 2026-05-10 22:23 UTC
  To: void, arighi, changwoo; +Cc: emil, sched-ext, linux-kernel, Tejun Heo

scx_root_enable_workfn()'s post-init block briefly re-acquires
scx_tasks_lock via a guard() for the tid hash insertion. c941d7391f25
("sched_ext: Close root-enable vs sched_ext_dead() race with
SCX_TASK_INIT_BEGIN") on for-7.1-fixes adds a post-init DEAD recheck that
holds the task's rq lock across the state-machine updates in the same
region. A naive merge would acquire scx_tasks_lock while the rq lock is
already held, inverting the iterator's nesting order (scx_tasks_lock
outer, rq lock inner).
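
To make the inversion concrete (a minimal sketch, not code from the
tree; only the lock and helper names that appear in the diff below are
real):

	/* the iterator's nesting order */
	raw_spin_lock_irq(&scx_tasks_lock);	/* outer */
	rq = task_rq_lock(p, &rf);		/* inner */

	/* the naive merge */
	rq = task_rq_lock(p, &rf);		/* rq lock taken first */
	raw_spin_lock_irq(&scx_tasks_lock);	/* inverted, can deadlock
						 * against the iterator */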

Add scx_task_iter_relock(iter, p), the counterpart to
scx_task_iter_unlock(), that re-acquires scx_tasks_lock and, if @p is
non-NULL, @p's rq lock. The locks are tracked in @iter so subsequent
iteration releases them.
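
In use, the pattern looks roughly like this (a sketch paraphrasing the
loop in scx_root_enable_workfn(); scx_task_iter_start() and
scx_task_iter_next_locked() are assumed from surrounding code, not part
of this patch):

	struct scx_task_iter sti;
	struct task_struct *p;

	scx_task_iter_start(&sti);
	while ((p = scx_task_iter_next_locked(&sti))) {
		get_task_struct(p);
		scx_task_iter_unlock(&sti);	/* drop scx_tasks_lock + rq */

		/* sleepable per-task init runs unlocked */

		scx_task_iter_relock(&sti, p);	/* retake both, tracked in @sti */
		/* locked updates, then iteration continues */
		put_task_struct(p);
	}
	scx_task_iter_stop(&sti);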

Use it in scx_root_enable_workfn()'s post-init block and drop the
now-redundant guard() around the hash insertion. The post-init region now
runs with both scx_tasks_lock and the task's rq lock held across the
state-machine updates and the hash insert.

Signed-off-by: Tejun Heo <tj@kernel.org>
---
Applied to sched_ext/for-7.2 to resolve the merge conflict from
for-7.1-fixes.

 kernel/sched/ext.c | 37 +++++++++++++++++++++++++++++--------
 1 file changed, 29 insertions(+), 8 deletions(-)

diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index b685f45b4fd0..d23707d88cfb 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -902,6 +902,24 @@ static void __scx_task_iter_maybe_relock(struct scx_task_iter *iter)
 	}
 }
 
+/**
+ * scx_task_iter_relock - Re-acquire scx_tasks_lock and, optionally, @p's rq
+ * @iter: iterator to relock
+ * @p: task whose rq to lock, or %NULL for scx_tasks_lock only
+ *
+ * Counterpart to scx_task_iter_unlock(). Locking @p's rq is optional. Once
+ * re-acquired, both locks are managed by the iterator from here on.
+ */
+static void scx_task_iter_relock(struct scx_task_iter *iter,
+				 struct task_struct *p)
+{
+	__scx_task_iter_maybe_relock(iter);
+	if (p) {
+		iter->rq = task_rq_lock(p, &iter->rf);
+		iter->locked_task = p;
+	}
+}
+
 /**
  * scx_task_iter_stop - Stop a task iteration and unlock scx_tasks_lock
  * @iter: iterator to exit
@@ -7072,19 +7090,22 @@ static void scx_root_enable_workfn(struct kthread_work *work)
 			goto err_disable_unlock_all;
 		}
 
+		/*
+		 * Hold both scx_tasks_lock and @p's rq across the state machine
+		 * updates and the tid hash insert below.
+		 */
+		scx_task_iter_relock(&sti, p);
+
 		scx_set_task_sched(p, sch);
 		scx_set_task_state(p, SCX_TASK_READY);
 
 		/*
-		 * Insert into the tid hash under scx_tasks_lock so we can't
-		 * race sched_ext_dead() and leave a stale entry for an already
-		 * exited task.
+		 * Insert into the tid hash. scx_tasks_lock is held by the iter;
+		 * list_empty() guards against sched_ext_dead() having taken @p
+		 * off the list while init ran unlocked.
 		 */
-		if (scx_tid_to_task_enabled()) {
-			guard(raw_spinlock_irq)(&scx_tasks_lock);
-			if (!list_empty(&p->scx.tasks_node))
-				scx_tid_hash_insert(p);
-		}
+		if (scx_tid_to_task_enabled() && !list_empty(&p->scx.tasks_node))
+			scx_tid_hash_insert(p);
 
 		put_task_struct(p);
 	}
-- 
2.54.0



[PATCH v2 sched_ext/for-7.2] sched_ext: Add scx_task_iter_relock() and use it in scx_root_enable_workfn()
From: Tejun Heo @ 2026-05-10 22:45 UTC
  To: void, arighi, changwoo; +Cc: emil, sched-ext, linux-kernel, Tejun Heo

scx_root_enable_workfn()'s post-init block briefly re-acquires
scx_tasks_lock via a guard() for the tid hash insertion. c941d7391f25
("sched_ext: Close root-enable vs sched_ext_dead() race with
SCX_TASK_INIT_BEGIN") on for-7.1-fixes adds a post-init DEAD recheck that
holds the task's rq lock across the state-machine updates in the same
region. A naive merge would acquire scx_tasks_lock while the rq lock is
already held, inverting the iterator's nesting order (scx_tasks_lock
outer, rq lock inner).

Add scx_task_iter_relock(iter, p), the counterpart to
scx_task_iter_unlock(), that re-acquires scx_tasks_lock and, if @p is
non-NULL, @p's rq lock. The locks are tracked in @iter so subsequent
iteration releases them.

Use it in scx_root_enable_workfn()'s post-init block and drop the
now-redundant guard() around the hash insertion. The post-init region now
runs with both scx_tasks_lock and the task's rq lock held across the init
failure check, the state-machine updates, and the hash insert.

v2: Move scx_task_iter_relock() earlier to ease the for-7.1-fixes merge.
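
Condensed from the diff below, the relock now precedes the failure
check, so the error path also runs locked and relies on
scx_task_iter_stop() dropping the locks the iterator tracks:

	ret = scx_init_task(sch, p, false);

	scx_task_iter_relock(&sti, p);		/* before checking @ret */

	if (ret) {
		put_task_struct(p);
		scx_task_iter_stop(&sti);	/* releases the tracked locks */
		...
	}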

Signed-off-by: Tejun Heo <tj@kernel.org>
---
Applied to sched_ext/for-7.2 to resolve the merge conflict from
for-7.1-fixes.

 kernel/sched/ext.c | 34 ++++++++++++++++++++++++++--------
 1 file changed, 26 insertions(+), 8 deletions(-)

diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index b685f45b4fd0..fbdc1819d4cb 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -902,6 +902,24 @@ static void __scx_task_iter_maybe_relock(struct scx_task_iter *iter)
 	}
 }
 
+/**
+ * scx_task_iter_relock - Re-acquire scx_tasks_lock and, optionally, @p's rq
+ * @iter: iterator to relock
+ * @p: task whose rq to lock, or %NULL for scx_tasks_lock only
+ *
+ * Counterpart to scx_task_iter_unlock(). Locking @p's rq is optional. Once
+ * re-acquired, both locks are managed by the iterator from here on.
+ */
+static void scx_task_iter_relock(struct scx_task_iter *iter,
+				 struct task_struct *p)
+{
+	__scx_task_iter_maybe_relock(iter);
+	if (p) {
+		iter->rq = task_rq_lock(p, &iter->rf);
+		iter->locked_task = p;
+	}
+}
+
 /**
  * scx_task_iter_stop - Stop a task iteration and unlock scx_tasks_lock
  * @iter: iterator to exit
@@ -7064,6 +7082,9 @@ static void scx_root_enable_workfn(struct kthread_work *work)
 		scx_task_iter_unlock(&sti);
 
 		ret = scx_init_task(sch, p, false);
+
+		scx_task_iter_relock(&sti, p);
+
 		if (ret) {
 			put_task_struct(p);
 			scx_task_iter_stop(&sti);
@@ -7076,15 +7097,12 @@ static void scx_root_enable_workfn(struct kthread_work *work)
 		scx_set_task_state(p, SCX_TASK_READY);
 
 		/*
-		 * Insert into the tid hash under scx_tasks_lock so we can't
-		 * race sched_ext_dead() and leave a stale entry for an already
-		 * exited task.
+		 * Insert into the tid hash. scx_tasks_lock is held by the iter;
+		 * list_empty() guards against sched_ext_dead() having taken @p
+		 * off the list while init ran unlocked.
 		 */
-		if (scx_tid_to_task_enabled()) {
-			guard(raw_spinlock_irq)(&scx_tasks_lock);
-			if (!list_empty(&p->scx.tasks_node))
-				scx_tid_hash_insert(p);
-		}
+		if (scx_tid_to_task_enabled() && !list_empty(&p->scx.tasks_node))
+			scx_tid_hash_insert(p);
 
 		put_task_struct(p);
 	}
-- 
2.54.0

