* [PATCH sched_ext/for-6.18] sched_ext: Use cgroup_lock/unlock() to synchronize against cgroup operations
From: Tejun Heo @ 2025-09-02 23:39 UTC
To: David Vernet, Andrea Righi, Changwoo Min
Cc: sched-ext, Peter Zijlstra, Ingo Molnar, linux-kernel
SCX hooks into CPU cgroup controller operations and read-locks
scx_cgroup_rwsem to exclude them while enabling and disabling schedulers.
While this works, it's unnecessarily complicated given that
cgroup_[un]lock() are available and thus the cgroup operations can be locked
out that way.
Drop scx_cgroup_rwsem locking from the tg on/offline and cgroup [can_]attach
operations. Instead, grab cgroup_lock() from scx_cgroup_lock(). Drop
scx_cgroup_finish_attach() which is no longer necessary. Drop the now
unnecessary rcu locking and css ref bumping in scx_cgroup_init() and
scx_cgroup_exit().
As scx_group_set_weight/bandwidth() paths aren't protected by
cgroup_lock(), rename scx_cgroup_rwsem to scx_cgroup_ops_rwsem and retain
the locking there.
This is overall simpler and will also allow enable/disable paths to
synchronize against cgroup changes independent of the CPU controller.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
---
This results in a tiny simplification on the core side. Peter, if you don't
object, I'll route this through sched_ext/for-6.18.
Thanks.
kernel/sched/core.c | 2 -
kernel/sched/ext.c | 66 +++++++++++-----------------------------------------
kernel/sched/ext.h | 2 -
3 files changed, 14 insertions(+), 56 deletions(-)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -9362,8 +9362,6 @@ static void cpu_cgroup_attach(struct cgr
cgroup_taskset_for_each(task, css, tset)
sched_move_task(task, false);
-
- scx_cgroup_finish_attach();
}
static void cpu_cgroup_cancel_attach(struct cgroup_taskset *tset)
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -4084,7 +4084,7 @@ bool scx_can_stop_tick(struct rq *rq)
#ifdef CONFIG_EXT_GROUP_SCHED
-DEFINE_STATIC_PERCPU_RWSEM(scx_cgroup_rwsem);
+DEFINE_STATIC_PERCPU_RWSEM(scx_cgroup_ops_rwsem);
static bool scx_cgroup_enabled;
void scx_tg_init(struct task_group *tg)
@@ -4101,8 +4101,6 @@ int scx_tg_online(struct task_group *tg)
WARN_ON_ONCE(tg->scx.flags & (SCX_TG_ONLINE | SCX_TG_INITED));
- percpu_down_read(&scx_cgroup_rwsem);
-
if (scx_cgroup_enabled) {
if (SCX_HAS_OP(sch, cgroup_init)) {
struct scx_cgroup_init_args args =
@@ -4122,7 +4120,6 @@ int scx_tg_online(struct task_group *tg)
tg->scx.flags |= SCX_TG_ONLINE;
}
- percpu_up_read(&scx_cgroup_rwsem);
return ret;
}
@@ -4132,15 +4129,11 @@ void scx_tg_offline(struct task_group *t
WARN_ON_ONCE(!(tg->scx.flags & SCX_TG_ONLINE));
- percpu_down_read(&scx_cgroup_rwsem);
-
if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_exit) &&
(tg->scx.flags & SCX_TG_INITED))
SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_exit, NULL,
tg->css.cgroup);
tg->scx.flags &= ~(SCX_TG_ONLINE | SCX_TG_INITED);
-
- percpu_up_read(&scx_cgroup_rwsem);
}
int scx_cgroup_can_attach(struct cgroup_taskset *tset)
@@ -4150,9 +4143,6 @@ int scx_cgroup_can_attach(struct cgroup_
struct task_struct *p;
int ret;
- /* released in scx_finish/cancel_attach() */
- percpu_down_read(&scx_cgroup_rwsem);
-
if (!scx_cgroup_enabled)
return 0;
@@ -4192,7 +4182,6 @@ err:
p->scx.cgrp_moving_from = NULL;
}
- percpu_up_read(&scx_cgroup_rwsem);
return ops_sanitize_err(sch, "cgroup_prep_move", ret);
}
@@ -4215,11 +4204,6 @@ void scx_cgroup_move_task(struct task_st
p->scx.cgrp_moving_from = NULL;
}
-void scx_cgroup_finish_attach(void)
-{
- percpu_up_read(&scx_cgroup_rwsem);
-}
-
void scx_cgroup_cancel_attach(struct cgroup_taskset *tset)
{
struct scx_sched *sch = scx_root;
@@ -4227,7 +4211,7 @@ void scx_cgroup_cancel_attach(struct cgr
struct task_struct *p;
if (!scx_cgroup_enabled)
- goto out_unlock;
+ return;
cgroup_taskset_for_each(p, css, tset) {
if (SCX_HAS_OP(sch, cgroup_cancel_move) &&
@@ -4236,15 +4220,13 @@ void scx_cgroup_cancel_attach(struct cgr
p, p->scx.cgrp_moving_from, css->cgroup);
p->scx.cgrp_moving_from = NULL;
}
-out_unlock:
- percpu_up_read(&scx_cgroup_rwsem);
}
void scx_group_set_weight(struct task_group *tg, unsigned long weight)
{
struct scx_sched *sch = scx_root;
- percpu_down_read(&scx_cgroup_rwsem);
+ percpu_down_read(&scx_cgroup_ops_rwsem);
if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_weight) &&
tg->scx.weight != weight)
@@ -4253,7 +4235,7 @@ void scx_group_set_weight(struct task_gr
tg->scx.weight = weight;
- percpu_up_read(&scx_cgroup_rwsem);
+ percpu_up_read(&scx_cgroup_ops_rwsem);
}
void scx_group_set_idle(struct task_group *tg, bool idle)
@@ -4266,7 +4248,7 @@ void scx_group_set_bandwidth(struct task
{
struct scx_sched *sch = scx_root;
- percpu_down_read(&scx_cgroup_rwsem);
+ percpu_down_read(&scx_cgroup_ops_rwsem);
if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_bandwidth) &&
(tg->scx.bw_period_us != period_us ||
@@ -4279,23 +4261,25 @@ void scx_group_set_bandwidth(struct task
tg->scx.bw_quota_us = quota_us;
tg->scx.bw_burst_us = burst_us;
- percpu_up_read(&scx_cgroup_rwsem);
+ percpu_up_read(&scx_cgroup_ops_rwsem);
}
static void scx_cgroup_lock(void)
{
- percpu_down_write(&scx_cgroup_rwsem);
+ percpu_down_write(&scx_cgroup_ops_rwsem);
+ cgroup_lock();
}
static void scx_cgroup_unlock(void)
{
- percpu_up_write(&scx_cgroup_rwsem);
+ cgroup_unlock();
+ percpu_up_write(&scx_cgroup_ops_rwsem);
}
#else /* CONFIG_EXT_GROUP_SCHED */
-static inline void scx_cgroup_lock(void) {}
-static inline void scx_cgroup_unlock(void) {}
+static void scx_cgroup_lock(void) {}
+static void scx_cgroup_unlock(void) {}
#endif /* CONFIG_EXT_GROUP_SCHED */
@@ -4411,15 +4395,12 @@ static void scx_cgroup_exit(struct scx_s
{
struct cgroup_subsys_state *css;
- percpu_rwsem_assert_held(&scx_cgroup_rwsem);
-
scx_cgroup_enabled = false;
/*
- * scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk
+ * scx_tg_on/offline() are excluded through cgroup_lock(). If we walk
* cgroups and exit all the inited ones, all online cgroups are exited.
*/
- rcu_read_lock();
css_for_each_descendant_post(css, &root_task_group.css) {
struct task_group *tg = css_tg(css);
@@ -4430,17 +4411,9 @@ static void scx_cgroup_exit(struct scx_s
if (!sch->ops.cgroup_exit)
continue;
- if (WARN_ON_ONCE(!css_tryget(css)))
- continue;
- rcu_read_unlock();
-
SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_exit, NULL,
css->cgroup);
-
- rcu_read_lock();
- css_put(css);
}
- rcu_read_unlock();
}
static int scx_cgroup_init(struct scx_sched *sch)
@@ -4448,13 +4421,10 @@ static int scx_cgroup_init(struct scx_sc
struct cgroup_subsys_state *css;
int ret;
- percpu_rwsem_assert_held(&scx_cgroup_rwsem);
-
/*
- * scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk
+ * scx_tg_on/offline() are excluded through cgroup_lock(). If we walk
* cgroups and init, all online cgroups are initialized.
*/
- rcu_read_lock();
css_for_each_descendant_pre(css, &root_task_group.css) {
struct task_group *tg = css_tg(css);
struct scx_cgroup_init_args args = {
@@ -4473,10 +4443,6 @@ static int scx_cgroup_init(struct scx_sc
continue;
}
- if (WARN_ON_ONCE(!css_tryget(css)))
- continue;
- rcu_read_unlock();
-
ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, cgroup_init, NULL,
css->cgroup, &args);
if (ret) {
@@ -4485,11 +4451,7 @@ static int scx_cgroup_init(struct scx_sc
return ret;
}
tg->scx.flags |= SCX_TG_INITED;
-
- rcu_read_lock();
- css_put(css);
}
- rcu_read_unlock();
WARN_ON_ONCE(scx_cgroup_enabled);
scx_cgroup_enabled = true;
--- a/kernel/sched/ext.h
+++ b/kernel/sched/ext.h
@@ -100,7 +100,6 @@ int scx_tg_online(struct task_group *tg)
void scx_tg_offline(struct task_group *tg);
int scx_cgroup_can_attach(struct cgroup_taskset *tset);
void scx_cgroup_move_task(struct task_struct *p);
-void scx_cgroup_finish_attach(void);
void scx_cgroup_cancel_attach(struct cgroup_taskset *tset);
void scx_group_set_weight(struct task_group *tg, unsigned long cgrp_weight);
void scx_group_set_idle(struct task_group *tg, bool idle);
@@ -111,7 +110,6 @@ static inline int scx_tg_online(struct t
static inline void scx_tg_offline(struct task_group *tg) {}
static inline int scx_cgroup_can_attach(struct cgroup_taskset *tset) { return 0; }
static inline void scx_cgroup_move_task(struct task_struct *p) {}
-static inline void scx_cgroup_finish_attach(void) {}
static inline void scx_cgroup_cancel_attach(struct cgroup_taskset *tset) {}
static inline void scx_group_set_weight(struct task_group *tg, unsigned long cgrp_weight) {}
static inline void scx_group_set_idle(struct task_group *tg, bool idle) {}
* Re: [PATCH sched_ext/for-6.18] sched_ext: Use cgroup_lock/unlock() to synchronize against cgroup operations
From: Peter Zijlstra @ 2025-09-03 8:14 UTC
To: Tejun Heo
Cc: David Vernet, Andrea Righi, Changwoo Min, sched-ext, Ingo Molnar,
linux-kernel
On Tue, Sep 02, 2025 at 01:39:34PM -1000, Tejun Heo wrote:
> SCX hooks into CPU cgroup controller operations and read-locks
> scx_cgroup_rwsem to exclude them while enabling and disabling schedulers.
> While this works, it's unnecessarily complicated given that
> cgroup_[un]lock() are available and thus the cgroup operations can be locked
> out that way.
>
> Drop scx_cgroup_rwsem locking from the tg on/offline and cgroup [can_]attach
> operations. Instead, grab cgroup_lock() from scx_cgroup_lock(). Drop
> scx_cgroup_finish_attach() which is no longer necessary. Drop the now
> unnecessary rcu locking and css ref bumping in scx_cgroup_init() and
> scx_cgroup_exit().
>
> As scx_group_set_weight/bandwidth() paths aren't protected by
> cgroup_lock(), rename scx_cgroup_rwsem to scx_cgroup_ops_rwsem and retain
> the locking there.
>
> This is overall simpler and will also allow enable/disable paths to
> synchronize against cgroup changes independent of the CPU controller.
>
> Signed-off-by: Tejun Heo <tj@kernel.org>
> Cc: Peter Zijlstra <peterz@infradead.org>
> ---
> This results in a tiny simplification on the core side. Peter, if you don't
> object, I'll route this through sched_ext/for-6.18.
No problem, that's fine.
* Re: [PATCH sched_ext/for-6.18] sched_ext: Use cgroup_lock/unlock() to synchronize against cgroup operations
From: Andrea Righi @ 2025-09-03 12:44 UTC
To: Tejun Heo
Cc: David Vernet, Changwoo Min, sched-ext, Peter Zijlstra,
Ingo Molnar, linux-kernel
Hi Tejun,
On Tue, Sep 02, 2025 at 01:39:34PM -1000, Tejun Heo wrote:
> SCX hooks into CPU cgroup controller operations and read-locks
> scx_cgroup_rwsem to exclude them while enabling and disabling schedulers.
> While this works, it's unnecessarily complicated given that
> cgroup_[un]lock() are available and thus the cgroup operations can be locked
> out that way.
>
> Drop scx_cgroup_rwsem locking from the tg on/offline and cgroup [can_]attach
> operations. Instead, grab cgroup_lock() from scx_cgroup_lock(). Drop
> scx_cgroup_finish_attach() which is no longer necessary. Drop the now
> unnecessary rcu locking and css ref bumping in scx_cgroup_init() and
> scx_cgroup_exit().
>
> As scx_group_set_weight/bandwidth() paths aren't protected by
> cgroup_lock(), rename scx_cgroup_rwsem to scx_cgroup_ops_rwsem and retain
> the locking there.
>
> This is overall simpler and will also allow enable/disable paths to
> synchronize against cgroup changes independent of the CPU controller.
>
> Signed-off-by: Tejun Heo <tj@kernel.org>
> Cc: Peter Zijlstra <peterz@infradead.org>
> ---
...
>
> static void scx_cgroup_lock(void)
> {
> - percpu_down_write(&scx_cgroup_rwsem);
> + percpu_down_write(&scx_cgroup_ops_rwsem);
> + cgroup_lock();
> }
Shouldn't we acquire cgroup_lock() before scx_cgroup_ops_rwsem to avoid
a potential AB-BA deadlock?
>
> static void scx_cgroup_unlock(void)
> {
> - percpu_up_write(&scx_cgroup_rwsem);
> + cgroup_unlock();
> + percpu_up_write(&scx_cgroup_ops_rwsem);
> }
Thanks,
-Andrea
* Re: [PATCH sched_ext/for-6.18] sched_ext: Use cgroup_lock/unlock() to synchronize against cgroup operations
From: Tejun Heo @ 2025-09-03 16:39 UTC
To: Andrea Righi
Cc: David Vernet, Changwoo Min, sched-ext, Peter Zijlstra,
Ingo Molnar, linux-kernel
Hello,
On Wed, Sep 03, 2025 at 02:44:58PM +0200, Andrea Righi wrote:
> > static void scx_cgroup_lock(void)
> > {
> > - percpu_down_write(&scx_cgroup_rwsem);
> > + percpu_down_write(&scx_cgroup_ops_rwsem);
> > + cgroup_lock();
> > }
>
> Shouldn't we acquire cgroup_lock() before scx_cgroup_ops_rwsem to avoid
> a potential AB-BA deadlock?
There's no existing ordering between the two locks, so any order should be
safe. The reason why I put it in this particular order is because any
cgroup_lock() holder has no reason to grab ops_rwsem now or in the future
while the opposite direction is still unlikely but theoretically more
possible.
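
To spell the ordering out, a minimal sketch (the second function is
purely hypothetical, only there to show what a problematic nesting would
have to look like):

	/* this patch: the enable/disable path takes A then B */
	static void scx_cgroup_lock(void)
	{
		percpu_down_write(&scx_cgroup_ops_rwsem);	/* A */
		cgroup_lock();					/* B */
	}

	/*
	 * An AB-BA deadlock would additionally require some path that
	 * nests the locks the other way around, i.e.:
	 */
	static void hypothetical_b_then_a(void)
	{
		cgroup_lock();					/* B */
		percpu_down_read(&scx_cgroup_ops_rwsem);	/* A */
	}

	/* no such B-then-A path exists */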
Thanks.
--
tejun
* Re: [PATCH sched_ext/for-6.18] sched_ext: Use cgroup_lock/unlock() to synchronize against cgroup operations
From: Andrea Righi @ 2025-09-03 20:48 UTC
To: Tejun Heo
Cc: David Vernet, Changwoo Min, sched-ext, Peter Zijlstra,
Ingo Molnar, linux-kernel
On Wed, Sep 03, 2025 at 06:39:46AM -1000, Tejun Heo wrote:
> Hello,
>
> On Wed, Sep 03, 2025 at 02:44:58PM +0200, Andrea Righi wrote:
> > > static void scx_cgroup_lock(void)
> > > {
> > > - percpu_down_write(&scx_cgroup_rwsem);
> > > + percpu_down_write(&scx_cgroup_ops_rwsem);
> > > + cgroup_lock();
> > > }
> >
> > Shouldn't we acquire cgroup_lock() before scx_cgroup_ops_rwsem to avoid
> > a potential AB-BA deadlock?
>
> There's no existing ordering between the two locks, so any order should be
> safe. The reason why I put it in this particular order is because any
> cgroup_lock() holder has no reason to grab ops_rwsem now or in the future
> while the opposite direction is still unlikely but theoretically more
> possible.
Isn't scx_group_set_weight() called with cgroup_mutex held? In this case
the order is cgroup_lock() -> scx_cgroup_ops_rwsem, or am I missing
something?
Thanks,
-Andrea
* Re: [PATCH sched_ext/for-6.18] sched_ext: Use cgroup_lock/unlock() to synchronize against cgroup operations
From: Tejun Heo @ 2025-09-03 20:57 UTC
To: Andrea Righi
Cc: David Vernet, Changwoo Min, sched-ext, Peter Zijlstra,
Ingo Molnar, linux-kernel
On Wed, Sep 03, 2025 at 10:48:09PM +0200, Andrea Righi wrote:
> On Wed, Sep 03, 2025 at 06:39:46AM -1000, Tejun Heo wrote:
> > Hello,
> >
> > On Wed, Sep 03, 2025 at 02:44:58PM +0200, Andrea Righi wrote:
> > > > static void scx_cgroup_lock(void)
> > > > {
> > > > - percpu_down_write(&scx_cgroup_rwsem);
> > > > + percpu_down_write(&scx_cgroup_ops_rwsem);
> > > > + cgroup_lock();
> > > > }
> > >
> > > Shouldn't we acquire cgroup_lock() before scx_cgroup_ops_rwsem to avoid
> > > a potential AB-BA deadlock?
> >
> > There's no existing ordering between the two locks, so any order should be
> > safe. The reason why I put it in this particular order is because any
> > cgroup_lock() holder has no reason to grab ops_rwsem now or in the future
> > while the opposite direction is still unlikely but theoretically more
> > possible.
>
> Isn't scx_group_set_weight() called with cgroup_mutex held? In this case
> the order is cgroup_lock() -> scx_cgroup_ops_rwsem, or am I missing
> something?
Oh, no, cgroup_lock() is only held for operations that change the cgroup
hierarchy - cgroup creation, deletion, controller enable/disable, task
migration and so on. Writes to control knobs don't acquire cgroup_lock().
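
Roughly, the two kinds of paths look like this (call chains heavily
simplified, the only point being which locks each side takes;
cpu_weight_write_u64() is the cgroup v2 cpu.weight handler):

	/*
	 * Hierarchy change, e.g. migrating a task: runs under cgroup_mutex
	 * and, after this patch, never takes scx_cgroup_ops_rwsem.
	 */
	cgroup_lock();
	  cpu_cgroup_attach(tset);		/* -> sched_move_task() */
	cgroup_unlock();

	/*
	 * cpu.weight write: cgroup_mutex is not held, only the percpu
	 * rwsem read side.
	 */
	cpu_weight_write_u64(css, cft, weight);
	  ...
	  scx_group_set_weight(tg, weight);
	    percpu_down_read(&scx_cgroup_ops_rwsem);
	    /* ops.cgroup_set_weight() if enabled */
	    percpu_up_read(&scx_cgroup_ops_rwsem);

So the only established ordering is ops_rwsem -> cgroup_mutex in
scx_cgroup_lock(); there's no cgroup_mutex -> ops_rwsem nesting to
collide with it.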
Thanks.
--
tejun
* Re: [PATCH sched_ext/for-6.18] sched_ext: Use cgroup_lock/unlock() to synchronize against cgroup operations
From: Andrea Righi @ 2025-09-03 21:35 UTC
To: Tejun Heo
Cc: David Vernet, Changwoo Min, sched-ext, Peter Zijlstra,
Ingo Molnar, linux-kernel
On Wed, Sep 03, 2025 at 10:57:39AM -1000, Tejun Heo wrote:
> On Wed, Sep 03, 2025 at 10:48:09PM +0200, Andrea Righi wrote:
> > On Wed, Sep 03, 2025 at 06:39:46AM -1000, Tejun Heo wrote:
> > > Hello,
> > >
> > > On Wed, Sep 03, 2025 at 02:44:58PM +0200, Andrea Righi wrote:
> > > > > static void scx_cgroup_lock(void)
> > > > > {
> > > > > - percpu_down_write(&scx_cgroup_rwsem);
> > > > > + percpu_down_write(&scx_cgroup_ops_rwsem);
> > > > > + cgroup_lock();
> > > > > }
> > > >
> > > > Shouldn't we acquire cgroup_lock() before scx_cgroup_ops_rwsem to avoid
> > > > a potential AB-BA deadlock?
> > >
> > > There's no existing ordering between the two locks, so any order should be
> > > safe. The reason why I put it in this particular order is because any
> > > cgroup_lock() holder has no reason to grab ops_rwsem now or in the future
> > > while the opposite direction is still unlikely but theoretically more
> > > possible.
> >
> > Isn't scx_group_set_weight() called with cgroup_mutex held? In this case
> > the order is cgroup_lock() -> scx_cgroup_ops_rwsem, or am I missing
> > something?
>
> Oh, no, cgroup_lock() is only held for operations that change the cgroup
> hierarchy - cgroup creation, deletion, controller enable/disable, task
> migration and so on. Writes to control knobs don't acquire cgroup_lock().
Ah! That's the part I was missing, thanks for clarifying it. In that case:
Acked-by: Andrea Righi <arighi@nvidia.com>
-Andrea
* Re: [PATCH sched_ext/for-6.18] sched_ext: Use cgroup_lock/unlock() to synchronize against cgroup operations
From: Tejun Heo @ 2025-09-03 21:49 UTC
To: David Vernet, Andrea Righi, Changwoo Min
Cc: sched-ext, Peter Zijlstra, Ingo Molnar, linux-kernel
On Tue, Sep 02, 2025 at 01:39:34PM -1000, Tejun Heo wrote:
> SCX hooks into CPU cgroup controller operations and read-locks
> scx_cgroup_rwsem to exclude them while enabling and disabling schedulers.
> While this works, it's unnecessarily complicated given that
> cgroup_[un]lock() are available and thus the cgroup operations can be locked
> out that way.
>
> Drop scx_cgroup_rwsem locking from the tg on/offline and cgroup [can_]attach
> operations. Instead, grab cgroup_lock() from scx_cgroup_lock(). Drop
> scx_cgroup_finish_attach() which is no longer necessary. Drop the now
> unnecessary rcu locking and css ref bumping in scx_cgroup_init() and
> scx_cgroup_exit().
>
> As scx_group_set_weight/bandwidth() paths aren't protected by
> cgroup_lock(), rename scx_cgroup_rwsem to scx_cgroup_ops_rwsem and retain
> the locking there.
>
> This is overall simpler and will also allow enable/disable paths to
> synchronize against cgroup changes independent of the CPU controller.
>
> Signed-off-by: Tejun Heo <tj@kernel.org>
> Cc: Peter Zijlstra <peterz@infradead.org>
Applied to sched_ext/for-6.18.
Thanks.
--
tejun