From: Waiman Long <longman@redhat.com>
To: "Chen Ridong" <chenridong@huaweicloud.com>,
"Tejun Heo" <tj@kernel.org>,
"Johannes Weiner" <hannes@cmpxchg.org>,
"Michal Koutný" <mkoutny@suse.com>,
"Ingo Molnar" <mingo@redhat.com>,
"Peter Zijlstra" <peterz@infradead.org>,
"Juri Lelli" <juri.lelli@redhat.com>,
"Vincent Guittot" <vincent.guittot@linaro.org>,
"Steven Rostedt" <rostedt@goodmis.org>,
"Ben Segall" <bsegall@google.com>, "Mel Gorman" <mgorman@suse.de>,
"Valentin Schneider" <vschneid@redhat.com>,
"Frederic Weisbecker" <frederic@kernel.org>,
"Thomas Gleixner" <tglx@linutronix.de>,
"Shuah Khan" <shuah@kernel.org>
Cc: cgroups@vger.kernel.org, linux-kernel@vger.kernel.org,
linux-kselftest@vger.kernel.org, Waiman Long <longman@redhat.com>
Subject: [PATCH v6 6/8] cgroup/cpuset: Move housekeeping_update()/rebuild_sched_domains() together
Date: Sat, 21 Feb 2026 13:54:16 -0500 [thread overview]
Message-ID: <20260221185418.29319-7-longman@redhat.com> (raw)
In-Reply-To: <20260221185418.29319-1-longman@redhat.com>
With the latest changes in sched/isolation.c, rebuild_sched_domains*()
requires the HK_TYPE_DOMAIN housekeeping cpumask to be properly
updated first, if needed, before the sched domains can be
rebuilt. So the two naturally fit together. Do that by creating a new
update_hk_sched_domains() helper to house both actions.
The name of the isolated_cpus_updating flag, which controls the
call to housekeeping_update(), is now outdated. Rename it to
update_housekeeping to better reflect its purpose. Also move the call
to update_hk_sched_domains() to the end of cpuset and hotplug operations
before releasing the cpuset_mutex.
Signed-off-by: Waiman Long <longman@redhat.com>
---
kernel/cgroup/cpuset.c | 51 ++++++++++++++++++++----------------------
1 file changed, 24 insertions(+), 27 deletions(-)
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 05adf6697030..3d0d18bf182f 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -130,10 +130,9 @@ static cpumask_var_t subpartitions_cpus; /* RWCS */
static cpumask_var_t isolated_cpus; /* CSCB */
/*
- * Set if isolated_cpus is being updated in the current cpuset_mutex
- * critical section.
+ * Set if housekeeping cpumasks are to be updated.
*/
-static bool isolated_cpus_updating; /* RWCS */
+static bool update_housekeeping; /* RWCS */
/*
* A flag to force sched domain rebuild at the end of an operation.
@@ -1188,7 +1187,7 @@ static void isolated_cpus_update(int old_prs, int new_prs, struct cpumask *xcpus
return;
cpumask_andnot(isolated_cpus, isolated_cpus, xcpus);
}
- isolated_cpus_updating = true;
+ update_housekeeping = true;
}
/*
@@ -1306,22 +1305,22 @@ static bool prstate_housekeeping_conflict(int prstate, struct cpumask *new_cpus)
}
/*
- * update_isolation_cpumasks - Update external isolation related CPU masks
+ * update_hk_sched_domains - Update HK cpumasks & rebuild sched domains
*
- * The following external CPU masks will be updated if necessary:
- * - workqueue unbound cpumask
+ * Update housekeeping cpumasks and rebuild sched domains if necessary.
+ * This should be called at the end of cpuset or hotplug actions.
*/
-static void update_isolation_cpumasks(void)
+static void update_hk_sched_domains(void)
{
- int ret;
-
- if (!isolated_cpus_updating)
- return;
-
- ret = housekeeping_update(isolated_cpus);
- WARN_ON_ONCE(ret < 0);
-
- isolated_cpus_updating = false;
+ if (update_housekeeping) {
+ /* Updating HK cpumasks implies rebuild sched domains */
+ WARN_ON_ONCE(housekeeping_update(isolated_cpus));
+ update_housekeeping = false;
+ force_sd_rebuild = true;
+ }
+ /* force_sd_rebuild will be cleared in rebuild_sched_domains_locked() */
+ if (force_sd_rebuild)
+ rebuild_sched_domains_locked();
}
/**
@@ -1472,7 +1471,6 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs,
cs->remote_partition = true;
cpumask_copy(cs->effective_xcpus, tmp->new_cpus);
spin_unlock_irq(&callback_lock);
- update_isolation_cpumasks();
cpuset_force_rebuild();
cs->prs_err = 0;
@@ -1517,7 +1515,6 @@ static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp)
compute_excpus(cs, cs->effective_xcpus);
reset_partition_data(cs);
spin_unlock_irq(&callback_lock);
- update_isolation_cpumasks();
cpuset_force_rebuild();
/*
@@ -1588,7 +1585,6 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *xcpus,
if (xcpus)
cpumask_copy(cs->exclusive_cpus, xcpus);
spin_unlock_irq(&callback_lock);
- update_isolation_cpumasks();
if (adding || deleting)
cpuset_force_rebuild();
@@ -1932,7 +1928,6 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
partition_xcpus_add(new_prs, parent, tmp->delmask);
spin_unlock_irq(&callback_lock);
- update_isolation_cpumasks();
if ((old_prs != new_prs) && (cmd == partcmd_update))
update_partition_exclusive_flag(cs, new_prs);
@@ -2900,7 +2895,6 @@ static int update_prstate(struct cpuset *cs, int new_prs)
else if (isolcpus_updated)
isolated_cpus_update(old_prs, new_prs, cs->effective_xcpus);
spin_unlock_irq(&callback_lock);
- update_isolation_cpumasks();
/* Force update if switching back to member & update effective_xcpus */
update_cpumasks_hier(cs, &tmpmask, !new_prs);
@@ -3190,9 +3184,8 @@ ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
}
free_cpuset(trialcs);
- if (force_sd_rebuild)
- rebuild_sched_domains_locked();
out_unlock:
+ update_hk_sched_domains();
cpuset_full_unlock();
if (of_cft(of)->private == FILE_MEMLIST)
schedule_flush_migrate_mm();
@@ -3300,6 +3293,7 @@ static ssize_t cpuset_partition_write(struct kernfs_open_file *of, char *buf,
cpuset_full_lock();
if (is_cpuset_online(cs))
retval = update_prstate(cs, val);
+ update_hk_sched_domains();
cpuset_full_unlock();
return retval ?: nbytes;
}
@@ -3474,6 +3468,7 @@ static void cpuset_css_killed(struct cgroup_subsys_state *css)
/* Reset valid partition back to member */
if (is_partition_valid(cs))
update_prstate(cs, PRS_MEMBER);
+ update_hk_sched_domains();
cpuset_full_unlock();
}
@@ -3881,10 +3876,12 @@ static void cpuset_handle_hotplug(void)
rcu_read_unlock();
}
- /* rebuild sched domains if necessary */
- if (force_sd_rebuild)
- rebuild_sched_domains_cpuslocked();
+ if (update_housekeeping || force_sd_rebuild) {
+ mutex_lock(&cpuset_mutex);
+ update_hk_sched_domains();
+ mutex_unlock(&cpuset_mutex);
+ }
free_tmpmasks(ptmp);
}
--
2.53.0
next prev parent reply other threads:[~2026-02-21 18:55 UTC|newest]
Thread overview: 29+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-02-21 18:54 [PATCH v6 0/8] cgroup/cpuset: Fix partition related locking issues Waiman Long
2026-02-21 18:54 ` [PATCH v6 1/8] cgroup/cpuset: Fix incorrect change to effective_xcpus in partition_xcpus_del() Waiman Long
2026-02-21 18:54 ` [PATCH v6 2/8] cgroup/cpuset: Fix incorrect use of cpuset_update_tasks_cpumask() in update_cpumasks_hier() Waiman Long
2026-02-21 18:54 ` [PATCH v6 3/8] cgroup/cpuset: Clarify exclusion rules for cpuset internal variables Waiman Long
2026-02-26 15:00 ` Frederic Weisbecker
2026-02-21 18:54 ` [PATCH v6 4/8] cgroup/cpuset: Set isolated_cpus_updating only if isolated_cpus is changed Waiman Long
2026-02-26 15:07 ` Frederic Weisbecker
2026-02-21 18:54 ` [PATCH v6 5/8] kselftest/cgroup: Simplify test_cpuset_prs.sh by removing "S+" command Waiman Long
2026-02-21 18:54 ` Waiman Long [this message]
2026-02-26 15:51 ` [PATCH v6 6/8] cgroup/cpuset: Move housekeeping_update()/rebuild_sched_domains() together Frederic Weisbecker
2026-02-21 18:54 ` [PATCH v6 7/8] cgroup/cpuset: Defer housekeeping_update() calls from CPU hotplug to workqueue Waiman Long
2026-02-26 16:06 ` Frederic Weisbecker
2026-03-03 16:00 ` Waiman Long
2026-03-03 22:48 ` Frederic Weisbecker
2026-03-04 4:05 ` Waiman Long
2026-03-02 11:49 ` Frederic Weisbecker
2026-03-03 15:18 ` Jon Hunter
2026-03-03 16:09 ` Waiman Long
2026-03-04 3:58 ` Waiman Long
2026-03-04 11:07 ` Jon Hunter
2026-03-04 18:11 ` Waiman Long
2026-02-21 18:54 ` [PATCH v6 8/8] cgroup/cpuset: Call housekeeping_update() without holding cpus_read_lock Waiman Long
2026-03-02 12:14 ` Frederic Weisbecker
2026-03-02 14:15 ` Waiman Long
2026-03-02 15:40 ` Waiman Long
2026-02-23 20:57 ` [PATCH v6 0/8] cgroup/cpuset: Fix partition related locking issues Tejun Heo
2026-02-23 21:11 ` Waiman Long
2026-02-24 7:51 ` Chen Ridong
2026-03-02 12:21 ` Frederic Weisbecker
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260221185418.29319-7-longman@redhat.com \
--to=longman@redhat.com \
--cc=bsegall@google.com \
--cc=cgroups@vger.kernel.org \
--cc=chenridong@huaweicloud.com \
--cc=frederic@kernel.org \
--cc=hannes@cmpxchg.org \
--cc=juri.lelli@redhat.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-kselftest@vger.kernel.org \
--cc=mgorman@suse.de \
--cc=mingo@redhat.com \
--cc=mkoutny@suse.com \
--cc=peterz@infradead.org \
--cc=rostedt@goodmis.org \
--cc=shuah@kernel.org \
--cc=tglx@linutronix.de \
--cc=tj@kernel.org \
--cc=vincent.guittot@linaro.org \
--cc=vschneid@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox