* [PATCH v12 5/9] cgroup/cpuset: Rename update_unbound_workqueue_cpumask() to update_exclusion_cpumasks()
From: Gabriele Monaco @ 2025-09-15 14:59 UTC
To: linux-kernel, Waiman Long, Tejun Heo, Johannes Weiner,
Michal Koutný, cgroups
Cc: Gabriele Monaco, Frederic Weisbecker
update_unbound_workqueue_cpumask() updates unbound workqueue settings
when the set of isolated CPUs changes, but the same hook can serve other
subsystems that need updating when isolated CPUs change.
Generalise the name to update_exclusion_cpumasks() to prepare for other
functions, unrelated to workqueues, to be called from that spot.
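For context, once the whole series is applied (patch 9/9 below adds a
timer migration hook next to the workqueue one), the renamed function
notifies both subsystems. A minimal sketch of the resulting shape; only
the two calls appear verbatim in the hunks below, the early-return guard
is assumed:

static void update_exclusion_cpumasks(bool isolcpus_updated)
{
        int ret;

        /* Assumed guard: nothing to do unless isolated CPUs changed */
        if (!isolcpus_updated)
                return;

        ret = workqueue_unbound_exclude_cpumask(isolated_cpus);
        WARN_ON_ONCE(ret < 0);

        /* Added by patch 9/9 */
        ret = tmigr_isolated_exclude_cpumask(isolated_cpus);
        WARN_ON_ONCE(ret < 0);
}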
Acked-by: Frederic Weisbecker <frederic@kernel.org>
Acked-by: Waiman Long <longman@redhat.com>
Signed-off-by: Gabriele Monaco <gmonaco@redhat.com>
---
kernel/cgroup/cpuset.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 27adb04df675..81a9239053a7 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -1339,7 +1339,7 @@ static bool partition_xcpus_del(int old_prs, struct cpuset *parent,
return isolcpus_updated;
}
-static void update_unbound_workqueue_cpumask(bool isolcpus_updated)
+static void update_exclusion_cpumasks(bool isolcpus_updated)
{
int ret;
@@ -1470,7 +1470,7 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs,
list_add(&cs->remote_sibling, &remote_children);
cpumask_copy(cs->effective_xcpus, tmp->new_cpus);
spin_unlock_irq(&callback_lock);
- update_unbound_workqueue_cpumask(isolcpus_updated);
+ update_exclusion_cpumasks(isolcpus_updated);
cpuset_force_rebuild();
cs->prs_err = 0;
@@ -1511,7 +1511,7 @@ static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp)
compute_effective_exclusive_cpumask(cs, NULL, NULL);
reset_partition_data(cs);
spin_unlock_irq(&callback_lock);
- update_unbound_workqueue_cpumask(isolcpus_updated);
+ update_exclusion_cpumasks(isolcpus_updated);
cpuset_force_rebuild();
/*
@@ -1580,7 +1580,7 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *xcpus,
if (xcpus)
cpumask_copy(cs->exclusive_cpus, xcpus);
spin_unlock_irq(&callback_lock);
- update_unbound_workqueue_cpumask(isolcpus_updated);
+ update_exclusion_cpumasks(isolcpus_updated);
if (adding || deleting)
cpuset_force_rebuild();
@@ -1943,7 +1943,7 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
WARN_ON_ONCE(parent->nr_subparts < 0);
}
spin_unlock_irq(&callback_lock);
- update_unbound_workqueue_cpumask(isolcpus_updated);
+ update_exclusion_cpumasks(isolcpus_updated);
if ((old_prs != new_prs) && (cmd == partcmd_update))
update_partition_exclusive_flag(cs, new_prs);
@@ -2968,7 +2968,7 @@ static int update_prstate(struct cpuset *cs, int new_prs)
else if (isolcpus_updated)
isolated_cpus_update(old_prs, new_prs, cs->effective_xcpus);
spin_unlock_irq(&callback_lock);
- update_unbound_workqueue_cpumask(isolcpus_updated);
+ update_exclusion_cpumasks(isolcpus_updated);
/* Force update if switching back to member & update effective_xcpus */
update_cpumasks_hier(cs, &tmpmask, !new_prs);
--
2.51.0
* [PATCH v12 7/9] cgroup/cpuset: Fail if isolated and nohz_full don't leave any housekeeping
From: Gabriele Monaco @ 2025-09-15 14:59 UTC
To: linux-kernel, Waiman Long, Tejun Heo, Johannes Weiner,
Michal Koutný, cgroups
Cc: Gabriele Monaco, Frederic Weisbecker
Currently the user can set up isolated cpus via cpuset and nohz_full in
such a way that leaves no housekeeping CPU (i.e. no CPU that is neither
domain isolated nor nohz full). This can be a problem for other
subsystems (e.g. the timer wheel migration).
Prevent this configuration by blocking any assignment that would cause
the union of domain isolated cpus and nohz_full to cover all CPUs.
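For illustration, a hypothetical reproduction on a 4-CPU machine booted
with nohz_full=2-3 (cgroup path illustrative, controller setup elided):

  # echo 0-1 > /sys/fs/cgroup/rt/cpuset.cpus
  # echo isolated > /sys/fs/cgroup/rt/cpuset.cpus.partition

Isolating CPUs 0-1 on top of nohz_full=2-3 would leave no CPU doing both
domain and kernel-noise housekeeping, so the transition now fails with
the new PERR_HKEEPING reason (reported through cpuset.cpus.partition).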
Acked-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Gabriele Monaco <gmonaco@redhat.com>
---
kernel/cgroup/cpuset.c | 63 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 63 insertions(+)
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 81a9239053a7..3cedc3580373 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -1275,6 +1275,19 @@ static void isolated_cpus_update(int old_prs, int new_prs, struct cpumask *xcpus
cpumask_andnot(isolated_cpus, isolated_cpus, xcpus);
}
+/*
+ * isolated_cpus_should_update - Returns if the isolated_cpus mask needs update
+ * @prs: new or old partition_root_state
+ * @parent: parent cpuset
+ * Return: true if isolated_cpus needs modification, false otherwise
+ */
+static bool isolated_cpus_should_update(int prs, struct cpuset *parent)
+{
+        if (!parent)
+                parent = &top_cpuset;
+        return prs != parent->partition_root_state;
+}
+
/*
* partition_xcpus_add - Add new exclusive CPUs to partition
* @new_prs: new partition_root_state
@@ -1339,6 +1352,42 @@ static bool partition_xcpus_del(int old_prs, struct cpuset *parent,
return isolcpus_updated;
}
+/*
+ * isolated_cpus_can_update - check for isolated & nohz_full conflicts
+ * @add_cpus: cpu mask for cpus that are going to be isolated
+ * @del_cpus: cpu mask for cpus that are no longer isolated, can be NULL
+ * Return: false if there is conflict, true otherwise
+ *
+ * If nohz_full is enabled and we have isolated CPUs, their combination must
+ * still leave housekeeping CPUs.
+ */
+static bool isolated_cpus_can_update(struct cpumask *add_cpus,
+                                     struct cpumask *del_cpus)
+{
+        cpumask_var_t full_hk_cpus;
+        int res = true;
+
+        if (!housekeeping_enabled(HK_TYPE_KERNEL_NOISE))
+                return true;
+
+        if (del_cpus && cpumask_weight_and(del_cpus,
+                                           housekeeping_cpumask(HK_TYPE_KERNEL_NOISE)))
+                return true;
+
+        if (!alloc_cpumask_var(&full_hk_cpus, GFP_KERNEL))
+                return false;
+
+        cpumask_and(full_hk_cpus, housekeeping_cpumask(HK_TYPE_KERNEL_NOISE),
+                    housekeeping_cpumask(HK_TYPE_DOMAIN));
+        cpumask_andnot(full_hk_cpus, full_hk_cpus, isolated_cpus);
+        cpumask_and(full_hk_cpus, full_hk_cpus, cpu_active_mask);
+        if (!cpumask_weight_andnot(full_hk_cpus, add_cpus))
+                res = false;
+
+        free_cpumask_var(full_hk_cpus);
+        return res;
+}
+
static void update_exclusion_cpumasks(bool isolcpus_updated)
{
int ret;
@@ -1464,6 +1513,9 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs,
if (!cpumask_intersects(tmp->new_cpus, cpu_active_mask) ||
cpumask_subset(top_cpuset.effective_cpus, tmp->new_cpus))
return PERR_INVCPUS;
+        if (isolated_cpus_should_update(new_prs, NULL) &&
+            !isolated_cpus_can_update(tmp->new_cpus, NULL))
+                return PERR_HKEEPING;
spin_lock_irq(&callback_lock);
isolcpus_updated = partition_xcpus_add(new_prs, NULL, tmp->new_cpus);
@@ -1563,6 +1615,9 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *xcpus,
else if (cpumask_intersects(tmp->addmask, subpartitions_cpus) ||
cpumask_subset(top_cpuset.effective_cpus, tmp->addmask))
cs->prs_err = PERR_NOCPUS;
+                else if (isolated_cpus_should_update(prs, NULL) &&
+                         !isolated_cpus_can_update(tmp->addmask, tmp->delmask))
+                        cs->prs_err = PERR_HKEEPING;
if (cs->prs_err)
goto invalidate;
}
@@ -1914,6 +1969,12 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
return err;
}
+        if (deleting && isolated_cpus_should_update(new_prs, parent) &&
+            !isolated_cpus_can_update(tmp->delmask, tmp->addmask)) {
+                cs->prs_err = PERR_HKEEPING;
+                return PERR_HKEEPING;
+        }
+
/*
* Change the parent's effective_cpus & effective_xcpus (top cpuset
* only).
@@ -2934,6 +2995,8 @@ static int update_prstate(struct cpuset *cs, int new_prs)
* Need to update isolated_cpus.
*/
isolcpus_updated = true;
+                if (!isolated_cpus_can_update(cs->effective_xcpus, NULL))
+                        err = PERR_HKEEPING;
} else {
/*
* Switching back to member is always allowed even if it
--
2.51.0
* [PATCH v12 9/9] timers: Exclude isolated cpus from timer migration
From: Gabriele Monaco @ 2025-09-15 14:59 UTC
To: linux-kernel, Anna-Maria Behnsen, Thomas Gleixner, Waiman Long,
Tejun Heo, Johannes Weiner, Michal Koutný, cgroups
Cc: Gabriele Monaco, John B. Wyatt IV, John B. Wyatt IV
The timer migration mechanism allows active CPUs to pull timers from
idle ones to improve the overall idle time. This is however undesired
when CPU-intensive workloads run on isolated cores, as the algorithm
would move the timers from housekeeping to isolated cores, negatively
affecting the isolation.
Exclude isolated cores from the timer migration algorithm by extending
the concept of unavailable cores, currently used for offline ones, to
isolated ones:
* A core is unavailable if isolated or offline;
* A core is available if not isolated and online;
A core is considered unavailable as isolated if it belongs to:
* the isolcpus (domain) list
* an isolated cpuset
Except if it is:
* in the nohz_full list (already idle for the hierarchy)
* the nohz timekeeper core (must be available to handle global timers)
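For example, on a hypothetical machine booted with isolcpus=domain,4-5
nohz_full=6-7, assuming CPU 0 ends up as the nohz timekeeper:
* CPUs 4-5 (domain isolated, not nohz_full) are unavailable and fully
excluded from the hierarchy;
* CPUs 6-7 (nohz_full) stay in the hierarchy and are treated as idle
once their tick stops;
* CPU 0, as timekeeper, remains available even if it were also isolated.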
CPUs are added to the hierarchy during late boot, excluding isolated
ones; the hierarchy is also adapted when the cpuset isolation changes.
Due to how the timer migration algorithm works, any CPU that is part of
the hierarchy can have its global timers pulled by remote CPUs and must
in turn pull remote timers; skipping only the pulling of remote timers
would break the logic.
For this reason, prevent isolated CPUs from pulling remote global
timers, but also the other way around: any global timer started on an
isolated CPU will run there. This does not break the concept of
isolation (global timers don't come from outside the CPU) and, if
considered inappropriate, can usually be mitigated with other isolation
techniques (e.g. IRQ pinning).
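For instance, pinning a device IRQ to the housekeeping CPUs prevents its
handler from arming timers on the isolated CPUs in the first place (IRQ
number and CPU list are illustrative):

  # echo 0,32,64,96 > /proc/irq/42/smp_affinity_list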
This effect was noticed on a 128-core machine running oslat on the
isolated cores (1-31,33-63,65-95,97-127). The tool monopolises CPUs,
and the lowest-numbered CPU in each timer migration hierarchy group
(here 1 and 65) appears always active and continuously pulls global
timers from the housekeeping CPUs. This ends up moving driver work
(e.g. delayed work) to isolated CPUs and causes latency spikes:
before the change:
# oslat -c 1-31,33-63,65-95,97-127 -D 62s
...
Maximum: 1203 10 3 4 ... 5 (us)
after the change:
# oslat -c 1-31,33-63,65-95,97-127 -D 62s
...
Maximum: 10 4 3 4 3 ... 5 (us)
Tested-by: John B. Wyatt IV <jwyatt@redhat.com>
Tested-by: John B. Wyatt IV <sageofredondo@gmail.com>
Signed-off-by: Gabriele Monaco <gmonaco@redhat.com>
---
include/linux/timer.h | 9 +++
kernel/cgroup/cpuset.c | 3 +
kernel/time/timer_migration.c | 108 +++++++++++++++++++++++++++++++++-
3 files changed, 119 insertions(+), 1 deletion(-)
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 0414d9e6b4fc..62e1cea71125 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -188,4 +188,13 @@ int timers_dead_cpu(unsigned int cpu);
#define timers_dead_cpu NULL
#endif
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+extern int tmigr_isolated_exclude_cpumask(struct cpumask *exclude_cpumask);
+#else
+static inline int tmigr_isolated_exclude_cpumask(struct cpumask *exclude_cpumask)
+{
+        return 0;
+}
+#endif
+
#endif
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 3cedc3580373..6e9d86fab27e 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -1399,6 +1399,9 @@ static void update_exclusion_cpumasks(bool isolcpus_updated)
ret = workqueue_unbound_exclude_cpumask(isolated_cpus);
WARN_ON_ONCE(ret < 0);
+
+        ret = tmigr_isolated_exclude_cpumask(isolated_cpus);
+        WARN_ON_ONCE(ret < 0);
}
/**
diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c
index 0a3a26e766d0..08e29fc01479 100644
--- a/kernel/time/timer_migration.c
+++ b/kernel/time/timer_migration.c
@@ -10,6 +10,7 @@
#include <linux/spinlock.h>
#include <linux/timerqueue.h>
#include <trace/events/ipi.h>
+#include <linux/sched/isolation.h>
#include "timer_migration.h"
#include "tick-internal.h"
@@ -436,6 +437,29 @@ static inline bool tmigr_is_not_available(struct tmigr_cpu *tmc)
return !(tmc->tmgroup && tmc->available);
}
+/*
+ * Returns true if @cpu should be excluded from the hierarchy as isolated.
+ * Domain isolated CPUs don't participate in timer migration; nohz_full CPUs
+ * are still part of the hierarchy but become idle (from a tick and timer
+ * migration perspective) when they stop their tick. This lets the timekeeping
+ * CPU handle their global timers. Also marking isolated CPUs as idle would be
+ * too costly, hence they are completely excluded from the hierarchy.
+ * This check is necessary, for instance, to prevent offline isolated CPUs from
+ * being incorrectly marked as available once getting back online.
+ *
+ * Additionally, the tick CPU can be isolated at boot; however, we
+ * cannot mark it as unavailable, to avoid having no global migrator
+ * for the nohz_full CPUs. This check is only necessary at boot time.
+ */
+static inline bool tmigr_is_isolated(int cpu)
+{
+        if (!tick_nohz_cpu_hotpluggable(cpu))
+                return false;
+        return (!housekeeping_cpu(cpu, HK_TYPE_DOMAIN) ||
+                cpuset_cpu_is_isolated(cpu)) &&
+               housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE);
+}
+
/*
* Returns true, when @childmask corresponds to the group migrator or when the
* group is not active - so no migrator is set.
@@ -1451,6 +1475,8 @@ static int tmigr_clear_cpu_available(unsigned int cpu)
cpumask_clear_cpu(cpu, tmigr_available_cpumask);
scoped_guard(raw_spinlock_irq, &tmc->lock) {
+                if (!tmc->available)
+                        return 0;
tmc->available = false;
WRITE_ONCE(tmc->wakeup, KTIME_MAX);
@@ -1478,8 +1504,12 @@ static int tmigr_set_cpu_available(unsigned int cpu)
if (WARN_ON_ONCE(!tmc->tmgroup))
return -EINVAL;
+        if (tmigr_is_isolated(cpu))
+                return 0;
cpumask_set_cpu(cpu, tmigr_available_cpumask);
scoped_guard(raw_spinlock_irq, &tmc->lock) {
+                if (tmc->available)
+                        return 0;
trace_tmigr_cpu_available(tmc);
tmc->idle = timer_base_is_idle();
if (!tmc->idle)
@@ -1489,6 +1519,81 @@ static int tmigr_set_cpu_available(unsigned int cpu)
return 0;
}
+static void tmigr_cpu_isolate(struct work_struct *ignored)
+{
+        tmigr_clear_cpu_available(smp_processor_id());
+}
+
+static void tmigr_cpu_unisolate(struct work_struct *ignored)
+{
+        tmigr_set_cpu_available(smp_processor_id());
+}
+
+/**
+ * tmigr_isolated_exclude_cpumask - Exclude given CPUs from hierarchy
+ * @exclude_cpumask: the cpumask to be excluded from timer migration hierarchy
+ *
+ * This function can be called from cpuset code to provide the new set of
+ * isolated CPUs that should be excluded from the hierarchy.
+ * Online CPUs not present in exclude_cpumask but already excluded are brought
+ * back to the hierarchy.
+ * Functions to isolate/unisolate need to be called locally and can sleep.
+ */
+int tmigr_isolated_exclude_cpumask(struct cpumask *exclude_cpumask)
+{
+        struct work_struct __percpu *works __free(free_percpu) =
+                alloc_percpu(struct work_struct);
+        cpumask_var_t cpumask_unisol __free(free_cpumask_var) = CPUMASK_NULL;
+        cpumask_var_t cpumask_isol __free(free_cpumask_var) = CPUMASK_NULL;
+        int cpu;
+
+        lockdep_assert_cpus_held();
+
+        if (!alloc_cpumask_var(&cpumask_isol, GFP_KERNEL))
+                return -ENOMEM;
+        if (!alloc_cpumask_var(&cpumask_unisol, GFP_KERNEL))
+                return -ENOMEM;
+        if (!works)
+                return -ENOMEM;
+
+        cpumask_andnot(cpumask_unisol, cpu_online_mask, exclude_cpumask);
+        cpumask_andnot(cpumask_unisol, cpumask_unisol, tmigr_available_cpumask);
+        /* Set up the mask earlier to avoid races with the migrator CPU */
+        cpumask_or(tmigr_available_cpumask, tmigr_available_cpumask, cpumask_unisol);
+        for_each_cpu(cpu, cpumask_unisol) {
+                struct work_struct *work = per_cpu_ptr(works, cpu);
+
+                INIT_WORK(work, tmigr_cpu_unisolate);
+                schedule_work_on(cpu, work);
+        }
+
+        cpumask_and(cpumask_isol, exclude_cpumask, tmigr_available_cpumask);
+        cpumask_and(cpumask_isol, cpumask_isol, housekeeping_cpumask(HK_TYPE_KERNEL_NOISE));
+        /*
+         * Handle this here and not in the cpuset code because exclude_cpumask
+         * might include also the tick CPU if included in isolcpus.
+         */
+        for_each_cpu(cpu, cpumask_isol) {
+                if (!tick_nohz_cpu_hotpluggable(cpu)) {
+                        cpumask_clear_cpu(cpu, cpumask_isol);
+                        break;
+                }
+        }
+        /* Set up the mask earlier to avoid races with the migrator CPU */
+        cpumask_andnot(tmigr_available_cpumask, tmigr_available_cpumask, cpumask_isol);
+        for_each_cpu(cpu, cpumask_isol) {
+                struct work_struct *work = per_cpu_ptr(works, cpu);
+
+                INIT_WORK(work, tmigr_cpu_isolate);
+                schedule_work_on(cpu, work);
+        }
+
+        for_each_cpu_or(cpu, cpumask_isol, cpumask_unisol)
+                flush_work(per_cpu_ptr(works, cpu));
+
+        return 0;
+}
+
/*
* NOHZ can only be enabled after clocksource_done_booting(). Don't
* bother trashing the cache in the tree before.
@@ -1496,7 +1601,8 @@ static int tmigr_set_cpu_available(unsigned int cpu)
static int __init tmigr_late_init(void)
{
return cpuhp_setup_state(CPUHP_AP_TMIGR_ONLINE, "tmigr:online",
- tmigr_set_cpu_available, tmigr_clear_cpu_available);
+ tmigr_set_cpu_available,
+ tmigr_clear_cpu_available);
}
static void tmigr_init_group(struct tmigr_group *group, unsigned int lvl,
--
2.51.0
* Re: [PATCH v12 7/9] cgroup/cpuset: Fail if isolated and nohz_full don't leave any housekeeping
From: Waiman Long @ 2025-09-15 20:43 UTC
To: Gabriele Monaco, linux-kernel, Tejun Heo, Johannes Weiner,
Michal Koutný, cgroups
Cc: Frederic Weisbecker
On 9/15/25 10:59 AM, Gabriele Monaco wrote:
> Currently the user can set up isolated cpus via cpuset and nohz_full in
> such a way that leaves no housekeeping CPU (i.e. no CPU that is neither
> domain isolated nor nohz full). This can be a problem for other
> subsystems (e.g. the timer wheel migration).
>
> Prevent this configuration by blocking any assignment that would cause
> the union of domain isolated cpus and nohz_full to cover all CPUs.
>
> Acked-by: Frederic Weisbecker <frederic@kernel.org>
> Signed-off-by: Gabriele Monaco <gmonaco@redhat.com>
> ---
> kernel/cgroup/cpuset.c | 63 ++++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 63 insertions(+)
>
> diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
> index 81a9239053a7..3cedc3580373 100644
> --- a/kernel/cgroup/cpuset.c
> +++ b/kernel/cgroup/cpuset.c
> @@ -1275,6 +1275,19 @@ static void isolated_cpus_update(int old_prs, int new_prs, struct cpumask *xcpus
> cpumask_andnot(isolated_cpus, isolated_cpus, xcpus);
> }
>
> +/*
> + * isolated_cpus_should_update - Returns if the isolated_cpus mask needs update
> + * @prs: new or old partition_root_state
> + * @parent: parent cpuset
> + * Return: true if isolated_cpus needs modification, false otherwise
> + */
> +static bool isolated_cpus_should_update(int prs, struct cpuset *parent)
> +{
> + if (!parent)
> + parent = &top_cpuset;
> + return prs != parent->partition_root_state;
> +}
> +
> /*
> * partition_xcpus_add - Add new exclusive CPUs to partition
> * @new_prs: new partition_root_state
> @@ -1339,6 +1352,42 @@ static bool partition_xcpus_del(int old_prs, struct cpuset *parent,
> return isolcpus_updated;
> }
>
> +/*
> + * isolated_cpus_can_update - check for isolated & nohz_full conflicts
> + * @add_cpus: cpu mask for cpus that are going to be isolated
> + * @del_cpus: cpu mask for cpus that are no longer isolated, can be NULL
> + * Return: false if there is conflict, true otherwise
> + *
> + * If nohz_full is enabled and we have isolated CPUs, their combination must
> + * still leave housekeeping CPUs.
> + */
> +static bool isolated_cpus_can_update(struct cpumask *add_cpus,
> + struct cpumask *del_cpus)
> +{
> + cpumask_var_t full_hk_cpus;
> + int res = true;
> +
> + if (!housekeeping_enabled(HK_TYPE_KERNEL_NOISE))
> + return true;
> +
> + if (del_cpus && cpumask_weight_and(del_cpus,
> + housekeeping_cpumask(HK_TYPE_KERNEL_NOISE)))
> + return true;
> +
> + if (!alloc_cpumask_var(&full_hk_cpus, GFP_KERNEL))
> + return false;
> +
> + cpumask_and(full_hk_cpus, housekeeping_cpumask(HK_TYPE_KERNEL_NOISE),
> + housekeeping_cpumask(HK_TYPE_DOMAIN));
> + cpumask_andnot(full_hk_cpus, full_hk_cpus, isolated_cpus);
> + cpumask_and(full_hk_cpus, full_hk_cpus, cpu_active_mask);
> + if (!cpumask_weight_andnot(full_hk_cpus, add_cpus))
> + res = false;
> +
> + free_cpumask_var(full_hk_cpus);
> + return res;
> +}
> +
> static void update_exclusion_cpumasks(bool isolcpus_updated)
> {
> int ret;
> @@ -1464,6 +1513,9 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs,
> if (!cpumask_intersects(tmp->new_cpus, cpu_active_mask) ||
> cpumask_subset(top_cpuset.effective_cpus, tmp->new_cpus))
> return PERR_INVCPUS;
> + if (isolated_cpus_should_update(new_prs, NULL) &&
> + !isolated_cpus_can_update(tmp->new_cpus, NULL))
> + return PERR_HKEEPING;
>
> spin_lock_irq(&callback_lock);
> isolcpus_updated = partition_xcpus_add(new_prs, NULL, tmp->new_cpus);
> @@ -1563,6 +1615,9 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *xcpus,
> else if (cpumask_intersects(tmp->addmask, subpartitions_cpus) ||
> cpumask_subset(top_cpuset.effective_cpus, tmp->addmask))
> cs->prs_err = PERR_NOCPUS;
> + else if (isolated_cpus_should_update(prs, NULL) &&
> + !isolated_cpus_can_update(tmp->addmask, tmp->delmask))
> + cs->prs_err = PERR_HKEEPING;
> if (cs->prs_err)
> goto invalidate;
> }
> @@ -1914,6 +1969,12 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
> return err;
> }
>
> + if (deleting && isolated_cpus_should_update(new_prs, parent) &&
> + !isolated_cpus_can_update(tmp->delmask, tmp->addmask)) {
> + cs->prs_err = PERR_HKEEPING;
> + return PERR_HKEEPING;
> + }
> +
> /*
> * Change the parent's effective_cpus & effective_xcpus (top cpuset
> * only).
> @@ -2934,6 +2995,8 @@ static int update_prstate(struct cpuset *cs, int new_prs)
> * Need to update isolated_cpus.
> */
> isolcpus_updated = true;
> + if (!isolated_cpus_can_update(cs->effective_xcpus, NULL))
> + err = PERR_HKEEPING;
> } else {
> /*
> * Switching back to member is always allowed even if it
Reviewed-by: Waiman Long <longman@redhat.com>
* Re: [PATCH v12 9/9] timers: Exclude isolated cpus from timer migration
From: John B. Wyatt IV @ 2025-09-15 20:51 UTC
To: Gabriele Monaco
Cc: linux-kernel, Anna-Maria Behnsen, Thomas Gleixner, Waiman Long,
Tejun Heo, Johannes Weiner, Michal Koutný, cgroups,
John B. Wyatt IV
On Mon, Sep 15, 2025 at 04:59:30PM +0200, Gabriele Monaco wrote:
Your patchset continues to pass when applied against v6.17-rc4-rt3 on a
preview of RHEL 10.2.
rtla osnoise top -c 1 -e sched:sched_switch -s 20 -T 1 -t -d 30m -q
duration: 0 00:30:00 | time is in us
CPU  Period     Runtime    Noise  % CPU Aval  Max Noise  Max Single   HW  NMI      IRQ  Softirq  Thread
  1   #1799  1799000001  3351316    99.81371       2336           9  400    0  1799011        0   23795
> This effect was noticed on a 128-core machine running oslat on the
> isolated cores (1-31,33-63,65-95,97-127). The tool monopolises CPUs,
> and the lowest-numbered CPU in each timer migration hierarchy group
> (here 1 and 65) appears always active and continuously pulls global
> timers from the housekeeping CPUs. This ends up moving driver work
> (e.g. delayed work) to isolated CPUs and causes latency spikes:
>
If you do another version, you may want to amend the cover letter to mention
that this effect can be noticed on a machine with as few as 20 cores/40
threads, with isolcpus set to 1-9,11-39, using rtla-osnoise-top.
Tested-by: John B. Wyatt IV <jwyatt@redhat.com>
Tested-by: John B. Wyatt IV <sageofredondo@gmail.com>
--
Sincerely,
John Wyatt
Software Engineer, Core Kernel
Red Hat
* Re: [PATCH v12 9/9] timers: Exclude isolated cpus from timer migration
From: Gabriele Monaco @ 2025-09-16 5:29 UTC
To: John B. Wyatt IV
Cc: linux-kernel, Anna-Maria Behnsen, Thomas Gleixner, Waiman Long,
Tejun Heo, Johannes Weiner, Michal Koutný, cgroups,
John B. Wyatt IV
2025-09-15T20:51:21Z John B. Wyatt IV <jwyatt@redhat.com>:
> On Mon, Sep 15, 2025 at 04:59:30PM +0200, Gabriele Monaco wrote:
>
> Your patchset continues to pass when applied against v6.17-rc4-rt3 on a
> preview of RHEL 10.2.
>
> rtla osnoise top -c 1 -e sched:sched_switch -s 20 -T 1 -t -d 30m -q
>
> duration: 0 00:30:00 | time is in us
> CPU Period Runtime Noise % CPU Aval Max Noise Max Single HW NMI IRQ Softirq Thread
> 1 #1799 1799000001 3351316 99.81371 2336 9 400 0 1799011 0 23795
>
>> This effect was noticed on a 128-core machine running oslat on the
>> isolated cores (1-31,33-63,65-95,97-127). The tool monopolises CPUs,
>> and the lowest-numbered CPU in each timer migration hierarchy group
>> (here 1 and 65) appears always active and continuously pulls global
>> timers from the housekeeping CPUs. This ends up moving driver work
>> (e.g. delayed work) to isolated CPUs and causes latency spikes:
>>
>
> If you do another version, you may want to amend the cover letter to mention
> that this effect can be noticed on a machine with as few as 20 cores/40
> threads, with isolcpus set to 1-9,11-39, using rtla-osnoise-top.
>
> Tested-by: John B. Wyatt IV <jwyatt@redhat.com>
> Tested-by: John B. Wyatt IV <sageofredondo@gmail.com>
>
Thanks John for testing again, I'll mention your results with the next version.
Cheers,
Gabriele
* Re: [PATCH v12 7/9] cgroup/cpuset: Fail if isolated and nohz_full don't leave any housekeeping
From: Chen Ridong @ 2025-09-16 8:31 UTC
To: Gabriele Monaco, linux-kernel, Waiman Long, Tejun Heo,
Johannes Weiner, Michal Koutný, cgroups
Cc: Frederic Weisbecker
On 2025/9/15 22:59, Gabriele Monaco wrote:
> Currently the user can set up isolated cpus via cpuset and nohz_full in
> such a way that leaves no housekeeping CPU (i.e. no CPU that is neither
> domain isolated nor nohz full). This can be a problem for other
> subsystems (e.g. the timer wheel migration).
>
> Prevent this configuration by blocking any assignment that would cause
> the union of domain isolated cpus and nohz_full to cover all CPUs.
>
> Acked-by: Frederic Weisbecker <frederic@kernel.org>
> Signed-off-by: Gabriele Monaco <gmonaco@redhat.com>
> ---
> kernel/cgroup/cpuset.c | 63 ++++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 63 insertions(+)
>
> diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
> index 81a9239053a7..3cedc3580373 100644
> --- a/kernel/cgroup/cpuset.c
> +++ b/kernel/cgroup/cpuset.c
> @@ -1275,6 +1275,19 @@ static void isolated_cpus_update(int old_prs, int new_prs, struct cpumask *xcpus
> cpumask_andnot(isolated_cpus, isolated_cpus, xcpus);
> }
>
> +/*
> + * isolated_cpus_should_update - Returns if the isolated_cpus mask needs update
> + * @prs: new or old partition_root_state
> + * @parent: parent cpuset
> + * Return: true if isolated_cpus needs modification, false otherwise
> + */
> +static bool isolated_cpus_should_update(int prs, struct cpuset *parent)
> +{
> + if (!parent)
> + parent = &top_cpuset;
> + return prs != parent->partition_root_state;
> +}
> +
Hi all,
I'm a bit confused about the logic for updating isolated CPUs.
As I understand it, the isolated_cpus set should be updated in two scenarios:
1. When changing to an isolated partition.
2. When a valid isolated partition becomes invalid or changes its membership.
However, I find the current approach of comparing the parent's partition_root_state with prs to
determine whether to update the isolated CPUs somewhat difficult to follow.
Wouldn't a more straightforward approach be something like this?
static bool isolated_cpus_should_update(int old_prs, int new_prs)
{
        if (old_prs == new_prs)
                return false;
        /* Changing to an isolated partition */
        if (new_prs == PRS_ISOLATED)
                return true;
        /* Isolated partition changing to another state */
        if (old_prs == PRS_ISOLATED)
                return true;
        return false;
}
I'd greatly appreciate it if someone could help clarify this. Thank you.
> /*
> * partition_xcpus_add - Add new exclusive CPUs to partition
> * @new_prs: new partition_root_state
> @@ -1339,6 +1352,42 @@ static bool partition_xcpus_del(int old_prs, struct cpuset *parent,
> return isolcpus_updated;
> }
>
> +/*
> + * isolated_cpus_can_update - check for isolated & nohz_full conflicts
> + * @add_cpus: cpu mask for cpus that are going to be isolated
> + * @del_cpus: cpu mask for cpus that are no longer isolated, can be NULL
> + * Return: false if there is conflict, true otherwise
> + *
> + * If nohz_full is enabled and we have isolated CPUs, their combination must
> + * still leave housekeeping CPUs.
> + */
> +static bool isolated_cpus_can_update(struct cpumask *add_cpus,
> + struct cpumask *del_cpus)
> +{
> + cpumask_var_t full_hk_cpus;
> + int res = true;
> +
> + if (!housekeeping_enabled(HK_TYPE_KERNEL_NOISE))
> + return true;
> +
> + if (del_cpus && cpumask_weight_and(del_cpus,
> + housekeeping_cpumask(HK_TYPE_KERNEL_NOISE)))
> + return true;
> +
> + if (!alloc_cpumask_var(&full_hk_cpus, GFP_KERNEL))
> + return false;
> +
> + cpumask_and(full_hk_cpus, housekeeping_cpumask(HK_TYPE_KERNEL_NOISE),
> + housekeeping_cpumask(HK_TYPE_DOMAIN));
> + cpumask_andnot(full_hk_cpus, full_hk_cpus, isolated_cpus);
> + cpumask_and(full_hk_cpus, full_hk_cpus, cpu_active_mask);
> + if (!cpumask_weight_andnot(full_hk_cpus, add_cpus))
> + res = false;
> +
> + free_cpumask_var(full_hk_cpus);
> + return res;
> +}
> +
> static void update_exclusion_cpumasks(bool isolcpus_updated)
> {
> int ret;
> @@ -1464,6 +1513,9 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs,
> if (!cpumask_intersects(tmp->new_cpus, cpu_active_mask) ||
> cpumask_subset(top_cpuset.effective_cpus, tmp->new_cpus))
> return PERR_INVCPUS;
> + if (isolated_cpus_should_update(new_prs, NULL) &&
> + !isolated_cpus_can_update(tmp->new_cpus, NULL))
> + return PERR_HKEEPING;
>
> spin_lock_irq(&callback_lock);
> isolcpus_updated = partition_xcpus_add(new_prs, NULL, tmp->new_cpus);
> @@ -1563,6 +1615,9 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *xcpus,
> else if (cpumask_intersects(tmp->addmask, subpartitions_cpus) ||
> cpumask_subset(top_cpuset.effective_cpus, tmp->addmask))
> cs->prs_err = PERR_NOCPUS;
> + else if (isolated_cpus_should_update(prs, NULL) &&
> + !isolated_cpus_can_update(tmp->addmask, tmp->delmask))
> + cs->prs_err = PERR_HKEEPING;
> if (cs->prs_err)
> goto invalidate;
> }
> @@ -1914,6 +1969,12 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
> return err;
> }
>
> + if (deleting && isolated_cpus_should_update(new_prs, parent) &&
> + !isolated_cpus_can_update(tmp->delmask, tmp->addmask)) {
> + cs->prs_err = PERR_HKEEPING;
> + return PERR_HKEEPING;
> + }
> +
> /*
> * Change the parent's effective_cpus & effective_xcpus (top cpuset
> * only).
> @@ -2934,6 +2995,8 @@ static int update_prstate(struct cpuset *cs, int new_prs)
> * Need to update isolated_cpus.
> */
> isolcpus_updated = true;
> + if (!isolated_cpus_can_update(cs->effective_xcpus, NULL))
> + err = PERR_HKEEPING;
> } else {
> /*
> * Switching back to member is always allowed even if it
--
Best regards,
Ridong
* Re: [PATCH v12 9/9] timers: Exclude isolated cpus from timer migration
From: Frederic Weisbecker @ 2025-09-16 13:41 UTC
To: Gabriele Monaco
Cc: linux-kernel, Anna-Maria Behnsen, Thomas Gleixner, Waiman Long,
Tejun Heo, Johannes Weiner, Michal Koutný, cgroups,
John B. Wyatt IV, John B. Wyatt IV
On Mon, Sep 15, 2025 at 04:59:30PM +0200, Gabriele Monaco wrote:
> Tested-by: John B. Wyatt IV <jwyatt@redhat.com>
> Tested-by: John B. Wyatt IV <sageofredondo@gmail.com>
Two people, one tester? :-)
> +/**
> + * tmigr_isolated_exclude_cpumask - Exclude given CPUs from hierarchy
> + * @exclude_cpumask: the cpumask to be excluded from timer migration hierarchy
> + *
> + * This function can be called from cpuset code to provide the new set of
> + * isolated CPUs that should be excluded from the hierarchy.
> + * Online CPUs not present in exclude_cpumask but already excluded are brought
> + * back to the hierarchy.
> + * Functions to isolate/unisolate need to be called locally and can sleep.
> + */
> +int tmigr_isolated_exclude_cpumask(struct cpumask *exclude_cpumask)
> +{
> + struct work_struct __percpu *works __free(free_percpu) =
> + alloc_percpu(struct work_struct);
> + cpumask_var_t cpumask_unisol __free(free_cpumask_var) = CPUMASK_NULL;
> + cpumask_var_t cpumask_isol __free(free_cpumask_var) = CPUMASK_NULL;
> + int cpu;
> +
> + lockdep_assert_cpus_held();
> +
> + if (!alloc_cpumask_var(&cpumask_isol, GFP_KERNEL))
> + return -ENOMEM;
> + if (!alloc_cpumask_var(&cpumask_unisol, GFP_KERNEL))
> + return -ENOMEM;
> + if (!works)
> + return -ENOMEM;
> +
> + cpumask_andnot(cpumask_unisol, cpu_online_mask, exclude_cpumask);
> + cpumask_andnot(cpumask_unisol, cpumask_unisol, tmigr_available_cpumask);
> + /* Set up the mask earlier to avoid races with the migrator CPU */
> + cpumask_or(tmigr_available_cpumask, tmigr_available_cpumask, cpumask_unisol);
> + for_each_cpu(cpu, cpumask_unisol) {
> + struct work_struct *work = per_cpu_ptr(works, cpu);
> +
> + INIT_WORK(work, tmigr_cpu_unisolate);
> + schedule_work_on(cpu, work);
> + }
> +
> + cpumask_and(cpumask_isol, exclude_cpumask, tmigr_available_cpumask);
> + cpumask_and(cpumask_isol, cpumask_isol, housekeeping_cpumask(HK_TYPE_KERNEL_NOISE));
> + /*
> + * Handle this here and not in the cpuset code because exclude_cpumask
> + * might include also the tick CPU if included in isolcpus.
> + */
> + for_each_cpu(cpu, cpumask_isol) {
> + if (!tick_nohz_cpu_hotpluggable(cpu)) {
> + cpumask_clear_cpu(cpu, cpumask_isol);
> + break;
> + }
> + }
> + /* Set up the mask earlier to avoid races with the migrator CPU */
> + cpumask_andnot(tmigr_available_cpumask, tmigr_available_cpumask, cpumask_isol);
> + for_each_cpu(cpu, cpumask_isol) {
> + struct work_struct *work = per_cpu_ptr(works, cpu);
> +
> + INIT_WORK(work, tmigr_cpu_isolate);
> + schedule_work_on(cpu, work);
> + }
This is racy at various levels:
* The tmigr_available_cpumask clear can race with the cpumask_set_cpu() in
tmigr_cpu_unisolate(), risking overwrites when CPUs are on the same bitmap
chunk (bitmap operations aren't atomic).
* tmigr_cpu_isolate() and tmigr_cpu_unisolate() can now run concurrently and
then cpumask_set_cpu() can race with cpumask_clear_cpu() on
tmigr_available_cpumask, risking overwrites, though the real problem is
the preceding point.
* tmigr_cpu_isolate() can race with tmigr_cpu_isolate() on other CPUs, so
the calls to cpumask_clear_cpu() can race. That's fine because
tmigr_available_cpumask is already set for those CPUs, but it still
leaves an uncomfortable taste. It would give KCSAN an excuse to warn,
for example.
* Similarly for tmigr_cpu_unisolate() calls racing with each other.
So, something like that should be added?
diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c
index 08e29fc01479..6615e56c8b0d 100644
--- a/kernel/time/timer_migration.c
+++ b/kernel/time/timer_migration.c
@@ -1473,7 +1473,6 @@ static int tmigr_clear_cpu_available(unsigned int cpu)
         int migrator;
         u64 firstexp;
 
-        cpumask_clear_cpu(cpu, tmigr_available_cpumask);
         scoped_guard(raw_spinlock_irq, &tmc->lock) {
                 if (!tmc->available)
                         return 0;
@@ -1489,11 +1488,11 @@ static int tmigr_clear_cpu_available(unsigned int cpu)
         }
 
         if (firstexp != KTIME_MAX) {
-                migrator = cpumask_any(tmigr_available_cpumask);
+                migrator = cpumask_any_but(tmigr_available_cpumask, cpu);
                 work_on_cpu(migrator, tmigr_trigger_active, NULL);
         }
 
-        return 0;
+        return 1;
 }
 
 static int tmigr_set_cpu_available(unsigned int cpu)
@@ -1506,7 +1505,7 @@ static int tmigr_set_cpu_available(unsigned int cpu)
 
         if (tmigr_is_isolated(cpu))
                 return 0;
-        cpumask_set_cpu(cpu, tmigr_available_cpumask);
+
         scoped_guard(raw_spinlock_irq, &tmc->lock) {
                 if (tmc->available)
                         return 0;
@@ -1516,7 +1515,21 @@ static int tmigr_set_cpu_available(unsigned int cpu)
                 __tmigr_cpu_activate(tmc);
                 tmc->available = true;
         }
-        return 0;
+        return 1;
+}
+
+static int tmigr_online_cpu(unsigned int cpu)
+{
+        if (tmigr_set_cpu_available(cpu) > 0)
+                cpumask_set_cpu(cpu, tmigr_available_cpumask);
+        return 0;
+}
+
+static int tmigr_offline_cpu(unsigned int cpu)
+{
+        if (tmigr_clear_cpu_available(cpu) > 0)
+                cpumask_clear_cpu(cpu, tmigr_available_cpumask);
+        return 0;
+}
 
 static void tmigr_cpu_isolate(struct work_struct *ignored)
@@ -1601,8 +1612,7 @@ int tmigr_isolated_exclude_cpumask(struct cpumask *exclude_cpumask)
 static int __init tmigr_late_init(void)
 {
         return cpuhp_setup_state(CPUHP_AP_TMIGR_ONLINE, "tmigr:online",
-                                 tmigr_set_cpu_available,
-                                 tmigr_clear_cpu_available);
+                                 tmigr_online_cpu, tmigr_offline_cpu);
 }
 
 static void tmigr_init_group(struct tmigr_group *group, unsigned int lvl,
--
Frederic Weisbecker
SUSE Labs