* [PATCH v4 1/5] timers: Rename tmigr 'online' bit to 'available'
2025-05-06 9:15 [PATCH v4 0/5] timers: Exclude isolated cpus from timer migration Gabriele Monaco
@ 2025-05-06 9:15 ` Gabriele Monaco
2025-05-06 9:15 ` [PATCH v4 2/5] timers: Add the available mask in timer migration Gabriele Monaco
` (3 subsequent siblings)
4 siblings, 0 replies; 14+ messages in thread
From: Gabriele Monaco @ 2025-05-06 9:15 UTC (permalink / raw)
To: linux-kernel, Frederic Weisbecker, Thomas Gleixner, Waiman Long
Cc: Gabriele Monaco
The timer migration hierarchy excludes offline CPUs via the
tmigr_is_not_available function, which is essentially checking the
online bit for the CPU.
Rename the online bit to available and all references in function names
and tracepoint to generalise the concept of available CPUs.
Signed-off-by: Gabriele Monaco <gmonaco@redhat.com>
---
include/trace/events/timer_migration.h | 4 ++--
kernel/time/timer_migration.c | 22 +++++++++++-----------
kernel/time/timer_migration.h | 2 +-
3 files changed, 14 insertions(+), 14 deletions(-)
diff --git a/include/trace/events/timer_migration.h b/include/trace/events/timer_migration.h
index 47db5eaf2f9a..61171b13c687 100644
--- a/include/trace/events/timer_migration.h
+++ b/include/trace/events/timer_migration.h
@@ -173,14 +173,14 @@ DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_active,
TP_ARGS(tmc)
);
-DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_online,
+DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_available,
TP_PROTO(struct tmigr_cpu *tmc),
TP_ARGS(tmc)
);
-DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_offline,
+DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_unavailable,
TP_PROTO(struct tmigr_cpu *tmc),
diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c
index 2f6330831f08..7efd897c7959 100644
--- a/kernel/time/timer_migration.c
+++ b/kernel/time/timer_migration.c
@@ -427,7 +427,7 @@ static DEFINE_PER_CPU(struct tmigr_cpu, tmigr_cpu);
static inline bool tmigr_is_not_available(struct tmigr_cpu *tmc)
{
- return !(tmc->tmgroup && tmc->online);
+ return !(tmc->tmgroup && tmc->available);
}
/*
@@ -926,7 +926,7 @@ static void tmigr_handle_remote_cpu(unsigned int cpu, u64 now,
* updated the event takes care when hierarchy is completely
* idle. Otherwise the migrator does it as the event is enqueued.
*/
- if (!tmc->online || tmc->remote || tmc->cpuevt.ignore ||
+ if (!tmc->available || tmc->remote || tmc->cpuevt.ignore ||
now < tmc->cpuevt.nextevt.expires) {
raw_spin_unlock_irq(&tmc->lock);
return;
@@ -973,7 +973,7 @@ static void tmigr_handle_remote_cpu(unsigned int cpu, u64 now,
* (See also section "Required event and timerqueue update after a
* remote expiry" in the documentation at the top)
*/
- if (!tmc->online || !tmc->idle) {
+ if (!tmc->available || !tmc->idle) {
timer_unlock_remote_bases(cpu);
goto unlock;
}
@@ -1435,19 +1435,19 @@ static long tmigr_trigger_active(void *unused)
{
struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
- WARN_ON_ONCE(!tmc->online || tmc->idle);
+ WARN_ON_ONCE(!tmc->available || tmc->idle);
return 0;
}
-static int tmigr_cpu_offline(unsigned int cpu)
+static int tmigr_cpu_unavailable(unsigned int cpu)
{
struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
int migrator;
u64 firstexp;
raw_spin_lock_irq(&tmc->lock);
- tmc->online = false;
+ tmc->available = false;
WRITE_ONCE(tmc->wakeup, KTIME_MAX);
/*
@@ -1455,7 +1455,7 @@ static int tmigr_cpu_offline(unsigned int cpu)
* offline; Therefore nextevt value is set to KTIME_MAX
*/
firstexp = __tmigr_cpu_deactivate(tmc, KTIME_MAX);
- trace_tmigr_cpu_offline(tmc);
+ trace_tmigr_cpu_unavailable(tmc);
raw_spin_unlock_irq(&tmc->lock);
if (firstexp != KTIME_MAX) {
@@ -1466,7 +1466,7 @@ static int tmigr_cpu_offline(unsigned int cpu)
return 0;
}
-static int tmigr_cpu_online(unsigned int cpu)
+static int tmigr_cpu_available(unsigned int cpu)
{
struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
@@ -1475,11 +1475,11 @@ static int tmigr_cpu_online(unsigned int cpu)
return -EINVAL;
raw_spin_lock_irq(&tmc->lock);
- trace_tmigr_cpu_online(tmc);
+ trace_tmigr_cpu_available(tmc);
tmc->idle = timer_base_is_idle();
if (!tmc->idle)
__tmigr_cpu_activate(tmc);
- tmc->online = true;
+ tmc->available = true;
raw_spin_unlock_irq(&tmc->lock);
return 0;
}
@@ -1850,7 +1850,7 @@ static int __init tmigr_init(void)
goto err;
ret = cpuhp_setup_state(CPUHP_AP_TMIGR_ONLINE, "tmigr:online",
- tmigr_cpu_online, tmigr_cpu_offline);
+ tmigr_cpu_available, tmigr_cpu_unavailable);
if (ret)
goto err;
diff --git a/kernel/time/timer_migration.h b/kernel/time/timer_migration.h
index ae19f70f8170..70879cde6fdd 100644
--- a/kernel/time/timer_migration.h
+++ b/kernel/time/timer_migration.h
@@ -97,7 +97,7 @@ struct tmigr_group {
*/
struct tmigr_cpu {
raw_spinlock_t lock;
- bool online;
+ bool available;
bool idle;
bool remote;
struct tmigr_group *tmgroup;
--
2.49.0
^ permalink raw reply related [flat|nested] 14+ messages in thread* [PATCH v4 2/5] timers: Add the available mask in timer migration
2025-05-06 9:15 [PATCH v4 0/5] timers: Exclude isolated cpus from timer migration Gabriele Monaco
2025-05-06 9:15 ` [PATCH v4 1/5] timers: Rename tmigr 'online' bit to 'available' Gabriele Monaco
@ 2025-05-06 9:15 ` Gabriele Monaco
2025-05-06 16:07 ` Frederic Weisbecker
2025-05-06 9:15 ` [PATCH v4 3/5] cgroup/cpuset: Rename update_unbound_workqueue_cpumask to update_exclusion_cpumasks Gabriele Monaco
` (2 subsequent siblings)
4 siblings, 1 reply; 14+ messages in thread
From: Gabriele Monaco @ 2025-05-06 9:15 UTC (permalink / raw)
To: linux-kernel, Frederic Weisbecker, Thomas Gleixner, Waiman Long
Cc: Gabriele Monaco
Keep track of the CPUs available for timer migration in a cpumask. This
prepares the ground to generalise the concept of unavailable CPUs.
Signed-off-by: Gabriele Monaco <gmonaco@redhat.com>
---
kernel/time/timer_migration.c | 12 +++++++++++-
1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c
index 7efd897c7959..25439f961ccf 100644
--- a/kernel/time/timer_migration.c
+++ b/kernel/time/timer_migration.c
@@ -422,6 +422,9 @@ static unsigned int tmigr_crossnode_level __read_mostly;
static DEFINE_PER_CPU(struct tmigr_cpu, tmigr_cpu);
+/* CPUs available for timer migration */
+static cpumask_var_t tmigr_available_cpumask;
+
#define TMIGR_NONE 0xFF
#define BIT_CNT 8
@@ -1449,6 +1452,7 @@ static int tmigr_cpu_unavailable(unsigned int cpu)
raw_spin_lock_irq(&tmc->lock);
tmc->available = false;
WRITE_ONCE(tmc->wakeup, KTIME_MAX);
+ cpumask_clear_cpu(cpu, tmigr_available_cpumask);
/*
* CPU has to handle the local events on his own, when on the way to
@@ -1459,7 +1463,7 @@ static int tmigr_cpu_unavailable(unsigned int cpu)
raw_spin_unlock_irq(&tmc->lock);
if (firstexp != KTIME_MAX) {
- migrator = cpumask_any_but(cpu_online_mask, cpu);
+ migrator = cpumask_any(tmigr_available_cpumask);
work_on_cpu(migrator, tmigr_trigger_active, NULL);
}
@@ -1480,6 +1484,7 @@ static int tmigr_cpu_available(unsigned int cpu)
if (!tmc->idle)
__tmigr_cpu_activate(tmc);
tmc->available = true;
+ cpumask_set_cpu(cpu, tmigr_available_cpumask);
raw_spin_unlock_irq(&tmc->lock);
return 0;
}
@@ -1801,6 +1806,11 @@ static int __init tmigr_init(void)
if (ncpus == 1)
return 0;
+ if (!zalloc_cpumask_var(&tmigr_available_cpumask, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
/*
* Calculate the required hierarchy levels. Unfortunately there is no
* reliable information available, unless all possible CPUs have been
--
2.49.0
^ permalink raw reply related [flat|nested] 14+ messages in thread* Re: [PATCH v4 2/5] timers: Add the available mask in timer migration
2025-05-06 9:15 ` [PATCH v4 2/5] timers: Add the available mask in timer migration Gabriele Monaco
@ 2025-05-06 16:07 ` Frederic Weisbecker
2025-05-07 7:57 ` Gabriele Monaco
0 siblings, 1 reply; 14+ messages in thread
From: Frederic Weisbecker @ 2025-05-06 16:07 UTC (permalink / raw)
To: Gabriele Monaco; +Cc: linux-kernel, Thomas Gleixner, Waiman Long
Le Tue, May 06, 2025 at 11:15:37AM +0200, Gabriele Monaco a écrit :
> Keep track of the CPUs available for timer migration in a cpumask. This
> prepares the ground to generalise the concept of unavailable CPUs.
>
> Signed-off-by: Gabriele Monaco <gmonaco@redhat.com>
> ---
> kernel/time/timer_migration.c | 12 +++++++++++-
> 1 file changed, 11 insertions(+), 1 deletion(-)
>
> diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c
> index 7efd897c7959..25439f961ccf 100644
> --- a/kernel/time/timer_migration.c
> +++ b/kernel/time/timer_migration.c
> @@ -422,6 +422,9 @@ static unsigned int tmigr_crossnode_level __read_mostly;
>
> static DEFINE_PER_CPU(struct tmigr_cpu, tmigr_cpu);
>
> +/* CPUs available for timer migration */
> +static cpumask_var_t tmigr_available_cpumask;
> +
> #define TMIGR_NONE 0xFF
> #define BIT_CNT 8
>
> @@ -1449,6 +1452,7 @@ static int tmigr_cpu_unavailable(unsigned int cpu)
> raw_spin_lock_irq(&tmc->lock);
> tmc->available = false;
> WRITE_ONCE(tmc->wakeup, KTIME_MAX);
> + cpumask_clear_cpu(cpu, tmigr_available_cpumask);
>
> /*
> * CPU has to handle the local events on his own, when on the way to
> @@ -1459,7 +1463,7 @@ static int tmigr_cpu_unavailable(unsigned int cpu)
> raw_spin_unlock_irq(&tmc->lock);
>
> if (firstexp != KTIME_MAX) {
> - migrator = cpumask_any_but(cpu_online_mask, cpu);
> + migrator = cpumask_any(tmigr_available_cpumask);
Considering nohz_full CPUs should be still available.
I don't think there is anything ensuring that, in nohz_full mode,
there must be at least one housekeeping CPU that is not domain
isolated.
For example if we have two CPUs with CPU 0 being domain isolated
and CPU 1 being nohz_full, then there is no migrator to handle CPU 1's
global timers.
Thanks.
> work_on_cpu(migrator, tmigr_trigger_active, NULL);
> }
--
Frederic Weisbecker
SUSE Labs
^ permalink raw reply [flat|nested] 14+ messages in thread* Re: [PATCH v4 2/5] timers: Add the available mask in timer migration
2025-05-06 16:07 ` Frederic Weisbecker
@ 2025-05-07 7:57 ` Gabriele Monaco
2025-05-07 12:25 ` Frederic Weisbecker
0 siblings, 1 reply; 14+ messages in thread
From: Gabriele Monaco @ 2025-05-07 7:57 UTC (permalink / raw)
To: Frederic Weisbecker; +Cc: linux-kernel, Thomas Gleixner, Waiman Long
On Tue, 2025-05-06 at 18:07 +0200, Frederic Weisbecker wrote:
> Le Tue, May 06, 2025 at 11:15:37AM +0200, Gabriele Monaco a écrit :
> > Keep track of the CPUs available for timer migration in a cpumask.
> > This
> > prepares the ground to generalise the concept of unavailable CPUs.
> >
> > Signed-off-by: Gabriele Monaco <gmonaco@redhat.com>
> > ---
> > kernel/time/timer_migration.c | 12 +++++++++++-
> > 1 file changed, 11 insertions(+), 1 deletion(-)
> >
> > diff --git a/kernel/time/timer_migration.c
> > b/kernel/time/timer_migration.c
> > index 7efd897c7959..25439f961ccf 100644
> > --- a/kernel/time/timer_migration.c
> > +++ b/kernel/time/timer_migration.c
> > @@ -422,6 +422,9 @@ static unsigned int tmigr_crossnode_level
> > __read_mostly;
> >
> > static DEFINE_PER_CPU(struct tmigr_cpu, tmigr_cpu);
> >
> > +/* CPUs available for timer migration */
> > +static cpumask_var_t tmigr_available_cpumask;
> > +
> > #define TMIGR_NONE 0xFF
> > #define BIT_CNT 8
> >
> > @@ -1449,6 +1452,7 @@ static int tmigr_cpu_unavailable(unsigned int
> > cpu)
> > raw_spin_lock_irq(&tmc->lock);
> > tmc->available = false;
> > WRITE_ONCE(tmc->wakeup, KTIME_MAX);
> > + cpumask_clear_cpu(cpu, tmigr_available_cpumask);
> >
> > /*
> > * CPU has to handle the local events on his own, when on
> > the way to
> > @@ -1459,7 +1463,7 @@ static int tmigr_cpu_unavailable(unsigned int
> > cpu)
> > raw_spin_unlock_irq(&tmc->lock);
> >
> > if (firstexp != KTIME_MAX) {
> > - migrator = cpumask_any_but(cpu_online_mask, cpu);
> > + migrator = cpumask_any(tmigr_available_cpumask);
>
> Considering nohz_full CPUs should be still available.
>
> I don't think there is anything ensuring that, in nohz_full mode,
> there must be at least one housekeeping CPU that is not domain
> isolated.
>
> For example if we have two CPUs with CPU 0 being domain isolated
> and CPU 1 being nohz_full, then there is no migrator to handle CPU
> 1's
> global timers.
>
Mmh, good point, didn't think about having the domain isolated and
nohz_full maps disjointed..
When that's really the case how do you think we should fall back?
In the situation you describe, no one is going to be able to handle
global timers on the nohz_full CPUs, right?
When this situation really occurs, we could keep one of the domain
isolated CPUs in the hierarchy.
Now, I see on x86 CPU0 cannot be offlined and is not added to
nohz_full, which would make things considerably easier, but ARM doesn't
seem to work the same way.
We could elect a lucky winner (e.g. first or last becoming domain
isolated) and swap it whenever it becomes offline, until we actually
run out of those (no online cpu non-nohz_full is left), but I believe
this shouldn't happen..
Does this make sense to you?
Thanks,
Gabriele
^ permalink raw reply [flat|nested] 14+ messages in thread* Re: [PATCH v4 2/5] timers: Add the available mask in timer migration
2025-05-07 7:57 ` Gabriele Monaco
@ 2025-05-07 12:25 ` Frederic Weisbecker
2025-05-07 12:46 ` Gabriele Monaco
0 siblings, 1 reply; 14+ messages in thread
From: Frederic Weisbecker @ 2025-05-07 12:25 UTC (permalink / raw)
To: Gabriele Monaco; +Cc: linux-kernel, Thomas Gleixner, Waiman Long
Le Wed, May 07, 2025 at 09:57:38AM +0200, Gabriele Monaco a écrit :
>
>
> On Tue, 2025-05-06 at 18:07 +0200, Frederic Weisbecker wrote:
> > Le Tue, May 06, 2025 at 11:15:37AM +0200, Gabriele Monaco a écrit :
> > > Keep track of the CPUs available for timer migration in a cpumask.
> > > This
> > > prepares the ground to generalise the concept of unavailable CPUs.
> > >
> > > Signed-off-by: Gabriele Monaco <gmonaco@redhat.com>
> > > ---
> > > kernel/time/timer_migration.c | 12 +++++++++++-
> > > 1 file changed, 11 insertions(+), 1 deletion(-)
> > >
> > > diff --git a/kernel/time/timer_migration.c
> > > b/kernel/time/timer_migration.c
> > > index 7efd897c7959..25439f961ccf 100644
> > > --- a/kernel/time/timer_migration.c
> > > +++ b/kernel/time/timer_migration.c
> > > @@ -422,6 +422,9 @@ static unsigned int tmigr_crossnode_level
> > > __read_mostly;
> > >
> > > static DEFINE_PER_CPU(struct tmigr_cpu, tmigr_cpu);
> > >
> > > +/* CPUs available for timer migration */
> > > +static cpumask_var_t tmigr_available_cpumask;
> > > +
> > > #define TMIGR_NONE 0xFF
> > > #define BIT_CNT 8
> > >
> > > @@ -1449,6 +1452,7 @@ static int tmigr_cpu_unavailable(unsigned int
> > > cpu)
> > > raw_spin_lock_irq(&tmc->lock);
> > > tmc->available = false;
> > > WRITE_ONCE(tmc->wakeup, KTIME_MAX);
> > > + cpumask_clear_cpu(cpu, tmigr_available_cpumask);
> > >
> > > /*
> > > * CPU has to handle the local events on his own, when on
> > > the way to
> > > @@ -1459,7 +1463,7 @@ static int tmigr_cpu_unavailable(unsigned int
> > > cpu)
> > > raw_spin_unlock_irq(&tmc->lock);
> > >
> > > if (firstexp != KTIME_MAX) {
> > > - migrator = cpumask_any_but(cpu_online_mask, cpu);
> > > + migrator = cpumask_any(tmigr_available_cpumask);
> >
> > Considering nohz_full CPUs should be still available.
> >
> > I don't think there is anything ensuring that, in nohz_full mode,
> > there must be at least one housekeeping CPU that is not domain
> > isolated.
> >
> > For example if we have two CPUs with CPU 0 being domain isolated
> > and CPU 1 being nohz_full, then there is no migrator to handle CPU
> > 1's
> > global timers.
> >
>
> Mmh, good point, didn't think about having the domain isolated and
> nohz_full maps disjointed..
>
> When that's really the case how do you think we should fall back?
>
> In the situation you describe, no one is going to be able to handle
> global timers on the nohz_full CPUs, right?
>
> When this situation really occurs, we could keep one of the domain
> isolated CPUs in the hierarchy.
> Now, I see on x86 CPU0 cannot be offlined and is not added to
> nohz_full, which would make things considerably easier, but ARM doesn't
> seem to work the same way.
>
> We could elect a lucky winner (e.g. first or last becoming domain
> isolated) and swap it whenever it becomes offline, until we actually
> run out of those (no online cpu non-nohz_full is left), but I believe
> this shouldn't happen..
>
> Does this make sense to you?
Well, nohz_full= and isolcpus=, when they are passed together, must contain the
same set of CPUs. And if there is no housekeeping CPU then one is forced, so
it's well handled at this point.
But if nohz_full= is passed on boot and cpusets later create an isolated
partition which spans the housekeeping set, then the isolated partition must
be rejected.
Thanks.
--
Frederic Weisbecker
SUSE Labs
^ permalink raw reply [flat|nested] 14+ messages in thread* Re: [PATCH v4 2/5] timers: Add the available mask in timer migration
2025-05-07 12:25 ` Frederic Weisbecker
@ 2025-05-07 12:46 ` Gabriele Monaco
2025-05-07 13:40 ` Frederic Weisbecker
0 siblings, 1 reply; 14+ messages in thread
From: Gabriele Monaco @ 2025-05-07 12:46 UTC (permalink / raw)
To: Frederic Weisbecker; +Cc: linux-kernel, Thomas Gleixner, Waiman Long
On Wed, 2025-05-07 at 14:25 +0200, Frederic Weisbecker wrote:
> Le Wed, May 07, 2025 at 09:57:38AM +0200, Gabriele Monaco a écrit :
> >
> >
> > On Tue, 2025-05-06 at 18:07 +0200, Frederic Weisbecker wrote:
> > > Le Tue, May 06, 2025 at 11:15:37AM +0200, Gabriele Monaco a écrit
> > > :
> > > > Keep track of the CPUs available for timer migration in a
> > > > cpumask.
> > > > This
> > > > prepares the ground to generalise the concept of unavailable
> > > > CPUs.
> > > >
> > > > Signed-off-by: Gabriele Monaco <gmonaco@redhat.com>
> > > > ---
> > > > kernel/time/timer_migration.c | 12 +++++++++++-
> > > > 1 file changed, 11 insertions(+), 1 deletion(-)
> > > >
> > > > diff --git a/kernel/time/timer_migration.c
> > > > b/kernel/time/timer_migration.c
> > > > index 7efd897c7959..25439f961ccf 100644
> > > > --- a/kernel/time/timer_migration.c
> > > > +++ b/kernel/time/timer_migration.c
> > > > @@ -422,6 +422,9 @@ static unsigned int tmigr_crossnode_level
> > > > __read_mostly;
> > > >
> > > > static DEFINE_PER_CPU(struct tmigr_cpu, tmigr_cpu);
> > > >
> > > > +/* CPUs available for timer migration */
> > > > +static cpumask_var_t tmigr_available_cpumask;
> > > > +
> > > > #define TMIGR_NONE 0xFF
> > > > #define BIT_CNT 8
> > > >
> > > > @@ -1449,6 +1452,7 @@ static int tmigr_cpu_unavailable(unsigned
> > > > int
> > > > cpu)
> > > > raw_spin_lock_irq(&tmc->lock);
> > > > tmc->available = false;
> > > > WRITE_ONCE(tmc->wakeup, KTIME_MAX);
> > > > + cpumask_clear_cpu(cpu, tmigr_available_cpumask);
> > > >
> > > > /*
> > > > * CPU has to handle the local events on his own, when
> > > > on
> > > > the way to
> > > > @@ -1459,7 +1463,7 @@ static int tmigr_cpu_unavailable(unsigned
> > > > int
> > > > cpu)
> > > > raw_spin_unlock_irq(&tmc->lock);
> > > >
> > > > if (firstexp != KTIME_MAX) {
> > > > - migrator = cpumask_any_but(cpu_online_mask,
> > > > cpu);
> > > > + migrator =
> > > > cpumask_any(tmigr_available_cpumask);
> > >
> > > Considering nohz_full CPUs should be still available.
> > >
> > > I don't think there is anything ensuring that, in nohz_full mode,
> > > there must be at least one housekeeping CPU that is not domain
> > > isolated.
> > >
> > > For example if we have two CPUs with CPU 0 being domain isolated
> > > and CPU 1 being nohz_full, then there is no migrator to handle
> > > CPU
> > > 1's
> > > global timers.
> > >
> >
> > Mmh, good point, didn't think about having the domain isolated and
> > nohz_full maps disjointed..
> >
> > When that's really the case how do you think we should fall back?
> >
> > In the situation you describe, no one is going to be able to handle
> > global timers on the nohz_full CPUs, right?
> >
> > When this situation really occurs, we could keep one of the domain
> > isolated CPUs in the hierarchy.
> > Now, I see on x86 CPU0 cannot be offlined and is not added to
> > nohz_full, which would make things considerably easier, but ARM
> > doesn't
> > seem to work the same way.
> >
> > We could elect a lucky winner (e.g. first or last becoming domain
> > isolated) and swap it whenever it becomes offline, until we
> > actually
> > run out of those (no online cpu non-nohz_full is left), but I
> > believe
> > this shouldn't happen..
> >
> > Does this make sense to you?
>
> Well, nohz_full= and isolcpus=, when they are passed together, must
> contain the
> same set of CPUs. And if there is no housekeeping CPU then one is
> forced, so
> it's well handled at this point.
I'm not so sure about this one though.
As far as I understand [1], is preventing the user from setting
different CPUs while doing isolcpus=nohz, and nohz_full= (which is now
equivalent). But I seem to be able to do isolcpus=0-3 and nohz_full=4-7
without any problem and I believe I'd hit the issue you're mentioning.
(The same would work if I swap the masks as 0 cannot be nohz_full).
# vng -a isolcpus=0-7 -a nohz_full=8-15 head
/sys/devices/system/cpu/{isolated,nohz_full}
==> /sys/devices/system/cpu/isolated <==
0-7
==> /sys/devices/system/cpu/nohz_full <==
8-15
(where probably some CPUs are set up to do housekeeping stuff anyway,
but if we just look at the masks, we won't notice)
Then I assume this should not be allowed either, should it?
Or am I missing something here?
>
> But if nohz_full= is passed on boot and cpusets later create an
> isolated
> partition which spans the housekeeping set, then the isolated
> partition must
> be rejected.
Mmh, that would make things easier actually.
I assume there's no real use case for that kind of hybrid setup with
half CPUs nohz_full and half domain isolated..
Thanks,
Gabriele
[1] -
https://elixir.bootlin.com/linux/v6.14.5/source/kernel/sched/isolation.c#L163
^ permalink raw reply [flat|nested] 14+ messages in thread* Re: [PATCH v4 2/5] timers: Add the available mask in timer migration
2025-05-07 12:46 ` Gabriele Monaco
@ 2025-05-07 13:40 ` Frederic Weisbecker
2025-05-07 13:54 ` Gabriele Monaco
0 siblings, 1 reply; 14+ messages in thread
From: Frederic Weisbecker @ 2025-05-07 13:40 UTC (permalink / raw)
To: Gabriele Monaco; +Cc: linux-kernel, Thomas Gleixner, Waiman Long
Le Wed, May 07, 2025 at 02:46:39PM +0200, Gabriele Monaco a écrit :
>
> I'm not so sure about this one though.
> As far as I understand [1], is preventing the user from setting
> different CPUs while doing isolcpus=nohz, and nohz_full= (which is now
> equivalent). But I seem to be able to do isolcpus=0-3 and nohz_full=4-7
> without any problem and I believe I'd hit the issue you're mentioning.
Duh!
> (The same would work if I swap the masks as 0 cannot be nohz_full).
Unfortunately 0 can be nohz_full...
>
> # vng -a isolcpus=0-7 -a nohz_full=8-15 head
> /sys/devices/system/cpu/{isolated,nohz_full}
>
> ==> /sys/devices/system/cpu/isolated <==
> 0-7
>
> ==> /sys/devices/system/cpu/nohz_full <==
> 8-15
>
> (where probably some CPUs are set up to do housekeeping stuff anyway,
> but if we just look at the masks, we won't notice)
>
> Then I assume this should not be allowed either, should it?
> Or am I missing something here?
Exactly then. housekeeping_setup() already handles cases when
there is no housekeeping left. I guess that section could be
made aware of nohz_full + isolcpus not leaving any housekeeping left.
>
> >
> > But if nohz_full= is passed on boot and cpusets later create an
> > isolated
> > partition which spans the housekeeping set, then the isolated
> > partition must
> > be rejected.
>
> Mmh, that would make things easier actually.
> I assume there's no real use case for that kind of hybrid setup with
> half CPUs nohz_full and half domain isolated..
I guess we can accept nohz_full + isolated partition as long as a housekeeping
CPU remains.
Thanks.
--
Frederic Weisbecker
SUSE Labs
^ permalink raw reply [flat|nested] 14+ messages in thread* Re: [PATCH v4 2/5] timers: Add the available mask in timer migration
2025-05-07 13:40 ` Frederic Weisbecker
@ 2025-05-07 13:54 ` Gabriele Monaco
2025-05-07 14:27 ` Frederic Weisbecker
0 siblings, 1 reply; 14+ messages in thread
From: Gabriele Monaco @ 2025-05-07 13:54 UTC (permalink / raw)
To: Frederic Weisbecker; +Cc: linux-kernel, Thomas Gleixner, Waiman Long
On Wed, 2025-05-07 at 15:40 +0200, Frederic Weisbecker wrote:
> Le Wed, May 07, 2025 at 02:46:39PM +0200, Gabriele Monaco a écrit :
> >
> > I'm not so sure about this one though.
> > As far as I understand [1], is preventing the user from setting
> > different CPUs while doing isolcpus=nohz, and nohz_full= (which is
> > now
> > equivalent). But I seem to be able to do isolcpus=0-3 and
> > nohz_full=4-7
> > without any problem and I believe I'd hit the issue you're
> > mentioning.
>
> Duh!
>
> > (The same would work if I swap the masks as 0 cannot be nohz_full).
>
> Unfortunately 0 can be nohz_full...
Well, I haven't found what enforces it, but I wasn't able to set 0 as
nohz_full, no matter what I tried on x86 and arm64. Not sure if 0 was
just by chance in this case (I'm guessing it has something to do with
tick_do_timer_cpu, I'm not quite familiar with this code).
I was trying to see if we can make some assumption in the tmigr but
what you propose (enforce fully housekeeping CPUs everywhere) seems
much neater.
>
> >
> > # vng -a isolcpus=0-7 -a nohz_full=8-15 head
> > /sys/devices/system/cpu/{isolated,nohz_full}
> >
> > ==> /sys/devices/system/cpu/isolated <==
> > 0-7
> >
> > ==> /sys/devices/system/cpu/nohz_full <==
> > 8-15
> >
> > (where probably some CPUs are set up to do housekeeping stuff
> > anyway,
> > but if we just look at the masks, we won't notice)
> >
> > Then I assume this should not be allowed either, should it?
> > Or am I missing something here?
>
> Exactly then. housekeeping_setup() already handles cases when
> there is no housekeeping left. I guess that section could be
> made aware of nohz_full + isolcpus not leaving any housekeeping left.
>
> >
> > >
> > > But if nohz_full= is passed on boot and cpusets later create an
> > > isolated
> > > partition which spans the housekeeping set, then the isolated
> > > partition must
> > > be rejected.
> >
> > Mmh, that would make things easier actually.
> > I assume there's no real use case for that kind of hybrid setup
> > with
> > half CPUs nohz_full and half domain isolated..
>
> I guess we can accept nohz_full + isolated partition as long as a
> housekeeping
> CPU remains.
Yeah makes sense, I'll explore that.
Thanks,
Gabriele
^ permalink raw reply [flat|nested] 14+ messages in thread* Re: [PATCH v4 2/5] timers: Add the available mask in timer migration
2025-05-07 13:54 ` Gabriele Monaco
@ 2025-05-07 14:27 ` Frederic Weisbecker
0 siblings, 0 replies; 14+ messages in thread
From: Frederic Weisbecker @ 2025-05-07 14:27 UTC (permalink / raw)
To: Gabriele Monaco; +Cc: linux-kernel, Thomas Gleixner, Waiman Long
Le Wed, May 07, 2025 at 03:54:32PM +0200, Gabriele Monaco a écrit :
>
>
> On Wed, 2025-05-07 at 15:40 +0200, Frederic Weisbecker wrote:
> > Le Wed, May 07, 2025 at 02:46:39PM +0200, Gabriele Monaco a écrit :
> > >
> > > I'm not so sure about this one though.
> > > As far as I understand [1], is preventing the user from setting
> > > different CPUs while doing isolcpus=nohz, and nohz_full= (which is
> > > now
> > > equivalent). But I seem to be able to do isolcpus=0-3 and
> > > nohz_full=4-7
> > > without any problem and I believe I'd hit the issue you're
> > > mentioning.
> >
> > Duh!
> >
> > > (The same would work if I swap the masks as 0 cannot be nohz_full).
> >
> > Unfortunately 0 can be nohz_full...
>
> Well, I haven't found what enforces it, but I wasn't able to set 0 as
> nohz_full, no matter what I tried on x86 and arm64. Not sure if 0 was
> just by chance in this case (I'm guessing it has something to do with
> tick_do_timer_cpu, I'm not quite familiar with this code).
Ah looks like you need CONFIG_PM_SLEEP_SMP_NONZERO_CPU. IIUC it's only
powerpc.
You can try to deactivate CONFIG_SUSPEND and CONFIG_HIBERNATE_CALLBACKS
otherwise.
Thanks.
--
Frederic Weisbecker
SUSE Labs
^ permalink raw reply [flat|nested] 14+ messages in thread
* [PATCH v4 3/5] cgroup/cpuset: Rename update_unbound_workqueue_cpumask to update_exclusion_cpumasks
2025-05-06 9:15 [PATCH v4 0/5] timers: Exclude isolated cpus from timer migration Gabriele Monaco
2025-05-06 9:15 ` [PATCH v4 1/5] timers: Rename tmigr 'online' bit to 'available' Gabriele Monaco
2025-05-06 9:15 ` [PATCH v4 2/5] timers: Add the available mask in timer migration Gabriele Monaco
@ 2025-05-06 9:15 ` Gabriele Monaco
2025-05-06 9:15 ` [PATCH v4 4/5] timers: Add timer_base_remote_is_idle to query from remote cpus Gabriele Monaco
2025-05-06 9:15 ` [PATCH v4 5/5] timers: Exclude isolated cpus from timer migration Gabriele Monaco
4 siblings, 0 replies; 14+ messages in thread
From: Gabriele Monaco @ 2025-05-06 9:15 UTC (permalink / raw)
To: linux-kernel, Frederic Weisbecker, Thomas Gleixner, Waiman Long
Cc: Gabriele Monaco
The function calls the workqueue_unbound_exclude_cpumask function when
there's a change in isolated CPUs. The function can be used for other
subsystems requiring updates when isolated CPUs change.
Generalise the name to update_exclusion_cpumasks to prepare for other
functions unrelated to workqueues to be called in that spot.
Signed-off-by: Gabriele Monaco <gmonaco@redhat.com>
---
kernel/cgroup/cpuset.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 306b60430091..95316d39c282 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -1323,7 +1323,7 @@ static bool partition_xcpus_del(int old_prs, struct cpuset *parent,
return isolcpus_updated;
}
-static void update_unbound_workqueue_cpumask(bool isolcpus_updated)
+static void update_exclusion_cpumasks(bool isolcpus_updated)
{
int ret;
@@ -1454,7 +1454,7 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs,
list_add(&cs->remote_sibling, &remote_children);
cpumask_copy(cs->effective_xcpus, tmp->new_cpus);
spin_unlock_irq(&callback_lock);
- update_unbound_workqueue_cpumask(isolcpus_updated);
+ update_exclusion_cpumasks(isolcpus_updated);
cpuset_force_rebuild();
cs->prs_err = 0;
@@ -1495,7 +1495,7 @@ static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp)
compute_effective_exclusive_cpumask(cs, NULL, NULL);
reset_partition_data(cs);
spin_unlock_irq(&callback_lock);
- update_unbound_workqueue_cpumask(isolcpus_updated);
+ update_exclusion_cpumasks(isolcpus_updated);
cpuset_force_rebuild();
/*
@@ -1563,7 +1563,7 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *xcpus,
if (xcpus)
cpumask_copy(cs->exclusive_cpus, xcpus);
spin_unlock_irq(&callback_lock);
- update_unbound_workqueue_cpumask(isolcpus_updated);
+ update_exclusion_cpumasks(isolcpus_updated);
if (adding || deleting)
cpuset_force_rebuild();
@@ -1906,7 +1906,7 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
WARN_ON_ONCE(parent->nr_subparts < 0);
}
spin_unlock_irq(&callback_lock);
- update_unbound_workqueue_cpumask(isolcpus_updated);
+ update_exclusion_cpumasks(isolcpus_updated);
if ((old_prs != new_prs) && (cmd == partcmd_update))
update_partition_exclusive_flag(cs, new_prs);
@@ -2931,7 +2931,7 @@ static int update_prstate(struct cpuset *cs, int new_prs)
else if (isolcpus_updated)
isolated_cpus_update(old_prs, new_prs, cs->effective_xcpus);
spin_unlock_irq(&callback_lock);
- update_unbound_workqueue_cpumask(isolcpus_updated);
+ update_exclusion_cpumasks(isolcpus_updated);
/* Force update if switching back to member & update effective_xcpus */
update_cpumasks_hier(cs, &tmpmask, !new_prs);
--
2.49.0
^ permalink raw reply related [flat|nested] 14+ messages in thread* [PATCH v4 4/5] timers: Add timer_base_remote_is_idle to query from remote cpus
2025-05-06 9:15 [PATCH v4 0/5] timers: Exclude isolated cpus from timer migration Gabriele Monaco
` (2 preceding siblings ...)
2025-05-06 9:15 ` [PATCH v4 3/5] cgroup/cpuset: Rename update_unbound_workqueue_cpumask to update_exclusion_cpumasks Gabriele Monaco
@ 2025-05-06 9:15 ` Gabriele Monaco
2025-05-06 9:15 ` [PATCH v4 5/5] timers: Exclude isolated cpus from timer migration Gabriele Monaco
4 siblings, 0 replies; 14+ messages in thread
From: Gabriele Monaco @ 2025-05-06 9:15 UTC (permalink / raw)
To: linux-kernel, Frederic Weisbecker, Thomas Gleixner, Waiman Long
Cc: Gabriele Monaco
The function timer_base_is_idle() allows querying whether the current
CPU's timer base is set to idle. There's currently no way to get the
same information for a remote CPU.
Add timer_base_remote_is_idle() which, given a CPU number, returns the
timer base idle state of that CPU.
Signed-off-by: Gabriele Monaco <gmonaco@redhat.com>
---
kernel/time/tick-internal.h | 1 +
kernel/time/timer.c | 11 +++++++++++
2 files changed, 12 insertions(+)
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index faac36de35b9..75580f7c69c6 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -167,6 +167,7 @@ extern void fetch_next_timer_interrupt_remote(unsigned long basej, u64 basem,
extern void timer_lock_remote_bases(unsigned int cpu);
extern void timer_unlock_remote_bases(unsigned int cpu);
extern bool timer_base_is_idle(void);
+extern bool timer_base_remote_is_idle(unsigned int cpu);
extern void timer_expire_remote(unsigned int cpu);
# endif
#else /* CONFIG_NO_HZ_COMMON */
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 4d915c0a263c..f7fbd3b3cb83 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -2162,6 +2162,17 @@ bool timer_base_is_idle(void)
return __this_cpu_read(timer_bases[BASE_LOCAL].is_idle);
}
+/**
+ * timer_base_remote_is_idle() - Return whether timer base is set idle for cpu
+ * @cpu: Remote CPU
+ *
+ * Returns value of local timer base is_idle value for remote cpu.
+ */
+bool timer_base_remote_is_idle(unsigned int cpu)
+{
+ return per_cpu(timer_bases[BASE_LOCAL].is_idle, cpu);
+}
+
static void __run_timer_base(struct timer_base *base);
/**
--
2.49.0
^ permalink raw reply related [flat|nested] 14+ messages in thread* [PATCH v4 5/5] timers: Exclude isolated cpus from timer migration
2025-05-06 9:15 [PATCH v4 0/5] timers: Exclude isolated cpus from timer migration Gabriele Monaco
` (3 preceding siblings ...)
2025-05-06 9:15 ` [PATCH v4 4/5] timers: Add timer_base_remote_is_idle to query from remote cpus Gabriele Monaco
@ 2025-05-06 9:15 ` Gabriele Monaco
2025-05-06 16:00 ` Frederic Weisbecker
4 siblings, 1 reply; 14+ messages in thread
From: Gabriele Monaco @ 2025-05-06 9:15 UTC (permalink / raw)
To: linux-kernel, Frederic Weisbecker, Thomas Gleixner, Waiman Long
Cc: Gabriele Monaco
The timer migration mechanism allows active CPUs to pull timers from
idle ones to improve the overall idle time. This is however undesired
when CPU intensive workloads run on isolated cores, as the algorithm
would move the timers from housekeeping to isolated cores, negatively
affecting the isolation.
This effect was noticed on a 128 cores machine running oslat on the
isolated cores (1-31,33-63,65-95,97-127). The tool monopolises CPUs,
and the CPU with lowest count in a timer migration hierarchy (here 1
and 65) appears as always active and continuously pulls global timers,
from the housekeeping CPUs. This ends up moving driver work (e.g.
delayed work) to isolated CPUs and causes latency spikes:
before the change:
# oslat -c 1-31,33-63,65-95,97-127 -D 62s
...
Maximum: 1203 10 3 4 ... 5 (us)
after the change:
# oslat -c 1-31,33-63,65-95,97-127 -D 62s
...
Maximum: 10 4 3 4 3 ... 5 (us)
Exclude isolated cores from the timer migration algorithm, extend the
concept of unavailable cores, currently used for offline ones, to
isolated ones:
* A core is unavailable if isolated or offline;
* A core is available if not isolated and online;
A core is considered unavailable as idle if:
* is in the isolcpus list
* is in the nohz_full list
* is in an isolated cpuset
Due to how the timer migration algorithm works, any CPU part of the
hierarchy can have their global timers pulled by remote CPUs and have to
pull remote timers, only skipping pulling remote timers would break the
logic.
For this reason, we prevent isolated CPUs from pulling remote global
timers, but also the other way around: any global timer started on an
isolated CPU will run there. This does not break the concept of
isolation (global timers don't come from outside the CPU) and, if
considered inappropriate, can usually be mitigated with other isolation
techniques (e.g. IRQ pinning).
Signed-off-by: Gabriele Monaco <gmonaco@redhat.com>
---
include/linux/timer.h | 6 ++++++
kernel/cgroup/cpuset.c | 2 ++
kernel/time/timer_migration.c | 31 ++++++++++++++++++++++++++++---
3 files changed, 36 insertions(+), 3 deletions(-)
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 10596d7c3a34..4722e075d984 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -190,4 +190,10 @@ int timers_dead_cpu(unsigned int cpu);
#define timers_dead_cpu NULL
#endif
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+extern void tmigr_isolated_exclude_cpumask(cpumask_var_t exclude_cpumask);
+#else
+static inline void tmigr_isolated_exclude_cpumask(cpumask_var_t exclude_cpumask) { }
+#endif
+
#endif
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 95316d39c282..866b4b818811 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -1334,6 +1334,8 @@ static void update_exclusion_cpumasks(bool isolcpus_updated)
ret = workqueue_unbound_exclude_cpumask(isolated_cpus);
WARN_ON_ONCE(ret < 0);
+
+ tmigr_isolated_exclude_cpumask(isolated_cpus);
}
/**
diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c
index 25439f961ccf..e4b394d78a8d 100644
--- a/kernel/time/timer_migration.c
+++ b/kernel/time/timer_migration.c
@@ -10,6 +10,7 @@
#include <linux/spinlock.h>
#include <linux/timerqueue.h>
#include <trace/events/ipi.h>
+#include <linux/sched/isolation.h>
#include "timer_migration.h"
#include "tick-internal.h"
@@ -1445,7 +1446,7 @@ static long tmigr_trigger_active(void *unused)
static int tmigr_cpu_unavailable(unsigned int cpu)
{
- struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
+ struct tmigr_cpu *tmc = per_cpu_ptr(&tmigr_cpu, cpu);
int migrator;
u64 firstexp;
@@ -1472,15 +1473,24 @@ static int tmigr_cpu_unavailable(unsigned int cpu)
static int tmigr_cpu_available(unsigned int cpu)
{
- struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
+ struct tmigr_cpu *tmc = per_cpu_ptr(&tmigr_cpu, cpu);
/* Check whether CPU data was successfully initialized */
if (WARN_ON_ONCE(!tmc->tmgroup))
return -EINVAL;
+ /*
+ * Isolated CPUs don't participate in timer migration.
+ * Checking here guarantees that CPUs isolated at boot (e.g. isolcpus)
+ * are not marked as available when they first become online.
+ * During runtime, any offline isolated CPU is also not incorrectly
+ * marked as available once it gets back online.
+ */
+ if (cpu_is_isolated(cpu))
+ return 0;
raw_spin_lock_irq(&tmc->lock);
trace_tmigr_cpu_available(tmc);
- tmc->idle = timer_base_is_idle();
+ tmc->idle = timer_base_remote_is_idle(cpu);
if (!tmc->idle)
__tmigr_cpu_activate(tmc);
tmc->available = true;
@@ -1489,6 +1499,21 @@ static int tmigr_cpu_available(unsigned int cpu)
return 0;
}
+void tmigr_isolated_exclude_cpumask(cpumask_var_t exclude_cpumask)
+{
+ int cpu;
+
+ lockdep_assert_cpus_held();
+
+ for_each_cpu_and(cpu, exclude_cpumask, tmigr_available_cpumask)
+ tmigr_cpu_unavailable(cpu);
+
+ for_each_cpu_andnot(cpu, cpu_online_mask, exclude_cpumask) {
+ if (!cpumask_test_cpu(cpu, tmigr_available_cpumask))
+ tmigr_cpu_available(cpu);
+ }
+}
+
static void tmigr_init_group(struct tmigr_group *group, unsigned int lvl,
int node)
{
--
2.49.0
^ permalink raw reply related [flat|nested] 14+ messages in thread* Re: [PATCH v4 5/5] timers: Exclude isolated cpus from timer migration
2025-05-06 9:15 ` [PATCH v4 5/5] timers: Exclude isolated cpus from timer migration Gabriele Monaco
@ 2025-05-06 16:00 ` Frederic Weisbecker
0 siblings, 0 replies; 14+ messages in thread
From: Frederic Weisbecker @ 2025-05-06 16:00 UTC (permalink / raw)
To: Gabriele Monaco; +Cc: linux-kernel, Thomas Gleixner, Waiman Long
Le Tue, May 06, 2025 at 11:15:40AM +0200, Gabriele Monaco a écrit :
> @@ -1472,15 +1473,24 @@ static int tmigr_cpu_unavailable(unsigned int cpu)
>
> static int tmigr_cpu_available(unsigned int cpu)
> {
> - struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
> + struct tmigr_cpu *tmc = per_cpu_ptr(&tmigr_cpu, cpu);
>
> /* Check whether CPU data was successfully initialized */
> if (WARN_ON_ONCE(!tmc->tmgroup))
> return -EINVAL;
>
> + /*
> + * Isolated CPUs don't participate in timer migration.
> + * Checking here guarantees that CPUs isolated at boot (e.g. isolcpus)
> + * are not marked as available when they first become online.
> + * During runtime, any offline isolated CPU is also not incorrectly
> + * marked as available once it gets back online.
> + */
> + if (cpu_is_isolated(cpu))
I would like nohz_full to remain an exception here. It already handles
well (even better than domain isolated CPUs) global timers by behaving like idle
CPUs. Because when the tick is stopped on nohz_full, the global timers are
then handled by housekeeping. We are doing something different with domain
isolated CPUs because those must still handle their own global timers.
So please keep nohz_full CPUs inside the tree (that includes CPUs that are
_both_ nohz_full and domain isolated).
> + return 0;
> raw_spin_lock_irq(&tmc->lock);
> trace_tmigr_cpu_available(tmc);
> - tmc->idle = timer_base_is_idle();
> + tmc->idle = timer_base_remote_is_idle(cpu);
This is racy:
CPU 0 CPU 1
----- -----
tick_nohz_stop_tick()
timer_base_try_to_set_idle()
__get_next_timer_interrupt()
tmigr_cpu_deactivate()
tmigr_isolated_exclude_cpumask()
tmigr_cpu_available()
tmc->idle = timer_base_remote_is_idle(cpu);
if (!tmc->idle)
__tmigr_cpu_activate(tmc);
base_global->is_idle = true;
CPU 0 can now become the migrator even when it's idle sleeping forever.
My suggestion is to not rely remotely on is_idle. This can only
be racy. You can trigger tmigr_cpu_available() through smp_call_function_many()
instead.
Thanks.
> if (!tmc->idle)
> __tmigr_cpu_activate(tmc);
> tmc->available = true;
> @@ -1489,6 +1499,21 @@ static int tmigr_cpu_available(unsigned int cpu)
> return 0;
> }
>
> +void tmigr_isolated_exclude_cpumask(cpumask_var_t exclude_cpumask)
> +{
> + int cpu;
> +
> + lockdep_assert_cpus_held();
> +
> + for_each_cpu_and(cpu, exclude_cpumask, tmigr_available_cpumask)
> + tmigr_cpu_unavailable(cpu);
> +
> + for_each_cpu_andnot(cpu, cpu_online_mask, exclude_cpumask) {
> + if (!cpumask_test_cpu(cpu, tmigr_available_cpumask))
> + tmigr_cpu_available(cpu);
> + }
> +}
> +
> static void tmigr_init_group(struct tmigr_group *group, unsigned int lvl,
> int node)
> {
> --
> 2.49.0
>
--
Frederic Weisbecker
SUSE Labs
^ permalink raw reply [flat|nested] 14+ messages in thread