* [PATCH v2 1/4] powerpc: Use cpumask_next_wrap instead
2026-04-27 4:47 [PATCH v2 0/4] powerpc: A few misc cpumask changes Shrikanth Hegde
@ 2026-04-27 4:47 ` Shrikanth Hegde
2026-04-27 4:47 ` [PATCH v2 2/4] powerpc: Simplify cpumask api usage for cpuinfo display Shrikanth Hegde
` (2 subsequent siblings)
3 siblings, 0 replies; 5+ messages in thread
From: Shrikanth Hegde @ 2026-04-27 4:47 UTC (permalink / raw)
To: maddy, linuxppc-dev, yury.norov, linux, linux-kernel
Cc: sshegde, chleroy, Yury Norov
cpu = cpumask_next(cpu, mask)
if (cpu >= nr_cpu_ids)
cpu = cpumask_first(mask)
Above block is identical to:
cpu = cpumask_next_wrap(cpu, mask)
Replace it. There is no change in functionality or performance,
and the resulting code is slightly simpler.
Reviewed-by: Yury Norov <ynorov@nvidia.com>
Signed-off-by: Shrikanth Hegde <sshegde@linux.ibm.com>
---
arch/powerpc/kernel/irq.c | 5 +----
arch/powerpc/mm/book3s64/hash_utils.c | 4 +---
2 files changed, 2 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index a0e8b998c9b5..f69de08ad347 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -370,10 +370,7 @@ int irq_choose_cpu(const struct cpumask *mask)
do_round_robin:
raw_spin_lock_irqsave(&irq_rover_lock, flags);
- irq_rover = cpumask_next(irq_rover, cpu_online_mask);
- if (irq_rover >= nr_cpu_ids)
- irq_rover = cpumask_first(cpu_online_mask);
-
+ irq_rover = cpumask_next_wrap(irq_rover, cpu_online_mask);
cpuid = irq_rover;
raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 9dc5889d6ecb..e4fcf929cb33 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -1299,9 +1299,7 @@ static void stress_hpt_timer_fn(struct timer_list *timer)
if (!firmware_has_feature(FW_FEATURE_LPAR))
tlbiel_all();
- next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
- if (next_cpu >= nr_cpu_ids)
- next_cpu = cpumask_first(cpu_online_mask);
+ next_cpu = cpumask_next_wrap(raw_smp_processor_id(), cpu_online_mask);
stress_hpt_timer.expires = jiffies + msecs_to_jiffies(10);
add_timer_on(&stress_hpt_timer, next_cpu);
}
--
2.47.3
^ permalink raw reply related [flat|nested] 5+ messages in thread* [PATCH v2 2/4] powerpc: Simplify cpumask api usage for cpuinfo display
2026-04-27 4:47 [PATCH v2 0/4] powerpc: A few misc cpumask changes Shrikanth Hegde
2026-04-27 4:47 ` [PATCH v2 1/4] powerpc: Use cpumask_next_wrap instead Shrikanth Hegde
@ 2026-04-27 4:47 ` Shrikanth Hegde
2026-04-27 4:47 ` [PATCH v2 3/4] powerpc/perf: Use cpumask_intersects api for checking disable path Shrikanth Hegde
2026-04-27 4:47 ` [PATCH v2 4/4] powerpc/xive: Add warning if target CPU not found Shrikanth Hegde
3 siblings, 0 replies; 5+ messages in thread
From: Shrikanth Hegde @ 2026-04-27 4:47 UTC (permalink / raw)
To: maddy, linuxppc-dev, yury.norov, linux, linux-kernel
Cc: sshegde, chleroy, Yury Norov
- cpumask_next can take -1 as a valid argument, so simplify the cpuinfo
iterator.
- Use cpumask_last to find if this_cpu is last online CPU.
/proc/cpuinfo shows same info with patch.
Reviewed-by: Yury Norov <ynorov@nvidia.com>
Signed-off-by: Shrikanth Hegde <sshegde@linux.ibm.com>
---
arch/powerpc/kernel/setup-common.c | 7 ++-----
1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 8a86b0efcb1c..aecabe9cf139 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -323,7 +323,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_putc(m, '\n');
/* If this is the last cpu, print the summary */
- if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids)
+ if (cpu_id == cpumask_last(cpu_online_mask))
show_cpuinfo_summary(m);
return 0;
@@ -331,10 +331,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
static void *c_start(struct seq_file *m, loff_t *pos)
{
- if (*pos == 0) /* just in case, cpu 0 is not the first */
- *pos = cpumask_first(cpu_online_mask);
- else
- *pos = cpumask_next(*pos - 1, cpu_online_mask);
+ *pos = cpumask_next(*pos - 1, cpu_online_mask);
if ((*pos) < nr_cpu_ids)
return (void *)(unsigned long)(*pos + 1);
return NULL;
--
2.47.3
^ permalink raw reply related [flat|nested] 5+ messages in thread* [PATCH v2 3/4] powerpc/perf: Use cpumask_intersects api for checking disable path
2026-04-27 4:47 [PATCH v2 0/4] powerpc: A few misc cpumask changes Shrikanth Hegde
2026-04-27 4:47 ` [PATCH v2 1/4] powerpc: Use cpumask_next_wrap instead Shrikanth Hegde
2026-04-27 4:47 ` [PATCH v2 2/4] powerpc: Simplify cpumask api usage for cpuinfo display Shrikanth Hegde
@ 2026-04-27 4:47 ` Shrikanth Hegde
2026-04-27 4:47 ` [PATCH v2 4/4] powerpc/xive: Add warning if target CPU not found Shrikanth Hegde
3 siblings, 0 replies; 5+ messages in thread
From: Shrikanth Hegde @ 2026-04-27 4:47 UTC (permalink / raw)
To: maddy, linuxppc-dev, yury.norov, linux, linux-kernel; +Cc: sshegde, chleroy
The first online CPU in the node disables the nest counters by
making an OPAL call. Any other CPU in that node will bail out.
Instead of using a temporary mask to find out whether any CPU in the
node has already been visited, it is better to use the
cpumask_intersects api to achieve the same result.
Similarly a temporary cpumask is used to check if a core is already part
of core_imc_cpumask. Use the same cpumask_intersects api there.
Signed-off-by: Shrikanth Hegde <sshegde@linux.ibm.com>
---
arch/powerpc/perf/imc-pmu.c | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index c1563b4eaa94..e3822f36c419 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -421,7 +421,6 @@ static int ppc_nest_imc_cpu_offline(unsigned int cpu)
static int ppc_nest_imc_cpu_online(unsigned int cpu)
{
const struct cpumask *l_cpumask;
- static struct cpumask tmp_mask;
int res;
/* Get the cpumask of this node */
@@ -431,7 +430,7 @@ static int ppc_nest_imc_cpu_online(unsigned int cpu)
* If this is not the first online CPU on this node, then
* just return.
*/
- if (cpumask_and(&tmp_mask, l_cpumask, &nest_imc_cpumask))
+ if (cpumask_intersects(l_cpumask, &nest_imc_cpumask))
return 0;
/*
@@ -647,14 +646,13 @@ static bool is_core_imc_mem_inited(int cpu)
static int ppc_core_imc_cpu_online(unsigned int cpu)
{
const struct cpumask *l_cpumask;
- static struct cpumask tmp_mask;
int ret = 0;
/* Get the cpumask for this core */
l_cpumask = cpu_sibling_mask(cpu);
/* If a cpu for this core is already set, then, don't do anything */
- if (cpumask_and(&tmp_mask, l_cpumask, &core_imc_cpumask))
+ if (cpumask_intersects(l_cpumask, &core_imc_cpumask))
return 0;
if (!is_core_imc_mem_inited(cpu)) {
--
2.47.3
^ permalink raw reply related [flat|nested] 5+ messages in thread* [PATCH v2 4/4] powerpc/xive: Add warning if target CPU not found
2026-04-27 4:47 [PATCH v2 0/4] powerpc: A few misc cpumask changes Shrikanth Hegde
` (2 preceding siblings ...)
2026-04-27 4:47 ` [PATCH v2 3/4] powerpc/perf: Use cpumask_intersects api for checking disable path Shrikanth Hegde
@ 2026-04-27 4:47 ` Shrikanth Hegde
3 siblings, 0 replies; 5+ messages in thread
From: Shrikanth Hegde @ 2026-04-27 4:47 UTC (permalink / raw)
To: maddy, linuxppc-dev, yury.norov, linux, linux-kernel; +Cc: sshegde, chleroy
Add a warn_once to warn if the target CPU is not found. This could help
to find out about any such use case.
This is a very rare case, which either means mask was empty or
the atomic update failed for all online CPUs, so it is worth flagging
that path for a potential fix.
Signed-off-by: Shrikanth Hegde <sshegde@linux.ibm.com>
---
arch/powerpc/sysdev/xive/common.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index c120be73d149..dadd1f46ec93 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -564,6 +564,7 @@ static int xive_find_target_in_mask(const struct cpumask *mask,
return cpu;
}
+ WARN_ONCE(1, "target CPU not found in mask: %*pbl\n", cpumask_pr_args(mask));
return -1;
}
--
2.47.3
^ permalink raw reply related [flat|nested] 5+ messages in thread