From: Pingfan Liu <piliu@redhat.com>
To: linuxppc-dev@lists.ozlabs.org
Cc: Baoquan He <bhe@redhat.com>, Pingfan Liu <piliu@redhat.com>,
kexec@lists.infradead.org,
Mahesh Salgaonkar <mahesh@linux.ibm.com>,
Ming Lei <ming.lei@redhat.com>,
Wen Xiong <wenxiong@linux.ibm.com>,
Nicholas Piggin <npiggin@gmail.com>
Subject: [PATCHv8 4/5] powerpc/cpu: Skip impossible cpu during iteration on a core
Date: Mon, 9 Oct 2023 19:30:35 +0800 [thread overview]
Message-ID: <20231009113036.45988-5-piliu@redhat.com> (raw)
In-Reply-To: <20231009113036.45988-1-piliu@redhat.com>
The threads in a core have equal status, so the code introduces a for
loop pattern to execute the same task on each thread:
for (i = first_thread; i < first_thread + threads_per_core; i++)
Now that some threads may not be in the cpu_possible_mask, the iteration
skips those threads by checking the mask. In this way, the unpopulated
pcpu structs are skipped and never accessed.
Signed-off-by: Pingfan Liu <piliu@redhat.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Mahesh Salgaonkar <mahesh@linux.ibm.com>
Cc: Wen Xiong <wenxiong@linux.ibm.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Ming Lei <ming.lei@redhat.com>
Cc: kexec@lists.infradead.org
To: linuxppc-dev@lists.ozlabs.org
---
arch/powerpc/include/asm/cputhreads.h | 6 +++++
arch/powerpc/kernel/smp.c | 2 +-
arch/powerpc/kvm/book3s_hv.c | 7 ++----
arch/powerpc/platforms/powernv/idle.c | 32 ++++++++++++------------
arch/powerpc/platforms/powernv/subcore.c | 5 +++-
5 files changed, 29 insertions(+), 23 deletions(-)
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
index f26c430f3982..fdb71ff7f6a9 100644
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -65,6 +65,12 @@ static inline int cpu_last_thread_sibling(int cpu)
return cpu | (threads_per_core - 1);
}
+#define for_each_possible_cpu_in_core(start, iter) \
+ for (iter = start; iter < start + threads_per_core; iter++) \
+ if (unlikely(!cpu_possible(iter))) \
+ continue; \
+ else
+
/*
* tlb_thread_siblings are siblings which share a TLB. This is not
* architected, is not something a hypervisor could emulate and a future
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index fbbb695bae3d..2936f7a2240d 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -933,7 +933,7 @@ static int __init update_mask_from_threadgroup(cpumask_var_t *mask, struct threa
zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cpu));
- for (i = first_thread; i < first_thread + threads_per_core; i++) {
+ for_each_possible_cpu_in_core(first_thread, i) {
int i_group_start = get_cpu_thread_group_start(i, tg);
if (unlikely(i_group_start == -1)) {
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 130bafdb1430..ff4b3f8affba 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -6235,12 +6235,9 @@ static int kvm_init_subcore_bitmap(void)
return -ENOMEM;
- for (j = 0; j < threads_per_core; j++) {
- int cpu = first_cpu + j;
-
- paca_ptrs[cpu]->sibling_subcore_state =
+ for_each_possible_cpu_in_core(first_cpu, j)
+ paca_ptrs[j]->sibling_subcore_state =
sibling_subcore_state;
- }
}
return 0;
}
diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
index ad41dffe4d92..79d81ce5cf4c 100644
--- a/arch/powerpc/platforms/powernv/idle.c
+++ b/arch/powerpc/platforms/powernv/idle.c
@@ -823,36 +823,36 @@ void pnv_power9_force_smt4_catch(void)
cpu = smp_processor_id();
cpu0 = cpu & ~(threads_per_core - 1);
- for (thr = 0; thr < threads_per_core; ++thr) {
- if (cpu != cpu0 + thr)
- atomic_inc(&paca_ptrs[cpu0+thr]->dont_stop);
+ for_each_possible_cpu_in_core(cpu0, thr) {
+ if (cpu != thr)
+ atomic_inc(&paca_ptrs[thr]->dont_stop);
}
/* order setting dont_stop vs testing requested_psscr */
smp_mb();
- for (thr = 0; thr < threads_per_core; ++thr) {
- if (!paca_ptrs[cpu0+thr]->requested_psscr)
+ for_each_possible_cpu_in_core(cpu0, thr) {
+ if (!paca_ptrs[thr]->requested_psscr)
++awake_threads;
else
- poke_threads |= (1 << thr);
+ poke_threads |= (1 << (thr - cpu0));
}
/* If at least 3 threads are awake, the core is in SMT4 already */
if (awake_threads < need_awake) {
/* We have to wake some threads; we'll use msgsnd */
- for (thr = 0; thr < threads_per_core; ++thr) {
- if (poke_threads & (1 << thr)) {
+ for_each_possible_cpu_in_core(cpu0, thr) {
+ if (poke_threads & (1 << (thr - cpu0))) {
ppc_msgsnd_sync();
ppc_msgsnd(PPC_DBELL_MSGTYPE, 0,
- paca_ptrs[cpu0+thr]->hw_cpu_id);
+ paca_ptrs[thr]->hw_cpu_id);
}
}
/* now spin until at least 3 threads are awake */
do {
- for (thr = 0; thr < threads_per_core; ++thr) {
- if ((poke_threads & (1 << thr)) &&
- !paca_ptrs[cpu0+thr]->requested_psscr) {
+ for_each_possible_cpu_in_core(cpu0, thr) {
+ if ((poke_threads & (1 << (thr - cpu0))) &&
+ !paca_ptrs[thr]->requested_psscr) {
++awake_threads;
- poke_threads &= ~(1 << thr);
+ poke_threads &= ~(1 << (thr - cpu0));
}
}
} while (awake_threads < need_awake);
@@ -868,9 +868,9 @@ void pnv_power9_force_smt4_release(void)
cpu0 = cpu & ~(threads_per_core - 1);
/* clear all the dont_stop flags */
- for (thr = 0; thr < threads_per_core; ++thr) {
- if (cpu != cpu0 + thr)
- atomic_dec(&paca_ptrs[cpu0+thr]->dont_stop);
+ for_each_possible_cpu_in_core(cpu0, thr) {
+ if (cpu != thr)
+ atomic_dec(&paca_ptrs[thr]->dont_stop);
}
}
EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_release);
diff --git a/arch/powerpc/platforms/powernv/subcore.c b/arch/powerpc/platforms/powernv/subcore.c
index 191424468f10..b229115c8c0f 100644
--- a/arch/powerpc/platforms/powernv/subcore.c
+++ b/arch/powerpc/platforms/powernv/subcore.c
@@ -151,9 +151,12 @@ static void wait_for_sync_step(int step)
{
int i, cpu = smp_processor_id();
- for (i = cpu + 1; i < cpu + threads_per_core; i++)
+ for_each_possible_cpu_in_core(cpu, i) {
+ if (i == cpu)
+ continue;
while(per_cpu(split_state, i).step < step)
barrier();
+ }
/* Order the wait loop vs any subsequent loads/stores. */
mb();
--
2.31.1
next prev parent reply other threads:[~2023-10-09 11:35 UTC|newest]
Thread overview: 19+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-10-09 11:30 [PATCHv8 0/5] enable nr_cpus for powerpc Pingfan Liu
2023-10-09 11:30 ` [PATCHv8 1/5] powerpc/setup : Enable boot_cpu_hwid for PPC32 Pingfan Liu
2023-10-10 4:44 ` Sourabh Jain
2023-10-10 8:24 ` Sourabh Jain
2023-10-10 9:08 ` Sourabh Jain
2023-10-11 2:30 ` Pingfan Liu
2023-10-11 10:53 ` Sourabh Jain
2023-10-12 13:20 ` Pingfan Liu
2023-10-16 6:43 ` Sourabh Jain
2023-10-17 2:12 ` Pingfan Liu
2023-10-09 11:30 ` [PATCHv8 2/5] powerpc/setup: Loosen the mapping between cpu logical id and its seq in dt Pingfan Liu
2023-10-10 10:37 ` Hari Bathini
2023-10-11 3:11 ` Pingfan Liu
2023-10-09 11:30 ` [PATCHv8 3/5] powerpc/setup: Handle the case when boot_cpuid greater than nr_cpus Pingfan Liu
2023-10-10 8:26 ` Hari Bathini
2023-10-11 3:05 ` Pingfan Liu
2023-10-12 5:32 ` Hari Bathini
2023-10-09 11:30 ` Pingfan Liu [this message]
2023-10-09 11:30 ` [PATCHv8 5/5] powerpc/setup: alloc extra paca_ptrs to hold boot_cpuid Pingfan Liu
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20231009113036.45988-5-piliu@redhat.com \
--to=piliu@redhat.com \
--cc=bhe@redhat.com \
--cc=kexec@lists.infradead.org \
--cc=linuxppc-dev@lists.ozlabs.org \
--cc=mahesh@linux.ibm.com \
--cc=ming.lei@redhat.com \
--cc=npiggin@gmail.com \
--cc=wenxiong@linux.ibm.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).