From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: x86@kernel.org, David Woodhouse <dwmw@infradead.org>,
Andrew Cooper <andrew.cooper3@citrix.com>,
Brian Gerst <brgerst@gmail.com>,
"Arjan van de Veen" <arjan@linux.intel.com>,
Paolo Bonzini <pbonzini@redhat.com>,
Paul McKenney <paulmck@kernel.org>,
Tom Lendacky <thomas.lendacky@amd.com>,
Sean Christopherson <seanjc@google.com>,
Oleksandr Natalenko <oleksandr@natalenko.name>,
Paul Menzel <pmenzel@molgen.mpg.de>,
"Guilherme G. Piccoli" <gpiccoli@igalia.com>,
Piotr Gorski <lucjan.lucjanov@gmail.com>,
David Woodhouse <dwmw@amazon.co.uk>,
Usama Arif <usama.arif@bytedance.com>,
Juergen Gross <jgross@suse.com>,
Boris Ostrovsky <boris.ostrovsky@oracle.com>,
xen-devel@lists.xenproject.org,
Russell King <linux@armlinux.org.uk>,
Arnd Bergmann <arnd@arndb.de>,
linux-arm-kernel@lists.infradead.org,
Catalin Marinas <catalin.marinas@arm.com>,
Will Deacon <will@kernel.org>, Guo Ren <guoren@kernel.org>,
linux-csky@vger.kernel.org,
Thomas Bogendoerfer <tsbogend@alpha.franken.de>,
linux-mips@vger.kernel.org,
"James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>,
Helge Deller <deller@gmx.de>,
linux-parisc@vger.kernel.org,
Paul Walmsley <paul.walmsley@sifive.com>,
Palmer Dabbelt <palmer@dabbelt.com>,
linux-riscv@lists.infradead.org,
Mark Rutland <mark.rutland@arm.com>,
Sabin Rapan <sabrapan@amazon.com>
Subject: [patch 13/37] x86/smpboot: Remove cpu_callin_mask
Date: Sat, 15 Apr 2023 01:44:34 +0200 (CEST)
Message-ID: <20230414232310.010585365@linutronix.de>
In-Reply-To: <20230414225551.858160935@linutronix.de>
Now that TSC synchronization is SMP function call based, there is no reason
to wait for the AP to set its bit in cpu_callin_mask. The control CPU waits
for the AP to set itself in the online mask anyway.
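For illustration only, the control CPU side of the bringup now reduces to
two synchronization points with the AP. The sketch below is not a verbatim
copy of native_cpu_up(); it is simplified (straight returns instead of the
goto/out error path) and the _sketch name is made up, while the helper names
and signatures are taken from the hunks further down:

	/*
	 * Hypothetical sketch, simplified from the diff below: with
	 * cpu_callin_mask gone, native_cpu_up() only synchronizes on
	 * cpu_initialized_mask and on the online mask.
	 */
	static int native_cpu_up_sketch(unsigned int cpu, struct task_struct *tidle)
	{
		int ret;

		/* Kick the AP into life (INIT/SIPI) */
		ret = native_kick_ap(cpu, tidle);
		if (ret)
			return ret;

		/* Bringup step two: AP shows up in cpu_initialized_mask */
		ret = wait_cpu_initialized(cpu);
		if (ret)
			return ret;

		/* Bringup step three: AP reaches set_cpu_online() */
		wait_cpu_online(cpu);
		return 0;
	}

The AP's only remaining obligations towards the control CPU are to mark
itself in cpu_initialized_mask from cpu_init() and then to set itself online
from start_secondary().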
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/x86/kernel/smpboot.c | 61 +++++++---------------------------------------
1 file changed, 10 insertions(+), 51 deletions(-)
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -104,7 +104,6 @@ EXPORT_PER_CPU_SYMBOL(cpu_info);
/* All of these masks are initialized in setup_cpu_local_masks() */
static cpumask_var_t cpu_initialized_mask;
static cpumask_var_t cpu_callout_mask;
-static cpumask_var_t cpu_callin_mask;
/* Representing CPUs for which sibling maps can be computed */
static cpumask_var_t cpu_sibling_setup_mask;
@@ -167,21 +166,16 @@ static inline void smpboot_restore_warm_
*/
static void smp_callin(void)
{
- int cpuid;
+ int cpuid = smp_processor_id();
/*
* If waken up by an INIT in an 82489DX configuration
- * cpu_callout_mask guarantees we don't get here before
- * an INIT_deassert IPI reaches our local APIC, so it is
- * now safe to touch our local APIC.
- */
- cpuid = smp_processor_id();
-
- /*
- * the boot CPU has finished the init stage and is spinning
- * on callin_map until we finish. We are free to set up this
- * CPU, first the APIC. (this is probably redundant on most
- * boards)
+ * cpu_callout_mask guarantees we don't get here before an
+ * INIT_deassert IPI reaches our local APIC, so it is now safe to
+ * touch our local APIC.
+ *
+ * Set up this CPU, first the APIC, which is probably redundant on
+ * most boards.
*/
apic_ap_setup();
@@ -192,7 +186,7 @@ static void smp_callin(void)
* The topology information must be up to date before
* calibrate_delay() and notify_cpu_starting().
*/
- set_cpu_sibling_map(raw_smp_processor_id());
+ set_cpu_sibling_map(cpuid);
ap_init_aperfmperf();
@@ -205,11 +199,6 @@ static void smp_callin(void)
* state (CPUHP_ONLINE in the case of serial bringup).
*/
notify_cpu_starting(cpuid);
-
- /*
- * Allow the master to continue.
- */
- cpumask_set_cpu(cpuid, cpu_callin_mask);
}
static void ap_calibrate_delay(void)
@@ -268,11 +257,6 @@ static void notrace start_secondary(void
rcu_cpu_starting(raw_smp_processor_id());
x86_cpuinit.early_percpu_clock_init();
- /*
- * Sync point with wait_cpu_callin(). The AP doesn't wait here
- * but just sets the bit to let the controlling CPU (BSP) know that
- * it's got this far.
- */
smp_callin();
/* Otherwise gcc will move up smp_processor_id() before cpu_init() */
@@ -1112,7 +1096,7 @@ static int wait_cpu_cpumask(unsigned int
* and thus wait_for_master_cpu(), then set cpu_callout_mask to allow it
* to proceed. The AP will then proceed past setting its 'callin' bit
* and end up waiting in check_tsc_sync_target() until we reach
- * do_wait_cpu_online() to tend to it.
+ * wait_cpu_online() to tend to it.
*/
static int wait_cpu_initialized(unsigned int cpu)
{
@@ -1127,20 +1111,7 @@ static int wait_cpu_initialized(unsigned
}
/*
- * Bringup step three: Wait for the target AP to reach smp_callin().
- * The AP is not waiting for us here so we don't need to parallelise
- * this step. Not entirely clear why we care about this, since we just
- * proceed directly to TSC synchronization which is the next sync
- * point with the AP anyway.
- */
-static void wait_cpu_callin(unsigned int cpu)
-{
- while (!cpumask_test_cpu(cpu, cpu_callin_mask))
- schedule();
-}
-
-/*
- * Bringup step four: Wait for the target AP to reach set_cpu_online() in
+ * Bringup step three: Wait for the target AP to reach set_cpu_online() in
* start_secondary().
*/
static void wait_cpu_online(unsigned int cpu)
@@ -1170,14 +1141,6 @@ static int native_kick_ap(unsigned int c
}
/*
- * Already booted CPU?
- */
- if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
- pr_debug("do_boot_cpu %d Already started\n", cpu);
- return -ENOSYS;
- }
-
- /*
* Save current MTRR state in case it was changed since early boot
* (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
*/
@@ -1214,7 +1177,6 @@ int native_cpu_up(unsigned int cpu, stru
if (ret)
goto out;
- wait_cpu_callin(cpu);
wait_cpu_online(cpu);
out:
@@ -1330,7 +1292,6 @@ void __init smp_prepare_cpus_common(void
* Setup boot CPU information
*/
smp_store_boot_cpu_info(); /* Final full version of the data */
- cpumask_copy(cpu_callin_mask, cpumask_of(0));
mb();
for_each_possible_cpu(i) {
@@ -1545,7 +1506,6 @@ early_param("possible_cpus", _setup_poss
void __init setup_cpu_local_masks(void)
{
alloc_bootmem_cpumask_var(&cpu_initialized_mask);
- alloc_bootmem_cpumask_var(&cpu_callin_mask);
alloc_bootmem_cpumask_var(&cpu_callout_mask);
alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}
@@ -1609,7 +1569,6 @@ static void remove_cpu_from_maps(int cpu
{
set_cpu_online(cpu, false);
cpumask_clear_cpu(cpu, cpu_callout_mask);
- cpumask_clear_cpu(cpu, cpu_callin_mask);
/* was set by cpu_init() */
cpumask_clear_cpu(cpu, cpu_initialized_mask);
numa_remove_cpu(cpu);