From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: x86@kernel.org, David Woodhouse <dwmw@infradead.org>,
	Andrew Cooper <andrew.cooper3@citrix.com>,
	Brian Gerst <brgerst@gmail.com>,
	"Arjan van de Veen" <arjan@linux.intel.com>,
	Paolo Bonzini <pbonzini@redhat.com>,
	Paul McKenney <paulmck@kernel.org>,
	Tom Lendacky <thomas.lendacky@amd.com>,
	Sean Christopherson <seanjc@google.com>,
	Oleksandr Natalenko <oleksandr@natalenko.name>,
	Paul Menzel <pmenzel@molgen.mpg.de>,
	"Guilherme G. Piccoli" <gpiccoli@igalia.com>,
	Piotr Gorski <lucjan.lucjanov@gmail.com>,
	David Woodhouse <dwmw@amazon.co.uk>,
	Usama Arif <usama.arif@bytedance.com>,
	Juergen Gross <jgross@suse.com>,
	Boris Ostrovsky <boris.ostrovsky@oracle.com>,
	xen-devel@lists.xenproject.org,
	Russell King <linux@armlinux.org.uk>,
	Arnd Bergmann <arnd@arndb.de>,
	linux-arm-kernel@lists.infradead.org,
	Catalin Marinas <catalin.marinas@arm.com>,
	Will Deacon <will@kernel.org>, Guo Ren <guoren@kernel.org>,
	linux-csky@vger.kernel.org,
	Thomas Bogendoerfer <tsbogend@alpha.franken.de>,
	linux-mips@vger.kernel.org,
	"James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>,
	Helge Deller <deller@gmx.de>,
	linux-parisc@vger.kernel.org,
	Paul Walmsley <paul.walmsley@sifive.com>,
	Palmer Dabbelt <palmer@dabbelt.com>,
	linux-riscv@lists.infradead.org,
	Mark Rutland <mark.rutland@arm.com>,
	Sabin Rapan <sabrapan@amazon.com>
Subject: [patch 12/37] x86/smpboot: Make TSC synchronization function call based
Date: Sat, 15 Apr 2023 01:44:32 +0200 (CEST)
Message-ID: <20230414232309.948211096@linutronix.de>
In-Reply-To: <20230414225551.858160935@linutronix.de>

Spin-waiting on the control CPU until the AP reaches the TSC
synchronization code is a waste of time, especially when no
synchronization is required at all.

As the synchronization has to run with interrupts disabled, the control
CPU part can simply be done from an SMP function call. The upcoming AP
issues that call asynchronously, and only when synchronization is
actually required.
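
For illustration, a minimal sketch (plain C, not part of the patch) of the
call pattern, assuming the regular kernel SMP API: the AP packs its CPU
number into the info pointer and fires an asynchronous
smp_call_function_single() at the control CPU. The helper name
kick_control_cpu() is made up for this sketch; in the patch the call is
issued directly from check_tsc_sync_target().

#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/printk.h>

/* Control CPU side, invoked via the asynchronous SMP function call. */
static void check_tsc_sync_source(void *__cpu)
{
	/* The AP's CPU number was packed into the pointer value. */
	unsigned int cpu = (unsigned long)__cpu;

	pr_debug("Running TSC sync source side for CPU#%u\n", cpu);
	/* ... control CPU part of the synchronization goes here ... */
}

/* AP side: illustrative helper only. */
static void kick_control_cpu(unsigned int cpu)
{
	/*
	 * wait == 0 makes the call asynchronous: the AP does not spin
	 * until the control CPU has run the function, it proceeds
	 * straight into its own part of the synchronization.
	 */
	smp_call_function_single(cpumask_first(cpu_online_mask),
				 check_tsc_sync_source,
				 (void *)(unsigned long)cpu, 0);
}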

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/x86/include/asm/tsc.h |    2 --
 arch/x86/kernel/smpboot.c  |   20 +++-----------------
 arch/x86/kernel/tsc_sync.c |   36 +++++++++++-------------------------
 3 files changed, 14 insertions(+), 44 deletions(-)

--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -55,12 +55,10 @@ extern bool tsc_async_resets;
 #ifdef CONFIG_X86_TSC
 extern bool tsc_store_and_check_tsc_adjust(bool bootcpu);
 extern void tsc_verify_tsc_adjust(bool resume);
-extern void check_tsc_sync_source(int cpu);
 extern void check_tsc_sync_target(void);
 #else
 static inline bool tsc_store_and_check_tsc_adjust(bool bootcpu) { return false; }
 static inline void tsc_verify_tsc_adjust(bool resume) { }
-static inline void check_tsc_sync_source(int cpu) { }
 static inline void check_tsc_sync_target(void) { }
 #endif
 
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -278,11 +278,7 @@ static void notrace start_secondary(void
 	/* Otherwise gcc will move up smp_processor_id() before cpu_init() */
 	barrier();
 
-	/*
-	 * Check TSC synchronization with the control CPU, which will do
-	 * its part of this from wait_cpu_online(), making it an implicit
-	 * synchronization point.
-	 */
+	/* Check TSC synchronization with the control CPU. */
 	check_tsc_sync_target();
 
 	/*
@@ -1144,21 +1140,11 @@ static void wait_cpu_callin(unsigned int
 }
 
 /*
- * Bringup step four: Synchronize the TSC and wait for the target AP
- * to reach set_cpu_online() in start_secondary().
+ * Bringup step four: Wait for the target AP to reach set_cpu_online() in
+ * start_secondary().
  */
 static void wait_cpu_online(unsigned int cpu)
 {
-	unsigned long flags;
-
-	/*
-	 * Check TSC synchronization with the AP (keep irqs disabled
-	 * while doing so):
-	 */
-	local_irq_save(flags);
-	check_tsc_sync_source(cpu);
-	local_irq_restore(flags);
-
 	/*
 	 * Wait for the AP to mark itself online, so the core caller
 	 * can drop sparse_irq_lock.
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -245,7 +245,6 @@ bool tsc_store_and_check_tsc_adjust(bool
  */
 static atomic_t start_count;
 static atomic_t stop_count;
-static atomic_t skip_test;
 static atomic_t test_runs;
 
 /*
@@ -344,21 +343,14 @@ static inline unsigned int loop_timeout(
 }
 
 /*
- * Source CPU calls into this - it waits for the freshly booted
- * target CPU to arrive and then starts the measurement:
+ * The freshly booted CPU initiates this via an async SMP function call.
  */
-void check_tsc_sync_source(int cpu)
+static void check_tsc_sync_source(void *__cpu)
 {
+	unsigned int cpu = (unsigned long)__cpu;
 	int cpus = 2;
 
 	/*
-	 * No need to check if we already know that the TSC is not
-	 * synchronized or if we have no TSC.
-	 */
-	if (unsynchronized_tsc())
-		return;
-
-	/*
 	 * Set the maximum number of test runs to
 	 *  1 if the CPU does not provide the TSC_ADJUST MSR
 	 *  3 if the MSR is available, so the target can try to adjust
@@ -368,16 +360,9 @@ void check_tsc_sync_source(int cpu)
 	else
 		atomic_set(&test_runs, 3);
 retry:
-	/*
-	 * Wait for the target to start or to skip the test:
-	 */
-	while (atomic_read(&start_count) != cpus - 1) {
-		if (atomic_read(&skip_test) > 0) {
-			atomic_set(&skip_test, 0);
-			return;
-		}
+	/* Wait for the target to start. */
+	while (atomic_read(&start_count) != cpus - 1)
 		cpu_relax();
-	}
 
 	/*
 	 * Trigger the target to continue into the measurement too:
@@ -397,14 +382,14 @@ void check_tsc_sync_source(int cpu)
 	if (!nr_warps) {
 		atomic_set(&test_runs, 0);
 
-		pr_debug("TSC synchronization [CPU#%d -> CPU#%d]: passed\n",
+		pr_debug("TSC synchronization [CPU#%d -> CPU#%u]: passed\n",
 			smp_processor_id(), cpu);
 
 	} else if (atomic_dec_and_test(&test_runs) || random_warps) {
 		/* Force it to 0 if random warps brought us here */
 		atomic_set(&test_runs, 0);
 
-		pr_warn("TSC synchronization [CPU#%d -> CPU#%d]:\n",
+		pr_warn("TSC synchronization [CPU#%d -> CPU#%u]:\n",
 			smp_processor_id(), cpu);
 		pr_warn("Measured %Ld cycles TSC warp between CPUs, "
 			"turning off TSC clock.\n", max_warp);
@@ -457,11 +442,12 @@ void check_tsc_sync_target(void)
 	 * SoCs the TSC is frequency synchronized, but still the TSC ADJUST
 	 * register might have been wreckaged by the BIOS..
 	 */
-	if (tsc_store_and_check_tsc_adjust(false) || tsc_clocksource_reliable) {
-		atomic_inc(&skip_test);
+	if (tsc_store_and_check_tsc_adjust(false) || tsc_clocksource_reliable)
 		return;
-	}
 
+	/* Kick the control CPU into the TSC synchronization function */
+	smp_call_function_single(cpumask_first(cpu_online_mask), check_tsc_sync_source,
+				 (unsigned long *)(unsigned long)cpu, 0);
 retry:
 	/*
 	 * Register this CPU's participation and wait for the

