From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: x86@kernel.org, David Woodhouse <dwmw@infradead.org>,
Andrew Cooper <andrew.cooper3@citrix.com>,
Brian Gerst <brgerst@gmail.com>,
"Arjan van de Veen" <arjan@linux.intel.com>,
Paolo Bonzini <pbonzini@redhat.com>,
Paul McKenney <paulmck@kernel.org>,
Tom Lendacky <thomas.lendacky@amd.com>,
Sean Christopherson <seanjc@google.com>,
Oleksandr Natalenko <oleksandr@natalenko.name>,
Paul Menzel <pmenzel@molgen.mpg.de>,
"Guilherme G. Piccoli" <gpiccoli@igalia.com>,
Piotr Gorski <lucjan.lucjanov@gmail.com>,
Juergen Gross <jgross@suse.com>,
Boris Ostrovsky <boris.ostrovsky@oracle.com>,
xen-devel@lists.xenproject.org,
David Woodhouse <dwmw@amazon.co.uk>,
Usama Arif <usama.arif@bytedance.com>,
Russell King <linux@armlinux.org.uk>,
Arnd Bergmann <arnd@arndb.de>,
linux-arm-kernel@lists.infradead.org,
Catalin Marinas <catalin.marinas@arm.com>,
Will Deacon <will@kernel.org>, Guo Ren <guoren@kernel.org>,
linux-csky@vger.kernel.org,
Thomas Bogendoerfer <tsbogend@alpha.franken.de>,
linux-mips@vger.kernel.org,
"James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>,
Helge Deller <deller@gmx.de>,
linux-parisc@vger.kernel.org,
Paul Walmsley <paul.walmsley@sifive.com>,
Palmer Dabbelt <palmer@dabbelt.com>,
linux-riscv@lists.infradead.org,
Mark Rutland <mark.rutland@arm.com>,
Sabin Rapan <sabrapan@amazon.com>
Subject: [patch 19/37] x86/smpboot: Switch to hotplug core state synchronization
Date: Sat, 15 Apr 2023 01:44:44 +0200 (CEST)
Message-ID: <20230414232310.382005483@linutronix.de>
In-Reply-To: <20230414225551.858160935@linutronix.de>
The new AP state tracking and synchronization mechanism in the CPU hotplug
core code allows the removal of quite a bit of x86-specific code:
1) The AP alive synchronization based on cpumasks
2) The decision whether an AP can be brought up again
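For illustration, a condensed sketch of the resulting handshake between the
AP and the hotplug core. The function names are the ones introduced or used
by this series; the snippet is pieced together from the hunks below and is
not a standalone compilable unit:

/* AP side, early in start_secondary() / cpu_bringup() */
static void start_secondary_sketch(void)
{
	cpu_init_exception_handling();
	/* Mark this CPU ALIVE and wait for the control CPU to release it */
	cpuhp_ap_sync_alive();
	cpu_init();
	/* ... remaining bringup, ends with set_cpu_online() ... */
}

/*
 * Control CPU side: the hotplug core waits for the ALIVE/DEAD states.
 * The architecture only provides cleanup and poll hooks.
 */
void arch_cpuhp_cleanup_kick_cpu(unsigned int cpu)
{
	/* Undo the warm reset vector setup if kicking the AP failed */
	if (smp_ops.cpu_up == native_cpu_up && x86_platform.legacy.warm_reset)
		smpboot_restore_warm_reset_vector();
}

void arch_cpuhp_sync_state_poll(void)
{
	/* Xen PV yields to the hypervisor while polling; bare metal: no-op */
	if (smp_ops.poll_sync_state)
		smp_ops.poll_sync_state();
}

/* Teardown: the dying AP reports DEAD instead of updating cpumasks */
void play_dead_common_sketch(void)
{
	idle_task_exit();
	cpuhp_ap_report_dead();
	/* ... halt / mwait ... */
}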
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Juergen Gross <jgross@suse.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: xen-devel@lists.xenproject.org
---
arch/x86/Kconfig | 1
arch/x86/include/asm/smp.h | 7 +
arch/x86/kernel/smp.c | 1
arch/x86/kernel/smpboot.c | 159 ++++++++++-----------------------------------
arch/x86/xen/smp_hvm.c | 16 +---
arch/x86/xen/smp_pv.c | 39 ++++++-----
6 files changed, 72 insertions(+), 151 deletions(-)
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -272,6 +272,7 @@ config X86
select HAVE_UNSTABLE_SCHED_CLOCK
select HAVE_USER_RETURN_NOTIFIER
select HAVE_GENERIC_VDSO
+ select HOTPLUG_CORE_SYNC_FULL if SMP
select HOTPLUG_SMT if SMP
select IRQ_FORCED_THREADING
select NEED_PER_CPU_EMBED_FIRST_CHUNK
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -38,6 +38,8 @@ struct smp_ops {
void (*crash_stop_other_cpus)(void);
void (*smp_send_reschedule)(int cpu);
+ void (*cleanup_dead_cpu)(unsigned cpu);
+ void (*poll_sync_state)(void);
int (*cpu_up)(unsigned cpu, struct task_struct *tidle);
int (*cpu_disable)(void);
void (*cpu_die)(unsigned int cpu);
@@ -90,7 +92,8 @@ static inline int __cpu_disable(void)
static inline void __cpu_die(unsigned int cpu)
{
- smp_ops.cpu_die(cpu);
+ if (smp_ops.cpu_die)
+ smp_ops.cpu_die(cpu);
}
static inline void play_dead(void)
@@ -122,8 +125,6 @@ void native_smp_cpus_done(unsigned int m
int common_cpu_up(unsigned int cpunum, struct task_struct *tidle);
int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
int native_cpu_disable(void);
-int common_cpu_die(unsigned int cpu);
-void native_cpu_die(unsigned int cpu);
void hlt_play_dead(void);
void native_play_dead(void);
void play_dead_common(void);
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -269,7 +269,6 @@ struct smp_ops smp_ops = {
.smp_send_reschedule = native_smp_send_reschedule,
.cpu_up = native_cpu_up,
- .cpu_die = native_cpu_die,
.cpu_disable = native_cpu_disable,
.play_dead = native_play_dead,
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -57,6 +57,7 @@
#include <linux/pgtable.h>
#include <linux/overflow.h>
#include <linux/stackprotector.h>
+#include <linux/cpuhotplug.h>
#include <asm/acpi.h>
#include <asm/cacheinfo.h>
@@ -101,9 +102,6 @@ EXPORT_PER_CPU_SYMBOL(cpu_die_map);
DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);
-/* All of these masks are initialized in setup_cpu_local_masks() */
-static cpumask_var_t cpu_initialized_mask;
-static cpumask_var_t cpu_callout_mask;
/* Representing CPUs for which sibling maps can be computed */
static cpumask_var_t cpu_sibling_setup_mask;
@@ -169,8 +167,8 @@ static void smp_callin(void)
int cpuid = smp_processor_id();
/*
- * If waken up by an INIT in an 82489DX configuration
- * cpu_callout_mask guarantees we don't get here before an
+ * If woken up by an INIT in an 82489DX configuration, the alive
+ * synchronization guarantees we don't get here before an
* INIT_deassert IPI reaches our local APIC, so it is now safe to
* touch our local APIC.
*
@@ -216,17 +214,6 @@ static void ap_calibrate_delay(void)
cpu_data(smp_processor_id()).loops_per_jiffy = loops_per_jiffy;
}
-static void wait_for_master_cpu(int cpu)
-{
- /*
- * Wait for release by control CPU before continuing with AP
- * initialization.
- */
- WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask));
- while (!cpumask_test_cpu(cpu, cpu_callout_mask))
- cpu_relax();
-}
-
/*
* Activate a secondary processor.
*/
@@ -247,11 +234,10 @@ static void notrace start_secondary(void
cpu_init_exception_handling();
/*
- * Sync point with wait_cpu_initialized(). Sets AP in
- * cpu_initialized_mask and then waits for the control CPU
- * to release it.
+ * Sync point with the hotplug core. Sets the sync state to ALIVE
+ * and waits for the control CPU to release it.
*/
- wait_for_master_cpu(raw_smp_processor_id());
+ cpuhp_ap_sync_alive();
cpu_init();
rcu_cpu_starting(raw_smp_processor_id());
@@ -285,7 +271,6 @@ static void notrace start_secondary(void
set_cpu_online(smp_processor_id(), true);
lapic_online();
unlock_vector_lock();
- cpu_set_state_online(smp_processor_id());
x86_platform.nmi_init();
/* enable local interrupts */
@@ -736,9 +721,10 @@ static void impress_friends(void)
* Allow the user to impress friends.
*/
pr_debug("Before bogomips\n");
- for_each_possible_cpu(cpu)
- if (cpumask_test_cpu(cpu, cpu_callout_mask))
+ for_each_possible_cpu(cpu) {
+ if (cpumask_test_cpu(cpu, cpu_online_mask))
bogosum += cpu_data(cpu).loops_per_jiffy;
+ }
pr_info("Total of %d processors activated (%lu.%02lu BogoMIPS)\n",
num_online_cpus(),
bogosum/(500000/HZ),
@@ -1010,6 +996,7 @@ int common_cpu_up(unsigned int cpu, stru
static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
{
unsigned long start_ip = real_mode_header->trampoline_start;
+ int ret;
#ifdef CONFIG_X86_64
/* If 64-bit wakeup method exists, use the 64-bit mode trampoline IP */
@@ -1050,13 +1037,6 @@ static int do_boot_cpu(int apicid, int c
}
}
- /*
- * AP might wait on cpu_callout_mask in cpu_init() with
- * cpu_initialized_mask set if previous attempt to online
- * it timed-out. Clear cpu_initialized_mask so that after
- * INIT/SIPI it could start with a clean state.
- */
- cpumask_clear_cpu(cpu, cpu_initialized_mask);
smp_mb();
/*
@@ -1067,47 +1047,16 @@ static int do_boot_cpu(int apicid, int c
* - Use an INIT boot APIC message
*/
if (apic->wakeup_secondary_cpu_64)
- return apic->wakeup_secondary_cpu_64(apicid, start_ip);
+ ret = apic->wakeup_secondary_cpu_64(apicid, start_ip);
else if (apic->wakeup_secondary_cpu)
- return apic->wakeup_secondary_cpu(apicid, start_ip);
-
- return wakeup_secondary_cpu_via_init(apicid, start_ip);
-}
-
-static int wait_cpu_cpumask(unsigned int cpu, const struct cpumask *mask)
-{
- unsigned long timeout;
-
- /*
- * Wait up to 10s for the CPU to report in.
- */
- timeout = jiffies + 10*HZ;
- while (time_before(jiffies, timeout)) {
- if (cpumask_test_cpu(cpu, mask))
- return 0;
-
- schedule();
- }
- return -1;
-}
-
-/*
- * Bringup step two: Wait for the target AP to reach cpu_init_secondary()
- * and thus wait_for_master_cpu(), then set cpu_callout_mask to allow it
- * to proceed. The AP will then proceed past setting its 'callin' bit
- * and end up waiting in check_tsc_sync_target() until we reach
- * wait_cpu_online() to tend to it.
- */
-static int wait_cpu_initialized(unsigned int cpu)
-{
- /*
- * Wait for first sign of life from AP.
- */
- if (wait_cpu_cpumask(cpu, cpu_initialized_mask))
- return -1;
+ ret = apic->wakeup_secondary_cpu(apicid, start_ip);
+ else
+ ret = wakeup_secondary_cpu_via_init(apicid, start_ip);
- cpumask_set_cpu(cpu, cpu_callout_mask);
- return 0;
+ /* If the wakeup mechanism failed, cleanup the warm reset vector */
+ if (ret)
+ arch_cpuhp_cleanup_kick_cpu(cpu);
+ return ret;
}
static int native_kick_ap(unsigned int cpu, struct task_struct *tidle)
@@ -1132,11 +1081,6 @@ static int native_kick_ap(unsigned int c
*/
mtrr_save_state();
- /* x86 CPUs take themselves offline, so delayed offline is OK. */
- err = cpu_check_up_prepare(cpu);
- if (err && err != -EBUSY)
- return err;
-
/* the FPU context is blank, nobody can own it */
per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
@@ -1153,17 +1097,29 @@ static int native_kick_ap(unsigned int c
int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
- int ret;
-
- ret = native_kick_ap(cpu, tidle);
- if (!ret)
- ret = wait_cpu_initialized(cpu);
+ return native_kick_ap(cpu, tidle);
+}
+void arch_cpuhp_cleanup_kick_cpu(unsigned int cpu)
+{
/* Cleanup possible dangling ends... */
- if (x86_platform.legacy.warm_reset)
+ if (smp_ops.cpu_up == native_cpu_up && x86_platform.legacy.warm_reset)
smpboot_restore_warm_reset_vector();
+}
- return ret;
+void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
+{
+ if (smp_ops.cleanup_dead_cpu)
+ smp_ops.cleanup_dead_cpu(cpu);
+
+ if (system_state == SYSTEM_RUNNING)
+ pr_info("CPU %u is now offline\n", cpu);
+}
+
+void arch_cpuhp_sync_state_poll(void)
+{
+ if (smp_ops.poll_sync_state)
+ smp_ops.poll_sync_state();
}
/**
@@ -1355,9 +1311,6 @@ void __init native_smp_prepare_boot_cpu(
if (!IS_ENABLED(CONFIG_SMP))
switch_gdt_and_percpu_base(me);
- /* already set me in cpu_online_mask in boot_cpu_init() */
- cpumask_set_cpu(me, cpu_callout_mask);
- cpu_set_state_online(me);
native_pv_lock_init();
}
@@ -1484,8 +1437,6 @@ early_param("possible_cpus", _setup_poss
/* correctly size the local cpu masks */
void __init setup_cpu_local_masks(void)
{
- alloc_bootmem_cpumask_var(&cpu_initialized_mask);
- alloc_bootmem_cpumask_var(&cpu_callout_mask);
alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}
@@ -1547,9 +1498,6 @@ static void remove_siblinginfo(int cpu)
static void remove_cpu_from_maps(int cpu)
{
set_cpu_online(cpu, false);
- cpumask_clear_cpu(cpu, cpu_callout_mask);
- /* was set by cpu_init() */
- cpumask_clear_cpu(cpu, cpu_initialized_mask);
numa_remove_cpu(cpu);
}
@@ -1600,36 +1548,11 @@ int native_cpu_disable(void)
return 0;
}
-int common_cpu_die(unsigned int cpu)
-{
- int ret = 0;
-
- /* We don't do anything here: idle task is faking death itself. */
-
- /* They ack this in play_dead() by setting CPU_DEAD */
- if (cpu_wait_death(cpu, 5)) {
- if (system_state == SYSTEM_RUNNING)
- pr_info("CPU %u is now offline\n", cpu);
- } else {
- pr_err("CPU %u didn't die...\n", cpu);
- ret = -1;
- }
-
- return ret;
-}
-
-void native_cpu_die(unsigned int cpu)
-{
- common_cpu_die(cpu);
-}
-
void play_dead_common(void)
{
idle_task_exit();
- /* Ack it */
- (void)cpu_report_death();
-
+ cpuhp_ap_report_dead();
/*
* With physical CPU hotplug, we should halt the cpu
*/
@@ -1731,12 +1654,6 @@ int native_cpu_disable(void)
return -ENOSYS;
}
-void native_cpu_die(unsigned int cpu)
-{
- /* We said "no" in __cpu_disable */
- BUG();
-}
-
void native_play_dead(void)
{
BUG();
--- a/arch/x86/xen/smp_hvm.c
+++ b/arch/x86/xen/smp_hvm.c
@@ -55,18 +55,16 @@ static void __init xen_hvm_smp_prepare_c
}
#ifdef CONFIG_HOTPLUG_CPU
-static void xen_hvm_cpu_die(unsigned int cpu)
+static void xen_hvm_cleanup_dead_cpu(unsigned int cpu)
{
- if (common_cpu_die(cpu) == 0) {
- if (xen_have_vector_callback) {
- xen_smp_intr_free(cpu);
- xen_uninit_lock_cpu(cpu);
- xen_teardown_timer(cpu);
- }
+ if (xen_have_vector_callback) {
+ xen_smp_intr_free(cpu);
+ xen_uninit_lock_cpu(cpu);
+ xen_teardown_timer(cpu);
}
}
#else
-static void xen_hvm_cpu_die(unsigned int cpu)
+static void xen_hvm_cleanup_dead_cpu(unsigned int cpu)
{
BUG();
}
@@ -77,7 +75,7 @@ void __init xen_hvm_smp_init(void)
smp_ops.smp_prepare_boot_cpu = xen_hvm_smp_prepare_boot_cpu;
smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
smp_ops.smp_cpus_done = xen_smp_cpus_done;
- smp_ops.cpu_die = xen_hvm_cpu_die;
+ smp_ops.cleanup_dead_cpu = xen_hvm_cleanup_dead_cpu;
if (!xen_have_vector_callback) {
#ifdef CONFIG_PARAVIRT_SPINLOCKS
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -62,6 +62,7 @@ static void cpu_bringup(void)
int cpu;
cr4_init();
+ cpuhp_ap_sync_alive();
cpu_init();
touch_softlockup_watchdog();
@@ -83,7 +84,7 @@ static void cpu_bringup(void)
set_cpu_online(cpu, true);
- cpu_set_state_online(cpu); /* Implies full memory barrier. */
+ smp_mb();
/* We can take interrupts now: we're officially "up". */
local_irq_enable();
@@ -323,14 +324,6 @@ static int xen_pv_cpu_up(unsigned int cp
xen_setup_runstate_info(cpu);
- /*
- * PV VCPUs are always successfully taken down (see 'while' loop
- * in xen_cpu_die()), so -EBUSY is an error.
- */
- rc = cpu_check_up_prepare(cpu);
- if (rc)
- return rc;
-
/* make sure interrupts start blocked */
per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;
@@ -349,6 +342,11 @@ static int xen_pv_cpu_up(unsigned int cp
return 0;
}
+static void xen_pv_poll_sync_state(void)
+{
+ HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
+}
+
#ifdef CONFIG_HOTPLUG_CPU
static int xen_pv_cpu_disable(void)
{
@@ -364,18 +362,18 @@ static int xen_pv_cpu_disable(void)
static void xen_pv_cpu_die(unsigned int cpu)
{
- while (HYPERVISOR_vcpu_op(VCPUOP_is_up,
- xen_vcpu_nr(cpu), NULL)) {
+ while (HYPERVISOR_vcpu_op(VCPUOP_is_up, xen_vcpu_nr(cpu), NULL)) {
__set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(HZ/10);
}
+}
- if (common_cpu_die(cpu) == 0) {
- xen_smp_intr_free(cpu);
- xen_uninit_lock_cpu(cpu);
- xen_teardown_timer(cpu);
- xen_pmu_finish(cpu);
- }
+static void xen_pv_cleanup_dead_cpu(unsigned int cpu)
+{
+ xen_smp_intr_free(cpu);
+ xen_uninit_lock_cpu(cpu);
+ xen_teardown_timer(cpu);
+ xen_pmu_finish(cpu);
}
static void __noreturn xen_pv_play_dead(void) /* used only with HOTPLUG_CPU */
@@ -397,6 +395,11 @@ static void xen_pv_cpu_die(unsigned int
BUG();
}
+static void xen_pv_cleanup_dead_cpu(unsigned int cpu)
+{
+ BUG();
+}
+
static void __noreturn xen_pv_play_dead(void)
{
BUG();
@@ -437,6 +440,8 @@ static const struct smp_ops xen_smp_ops
.cpu_up = xen_pv_cpu_up,
.cpu_die = xen_pv_cpu_die,
+ .cleanup_dead_cpu = xen_pv_cleanup_dead_cpu,
+ .poll_sync_state = xen_pv_poll_sync_state,
.cpu_disable = xen_pv_cpu_disable,
.play_dead = xen_pv_play_dead,