From: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com>
To: tglx@linutronix.de, peterz@infradead.org, tj@kernel.org,
oleg@redhat.com, paulmck@linux.vnet.ibm.com,
rusty@rustcorp.com.au, mingo@kernel.org,
akpm@linux-foundation.org, namhyung@kernel.org
Cc: linux-arch@vger.kernel.org, linux@arm.linux.org.uk,
nikunj@linux.vnet.ibm.com, linux-pm@vger.kernel.org,
fweisbec@gmail.com, linux-doc@vger.kernel.org,
linux-kernel@vger.kernel.org, rostedt@goodmis.org,
xiaoguangrong@linux.vnet.ibm.com, rjw@sisk.pl, sbw@mit.edu,
wangyun@linux.vnet.ibm.com, srivatsa.bhat@linux.vnet.ibm.com,
netdev@vger.kernel.org, vincent.guittot@linaro.org,
walken@google.com, linuxppc-dev@lists.ozlabs.org,
linux-arm-kernel@lists.infradead.org
Subject: [PATCH v6 26/46] x86: Use get/put_online_cpus_atomic() to prevent CPU offline
Date: Mon, 18 Feb 2013 18:11:51 +0530 [thread overview]
Message-ID: <20130218124150.26245.8414.stgit@srivatsabhat.in.ibm.com> (raw)
In-Reply-To: <20130218123714.26245.61816.stgit@srivatsabhat.in.ibm.com>
Once stop_machine() is gone from the CPU offline path, we won't be able to
depend on preempt_disable() or local_irq_disable() to prevent CPUs from
going offline from under us.
Use the get/put_online_cpus_atomic() APIs to prevent CPUs from going offline
while these code paths are invoked from atomic context.
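As an illustration of the conversion pattern used throughout this patch (a
minimal sketch, not taken verbatim from the diff below; do_something() is a
placeholder callback), code that relied on preempt_disable() to keep a CPU
from disappearing now brackets the same region with the new APIs:

	/*
	 * Old pattern: disabling preemption implicitly blocked CPU
	 * offline, but only as long as offline used stop_machine().
	 */
	preempt_disable();
	smp_call_function_single(cpu, do_something, NULL, 1);
	preempt_enable();

	/*
	 * New pattern: take the atomic hotplug read-side lock so the
	 * target CPU cannot go offline while we reference it.
	 */
	get_online_cpus_atomic();
	smp_call_function_single(cpu, do_something, NULL, 1);
	put_online_cpus_atomic();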
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Cc: Tony Luck <tony.luck@intel.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Daniel J Blueman <daniel@numascale-asia.com>
Cc: Steffen Persvold <sp@numascale.com>
Cc: Joerg Roedel <joerg.roedel@amd.com>
Cc: linux-edac@vger.kernel.org
Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
---
arch/x86/include/asm/ipi.h | 5 +++++
arch/x86/kernel/apic/apic_flat_64.c | 10 ++++++++++
arch/x86/kernel/apic/apic_numachip.c | 5 +++++
arch/x86/kernel/apic/es7000_32.c | 5 +++++
arch/x86/kernel/apic/io_apic.c | 7 +++++--
arch/x86/kernel/apic/ipi.c | 10 ++++++++++
arch/x86/kernel/apic/x2apic_cluster.c | 4 ++++
arch/x86/kernel/apic/x2apic_uv_x.c | 4 ++++
arch/x86/kernel/cpu/mcheck/therm_throt.c | 4 ++--
arch/x86/mm/tlb.c | 14 +++++++-------
10 files changed, 57 insertions(+), 11 deletions(-)
diff --git a/arch/x86/include/asm/ipi.h b/arch/x86/include/asm/ipi.h
index 615fa90..112249c 100644
--- a/arch/x86/include/asm/ipi.h
+++ b/arch/x86/include/asm/ipi.h
@@ -20,6 +20,7 @@
* Subject to the GNU Public License, v.2
*/
+#include <linux/cpu.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/smp.h>
@@ -131,18 +132,22 @@ extern int no_broadcast;
static inline void __default_local_send_IPI_allbutself(int vector)
{
+ get_online_cpus_atomic();
if (no_broadcast || vector == NMI_VECTOR)
apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
else
__default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector, apic->dest_logical);
+ put_online_cpus_atomic();
}
static inline void __default_local_send_IPI_all(int vector)
{
+ get_online_cpus_atomic();
if (no_broadcast || vector == NMI_VECTOR)
apic->send_IPI_mask(cpu_online_mask, vector);
else
__default_send_IPI_shortcut(APIC_DEST_ALLINC, vector, apic->dest_logical);
+ put_online_cpus_atomic();
}
#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index 00c77cf..8207ade 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -11,6 +11,7 @@
#include <linux/errno.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
+#include <linux/cpu.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
@@ -92,6 +93,8 @@ static void flat_send_IPI_allbutself(int vector)
#else
int hotplug = 0;
#endif
+
+ get_online_cpus_atomic();
if (hotplug || vector == NMI_VECTOR) {
if (!cpumask_equal(cpu_online_mask, cpumask_of(cpu))) {
unsigned long mask = cpumask_bits(cpu_online_mask)[0];
@@ -105,16 +108,19 @@ static void flat_send_IPI_allbutself(int vector)
__default_send_IPI_shortcut(APIC_DEST_ALLBUT,
vector, apic->dest_logical);
}
+ put_online_cpus_atomic();
}
static void flat_send_IPI_all(int vector)
{
+ get_online_cpus_atomic();
if (vector == NMI_VECTOR) {
flat_send_IPI_mask(cpu_online_mask, vector);
} else {
__default_send_IPI_shortcut(APIC_DEST_ALLINC,
vector, apic->dest_logical);
}
+ put_online_cpus_atomic();
}
static unsigned int flat_get_apic_id(unsigned long x)
@@ -255,12 +261,16 @@ static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
static void physflat_send_IPI_allbutself(int vector)
{
+ get_online_cpus_atomic();
default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
+ put_online_cpus_atomic();
}
static void physflat_send_IPI_all(int vector)
{
+ get_online_cpus_atomic();
physflat_send_IPI_mask(cpu_online_mask, vector);
+ put_online_cpus_atomic();
}
static int physflat_probe(void)
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index 9c2aa89..7d19c1d 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -14,6 +14,7 @@
#include <linux/errno.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
+#include <linux/cpu.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -131,15 +132,19 @@ static void numachip_send_IPI_allbutself(int vector)
unsigned int this_cpu = smp_processor_id();
unsigned int cpu;
+ get_online_cpus_atomic();
for_each_online_cpu(cpu) {
if (cpu != this_cpu)
numachip_send_IPI_one(cpu, vector);
}
+ put_online_cpus_atomic();
}
static void numachip_send_IPI_all(int vector)
{
+ get_online_cpus_atomic();
numachip_send_IPI_mask(cpu_online_mask, vector);
+ put_online_cpus_atomic();
}
static void numachip_send_IPI_self(int vector)
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index 0874799..ddf2995 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -45,6 +45,7 @@
#include <linux/gfp.h>
#include <linux/nmi.h>
#include <linux/smp.h>
+#include <linux/cpu.h>
#include <linux/io.h>
#include <asm/apicdef.h>
@@ -412,12 +413,16 @@ static void es7000_send_IPI_mask(const struct cpumask *mask, int vector)
static void es7000_send_IPI_allbutself(int vector)
{
+ get_online_cpus_atomic();
default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
+ put_online_cpus_atomic();
}
static void es7000_send_IPI_all(int vector)
{
+ get_online_cpus_atomic();
es7000_send_IPI_mask(cpu_online_mask, vector);
+ put_online_cpus_atomic();
}
static int es7000_apic_id_registered(void)
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index b739d39..ca1c2a5 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -25,6 +25,7 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
+#include <linux/cpu.h>
#include <linux/pci.h>
#include <linux/mc146818rtc.h>
#include <linux/compiler.h>
@@ -1788,13 +1789,13 @@ __apicdebuginit(void) print_local_APICs(int maxcpu)
if (!maxcpu)
return;
- preempt_disable();
+ get_online_cpus_atomic();
for_each_online_cpu(cpu) {
if (cpu >= maxcpu)
break;
smp_call_function_single(cpu, print_local_APIC, NULL, 1);
}
- preempt_enable();
+ put_online_cpus_atomic();
}
__apicdebuginit(void) print_PIC(void)
@@ -2209,6 +2210,7 @@ void send_cleanup_vector(struct irq_cfg *cfg)
{
cpumask_var_t cleanup_mask;
+ get_online_cpus_atomic();
if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
unsigned int i;
for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
@@ -2219,6 +2221,7 @@ void send_cleanup_vector(struct irq_cfg *cfg)
free_cpumask_var(cleanup_mask);
}
cfg->move_in_progress = 0;
+ put_online_cpus_atomic();
}
asmlinkage void smp_irq_move_cleanup_interrupt(void)
diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c
index cce91bf..c65aa77 100644
--- a/arch/x86/kernel/apic/ipi.c
+++ b/arch/x86/kernel/apic/ipi.c
@@ -29,12 +29,14 @@ void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
* to an arbitrary mask, so I do a unicast to each CPU instead.
* - mbligh
*/
+ get_online_cpus_atomic();
local_irq_save(flags);
for_each_cpu(query_cpu, mask) {
__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
query_cpu), vector, APIC_DEST_PHYSICAL);
}
local_irq_restore(flags);
+ put_online_cpus_atomic();
}
void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
@@ -46,6 +48,7 @@ void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
/* See Hack comment above */
+ get_online_cpus_atomic();
local_irq_save(flags);
for_each_cpu(query_cpu, mask) {
if (query_cpu == this_cpu)
@@ -54,6 +57,7 @@ void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
query_cpu), vector, APIC_DEST_PHYSICAL);
}
local_irq_restore(flags);
+ put_online_cpus_atomic();
}
#ifdef CONFIG_X86_32
@@ -70,12 +74,14 @@ void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
* should be modified to do 1 message per cluster ID - mbligh
*/
+ get_online_cpus_atomic();
local_irq_save(flags);
for_each_cpu(query_cpu, mask)
__default_send_IPI_dest_field(
early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
vector, apic->dest_logical);
local_irq_restore(flags);
+ put_online_cpus_atomic();
}
void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
@@ -87,6 +93,7 @@ void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
/* See Hack comment above */
+ get_online_cpus_atomic();
local_irq_save(flags);
for_each_cpu(query_cpu, mask) {
if (query_cpu == this_cpu)
@@ -96,6 +103,7 @@ void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
vector, apic->dest_logical);
}
local_irq_restore(flags);
+ put_online_cpus_atomic();
}
/*
@@ -109,10 +117,12 @@ void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
if (WARN_ONCE(!mask, "empty IPI mask"))
return;
+ get_online_cpus_atomic();
local_irq_save(flags);
WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
local_irq_restore(flags);
+ put_online_cpus_atomic();
}
void default_send_IPI_allbutself(int vector)
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index c88baa4..cb08e6b 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -88,12 +88,16 @@ x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
static void x2apic_send_IPI_allbutself(int vector)
{
+ get_online_cpus_atomic();
__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLBUT);
+ put_online_cpus_atomic();
}
static void x2apic_send_IPI_all(int vector)
{
+ get_online_cpus_atomic();
__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC);
+ put_online_cpus_atomic();
}
static int
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 8cfade9..cc469a3 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -244,15 +244,19 @@ static void uv_send_IPI_allbutself(int vector)
unsigned int this_cpu = smp_processor_id();
unsigned int cpu;
+ get_online_cpus_atomic();
for_each_online_cpu(cpu) {
if (cpu != this_cpu)
uv_send_IPI_one(cpu, vector);
}
+ put_online_cpus_atomic();
}
static void uv_send_IPI_all(int vector)
{
+ get_online_cpus_atomic();
uv_send_IPI_mask(cpu_online_mask, vector);
+ put_online_cpus_atomic();
}
static int uv_apic_id_valid(int apicid)
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 47a1870..d128ba4 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -82,13 +82,13 @@ static ssize_t therm_throt_device_show_##event##_##name( \
unsigned int cpu = dev->id; \
ssize_t ret; \
\
- preempt_disable(); /* CPU hotplug */ \
+ get_online_cpus_atomic(); /* CPU hotplug */ \
if (cpu_online(cpu)) { \
ret = sprintf(buf, "%lu\n", \
per_cpu(thermal_state, cpu).event.name); \
} else \
ret = 0; \
- preempt_enable(); \
+ put_online_cpus_atomic(); \
\
return ret; \
}
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 13a6b29..2c3ec76 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -147,12 +147,12 @@ void flush_tlb_current_task(void)
{
struct mm_struct *mm = current->mm;
- preempt_disable();
+ get_online_cpus_atomic();
local_flush_tlb();
if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
- preempt_enable();
+ put_online_cpus_atomic();
}
/*
@@ -187,7 +187,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
unsigned long addr;
unsigned act_entries, tlb_entries = 0;
- preempt_disable();
+ get_online_cpus_atomic();
if (current->active_mm != mm)
goto flush_all;
@@ -225,21 +225,21 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
if (cpumask_any_but(mm_cpumask(mm),
smp_processor_id()) < nr_cpu_ids)
flush_tlb_others(mm_cpumask(mm), mm, start, end);
- preempt_enable();
+ put_online_cpus_atomic();
return;
}
flush_all:
if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
- preempt_enable();
+ put_online_cpus_atomic();
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
struct mm_struct *mm = vma->vm_mm;
- preempt_disable();
+ get_online_cpus_atomic();
if (current->active_mm == mm) {
if (current->mm)
@@ -251,7 +251,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);
- preempt_enable();
+ put_online_cpus_atomic();
}
static void do_flush_tlb_all(void *info)