From: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com> To: peterz@infradead.org, tj@kernel.org, oleg@redhat.com, paulmck@linux.vnet.ibm.com, rusty@rustcorp.com.au, mingo@kernel.org, namhyung@kernel.org, walken@google.com, vincent.guittot@linaro.org, laijs@cn.fujitsu.com Cc: rostedt@goodmis.org, wangyun@linux.vnet.ibm.com, xiaoguangrong@linux.vnet.ibm.com, sbw@mit.edu, fweisbec@gmail.com, zhong@linux.vnet.ibm.com, nikunj@linux.vnet.ibm.com, srivatsa.bhat@linux.vnet.ibm.com, linux-pm@vger.kernel.org, linux-arch@vger.kernel.org, linuxppc-dev@lists.ozlabs.org, netdev@vger.kernel.org, linux-kernel@vger.kernel.org, Tony Luck <tony.luck@intel.com>, Fenghua Yu <fenghua.yu@intel.com>, Andrew Morton <akpm@linux-foundation.org>, "Eric W. Biederman" <ebiederm@xmission.com>, Thomas Gleixner <tglx@linutronix.de>, linux-ia64@vger.kernel.org"Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com> Subject: [PATCH v2 35/45] ia64: irq, perfmon: Use get/put_online_cpus_atomic() to prevent CPU offline Date: Wed, 26 Jun 2013 02:02:11 +0530 [thread overview] Message-ID: <20130625203211.16593.71188.stgit@srivatsabhat.in.ibm.com> (raw) In-Reply-To: <20130625202452.16593.22810.stgit@srivatsabhat.in.ibm.com> Once stop_machine() is gone from the CPU offline path, we won't be able to depend on disabling preemption to prevent CPUs from going offline from under us. Use the get/put_online_cpus_atomic() APIs to prevent CPUs from going offline, while invoking from atomic context. Cc: Tony Luck <tony.luck@intel.com> Cc: Fenghua Yu <fenghua.yu@intel.com> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: "Eric W. Biederman" <ebiederm@xmission.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: linux-ia64@vger.kernel.org Signed-off-by: Srivatsa S. 
Bhat <srivatsa.bhat@linux.vnet.ibm.com> --- arch/ia64/kernel/irq_ia64.c | 15 +++++++++++++++ arch/ia64/kernel/perfmon.c | 8 +++++++- 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c index 1034884..f58b162 100644 --- a/arch/ia64/kernel/irq_ia64.c +++ b/arch/ia64/kernel/irq_ia64.c @@ -25,6 +25,7 @@ #include <linux/ptrace.h> #include <linux/signal.h> #include <linux/smp.h> +#include <linux/cpu.h> #include <linux/threads.h> #include <linux/bitops.h> #include <linux/irq.h> @@ -160,9 +161,11 @@ int bind_irq_vector(int irq, int vector, cpumask_t domain) unsigned long flags; int ret; + get_online_cpus_atomic(); spin_lock_irqsave(&vector_lock, flags); ret = __bind_irq_vector(irq, vector, domain); spin_unlock_irqrestore(&vector_lock, flags); + put_online_cpus_atomic(); return ret; } @@ -190,9 +193,11 @@ static void clear_irq_vector(int irq) { unsigned long flags; + get_online_cpus_atomic(); spin_lock_irqsave(&vector_lock, flags); __clear_irq_vector(irq); spin_unlock_irqrestore(&vector_lock, flags); + put_online_cpus_atomic(); } int @@ -204,6 +209,7 @@ ia64_native_assign_irq_vector (int irq) vector = -ENOSPC; + get_online_cpus_atomic(); spin_lock_irqsave(&vector_lock, flags); for_each_online_cpu(cpu) { domain = vector_allocation_domain(cpu); @@ -218,6 +224,7 @@ ia64_native_assign_irq_vector (int irq) BUG_ON(__bind_irq_vector(irq, vector, domain)); out: spin_unlock_irqrestore(&vector_lock, flags); + put_online_cpus_atomic(); return vector; } @@ -302,9 +309,11 @@ int irq_prepare_move(int irq, int cpu) unsigned long flags; int ret; + get_online_cpus_atomic(); spin_lock_irqsave(&vector_lock, flags); ret = __irq_prepare_move(irq, cpu); spin_unlock_irqrestore(&vector_lock, flags); + put_online_cpus_atomic(); return ret; } @@ -320,11 +329,13 @@ void irq_complete_move(unsigned irq) if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain))) return; + get_online_cpus_atomic(); cpumask_and(&cleanup_mask, 
&cfg->old_domain, cpu_online_mask); cfg->move_cleanup_count = cpus_weight(cleanup_mask); for_each_cpu_mask(i, cleanup_mask) platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0); cfg->move_in_progress = 0; + put_online_cpus_atomic(); } static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id) @@ -393,10 +404,12 @@ void destroy_and_reserve_irq(unsigned int irq) dynamic_irq_cleanup(irq); + get_online_cpus_atomic(); spin_lock_irqsave(&vector_lock, flags); __clear_irq_vector(irq); irq_status[irq] = IRQ_RSVD; spin_unlock_irqrestore(&vector_lock, flags); + put_online_cpus_atomic(); } /* @@ -409,6 +422,7 @@ int create_irq(void) cpumask_t domain = CPU_MASK_NONE; irq = vector = -ENOSPC; + get_online_cpus_atomic(); spin_lock_irqsave(&vector_lock, flags); for_each_online_cpu(cpu) { domain = vector_allocation_domain(cpu); @@ -424,6 +438,7 @@ int create_irq(void) BUG_ON(__bind_irq_vector(irq, vector, domain)); out: spin_unlock_irqrestore(&vector_lock, flags); + put_online_cpus_atomic(); if (irq >= 0) dynamic_irq_init(irq); return irq; diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 9ea25fc..16c8303 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -6476,9 +6476,12 @@ pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl) /* do the easy test first */ if (pfm_alt_intr_handler) return -EBUSY; + get_online_cpus_atomic(); + /* one at a time in the install or remove, just fail the others */ if (!spin_trylock(&pfm_alt_install_check)) { - return -EBUSY; + ret = -EBUSY; + goto out; } /* reserve our session */ @@ -6498,6 +6501,7 @@ pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl) pfm_alt_intr_handler = hdl; spin_unlock(&pfm_alt_install_check); + put_online_cpus_atomic(); return 0; @@ -6510,6 +6514,8 @@ cleanup_reserve: } spin_unlock(&pfm_alt_install_check); +out: + put_online_cpus_atomic(); return ret; }
WARNING: multiple messages have this Message-ID (diff)
From: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com> To: tglx@linutronix.de, peterz@infradead.org, tj@kernel.org, oleg@redhat.com, paulmck@linux.vnet.ibm.com, rusty@rustcorp.com.au, mingo@kernel.org, akpm@linux-foundation.org, namhyung@kernel.org, walken@google.com, vincent.guittot@linaro.org, laijs@cn.fujitsu.com Cc: rostedt@goodmis.org, wangyun@linux.vnet.ibm.com, xiaoguangrong@linux.vnet.ibm.com, sbw@mit.edu, fweisbec@gmail.com, zhong@linux.vnet.ibm.com, nikunj@linux.vnet.ibm.com, srivatsa.bhat@linux.vnet.ibm.com, linux-pm@vger.kernel.org, linux-arch@vger.kernel.org, linuxppc-dev@lists.ozlabs.org, netdev@vger.kernel.org, linux-kernel@vger.kernel.org, Tony Luck <tony.luck@intel.com>, Fenghua Yu <fenghua.yu@intel.com>, "Eric W. Biederman" <ebiederm@xmission.com>, linux-ia64@vger.kernel.org"Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com> Subject: [PATCH v2 35/45] ia64: irq, perfmon: Use get/put_online_cpus_atomic() to prevent CPU offline Date: Wed, 26 Jun 2013 02:02:11 +0530 [thread overview] Message-ID: <20130625203211.16593.71188.stgit@srivatsabhat.in.ibm.com> (raw) Message-ID: <20130625203211.4lREa4aXw3ygqjczbN3D7880Hx_pOdDyCh2q5LB_kBA@z> (raw) In-Reply-To: <20130625202452.16593.22810.stgit@srivatsabhat.in.ibm.com> Once stop_machine() is gone from the CPU offline path, we won't be able to depend on disabling preemption to prevent CPUs from going offline from under us. Use the get/put_online_cpus_atomic() APIs to prevent CPUs from going offline, while invoking from atomic context. Cc: Tony Luck <tony.luck@intel.com> Cc: Fenghua Yu <fenghua.yu@intel.com> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: "Eric W. Biederman" <ebiederm@xmission.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: linux-ia64@vger.kernel.org Signed-off-by: Srivatsa S. 
Bhat <srivatsa.bhat@linux.vnet.ibm.com> --- arch/ia64/kernel/irq_ia64.c | 15 +++++++++++++++ arch/ia64/kernel/perfmon.c | 8 +++++++- 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c index 1034884..f58b162 100644 --- a/arch/ia64/kernel/irq_ia64.c +++ b/arch/ia64/kernel/irq_ia64.c @@ -25,6 +25,7 @@ #include <linux/ptrace.h> #include <linux/signal.h> #include <linux/smp.h> +#include <linux/cpu.h> #include <linux/threads.h> #include <linux/bitops.h> #include <linux/irq.h> @@ -160,9 +161,11 @@ int bind_irq_vector(int irq, int vector, cpumask_t domain) unsigned long flags; int ret; + get_online_cpus_atomic(); spin_lock_irqsave(&vector_lock, flags); ret = __bind_irq_vector(irq, vector, domain); spin_unlock_irqrestore(&vector_lock, flags); + put_online_cpus_atomic(); return ret; } @@ -190,9 +193,11 @@ static void clear_irq_vector(int irq) { unsigned long flags; + get_online_cpus_atomic(); spin_lock_irqsave(&vector_lock, flags); __clear_irq_vector(irq); spin_unlock_irqrestore(&vector_lock, flags); + put_online_cpus_atomic(); } int @@ -204,6 +209,7 @@ ia64_native_assign_irq_vector (int irq) vector = -ENOSPC; + get_online_cpus_atomic(); spin_lock_irqsave(&vector_lock, flags); for_each_online_cpu(cpu) { domain = vector_allocation_domain(cpu); @@ -218,6 +224,7 @@ ia64_native_assign_irq_vector (int irq) BUG_ON(__bind_irq_vector(irq, vector, domain)); out: spin_unlock_irqrestore(&vector_lock, flags); + put_online_cpus_atomic(); return vector; } @@ -302,9 +309,11 @@ int irq_prepare_move(int irq, int cpu) unsigned long flags; int ret; + get_online_cpus_atomic(); spin_lock_irqsave(&vector_lock, flags); ret = __irq_prepare_move(irq, cpu); spin_unlock_irqrestore(&vector_lock, flags); + put_online_cpus_atomic(); return ret; } @@ -320,11 +329,13 @@ void irq_complete_move(unsigned irq) if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain))) return; + get_online_cpus_atomic(); cpumask_and(&cleanup_mask, 
&cfg->old_domain, cpu_online_mask); cfg->move_cleanup_count = cpus_weight(cleanup_mask); for_each_cpu_mask(i, cleanup_mask) platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0); cfg->move_in_progress = 0; + put_online_cpus_atomic(); } static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id) @@ -393,10 +404,12 @@ void destroy_and_reserve_irq(unsigned int irq) dynamic_irq_cleanup(irq); + get_online_cpus_atomic(); spin_lock_irqsave(&vector_lock, flags); __clear_irq_vector(irq); irq_status[irq] = IRQ_RSVD; spin_unlock_irqrestore(&vector_lock, flags); + put_online_cpus_atomic(); } /* @@ -409,6 +422,7 @@ int create_irq(void) cpumask_t domain = CPU_MASK_NONE; irq = vector = -ENOSPC; + get_online_cpus_atomic(); spin_lock_irqsave(&vector_lock, flags); for_each_online_cpu(cpu) { domain = vector_allocation_domain(cpu); @@ -424,6 +438,7 @@ int create_irq(void) BUG_ON(__bind_irq_vector(irq, vector, domain)); out: spin_unlock_irqrestore(&vector_lock, flags); + put_online_cpus_atomic(); if (irq >= 0) dynamic_irq_init(irq); return irq; diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 9ea25fc..16c8303 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -6476,9 +6476,12 @@ pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl) /* do the easy test first */ if (pfm_alt_intr_handler) return -EBUSY; + get_online_cpus_atomic(); + /* one at a time in the install or remove, just fail the others */ if (!spin_trylock(&pfm_alt_install_check)) { - return -EBUSY; + ret = -EBUSY; + goto out; } /* reserve our session */ @@ -6498,6 +6501,7 @@ pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl) pfm_alt_intr_handler = hdl; spin_unlock(&pfm_alt_install_check); + put_online_cpus_atomic(); return 0; @@ -6510,6 +6514,8 @@ cleanup_reserve: } spin_unlock(&pfm_alt_install_check); +out: + put_online_cpus_atomic(); return ret; }
next prev parent reply other threads:[~2013-06-25 20:32 UTC|newest] Thread overview: 130+ messages / expand[flat|nested] mbox.gz Atom feed top 2013-06-25 20:25 [PATCH v2 00/45] CPU hotplug: stop_machine()-free CPU hotplug, part 1 Srivatsa S. Bhat 2013-06-25 20:25 ` Srivatsa S. Bhat 2013-06-25 20:25 ` [PATCH v2 01/45] CPU hotplug: Provide APIs to prevent CPU offline from atomic context Srivatsa S. Bhat 2013-06-25 20:25 ` Srivatsa S. Bhat 2013-06-25 20:26 ` [PATCH v2 02/45] CPU hotplug: Clarify the usage of different synchronization APIs Srivatsa S. Bhat 2013-06-25 20:26 ` Srivatsa S. Bhat 2013-06-25 20:26 ` [PATCH v2 03/45] Documentation, CPU hotplug: Recommend usage of get/put_online_cpus_atomic() Srivatsa S. Bhat 2013-06-25 20:26 ` Srivatsa S. Bhat 2013-06-25 20:26 ` [PATCH v2 04/45] CPU hotplug: Add infrastructure to check lacking hotplug synchronization Srivatsa S. Bhat 2013-06-25 20:26 ` Srivatsa S. Bhat 2013-06-25 20:26 ` [PATCH v2 05/45] CPU hotplug: Protect set_cpu_online() to avoid false-positives Srivatsa S. Bhat 2013-06-25 20:26 ` Srivatsa S. Bhat 2013-06-25 20:26 ` [PATCH v2 06/45] CPU hotplug: Sprinkle debugging checks to catch locking bugs Srivatsa S. Bhat 2013-06-25 20:26 ` Srivatsa S. Bhat 2013-06-25 20:26 ` [PATCH v2 07/45] CPU hotplug: Expose the new debug config option Srivatsa S. Bhat 2013-06-25 20:26 ` Srivatsa S. Bhat 2013-06-25 20:26 ` [PATCH v2 08/45] CPU hotplug: Convert preprocessor macros to static inline functions Srivatsa S. Bhat 2013-06-25 20:26 ` Srivatsa S. Bhat 2013-06-25 20:27 ` [PATCH v2 09/45] smp: Use get/put_online_cpus_atomic() to prevent CPU offline Srivatsa S. Bhat 2013-06-25 20:27 ` Srivatsa S. Bhat 2013-06-25 20:27 ` [PATCH v2 10/45] sched/core: " Srivatsa S. Bhat 2013-06-25 20:27 ` Srivatsa S. Bhat 2013-06-25 20:27 ` [PATCH v2 11/45] migration: Use raw_spin_lock/unlock since interrupts are already disabled Srivatsa S. Bhat 2013-06-25 20:27 ` Srivatsa S. 
Bhat 2013-06-25 20:27 ` [PATCH v2 12/45] sched/fair: Use get/put_online_cpus_atomic() to prevent CPU offline Srivatsa S. Bhat 2013-06-25 20:27 ` Srivatsa S. Bhat 2013-06-25 20:27 ` [PATCH v2 13/45] timer: " Srivatsa S. Bhat 2013-06-25 20:27 ` Srivatsa S. Bhat 2013-06-25 20:27 ` [PATCH v2 14/45] sched/rt: " Srivatsa S. Bhat 2013-06-25 20:27 ` Srivatsa S. Bhat 2013-06-25 20:27 ` [PATCH v2 15/45] rcu: " Srivatsa S. Bhat 2013-06-25 20:27 ` Srivatsa S. Bhat 2013-06-25 22:00 ` Paul E. McKenney 2013-06-26 14:09 ` Srivatsa S. Bhat 2013-06-26 14:29 ` David Laight 2013-06-26 14:29 ` David Laight 2013-06-26 14:34 ` Paul E. McKenney 2013-06-26 14:51 ` Steven Rostedt 2013-06-26 14:51 ` Steven Rostedt 2013-06-26 15:21 ` Tejun Heo 2013-06-26 15:33 ` Steven Rostedt 2013-06-26 15:33 ` Steven Rostedt 2013-06-26 17:29 ` Tejun Heo 2013-06-26 18:28 ` Srivatsa S. Bhat 2013-06-26 18:28 ` Srivatsa S. Bhat 2013-06-26 21:34 ` Tejun Heo 2013-06-26 21:34 ` Tejun Heo 2013-06-27 6:53 ` Srivatsa S. Bhat 2013-06-27 6:53 ` Srivatsa S. Bhat 2013-06-26 18:22 ` Srivatsa S. Bhat 2013-06-26 18:22 ` Srivatsa S. Bhat 2013-06-27 8:54 ` David Laight 2013-06-27 8:54 ` David Laight 2013-06-27 10:06 ` Srivatsa S. Bhat 2013-06-26 14:45 ` Paul E. McKenney 2013-06-26 18:18 ` Srivatsa S. Bhat 2013-06-26 14:33 ` Paul E. McKenney 2013-06-25 20:28 ` [PATCH v2 16/45] tick-broadcast: " Srivatsa S. Bhat 2013-06-25 20:28 ` Srivatsa S. Bhat 2013-06-25 20:28 ` [PATCH v2 17/45] time/clocksource: " Srivatsa S. Bhat 2013-06-25 20:28 ` Srivatsa S. Bhat 2013-06-25 20:28 ` [PATCH v2 18/45] softirq: " Srivatsa S. Bhat 2013-06-25 20:28 ` Srivatsa S. Bhat 2013-06-25 20:28 ` [PATCH v2 19/45] irq: " Srivatsa S. Bhat 2013-06-25 20:28 ` Srivatsa S. Bhat 2013-06-25 20:29 ` [PATCH v2 20/45] net: " Srivatsa S. Bhat 2013-06-25 20:29 ` Srivatsa S. Bhat 2013-06-25 20:29 ` [PATCH v2 21/45] block: " Srivatsa S. Bhat 2013-06-25 20:29 ` Srivatsa S. Bhat 2013-06-25 20:29 ` [PATCH v2 22/45] percpu_counter: " Srivatsa S. 
Bhat 2013-06-25 20:29 ` Srivatsa S. Bhat 2013-06-25 20:29 ` [PATCH v2 23/45] infiniband: ehca: " Srivatsa S. Bhat 2013-06-25 20:29 ` Srivatsa S. Bhat 2013-06-25 20:29 ` [PATCH v2 24/45] [SCSI] fcoe: " Srivatsa S. Bhat 2013-06-25 20:29 ` Srivatsa S. Bhat 2013-06-25 20:30 ` [PATCH v2 25/45] staging/octeon: " Srivatsa S. Bhat 2013-06-25 20:30 ` Srivatsa S. Bhat 2013-06-25 20:45 ` Greg Kroah-Hartman 2013-06-25 20:45 ` Greg Kroah-Hartman 2013-06-25 20:30 ` [PATCH v2 26/45] x86: " Srivatsa S. Bhat 2013-06-25 20:30 ` Srivatsa S. Bhat 2013-06-25 20:30 ` [PATCH v2 27/45] perf/x86: " Srivatsa S. Bhat 2013-06-25 20:30 ` Srivatsa S. Bhat 2013-06-25 20:30 ` [PATCH v2 28/45] KVM: " Srivatsa S. Bhat 2013-06-25 20:30 ` Srivatsa S. Bhat 2013-06-26 8:20 ` Paolo Bonzini 2013-06-25 20:30 ` [PATCH v2 29/45] kvm/vmx: " Srivatsa S. Bhat 2013-06-25 20:30 ` Srivatsa S. Bhat 2013-06-26 7:46 ` Paolo Bonzini 2013-06-26 8:06 ` Srivatsa S. Bhat 2013-06-26 8:23 ` Paolo Bonzini 2013-06-26 8:23 ` Paolo Bonzini 2013-06-26 8:41 ` Srivatsa S. Bhat 2013-06-26 8:41 ` Srivatsa S. Bhat 2013-06-26 8:57 ` Paolo Bonzini 2013-06-26 8:57 ` Paolo Bonzini 2013-06-25 20:30 ` [PATCH v2 30/45] x86/xen: " Srivatsa S. Bhat 2013-06-25 20:30 ` Srivatsa S. Bhat 2013-06-25 20:31 ` [PATCH v2 31/45] alpha/smp: " Srivatsa S. Bhat 2013-06-25 20:31 ` Srivatsa S. Bhat 2013-06-25 20:31 ` [PATCH v2 32/45] blackfin/smp: " Srivatsa S. Bhat 2013-06-25 20:31 ` Srivatsa S. Bhat 2013-06-25 20:31 ` [PATCH v2 33/45] cris/smp: " Srivatsa S. Bhat 2013-06-25 20:31 ` Srivatsa S. Bhat 2013-06-25 20:32 ` [PATCH v2 34/45] hexagon/smp: " Srivatsa S. Bhat 2013-06-25 20:32 ` Srivatsa S. Bhat 2013-06-25 20:32 ` Srivatsa S. Bhat [this message] 2013-06-25 20:32 ` [PATCH v2 35/45] ia64: irq, perfmon: " Srivatsa S. Bhat 2013-06-25 20:32 ` [PATCH v2 36/45] ia64: smp, tlb: " Srivatsa S. Bhat 2013-06-25 20:32 ` Srivatsa S. Bhat 2013-06-25 20:32 ` [PATCH v2 37/45] m32r: " Srivatsa S. Bhat 2013-06-25 20:32 ` Srivatsa S. 
Bhat 2013-06-25 20:32 ` [PATCH v2 38/45] MIPS: " Srivatsa S. Bhat 2013-06-25 20:32 ` Srivatsa S. Bhat 2013-06-26 13:39 ` Ralf Baechle 2013-06-26 13:39 ` Ralf Baechle 2013-06-27 7:08 ` Srivatsa S. Bhat 2013-06-25 20:33 ` [PATCH v2 39/45] mn10300: " Srivatsa S. Bhat 2013-06-25 20:33 ` [PATCH v2 40/45] powerpc, irq: Use GFP_ATOMIC allocations in atomic context Srivatsa S. Bhat 2013-06-25 20:33 ` Srivatsa S. Bhat 2013-06-25 20:33 ` [PATCH v2 41/45] powerpc: Use get/put_online_cpus_atomic() to prevent CPU offline Srivatsa S. Bhat 2013-06-25 20:33 ` Srivatsa S. Bhat 2013-06-25 20:33 ` [PATCH v2 42/45] powerpc: Use get/put_online_cpus_atomic() to avoid false-positive warning Srivatsa S. Bhat 2013-06-25 20:33 ` Srivatsa S. Bhat 2013-06-25 20:33 ` [PATCH v2 43/45] sh: Use get/put_online_cpus_atomic() to prevent CPU offline Srivatsa S. Bhat 2013-06-25 20:33 ` Srivatsa S. Bhat 2013-06-25 20:33 ` [PATCH v2 44/45] sparc: " Srivatsa S. Bhat 2013-06-25 20:33 ` Srivatsa S. Bhat 2013-06-25 20:34 ` [PATCH v2 45/45] tile: " Srivatsa S. Bhat 2013-06-25 20:34 ` Srivatsa S. Bhat
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=20130625203211.16593.71188.stgit@srivatsabhat.in.ibm.com \ --to=srivatsa.bhat@linux.vnet.ibm.com \ --cc=akpm@linux-foundation.org \ --cc=ebiederm@xmission.com \ --cc=fenghua.yu@intel.com \ --cc=fweisbec@gmail.com \ --cc=laijs@cn.fujitsu.com \ --cc=linux-arch@vger.kernel.org \ --cc=linux-ia64@vger.kernel.org \ --cc=linux-kernel@vger.kernel.org \ --cc=linux-pm@vger.kernel.org \ --cc=linuxppc-dev@lists.ozlabs.org \ --cc=mingo@kernel.org \ --cc=namhyung@kernel.org \ --cc=netdev@vger.kernel.org \ --cc=nikunj@linux.vnet.ibm.com \ --cc=oleg@redhat.com \ --cc=paulmck@linux.vnet.ibm.com \ --cc=peterz@infradead.org \ --cc=rostedt@goodmis.org \ --cc=rusty@rustcorp.com.au \ --cc=sbw@mit.edu \ --cc=tglx@linutronix.de \ --cc=tj@kernel.org \ --cc=tony.luck@intel.com \ --cc=vincent.guittot@linaro.org \ --cc=walken@google.com \ --cc=wangyun@linux.vnet.ibm.com \ --cc=xiaoguangrong@linux.vnet.ibm.com \ --cc=zhong@linux.vnet.ibm.com \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link. Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions for how to clone and mirror all data and code used for this inbox; as well as URLs for NNTP newsgroup(s).