From: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com> To: peterz@infradead.org, tj@kernel.org, oleg@redhat.com, paulmck@linux.vnet.ibm.com, rusty@rustcorp.com.au, mingo@kernel.org, akpm@linux-foundation.org, namhyung@kernel.org, walken@google.com, vincent.guittot@linaro.org, laijs@cn.fujitsu.com, David.Laight@aculab.com Cc: rostedt@goodmis.org, wangyun@linux.vnet.ibm.com, xiaoguangrong@linux.vnet.ibm.com, sbw@mit.edu, fweisbec@gmail.com, zhong@linux.vnet.ibm.com, nikunj@linux.vnet.ibm.com, srivatsa.bhat@linux.vnet.ibm.com, linux-pm@vger.kernel.org, linux-arch@vger.kernel.org, linuxppc-dev@lists.ozlabs.org, netdev@vger.kernel.org, linux-kernel@vger.kernel.org, Thomas Gleixner <tglx@linutronix.de>, Ingo Molnar <mingo@redhat.com>, "H. Peter Anvin" <hpa@zytor.com>, x86@kernel.org, Tony Luck <tony.luck@intel.com>, Borislav Petkov <bp@alien8.de>, Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>, Sebastian Andrzej Siewior <sebastian@breakpoint.cc>, Joerg Roedel <joro@8bytes.org>, Jan Beulich <jbeulich@suse.com>, Joonsoo Kim <js1304@gmail.com>, linux-edac@vger.kernel.org"Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com> Subject: [PATCH v3 27/45] x86: Use get/put_online_cpus_atomic() to prevent CPU offline Date: Fri, 28 Jun 2013 01:27:11 +0530 [thread overview] Message-ID: <20130627195711.29830.22170.stgit@srivatsabhat.in.ibm.com> (raw) In-Reply-To: <20130627195136.29830.10445.stgit@srivatsabhat.in.ibm.com> Once stop_machine() is gone from the CPU offline path, we won't be able to depend on disabling preemption to prevent CPUs from going offline from under us. Use the get/put_online_cpus_atomic() APIs to prevent CPUs from going offline, while invoking from atomic context. Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Ingo Molnar <mingo@redhat.com> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: x86@kernel.org Cc: Tony Luck <tony.luck@intel.com> Cc: Borislav Petkov <bp@alien8.de> Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> Cc: Sebastian Andrzej Siewior <sebastian@breakpoint.cc> Cc: Joerg Roedel <joro@8bytes.org> Cc: Jan Beulich <jbeulich@suse.com> Cc: Joonsoo Kim <js1304@gmail.com> Cc: linux-edac@vger.kernel.org Signed-off-by: Srivatsa S. 
Bhat <srivatsa.bhat@linux.vnet.ibm.com> --- arch/x86/kernel/apic/io_apic.c | 21 ++++++++++++++++++--- arch/x86/kernel/cpu/mcheck/therm_throt.c | 4 ++-- arch/x86/mm/tlb.c | 14 +++++++------- 3 files changed, 27 insertions(+), 12 deletions(-) diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 9ed796c..4c71c1e 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -25,6 +25,7 @@ #include <linux/init.h> #include <linux/delay.h> #include <linux/sched.h> +#include <linux/cpu.h> #include <linux/pci.h> #include <linux/mc146818rtc.h> #include <linux/compiler.h> @@ -1169,9 +1170,11 @@ int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) int err; unsigned long flags; + get_online_cpus_atomic(); raw_spin_lock_irqsave(&vector_lock, flags); err = __assign_irq_vector(irq, cfg, mask); raw_spin_unlock_irqrestore(&vector_lock, flags); + put_online_cpus_atomic(); return err; } @@ -1757,13 +1760,13 @@ __apicdebuginit(void) print_local_APICs(int maxcpu) if (!maxcpu) return; - preempt_disable(); + get_online_cpus_atomic(); for_each_online_cpu(cpu) { if (cpu >= maxcpu) break; smp_call_function_single(cpu, print_local_APIC, NULL, 1); } - preempt_enable(); + put_online_cpus_atomic(); } __apicdebuginit(void) print_PIC(void) @@ -2153,10 +2156,12 @@ static int ioapic_retrigger_irq(struct irq_data *data) unsigned long flags; int cpu; + get_online_cpus_atomic(); raw_spin_lock_irqsave(&vector_lock, flags); cpu = cpumask_first_and(cfg->domain, cpu_online_mask); apic->send_IPI_mask(cpumask_of(cpu), cfg->vector); raw_spin_unlock_irqrestore(&vector_lock, flags); + put_online_cpus_atomic(); return 1; } @@ -2175,6 +2180,7 @@ void send_cleanup_vector(struct irq_cfg *cfg) { cpumask_var_t cleanup_mask; + get_online_cpus_atomic(); if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) { unsigned int i; for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) @@ -2185,6 +2191,7 @@ void send_cleanup_vector(struct irq_cfg *cfg) free_cpumask_var(cleanup_mask); } cfg->move_in_progress = 0; + put_online_cpus_atomic(); } asmlinkage void smp_irq_move_cleanup_interrupt(void) @@ -2939,11 +2946,13 @@ unsigned int __create_irqs(unsigned int from, unsigned int count, int node) goto out_irqs; } + get_online_cpus_atomic(); raw_spin_lock_irqsave(&vector_lock, flags); for (i = 0; i < count; i++) if (__assign_irq_vector(irq + i, cfg[i], apic->target_cpus())) goto out_vecs; raw_spin_unlock_irqrestore(&vector_lock, flags); + put_online_cpus_atomic(); for (i = 0; i < count; i++) { irq_set_chip_data(irq + i, cfg[i]); @@ -2957,6 +2966,7 @@ out_vecs: for (i--; i >= 0; i--) __clear_irq_vector(irq + i, cfg[i]); raw_spin_unlock_irqrestore(&vector_lock, flags); + put_online_cpus_atomic(); out_irqs: for (i = 0; i < count; i++) free_irq_at(irq + i, cfg[i]); @@ -2994,9 +3004,11 @@ void destroy_irq(unsigned int irq) free_remapped_irq(irq); + get_online_cpus_atomic(); raw_spin_lock_irqsave(&vector_lock, flags); __clear_irq_vector(irq, cfg); raw_spin_unlock_irqrestore(&vector_lock, flags); + put_online_cpus_atomic(); free_irq_at(irq, cfg); } @@ -3365,8 +3377,11 @@ io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr) if (!cfg) return -EINVAL; ret = __add_pin_to_irq_node(cfg, node, attr->ioapic, attr->ioapic_pin); - if (!ret) + if (!ret) { + get_online_cpus_atomic(); setup_ioapic_irq(irq, cfg, attr); + put_online_cpus_atomic(); + } return ret; } diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index 
2f3a799..3eea984 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c @@ -83,13 +83,13 @@ static ssize_t therm_throt_device_show_##event##_##name( \ unsigned int cpu = dev->id; \ ssize_t ret; \ \ - preempt_disable(); /* CPU hotplug */ \ + get_online_cpus_atomic(); /* CPU hotplug */ \ if (cpu_online(cpu)) { \ ret = sprintf(buf, "%lu\n", \ per_cpu(thermal_state, cpu).event.name); \ } else \ ret = 0; \ - preempt_enable(); \ + put_online_cpus_atomic(); \ \ return ret; \ } diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 282375f..8126374 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -147,12 +147,12 @@ void flush_tlb_current_task(void) { struct mm_struct *mm = current->mm; - preempt_disable(); + get_online_cpus_atomic(); local_flush_tlb(); if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL); - preempt_enable(); + put_online_cpus_atomic(); } /* @@ -187,7 +187,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, unsigned long addr; unsigned act_entries, tlb_entries = 0; - preempt_disable(); + get_online_cpus_atomic(); if (current->active_mm != mm) goto flush_all; @@ -225,21 +225,21 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, start, end); - preempt_enable(); + put_online_cpus_atomic(); return; } flush_all: if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL); - preempt_enable(); + put_online_cpus_atomic(); } void flush_tlb_page(struct vm_area_struct *vma, unsigned long start) { struct mm_struct *mm = vma->vm_mm; - preempt_disable(); + get_online_cpus_atomic(); if (current->active_mm == mm) { if (current->mm) @@ -251,7 +251,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start) if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, start, 0UL); - preempt_enable(); + put_online_cpus_atomic(); } static void do_flush_tlb_all(void *info)
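
[Editor's note, not part of the original patch: the sketch below is a
minimal, hypothetical illustration of the conversion pattern applied
throughout this series. Any atomic section that previously relied on
preempt_disable()/preempt_enable() to keep the set of online CPUs
stable is bracketed with get/put_online_cpus_atomic() instead. The
function example_ipi_to_online_cpus() and the helper do_work_on() are
made up for illustration; get/put_online_cpus_atomic() are the APIs
introduced in patch 01/45 of this series.]

	#include <linux/cpu.h>
	#include <linux/cpumask.h>

	static void example_ipi_to_online_cpus(void)
	{
		int cpu;

		/*
		 * Old code would have used preempt_disable() here.  With
		 * stop_machine() gone from the CPU offline path, take the
		 * atomic hotplug read-side protection instead, so that no
		 * CPU in the following loop can go offline under us.
		 */
		get_online_cpus_atomic();
		for_each_online_cpu(cpu)
			do_work_on(cpu);	/* hypothetical per-CPU operation */
		put_online_cpus_atomic();
	}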