From: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com> To: tglx@linutronix.de, peterz@infradead.org, oleg@redhat.com, paulmck@linux.vnet.ibm.com, mingo@kernel.org, namhyung@kernel.org, walken@google.com, vincent.guittot@linaro.org, laijs@cn.fujitsu.com, David.Laight@aculab.com Cc: rostedt@goodmis.org, wangyun@linux.vnet.ibm.com, xiaoguangrong@linux.vnet.ibm.com, sbw@mit.edu, fweisbec@gmail.com, zhong@linux.vnet.ibm.com, nikunj@linux.vnet.ibm.com, srivatsa.bhat@linux.vnet.ibm.com, linux-pm@vger.kernel.org, linux-arch@vger.kernel.org, linuxppc-dev@lists.ozlabs.org, netdev@vger.kernel.org, linux-kernel@vger.kernel.org, Rusty Russell <rusty@rustcorp.com.au>, Alex Shi <alex.shi@intel.com>, KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>, Tejun Heo <tj@kernel.org>, Andrew Morton <akpm@linux-foundation.org>, Joonsoo Kim <js1304@gmail.com>"Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com> Subject: [PATCH v3 06/45] CPU hotplug: Sprinkle debugging checks to catch locking bugs Date: Fri, 28 Jun 2013 01:23:31 +0530 [thread overview] Message-ID: <20130627195331.29830.74970.stgit@srivatsabhat.in.ibm.com> (raw) In-Reply-To: <20130627195136.29830.10445.stgit@srivatsabhat.in.ibm.com> Now that we have a debug infrastructure in place to detect cases where get/put_online_cpus_atomic() had to be used, add these checks at the right spots to help catch places where we missed converting to the new APIs. Cc: Rusty Russell <rusty@rustcorp.com.au> Cc: Alex Shi <alex.shi@intel.com> Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Cc: Tejun Heo <tj@kernel.org> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Joonsoo Kim <js1304@gmail.com> Signed-off-by: Srivatsa S. 
Bhat <srivatsa.bhat@linux.vnet.ibm.com> --- include/linux/cpumask.h | 47 +++++++++++++++++++++++++++++++++++++++++++++-- lib/cpumask.c | 8 ++++++++ 2 files changed, 53 insertions(+), 2 deletions(-) diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 9197ca4..06d2c36 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -169,6 +169,7 @@ static inline unsigned int cpumask_any_but(const struct cpumask *mask, */ static inline unsigned int cpumask_first(const struct cpumask *srcp) { + check_hotplug_safe_cpumask(srcp); return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits); } @@ -184,6 +185,8 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp) /* -1 is a legal arg here. */ if (n != -1) cpumask_check(n); + + check_hotplug_safe_cpumask(srcp); return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1); } @@ -199,6 +202,8 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) /* -1 is a legal arg here. */ if (n != -1) cpumask_check(n); + + check_hotplug_safe_cpumask(srcp); return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1); } @@ -288,8 +293,15 @@ static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp) * * No static inline type checking - see Subtlety (1) above. 
*/ -#define cpumask_test_cpu(cpu, cpumask) \ - test_bit(cpumask_check(cpu), cpumask_bits((cpumask))) +#define cpumask_test_cpu(cpu, cpumask) \ +({ \ + int __ret; \ + \ + check_hotplug_safe_cpu(cpu, cpumask); \ + __ret = test_bit(cpumask_check(cpu), \ + cpumask_bits((cpumask))); \ + __ret; \ +}) /** * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask @@ -349,6 +361,9 @@ static inline int cpumask_and(struct cpumask *dstp, const struct cpumask *src1p, const struct cpumask *src2p) { + check_hotplug_safe_cpumask(src1p); + check_hotplug_safe_cpumask(src2p); + return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p), cpumask_bits(src2p), nr_cpumask_bits); } @@ -362,6 +377,9 @@ static inline int cpumask_and(struct cpumask *dstp, static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p, const struct cpumask *src2p) { + check_hotplug_safe_cpumask(src1p); + check_hotplug_safe_cpumask(src2p); + bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p), cpumask_bits(src2p), nr_cpumask_bits); } @@ -376,6 +394,9 @@ static inline void cpumask_xor(struct cpumask *dstp, const struct cpumask *src1p, const struct cpumask *src2p) { + check_hotplug_safe_cpumask(src1p); + check_hotplug_safe_cpumask(src2p); + bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p), cpumask_bits(src2p), nr_cpumask_bits); } @@ -392,6 +413,9 @@ static inline int cpumask_andnot(struct cpumask *dstp, const struct cpumask *src1p, const struct cpumask *src2p) { + check_hotplug_safe_cpumask(src1p); + check_hotplug_safe_cpumask(src2p); + return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p), cpumask_bits(src2p), nr_cpumask_bits); } @@ -404,6 +428,8 @@ static inline int cpumask_andnot(struct cpumask *dstp, static inline void cpumask_complement(struct cpumask *dstp, const struct cpumask *srcp) { + check_hotplug_safe_cpumask(srcp); + bitmap_complement(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits); } @@ -416,6 +442,9 @@ static inline void cpumask_complement(struct 
cpumask *dstp, static inline bool cpumask_equal(const struct cpumask *src1p, const struct cpumask *src2p) { + check_hotplug_safe_cpumask(src1p); + check_hotplug_safe_cpumask(src2p); + return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p), nr_cpumask_bits); } @@ -428,6 +457,10 @@ static inline bool cpumask_equal(const struct cpumask *src1p, static inline bool cpumask_intersects(const struct cpumask *src1p, const struct cpumask *src2p) { + + check_hotplug_safe_cpumask(src1p); + check_hotplug_safe_cpumask(src2p); + return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p), nr_cpumask_bits); } @@ -442,6 +475,9 @@ static inline bool cpumask_intersects(const struct cpumask *src1p, static inline int cpumask_subset(const struct cpumask *src1p, const struct cpumask *src2p) { + check_hotplug_safe_cpumask(src1p); + check_hotplug_safe_cpumask(src2p); + return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p), nr_cpumask_bits); } @@ -470,6 +506,12 @@ static inline bool cpumask_full(const struct cpumask *srcp) */ static inline unsigned int cpumask_weight(const struct cpumask *srcp) { + /* + * Often, we just want to have a rough estimate of the number of + * online CPUs, without going to the trouble of synchronizing with + * CPU hotplug. So don't invoke check_hotplug_safe_cpumask() here. 
+ */ + return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits); } @@ -507,6 +549,7 @@ static inline void cpumask_shift_left(struct cpumask *dstp, static inline void cpumask_copy(struct cpumask *dstp, const struct cpumask *srcp) { + check_hotplug_safe_cpumask(srcp); bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits); } diff --git a/lib/cpumask.c b/lib/cpumask.c index d327b87..481df57 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -7,12 +7,14 @@ int __first_cpu(const cpumask_t *srcp) { + check_hotplug_safe_cpumask(srcp); return min_t(int, NR_CPUS, find_first_bit(srcp->bits, NR_CPUS)); } EXPORT_SYMBOL(__first_cpu); int __next_cpu(int n, const cpumask_t *srcp) { + check_hotplug_safe_cpumask(srcp); return min_t(int, NR_CPUS, find_next_bit(srcp->bits, NR_CPUS, n+1)); } EXPORT_SYMBOL(__next_cpu); @@ -20,6 +22,7 @@ EXPORT_SYMBOL(__next_cpu); #if NR_CPUS > 64 int __next_cpu_nr(int n, const cpumask_t *srcp) { + check_hotplug_safe_cpumask(srcp); return min_t(int, nr_cpu_ids, find_next_bit(srcp->bits, nr_cpu_ids, n+1)); } @@ -37,6 +40,9 @@ EXPORT_SYMBOL(__next_cpu_nr); int cpumask_next_and(int n, const struct cpumask *src1p, const struct cpumask *src2p) { + check_hotplug_safe_cpumask(src1p); + check_hotplug_safe_cpumask(src2p); + while ((n = cpumask_next(n, src1p)) < nr_cpu_ids) if (cpumask_test_cpu(n, src2p)) break; @@ -57,6 +63,8 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu) unsigned int i; cpumask_check(cpu); + check_hotplug_safe_cpumask(mask); + for_each_cpu(i, mask) if (i != cpu) break;
WARNING: multiple messages have this Message-ID (diff)
From: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com> To: tglx@linutronix.de, peterz@infradead.org, tj@kernel.org, oleg@redhat.com, paulmck@linux.vnet.ibm.com, rusty@rustcorp.com.au, mingo@kernel.org, akpm@linux-foundation.org, namhyung@kernel.org, walken@google.com, vincent.guittot@linaro.org, laijs@cn.fujitsu.com, David.Laight@aculab.com Cc: rostedt@goodmis.org, wangyun@linux.vnet.ibm.com, xiaoguangrong@linux.vnet.ibm.com, sbw@mit.edu, fweisbec@gmail.com, zhong@linux.vnet.ibm.com, nikunj@linux.vnet.ibm.com, srivatsa.bhat@linux.vnet.ibm.com, linux-pm@vger.kernel.org, linux-arch@vger.kernel.org, linuxppc-dev@lists.ozlabs.org, netdev@vger.kernel.org, linux-kernel@vger.kernel.org, Alex Shi <alex.shi@intel.com>, KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>, Joonsoo Kim <js1304@gmail.com>"Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com> Subject: [PATCH v3 06/45] CPU hotplug: Sprinkle debugging checks to catch locking bugs Date: Fri, 28 Jun 2013 01:23:31 +0530 [thread overview] Message-ID: <20130627195331.29830.74970.stgit@srivatsabhat.in.ibm.com> (raw) Message-ID: <20130627195331.htrGoPSsUz6o-4gK1fmQQ_fFrNWuadvpuwk0RaXJS3I@z> (raw) In-Reply-To: <20130627195136.29830.10445.stgit@srivatsabhat.in.ibm.com> Now that we have a debug infrastructure in place to detect cases where get/put_online_cpus_atomic() had to be used, add these checks at the right spots to help catch places where we missed converting to the new APIs. Cc: Rusty Russell <rusty@rustcorp.com.au> Cc: Alex Shi <alex.shi@intel.com> Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Cc: Tejun Heo <tj@kernel.org> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Joonsoo Kim <js1304@gmail.com> Signed-off-by: Srivatsa S. 
Bhat <srivatsa.bhat@linux.vnet.ibm.com> --- include/linux/cpumask.h | 47 +++++++++++++++++++++++++++++++++++++++++++++-- lib/cpumask.c | 8 ++++++++ 2 files changed, 53 insertions(+), 2 deletions(-) diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 9197ca4..06d2c36 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -169,6 +169,7 @@ static inline unsigned int cpumask_any_but(const struct cpumask *mask, */ static inline unsigned int cpumask_first(const struct cpumask *srcp) { + check_hotplug_safe_cpumask(srcp); return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits); } @@ -184,6 +185,8 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp) /* -1 is a legal arg here. */ if (n != -1) cpumask_check(n); + + check_hotplug_safe_cpumask(srcp); return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1); } @@ -199,6 +202,8 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) /* -1 is a legal arg here. */ if (n != -1) cpumask_check(n); + + check_hotplug_safe_cpumask(srcp); return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1); } @@ -288,8 +293,15 @@ static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp) * * No static inline type checking - see Subtlety (1) above. 
*/ -#define cpumask_test_cpu(cpu, cpumask) \ - test_bit(cpumask_check(cpu), cpumask_bits((cpumask))) +#define cpumask_test_cpu(cpu, cpumask) \ +({ \ + int __ret; \ + \ + check_hotplug_safe_cpu(cpu, cpumask); \ + __ret = test_bit(cpumask_check(cpu), \ + cpumask_bits((cpumask))); \ + __ret; \ +}) /** * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask @@ -349,6 +361,9 @@ static inline int cpumask_and(struct cpumask *dstp, const struct cpumask *src1p, const struct cpumask *src2p) { + check_hotplug_safe_cpumask(src1p); + check_hotplug_safe_cpumask(src2p); + return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p), cpumask_bits(src2p), nr_cpumask_bits); } @@ -362,6 +377,9 @@ static inline int cpumask_and(struct cpumask *dstp, static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p, const struct cpumask *src2p) { + check_hotplug_safe_cpumask(src1p); + check_hotplug_safe_cpumask(src2p); + bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p), cpumask_bits(src2p), nr_cpumask_bits); } @@ -376,6 +394,9 @@ static inline void cpumask_xor(struct cpumask *dstp, const struct cpumask *src1p, const struct cpumask *src2p) { + check_hotplug_safe_cpumask(src1p); + check_hotplug_safe_cpumask(src2p); + bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p), cpumask_bits(src2p), nr_cpumask_bits); } @@ -392,6 +413,9 @@ static inline int cpumask_andnot(struct cpumask *dstp, const struct cpumask *src1p, const struct cpumask *src2p) { + check_hotplug_safe_cpumask(src1p); + check_hotplug_safe_cpumask(src2p); + return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p), cpumask_bits(src2p), nr_cpumask_bits); } @@ -404,6 +428,8 @@ static inline int cpumask_andnot(struct cpumask *dstp, static inline void cpumask_complement(struct cpumask *dstp, const struct cpumask *srcp) { + check_hotplug_safe_cpumask(srcp); + bitmap_complement(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits); } @@ -416,6 +442,9 @@ static inline void cpumask_complement(struct 
cpumask *dstp, static inline bool cpumask_equal(const struct cpumask *src1p, const struct cpumask *src2p) { + check_hotplug_safe_cpumask(src1p); + check_hotplug_safe_cpumask(src2p); + return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p), nr_cpumask_bits); } @@ -428,6 +457,10 @@ static inline bool cpumask_equal(const struct cpumask *src1p, static inline bool cpumask_intersects(const struct cpumask *src1p, const struct cpumask *src2p) { + + check_hotplug_safe_cpumask(src1p); + check_hotplug_safe_cpumask(src2p); + return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p), nr_cpumask_bits); } @@ -442,6 +475,9 @@ static inline bool cpumask_intersects(const struct cpumask *src1p, static inline int cpumask_subset(const struct cpumask *src1p, const struct cpumask *src2p) { + check_hotplug_safe_cpumask(src1p); + check_hotplug_safe_cpumask(src2p); + return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p), nr_cpumask_bits); } @@ -470,6 +506,12 @@ static inline bool cpumask_full(const struct cpumask *srcp) */ static inline unsigned int cpumask_weight(const struct cpumask *srcp) { + /* + * Often, we just want to have a rough estimate of the number of + * online CPUs, without going to the trouble of synchronizing with + * CPU hotplug. So don't invoke check_hotplug_safe_cpumask() here. 
+ */ + return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits); } @@ -507,6 +549,7 @@ static inline void cpumask_shift_left(struct cpumask *dstp, static inline void cpumask_copy(struct cpumask *dstp, const struct cpumask *srcp) { + check_hotplug_safe_cpumask(srcp); bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits); } diff --git a/lib/cpumask.c b/lib/cpumask.c index d327b87..481df57 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -7,12 +7,14 @@ int __first_cpu(const cpumask_t *srcp) { + check_hotplug_safe_cpumask(srcp); return min_t(int, NR_CPUS, find_first_bit(srcp->bits, NR_CPUS)); } EXPORT_SYMBOL(__first_cpu); int __next_cpu(int n, const cpumask_t *srcp) { + check_hotplug_safe_cpumask(srcp); return min_t(int, NR_CPUS, find_next_bit(srcp->bits, NR_CPUS, n+1)); } EXPORT_SYMBOL(__next_cpu); @@ -20,6 +22,7 @@ EXPORT_SYMBOL(__next_cpu); #if NR_CPUS > 64 int __next_cpu_nr(int n, const cpumask_t *srcp) { + check_hotplug_safe_cpumask(srcp); return min_t(int, nr_cpu_ids, find_next_bit(srcp->bits, nr_cpu_ids, n+1)); } @@ -37,6 +40,9 @@ EXPORT_SYMBOL(__next_cpu_nr); int cpumask_next_and(int n, const struct cpumask *src1p, const struct cpumask *src2p) { + check_hotplug_safe_cpumask(src1p); + check_hotplug_safe_cpumask(src2p); + while ((n = cpumask_next(n, src1p)) < nr_cpu_ids) if (cpumask_test_cpu(n, src2p)) break; @@ -57,6 +63,8 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu) unsigned int i; cpumask_check(cpu); + check_hotplug_safe_cpumask(mask); + for_each_cpu(i, mask) if (i != cpu) break;
next prev parent reply other threads:[~2013-06-27 19:53 UTC|newest] Thread overview: 101+ messages / expand[flat|nested] mbox.gz Atom feed top 2013-06-27 19:52 [PATCH v3 00/45] CPU hotplug: stop_machine()-free CPU hotplug, part 1 Srivatsa S. Bhat 2013-06-27 19:52 ` [PATCH v3 01/45] CPU hotplug: Provide APIs to prevent CPU offline from atomic context Srivatsa S. Bhat 2013-06-27 19:52 ` Srivatsa S. Bhat 2013-06-27 19:52 ` [PATCH v3 02/45] CPU hotplug: Clarify the usage of different synchronization APIs Srivatsa S. Bhat 2013-06-27 19:52 ` Srivatsa S. Bhat 2013-06-27 19:52 ` [PATCH v3 03/45] Documentation, CPU hotplug: Recommend usage of get/put_online_cpus_atomic() Srivatsa S. Bhat 2013-06-27 19:52 ` Srivatsa S. Bhat 2013-06-27 19:53 ` [PATCH v3 04/45] CPU hotplug: Add infrastructure to check lacking hotplug synchronization Srivatsa S. Bhat 2013-06-27 19:53 ` Srivatsa S. Bhat 2013-06-27 19:53 ` [PATCH v3 05/45] CPU hotplug: Protect set_cpu_online() to avoid false-positives Srivatsa S. Bhat 2013-06-27 19:53 ` Srivatsa S. Bhat 2013-06-27 19:53 ` Srivatsa S. Bhat [this message] 2013-06-27 19:53 ` [PATCH v3 06/45] CPU hotplug: Sprinkle debugging checks to catch locking bugs Srivatsa S. Bhat 2013-06-27 19:53 ` [PATCH v3 07/45] CPU hotplug: Add _nocheck() variants of accessor functions Srivatsa S. Bhat 2013-06-27 19:53 ` Srivatsa S. Bhat 2013-06-27 19:53 ` [PATCH v3 08/45] CPU hotplug: Expose the new debug config option Srivatsa S. Bhat 2013-06-27 19:53 ` Srivatsa S. Bhat 2013-06-27 19:54 ` [PATCH v3 09/45] CPU hotplug: Convert preprocessor macros to static inline functions Srivatsa S. Bhat 2013-06-27 19:54 ` Srivatsa S. Bhat 2013-06-27 19:54 ` [PATCH v3 10/45] smp: Use get/put_online_cpus_atomic() to prevent CPU offline Srivatsa S. Bhat 2013-06-27 19:54 ` Srivatsa S. Bhat 2013-07-02 5:32 ` Michael Wang 2013-07-02 5:32 ` Michael Wang 2013-07-02 8:25 ` Srivatsa S. Bhat 2013-07-02 8:25 ` Srivatsa S. Bhat 2013-07-02 8:47 ` Michael Wang 2013-07-02 9:51 ` Srivatsa S. 
Bhat 2013-07-02 9:51 ` Srivatsa S. Bhat 2013-07-02 10:08 ` Michael Wang 2013-07-02 10:08 ` Michael Wang 2013-06-27 19:54 ` [PATCH v3 11/45] sched/core: " Srivatsa S. Bhat 2013-06-27 19:54 ` Srivatsa S. Bhat 2013-06-27 19:54 ` [PATCH v3 12/45] migration: Use raw_spin_lock/unlock since interrupts are already disabled Srivatsa S. Bhat 2013-06-27 19:54 ` Srivatsa S. Bhat 2013-06-27 19:54 ` [PATCH v3 13/45] sched/fair: Use get/put_online_cpus_atomic() to prevent CPU offline Srivatsa S. Bhat 2013-06-27 19:54 ` Srivatsa S. Bhat 2013-06-27 19:54 ` [PATCH v3 14/45] timer: " Srivatsa S. Bhat 2013-06-27 19:54 ` Srivatsa S. Bhat 2013-06-27 19:54 ` [PATCH v3 15/45] sched/rt: " Srivatsa S. Bhat 2013-06-27 19:54 ` Srivatsa S. Bhat 2013-06-27 19:55 ` [PATCH v3 16/45] rcu: Use cpu_is_offline_nocheck() to avoid false-positive warnings Srivatsa S. Bhat 2013-06-27 19:55 ` Srivatsa S. Bhat 2013-06-27 20:12 ` Paul E. McKenney 2013-06-27 20:12 ` Paul E. McKenney 2013-06-27 19:55 ` [PATCH v3 17/45] tick-broadcast: Use get/put_online_cpus_atomic() to prevent CPU offline Srivatsa S. Bhat 2013-06-27 19:55 ` Srivatsa S. Bhat 2013-06-27 19:55 ` [PATCH v3 18/45] time/clocksource: " Srivatsa S. Bhat 2013-06-27 19:55 ` Srivatsa S. Bhat 2013-06-27 19:56 ` [PATCH v3 19/45] softirq: " Srivatsa S. Bhat 2013-06-27 19:56 ` Srivatsa S. Bhat 2013-06-27 19:56 ` [PATCH v3 20/45] irq: " Srivatsa S. Bhat 2013-06-27 19:56 ` Srivatsa S. Bhat 2013-06-27 19:56 ` [PATCH v3 21/45] net: " Srivatsa S. Bhat 2013-06-27 19:56 ` Srivatsa S. Bhat 2013-06-27 19:56 ` [PATCH v3 22/45] block: " Srivatsa S. Bhat 2013-06-27 19:56 ` Srivatsa S. Bhat 2013-06-27 19:56 ` [PATCH v3 23/45] percpu_counter: Use _nocheck version of for_each_online_cpu() Srivatsa S. Bhat 2013-06-27 19:56 ` Srivatsa S. Bhat 2013-06-27 19:56 ` [PATCH v3 24/45] infiniband: ehca: Use get/put_online_cpus_atomic() to prevent CPU offline Srivatsa S. Bhat 2013-06-27 19:56 ` Srivatsa S. Bhat 2013-06-27 19:56 ` [PATCH v3 25/45] [SCSI] fcoe: " Srivatsa S. 
Bhat 2013-06-27 19:56 ` Srivatsa S. Bhat 2013-06-27 19:57 ` [PATCH v3 26/45] staging/octeon: " Srivatsa S. Bhat 2013-06-27 19:57 ` Srivatsa S. Bhat 2013-06-27 19:57 ` [PATCH v3 27/45] x86: " Srivatsa S. Bhat 2013-06-27 19:57 ` Srivatsa S. Bhat 2013-06-27 19:57 ` [PATCH v3 28/45] perf/x86: " Srivatsa S. Bhat 2013-06-27 19:57 ` Srivatsa S. Bhat 2013-06-27 19:57 ` [PATCH v3 29/45] KVM: " Srivatsa S. Bhat 2013-06-27 19:57 ` Srivatsa S. Bhat 2013-06-27 19:57 ` [PATCH v3 30/45] x86/xen: " Srivatsa S. Bhat 2013-06-27 19:57 ` Srivatsa S. Bhat 2013-06-27 19:57 ` [PATCH v3 31/45] alpha/smp: " Srivatsa S. Bhat 2013-06-27 19:57 ` Srivatsa S. Bhat 2013-06-27 19:58 ` [PATCH v3 32/45] blackfin/smp: " Srivatsa S. Bhat 2013-06-27 19:58 ` Srivatsa S. Bhat 2013-06-27 19:58 ` [PATCH v3 33/45] cris/smp: " Srivatsa S. Bhat 2013-06-27 19:58 ` Srivatsa S. Bhat 2013-06-27 19:58 ` [PATCH v3 34/45] hexagon/smp: " Srivatsa S. Bhat 2013-06-27 19:58 ` Srivatsa S. Bhat 2013-06-27 19:58 ` [PATCH v3 35/45] ia64: irq, perfmon: " Srivatsa S. Bhat 2013-06-27 19:58 ` Srivatsa S. Bhat 2013-06-27 19:59 ` [PATCH v3 36/45] ia64: smp, tlb: " Srivatsa S. Bhat 2013-06-27 19:59 ` Srivatsa S. Bhat 2013-06-27 19:59 ` [PATCH v3 37/45] m32r: " Srivatsa S. Bhat 2013-06-27 19:59 ` [PATCH v3 38/45] MIPS: " Srivatsa S. Bhat 2013-06-27 19:59 ` Srivatsa S. Bhat 2013-06-27 19:59 ` [PATCH v3 39/45] mn10300: " Srivatsa S. Bhat 2013-06-27 19:59 ` Srivatsa S. Bhat 2013-06-27 19:59 ` [PATCH v3 40/45] powerpc, irq: Use GFP_ATOMIC allocations in atomic context Srivatsa S. Bhat 2013-06-27 19:59 ` Srivatsa S. Bhat 2013-06-27 20:00 ` [PATCH v3 41/45] powerpc: Use get/put_online_cpus_atomic() to prevent CPU offline Srivatsa S. Bhat 2013-06-27 20:00 ` Srivatsa S. Bhat 2013-06-27 20:00 ` [PATCH v3 42/45] powerpc: Use get/put_online_cpus_atomic() to avoid false-positive warning Srivatsa S. Bhat 2013-06-27 20:00 ` Srivatsa S. 
Bhat 2013-06-27 20:00 ` [PATCH v3 43/45] sh: Use get/put_online_cpus_atomic() to prevent CPU offline Srivatsa S. Bhat 2013-06-27 20:00 ` Srivatsa S. Bhat 2013-06-27 20:00 ` [PATCH v3 44/45] sparc: " Srivatsa S. Bhat 2013-06-27 20:00 ` Srivatsa S. Bhat 2013-06-27 20:00 ` [PATCH v3 45/45] tile: " Srivatsa S. Bhat 2013-06-27 20:00 ` Srivatsa S. Bhat
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=20130627195331.29830.74970.stgit@srivatsabhat.in.ibm.com \ --to=srivatsa.bhat@linux.vnet.ibm.com \ --cc=David.Laight@aculab.com \ --cc=akpm@linux-foundation.org \ --cc=alex.shi@intel.com \ --cc=fweisbec@gmail.com \ --cc=js1304@gmail.com \ --cc=kosaki.motohiro@jp.fujitsu.com \ --cc=laijs@cn.fujitsu.com \ --cc=linux-arch@vger.kernel.org \ --cc=linux-kernel@vger.kernel.org \ --cc=linux-pm@vger.kernel.org \ --cc=linuxppc-dev@lists.ozlabs.org \ --cc=mingo@kernel.org \ --cc=namhyung@kernel.org \ --cc=netdev@vger.kernel.org \ --cc=nikunj@linux.vnet.ibm.com \ --cc=oleg@redhat.com \ --cc=paulmck@linux.vnet.ibm.com \ --cc=peterz@infradead.org \ --cc=rostedt@goodmis.org \ --cc=rusty@rustcorp.com.au \ --cc=sbw@mit.edu \ --cc=tglx@linutronix.de \ --cc=tj@kernel.org \ --cc=vincent.guittot@linaro.org \ --cc=walken@google.com \ --cc=wangyun@linux.vnet.ibm.com \ --cc=xiaoguangrong@linux.vnet.ibm.com \ --cc=zhong@linux.vnet.ibm.com \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link. Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions for how to clone and mirror all data and code used for this inbox; as well as URLs for NNTP newsgroup(s).