From: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com> To: tglx@linutronix.de, peterz@infradead.org, oleg@redhat.com, paulmck@linux.vnet.ibm.com, mingo@kernel.org, namhyung@kernel.org, walken@google.com, vincent.guittot@linaro.org, laijs@cn.fujitsu.com Cc: linux-arch@vger.kernel.org, Alex Shi <alex.shi@intel.com>, nikunj@linux.vnet.ibm.com, zhong@linux.vnet.ibm.com, linux-pm@vger.kernel.org, fweisbec@gmail.com, Rusty Russell <rusty@rustcorp.com.au>, linux-kernel@vger.kernel.org, rostedt@goodmis.org, xiaoguangrong@linux.vnet.ibm.com, sbw@mit.edu, Joonsoo Kim <js1304@gmail.com>, wangyun@linux.vnet.ibm.com, "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com>, netdev@vger.kernel.org, Tejun Heo <tj@kernel.org>, Andrew Morton <akpm@linux-foundation.org>, KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>, linuxppc-dev@lists.ozlabs.org Subject: [PATCH v2 06/45] CPU hotplug: Sprinkle debugging checks to catch locking bugs Date: Wed, 26 Jun 2013 01:56:41 +0530 [thread overview] Message-ID: <20130625202641.16593.90767.stgit@srivatsabhat.in.ibm.com> (raw) In-Reply-To: <20130625202452.16593.22810.stgit@srivatsabhat.in.ibm.com> Now that we have a debug infrastructure in place to detect cases where get/put_online_cpus_atomic() had to be used, add these checks at the right spots to help catch places where we missed converting to the new APIs. Cc: Rusty Russell <rusty@rustcorp.com.au> Cc: Alex Shi <alex.shi@intel.com> Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Cc: Tejun Heo <tj@kernel.org> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Joonsoo Kim <js1304@gmail.com> Signed-off-by: Srivatsa S. 
Bhat <srivatsa.bhat@linux.vnet.ibm.com> --- include/linux/cpumask.h | 47 +++++++++++++++++++++++++++++++++++++++++++++-- lib/cpumask.c | 8 ++++++++ 2 files changed, 53 insertions(+), 2 deletions(-) diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 9197ca4..06d2c36 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -169,6 +169,7 @@ static inline unsigned int cpumask_any_but(const struct cpumask *mask, */ static inline unsigned int cpumask_first(const struct cpumask *srcp) { + check_hotplug_safe_cpumask(srcp); return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits); } @@ -184,6 +185,8 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp) /* -1 is a legal arg here. */ if (n != -1) cpumask_check(n); + + check_hotplug_safe_cpumask(srcp); return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1); } @@ -199,6 +202,8 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) /* -1 is a legal arg here. */ if (n != -1) cpumask_check(n); + + check_hotplug_safe_cpumask(srcp); return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1); } @@ -288,8 +293,15 @@ static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp) * * No static inline type checking - see Subtlety (1) above. 
*/ -#define cpumask_test_cpu(cpu, cpumask) \ - test_bit(cpumask_check(cpu), cpumask_bits((cpumask))) +#define cpumask_test_cpu(cpu, cpumask) \ +({ \ + int __ret; \ + \ + check_hotplug_safe_cpu(cpu, cpumask); \ + __ret = test_bit(cpumask_check(cpu), \ + cpumask_bits((cpumask))); \ + __ret; \ +}) /** * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask @@ -349,6 +361,9 @@ static inline int cpumask_and(struct cpumask *dstp, const struct cpumask *src1p, const struct cpumask *src2p) { + check_hotplug_safe_cpumask(src1p); + check_hotplug_safe_cpumask(src2p); + return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p), cpumask_bits(src2p), nr_cpumask_bits); } @@ -362,6 +377,9 @@ static inline int cpumask_and(struct cpumask *dstp, static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p, const struct cpumask *src2p) { + check_hotplug_safe_cpumask(src1p); + check_hotplug_safe_cpumask(src2p); + bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p), cpumask_bits(src2p), nr_cpumask_bits); } @@ -376,6 +394,9 @@ static inline void cpumask_xor(struct cpumask *dstp, const struct cpumask *src1p, const struct cpumask *src2p) { + check_hotplug_safe_cpumask(src1p); + check_hotplug_safe_cpumask(src2p); + bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p), cpumask_bits(src2p), nr_cpumask_bits); } @@ -392,6 +413,9 @@ static inline int cpumask_andnot(struct cpumask *dstp, const struct cpumask *src1p, const struct cpumask *src2p) { + check_hotplug_safe_cpumask(src1p); + check_hotplug_safe_cpumask(src2p); + return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p), cpumask_bits(src2p), nr_cpumask_bits); } @@ -404,6 +428,8 @@ static inline int cpumask_andnot(struct cpumask *dstp, static inline void cpumask_complement(struct cpumask *dstp, const struct cpumask *srcp) { + check_hotplug_safe_cpumask(srcp); + bitmap_complement(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits); } @@ -416,6 +442,9 @@ static inline void cpumask_complement(struct 
cpumask *dstp, static inline bool cpumask_equal(const struct cpumask *src1p, const struct cpumask *src2p) { + check_hotplug_safe_cpumask(src1p); + check_hotplug_safe_cpumask(src2p); + return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p), nr_cpumask_bits); } @@ -428,6 +457,10 @@ static inline bool cpumask_equal(const struct cpumask *src1p, static inline bool cpumask_intersects(const struct cpumask *src1p, const struct cpumask *src2p) { + + check_hotplug_safe_cpumask(src1p); + check_hotplug_safe_cpumask(src2p); + return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p), nr_cpumask_bits); } @@ -442,6 +475,9 @@ static inline bool cpumask_intersects(const struct cpumask *src1p, static inline int cpumask_subset(const struct cpumask *src1p, const struct cpumask *src2p) { + check_hotplug_safe_cpumask(src1p); + check_hotplug_safe_cpumask(src2p); + return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p), nr_cpumask_bits); } @@ -470,6 +506,12 @@ static inline bool cpumask_full(const struct cpumask *srcp) */ static inline unsigned int cpumask_weight(const struct cpumask *srcp) { + /* + * Often, we just want to have a rough estimate of the number of + * online CPUs, without going to the trouble of synchronizing with + * CPU hotplug. So don't invoke check_hotplug_safe_cpumask() here. 
+ */ + return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits); } @@ -507,6 +549,7 @@ static inline void cpumask_shift_left(struct cpumask *dstp, static inline void cpumask_copy(struct cpumask *dstp, const struct cpumask *srcp) { + check_hotplug_safe_cpumask(srcp); bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits); } diff --git a/lib/cpumask.c b/lib/cpumask.c index d327b87..481df57 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -7,12 +7,14 @@ int __first_cpu(const cpumask_t *srcp) { + check_hotplug_safe_cpumask(srcp); return min_t(int, NR_CPUS, find_first_bit(srcp->bits, NR_CPUS)); } EXPORT_SYMBOL(__first_cpu); int __next_cpu(int n, const cpumask_t *srcp) { + check_hotplug_safe_cpumask(srcp); return min_t(int, NR_CPUS, find_next_bit(srcp->bits, NR_CPUS, n+1)); } EXPORT_SYMBOL(__next_cpu); @@ -20,6 +22,7 @@ EXPORT_SYMBOL(__next_cpu); #if NR_CPUS > 64 int __next_cpu_nr(int n, const cpumask_t *srcp) { + check_hotplug_safe_cpumask(srcp); return min_t(int, nr_cpu_ids, find_next_bit(srcp->bits, nr_cpu_ids, n+1)); } @@ -37,6 +40,9 @@ EXPORT_SYMBOL(__next_cpu_nr); int cpumask_next_and(int n, const struct cpumask *src1p, const struct cpumask *src2p) { + check_hotplug_safe_cpumask(src1p); + check_hotplug_safe_cpumask(src2p); + while ((n = cpumask_next(n, src1p)) < nr_cpu_ids) if (cpumask_test_cpu(n, src2p)) break; @@ -57,6 +63,8 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu) unsigned int i; cpumask_check(cpu); + check_hotplug_safe_cpumask(mask); + for_each_cpu(i, mask) if (i != cpu) break;
WARNING: multiple messages have this Message-ID (diff)
From: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com> To: tglx@linutronix.de, peterz@infradead.org, tj@kernel.org, oleg@redhat.com, paulmck@linux.vnet.ibm.com, rusty@rustcorp.com.au, mingo@kernel.org, akpm@linux-foundation.org, namhyung@kernel.org, walken@google.com, vincent.guittot@linaro.org, laijs@cn.fujitsu.com Cc: rostedt@goodmis.org, wangyun@linux.vnet.ibm.com, xiaoguangrong@linux.vnet.ibm.com, sbw@mit.edu, fweisbec@gmail.com, zhong@linux.vnet.ibm.com, nikunj@linux.vnet.ibm.com, srivatsa.bhat@linux.vnet.ibm.com, linux-pm@vger.kernel.org, linux-arch@vger.kernel.org, linuxppc-dev@lists.ozlabs.org, netdev@vger.kernel.org, linux-kernel@vger.kernel.org, Alex Shi <alex.shi@intel.com>, KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>, Joonsoo Kim <js1304@gmail.com>"Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com> Subject: [PATCH v2 06/45] CPU hotplug: Sprinkle debugging checks to catch locking bugs Date: Wed, 26 Jun 2013 01:56:41 +0530 [thread overview] Message-ID: <20130625202641.16593.90767.stgit@srivatsabhat.in.ibm.com> (raw) Message-ID: <20130625202641.4EC8dfeDp_0uxe-QLT1Gd-9HPCX_RqS4paA_jywXgFU@z> (raw) In-Reply-To: <20130625202452.16593.22810.stgit@srivatsabhat.in.ibm.com> Now that we have a debug infrastructure in place to detect cases where get/put_online_cpus_atomic() had to be used, add these checks at the right spots to help catch places where we missed converting to the new APIs. Cc: Rusty Russell <rusty@rustcorp.com.au> Cc: Alex Shi <alex.shi@intel.com> Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Cc: Tejun Heo <tj@kernel.org> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Joonsoo Kim <js1304@gmail.com> Signed-off-by: Srivatsa S. 
Bhat <srivatsa.bhat@linux.vnet.ibm.com> --- include/linux/cpumask.h | 47 +++++++++++++++++++++++++++++++++++++++++++++-- lib/cpumask.c | 8 ++++++++ 2 files changed, 53 insertions(+), 2 deletions(-) diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 9197ca4..06d2c36 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -169,6 +169,7 @@ static inline unsigned int cpumask_any_but(const struct cpumask *mask, */ static inline unsigned int cpumask_first(const struct cpumask *srcp) { + check_hotplug_safe_cpumask(srcp); return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits); } @@ -184,6 +185,8 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp) /* -1 is a legal arg here. */ if (n != -1) cpumask_check(n); + + check_hotplug_safe_cpumask(srcp); return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1); } @@ -199,6 +202,8 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) /* -1 is a legal arg here. */ if (n != -1) cpumask_check(n); + + check_hotplug_safe_cpumask(srcp); return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1); } @@ -288,8 +293,15 @@ static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp) * * No static inline type checking - see Subtlety (1) above. 
*/ -#define cpumask_test_cpu(cpu, cpumask) \ - test_bit(cpumask_check(cpu), cpumask_bits((cpumask))) +#define cpumask_test_cpu(cpu, cpumask) \ +({ \ + int __ret; \ + \ + check_hotplug_safe_cpu(cpu, cpumask); \ + __ret = test_bit(cpumask_check(cpu), \ + cpumask_bits((cpumask))); \ + __ret; \ +}) /** * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask @@ -349,6 +361,9 @@ static inline int cpumask_and(struct cpumask *dstp, const struct cpumask *src1p, const struct cpumask *src2p) { + check_hotplug_safe_cpumask(src1p); + check_hotplug_safe_cpumask(src2p); + return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p), cpumask_bits(src2p), nr_cpumask_bits); } @@ -362,6 +377,9 @@ static inline int cpumask_and(struct cpumask *dstp, static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p, const struct cpumask *src2p) { + check_hotplug_safe_cpumask(src1p); + check_hotplug_safe_cpumask(src2p); + bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p), cpumask_bits(src2p), nr_cpumask_bits); } @@ -376,6 +394,9 @@ static inline void cpumask_xor(struct cpumask *dstp, const struct cpumask *src1p, const struct cpumask *src2p) { + check_hotplug_safe_cpumask(src1p); + check_hotplug_safe_cpumask(src2p); + bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p), cpumask_bits(src2p), nr_cpumask_bits); } @@ -392,6 +413,9 @@ static inline int cpumask_andnot(struct cpumask *dstp, const struct cpumask *src1p, const struct cpumask *src2p) { + check_hotplug_safe_cpumask(src1p); + check_hotplug_safe_cpumask(src2p); + return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p), cpumask_bits(src2p), nr_cpumask_bits); } @@ -404,6 +428,8 @@ static inline int cpumask_andnot(struct cpumask *dstp, static inline void cpumask_complement(struct cpumask *dstp, const struct cpumask *srcp) { + check_hotplug_safe_cpumask(srcp); + bitmap_complement(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits); } @@ -416,6 +442,9 @@ static inline void cpumask_complement(struct 
cpumask *dstp, static inline bool cpumask_equal(const struct cpumask *src1p, const struct cpumask *src2p) { + check_hotplug_safe_cpumask(src1p); + check_hotplug_safe_cpumask(src2p); + return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p), nr_cpumask_bits); } @@ -428,6 +457,10 @@ static inline bool cpumask_equal(const struct cpumask *src1p, static inline bool cpumask_intersects(const struct cpumask *src1p, const struct cpumask *src2p) { + + check_hotplug_safe_cpumask(src1p); + check_hotplug_safe_cpumask(src2p); + return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p), nr_cpumask_bits); } @@ -442,6 +475,9 @@ static inline bool cpumask_intersects(const struct cpumask *src1p, static inline int cpumask_subset(const struct cpumask *src1p, const struct cpumask *src2p) { + check_hotplug_safe_cpumask(src1p); + check_hotplug_safe_cpumask(src2p); + return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p), nr_cpumask_bits); } @@ -470,6 +506,12 @@ static inline bool cpumask_full(const struct cpumask *srcp) */ static inline unsigned int cpumask_weight(const struct cpumask *srcp) { + /* + * Often, we just want to have a rough estimate of the number of + * online CPUs, without going to the trouble of synchronizing with + * CPU hotplug. So don't invoke check_hotplug_safe_cpumask() here. 
+ */ + return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits); } @@ -507,6 +549,7 @@ static inline void cpumask_shift_left(struct cpumask *dstp, static inline void cpumask_copy(struct cpumask *dstp, const struct cpumask *srcp) { + check_hotplug_safe_cpumask(srcp); bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits); } diff --git a/lib/cpumask.c b/lib/cpumask.c index d327b87..481df57 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -7,12 +7,14 @@ int __first_cpu(const cpumask_t *srcp) { + check_hotplug_safe_cpumask(srcp); return min_t(int, NR_CPUS, find_first_bit(srcp->bits, NR_CPUS)); } EXPORT_SYMBOL(__first_cpu); int __next_cpu(int n, const cpumask_t *srcp) { + check_hotplug_safe_cpumask(srcp); return min_t(int, NR_CPUS, find_next_bit(srcp->bits, NR_CPUS, n+1)); } EXPORT_SYMBOL(__next_cpu); @@ -20,6 +22,7 @@ EXPORT_SYMBOL(__next_cpu); #if NR_CPUS > 64 int __next_cpu_nr(int n, const cpumask_t *srcp) { + check_hotplug_safe_cpumask(srcp); return min_t(int, nr_cpu_ids, find_next_bit(srcp->bits, nr_cpu_ids, n+1)); } @@ -37,6 +40,9 @@ EXPORT_SYMBOL(__next_cpu_nr); int cpumask_next_and(int n, const struct cpumask *src1p, const struct cpumask *src2p) { + check_hotplug_safe_cpumask(src1p); + check_hotplug_safe_cpumask(src2p); + while ((n = cpumask_next(n, src1p)) < nr_cpu_ids) if (cpumask_test_cpu(n, src2p)) break; @@ -57,6 +63,8 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu) unsigned int i; cpumask_check(cpu); + check_hotplug_safe_cpumask(mask); + for_each_cpu(i, mask) if (i != cpu) break;
next prev parent reply other threads:[~2013-06-25 20:26 UTC|newest] Thread overview: 130+ messages / expand[flat|nested] mbox.gz Atom feed top 2013-06-25 20:25 [PATCH v2 00/45] CPU hotplug: stop_machine()-free CPU hotplug, part 1 Srivatsa S. Bhat 2013-06-25 20:25 ` Srivatsa S. Bhat 2013-06-25 20:25 ` [PATCH v2 01/45] CPU hotplug: Provide APIs to prevent CPU offline from atomic context Srivatsa S. Bhat 2013-06-25 20:25 ` Srivatsa S. Bhat 2013-06-25 20:26 ` [PATCH v2 02/45] CPU hotplug: Clarify the usage of different synchronization APIs Srivatsa S. Bhat 2013-06-25 20:26 ` Srivatsa S. Bhat 2013-06-25 20:26 ` [PATCH v2 03/45] Documentation, CPU hotplug: Recommend usage of get/put_online_cpus_atomic() Srivatsa S. Bhat 2013-06-25 20:26 ` Srivatsa S. Bhat 2013-06-25 20:26 ` [PATCH v2 04/45] CPU hotplug: Add infrastructure to check lacking hotplug synchronization Srivatsa S. Bhat 2013-06-25 20:26 ` Srivatsa S. Bhat 2013-06-25 20:26 ` [PATCH v2 05/45] CPU hotplug: Protect set_cpu_online() to avoid false-positives Srivatsa S. Bhat 2013-06-25 20:26 ` Srivatsa S. Bhat 2013-06-25 20:26 ` Srivatsa S. Bhat [this message] 2013-06-25 20:26 ` [PATCH v2 06/45] CPU hotplug: Sprinkle debugging checks to catch locking bugs Srivatsa S. Bhat 2013-06-25 20:26 ` [PATCH v2 07/45] CPU hotplug: Expose the new debug config option Srivatsa S. Bhat 2013-06-25 20:26 ` Srivatsa S. Bhat 2013-06-25 20:26 ` [PATCH v2 08/45] CPU hotplug: Convert preprocessor macros to static inline functions Srivatsa S. Bhat 2013-06-25 20:26 ` Srivatsa S. Bhat 2013-06-25 20:27 ` [PATCH v2 09/45] smp: Use get/put_online_cpus_atomic() to prevent CPU offline Srivatsa S. Bhat 2013-06-25 20:27 ` Srivatsa S. Bhat 2013-06-25 20:27 ` [PATCH v2 10/45] sched/core: " Srivatsa S. Bhat 2013-06-25 20:27 ` Srivatsa S. Bhat 2013-06-25 20:27 ` [PATCH v2 11/45] migration: Use raw_spin_lock/unlock since interrupts are already disabled Srivatsa S. Bhat 2013-06-25 20:27 ` Srivatsa S. 
Bhat 2013-06-25 20:27 ` [PATCH v2 12/45] sched/fair: Use get/put_online_cpus_atomic() to prevent CPU offline Srivatsa S. Bhat 2013-06-25 20:27 ` Srivatsa S. Bhat 2013-06-25 20:27 ` [PATCH v2 13/45] timer: " Srivatsa S. Bhat 2013-06-25 20:27 ` Srivatsa S. Bhat 2013-06-25 20:27 ` [PATCH v2 14/45] sched/rt: " Srivatsa S. Bhat 2013-06-25 20:27 ` Srivatsa S. Bhat 2013-06-25 20:27 ` [PATCH v2 15/45] rcu: " Srivatsa S. Bhat 2013-06-25 20:27 ` Srivatsa S. Bhat 2013-06-25 22:00 ` Paul E. McKenney 2013-06-26 14:09 ` Srivatsa S. Bhat 2013-06-26 14:29 ` David Laight 2013-06-26 14:29 ` David Laight 2013-06-26 14:34 ` Paul E. McKenney 2013-06-26 14:51 ` Steven Rostedt 2013-06-26 14:51 ` Steven Rostedt 2013-06-26 15:21 ` Tejun Heo 2013-06-26 15:33 ` Steven Rostedt 2013-06-26 15:33 ` Steven Rostedt 2013-06-26 17:29 ` Tejun Heo 2013-06-26 18:28 ` Srivatsa S. Bhat 2013-06-26 18:28 ` Srivatsa S. Bhat 2013-06-26 21:34 ` Tejun Heo 2013-06-26 21:34 ` Tejun Heo 2013-06-27 6:53 ` Srivatsa S. Bhat 2013-06-27 6:53 ` Srivatsa S. Bhat 2013-06-26 18:22 ` Srivatsa S. Bhat 2013-06-26 18:22 ` Srivatsa S. Bhat 2013-06-27 8:54 ` David Laight 2013-06-27 8:54 ` David Laight 2013-06-27 10:06 ` Srivatsa S. Bhat 2013-06-26 14:45 ` Paul E. McKenney 2013-06-26 18:18 ` Srivatsa S. Bhat 2013-06-26 14:33 ` Paul E. McKenney 2013-06-25 20:28 ` [PATCH v2 16/45] tick-broadcast: " Srivatsa S. Bhat 2013-06-25 20:28 ` Srivatsa S. Bhat 2013-06-25 20:28 ` [PATCH v2 17/45] time/clocksource: " Srivatsa S. Bhat 2013-06-25 20:28 ` Srivatsa S. Bhat 2013-06-25 20:28 ` [PATCH v2 18/45] softirq: " Srivatsa S. Bhat 2013-06-25 20:28 ` Srivatsa S. Bhat 2013-06-25 20:28 ` [PATCH v2 19/45] irq: " Srivatsa S. Bhat 2013-06-25 20:28 ` Srivatsa S. Bhat 2013-06-25 20:29 ` [PATCH v2 20/45] net: " Srivatsa S. Bhat 2013-06-25 20:29 ` Srivatsa S. Bhat 2013-06-25 20:29 ` [PATCH v2 21/45] block: " Srivatsa S. Bhat 2013-06-25 20:29 ` Srivatsa S. Bhat 2013-06-25 20:29 ` [PATCH v2 22/45] percpu_counter: " Srivatsa S. 
Bhat 2013-06-25 20:29 ` Srivatsa S. Bhat 2013-06-25 20:29 ` [PATCH v2 23/45] infiniband: ehca: " Srivatsa S. Bhat 2013-06-25 20:29 ` Srivatsa S. Bhat 2013-06-25 20:29 ` [PATCH v2 24/45] [SCSI] fcoe: " Srivatsa S. Bhat 2013-06-25 20:29 ` Srivatsa S. Bhat 2013-06-25 20:30 ` [PATCH v2 25/45] staging/octeon: " Srivatsa S. Bhat 2013-06-25 20:30 ` Srivatsa S. Bhat 2013-06-25 20:45 ` Greg Kroah-Hartman 2013-06-25 20:45 ` Greg Kroah-Hartman 2013-06-25 20:30 ` [PATCH v2 26/45] x86: " Srivatsa S. Bhat 2013-06-25 20:30 ` Srivatsa S. Bhat 2013-06-25 20:30 ` [PATCH v2 27/45] perf/x86: " Srivatsa S. Bhat 2013-06-25 20:30 ` Srivatsa S. Bhat 2013-06-25 20:30 ` [PATCH v2 28/45] KVM: " Srivatsa S. Bhat 2013-06-25 20:30 ` Srivatsa S. Bhat 2013-06-26 8:20 ` Paolo Bonzini 2013-06-25 20:30 ` [PATCH v2 29/45] kvm/vmx: " Srivatsa S. Bhat 2013-06-25 20:30 ` Srivatsa S. Bhat 2013-06-26 7:46 ` Paolo Bonzini 2013-06-26 8:06 ` Srivatsa S. Bhat 2013-06-26 8:23 ` Paolo Bonzini 2013-06-26 8:23 ` Paolo Bonzini 2013-06-26 8:41 ` Srivatsa S. Bhat 2013-06-26 8:41 ` Srivatsa S. Bhat 2013-06-26 8:57 ` Paolo Bonzini 2013-06-26 8:57 ` Paolo Bonzini 2013-06-25 20:30 ` [PATCH v2 30/45] x86/xen: " Srivatsa S. Bhat 2013-06-25 20:30 ` Srivatsa S. Bhat 2013-06-25 20:31 ` [PATCH v2 31/45] alpha/smp: " Srivatsa S. Bhat 2013-06-25 20:31 ` Srivatsa S. Bhat 2013-06-25 20:31 ` [PATCH v2 32/45] blackfin/smp: " Srivatsa S. Bhat 2013-06-25 20:31 ` Srivatsa S. Bhat 2013-06-25 20:31 ` [PATCH v2 33/45] cris/smp: " Srivatsa S. Bhat 2013-06-25 20:31 ` Srivatsa S. Bhat 2013-06-25 20:32 ` [PATCH v2 34/45] hexagon/smp: " Srivatsa S. Bhat 2013-06-25 20:32 ` Srivatsa S. Bhat 2013-06-25 20:32 ` [PATCH v2 35/45] ia64: irq, perfmon: " Srivatsa S. Bhat 2013-06-25 20:32 ` Srivatsa S. Bhat 2013-06-25 20:32 ` [PATCH v2 36/45] ia64: smp, tlb: " Srivatsa S. Bhat 2013-06-25 20:32 ` Srivatsa S. Bhat 2013-06-25 20:32 ` [PATCH v2 37/45] m32r: " Srivatsa S. Bhat 2013-06-25 20:32 ` Srivatsa S. 
Bhat 2013-06-25 20:32 ` [PATCH v2 38/45] MIPS: " Srivatsa S. Bhat 2013-06-25 20:32 ` Srivatsa S. Bhat 2013-06-26 13:39 ` Ralf Baechle 2013-06-26 13:39 ` Ralf Baechle 2013-06-27 7:08 ` Srivatsa S. Bhat 2013-06-25 20:33 ` [PATCH v2 39/45] mn10300: " Srivatsa S. Bhat 2013-06-25 20:33 ` [PATCH v2 40/45] powerpc, irq: Use GFP_ATOMIC allocations in atomic context Srivatsa S. Bhat 2013-06-25 20:33 ` Srivatsa S. Bhat 2013-06-25 20:33 ` [PATCH v2 41/45] powerpc: Use get/put_online_cpus_atomic() to prevent CPU offline Srivatsa S. Bhat 2013-06-25 20:33 ` Srivatsa S. Bhat 2013-06-25 20:33 ` [PATCH v2 42/45] powerpc: Use get/put_online_cpus_atomic() to avoid false-positive warning Srivatsa S. Bhat 2013-06-25 20:33 ` Srivatsa S. Bhat 2013-06-25 20:33 ` [PATCH v2 43/45] sh: Use get/put_online_cpus_atomic() to prevent CPU offline Srivatsa S. Bhat 2013-06-25 20:33 ` Srivatsa S. Bhat 2013-06-25 20:33 ` [PATCH v2 44/45] sparc: " Srivatsa S. Bhat 2013-06-25 20:33 ` Srivatsa S. Bhat 2013-06-25 20:34 ` [PATCH v2 45/45] tile: " Srivatsa S. Bhat 2013-06-25 20:34 ` Srivatsa S. Bhat
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=20130625202641.16593.90767.stgit@srivatsabhat.in.ibm.com \ --to=srivatsa.bhat@linux.vnet.ibm.com \ --cc=akpm@linux-foundation.org \ --cc=alex.shi@intel.com \ --cc=fweisbec@gmail.com \ --cc=js1304@gmail.com \ --cc=kosaki.motohiro@jp.fujitsu.com \ --cc=laijs@cn.fujitsu.com \ --cc=linux-arch@vger.kernel.org \ --cc=linux-kernel@vger.kernel.org \ --cc=linux-pm@vger.kernel.org \ --cc=linuxppc-dev@lists.ozlabs.org \ --cc=mingo@kernel.org \ --cc=namhyung@kernel.org \ --cc=netdev@vger.kernel.org \ --cc=nikunj@linux.vnet.ibm.com \ --cc=oleg@redhat.com \ --cc=paulmck@linux.vnet.ibm.com \ --cc=peterz@infradead.org \ --cc=rostedt@goodmis.org \ --cc=rusty@rustcorp.com.au \ --cc=sbw@mit.edu \ --cc=tglx@linutronix.de \ --cc=tj@kernel.org \ --cc=vincent.guittot@linaro.org \ --cc=walken@google.com \ --cc=wangyun@linux.vnet.ibm.com \ --cc=xiaoguangrong@linux.vnet.ibm.com \ --cc=zhong@linux.vnet.ibm.com \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link. Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions for how to clone and mirror all data and code used for this inbox; as well as URLs for NNTP newsgroup(s).