From: Christoph Lameter <cl@linux.com>
To: Tejun Heo <tj@kernel.org>
Cc: akpm@linuxfoundation.org, rostedt@goodmis.org,
linux-kernel@vger.kernel.org, Ingo Molnar <mingo@kernel.org>,
Peter Zijlstra <peterz@infradead.org>,
Thomas Gleixner <tglx@linutronix.de>
Subject: [PATCH 6/6] percpu: Add preemption checks to __this_cpu ops
Date: Tue, 15 Oct 2013 12:47:28 -0500 [thread overview]
Message-ID: <20131015174747.813545438@linux.com> (raw)
In-Reply-To: <20131015174722.615394057@linux.com>
[-- Attachment #1: preempt_check_this_cpu_ops --]
[-- Type: text/plain, Size: 6721 bytes --]
V3->V4:
- Drop CONFIG_DEBUG_THIS_CPU_OPERATIONS
- Add support for logging the exact operation that caused the issue.
We define a check function in order to avoid trouble with the
include files. The higher-level __this_cpu macros are then
modified to invoke this check before the __this_cpu operation
is performed.
Signed-off-by: Christoph Lameter <cl@linux.com>
Index: linux/include/linux/percpu.h
===================================================================
--- linux.orig/include/linux/percpu.h 2013-10-10 11:30:17.111743444 -0500
+++ linux/include/linux/percpu.h 2013-10-10 10:30:26.817316787 -0500
@@ -175,6 +175,12 @@ extern phys_addr_t per_cpu_ptr_to_phys(v
extern void __bad_size_call_parameter(void);
+#ifdef CONFIG_DEBUG_PREEMPT
+extern void __this_cpu_preempt_check(const char *op);
+#else
+static inline void __this_cpu_preempt_check(const char *op) { }
+#endif
+
#define __pcpu_size_call_return(stem, variable) \
({ typeof(variable) pscr_ret__; \
__verify_pcpu_ptr(&(variable)); \
@@ -538,7 +544,8 @@ do { \
# ifndef __this_cpu_read_8
# define __this_cpu_read_8(pcp) (*__this_cpu_ptr(&(pcp)))
# endif
-# define __this_cpu_read(pcp) __pcpu_size_call_return(__this_cpu_read_, (pcp))
+# define __this_cpu_read(pcp) \
+ (__this_cpu_preempt_check("read"),__pcpu_size_call_return(__this_cpu_read_, (pcp)))
#endif
#define __this_cpu_generic_to_op(pcp, val, op) \
@@ -559,7 +566,12 @@ do { \
# ifndef __this_cpu_write_8
# define __this_cpu_write_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
# endif
-# define __this_cpu_write(pcp, val) __pcpu_size_call(__this_cpu_write_, (pcp), (val))
+
+# define __this_cpu_write(pcp, val) \
+do { __this_cpu_preempt_check("write"); \
+ __pcpu_size_call(__this_cpu_write_, (pcp), (val)); \
+} while (0)
+
#endif
#ifndef __this_cpu_add
@@ -575,7 +587,12 @@ do { \
# ifndef __this_cpu_add_8
# define __this_cpu_add_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
# endif
-# define __this_cpu_add(pcp, val) __pcpu_size_call(__this_cpu_add_, (pcp), (val))
+
+# define __this_cpu_add(pcp, val) \
+do { __this_cpu_preempt_check("add"); \
+ __pcpu_size_call(__this_cpu_add_, (pcp), (val)); \
+} while (0)
+
#endif
#ifndef __this_cpu_sub
@@ -603,7 +620,12 @@ do { \
# ifndef __this_cpu_and_8
# define __this_cpu_and_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
# endif
-# define __this_cpu_and(pcp, val) __pcpu_size_call(__this_cpu_and_, (pcp), (val))
+
+# define __this_cpu_and(pcp, val) \
+do { __this_cpu_preempt_check("and"); \
+ __pcpu_size_call(__this_cpu_and_, (pcp), (val)); \
+} while (0)
+
#endif
#ifndef __this_cpu_or
@@ -619,7 +641,12 @@ do { \
# ifndef __this_cpu_or_8
# define __this_cpu_or_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
# endif
-# define __this_cpu_or(pcp, val) __pcpu_size_call(__this_cpu_or_, (pcp), (val))
+
+# define __this_cpu_or(pcp, val) \
+do { __this_cpu_preempt_check("or"); \
+ __pcpu_size_call(__this_cpu_or_, (pcp), (val)); \
+} while (0)
+
#endif
#ifndef __this_cpu_xor
@@ -635,7 +662,12 @@ do { \
# ifndef __this_cpu_xor_8
# define __this_cpu_xor_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
# endif
-# define __this_cpu_xor(pcp, val) __pcpu_size_call(__this_cpu_xor_, (pcp), (val))
+
+# define __this_cpu_xor(pcp, val) \
+do { __this_cpu_preempt_check("xor"); \
+ __pcpu_size_call(__this_cpu_xor_, (pcp), (val)); \
+} while (0)
+
#endif
#define __this_cpu_generic_add_return(pcp, val) \
@@ -658,7 +690,7 @@ do { \
# define __this_cpu_add_return_8(pcp, val) __this_cpu_generic_add_return(pcp, val)
# endif
# define __this_cpu_add_return(pcp, val) \
- __pcpu_size_call_return2(__this_cpu_add_return_, pcp, val)
+ (__this_cpu_preempt_check("add_return"),__pcpu_size_call_return2(__this_cpu_add_return_, pcp, val))
#endif
#define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(val))
@@ -686,7 +718,7 @@ do { \
# define __this_cpu_xchg_8(pcp, nval) __this_cpu_generic_xchg(pcp, nval)
# endif
# define __this_cpu_xchg(pcp, nval) \
- __pcpu_size_call_return2(__this_cpu_xchg_, (pcp), nval)
+ (__this_cpu_preempt_check("xchg"),__pcpu_size_call_return2(__this_cpu_xchg_, (pcp), nval))
#endif
#define __this_cpu_generic_cmpxchg(pcp, oval, nval) \
@@ -712,7 +744,7 @@ do { \
# define __this_cpu_cmpxchg_8(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# define __this_cpu_cmpxchg(pcp, oval, nval) \
- __pcpu_size_call_return2(__this_cpu_cmpxchg_, pcp, oval, nval)
+ (__this_cpu_preempt_check("cmpxchg"),__pcpu_size_call_return2(__this_cpu_cmpxchg_, pcp, oval, nval))
#endif
#define __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
@@ -745,7 +777,7 @@ do { \
__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- __pcpu_double_call_return_bool(__this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
+ (__this_cpu_preempt_check("cmpxchg_double"),__pcpu_double_call_return_bool(__this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2)))
#endif
/*
Index: linux/lib/smp_processor_id.c
===================================================================
--- linux.orig/lib/smp_processor_id.c 2013-10-10 11:30:17.111743444 -0500
+++ linux/lib/smp_processor_id.c 2013-10-10 10:32:16.046357108 -0500
@@ -7,7 +7,7 @@
#include <linux/kallsyms.h>
#include <linux/sched.h>
-notrace unsigned int debug_smp_processor_id(void)
+notrace static unsigned int check_preemption_disabled(char *what)
{
unsigned long preempt_count = preempt_count();
int this_cpu = raw_smp_processor_id();
@@ -39,9 +39,9 @@ notrace unsigned int debug_smp_processor
if (!printk_ratelimit())
goto out_enable;
- printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] "
- "code: %s/%d\n",
- preempt_count() - 1, current->comm, current->pid);
+ printk(KERN_ERR "%s in preemptible [%08x] code: %s/%d\n",
+ what, preempt_count() - 1, current->comm, current->pid);
+
print_symbol("caller is %s\n", (long)__builtin_return_address(0));
dump_stack();
@@ -51,5 +51,18 @@ out:
return this_cpu;
}
+notrace unsigned int debug_smp_processor_id(void)
+{
+ return check_preemption_disabled("BUG: using smp_processor_id()");
+}
EXPORT_SYMBOL(debug_smp_processor_id);
+notrace void __this_cpu_preempt_check(const char *op)
+{
+ char text[40];
+
+ snprintf(text, sizeof(text), "__this_cpu_%s operation", op);
+ check_preemption_disabled(text);
+}
+EXPORT_SYMBOL(__this_cpu_preempt_check);
+
next prev parent reply other threads:[~2013-10-15 17:56 UTC|newest]
Thread overview: 33+ messages / expand[flat|nested] mbox.gz Atom feed top
2013-10-15 17:47 [PATCH 0/6] percpu: Implement Preemption checks for __this_cpu operations V4b Christoph Lameter
2013-10-15 17:47 ` [PATCH 1/6] net: ip4_datagram_connect: Use correct form of statistics update Christoph Lameter
2013-10-15 18:36 ` Eric Dumazet
2013-10-16 6:09 ` Ingo Molnar
2013-10-16 8:35 ` Peter Zijlstra
2013-10-16 9:14 ` Eric Dumazet
2013-10-16 9:26 ` Ingo Molnar
2013-10-16 14:27 ` Christoph Lameter
2013-10-16 14:37 ` Eric Dumazet
2013-10-15 17:47 ` [PATCH 2/6] percpu: Add raw_cpu_ops Christoph Lameter
2013-10-15 17:47 ` [PATCH 3/6] mm: Use raw_cpu ops for determining current NUMA node Christoph Lameter
2013-10-16 8:38 ` Peter Zijlstra
2013-10-16 14:22 ` Christoph Lameter
2013-10-15 17:47 ` [PATCH 4/6] Use raw_cpu_write for initialization of per cpu refcount Christoph Lameter
2013-10-16 8:43 ` Peter Zijlstra
2013-10-15 17:47 ` [PATCH 5/6] net: __this_cpu_inc in route.c Christoph Lameter
2013-10-16 8:46 ` Peter Zijlstra
2013-10-16 9:22 ` Eric Dumazet
2013-10-16 10:25 ` Peter Zijlstra
2013-10-16 15:07 ` Christoph Lameter
2013-10-15 17:47 ` Christoph Lameter [this message]
2013-10-16 8:49 ` [PATCH 6/6] percpu: Add preemption checks to __this_cpu ops Peter Zijlstra
2013-10-16 15:09 ` Christoph Lameter
2013-10-16 15:36 ` Peter Zijlstra
2013-10-16 15:55 ` Christoph Lameter
2013-10-16 16:25 ` Peter Zijlstra
2013-10-16 16:52 ` Steven Rostedt
2013-10-16 17:11 ` Peter Zijlstra
2013-10-16 17:39 ` Steven Rostedt
2013-10-16 18:38 ` Peter Zijlstra
2013-10-17 19:22 ` Christoph Lameter
2013-10-17 21:13 ` Peter Zijlstra
[not found] <20131011175518.634285474@linux.com>
2013-10-11 17:54 ` Christoph Lameter
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20131015174747.813545438@linux.com \
--to=cl@linux.com \
--cc=akpm@linuxfoundation.org \
--cc=linux-kernel@vger.kernel.org \
--cc=mingo@kernel.org \
--cc=peterz@infradead.org \
--cc=rostedt@goodmis.org \
--cc=tglx@linutronix.de \
--cc=tj@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox