From: Christoph Lameter <cl@linux.com>
To: akpm@linux-foundation.org
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: linux-kernel@vger.kernel.org
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Tejun Heo <tj@kernel.org>
Subject: [rfc: cpuops adv V1 1/8] percpu: generic this_cpu_cmpxchg() and this_cpu_cmpxchg_double support
Date: Thu, 02 Dec 2010 15:53:41 -0600
Message-ID: <20101202215359.439960472@linux.com>
In-Reply-To: <20101202215340.562309713@linux.com>
Provide generic fallback implementations of this_cpu_xchg(), this_cpu_cmpxchg() and this_cpu_cmpxchg_double(), together with their irqsafe_cpu_* counterparts, so that architectures can override them with their own (locally atomic) instructions.
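
As an illustration of the intended usage (a minimal sketch with hypothetical
identifiers, not part of the patch): a per cpu value can be updated locklessly
by looping on this_cpu_cmpxchg().

static DEFINE_PER_CPU(unsigned long, hyp_counter);

static void hyp_inc(void)
{
	unsigned long old;

	do {
		old = this_cpu_read(hyp_counter);
		/*
		 * The cmpxchg may fail if we were preempted and migrated
		 * to another cpu between the read and the cmpxchg; in
		 * that case simply retry on the new cpu.
		 */
	} while (this_cpu_cmpxchg(hyp_counter, old, old + 1) != old);
}
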
V2->V3:
- Clean up some parameters
- Provide implementation of irqsafe_cpu_cmpxchg
Signed-off-by: Christoph Lameter <cl@linux.com>
---
include/linux/percpu.h | 258 ++++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 257 insertions(+), 1 deletion(-)
Index: linux-2.6/include/linux/percpu.h
===================================================================
--- linux-2.6.orig/include/linux/percpu.h 2010-11-30 14:06:56.000000000 -0600
+++ linux-2.6/include/linux/percpu.h 2010-11-30 14:21:43.000000000 -0600
@@ -259,6 +259,22 @@ extern void __bad_size_call_parameter(vo
ret__; \
})
+/* Special handling for cmpxchg_double */
+#define __pcpu_size_call_return_int(stem, pcp, ...) \
+({ \
+ int ret__; \
+ __verify_pcpu_ptr(pcp); \
+ switch(sizeof(*pcp)) { \
+ case 1: ret__ = stem##1(pcp, __VA_ARGS__);break; \
+ case 2: ret__ = stem##2(pcp, __VA_ARGS__);break; \
+ case 4: ret__ = stem##4(pcp, __VA_ARGS__);break; \
+ case 8: ret__ = stem##8(pcp, __VA_ARGS__);break; \
+ default: \
+ __bad_size_call_parameter();break; \
+ } \
+ ret__; \
+})
+
#define __pcpu_size_call(stem, variable, ...) \
do { \
__verify_pcpu_ptr(&(variable)); \
@@ -322,6 +338,185 @@ do { \
# define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, (pcp))
#endif
+#define __this_cpu_generic_xchg(pcp, nval) \
+({ typeof(pcp) ret__; \
+ ret__ = __this_cpu_read(pcp); \
+ __this_cpu_write(pcp, nval); \
+ ret__; \
+})
+
+#ifndef __this_cpu_xchg
+# ifndef __this_cpu_xchg_1
+# define __this_cpu_xchg_1(pcp, nval) __this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef __this_cpu_xchg_2
+# define __this_cpu_xchg_2(pcp, nval) __this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef __this_cpu_xchg_4
+# define __this_cpu_xchg_4(pcp, nval) __this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef __this_cpu_xchg_8
+# define __this_cpu_xchg_8(pcp, nval) __this_cpu_generic_xchg(pcp, nval)
+# endif
+# define __this_cpu_xchg(pcp, nval) __pcpu_size_call_return2(__this_cpu_xchg_, (pcp), nval)
+#endif
+
+#define _this_cpu_generic_xchg(pcp, nval) \
+({ typeof(pcp) ret__; \
+ preempt_disable(); \
+ ret__ = __this_cpu_read(pcp); \
+ __this_cpu_write(pcp, nval); \
+ preempt_enable(); \
+ ret__; \
+})
+
+#ifndef this_cpu_xchg
+# ifndef this_cpu_xchg_1
+# define this_cpu_xchg_1(pcp, nval) _this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef this_cpu_xchg_2
+# define this_cpu_xchg_2(pcp, nval) _this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef this_cpu_xchg_4
+# define this_cpu_xchg_4(pcp, nval) _this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef this_cpu_xchg_8
+# define this_cpu_xchg_8(pcp, nval) _this_cpu_generic_xchg(pcp, nval)
+# endif
+# define this_cpu_xchg(pcp, nval) __pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval)
+#endif
+
+#define _this_cpu_generic_cmpxchg(pcp, oval, nval) \
+({ typeof(pcp) ret__; \
+ preempt_disable(); \
+ ret__ = __this_cpu_read(pcp); \
+ if (ret__ == (oval)) \
+ __this_cpu_write(pcp, nval); \
+ preempt_enable(); \
+ ret__; \
+})
+
+#ifndef this_cpu_cmpxchg
+# ifndef this_cpu_cmpxchg_1
+# define this_cpu_cmpxchg_1(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef this_cpu_cmpxchg_2
+# define this_cpu_cmpxchg_2(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef this_cpu_cmpxchg_4
+# define this_cpu_cmpxchg_4(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef this_cpu_cmpxchg_8
+# define this_cpu_cmpxchg_8(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# define this_cpu_cmpxchg(pcp, oval, nval) __pcpu_size_call_return2(this_cpu_cmpxchg_, (pcp), oval, nval)
+#endif
+
+#define __this_cpu_generic_cmpxchg(pcp, oval, nval) \
+({ \
+ typeof(pcp) ret__; \
+ ret__ = __this_cpu_read(pcp); \
+ if (ret__ == (oval)) \
+ __this_cpu_write(pcp, nval); \
+ ret__; \
+})
+
+#ifndef __this_cpu_cmpxchg
+# ifndef __this_cpu_cmpxchg_1
+# define __this_cpu_cmpxchg_1(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef __this_cpu_cmpxchg_2
+# define __this_cpu_cmpxchg_2(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef __this_cpu_cmpxchg_4
+# define __this_cpu_cmpxchg_4(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef __this_cpu_cmpxchg_8
+# define __this_cpu_cmpxchg_8(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# define __this_cpu_cmpxchg(pcp, oval, nval) __pcpu_size_call_return2(\
+ __this_cpu_cmpxchg_, (pcp), oval, nval)
+#endif
+
+/*
+ * cmpxchg_double replaces two adjacent scalars at once. The first parameter
+ * passed is a percpu pointer, not a scalar like the other this_cpu
+ * operations, because the operation acts on two adjacent scalars (which
+ * must be of the same size). A truth value is returned to indicate success
+ * or failure (since a double register result is difficult to handle).
+ * Hardware support for these operations is very limited, so only certain
+ * sizes may work.
+ */
+#define __this_cpu_generic_cmpxchg_double(pcp, oval1, oval2, nval1, nval2) \
+({ \
+ typeof(oval2) * __percpu pcp2 = (typeof(oval2) *)((pcp) + 1); \
+ int __ret = 0; \
+ if (__this_cpu_read(*pcp) == (oval1) && \
+ __this_cpu_read(*pcp2) == (oval2)) { \
+ __this_cpu_write(*pcp, (nval1)); \
+ __this_cpu_write(*pcp2, (nval2)); \
+ __ret = 1; \
+ } \
+ (__ret); \
+})
+
+#ifndef __this_cpu_cmpxchg_double
+# ifndef __this_cpu_cmpxchg_double_1
+# define __this_cpu_cmpxchg_double_1(pcp, oval1, oval2, nval1, nval2) \
+ __this_cpu_generic_cmpxchg_double(pcp, oval1, oval2, nval1, nval2)
+# endif
+# ifndef __this_cpu_cmpxchg_double_2
+# define __this_cpu_cmpxchg_double_2(pcp, oval1, oval2, nval1, nval2) \
+ __this_cpu_generic_cmpxchg_double(pcp, oval1, oval2, nval1, nval2)
+# endif
+# ifndef __this_cpu_cmpxchg_double_4
+# define __this_cpu_cmpxchg_double_4(pcp, oval1, oval2, nval1, nval2) \
+ __this_cpu_generic_cmpxchg_double(pcp, oval1, oval2, nval1, nval2)
+# endif
+# ifndef __this_cpu_cmpxchg_double_8
+# define __this_cpu_cmpxchg_double_8(pcp, oval1, oval2, nval1, nval2) \
+ __this_cpu_generic_cmpxchg_double(pcp, oval1, oval2, nval1, nval2)
+# endif
+# define __this_cpu_cmpxchg_double(pcp, oval1, oval2, nval1, nval2) \
+ __pcpu_size_call_return_int(__this_cpu_cmpxchg_double_, (pcp), \
+ oval1, oval2, nval1, nval2)
+#endif
+
+#define _this_cpu_generic_cmpxchg_double(pcp, oval1, oval2, nval1, nval2) \
+({ \
+ int ret__; \
+ preempt_disable(); \
+ ret__ = __this_cpu_generic_cmpxchg_double(pcp, \
+ oval1, oval2, nval1, nval2); \
+ preempt_enable(); \
+ ret__; \
+})
+
+#ifndef this_cpu_cmpxchg_double
+# ifndef this_cpu_cmpxchg_double_1
+# define this_cpu_cmpxchg_double_1(pcp, oval1, oval2, nval1, nval2) \
+ _this_cpu_generic_cmpxchg_double(pcp, oval1, oval2, nval1, nval2)
+# endif
+# ifndef this_cpu_cmpxchg_double_2
+# define this_cpu_cmpxchg_double_2(pcp, oval1, oval2, nval1, nval2) \
+ _this_cpu_generic_cmpxchg_double(pcp, oval1, oval2, nval1, nval2)
+# endif
+# ifndef this_cpu_cmpxchg_double_4
+# define this_cpu_cmpxchg_double_4(pcp, oval1, oval2, nval1, nval2) \
+ _this_cpu_generic_cmpxchg_double(pcp, oval1, oval2, nval1, nval2)
+# endif
+# ifndef this_cpu_cmpxchg_double_8
+# define this_cpu_cmpxchg_double_8(pcp, oval1, oval2, nval1, nval2) \
+ _this_cpu_generic_cmpxchg_double(pcp, oval1, oval2, nval1, nval2)
+# endif
+# define this_cpu_cmpxchg_double(pcp, oval1, oval2, nval1, nval2) \
+ __pcpu_size_call_return_int(this_cpu_cmpxchg_double_, (pcp), \
+ oval1, oval2, nval1, nval2)
+#endif
+
+
+
+
#define _this_cpu_generic_to_op(pcp, val, op) \
do { \
preempt_disable(); \
@@ -610,7 +805,7 @@ do { \
* IRQ safe versions of the per cpu RMW operations. Note that these operations
* are *not* safe against modification of the same variable from another
* processors (which one gets when using regular atomic operations)
- . They are guaranteed to be atomic vs. local interrupts and
+ * They are guaranteed to be atomic vs. local interrupts and
* preemption only.
*/
#define irqsafe_cpu_generic_to_op(pcp, val, op) \
@@ -697,4 +892,65 @@ do { \
# define irqsafe_cpu_xor(pcp, val) __pcpu_size_call(irqsafe_cpu_xor_, (pcp), (val))
#endif
+#define irqsafe_cpu_generic_cmpxchg(pcp, oval, nval) \
+({ \
+ typeof(pcp) ret__; \
+ unsigned long flags; \
+ local_irq_save(flags); \
+ ret__ = __this_cpu_read(pcp); \
+ if (ret__ == (oval)) \
+ __this_cpu_write(pcp, nval); \
+ local_irq_restore(flags); \
+ ret__; \
+})
+
+#ifndef irqsafe_cpu_cmpxchg
+# ifndef irqsafe_cpu_cmpxchg_1
+# define irqsafe_cpu_cmpxchg_1(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_2
+# define irqsafe_cpu_cmpxchg_2(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_4
+# define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_8
+# define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# define irqsafe_cpu_cmpxchg(pcp, oval, nval) __pcpu_size_call_return2(irqsafe_cpu_cmpxchg_, (pcp), oval, nval)
+#endif
+
+#define irqsafe_generic_cpu_cmpxchg_double(pcp, oval1, oval2, nval1, nval2) \
+({ \
+ int ret__; \
+ unsigned long flags; \
+ local_irq_save(flags); \
+ ret__ = __this_cpu_generic_cmpxchg_double(pcp, \
+ oval1, oval2, nval1, nval2); \
+ local_irq_restore(flags); \
+ ret__; \
+})
+
+#ifndef irqsafe_cpu_cmpxchg_double
+# ifndef irqsafe_cpu_cmpxchg_double_1
+# define irqsafe_cpu_cmpxchg_double_1(pcp, oval1, oval2, nval1, nval2) \
+ irqsafe_generic_cpu_cmpxchg_double(pcp, oval1, oval2, nval1, nval2)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_double_2
+# define irqsafe_cpu_cmpxchg_double_2(pcp, oval1, oval2, nval1, nval2) \
+ irqsafe_generic_cpu_cmpxchg_double(pcp, oval1, oval2, nval1, nval2)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_double_4
+# define irqsafe_cpu_cmpxchg_double_4(pcp, oval1, oval2, nval1, nval2) \
+ irqsafe_generic_cpu_cmpxchg_double(pcp, oval1, oval2, nval1, nval2)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_double_8
+# define irqsafe_cpu_cmpxchg_double_8(pcp, oval1, oval2, nval1, nval2) \
+ irqsafe_generic_cpu_cmpxchg_double(pcp, oval1, oval2, nval1, nval2)
+# endif
+# define irqsafe_cpu_cmpxchg_double(pcp, oval1, oval2, nval1, nval2) \
+ __pcpu_size_call_return_int(irqsafe_cpu_cmpxchg_double_, (pcp), \
+ oval1, oval2, nval1, nval2)
+#endif
+
#endif /* __LINUX_PERCPU_H */
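
For completeness, a sketch of the cmpxchg_double calling convention described
in the comment in the hunk above (again with hypothetical identifiers, not
part of the patch, and assuming the architecture supports this pair size; the
pair may also need special alignment, e.g. 16 bytes for cmpxchg16b on x86-64):

struct hyp_pair {
	void *ptr;		/* first scalar */
	unsigned long seq;	/* adjacent scalar of the same size */
};

static DEFINE_PER_CPU(struct hyp_pair, hyp_state);

static int hyp_replace(void *optr, unsigned long oseq,
		       void *nptr, unsigned long nseq)
{
	/*
	 * Pass a percpu pointer to the first scalar; the operation also
	 * covers the scalar immediately following it. Returns 1 if both
	 * words matched and were replaced, 0 otherwise.
	 */
	return this_cpu_cmpxchg_double(&hyp_state.ptr, optr, oseq,
				       nptr, nseq);
}

The irqsafe_cpu_cmpxchg_double() variant takes the same arguments and is
additionally atomic vs. local interrupts.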