From: Christoph Lameter <cl@linux.com>
To: Tejun Heo <tj@kernel.org>
Cc: akpm@linux-foundation.org
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: linux-kernel@vger.kernel.org
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Subject: [cpuops cmpxchg V2 1/5] percpu: Generic this_cpu_cmpxchg() and this_cpu_xchg() support
Date: Tue, 14 Dec 2010 10:28:43 -0600
Message-ID: <20101214162853.057391047@linux.com>
In-Reply-To: <20101214162842.542421046@linux.com>
Generic code to provide new per-cpu atomic features:

	this_cpu_cmpxchg
	this_cpu_xchg

The generic fallbacks disable and re-enable preemption (or interrupts,
for the irqsafe variants) around a plain read-modify-write to ensure
correct per-cpu atomicity.
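For illustration, the preempt-based fallback (spelled out in full as
_this_cpu_generic_cmpxchg in the patch below) is equivalent to:

	preempt_disable();
	ret = __this_cpu_read(pcp);
	if (ret == (oval))
		__this_cpu_write(pcp, nval);
	preempt_enable();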
Falling back to regular cmpxchg and xchg is not possible, since per-cpu
atomic semantics include the guarantee that the current CPU's per-cpu
data is accessed atomically. Regular cmpxchg and xchg require the
address of the per-cpu data to be determined first; without a segment
override, that address calculation cannot be included atomically in the
xchg or cmpxchg itself.
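As a usage sketch (hypothetical; the per-cpu variable and function
below are made up for illustration and are not part of this patch), a
lockless per-cpu counter update with the new primitive could look like:

	DEFINE_PER_CPU(unsigned long, event_count);

	/* Increment the current CPU's counter without disabling
	 * preemption or interrupts around the update. */
	static void count_event(void)
	{
		unsigned long old;

		do {
			old = this_cpu_read(event_count);
		} while (this_cpu_cmpxchg(event_count, old, old + 1) != old);
	}

The loop retries only when the value changed between the read and the
cmpxchg, e.g. because the task was preempted and migrated to another
CPU in between; the cmpxchg itself is atomic with respect to the CPU
it executes on.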
Signed-off-by: Christoph Lameter <cl@linux.com>
---
include/linux/percpu.h | 142 ++++++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 135 insertions(+), 7 deletions(-)
Index: linux-2.6/include/linux/percpu.h
===================================================================
--- linux-2.6.orig/include/linux/percpu.h 2010-12-08 13:16:22.000000000 -0600
+++ linux-2.6/include/linux/percpu.h 2010-12-08 14:43:46.000000000 -0600
@@ -242,21 +242,21 @@ extern void __bad_size_call_parameter(vo
#define __pcpu_size_call_return2(stem, pcp, ...) \
({ \
- typeof(pcp) ret__; \
+ typeof(pcp) pscr2_ret__; \
__verify_pcpu_ptr(&(pcp)); \
switch(sizeof(pcp)) { \
- case 1: ret__ = stem##1(pcp, __VA_ARGS__); \
+ case 1: pscr2_ret__ = stem##1(pcp, __VA_ARGS__); \
break; \
- case 2: ret__ = stem##2(pcp, __VA_ARGS__); \
+ case 2: pscr2_ret__ = stem##2(pcp, __VA_ARGS__); \
break; \
- case 4: ret__ = stem##4(pcp, __VA_ARGS__); \
+ case 4: pscr2_ret__ = stem##4(pcp, __VA_ARGS__); \
break; \
- case 8: ret__ = stem##8(pcp, __VA_ARGS__); \
+ case 8: pscr2_ret__ = stem##8(pcp, __VA_ARGS__); \
break; \
default: \
__bad_size_call_parameter();break; \
} \
- ret__; \
+ pscr2_ret__; \
})
#define __pcpu_size_call(stem, variable, ...) \
@@ -322,6 +322,106 @@ do { \
# define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, (pcp))
#endif
+#define __this_cpu_generic_xchg(pcp, nval) \
+({ typeof(pcp) ret__; \
+ ret__ = __this_cpu_read(pcp); \
+ __this_cpu_write(pcp, nval); \
+ ret__; \
+})
+
+#ifndef __this_cpu_xchg
+# ifndef __this_cpu_xchg_1
+# define __this_cpu_xchg_1(pcp, nval) __this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef __this_cpu_xchg_2
+# define __this_cpu_xchg_2(pcp, nval) __this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef __this_cpu_xchg_4
+# define __this_cpu_xchg_4(pcp, nval) __this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef __this_cpu_xchg_8
+# define __this_cpu_xchg_8(pcp, nval) __this_cpu_generic_xchg(pcp, nval)
+# endif
+# define __this_cpu_xchg(pcp, nval) __pcpu_size_call_return2(__this_cpu_xchg_, (pcp), nval)
+#endif
+
+#define _this_cpu_generic_xchg(pcp, nval) \
+({ typeof(pcp) ret__; \
+ preempt_disable(); \
+ ret__ = __this_cpu_read(pcp); \
+ __this_cpu_write(pcp, nval); \
+ preempt_enable(); \
+ ret__; \
+})
+
+#ifndef this_cpu_xchg
+# ifndef this_cpu_xchg_1
+# define this_cpu_xchg_1(pcp, nval) _this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef this_cpu_xchg_2
+# define this_cpu_xchg_2(pcp, nval) _this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef this_cpu_xchg_4
+# define this_cpu_xchg_4(pcp, nval) _this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef this_cpu_xchg_8
+# define this_cpu_xchg_8(pcp, nval) _this_cpu_generic_xchg(pcp, nval)
+# endif
+# define this_cpu_xchg(pcp, nval) __pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval)
+#endif
+
+#define _this_cpu_generic_cmpxchg(pcp, oval, nval) \
+({ typeof(pcp) ret__; \
+ preempt_disable(); \
+ ret__ = __this_cpu_read(pcp); \
+ if (ret__ == (oval)) \
+ __this_cpu_write(pcp, nval); \
+ preempt_enable(); \
+ ret__; \
+})
+
+#ifndef this_cpu_cmpxchg
+# ifndef this_cpu_cmpxchg_1
+# define this_cpu_cmpxchg_1(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef this_cpu_cmpxchg_2
+# define this_cpu_cmpxchg_2(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef this_cpu_cmpxchg_4
+# define this_cpu_cmpxchg_4(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef this_cpu_cmpxchg_8
+# define this_cpu_cmpxchg_8(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# define this_cpu_cmpxchg(pcp, oval, nval) __pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
+#endif
+
+#define __this_cpu_generic_cmpxchg(pcp, oval, nval) \
+({ \
+ typeof(pcp) ret__; \
+ ret__ = __this_cpu_read(pcp); \
+ if (ret__ == (oval)) \
+ __this_cpu_write(pcp, nval); \
+ ret__; \
+})
+
+#ifndef __this_cpu_cmpxchg
+# ifndef __this_cpu_cmpxchg_1
+# define __this_cpu_cmpxchg_1(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef __this_cpu_cmpxchg_2
+# define __this_cpu_cmpxchg_2(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef __this_cpu_cmpxchg_4
+# define __this_cpu_cmpxchg_4(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef __this_cpu_cmpxchg_8
+# define __this_cpu_cmpxchg_8(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# define __this_cpu_cmpxchg(pcp, oval, nval) __pcpu_size_call_return2(\
+ __this_cpu_cmpxchg_, pcp, oval, nval)
+#endif
+
#define _this_cpu_generic_to_op(pcp, val, op) \
do { \
preempt_disable(); \
@@ -610,7 +710,7 @@ do { \
* IRQ safe versions of the per cpu RMW operations. Note that these operations
* are *not* safe against modification of the same variable from another
* processors (which one gets when using regular atomic operations)
- . They are guaranteed to be atomic vs. local interrupts and
+ * They are guaranteed to be atomic vs. local interrupts and
* preemption only.
*/
#define irqsafe_cpu_generic_to_op(pcp, val, op) \
@@ -697,4 +797,32 @@ do { \
# define irqsafe_cpu_xor(pcp, val) __pcpu_size_call(irqsafe_cpu_xor_, (val))
#endif
+#define irqsafe_cpu_generic_cmpxchg(pcp, oval, nval) \
+({ \
+ typeof(pcp) ret__; \
+ unsigned long flags; \
+ local_irq_save(flags); \
+ ret__ = __this_cpu_read(pcp); \
+ if (ret__ == (oval)) \
+ __this_cpu_write(pcp, nval); \
+ local_irq_restore(flags); \
+ ret__; \
+})
+
+#ifndef irqsafe_cpu_cmpxchg
+# ifndef irqsafe_cpu_cmpxchg_1
+# define irqsafe_cpu_cmpxchg_1(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_2
+# define irqsafe_cpu_cmpxchg_2(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_4
+# define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_8
+# define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# define irqsafe_cpu_cmpxchg(pcp, oval, nval) __pcpu_size_call_return2(irqsafe_cpu_cmpxchg_, (pcp), oval, nval)
+#endif
+
#endif /* __LINUX_PERCPU_H */