public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
From: Christoph Lameter <cl@linux.com>
To: akpm@linux-foundation.org
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: linux-kernel@vger.kernel.org
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Tejun Heo <tj@kernel.org>
Subject: [thiscpuops upgrade 09/10] x86: this_cpu_cmpxchg and this_cpu_cmpxchg_double operations
Date: Tue, 23 Nov 2010 17:51:48 -0600	[thread overview]
Message-ID: <20101123235201.146063796@linux.com> (raw)
In-Reply-To: <20101123235139.908255844@linux.com>

[-- Attachment #1: this_cpu_cmpxchg_x86 --]
[-- Type: text/plain, Size: 5059 bytes --]

Provide support as far as the hardware capabilities of the x86 cpus
allow.

Signed-off-by: Christoph Lameter <cl@linux.com>

---
 arch/x86/include/asm/percpu.h |  100 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 100 insertions(+)

Index: linux-2.6/arch/x86/include/asm/percpu.h
===================================================================
--- linux-2.6.orig/arch/x86/include/asm/percpu.h	2010-11-23 16:50:58.000000000 -0600
+++ linux-2.6/arch/x86/include/asm/percpu.h	2010-11-23 16:56:48.000000000 -0600
@@ -216,6 +216,41 @@ do {									\
 	pfo_ret__ + (val);						\
 })
 
+#define percpu_cmpxchg_op(var, oval, nval)				\
+({									\
+	typeof(var) __ret;						\
+	typeof(var) __old = (oval);					\
+	typeof(var) __new = (nval);					\
+	switch (sizeof(var)) {						\
+	case 1:								\
+		asm("cmpxchgb %2, "__percpu_arg(1)			\
+			    : "=a" (__ret), "+m" (var)			\
+			    : "q" (__new), "0" (__old)			\
+			    : "memory");				\
+		break;							\
+	case 2:								\
+		asm("cmpxchgw %2, "__percpu_arg(1)			\
+			    : "=a" (__ret), "+m" (var)			\
+			    : "r" (__new), "0" (__old)			\
+			    : "memory");				\
+		break;							\
+	case 4:								\
+		asm("cmpxchgl %2, "__percpu_arg(1)			\
+			    : "=a" (__ret), "+m" (var)			\
+			    : "r" (__new), "0" (__old)			\
+			    : "memory");				\
+		break;							\
+	case 8:								\
+		asm("cmpxchgq %2, "__percpu_arg(1)			\
+			    : "=a" (__ret), "+m" (var)			\
+			    : "r" (__new), "0" (__old)			\
+			    : "memory");				\
+		break;							\
+	default: __bad_percpu_size();					\
+	}								\
+	__ret;								\
+})
+
 #define percpu_from_op(op, var, constraint)		\
 ({							\
 	typeof(var) pfo_ret__;				\
@@ -346,7 +381,52 @@ do {									\
 #define this_cpu_add_return_1(pcp, val)		percpu_add_return_op((pcp), val)
 #define this_cpu_add_return_2(pcp, val)		percpu_add_return_op((pcp), val)
 #define this_cpu_add_return_4(pcp, val)		percpu_add_return_op((pcp), val)
+
+#define __this_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op((pcp), oval, nval)
+#define __this_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op((pcp), oval, nval)
+#define __this_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op((pcp), oval, nval)
+#define this_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op((pcp), oval, nval)
+#define this_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op((pcp), oval, nval)
+#define this_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op((pcp), oval, nval)
+
+#define percpu_cmpxchg8b_double(pcp, o1, o2, n1, n2)			\
+({									\
+	char __ret;							\
+	typeof(o1) __o1 = o1;						\
+	typeof(o1) __n1 = n1;						\
+	typeof(o2) __o2 = o2;						\
+	typeof(o2) __n2 = n2;						\
+	asm("cmpxchg8b "__percpu_arg(1)"\n\tsetz %0\n\t"		\
+		    : "=a"(__ret), "+m" (*pcp)				\
+		    :  "b"(__n1), "c"(__n2), "a"(__o1), "d"(__o2)	\
+		    : "memory");					\
+	__ret;								\
+})
+
+#define __this_cpu_cmpxchg_double_4(pcp, o1, o2, n1, n2) percpu_cmpxchg8b_double((pcp), o1, o2, n1, n2)
+#define this_cpu_cmpxchg_double_4(pcp, o1, o2, n1, n2)	percpu_cmpxchg8b_double((pcp), o1, o2, n1, n2)
+#define irqsafe_cmpxchg_double_4(pcp, o1, o2, n1, n2)	percpu_cmpxchg8b_double((pcp), o1, o2, n1, n2)
+
+#ifndef CONFIG_X86_64
+
+/* We can support an 8-byte cmpxchg with a special instruction on 32 bit */
+#define __this_cpu_cmpxchg_8(pcp, oval, nval)				\
+({									\
+	typeof(pcp) __ret;						\
+	typeof(pcp) __old = (oval);					\
+	typeof(pcp) __new = (nval);					\
+	asm("cmpxchg8b "__percpu_arg(1)					\
+	    : "=A" (__ret), "+m" (pcp)					\
+	    : "b" ((u32)__new), "c" ((u32)(__new >> 32)), "0" (__old)	\
+	    : "memory");						\
+	__ret;								\
+})
+
+#define this_cpu_cmpxchg_8(pcp, oval, nval)	__this_cpu_cmpxchg_8(pcp, oval, nval)
+#define irqsafe_cmpxchg_8(pcp, oval, nval)	__this_cpu_cmpxchg_8(pcp, oval, nval)
+
+#endif
 #endif
+
 /*
  * Per cpu atomic 64 bit operations are only available under 64 bit.
  * 32 bit must fall back to generic operations.
@@ -374,6 +454,26 @@ do {									\
 #define __this_cpu_add_return_8(pcp, val)	percpu_add_return_op((pcp), val)
 #define this_cpu_add_return_8(pcp, val)	percpu_add_return_op((pcp), val)
 
+#define __this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op((pcp), oval, nval)
+#define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op((pcp), oval, nval)
+
+#define percpu_cmpxchg16b(pcp, o1, o2, n1, n2)				\
+({									\
+	char __ret;							\
+	typeof(o1) __o1 = o1;						\
+	typeof(o1) __n1 = n1;						\
+	typeof(o2) __o2 = o2;						\
+	typeof(o2) __n2 = n2;						\
+	asm("cmpxchg16b "__percpu_arg(1)"\n\tsetz %0\n\t"		\
+		    : "=a"(__ret), "+m" (*pcp)				\
+		    :  "b"(__n1), "c"(__n2), "a"(__o1), "d"(__o2)	\
+		    : "memory");					\
+	__ret;								\
+})
+
+#define __this_cpu_cmpxchg_double_8(pcp, o1, o2, n1, n2) percpu_cmpxchg16b((pcp), o1, o2, n1, n2)
+#define this_cpu_cmpxchg_double_8(pcp, o1, o2, n1, n2)	percpu_cmpxchg16b((pcp), o1, o2, n1, n2)
+#define irqsafe_cmpxchg_double_8(pcp, o1, o2, n1, n2)	percpu_cmpxchg16b((pcp), o1, o2, n1, n2)
+
 #endif
 
 /* This is not atomic against other CPUs -- CPU preemption needs to be off */


  parent reply	other threads:[~2010-11-23 23:52 UTC|newest]

Thread overview: 51+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2010-11-23 23:51 [thiscpuops upgrade 00/10] Upgrade of this_cpu_ops Christoph Lameter
2010-11-23 23:51 ` [thiscpuops upgrade 01/10] percpucounter: Optimize __percpu_counter_add a bit through the use of this_cpu() options Christoph Lameter
2010-11-24  7:07   ` Pekka Enberg
2010-11-26 15:43   ` Tejun Heo
2010-11-23 23:51 ` [thiscpuops upgrade 02/10] vmstat: Optimize zone counter modifications through the use of this cpu operations Christoph Lameter
2010-11-26 16:25   ` Tejun Heo
2010-11-23 23:51 ` [thiscpuops upgrade 03/10] percpu: Generic support for this_cpu_add,sub,dec,inc_return Christoph Lameter
2010-11-26 16:31   ` Tejun Heo
2010-11-26 16:37     ` Christoph Lameter
2010-11-26 16:39       ` Tejun Heo
2010-11-23 23:51 ` [thiscpuops upgrade 04/10] x86: Support " Christoph Lameter
2010-11-26 16:33   ` Tejun Heo
2010-11-23 23:51 ` [thiscpuops upgrade 05/10] x86: Use this_cpu_inc_return for nmi counter Christoph Lameter
2010-11-26 16:35   ` Tejun Heo
2010-11-26 17:02     ` Christoph Lameter
2010-11-26 17:05       ` Tejun Heo
2010-11-23 23:51 ` [thiscpuops upgrade 06/10] vmstat: Use this_cpu_inc_return for vm statistics Christoph Lameter
2010-11-23 23:51 ` [thiscpuops upgrade 07/10] highmem: Use this_cpu_xx_return() operations Christoph Lameter
2010-11-23 23:51 ` [thiscpuops upgrade 08/10] percpu: generic this_cpu_cmpxchg() and this_cpu_cmpxchg_double support Christoph Lameter
2010-11-26 16:51   ` Tejun Heo
2010-11-26 16:56     ` Eric Dumazet
2010-11-26 16:58       ` Tejun Heo
2010-11-26 17:01         ` Eric Dumazet
2010-11-26 17:07           ` Tejun Heo
2010-11-26 17:16             ` Eric Dumazet
2010-11-23 23:51 ` Christoph Lameter [this message]
2010-11-24  0:41   ` [thiscpuops upgrade 09/10] x86: this_cpu_cmpxchg and this_cpu_cmpxchg_double operations Eric Dumazet
2010-11-24  3:11     ` Christoph Lameter
2010-11-24  7:05       ` Pekka Enberg
2010-11-24  0:44   ` Mathieu Desnoyers
2010-11-23 23:51 ` [thiscpuops upgrade 10/10] Lockless (and preemptless) fastpaths for slub Christoph Lameter
2010-11-24  0:22   ` Eric Dumazet
2010-11-24  3:13     ` Christoph Lameter
2010-11-24  4:37       ` Christoph Lameter
2010-11-24  1:02   ` Mathieu Desnoyers
2010-11-24  1:05     ` Mathieu Desnoyers
2010-11-24  3:09       ` Christoph Lameter
2010-11-24  7:16   ` Pekka Enberg
2010-11-24 16:17     ` Christoph Lameter
2010-11-24 16:37       ` Pekka Enberg
2010-11-24 16:45         ` Christoph Lameter
2010-11-24 16:47           ` Pekka Enberg
2010-11-24 16:55             ` Christoph Lameter
2010-11-24 19:37       ` Jeremy Fitzhardinge
2010-11-24 19:53         ` Christoph Lameter
2010-11-24 20:01           ` Jeremy Fitzhardinge
2010-11-24 19:56         ` Mathieu Desnoyers
2010-11-24  8:15   ` Peter Zijlstra
2010-11-24 16:14     ` Christoph Lameter
2010-11-24 17:26       ` Peter Zijlstra
2010-11-24 18:08         ` Christoph Lameter

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20101123235201.146063796@linux.com \
    --to=cl@linux.com \
    --cc=akpm@linux-foundation.org \
    --cc=penberg@cs.helsinki.fi \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox