From: Pan Xinhui <xinhui@linux.vnet.ibm.com>
To: linux-kernel@vger.kernel.org, linuxppc-dev@lists.ozlabs.org
Cc: benh@kernel.crashing.org, paulus@samba.org, mpe@ellerman.id.au,
boqun.feng@gmail.com, peterz@infradead.org,
paulmck@linux.vnet.ibm.com, tglx@linutronix.de
Subject: [PATCH V2] powerpc: Implement {cmp}xchg for u8 and u16
Date: Tue, 19 Apr 2016 14:29:34 +0800
Message-ID: <5715D04E.9050009@linux.vnet.ibm.com>
From: Pan Xinhui <xinhui.pan@linux.vnet.ibm.com>
Implement xchg{u8,u16}{_local,_relaxed} and
cmpxchg{u8,u16}{,_local,_acquire,_relaxed}.
These are built on top of the existing 32-bit __cmpxchg_u32*() primitives,
so they work on all powerpc.
Suggested-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Pan Xinhui <xinhui.pan@linux.vnet.ibm.com>
---
Changes from V1:
- reworked completely.
---
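The generated helpers emulate the 1- and 2-byte operations with the
existing 32-bit primitives: round the pointer down to the containing
aligned word, isolate the byte or halfword with an endian-dependent
shift and mask (BITOFF_CAL), and retry a word-sized cmpxchg until it
succeeds or, for cmpxchg, until the comparison fails. Below is a
minimal standalone sketch of the same idea for the u8 cmpxchg case.
It is illustrative only: GCC's __atomic builtins stand in for the
kernel's lwarx/stwcx.-based __cmpxchg_u32*(), and the helper name is
made up, not part of the patch.

#include <stdint.h>

#define BITS_PER_BYTE	8

#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define BITOFF_CAL(size, off)	((sizeof(uint32_t) - (size) - (off)) * BITS_PER_BYTE)
#else
#define BITOFF_CAL(size, off)	((off) * BITS_PER_BYTE)
#endif

static uint8_t sketch_cmpxchg_u8(uint8_t *ptr, uint8_t old, uint8_t new)
{
	int off = (uintptr_t)ptr % sizeof(uint32_t);
	uint32_t *p = (uint32_t *)((uintptr_t)ptr - off);	/* containing aligned word */
	int bitoff = BITOFF_CAL(sizeof(uint8_t), off);
	uint32_t bitmask = 0xffu << bitoff;
	uint32_t oldv, newv;
	uint8_t ret;

	do {
		oldv = __atomic_load_n(p, __ATOMIC_RELAXED);	/* snapshot the whole word */
		ret = (oldv & bitmask) >> bitoff;		/* extract our byte */
		if (ret != old)					/* comparison failed, no store */
			break;
		newv = (oldv & ~bitmask) | ((uint32_t)new << bitoff);
	} while (!__atomic_compare_exchange_n(p, &oldv, newv, 0,
					      __ATOMIC_RELAXED, __ATOMIC_RELAXED));
	return ret;
}

With this in place, the generic cmpxchg()/xchg() wrappers at the bottom
of the header simply gain "case 1" and "case 2" branches next to the
existing 4- and 8-byte ones, as the hunks below show.
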
arch/powerpc/include/asm/cmpxchg.h | 83 ++++++++++++++++++++++++++++++++++++++
1 file changed, 83 insertions(+)
diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
index 44efe73..79a1f45 100644
--- a/arch/powerpc/include/asm/cmpxchg.h
+++ b/arch/powerpc/include/asm/cmpxchg.h
@@ -7,6 +7,37 @@
#include <asm/asm-compat.h>
#include <linux/bug.h>
+#ifdef __BIG_ENDIAN
+#define BITOFF_CAL(size, off) ((sizeof(u32) - size - off) * BITS_PER_BYTE)
+#else
+#define BITOFF_CAL(size, off) (off * BITS_PER_BYTE)
+#endif
+
+static __always_inline unsigned long
+__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
+ unsigned long new);
+
+#define __XCHG_GEN(cmp, type, sfx, u32sfx, skip, v) \
+static __always_inline u32 \
+__##cmp##xchg_##type##sfx(v void *ptr, u32 old, u32 new) \
+{ \
+ int size = sizeof (type); \
+ int off = (unsigned long)ptr % sizeof(u32); \
+ volatile u32 *p = ptr - off; \
+ int bitoff = BITOFF_CAL(size, off); \
+ u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff; \
+ u32 oldv, newv; \
+ u32 ret; \
+ do { \
+ oldv = READ_ONCE(*p); \
+ ret = (oldv & bitmask) >> bitoff; \
+ if (skip && ret != old) \
+ break; \
+ newv = (oldv & ~bitmask) | (new << bitoff); \
+ } while (__cmpxchg_u32##u32sfx((v void*)p, oldv, newv) != oldv);\
+ return ret; \
+}
+
/*
* Atomic exchange
*
@@ -14,6 +45,19 @@
* the previous value stored there.
*/
+#define XCHG_GEN(type, sfx, v) \
+ __XCHG_GEN(_, type, sfx, _local, 0, v) \
+static __always_inline u32 __xchg_##type##sfx(v void *p, u32 n) \
+{ \
+ return ___xchg_##type##sfx(p, 0, n); \
+}
+
+XCHG_GEN(u8, _local, volatile);
+XCHG_GEN(u8, _relaxed, );
+XCHG_GEN(u16, _local, volatile);
+XCHG_GEN(u16, _relaxed, );
+#undef XCHG_GEN
+
static __always_inline unsigned long
__xchg_u32_local(volatile void *p, unsigned long val)
{
@@ -88,6 +132,10 @@ static __always_inline unsigned long
__xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
{
switch (size) {
+ case 1:
+ return __xchg_u8_local(ptr, x);
+ case 2:
+ return __xchg_u16_local(ptr, x);
case 4:
return __xchg_u32_local(ptr, x);
#ifdef CONFIG_PPC64
@@ -103,6 +151,10 @@ static __always_inline unsigned long
__xchg_relaxed(void *ptr, unsigned long x, unsigned int size)
{
switch (size) {
+ case 1:
+ return __xchg_u8_relaxed(ptr, x);
+ case 2:
+ return __xchg_u16_relaxed(ptr, x);
case 4:
return __xchg_u32_relaxed(ptr, x);
#ifdef CONFIG_PPC64
@@ -226,6 +278,21 @@ __cmpxchg_u32_acquire(u32 *p, unsigned long old, unsigned long new)
return prev;
}
+
+#define CMPXCHG_GEN(type, sfx, v) \
+ __XCHG_GEN(cmp, type, sfx, sfx, 1, v)
+
+CMPXCHG_GEN(u8, , volatile);
+CMPXCHG_GEN(u8, _local, volatile);
+CMPXCHG_GEN(u8, _relaxed, );
+CMPXCHG_GEN(u8, _acquire, );
+CMPXCHG_GEN(u16, , volatile);
+CMPXCHG_GEN(u16, _local, volatile);
+CMPXCHG_GEN(u16, _relaxed, );
+CMPXCHG_GEN(u16, _acquire, );
+#undef CMPXCHG_GEN
+#undef __XCHG_GEN
+
#ifdef CONFIG_PPC64
static __always_inline unsigned long
__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
@@ -316,6 +383,10 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
unsigned int size)
{
switch (size) {
+ case 1:
+ return __cmpxchg_u8(ptr, old, new);
+ case 2:
+ return __cmpxchg_u16(ptr, old, new);
case 4:
return __cmpxchg_u32(ptr, old, new);
#ifdef CONFIG_PPC64
@@ -332,6 +403,10 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
unsigned int size)
{
switch (size) {
+ case 1:
+ return __cmpxchg_u8_local(ptr, old, new);
+ case 2:
+ return __cmpxchg_u16_local(ptr, old, new);
case 4:
return __cmpxchg_u32_local(ptr, old, new);
#ifdef CONFIG_PPC64
@@ -348,6 +423,10 @@ __cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
unsigned int size)
{
switch (size) {
+ case 1:
+ return __cmpxchg_u8_relaxed(ptr, old, new);
+ case 2:
+ return __cmpxchg_u16_relaxed(ptr, old, new);
case 4:
return __cmpxchg_u32_relaxed(ptr, old, new);
#ifdef CONFIG_PPC64
@@ -364,6 +443,10 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
unsigned int size)
{
switch (size) {
+ case 1:
+ return __cmpxchg_u8_acquire(ptr, old, new);
+ case 2:
+ return __cmpxchg_u16_acquire(ptr, old, new);
case 4:
return __cmpxchg_u32_acquire(ptr, old, new);
#ifdef CONFIG_PPC64
--
2.4.3