public inbox for linux-riscv@lists.infradead.org
 help / color / mirror / Atom feed
From: guoren@kernel.org
To: palmer@rivosinc.com, heiko@sntech.de, hch@infradead.org,
	arnd@arndb.de, peterz@infradead.org, will@kernel.org,
	boqun.feng@gmail.com, longman@redhat.com, shorne@gmail.com,
	conor.dooley@microchip.com
Cc: linux-csky@vger.kernel.org, linux-arch@vger.kernel.org,
	linux-kernel@vger.kernel.org, linux-riscv@lists.infradead.org,
	Guo Ren <guoren@linux.alibaba.com>, Guo Ren <guoren@kernel.org>,
	Jonas Bonn <jonas@southpole.se>,
	Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
Subject: [PATCH V9 13/15] openrisc: cmpxchg: Cleanup unnecessary codes
Date: Mon,  8 Aug 2022 03:13:16 -0400	[thread overview]
Message-ID: <20220808071318.3335746-14-guoren@kernel.org> (raw)
In-Reply-To: <20220808071318.3335746-1-guoren@kernel.org>

From: Guo Ren <guoren@linux.alibaba.com>

Remove cmpxchg_small and xchg_small because they are unnecessary now, and
they break the forward progress guarantee for atomic operations.

Also remove the unnecessary __HAVE_ARCH_CMPXCHG definition.

Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Guo Ren <guoren@kernel.org>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
---
 arch/openrisc/include/asm/cmpxchg.h | 167 +++++++++-------------------
 1 file changed, 50 insertions(+), 117 deletions(-)

diff --git a/arch/openrisc/include/asm/cmpxchg.h b/arch/openrisc/include/asm/cmpxchg.h
index 79fd16162ccb..df83b33b5882 100644
--- a/arch/openrisc/include/asm/cmpxchg.h
+++ b/arch/openrisc/include/asm/cmpxchg.h
@@ -20,10 +20,8 @@
 #include  <linux/compiler.h>
 #include  <linux/types.h>
 
-#define __HAVE_ARCH_CMPXCHG 1
-
-static inline unsigned long cmpxchg_u32(volatile void *ptr,
-		unsigned long old, unsigned long new)
+/* cmpxchg */
+static inline u32 cmpxchg32(volatile void *ptr, u32 old, u32 new)
 {
 	__asm__ __volatile__(
 		"1:	l.lwa %0, 0(%1)		\n"
@@ -41,8 +39,33 @@ static inline unsigned long cmpxchg_u32(volatile void *ptr,
 	return old;
 }
 
-static inline unsigned long xchg_u32(volatile void *ptr,
-		unsigned long val)
+#define __cmpxchg(ptr, old, new, size)					\
+({									\
+	__typeof__(ptr) __ptr = (ptr);					\
+	__typeof__(*(ptr)) __old = (old);				\
+	__typeof__(*(ptr)) __new = (new);				\
+	__typeof__(*(ptr)) __ret;					\
+	switch (size) {							\
+	case 4:								\
+		__ret = (__typeof__(*(ptr)))				\
+			cmpxchg32(__ptr, (u32)__old, (u32)__new);	\
+		break;							\
+	default:							\
+		BUILD_BUG();						\
+	}								\
+	__ret;								\
+})
+
+#define arch_cmpxchg(ptr, o, n)						\
+({									\
+	__typeof__(*(ptr)) _o_ = (o);					\
+	__typeof__(*(ptr)) _n_ = (n);					\
+	(__typeof__(*(ptr))) __cmpxchg((ptr),				\
+				       _o_, _n_, sizeof(*(ptr)));	\
+})
+
+/* xchg */
+static inline u32 xchg32(volatile void *ptr, u32 val)
 {
 	__asm__ __volatile__(
 		"1:	l.lwa %0, 0(%1)		\n"
@@ -56,116 +79,26 @@ static inline unsigned long xchg_u32(volatile void *ptr,
 	return val;
 }
 
-static inline u32 cmpxchg_small(volatile void *ptr, u32 old, u32 new,
-				int size)
-{
-	int off = (unsigned long)ptr % sizeof(u32);
-	volatile u32 *p = ptr - off;
-#ifdef __BIG_ENDIAN
-	int bitoff = (sizeof(u32) - size - off) * BITS_PER_BYTE;
-#else
-	int bitoff = off * BITS_PER_BYTE;
-#endif
-	u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff;
-	u32 load32, old32, new32;
-	u32 ret;
-
-	load32 = READ_ONCE(*p);
-
-	while (true) {
-		ret = (load32 & bitmask) >> bitoff;
-		if (old != ret)
-			return ret;
-
-		old32 = (load32 & ~bitmask) | (old << bitoff);
-		new32 = (load32 & ~bitmask) | (new << bitoff);
-
-		/* Do 32 bit cmpxchg */
-		load32 = cmpxchg_u32(p, old32, new32);
-		if (load32 == old32)
-			return old;
-	}
-}
-
-/* xchg */
-
-static inline u32 xchg_small(volatile void *ptr, u32 x, int size)
-{
-	int off = (unsigned long)ptr % sizeof(u32);
-	volatile u32 *p = ptr - off;
-#ifdef __BIG_ENDIAN
-	int bitoff = (sizeof(u32) - size - off) * BITS_PER_BYTE;
-#else
-	int bitoff = off * BITS_PER_BYTE;
-#endif
-	u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff;
-	u32 oldv, newv;
-	u32 ret;
-
-	do {
-		oldv = READ_ONCE(*p);
-		ret = (oldv & bitmask) >> bitoff;
-		newv = (oldv & ~bitmask) | (x << bitoff);
-	} while (cmpxchg_u32(p, oldv, newv) != oldv);
-
-	return ret;
-}
-
-/*
- * This function doesn't exist, so you'll get a linker error
- * if something tries to do an invalid cmpxchg().
- */
-extern unsigned long __cmpxchg_called_with_bad_pointer(void)
-	__compiletime_error("Bad argument size for cmpxchg");
-
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
-		unsigned long new, int size)
-{
-	switch (size) {
-	case 1:
-	case 2:
-		return cmpxchg_small(ptr, old, new, size);
-	case 4:
-		return cmpxchg_u32(ptr, old, new);
-	default:
-		return __cmpxchg_called_with_bad_pointer();
-	}
-}
-
-#define arch_cmpxchg(ptr, o, n)						\
-	({								\
-		(__typeof__(*(ptr))) __cmpxchg((ptr),			\
-					       (unsigned long)(o),	\
-					       (unsigned long)(n),	\
-					       sizeof(*(ptr)));		\
-	})
-
-/*
- * This function doesn't exist, so you'll get a linker error if
- * something tries to do an invalidly-sized xchg().
- */
-extern unsigned long __xchg_called_with_bad_pointer(void)
-	__compiletime_error("Bad argument size for xchg");
-
-static inline unsigned long __xchg(volatile void *ptr, unsigned long with,
-		int size)
-{
-	switch (size) {
-	case 1:
-	case 2:
-		return xchg_small(ptr, with, size);
-	case 4:
-		return xchg_u32(ptr, with);
-	default:
-		return __xchg_called_with_bad_pointer();
-	}
-}
-
-#define arch_xchg(ptr, with) 						\
-	({								\
-		(__typeof__(*(ptr))) __xchg((ptr),			\
-					    (unsigned long)(with),	\
-					    sizeof(*(ptr)));		\
-	})
+#define __xchg(ptr, new, size)						\
+({									\
+	__typeof__(ptr) __ptr = (ptr);					\
+	__typeof__(new) __new = (new);					\
+	__typeof__(*(ptr)) __ret;					\
+	switch (size) {							\
+	case 4:								\
+		__ret = (__typeof__(*(ptr)))				\
+			xchg32(__ptr, (u32)__new);			\
+		break;							\
+	default:							\
+		BUILD_BUG();						\
+	}								\
+	__ret;								\
+})
+
+#define arch_xchg(ptr, x)						\
+({									\
+	__typeof__(*(ptr)) _x_ = (x);					\
+	(__typeof__(*(ptr))) __xchg((ptr), _x_, sizeof(*(ptr)));	\
+})
 
 #endif /* __ASM_OPENRISC_CMPXCHG_H */
-- 
2.36.1


_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv

  parent reply	other threads:[~2022-08-08  7:15 UTC|newest]

Thread overview: 17+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-08-08  7:13 [PATCH V9 00/15] arch: Add qspinlock support and atomic cleanup guoren
2022-08-08  7:13 ` [PATCH V9 01/15] asm-generic: ticket-lock: Remove unnecessary atomic_read guoren
2022-08-08  7:13 ` [PATCH V9 02/15] asm-generic: ticket-lock: Use the same struct definitions with qspinlock guoren
2022-08-08  7:13 ` [PATCH V9 03/15] asm-generic: ticket-lock: Move into ticket_spinlock.h guoren
2022-08-08  7:13 ` [PATCH V9 04/15] asm-generic: ticket-lock: Keep ticket-lock the same semantic with qspinlock guoren
2022-08-08  7:13 ` [PATCH V9 05/15] asm-generic: spinlock: Add queued spinlock support in common header guoren
2022-08-08  7:13 ` [PATCH V9 06/15] riscv: atomic: Clean up unnecessary acquire and release definitions guoren
2022-08-08  7:13 ` [PATCH V9 07/15] riscv: cmpxchg: Remove xchg32 and xchg64 guoren
2022-08-08  7:13 ` [PATCH V9 08/15] riscv: cmpxchg: Forbid arch_cmpxchg64 for 32-bit guoren
2022-08-08  7:13 ` [PATCH V9 09/15] riscv: cmpxchg: Optimize cmpxchg64 guoren
2022-08-08  7:13 ` [PATCH V9 10/15] riscv: Enable ARCH_INLINE_READ*/WRITE*/SPIN* guoren
2022-08-08  7:13 ` [PATCH V9 11/15] riscv: Add qspinlock support guoren
2022-08-08  7:13 ` [PATCH V9 12/15] riscv: Add combo spinlock support guoren
2022-08-08  7:13 ` guoren [this message]
2022-08-08  7:13 ` [PATCH V9 14/15] openrisc: Move from ticket-lock to qspinlock guoren
2022-08-08  7:13 ` [PATCH V9 15/15] csky: spinlock: Use the generic header files guoren
2022-08-08  7:25 ` [PATCH V9 00/15] arch: Add qspinlock support and atomic cleanup Guo Ren

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20220808071318.3335746-14-guoren@kernel.org \
    --to=guoren@kernel.org \
    --cc=arnd@arndb.de \
    --cc=boqun.feng@gmail.com \
    --cc=conor.dooley@microchip.com \
    --cc=guoren@linux.alibaba.com \
    --cc=hch@infradead.org \
    --cc=heiko@sntech.de \
    --cc=jonas@southpole.se \
    --cc=linux-arch@vger.kernel.org \
    --cc=linux-csky@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-riscv@lists.infradead.org \
    --cc=longman@redhat.com \
    --cc=palmer@rivosinc.com \
    --cc=peterz@infradead.org \
    --cc=shorne@gmail.com \
    --cc=stefan.kristiansson@saunalahti.fi \
    --cc=will@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox