From: Michael Clark <michaeljclark@mac.com>
To: Linux RISC-V <linux-riscv@lists.infradead.org>
Cc: RISC-V Patches <patches@groups.riscv.org>,
Michael Clark <michaeljclark@mac.com>
Subject: [PATCH 1/3] RISC-V: implement xchg_small and cmpxchg_small for char and short
Date: Mon, 11 Feb 2019 17:38:27 +1300 [thread overview]
Message-ID: <20190211043829.30096-2-michaeljclark@mac.com> (raw)
In-Reply-To: <20190211043829.30096-1-michaeljclark@mac.com>
This patch implements xchg and cmpxchg for char and short. xchg
and cmpxchg on small words are necessary to use the generic
qspinlock and qrwlock which are enabled in a subsequent patch.
The MIPS cmpxchg code is adapted into a macro template to implement
the three additional variants (relaxed, acquire, release) supported
by the RISC-V memory model.
Cc: RISC-V Patches <patches@groups.riscv.org>
Cc: Linux RISC-V <linux-riscv@lists.infradead.org>
Signed-off-by: Michael Clark <michaeljclark@mac.com>
---
arch/riscv/include/asm/cmpxchg.h | 54 ++++++++++++++
arch/riscv/kernel/Makefile | 1 +
arch/riscv/kernel/cmpxchg.c | 118 +++++++++++++++++++++++++++++++
3 files changed, 173 insertions(+)
create mode 100644 arch/riscv/kernel/cmpxchg.c
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index c12833f7b6bd..64b3d36e2d6e 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -19,12 +19,34 @@
#include <asm/barrier.h>
#include <asm/fence.h>
+extern unsigned long __xchg_relaxed_small(volatile void *ptr, unsigned long new,
+ unsigned int size);
+extern unsigned long __xchg_acquire_small(volatile void *ptr, unsigned long new,
+ unsigned int size);
+extern unsigned long __xchg_release_small(volatile void *ptr, unsigned long new,
+ unsigned int size);
+extern unsigned long __xchg_small(volatile void *ptr, unsigned long new,
+ unsigned int size);
+
+extern unsigned long __cmpxchg_relaxed_small(volatile void *ptr, unsigned long old,
+ unsigned long new, unsigned int size);
+extern unsigned long __cmpxchg_acquire_small(volatile void *ptr, unsigned long old,
+ unsigned long new, unsigned int size);
+extern unsigned long __cmpxchg_release_small(volatile void *ptr, unsigned long old,
+ unsigned long new, unsigned int size);
+extern unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
+ unsigned long new, unsigned int size);
+
#define __xchg_relaxed(ptr, new, size) \
({ \
__typeof__(ptr) __ptr = (ptr); \
__typeof__(new) __new = (new); \
__typeof__(*(ptr)) __ret; \
switch (size) { \
+	case 1:								\
+	case 2:								\
+		__ret = (__typeof__(*(ptr)))__xchg_relaxed_small(	\
+			(void*)__ptr, (unsigned long)__new, size);	\
+		break;							\
case 4: \
__asm__ __volatile__ ( \
" amoswap.w %0, %2, %1\n" \
@@ -58,6 +80,10 @@
__typeof__(new) __new = (new); \
__typeof__(*(ptr)) __ret; \
switch (size) { \
+	case 1:								\
+	case 2:								\
+		__ret = (__typeof__(*(ptr)))__xchg_acquire_small(	\
+			(void*)__ptr, (unsigned long)__new, size);	\
+		break;							\
case 4: \
__asm__ __volatile__ ( \
" amoswap.w %0, %2, %1\n" \
@@ -93,6 +119,10 @@
__typeof__(new) __new = (new); \
__typeof__(*(ptr)) __ret; \
switch (size) { \
+	case 1:								\
+	case 2:								\
+		__ret = (__typeof__(*(ptr)))__xchg_release_small(	\
+			(void*)__ptr, (unsigned long)__new, size);	\
+		break;							\
case 4: \
__asm__ __volatile__ ( \
RISCV_RELEASE_BARRIER \
@@ -128,6 +158,10 @@
__typeof__(new) __new = (new); \
__typeof__(*(ptr)) __ret; \
switch (size) { \
+	case 1:								\
+	case 2:								\
+		__ret = (__typeof__(*(ptr)))__xchg_small(		\
+			(void*)__ptr, (unsigned long)__new, size);	\
+		break;							\
case 4: \
__asm__ __volatile__ ( \
" amoswap.w.aqrl %0, %2, %1\n" \
@@ -179,6 +213,11 @@
__typeof__(*(ptr)) __ret; \
register unsigned int __rc; \
switch (size) { \
+	case 1:								\
+	case 2:								\
+		__ret = (__typeof__(*(ptr)))__cmpxchg_relaxed_small(	\
+			(void*)__ptr, (unsigned long)__old,		\
+			(unsigned long)__new, size);			\
+		break;							\
case 4: \
__asm__ __volatile__ ( \
"0: lr.w %0, %2\n" \
@@ -223,6 +262,11 @@
__typeof__(*(ptr)) __ret; \
register unsigned int __rc; \
switch (size) { \
+	case 1:								\
+	case 2:								\
+		__ret = (__typeof__(*(ptr)))__cmpxchg_acquire_small(	\
+			(void*)__ptr, (unsigned long)__old,		\
+			(unsigned long)__new, size);			\
+		break;							\
case 4: \
__asm__ __volatile__ ( \
"0: lr.w %0, %2\n" \
@@ -269,6 +313,11 @@
__typeof__(*(ptr)) __ret; \
register unsigned int __rc; \
switch (size) { \
+	case 1:								\
+	case 2:								\
+		__ret = (__typeof__(*(ptr)))__cmpxchg_release_small(	\
+			(void*)__ptr, (unsigned long)__old,		\
+			(unsigned long)__new, size);			\
+		break;							\
case 4: \
__asm__ __volatile__ ( \
RISCV_RELEASE_BARRIER \
@@ -315,6 +364,11 @@
__typeof__(*(ptr)) __ret; \
register unsigned int __rc; \
switch (size) { \
+	case 1:								\
+	case 2:								\
+		__ret = (__typeof__(*(ptr)))__cmpxchg_small(		\
+			(void*)__ptr, (unsigned long)__old,		\
+			(unsigned long)__new, size);			\
+		break;							\
case 4: \
__asm__ __volatile__ ( \
"0: lr.w %0, %2\n" \
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index f13f7f276639..9f96ba34fd85 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -27,6 +27,7 @@ obj-y += riscv_ksyms.o
obj-y += stacktrace.o
obj-y += vdso.o
obj-y += cacheinfo.o
+obj-y += cmpxchg.o
obj-y += vdso/
CFLAGS_setup.o := -mcmodel=medany
diff --git a/arch/riscv/kernel/cmpxchg.c b/arch/riscv/kernel/cmpxchg.c
new file mode 100644
index 000000000000..6208d81e4461
--- /dev/null
+++ b/arch/riscv/kernel/cmpxchg.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2017 Imagination Technologies
+ * Author: Paul Burton <paul.burton@mips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/bitops.h>
+#include <asm/cmpxchg.h>
+
+#define TEMPLATE_XCHG_SMALL(__func,__op)				\
+unsigned long __func(volatile void *ptr, unsigned long new,		\
+		     unsigned int size)					\
+{									\
+	u32 old32, new32, load32, mask;					\
+	volatile u32 *ptr32;						\
+	unsigned int shift;						\
+									\
+	/* Check that ptr is naturally aligned */			\
+	WARN_ON((unsigned long)ptr & (size - 1));			\
+									\
+	/* Mask value to the correct size. */				\
+	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);			\
+	new &= mask;							\
+									\
+	/*								\
+	 * Calculate a shift & mask that corresponds to the value	\
+	 * we wish to exchange within the naturally aligned 4 byte	\
+	 * integer that includes it.					\
+	 */								\
+	shift = (unsigned long)ptr & 0x3;				\
+	shift *= BITS_PER_BYTE;						\
+	mask <<= shift;							\
+									\
+	/*								\
+	 * Calculate a pointer to the naturally aligned 4 byte		\
+	 * integer that includes our byte, and load its value.		\
+	 */								\
+	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);		\
+	load32 = *ptr32;						\
+									\
+	do {								\
+		old32 = load32;						\
+		new32 = (load32 & ~mask) | (new << shift);		\
+		load32 = __op(ptr32, old32, new32);			\
+	} while (load32 != old32);					\
+									\
+	return (load32 & mask) >> shift;				\
+}
+
+TEMPLATE_XCHG_SMALL(__xchg_small,cmpxchg)
+TEMPLATE_XCHG_SMALL(__xchg_relaxed_small,cmpxchg_relaxed)
+TEMPLATE_XCHG_SMALL(__xchg_acquire_small,cmpxchg_acquire)
+TEMPLATE_XCHG_SMALL(__xchg_release_small,cmpxchg_release)
+
+#define TEMPLATE_CMPXCHG_SMALL(__func,__op)				\
+unsigned long __func(volatile void *ptr, unsigned long old,		\
+		     unsigned long new, unsigned int size)		\
+{									\
+	u32 old32, new32, load32, mask;					\
+	volatile u32 *ptr32;						\
+	unsigned int shift;						\
+	u32 load;							\
+									\
+	/* Check that ptr is naturally aligned */			\
+	WARN_ON((unsigned long)ptr & (size - 1));			\
+									\
+	/* Mask inputs to the correct size. */				\
+	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);			\
+	old &= mask;							\
+	new &= mask;							\
+									\
+	/*								\
+	 * Calculate a shift & mask that corresponds to the value	\
+	 * we wish to exchange within the naturally aligned 4 byte	\
+	 * integer that includes it.					\
+	 */								\
+	shift = (unsigned long)ptr & 0x3;				\
+	shift *= BITS_PER_BYTE;						\
+	mask <<= shift;							\
+									\
+	/*								\
+	 * Calculate a pointer to the naturally aligned 4 byte		\
+	 * integer that includes our byte, and load its value.		\
+	 */								\
+	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);		\
+	load32 = *ptr32;						\
+									\
+	while (true) {							\
+		/*							\
+		 * Ensure the subword we want to exchange matches	\
+		 * the expected old value, and if not then bail.	\
+		 */							\
+		load = (load32 & mask) >> shift;			\
+		if (load != old)					\
+			return load;					\
+									\
+		/*							\
+		 * Calculate the old & new values of the naturally	\
+		 * aligned 4 byte integer including the byte we want	\
+		 * to exchange. Attempt to exchange the old value	\
+		 * for the new value, and return if we succeed.		\
+		 */							\
+		old32 = (load32 & ~mask) | (old << shift);		\
+		new32 = (load32 & ~mask) | (new << shift);		\
+		load32 = __op(ptr32, old32, new32);			\
+		if (load32 == old32)					\
+			return old;					\
+	}								\
+}
+
+TEMPLATE_CMPXCHG_SMALL(__cmpxchg_small,cmpxchg)
+TEMPLATE_CMPXCHG_SMALL(__cmpxchg_relaxed_small,cmpxchg_relaxed)
+TEMPLATE_CMPXCHG_SMALL(__cmpxchg_acquire_small,cmpxchg_acquire)
+TEMPLATE_CMPXCHG_SMALL(__cmpxchg_release_small,cmpxchg_release)
--
2.17.1
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
next prev parent reply other threads:[~2019-02-11 4:38 UTC|newest]
Thread overview: 11+ messages / expand[flat|nested] mbox.gz Atom feed top
2019-02-11 4:38 [PATCH 0/3] RISC-V: use generic spinlock and rwlock Michael Clark
2019-02-11 4:38 ` Michael Clark [this message]
2019-02-12 7:17 ` [PATCH 1/3] RISC-V: implement xchg_small and cmpxchg_small for char and short Christoph Hellwig
2019-02-24 21:03 ` Michael Clark
2019-02-11 4:38 ` [PATCH 2/3] RISC-V: convert custom spinlock/rwlock to generic qspinlock/qrwlock Michael Clark
2019-02-11 4:38 ` [PATCH 3/3] MIPS: fix truncation in __cmpxchg_small for short values Michael Clark
2019-02-11 12:37 ` Jonas Gorski
2019-02-11 20:20 ` Paul Burton
2019-02-24 0:09 ` Michael Clark
2019-02-11 20:03 ` Paul Burton
2019-02-11 20:03 ` Paul Burton
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20190211043829.30096-2-michaeljclark@mac.com \
--to=michaeljclark@mac.com \
--cc=linux-riscv@lists.infradead.org \
--cc=patches@groups.riscv.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.