From: "wuqiang.matt" <wuqiang.matt@bytedance.com>
To: ubizjak@gmail.com, mark.rutland@arm.com, vgupta@kernel.org,
bcain@quicinc.com, jonas@southpole.se,
stefan.kristiansson@saunalahti.fi, shorne@gmail.com,
chris@zankel.net, jcmvbkbc@gmail.com, geert@linux-m68k.org,
andi.shyti@linux.intel.com, mingo@kernel.org,
palmer@rivosinc.com, andrzej.hajda@intel.com, arnd@arndb.de,
peterz@infradead.org, mhiramat@kernel.org
Cc: linux-arch@vger.kernel.org, linux-snps-arc@lists.infradead.org,
linux-kernel@vger.kernel.org, linux-hexagon@vger.kernel.org,
linux-openrisc@vger.kernel.org,
linux-trace-kernel@vger.kernel.org, mattwu@163.com,
linux@roeck-us.net, "wuqiang.matt" <wuqiang.matt@bytedance.com>,
kernel test robot <lkp@intel.com>
Subject: [PATCH v3 4/5] arch,locking/atomic: hexagon: add arch_cmpxchg[64]_local
Date: Tue, 21 Nov 2023 22:23:46 +0800 [thread overview]
Message-ID: <20231121142347.241356-5-wuqiang.matt@bytedance.com> (raw)
In-Reply-To: <20231121142347.241356-1-wuqiang.matt@bytedance.com>
hexagon doesn't have arch_cmpxchg_local implemented, which causes
build failures for any use of try_cmpxchg_local,
reported by the kernel test robot.
This patch implements arch_cmpxchg[64]_local with the native
cmpxchg variant if the corresponding data size is supported;
otherwise generic_cmpxchg[64]_local is to be used.
Reported-by: kernel test robot <lkp@intel.com>
Closes: https://lore.kernel.org/oe-kbuild-all/202310272207.tLPflya4-lkp@intel.com/
Signed-off-by: wuqiang.matt <wuqiang.matt@bytedance.com>
Reviewed-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
---
arch/hexagon/include/asm/cmpxchg.h | 51 +++++++++++++++++++++++++++++-
1 file changed, 50 insertions(+), 1 deletion(-)
diff --git a/arch/hexagon/include/asm/cmpxchg.h b/arch/hexagon/include/asm/cmpxchg.h
index bf6cf5579cf4..302fa30f25aa 100644
--- a/arch/hexagon/include/asm/cmpxchg.h
+++ b/arch/hexagon/include/asm/cmpxchg.h
@@ -8,6 +8,8 @@
#ifndef _ASM_CMPXCHG_H
#define _ASM_CMPXCHG_H
+#include <linux/build_bug.h>
+
/*
* __arch_xchg - atomically exchange a register and a memory location
* @x: value to swap
@@ -51,13 +53,15 @@ __arch_xchg(unsigned long x, volatile void *ptr, int size)
* variable casting.
*/
-#define arch_cmpxchg(ptr, old, new) \
+#define __cmpxchg_32(ptr, old, new) \
({ \
__typeof__(ptr) __ptr = (ptr); \
__typeof__(*(ptr)) __old = (old); \
__typeof__(*(ptr)) __new = (new); \
__typeof__(*(ptr)) __oldval = 0; \
\
+ BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
+ \
asm volatile( \
"1: %0 = memw_locked(%1);\n" \
" { P0 = cmp.eq(%0,%2);\n" \
@@ -72,4 +76,49 @@ __arch_xchg(unsigned long x, volatile void *ptr, int size)
__oldval; \
})
+#define __cmpxchg(ptr, old, val, size) \
+({ \
+ __typeof__(*(ptr)) oldval; \
+ \
+ switch (size) { \
+ case 4: \
+ oldval = __cmpxchg_32(ptr, old, val); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ oldval = val; \
+ break; \
+ } \
+ \
+ oldval; \
+})
+
+#define arch_cmpxchg(ptr, o, n) __cmpxchg((ptr), (o), (n), sizeof(*(ptr)))
+
+/*
+ * always make arch_cmpxchg[64]_local available, native cmpxchg
+ * will be used if available, then generic_cmpxchg[64]_local
+ */
+#include <asm-generic/cmpxchg-local.h>
+
+#define arch_cmpxchg_local(ptr, old, val) \
+({ \
+ __typeof__(*(ptr)) __retval; \
+ int __size = sizeof(*(ptr)); \
+ \
+ switch (__size) { \
+ case 4: \
+ __retval = __cmpxchg_32(ptr, old, val); \
+ break; \
+ default: \
+ __retval = __generic_cmpxchg_local(ptr, old, \
+ val, __size); \
+ break; \
+ } \
+ \
+ __retval; \
+})
+
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
+
#endif /* _ASM_CMPXCHG_H */
--
2.40.1
next prev parent reply other threads:[~2023-11-21 14:25 UTC|newest]
Thread overview: 9+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-11-21 14:23 [PATCH v3 0/5] arch,locking/atomic: add arch_cmpxchg[64]_local wuqiang.matt
2023-11-21 14:23 ` [PATCH v3 1/5] arch,locking/atomic: arc: arch_cmpxchg should check data size wuqiang.matt
2023-11-22 22:17 ` Andi Shyti
2023-11-23 0:06 ` wuqiang.matt
2023-11-21 14:23 ` [PATCH v3 2/5] arch,locking/atomic: arc: add arch_cmpxchg[64]_local wuqiang.matt
2023-11-21 14:23 ` [PATCH v3 3/5] arch,locking/atomic: openrisc: " wuqiang.matt
2023-11-21 14:23 ` wuqiang.matt [this message]
2023-11-22 16:55 ` [PATCH v3 4/5] arch,locking/atomic: hexagon: " Brian Cain
2023-11-21 14:23 ` [PATCH v3 5/5] arch,locking/atomic: xtensa: define arch_cmpxchg_local as __cmpxchg_local wuqiang.matt
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20231121142347.241356-5-wuqiang.matt@bytedance.com \
--to=wuqiang.matt@bytedance.com \
--cc=andi.shyti@linux.intel.com \
--cc=andrzej.hajda@intel.com \
--cc=arnd@arndb.de \
--cc=bcain@quicinc.com \
--cc=chris@zankel.net \
--cc=geert@linux-m68k.org \
--cc=jcmvbkbc@gmail.com \
--cc=jonas@southpole.se \
--cc=linux-arch@vger.kernel.org \
--cc=linux-hexagon@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-openrisc@vger.kernel.org \
--cc=linux-snps-arc@lists.infradead.org \
--cc=linux-trace-kernel@vger.kernel.org \
--cc=linux@roeck-us.net \
--cc=lkp@intel.com \
--cc=mark.rutland@arm.com \
--cc=mattwu@163.com \
--cc=mhiramat@kernel.org \
--cc=mingo@kernel.org \
--cc=palmer@rivosinc.com \
--cc=peterz@infradead.org \
--cc=shorne@gmail.com \
--cc=stefan.kristiansson@saunalahti.fi \
--cc=ubizjak@gmail.com \
--cc=vgupta@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).