From: Will Deacon <will@kernel.org>
To: linux-arm-kernel@lists.infradead.org
Cc: mark.rutland@arm.com, peterz@infradead.org,
catalin.marinas@arm.com, ndesaulniers@google.com,
robin.murphy@arm.com, Ard.Biesheuvel@arm.com,
andrew.murray@arm.com, natechancellor@gmail.com,
Will Deacon <will@kernel.org>
Subject: [PATCH v5 02/10] arm64: Use correct ll/sc atomic constraints
Date: Thu, 29 Aug 2019 16:48:26 +0100
Message-ID: <20190829154834.26547-3-will@kernel.org>
In-Reply-To: <20190829154834.26547-1-will@kernel.org>
From: Andrew Murray <andrew.murray@arm.com>
The A64 ISA accepts distinct (but overlapping) ranges of immediates for:
* add arithmetic instructions ('I' machine constraint)
* sub arithmetic instructions ('J' machine constraint)
* 32-bit logical instructions ('K' machine constraint)
* 64-bit logical instructions ('L' machine constraint)
... but we currently use the 'I' constraint for many atomic operations
using sub or logical instructions, which is not always valid.
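
To make the ranges concrete, here is an illustrative snippet (not part
of the patch; the immediate values are arbitrary examples) pairing each
constraint with its instruction class:

	static void constraint_examples(void)
	{
		unsigned int w = 0;
		unsigned long x = 0;

		/* 'I': ADD immediate, 0..4095, optionally shifted left by 12 */
		asm("add	%w0, %w0, %w1" : "+r" (w) : "I" (4095));
		/* 'J': negated ADD immediate; gas rewrites sub/add as needed */
		asm("sub	%w0, %w0, %w1" : "+r" (w) : "J" (-4095));
		/* 'K': 32-bit bitmask (logical) immediate */
		asm("and	%w0, %w0, %w1" : "+r" (w) : "K" (0x00ff00ff));
		/* 'L': 64-bit bitmask (logical) immediate */
		asm("and	%0, %0, %1" : "+r" (x) : "L" (0x5555555555555555UL));
	}
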
When CONFIG_ARM64_LSE_ATOMICS is not set, this allows invalid immediates
to be passed to instructions, potentially resulting in a build failure.
When CONFIG_ARM64_LSE_ATOMICS is selected, the out-of-line ll/sc atomics
always use a register, as they have no visibility of the value passed by
the caller.
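
As an illustrative sketch of the failure mode (again, not from the
patch): 0x5000 (5 << 12) is a valid ADD immediate, so it satisfies 'I'
and the compiler may pick the immediate alternative of "Ir", yet it is
not an encodable bitmask immediate, so the resulting logical
instruction is rejected at assembly time:

	static inline unsigned int and_0x5000(unsigned int x)
	{
		asm("and	%w0, %w0, %w1"	/* may emit: and w0, w0, 20480 */
		    : "+r" (x)
		    : "Ir" (0x5000));	/* 'I' matches, but AND cannot encode it */
		return x;
	}
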
This patch adds a constraint parameter to the ATOMIC_xx and
__CMPXCHG_CASE macros so that we can pass appropriate constraints for
each case, with uses updated accordingly.
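
As an expansion sketch, ATOMIC_OP(sub, sub, J) stringizes the new
parameter so that the input operand becomes:

	: "J" "r" (i)	/* adjacent string literals concatenate to "Jr" */

while passing an empty argument, as done below for the 32-bit logical
operations, leaves a bare "r" and forces the value into a register.
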
Unfortunately, prior to GCC 8.1.0 the 'K' constraint erroneously accepted
'4294967295', so for 32-bit operations we must instead force the use of
a register.
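
The bug is visible with something like the following (illustrative)
fragment, where an affected compiler accepts the 'K' match and emits an
AND with an all-ones 32-bit immediate, which has no bitmask encoding
and is then rejected by the assembler:

	unsigned int r, x = 0;

	asm("and	%w0, %w1, %w2" : "=r" (r) : "r" (x), "K" (4294967295u));
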
Signed-off-by: Andrew Murray <andrew.murray@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
---
arch/arm64/include/asm/atomic_ll_sc.h | 89 ++++++++++++++++++-----------------
1 file changed, 47 insertions(+), 42 deletions(-)
diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index c8c850bc3dfb..6dd011e0b434 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -26,7 +26,7 @@
* (the optimize attribute silently ignores these options).
*/
-#define ATOMIC_OP(op, asm_op) \
+#define ATOMIC_OP(op, asm_op, constraint) \
__LL_SC_INLINE void \
__LL_SC_PREFIX(arch_atomic_##op(int i, atomic_t *v)) \
{ \
@@ -40,11 +40,11 @@ __LL_SC_PREFIX(arch_atomic_##op(int i, atomic_t *v)) \
" stxr %w1, %w0, %2\n" \
" cbnz %w1, 1b" \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
- : "Ir" (i)); \
+ : #constraint "r" (i)); \
} \
__LL_SC_EXPORT(arch_atomic_##op);
-#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \
+#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
__LL_SC_INLINE int \
__LL_SC_PREFIX(arch_atomic_##op##_return##name(int i, atomic_t *v)) \
{ \
@@ -59,14 +59,14 @@ __LL_SC_PREFIX(arch_atomic_##op##_return##name(int i, atomic_t *v)) \
" cbnz %w1, 1b\n" \
" " #mb \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
- : "Ir" (i) \
+ : #constraint "r" (i) \
: cl); \
\
return result; \
} \
__LL_SC_EXPORT(arch_atomic_##op##_return##name);
-#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \
+#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint) \
__LL_SC_INLINE int \
__LL_SC_PREFIX(arch_atomic_fetch_##op##name(int i, atomic_t *v)) \
{ \
@@ -81,7 +81,7 @@ __LL_SC_PREFIX(arch_atomic_fetch_##op##name(int i, atomic_t *v)) \
" cbnz %w2, 1b\n" \
" " #mb \
: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
- : "Ir" (i) \
+ : #constraint "r" (i) \
: cl); \
\
return result; \
@@ -99,8 +99,8 @@ __LL_SC_EXPORT(arch_atomic_fetch_##op##name);
ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\
ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__)
-ATOMIC_OPS(add, add)
-ATOMIC_OPS(sub, sub)
+ATOMIC_OPS(add, add, I)
+ATOMIC_OPS(sub, sub, J)
#undef ATOMIC_OPS
#define ATOMIC_OPS(...) \
@@ -110,17 +110,17 @@ ATOMIC_OPS(sub, sub)
ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\
ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__)
-ATOMIC_OPS(and, and)
-ATOMIC_OPS(andnot, bic)
-ATOMIC_OPS(or, orr)
-ATOMIC_OPS(xor, eor)
+ATOMIC_OPS(and, and, )
+ATOMIC_OPS(andnot, bic, )
+ATOMIC_OPS(or, orr, )
+ATOMIC_OPS(xor, eor, )
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define ATOMIC64_OP(op, asm_op) \
+#define ATOMIC64_OP(op, asm_op, constraint) \
__LL_SC_INLINE void \
__LL_SC_PREFIX(arch_atomic64_##op(s64 i, atomic64_t *v)) \
{ \
@@ -134,11 +134,11 @@ __LL_SC_PREFIX(arch_atomic64_##op(s64 i, atomic64_t *v)) \
" stxr %w1, %0, %2\n" \
" cbnz %w1, 1b" \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
- : "Ir" (i)); \
+ : #constraint "r" (i)); \
} \
__LL_SC_EXPORT(arch_atomic64_##op);
-#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \
+#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
__LL_SC_INLINE s64 \
__LL_SC_PREFIX(arch_atomic64_##op##_return##name(s64 i, atomic64_t *v))\
{ \
@@ -153,14 +153,14 @@ __LL_SC_PREFIX(arch_atomic64_##op##_return##name(s64 i, atomic64_t *v))\
" cbnz %w1, 1b\n" \
" " #mb \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
- : "Ir" (i) \
+ : #constraint "r" (i) \
: cl); \
\
return result; \
} \
__LL_SC_EXPORT(arch_atomic64_##op##_return##name);
-#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \
+#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\
__LL_SC_INLINE s64 \
__LL_SC_PREFIX(arch_atomic64_fetch_##op##name(s64 i, atomic64_t *v)) \
{ \
@@ -175,7 +175,7 @@ __LL_SC_PREFIX(arch_atomic64_fetch_##op##name(s64 i, atomic64_t *v)) \
" cbnz %w2, 1b\n" \
" " #mb \
: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
- : "Ir" (i) \
+ : #constraint "r" (i) \
: cl); \
\
return result; \
@@ -193,8 +193,8 @@ __LL_SC_EXPORT(arch_atomic64_fetch_##op##name);
ATOMIC64_FETCH_OP (_acquire,, a, , "memory", __VA_ARGS__) \
ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__)
-ATOMIC64_OPS(add, add)
-ATOMIC64_OPS(sub, sub)
+ATOMIC64_OPS(add, add, I)
+ATOMIC64_OPS(sub, sub, J)
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(...) \
@@ -204,10 +204,10 @@ ATOMIC64_OPS(sub, sub)
ATOMIC64_FETCH_OP (_acquire,, a, , "memory", __VA_ARGS__) \
ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__)
-ATOMIC64_OPS(and, and)
-ATOMIC64_OPS(andnot, bic)
-ATOMIC64_OPS(or, orr)
-ATOMIC64_OPS(xor, eor)
+ATOMIC64_OPS(and, and, L)
+ATOMIC64_OPS(andnot, bic, )
+ATOMIC64_OPS(or, orr, L)
+ATOMIC64_OPS(xor, eor, L)
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
@@ -237,7 +237,7 @@ __LL_SC_PREFIX(arch_atomic64_dec_if_positive(atomic64_t *v))
}
__LL_SC_EXPORT(arch_atomic64_dec_if_positive);
-#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl) \
+#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl, constraint) \
__LL_SC_INLINE u##sz \
__LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr, \
unsigned long old, \
@@ -265,29 +265,34 @@ __LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr, \
"2:" \
: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \
[v] "+Q" (*(u##sz *)ptr) \
- : [old] "Kr" (old), [new] "r" (new) \
+ : [old] #constraint "r" (old), [new] "r" (new) \
: cl); \
\
return oldval; \
} \
__LL_SC_EXPORT(__cmpxchg_case_##name##sz);
-__CMPXCHG_CASE(w, b, , 8, , , , )
-__CMPXCHG_CASE(w, h, , 16, , , , )
-__CMPXCHG_CASE(w, , , 32, , , , )
-__CMPXCHG_CASE( , , , 64, , , , )
-__CMPXCHG_CASE(w, b, acq_, 8, , a, , "memory")
-__CMPXCHG_CASE(w, h, acq_, 16, , a, , "memory")
-__CMPXCHG_CASE(w, , acq_, 32, , a, , "memory")
-__CMPXCHG_CASE( , , acq_, 64, , a, , "memory")
-__CMPXCHG_CASE(w, b, rel_, 8, , , l, "memory")
-__CMPXCHG_CASE(w, h, rel_, 16, , , l, "memory")
-__CMPXCHG_CASE(w, , rel_, 32, , , l, "memory")
-__CMPXCHG_CASE( , , rel_, 64, , , l, "memory")
-__CMPXCHG_CASE(w, b, mb_, 8, dmb ish, , l, "memory")
-__CMPXCHG_CASE(w, h, mb_, 16, dmb ish, , l, "memory")
-__CMPXCHG_CASE(w, , mb_, 32, dmb ish, , l, "memory")
-__CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory")
+/*
+ * Earlier versions of GCC (no later than 8.1.0) appear to incorrectly
+ * handle the 'K' constraint for the value 4294967295 - thus we use no
+ * constraint for 32 bit operations.
+ */
+__CMPXCHG_CASE(w, b, , 8, , , , , )
+__CMPXCHG_CASE(w, h, , 16, , , , , )
+__CMPXCHG_CASE(w, , , 32, , , , , )
+__CMPXCHG_CASE( , , , 64, , , , , L)
+__CMPXCHG_CASE(w, b, acq_, 8, , a, , "memory", )
+__CMPXCHG_CASE(w, h, acq_, 16, , a, , "memory", )
+__CMPXCHG_CASE(w, , acq_, 32, , a, , "memory", )
+__CMPXCHG_CASE( , , acq_, 64, , a, , "memory", L)
+__CMPXCHG_CASE(w, b, rel_, 8, , , l, "memory", )
+__CMPXCHG_CASE(w, h, rel_, 16, , , l, "memory", )
+__CMPXCHG_CASE(w, , rel_, 32, , , l, "memory", )
+__CMPXCHG_CASE( , , rel_, 64, , , l, "memory", L)
+__CMPXCHG_CASE(w, b, mb_, 8, dmb ish, , l, "memory", )
+__CMPXCHG_CASE(w, h, mb_, 16, dmb ish, , l, "memory", )
+__CMPXCHG_CASE(w, , mb_, 32, dmb ish, , l, "memory", )
+__CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory", L)
#undef __CMPXCHG_CASE
--
2.11.0