From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Subject: [PATCH for-8.0 18/29] accel/tcg: Add aarch64 specific support in ldst_atomicity
Date: Fri, 18 Nov 2022 01:47:43 -0800 [thread overview]
Message-ID: <20221118094754.242910-19-richard.henderson@linaro.org> (raw)
In-Reply-To: <20221118094754.242910-1-richard.henderson@linaro.org>
We have code in atomic128.h noting that through GCC 8, there
was no support for atomic operations on __uint128. This has
been fixed in GCC 10. But we can still improve over any
basic compare-and-swap loop by using the ldxp/stxp instructions.
Add fast paths for FEAT_LSE2, using the detection in tcg.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/ldst_atomicity.c.inc | 75 ++++++++++++++++++++++++++++++++--
1 file changed, 72 insertions(+), 3 deletions(-)
diff --git a/accel/tcg/ldst_atomicity.c.inc b/accel/tcg/ldst_atomicity.c.inc
index d2a3783193..186862b5ec 100644
--- a/accel/tcg/ldst_atomicity.c.inc
+++ b/accel/tcg/ldst_atomicity.c.inc
@@ -41,6 +41,8 @@
* but we're using tcg/tci/ instead.
*/
# define HAVE_al16_fast false
+#elif defined(__aarch64__)
+# define HAVE_al16_fast likely(have_lse2)
#elif defined(__x86_64__)
# define HAVE_al16_fast likely(have_atomic16)
#else
@@ -48,6 +50,8 @@
#endif
#if defined(CONFIG_ATOMIC128) || defined(CONFIG_CMPXCHG128)
# define HAVE_al16 true
+#elif defined(__aarch64__)
+# define HAVE_al16 true
#else
# define HAVE_al16 false
#endif
@@ -168,6 +172,12 @@ load_atomic16(void *pv)
r.u = qatomic_read__nocheck(p);
return r.s;
+#elif defined(__aarch64__)
+ /* Via HAVE_al16_fast, FEAT_LSE2 is present: LDP becomes atomic. */
+ Int128Alias r;
+
+ asm("ldp %0, %R0, %1" : "=r"(r.u) : "m"(*(__uint128_t *)pv));
+ return r.s;
#elif defined(__x86_64__)
Int128Alias r;
@@ -246,7 +256,20 @@ static Int128 load_atomic16_or_exit(CPUArchState *env, uintptr_t ra, void *pv)
* In system mode all guest pages are writable, and for user-only
* we have just checked writability. Try cmpxchg.
*/
-#if defined(CONFIG_CMPXCHG128)
+#if defined(__aarch64__)
+ /* We can do better than cmpxchg for AArch64. */
+ {
+ Int128Alias r;
+ uint32_t fail;
+
+ /* The load must be paired with the store to guarantee not tearing. */
+ asm("0: ldxp %0, %R0, %2\n\t"
+ "stxp %w1, %0, %R0, %2\n\t"
+ "cbnz %w1, 0b"
+ : "=&r"(r.u), "=&r"(fail) : "Q"(*p));
+ return r.s;
+ }
+#elif defined(CONFIG_CMPXCHG128)
/* Swap 0 with 0, with the side-effect of returning the old value. */
{
Int128Alias r;
@@ -393,6 +416,18 @@ load_atom_extract_al16_or_al8(void *pv, int s)
r = qatomic_read__nocheck(p16);
}
return r >> shr;
+#elif defined(__aarch64__)
+ /*
+ * Via HAVE_al16_fast, FEAT_LSE2 is present.
+ * LDP becomes single-copy atomic if 16-byte aligned, and
+ * single-copy atomic on the parts if 8-byte aligned.
+ */
+ uintptr_t pi = (uintptr_t)pv;
+ int shr = (pi & 7) * 8;
+ uint64_t l, h;
+
+ asm("ldp %0, %1, %2" : "=r"(l), "=r"(h) : "m"(*(__uint128_t *)(pi & ~7)));
+ return (l >> shr) | (h << (-shr & 63));
#elif defined(__x86_64__)
uintptr_t pi = (uintptr_t)pv;
int shr = (pi & 7) * 8;
@@ -739,7 +774,23 @@ store_atomic16(void *pv, Int128Alias val)
return;
}
#endif
-#if defined(CONFIG_CMPXCHG128)
+#if defined(__aarch64__)
+ /* We can do better than cmpxchg for AArch64. */
+ __uint128_t *pu = __builtin_assume_aligned(pv, 16);
+ __uint128_t old;
+ uint32_t fail;
+
+ if (HAVE_al16_fast) {
+ /* Via HAVE_al16_fast, FEAT_LSE2 is present: STP becomes atomic. */
+ asm("stp %1, %R1, %0" : "=Q"(*pu) : "r"(val.u));
+ } else {
+ asm("0: ldxp %0, %R0, %1\n\t"
+ "stxp %w2, %3, %R3, %1\n\t"
+ "cbnz %w2, 0b"
+ : "=&r"(old), "=Q"(*pu), "=&r"(fail) : "r"(val.u));
+ }
+ return;
+#elif defined(CONFIG_CMPXCHG128)
{
__uint128_t *pu = __builtin_assume_aligned(pv, 16);
__uint128_t o;
@@ -839,7 +890,25 @@ static void store_atom_insert_al8(uint64_t *p, uint64_t val, uint64_t msk)
static void ATTRIBUTE_ATOMIC128_OPT
store_atom_insert_al16(Int128 *ps, Int128Alias val, Int128Alias msk)
{
-#if defined(CONFIG_ATOMIC128)
+#if defined(__aarch64__)
+ /*
+ * GCC only implements __sync* primitives for int128 on aarch64.
+ * We can do better without the barriers, and integrating the
+ * arithmetic into the load-exclusive/store-conditional pair.
+ */
+ __uint128_t tmp, *pu = __builtin_assume_aligned(ps, 16);
+ uint32_t fail;
+
+ asm("0: ldxp %[t], %R[t], %[mem]\n\t"
+ "bic %[t], %[t], %[m]\n\t"
+ "bic %R[t], %R[t], %R[m]\n\t"
+ "orr %[t], %[t], %[v]\n\t"
+ "orr %R[t], %R[t], %R[v]\n\t"
+ "stxp %w[f], %[t], %R[t], %[mem]\n\t"
+ "cbnz %w[f], 0b\n"
+ : [mem] "+Q"(*pu), [f] "=&r"(fail), [t] "=&r"(tmp)
+ : [v] "r"(val.u), [m] "r"(msk.u));
+#elif defined(CONFIG_ATOMIC128)
__uint128_t *pu, old, new;
/* With CONFIG_ATOMIC128, we can avoid the memory barriers. */
--
2.34.1
next prev parent reply other threads:[~2022-11-18 9:58 UTC|newest]
Thread overview: 48+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-11-18 9:47 [PATCH for-8.0 00/29] tcg: Improve atomicity support Richard Henderson
2022-11-18 9:47 ` [PATCH for-8.0 01/29] include/qemu/cpuid: Introduce xgetbv_low Richard Henderson
2022-11-21 12:15 ` Philippe Mathieu-Daudé
2022-11-18 9:47 ` [PATCH for-8.0 02/29] include/exec/memop: Add bits describing atomicity Richard Henderson
2022-11-18 9:47 ` [PATCH for-8.0 03/29] accel/tcg: Add cpu_in_serial_context Richard Henderson
2022-11-18 9:47 ` [PATCH for-8.0 04/29] accel/tcg: Introduce tlb_read_idx Richard Henderson
2022-11-21 12:25 ` Philippe Mathieu-Daudé
2022-11-18 9:47 ` [PATCH for-8.0 05/29] accel/tcg: Reorg system mode load helpers Richard Henderson
2022-11-18 9:47 ` [PATCH for-8.0 06/29] accel/tcg: Reorg system mode store helpers Richard Henderson
2022-11-18 9:47 ` [PATCH for-8.0 07/29] accel/tcg: Honor atomicity of loads Richard Henderson
2022-11-22 14:35 ` Peter Maydell
2022-11-22 18:04 ` Richard Henderson
2022-11-18 9:47 ` [PATCH for-8.0 08/29] accel/tcg: Honor atomicity of stores Richard Henderson
2022-11-18 9:47 ` [PATCH for-8.0 09/29] tcg/tci: Use cpu_{ld,st}_mmu Richard Henderson
2022-11-21 12:40 ` Philippe Mathieu-Daudé
2022-11-18 9:47 ` [PATCH for-8.0 10/29] tcg: Unify helper_{be,le}_{ld,st}* Richard Henderson
2022-11-21 12:48 ` Philippe Mathieu-Daudé
2022-11-18 9:47 ` [PATCH for-8.0 11/29] accel/tcg: Implement helper_{ld, st}*_mmu for user-only Richard Henderson
2022-11-18 9:47 ` [PATCH for-8.0 12/29] tcg: Add 128-bit guest memory primitives Richard Henderson
2022-11-22 3:30 ` Richard Henderson
2022-11-18 9:47 ` [PATCH for-8.0 13/29] meson: Detect atomic128 support with optimization Richard Henderson
2022-11-18 9:47 ` [PATCH for-8.0 14/29] tcg/i386: Add have_atomic16 Richard Henderson
2022-11-18 9:47 ` [PATCH for-8.0 15/29] include/qemu/int128: Add vector type to Int128Alias Richard Henderson
2022-11-21 23:45 ` Philippe Mathieu-Daudé
2022-11-22 18:21 ` Philippe Mathieu-Daudé
2022-11-22 18:31 ` Philippe Mathieu-Daudé
2022-11-18 9:47 ` [PATCH for-8.0 16/29] accel/tcg: Use have_atomic16 in ldst_atomicity.c.inc Richard Henderson
2022-11-18 9:47 ` [PATCH for-8.0 17/29] tcg/aarch64: Add have_lse, have_lse2 Richard Henderson
2022-11-21 23:10 ` Philippe Mathieu-Daudé
2022-11-21 23:14 ` Philippe Mathieu-Daudé
2022-11-18 9:47 ` Richard Henderson [this message]
2022-11-18 9:47 ` [PATCH for-8.0 19/29] tcg: Introduce TCG_OPF_TYPE_MASK Richard Henderson
2022-11-21 16:12 ` Philippe Mathieu-Daudé
2022-11-18 9:47 ` [PATCH for-8.0 20/29] tcg: Add INDEX_op_qemu_{ld,st}_i128 Richard Henderson
2022-11-21 22:59 ` Philippe Mathieu-Daudé
2022-11-18 9:47 ` [PATCH for-8.0 21/29] tcg/i386: Introduce tcg_out_mov2 Richard Henderson
2022-11-21 16:21 ` Philippe Mathieu-Daudé
2022-11-18 9:47 ` [PATCH for-8.0 22/29] tcg/i386: Introduce tcg_out_testi Richard Henderson
2022-11-21 16:22 ` Philippe Mathieu-Daudé
2022-11-18 9:47 ` [PATCH for-8.0 23/29] tcg/i386: Use full load/store helpers in user-only mode Richard Henderson
2022-11-18 9:47 ` [PATCH for-8.0 24/29] tcg/i386: Replace is64 with type in qemu_ld/st routines Richard Henderson
2022-11-21 16:27 ` Philippe Mathieu-Daudé
2022-11-18 9:47 ` [PATCH for-8.0 25/29] tcg/i386: Mark Win64 call-saved vector regs as reserved Richard Henderson
2022-11-21 16:28 ` Philippe Mathieu-Daudé
2022-11-18 9:47 ` [PATCH for-8.0 26/29] tcg/i386: Examine MemOp for atomicity and alignment Richard Henderson
2022-11-18 9:47 ` [PATCH for-8.0 27/29] tcg/i386: Support 128-bit load/store with have_atomic16 Richard Henderson
2022-11-18 9:47 ` [PATCH for-8.0 28/29] tcg/i386: Add vex_v argument to tcg_out_vex_modrm_pool Richard Henderson
2022-11-18 9:47 ` [PATCH for-8.0 29/29] tcg/i386: Honor 64-bit atomicity in 32-bit mode Richard Henderson
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20221118094754.242910-19-richard.henderson@linaro.org \
--to=richard.henderson@linaro.org \
--cc=qemu-devel@nongnu.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).