From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: peter.maydell@linaro.org
Subject: [PULL 27/30] tcg/aarch64: Support TCG_TARGET_SIGNED_ADDR32
Date: Thu,  3 Mar 2022 10:59:41 -1000	[thread overview]
Message-ID: <20220303205944.469445-28-richard.henderson@linaro.org> (raw)
In-Reply-To: <20220303205944.469445-1-richard.henderson@linaro.org>

AArch64 has both sign- and zero-extending addressing modes, which
means that either treatment of guest addresses is equally efficient.
Enabling this for AArch64 ensures the feature is exercised in CI.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/aarch64/tcg-target-sa32.h |  8 +++-
 tcg/aarch64/tcg-target.c.inc  | 81 ++++++++++++++++++++++++-----------
 2 files changed, 64 insertions(+), 25 deletions(-)
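
For reference, the three option values introduced below correspond to the
following address computations on the host.  This is only an illustrative
C model, not code from the patch; host_ldst_address() is a made-up name.

#include <stdint.h>

/*
 * Rough model of what the host LDR/STR (register) instruction computes
 * for the option values used by this backend.  Sketch only.
 */
static uint64_t host_ldst_address(uint64_t base, uint64_t off, int option)
{
    switch (option) {
    case 2:  /* UXTW: zero-extend a 32-bit (unsigned) guest address */
        return base + (uint32_t)off;
    case 3:  /* UXTX/LSL: use the full 64-bit offset unchanged */
        return base + off;
    case 6:  /* SXTW: sign-extend a 32-bit (signed) guest address */
        return base + (uint64_t)(int64_t)(int32_t)off;
    default: /* other option encodings are not used here */
        return 0;
    }
}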

diff --git a/tcg/aarch64/tcg-target-sa32.h b/tcg/aarch64/tcg-target-sa32.h
index cb185b1526..c99e502e4c 100644
--- a/tcg/aarch64/tcg-target-sa32.h
+++ b/tcg/aarch64/tcg-target-sa32.h
@@ -1 +1,7 @@
-#define TCG_TARGET_SIGNED_ADDR32 0
+/*
+ * AArch64 has both SXTW and UXTW addressing modes, which means that
+ * it is agnostic to how guest addresses should be represented.
+ * Because aarch64 is more common than the other hosts that will
+ * want to use this feature, enable it for continuous testing.
+ */
+#define TCG_TARGET_SIGNED_ADDR32 1
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index 077fc51401..4a3edd6963 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -361,6 +361,16 @@ typedef enum {
     LDST_LD_S_W = 3,  /* load and sign-extend into Wt */
 } AArch64LdstType;
 
+/*
+ * See aarch64/instrs/extendreg/DecodeRegExtend
+ * But note that option<1> == 0 is UNDEFINED for LDR/STR.
+ */
+typedef enum {
+    LDST_EXT_UXTW = 2,  /* zero-extend from uint32_t */
+    LDST_EXT_UXTX = 3,  /* zero-extend from uint64_t (i.e. no extension) */
+    LDST_EXT_SXTW = 6,  /* sign-extend from int32_t */
+} AArch64LdstExt;
+
 /* We encode the format of the insn into the beginning of the name, so that
    we can have the preprocessor help "typecheck" the insn vs the output
    function.  Arm didn't provide us with nice names for the formats, so we
@@ -806,12 +816,12 @@ static void tcg_out_insn_3617(TCGContext *s, AArch64Insn insn, bool q,
 }
 
 static void tcg_out_insn_3310(TCGContext *s, AArch64Insn insn,
-                              TCGReg rd, TCGReg base, TCGType ext,
+                              TCGReg rd, TCGReg base, AArch64LdstExt option,
                               TCGReg regoff)
 {
     /* Note the AArch64Insn constants above are for C3.3.12.  Adjust.  */
     tcg_out32(s, insn | I3312_TO_I3310 | regoff << 16 |
-              0x4000 | ext << 13 | base << 5 | (rd & 0x1f));
+              option << 13 | base << 5 | (rd & 0x1f));
 }
 
 static void tcg_out_insn_3312(TCGContext *s, AArch64Insn insn,
@@ -1126,7 +1136,7 @@ static void tcg_out_ldst(TCGContext *s, AArch64Insn insn, TCGReg rd,
 
     /* Worst-case scenario, move offset to temp register, use reg offset.  */
     tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, offset);
-    tcg_out_ldst_r(s, insn, rd, rn, TCG_TYPE_I64, TCG_REG_TMP);
+    tcg_out_ldst_r(s, insn, rd, rn, LDST_EXT_UXTX, TCG_REG_TMP);
 }
 
 static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
@@ -1765,31 +1775,31 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 
 static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext,
                                    TCGReg data_r, TCGReg addr_r,
-                                   TCGType otype, TCGReg off_r)
+                                   AArch64LdstExt option, TCGReg off_r)
 {
     switch (memop & MO_SSIZE) {
     case MO_UB:
-        tcg_out_ldst_r(s, I3312_LDRB, data_r, addr_r, otype, off_r);
+        tcg_out_ldst_r(s, I3312_LDRB, data_r, addr_r, option, off_r);
         break;
     case MO_SB:
         tcg_out_ldst_r(s, ext ? I3312_LDRSBX : I3312_LDRSBW,
-                       data_r, addr_r, otype, off_r);
+                       data_r, addr_r, option, off_r);
         break;
     case MO_UW:
-        tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, otype, off_r);
+        tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, option, off_r);
         break;
     case MO_SW:
         tcg_out_ldst_r(s, (ext ? I3312_LDRSHX : I3312_LDRSHW),
-                       data_r, addr_r, otype, off_r);
+                       data_r, addr_r, option, off_r);
         break;
     case MO_UL:
-        tcg_out_ldst_r(s, I3312_LDRW, data_r, addr_r, otype, off_r);
+        tcg_out_ldst_r(s, I3312_LDRW, data_r, addr_r, option, off_r);
         break;
     case MO_SL:
-        tcg_out_ldst_r(s, I3312_LDRSWX, data_r, addr_r, otype, off_r);
+        tcg_out_ldst_r(s, I3312_LDRSWX, data_r, addr_r, option, off_r);
         break;
     case MO_UQ:
-        tcg_out_ldst_r(s, I3312_LDRX, data_r, addr_r, otype, off_r);
+        tcg_out_ldst_r(s, I3312_LDRX, data_r, addr_r, option, off_r);
         break;
     default:
         tcg_abort();
@@ -1798,31 +1808,52 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext,
 
 static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop,
                                    TCGReg data_r, TCGReg addr_r,
-                                   TCGType otype, TCGReg off_r)
+                                   AArch64LdstExt option, TCGReg off_r)
 {
     switch (memop & MO_SIZE) {
     case MO_8:
-        tcg_out_ldst_r(s, I3312_STRB, data_r, addr_r, otype, off_r);
+        tcg_out_ldst_r(s, I3312_STRB, data_r, addr_r, option, off_r);
         break;
     case MO_16:
-        tcg_out_ldst_r(s, I3312_STRH, data_r, addr_r, otype, off_r);
+        tcg_out_ldst_r(s, I3312_STRH, data_r, addr_r, option, off_r);
         break;
     case MO_32:
-        tcg_out_ldst_r(s, I3312_STRW, data_r, addr_r, otype, off_r);
+        tcg_out_ldst_r(s, I3312_STRW, data_r, addr_r, option, off_r);
         break;
     case MO_64:
-        tcg_out_ldst_r(s, I3312_STRX, data_r, addr_r, otype, off_r);
+        tcg_out_ldst_r(s, I3312_STRX, data_r, addr_r, option, off_r);
         break;
     default:
         tcg_abort();
     }
 }
 
+/*
+ * Bits for the option field of LDR/STR (register),
+ * for application to a guest address.
+ */
+static AArch64LdstExt ldst_ext_option(void)
+{
+#ifdef CONFIG_USER_ONLY
+    bool signed_addr32 = guest_base_signed_addr32;
+#else
+    bool signed_addr32 = TCG_TARGET_SIGNED_ADDR32;
+#endif
+
+    if (TARGET_LONG_BITS == 64) {
+        return LDST_EXT_UXTX;
+    } else if (signed_addr32) {
+        return LDST_EXT_SXTW;
+    } else {
+        return LDST_EXT_UXTW;
+    }
+}
+
 static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                             MemOpIdx oi, TCGType ext)
 {
     MemOp memop = get_memop(oi);
-    const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
+    AArch64LdstExt option = ldst_ext_option();
 
     /* Byte swapping is left to middle-end expansion. */
     tcg_debug_assert((memop & MO_BSWAP) == 0);
@@ -1833,7 +1864,7 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
 
     tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, mem_index, 1);
     tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
-                           TCG_REG_X1, otype, addr_reg);
+                           TCG_REG_X1, option, addr_reg);
     add_qemu_ldst_label(s, true, oi, ext, data_reg, addr_reg,
                         s->code_ptr, label_ptr);
 #else /* !CONFIG_SOFTMMU */
@@ -1843,10 +1874,11 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
     }
     if (USE_GUEST_BASE) {
         tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
-                               TCG_REG_GUEST_BASE, otype, addr_reg);
+                               TCG_REG_GUEST_BASE, option, addr_reg);
     } else {
+        /* This case is always a 64-bit guest with no extension. */
         tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
-                               addr_reg, TCG_TYPE_I64, TCG_REG_XZR);
+                               addr_reg, LDST_EXT_UXTX, TCG_REG_XZR);
     }
 #endif /* CONFIG_SOFTMMU */
 }
@@ -1855,7 +1887,7 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                             MemOpIdx oi)
 {
     MemOp memop = get_memop(oi);
-    const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
+    AArch64LdstExt option = ldst_ext_option();
 
     /* Byte swapping is left to middle-end expansion. */
     tcg_debug_assert((memop & MO_BSWAP) == 0);
@@ -1866,7 +1898,7 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
 
     tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, mem_index, 0);
     tcg_out_qemu_st_direct(s, memop, data_reg,
-                           TCG_REG_X1, otype, addr_reg);
+                           TCG_REG_X1, option, addr_reg);
     add_qemu_ldst_label(s, false, oi, (memop & MO_SIZE)== MO_64,
                         data_reg, addr_reg, s->code_ptr, label_ptr);
 #else /* !CONFIG_SOFTMMU */
@@ -1876,10 +1908,11 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
     }
     if (USE_GUEST_BASE) {
         tcg_out_qemu_st_direct(s, memop, data_reg,
-                               TCG_REG_GUEST_BASE, otype, addr_reg);
+                               TCG_REG_GUEST_BASE, option, addr_reg);
     } else {
+        /* This case is always a 64-bit guest with no extension. */
         tcg_out_qemu_st_direct(s, memop, data_reg,
-                               addr_reg, TCG_TYPE_I64, TCG_REG_XZR);
+                               addr_reg, LDST_EXT_UXTX, TCG_REG_XZR);
     }
 #endif /* CONFIG_SOFTMMU */
 }
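
As a cross-check of the ldst_ext_option() selection above, here is a
stand-alone restatement with the build-time inputs (TARGET_LONG_BITS and
the signed-addr32 setting) turned into ordinary parameters.  Sketch only:
pick_option() and the EXT_* names are made up for illustration, and the
registers shown in the comments are just examples.

#include <stdbool.h>

enum { EXT_UXTW = 2, EXT_UXTX = 3, EXT_SXTW = 6 };

static int pick_option(int target_long_bits, bool signed_addr32)
{
    if (target_long_bits == 64) {
        /* 64-bit guest: no extension, e.g. ldr x0, [x28, x1] */
        return EXT_UXTX;
    }
    if (signed_addr32) {
        /* 32-bit guest, signed representation: e.g. ldr x0, [x28, w1, sxtw] */
        return EXT_SXTW;
    }
    /* 32-bit guest, unsigned representation: e.g. ldr x0, [x28, w1, uxtw] */
    return EXT_UXTW;
}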
-- 
2.25.1



Thread overview: 33+ messages
2022-03-03 20:59 [PULL 00/30] tcg patch queue Richard Henderson
2022-03-03 20:59 ` [PULL 01/30] tcg/optimize: only read val after const check Richard Henderson
2022-03-03 20:59 ` [PULL 02/30] tcg: Set MAX_OPC_PARAM_IARGS to 7 Richard Henderson
2022-03-03 20:59 ` [PULL 03/30] tcg: Add opcodes for vector nand, nor, eqv Richard Henderson
2022-03-03 20:59 ` [PULL 04/30] tcg/ppc: Implement vector NAND, NOR, EQV Richard Henderson
2022-03-03 20:59 ` [PULL 05/30] tcg/s390x: " Richard Henderson
2022-03-03 20:59 ` [PULL 06/30] tcg/i386: Detect AVX512 Richard Henderson
2022-03-03 20:59 ` [PULL 07/30] tcg/i386: Add tcg_out_evex_opc Richard Henderson
2022-03-03 20:59 ` [PULL 08/30] tcg/i386: Use tcg_can_emit_vec_op in expand_vec_cmp_noinv Richard Henderson
2022-03-03 20:59 ` [PULL 09/30] tcg/i386: Implement avx512 variable shifts Richard Henderson
2022-03-03 20:59 ` [PULL 10/30] tcg/i386: Implement avx512 scalar shift Richard Henderson
2022-03-03 20:59 ` [PULL 11/30] tcg/i386: Implement avx512 immediate sari shift Richard Henderson
2022-03-03 20:59 ` [PULL 12/30] tcg/i386: Implement avx512 immediate rotate Richard Henderson
2022-03-03 20:59 ` [PULL 13/30] tcg/i386: Implement avx512 variable rotate Richard Henderson
2022-03-03 20:59 ` [PULL 14/30] tcg/i386: Support avx512vbmi2 vector shift-double instructions Richard Henderson
2022-03-03 20:59 ` [PULL 15/30] tcg/i386: Expand vector word rotate as avx512vbmi2 shift-double Richard Henderson
2022-03-03 20:59 ` [PULL 16/30] tcg/i386: Remove rotls_vec from tcg_target_op_def Richard Henderson
2022-03-03 20:59 ` [PULL 17/30] tcg/i386: Expand scalar rotate with avx512 insns Richard Henderson
2022-03-03 20:59 ` [PULL 18/30] tcg/i386: Implement avx512 min/max/abs Richard Henderson
2022-03-03 20:59 ` [PULL 19/30] tcg/i386: Implement avx512 multiply Richard Henderson
2022-03-03 20:59 ` [PULL 20/30] tcg/i386: Implement more logical operations for avx512 Richard Henderson
2022-03-03 20:59 ` [PULL 21/30] tcg/i386: Implement bitsel " Richard Henderson
2022-03-03 20:59 ` [PULL 22/30] tcg: Add TCG_TARGET_SIGNED_ADDR32 Richard Henderson
2022-03-03 20:59 ` [PULL 23/30] accel/tcg: Split out g2h_tlbe Richard Henderson
2022-03-03 20:59 ` [PULL 24/30] accel/tcg: Support TCG_TARGET_SIGNED_ADDR32 for softmmu Richard Henderson
2022-03-03 20:59 ` [PULL 25/30] accel/tcg: Add guest_base_signed_addr32 for user-only Richard Henderson
2022-03-03 20:59 ` [PULL 26/30] linux-user: Support TCG_TARGET_SIGNED_ADDR32 Richard Henderson
2022-03-03 20:59 ` [PULL 27/30] tcg/aarch64: Support TCG_TARGET_SIGNED_ADDR32 Richard Henderson [this message]
2022-03-03 20:59 ` [PULL 28/30] tcg/mips: " Richard Henderson
2022-03-03 20:59 ` [PULL 29/30] tcg/riscv: " Richard Henderson
2022-03-03 20:59 ` [PULL 30/30] tcg/loongarch64: " Richard Henderson
2022-03-04 15:22 ` [PULL 00/30] tcg patch queue Peter Maydell
2022-03-04 18:47   ` Richard Henderson
