From: Peter Maydell <peter.maydell@linaro.org>
To: qemu-devel@nongnu.org
Subject: [PULL 21/43] target/arm: Adjust gen_aa32_{ld, st}_i32 for align+endianness
Date: Fri, 30 Apr 2021 11:34:15 +0100
Message-ID: <20210430103437.4140-22-peter.maydell@linaro.org>
In-Reply-To: <20210430103437.4140-1-peter.maydell@linaro.org>
From: Richard Henderson <richard.henderson@linaro.org>
Create a finalize_memop function that computes alignment and
endianness and returns the final MemOp for the operation.
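For illustration only (not part of the patch; the values are representative
and assume alignment checking is enabled and big-endian data), the combined
result looks like this:

    /* Sketch, assuming s->align_mem is set and s->be_data == MO_BE. */
    finalize_memop(s, MO_UL);               /* MO_UL | MO_ALIGN | MO_BE       */
    finalize_memop(s, MO_UW | MO_ALIGN_2);  /* alignment already present, so  */
                                            /* only MO_BE is added            */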
Split out gen_aa32_{ld,st}_internal_i32 which bypasses any special
handling of endianness or alignment. Adjust gen_aa32_{ld,st}_i32
so that s->be_data is not added by the callers.
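For illustration (simplified from the diff below), a typical call site
changes like this:

    /* Before: every caller folded in the endianness by hand. */
    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);

    /* After: the caller passes the bare MemOp; gen_aa32_ld_i32() runs it
     * through finalize_memop(), which adds s->be_data (and MO_ALIGN where
     * the configuration requires it).
     */
    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);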
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-12-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate.h | 24 ++++++++
target/arm/translate.c | 100 +++++++++++++++++---------------
target/arm/translate-neon.c.inc | 9 +--
3 files changed, 79 insertions(+), 54 deletions(-)
diff --git a/target/arm/translate.h b/target/arm/translate.h
index b185c14a035..0c60b83b3d4 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -459,4 +459,28 @@ static inline TCGv_ptr fpstatus_ptr(ARMFPStatusFlavour flavour)
return statusptr;
}
+/**
+ * finalize_memop:
+ * @s: DisasContext
+ * @opc: size+sign+align of the memory operation
+ *
+ * Build the complete MemOp for a memory operation, including alignment
+ * and endianness.
+ *
+ * If (op & MO_AMASK) then the operation already contains the required
+ * alignment, e.g. for AccType_ATOMIC. Otherwise, this is an optionally
+ * unaligned operation, e.g. for AccType_NORMAL.
+ *
+ * In the latter case, there are configuration bits that require alignment,
+ * and this is applied here. Note that there is no way to indicate that
+ * no alignment should ever be enforced; this must be handled manually.
+ */
+static inline MemOp finalize_memop(DisasContext *s, MemOp opc)
+{
+ if (s->align_mem && !(opc & MO_AMASK)) {
+ opc |= MO_ALIGN;
+ }
+ return opc | s->be_data;
+}
+
#endif /* TARGET_ARM_TRANSLATE_H */
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 970e537eae0..5bf68b782a1 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -908,7 +908,8 @@ static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
#define IS_USER_ONLY 0
#endif
-/* Abstractions of "generate code to do a guest load/store for
+/*
+ * Abstractions of "generate code to do a guest load/store for
* AArch32", where a vaddr is always 32 bits (and is zero
* extended if we're a 64 bit core) and data is also
* 32 bits unless specifically doing a 64 bit access.
@@ -916,7 +917,7 @@ static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
* that the address argument is TCGv_i32 rather than TCGv.
*/
-static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op)
+static TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op)
{
TCGv addr = tcg_temp_new();
tcg_gen_extu_i32_tl(addr, a32);
@@ -928,47 +929,51 @@ static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op)
return addr;
}
+/*
+ * Internal routines are used for NEON cases where the endianness
+ * and/or alignment has already been taken into account and manipulated.
+ */
+static void gen_aa32_ld_internal_i32(DisasContext *s, TCGv_i32 val,
+ TCGv_i32 a32, int index, MemOp opc)
+{
+ TCGv addr = gen_aa32_addr(s, a32, opc);
+ tcg_gen_qemu_ld_i32(val, addr, index, opc);
+ tcg_temp_free(addr);
+}
+
+static void gen_aa32_st_internal_i32(DisasContext *s, TCGv_i32 val,
+ TCGv_i32 a32, int index, MemOp opc)
+{
+ TCGv addr = gen_aa32_addr(s, a32, opc);
+ tcg_gen_qemu_st_i32(val, addr, index, opc);
+ tcg_temp_free(addr);
+}
+
static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
int index, MemOp opc)
{
- TCGv addr;
-
- if (s->align_mem) {
- opc |= MO_ALIGN;
- }
-
- addr = gen_aa32_addr(s, a32, opc);
- tcg_gen_qemu_ld_i32(val, addr, index, opc);
- tcg_temp_free(addr);
+ gen_aa32_ld_internal_i32(s, val, a32, index, finalize_memop(s, opc));
}
static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
int index, MemOp opc)
{
- TCGv addr;
+ gen_aa32_st_internal_i32(s, val, a32, index, finalize_memop(s, opc));
+}
- if (s->align_mem) {
- opc |= MO_ALIGN;
+#define DO_GEN_LD(SUFF, OPC) \
+ static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
+ TCGv_i32 a32, int index) \
+ { \
+ gen_aa32_ld_i32(s, val, a32, index, OPC); \
}
- addr = gen_aa32_addr(s, a32, opc);
- tcg_gen_qemu_st_i32(val, addr, index, opc);
- tcg_temp_free(addr);
-}
-
-#define DO_GEN_LD(SUFF, OPC) \
-static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
- TCGv_i32 a32, int index) \
-{ \
- gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \
-}
-
-#define DO_GEN_ST(SUFF, OPC) \
-static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
- TCGv_i32 a32, int index) \
-{ \
- gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \
-}
+#define DO_GEN_ST(SUFF, OPC) \
+ static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
+ TCGv_i32 a32, int index) \
+ { \
+ gen_aa32_st_i32(s, val, a32, index, OPC); \
+ }
static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
{
@@ -6456,7 +6461,7 @@ static bool op_load_rr(DisasContext *s, arg_ldst_rr *a,
addr = op_addr_rr_pre(s, a);
tmp = tcg_temp_new_i32();
- gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop | s->be_data);
+ gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop);
disas_set_da_iss(s, mop, issinfo);
/*
@@ -6485,7 +6490,7 @@ static bool op_store_rr(DisasContext *s, arg_ldst_rr *a,
addr = op_addr_rr_pre(s, a);
tmp = load_reg(s, a->rt);
- gen_aa32_st_i32(s, tmp, addr, mem_idx, mop | s->be_data);
+ gen_aa32_st_i32(s, tmp, addr, mem_idx, mop);
disas_set_da_iss(s, mop, issinfo);
tcg_temp_free_i32(tmp);
@@ -6508,13 +6513,13 @@ static bool trans_LDRD_rr(DisasContext *s, arg_ldst_rr *a)
addr = op_addr_rr_pre(s, a);
tmp = tcg_temp_new_i32();
- gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+ gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
store_reg(s, a->rt, tmp);
tcg_gen_addi_i32(addr, addr, 4);
tmp = tcg_temp_new_i32();
- gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+ gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
store_reg(s, a->rt + 1, tmp);
/* LDRD w/ base writeback is undefined if the registers overlap. */
@@ -6537,13 +6542,13 @@ static bool trans_STRD_rr(DisasContext *s, arg_ldst_rr *a)
addr = op_addr_rr_pre(s, a);
tmp = load_reg(s, a->rt);
- gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+ gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
tcg_temp_free_i32(tmp);
tcg_gen_addi_i32(addr, addr, 4);
tmp = load_reg(s, a->rt + 1);
- gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+ gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
tcg_temp_free_i32(tmp);
op_addr_rr_post(s, a, addr, -4);
@@ -6608,7 +6613,7 @@ static bool op_load_ri(DisasContext *s, arg_ldst_ri *a,
addr = op_addr_ri_pre(s, a);
tmp = tcg_temp_new_i32();
- gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop | s->be_data);
+ gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop);
disas_set_da_iss(s, mop, issinfo);
/*
@@ -6637,7 +6642,7 @@ static bool op_store_ri(DisasContext *s, arg_ldst_ri *a,
addr = op_addr_ri_pre(s, a);
tmp = load_reg(s, a->rt);
- gen_aa32_st_i32(s, tmp, addr, mem_idx, mop | s->be_data);
+ gen_aa32_st_i32(s, tmp, addr, mem_idx, mop);
disas_set_da_iss(s, mop, issinfo);
tcg_temp_free_i32(tmp);
@@ -6653,13 +6658,13 @@ static bool op_ldrd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
addr = op_addr_ri_pre(s, a);
tmp = tcg_temp_new_i32();
- gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+ gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
store_reg(s, a->rt, tmp);
tcg_gen_addi_i32(addr, addr, 4);
tmp = tcg_temp_new_i32();
- gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+ gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
store_reg(s, rt2, tmp);
/* LDRD w/ base writeback is undefined if the registers overlap. */
@@ -6692,13 +6697,13 @@ static bool op_strd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
addr = op_addr_ri_pre(s, a);
tmp = load_reg(s, a->rt);
- gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+ gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
tcg_temp_free_i32(tmp);
tcg_gen_addi_i32(addr, addr, 4);
tmp = load_reg(s, rt2);
- gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+ gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
tcg_temp_free_i32(tmp);
op_addr_ri_post(s, a, addr, -4);
@@ -6924,7 +6929,7 @@ static bool op_stl(DisasContext *s, arg_STL *a, MemOp mop)
addr = load_reg(s, a->rn);
tmp = load_reg(s, a->rt);
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
- gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop | s->be_data);
+ gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop);
disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel | ISSIsWrite);
tcg_temp_free_i32(tmp);
@@ -7080,7 +7085,7 @@ static bool op_lda(DisasContext *s, arg_LDA *a, MemOp mop)
addr = load_reg(s, a->rn);
tmp = tcg_temp_new_i32();
- gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop | s->be_data);
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop);
disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel);
tcg_temp_free_i32(addr);
@@ -8264,8 +8269,7 @@ static bool op_tbranch(DisasContext *s, arg_tbranch *a, bool half)
addr = load_reg(s, a->rn);
tcg_gen_add_i32(addr, addr, tmp);
- gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
- half ? MO_UW | s->be_data : MO_UB);
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), half ? MO_UW : MO_UB);
tcg_temp_free_i32(addr);
tcg_gen_add_i32(tmp, tmp, tmp);
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index 0e5828744bb..c82aa1412e2 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -559,8 +559,7 @@ static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)
addr = tcg_temp_new_i32();
load_reg_var(s, addr, a->rn);
for (reg = 0; reg < nregs; reg++) {
- gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
- s->be_data | size);
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), size);
if ((vd & 1) && vec_size == 16) {
/*
* We cannot write 16 bytes at once because the
@@ -650,13 +649,11 @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
*/
for (reg = 0; reg < nregs; reg++) {
if (a->l) {
- gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
- s->be_data | a->size);
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), a->size);
neon_store_element(vd, a->reg_idx, a->size, tmp);
} else { /* Store */
neon_load_element(tmp, vd, a->reg_idx, a->size);
- gen_aa32_st_i32(s, tmp, addr, get_mem_index(s),
- s->be_data | a->size);
+ gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), a->size);
}
vd += a->stride;
tcg_gen_addi_i32(addr, addr, 1 << a->size);
--
2.20.1