From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: peter.maydell@linaro.org, qemu-arm@nongnu.org
Subject: [PATCH v3 24/30] target/arm: Use finalize_memop for aa64 gpr load/store
Date: Mon, 11 Jan 2021 09:01:07 -1000 [thread overview]
Message-ID: <20210111190113.303726-25-richard.henderson@linaro.org> (raw)
In-Reply-To: <20210111190113.303726-1-richard.henderson@linaro.org>
In the case of gpr load, merge the size and is_signed arguments
into a single MemOp; otherwise, simply convert size to memop.
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/arm/translate-a64.c | 78 ++++++++++++++++----------------------
1 file changed, 33 insertions(+), 45 deletions(-)
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 69d401da21..5d93fcf25b 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -880,19 +880,19 @@ static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
* Store from GPR register to memory.
*/
static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
- TCGv_i64 tcg_addr, int size, int memidx,
+ TCGv_i64 tcg_addr, MemOp memop, int memidx,
bool iss_valid,
unsigned int iss_srt,
bool iss_sf, bool iss_ar)
{
- g_assert(size <= 3);
- tcg_gen_qemu_st_i64(source, tcg_addr, memidx, s->be_data + size);
+ memop = finalize_memop(s, memop);
+ tcg_gen_qemu_st_i64(source, tcg_addr, memidx, memop);
if (iss_valid) {
uint32_t syn;
syn = syn_data_abort_with_iss(0,
- size,
+ (memop & MO_SIZE),
false,
iss_srt,
iss_sf,
@@ -903,37 +903,28 @@ static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
}
static void do_gpr_st(DisasContext *s, TCGv_i64 source,
- TCGv_i64 tcg_addr, int size,
+ TCGv_i64 tcg_addr, MemOp memop,
bool iss_valid,
unsigned int iss_srt,
bool iss_sf, bool iss_ar)
{
- do_gpr_st_memidx(s, source, tcg_addr, size, get_mem_index(s),
+ do_gpr_st_memidx(s, source, tcg_addr, memop, get_mem_index(s),
iss_valid, iss_srt, iss_sf, iss_ar);
}
/*
* Load from memory to GPR register
*/
-static void do_gpr_ld_memidx(DisasContext *s,
- TCGv_i64 dest, TCGv_i64 tcg_addr,
- int size, bool is_signed,
- bool extend, int memidx,
+static void do_gpr_ld_memidx(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
+ MemOp memop, bool extend, int memidx,
bool iss_valid, unsigned int iss_srt,
bool iss_sf, bool iss_ar)
{
- MemOp memop = s->be_data + size;
-
- g_assert(size <= 3);
-
- if (is_signed) {
- memop += MO_SIGN;
- }
-
+ memop = finalize_memop(s, memop);
tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop);
- if (extend && is_signed) {
- g_assert(size < 3);
+ if (extend && (memop & MO_SIGN)) {
+ g_assert((memop & MO_SIZE) <= MO_32);
tcg_gen_ext32u_i64(dest, dest);
}
@@ -941,8 +932,8 @@ static void do_gpr_ld_memidx(DisasContext *s,
uint32_t syn;
syn = syn_data_abort_with_iss(0,
- size,
- is_signed,
+ (memop & MO_SIZE),
+ (memop & MO_SIGN) != 0,
iss_srt,
iss_sf,
iss_ar,
@@ -951,14 +942,12 @@ static void do_gpr_ld_memidx(DisasContext *s,
}
}
-static void do_gpr_ld(DisasContext *s,
- TCGv_i64 dest, TCGv_i64 tcg_addr,
- int size, bool is_signed, bool extend,
+static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
+ MemOp memop, bool extend,
bool iss_valid, unsigned int iss_srt,
bool iss_sf, bool iss_ar)
{
- do_gpr_ld_memidx(s, dest, tcg_addr, size, is_signed, extend,
- get_mem_index(s),
+ do_gpr_ld_memidx(s, dest, tcg_addr, memop, extend, get_mem_index(s),
iss_valid, iss_srt, iss_sf, iss_ar);
}
@@ -2687,7 +2676,7 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
}
clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
false, rn != 31, size);
- do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, false, true, rt,
+ do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, true, rt,
disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
return;
@@ -2800,8 +2789,8 @@ static void disas_ld_lit(DisasContext *s, uint32_t insn)
/* Only unsigned 32bit loads target 32bit registers. */
bool iss_sf = opc != 0;
- do_gpr_ld(s, tcg_rt, clean_addr, size, is_signed, false,
- true, rt, iss_sf, false);
+ do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
+ false, true, rt, iss_sf, false);
}
tcg_temp_free_i64(clean_addr);
}
@@ -2960,11 +2949,11 @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn)
/* Do not modify tcg_rt before recognizing any exception
* from the second load.
*/
- do_gpr_ld(s, tmp, clean_addr, size, is_signed, false,
- false, 0, false, false);
+ do_gpr_ld(s, tmp, clean_addr, size + is_signed * MO_SIGN,
+ false, false, 0, false, false);
tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
- do_gpr_ld(s, tcg_rt2, clean_addr, size, is_signed, false,
- false, 0, false, false);
+ do_gpr_ld(s, tcg_rt2, clean_addr, size + is_signed * MO_SIGN,
+ false, false, 0, false, false);
tcg_gen_mov_i64(tcg_rt, tmp);
tcg_temp_free_i64(tmp);
@@ -3095,8 +3084,8 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
do_gpr_st_memidx(s, tcg_rt, clean_addr, size, memidx,
iss_valid, rt, iss_sf, false);
} else {
- do_gpr_ld_memidx(s, tcg_rt, clean_addr, size,
- is_signed, is_extended, memidx,
+ do_gpr_ld_memidx(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
+ is_extended, memidx,
iss_valid, rt, iss_sf, false);
}
}
@@ -3200,9 +3189,8 @@ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
do_gpr_st(s, tcg_rt, clean_addr, size,
true, rt, iss_sf, false);
} else {
- do_gpr_ld(s, tcg_rt, clean_addr, size,
- is_signed, is_extended,
- true, rt, iss_sf, false);
+ do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
+ is_extended, true, rt, iss_sf, false);
}
}
}
@@ -3285,8 +3273,8 @@ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
do_gpr_st(s, tcg_rt, clean_addr, size,
true, rt, iss_sf, false);
} else {
- do_gpr_ld(s, tcg_rt, clean_addr, size, is_signed, is_extended,
- true, rt, iss_sf, false);
+ do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
+ is_extended, true, rt, iss_sf, false);
}
}
}
@@ -3373,7 +3361,7 @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
* full load-acquire (we only need "load-acquire processor consistent"),
* but we choose to implement them as full LDAQ.
*/
- do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, false,
+ do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false,
true, rt, disas_ldst_compute_iss_sf(size, false, 0), true);
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
return;
@@ -3446,7 +3434,7 @@ static void disas_ldst_pac(DisasContext *s, uint32_t insn,
is_wback || rn != 31, size);
tcg_rt = cpu_reg(s, rt);
- do_gpr_ld(s, tcg_rt, clean_addr, size, /* is_signed */ false,
+ do_gpr_ld(s, tcg_rt, clean_addr, size,
/* extend */ false, /* iss_valid */ !is_wback,
/* iss_srt */ rt, /* iss_sf */ true, /* iss_ar */ false);
@@ -3531,8 +3519,8 @@ static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
* Load-AcquirePC semantics; we implement as the slightly more
* restrictive Load-Acquire.
*/
- do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, is_signed, extend,
- true, rt, iss_sf, true);
+ do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size + is_signed * MO_SIGN,
+ extend, true, rt, iss_sf, true);
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
}
}
--
2.25.1
next prev parent reply other threads:[~2021-01-11 19:39 UTC|newest]
Thread overview: 31+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-01-11 19:00 [PATCH v3 00/30] target/arm: enforce alignment Richard Henderson
2021-01-11 19:00 ` [PATCH v3 01/30] target/arm: Fix decode of align in VLDST_single Richard Henderson
2021-01-11 19:00 ` [PATCH v3 02/30] target/arm: Rename TBFLAG_A32, SCTLR_B Richard Henderson
2021-01-11 19:00 ` [PATCH v3 03/30] target/arm: Rename TBFLAG_ANY, PSTATE_SS Richard Henderson
2021-01-11 19:00 ` [PATCH v3 04/30] target/arm: Add wrapper macros for accessing tbflags Richard Henderson
2021-01-11 19:00 ` [PATCH v3 05/30] target/arm: Introduce CPUARMTBFlags Richard Henderson
2021-01-11 19:00 ` [PATCH v3 06/30] target/arm: Move mode specific TB flags to tb->cs_base Richard Henderson
2021-01-11 19:00 ` [PATCH v3 07/30] target/arm: Move TBFLAG_AM32 bits to the top Richard Henderson
2021-01-11 19:00 ` [PATCH v3 08/30] target/arm: Move TBFLAG_ANY bits to the bottom Richard Henderson
2021-01-11 19:00 ` [PATCH v3 09/30] target/arm: Add ALIGN_MEM to TBFLAG_ANY Richard Henderson
2021-01-11 19:00 ` [PATCH v3 10/30] target/arm: Adjust gen_aa32_{ld, st}_i32 for align+endianness Richard Henderson
2021-01-11 19:00 ` [PATCH v3 11/30] target/arm: Merge gen_aa32_frob64 into gen_aa32_ld_i64 Richard Henderson
2021-01-11 19:00 ` [PATCH v3 12/30] target/arm: Fix SCTLR_B test for TCGv_i64 load/store Richard Henderson
2021-01-11 19:00 ` [PATCH v3 13/30] target/arm: Adjust gen_aa32_{ld, st}_i64 for align+endianness Richard Henderson
2021-01-11 19:00 ` [PATCH v3 14/30] target/arm: Enforce word alignment for LDRD/STRD Richard Henderson
2021-01-11 19:00 ` [PATCH v3 15/30] target/arm: Enforce alignment for LDA/LDAH/STL/STLH Richard Henderson
2021-01-11 19:00 ` [PATCH v3 16/30] target/arm: Enforce alignment for LDM/STM Richard Henderson
2021-01-11 19:01 ` [PATCH v3 17/30] target/arm: Enforce alignment for RFE Richard Henderson
2021-01-11 19:01 ` [PATCH v3 18/30] target/arm: Enforce alignment for SRS Richard Henderson
2021-01-11 19:01 ` [PATCH v3 19/30] target/arm: Enforce alignment for VLDM/VSTM Richard Henderson
2021-01-11 19:01 ` [PATCH v3 20/30] target/arm: Enforce alignment for VLDR/VSTR Richard Henderson
2021-01-11 19:01 ` [PATCH v3 21/30] target/arm: Enforce alignment for VLDn (all lanes) Richard Henderson
2021-01-11 19:01 ` [PATCH v3 22/30] target/arm: Enforce alignment for VLDn/VSTn (multiple) Richard Henderson
2021-01-11 19:01 ` [PATCH v3 23/30] target/arm: Enforce alignment for VLDn/VSTn (single) Richard Henderson
2021-01-11 19:01 ` Richard Henderson [this message]
2021-01-11 19:01 ` [PATCH v3 25/30] target/arm: Use finalize_memop for aa64 fpr load/store Richard Henderson
2021-01-11 19:01 ` [PATCH v3 26/30] target/arm: Enforce alignment for aa64 load-acq/store-rel Richard Henderson
2021-01-11 19:01 ` [PATCH v3 27/30] target/arm: Use MemOp for size + endian in aa64 vector ld/st Richard Henderson
2021-01-11 19:01 ` [PATCH v3 28/30] target/arm: Enforce alignment for aa64 vector LDn/STn (multiple) Richard Henderson
2021-01-11 19:01 ` [PATCH v3 29/30] target/arm: Enforce alignment for aa64 vector LDn/STn (single) Richard Henderson
2021-01-11 19:01 ` [PATCH v3 30/30] target/arm: Enforce alignment for sve LD1R Richard Henderson
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20210111190113.303726-25-richard.henderson@linaro.org \
--to=richard.henderson@linaro.org \
--cc=peter.maydell@linaro.org \
--cc=qemu-arm@nongnu.org \
--cc=qemu-devel@nongnu.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).