From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: git@xen0n.name, philmd@linaro.org, qemu-arm@nongnu.org,
qemu-riscv@nongnu.org, qemu-s390x@nongnu.org
Subject: [PATCH v4 09/54] tcg/aarch64: Introduce HostAddress
Date: Wed, 3 May 2023 07:56:44 +0100
Message-ID: <20230503065729.1745843-10-richard.henderson@linaro.org>
In-Reply-To: <20230503065729.1745843-1-richard.henderson@linaro.org>
Collect the three potential parts of the host address (base register,
index register, and index extension type) into a HostAddress struct,
and reorganize tcg_out_qemu_{ld,st}_direct to take it in place of the
separate arguments.
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
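A minimal sketch of the shape of the change, mirroring the diff below;
the before/after call site is condensed for illustration and the types
are those already used by the tcg/aarch64 backend:

    typedef struct {
        TCGReg base;       /* base register, e.g. TCG_REG_X1 or TCG_REG_GUEST_BASE */
        TCGReg index;      /* index register, typically the guest address register */
        TCGType index_ext; /* extension of the index: TCG_TYPE_I32 or TCG_TYPE_I64 */
    } HostAddress;

    /* Before: three loose address arguments at every call site:
     *   tcg_out_qemu_ld_direct(s, memop, data_type, data_reg,
     *                          TCG_REG_X1, addr_type, addr_reg);
     * After: the parts travel together in one struct:
     */
    HostAddress h = {
        .base = TCG_REG_X1,
        .index = addr_reg,
        .index_ext = addr_type
    };
    tcg_out_qemu_ld_direct(s, memop, data_type, data_reg, h);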
tcg/aarch64/tcg-target.c.inc | 86 +++++++++++++++++++++++++-----------
1 file changed, 59 insertions(+), 27 deletions(-)
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index ecbf6564fc..d8d464e4a0 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -1587,6 +1587,12 @@ static void tcg_out_adr(TCGContext *s, TCGReg rd, const void *target)
tcg_out_insn(s, 3406, ADR, rd, offset);
}
+typedef struct {
+ TCGReg base;
+ TCGReg index;
+ TCGType index_ext;
+} HostAddress;
+
#ifdef CONFIG_SOFTMMU
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
* MemOpIdx oi, uintptr_t ra)
@@ -1796,32 +1802,31 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
#endif /* CONFIG_SOFTMMU */
static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext,
- TCGReg data_r, TCGReg addr_r,
- TCGType otype, TCGReg off_r)
+ TCGReg data_r, HostAddress h)
{
switch (memop & MO_SSIZE) {
case MO_UB:
- tcg_out_ldst_r(s, I3312_LDRB, data_r, addr_r, otype, off_r);
+ tcg_out_ldst_r(s, I3312_LDRB, data_r, h.base, h.index_ext, h.index);
break;
case MO_SB:
tcg_out_ldst_r(s, ext ? I3312_LDRSBX : I3312_LDRSBW,
- data_r, addr_r, otype, off_r);
+ data_r, h.base, h.index_ext, h.index);
break;
case MO_UW:
- tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, otype, off_r);
+ tcg_out_ldst_r(s, I3312_LDRH, data_r, h.base, h.index_ext, h.index);
break;
case MO_SW:
tcg_out_ldst_r(s, (ext ? I3312_LDRSHX : I3312_LDRSHW),
- data_r, addr_r, otype, off_r);
+ data_r, h.base, h.index_ext, h.index);
break;
case MO_UL:
- tcg_out_ldst_r(s, I3312_LDRW, data_r, addr_r, otype, off_r);
+ tcg_out_ldst_r(s, I3312_LDRW, data_r, h.base, h.index_ext, h.index);
break;
case MO_SL:
- tcg_out_ldst_r(s, I3312_LDRSWX, data_r, addr_r, otype, off_r);
+ tcg_out_ldst_r(s, I3312_LDRSWX, data_r, h.base, h.index_ext, h.index);
break;
case MO_UQ:
- tcg_out_ldst_r(s, I3312_LDRX, data_r, addr_r, otype, off_r);
+ tcg_out_ldst_r(s, I3312_LDRX, data_r, h.base, h.index_ext, h.index);
break;
default:
g_assert_not_reached();
@@ -1829,21 +1834,20 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext,
}
static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop,
- TCGReg data_r, TCGReg addr_r,
- TCGType otype, TCGReg off_r)
+ TCGReg data_r, HostAddress h)
{
switch (memop & MO_SIZE) {
case MO_8:
- tcg_out_ldst_r(s, I3312_STRB, data_r, addr_r, otype, off_r);
+ tcg_out_ldst_r(s, I3312_STRB, data_r, h.base, h.index_ext, h.index);
break;
case MO_16:
- tcg_out_ldst_r(s, I3312_STRH, data_r, addr_r, otype, off_r);
+ tcg_out_ldst_r(s, I3312_STRH, data_r, h.base, h.index_ext, h.index);
break;
case MO_32:
- tcg_out_ldst_r(s, I3312_STRW, data_r, addr_r, otype, off_r);
+ tcg_out_ldst_r(s, I3312_STRW, data_r, h.base, h.index_ext, h.index);
break;
case MO_64:
- tcg_out_ldst_r(s, I3312_STRX, data_r, addr_r, otype, off_r);
+ tcg_out_ldst_r(s, I3312_STRX, data_r, h.base, h.index_ext, h.index);
break;
default:
g_assert_not_reached();
@@ -1855,6 +1859,7 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
{
MemOp memop = get_memop(oi);
TCGType addr_type = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
+ HostAddress h;
/* Byte swapping is left to middle-end expansion. */
tcg_debug_assert((memop & MO_BSWAP) == 0);
@@ -1863,8 +1868,14 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
tcg_insn_unit *label_ptr;
tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, get_mmuidx(oi), 1);
- tcg_out_qemu_ld_direct(s, memop, data_type, data_reg,
- TCG_REG_X1, addr_type, addr_reg);
+
+ h = (HostAddress){
+ .base = TCG_REG_X1,
+ .index = addr_reg,
+ .index_ext = addr_type
+ };
+ tcg_out_qemu_ld_direct(s, memop, data_type, data_reg, h);
+
add_qemu_ldst_label(s, true, oi, data_type, data_reg, addr_reg,
s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
@@ -1873,12 +1884,19 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
tcg_out_test_alignment(s, true, addr_reg, a_bits);
}
if (USE_GUEST_BASE) {
- tcg_out_qemu_ld_direct(s, memop, data_type, data_reg,
- TCG_REG_GUEST_BASE, addr_type, addr_reg);
+ h = (HostAddress){
+ .base = TCG_REG_GUEST_BASE,
+ .index = addr_reg,
+ .index_ext = addr_type
+ };
} else {
- tcg_out_qemu_ld_direct(s, memop, data_type, data_reg,
- addr_reg, TCG_TYPE_I64, TCG_REG_XZR);
+ h = (HostAddress){
+ .base = addr_reg,
+ .index = TCG_REG_XZR,
+ .index_ext = TCG_TYPE_I64
+ };
}
+ tcg_out_qemu_ld_direct(s, memop, data_type, data_reg, h);
#endif /* CONFIG_SOFTMMU */
}
@@ -1887,6 +1905,7 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
{
MemOp memop = get_memop(oi);
TCGType addr_type = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
+ HostAddress h;
/* Byte swapping is left to middle-end expansion. */
tcg_debug_assert((memop & MO_BSWAP) == 0);
@@ -1895,8 +1914,14 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
tcg_insn_unit *label_ptr;
tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, get_mmuidx(oi), 0);
- tcg_out_qemu_st_direct(s, memop, data_reg,
- TCG_REG_X1, addr_type, addr_reg);
+
+ h = (HostAddress){
+ .base = TCG_REG_X1,
+ .index = addr_reg,
+ .index_ext = addr_type
+ };
+ tcg_out_qemu_st_direct(s, memop, data_reg, h);
+
add_qemu_ldst_label(s, false, oi, data_type, data_reg, addr_reg,
s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
@@ -1905,12 +1930,19 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
tcg_out_test_alignment(s, false, addr_reg, a_bits);
}
if (USE_GUEST_BASE) {
- tcg_out_qemu_st_direct(s, memop, data_reg,
- TCG_REG_GUEST_BASE, addr_type, addr_reg);
+ h = (HostAddress){
+ .base = TCG_REG_GUEST_BASE,
+ .index = addr_reg,
+ .index_ext = addr_type
+ };
} else {
- tcg_out_qemu_st_direct(s, memop, data_reg,
- addr_reg, TCG_TYPE_I64, TCG_REG_XZR);
+ h = (HostAddress){
+ .base = addr_reg,
+ .index = TCG_REG_XZR,
+ .index_ext = TCG_TYPE_I64
+ };
}
+ tcg_out_qemu_st_direct(s, memop, data_reg, h);
#endif /* CONFIG_SOFTMMU */
}
--
2.34.1