From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: qemu-stable@nongnu.org
Subject: [PULL 1/6] tcg: Zero extend 32-bit addresses for TCI
Date: Fri, 5 Dec 2025 10:20:02 -0600
Message-ID: <20251205162007.26405-2-richard.henderson@linaro.org>
In-Reply-To: <20251205162007.26405-1-richard.henderson@linaro.org>

For native code generation, zero-extending 32-bit addresses for the
slow-path helpers happens in tcg_out_{ld,st}_helper_args, but TCI has
no real slow path, so the extension never happened.  Make the
extension explicit in the opcode stream for TCI, much as we already
do for plugins and atomic helpers.
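
To illustrate the failure mode, here is a hypothetical sketch (the
variable names and values below are made up for illustration, not
code from this patch): on a 64-bit TCI host running a guest with
32-bit addresses, only the low 32 bits of an address value are
meaningful, while the tcg-ldst.h helpers now take a full uint64_t
address:

    #include <stdint.h>

    /* Hypothetical sketch, assuming a 64-bit interpreter register
     * holding a TCG_TYPE_I32 guest address. */
    uint64_t reg = 0xdeadbeef00001000ULL; /* high bits are stale */

    uint64_t wrong = reg;            /* stale bits reach the helper */
    uint64_t right = (uint32_t)reg;  /* 0x0000000000001000 */

The tcg_gen_extu_i32_i64 emitted by tci_extend_addr below makes the
"right" computation explicit in the opcode stream.
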
Cc: qemu-stable@nongnu.org
Fixes: 24e46e6c9d9 ("accel/tcg: Widen tcg-ldst.h addresses to uint64_t")
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg-op-ldst.c | 72 +++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 61 insertions(+), 11 deletions(-)

diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c
index 67c15fd4d0..1c0b06dbc7 100644
--- a/tcg/tcg-op-ldst.c
+++ b/tcg/tcg-op-ldst.c
@@ -135,6 +135,30 @@ static void tcg_gen_req_mo(TCGBar type)
     }
 }
 
+static TCGTemp *tci_extend_addr(TCGTemp *addr)
+{
+#ifdef CONFIG_TCG_INTERPRETER
+    /*
+     * 64-bit interpreter requires 64-bit addresses.
+     * Compare to the extension performed by tcg_out_{ld,st}_helper_args
+     * for native code generation.
+     */
+    if (TCG_TARGET_REG_BITS == 64 && tcg_ctx->addr_type == TCG_TYPE_I32) {
+        TCGv_i64 temp = tcg_temp_ebb_new_i64();
+        tcg_gen_extu_i32_i64(temp, temp_tcgv_i32(addr));
+        return tcgv_i64_temp(temp);
+    }
+#endif
+    return addr;
+}
+
+static void maybe_free_addr(TCGTemp *addr, TCGTemp *copy)
+{
+    if (addr != copy) {
+        tcg_temp_free_internal(copy);
+    }
+}
+
 /* Only required for loads, where value might overlap addr. */
 static TCGv_i64 plugin_maybe_preserve_addr(TCGTemp *addr)
 {
@@ -234,6 +258,7 @@ static void tcg_gen_qemu_ld_i32_int(TCGv_i32 val, TCGTemp *addr,
     MemOp orig_memop;
     MemOpIdx orig_oi, oi;
     TCGv_i64 copy_addr;
+    TCGTemp *addr_new;
 
     tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
     orig_memop = memop = tcg_canonicalize_memop(memop, 0, 0);
@@ -248,10 +273,12 @@ static void tcg_gen_qemu_ld_i32_int(TCGv_i32 val, TCGTemp *addr,
         oi = make_memop_idx(memop, idx);
     }
 
+    addr_new = tci_extend_addr(addr);
     copy_addr = plugin_maybe_preserve_addr(addr);
-    gen_ldst1(INDEX_op_qemu_ld, TCG_TYPE_I32, tcgv_i32_temp(val), addr, oi);
+    gen_ldst1(INDEX_op_qemu_ld, TCG_TYPE_I32, tcgv_i32_temp(val), addr_new, oi);
     plugin_gen_mem_callbacks_i32(val, copy_addr, addr, orig_oi,
                                  QEMU_PLUGIN_MEM_R);
+    maybe_free_addr(addr, addr_new);
 
     if ((orig_memop ^ memop) & MO_BSWAP) {
         switch (orig_memop & MO_SIZE) {
@@ -282,6 +309,7 @@ static void tcg_gen_qemu_st_i32_int(TCGv_i32 val, TCGTemp *addr,
 {
     TCGv_i32 swap = NULL;
     MemOpIdx orig_oi, oi;
+    TCGTemp *addr_new;
 
     tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
     memop = tcg_canonicalize_memop(memop, 0, 1);
@@ -304,8 +332,10 @@ static void tcg_gen_qemu_st_i32_int(TCGv_i32 val, TCGTemp *addr,
         oi = make_memop_idx(memop, idx);
     }
 
-    gen_ldst1(INDEX_op_qemu_st, TCG_TYPE_I32, tcgv_i32_temp(val), addr, oi);
+    addr_new = tci_extend_addr(addr);
+    gen_ldst1(INDEX_op_qemu_st, TCG_TYPE_I32, tcgv_i32_temp(val), addr_new, oi);
     plugin_gen_mem_callbacks_i32(val, NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W);
+    maybe_free_addr(addr, addr_new);
 
     if (swap) {
         tcg_temp_free_i32(swap);
@@ -326,6 +356,7 @@ static void tcg_gen_qemu_ld_i64_int(TCGv_i64 val, TCGTemp *addr,
     MemOp orig_memop;
     MemOpIdx orig_oi, oi;
     TCGv_i64 copy_addr;
+    TCGTemp *addr_new;
 
     if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
         tcg_gen_qemu_ld_i32_int(TCGV_LOW(val), addr, idx, memop);
@@ -350,10 +381,12 @@ static void tcg_gen_qemu_ld_i64_int(TCGv_i64 val, TCGTemp *addr,
         oi = make_memop_idx(memop, idx);
     }
 
+    addr_new = tci_extend_addr(addr);
     copy_addr = plugin_maybe_preserve_addr(addr);
-    gen_ld_i64(val, addr, oi);
+    gen_ld_i64(val, addr_new, oi);
     plugin_gen_mem_callbacks_i64(val, copy_addr, addr, orig_oi,
                                  QEMU_PLUGIN_MEM_R);
+    maybe_free_addr(addr, addr_new);
 
     if ((orig_memop ^ memop) & MO_BSWAP) {
         int flags = (orig_memop & MO_SIGN
@@ -388,6 +421,7 @@ static void tcg_gen_qemu_st_i64_int(TCGv_i64 val, TCGTemp *addr,
 {
     TCGv_i64 swap = NULL;
     MemOpIdx orig_oi, oi;
+    TCGTemp *addr_new;
 
     if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
         tcg_gen_qemu_st_i32_int(TCGV_LOW(val), addr, idx, memop);
@@ -418,8 +452,10 @@ static void tcg_gen_qemu_st_i64_int(TCGv_i64 val, TCGTemp *addr,
         oi = make_memop_idx(memop, idx);
     }
 
-    gen_st_i64(val, addr, oi);
+    addr_new = tci_extend_addr(addr);
+    gen_st_i64(val, addr_new, oi);
     plugin_gen_mem_callbacks_i64(val, NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W);
+    maybe_free_addr(addr, addr_new);
 
     if (swap) {
         tcg_temp_free_i64(swap);
@@ -530,6 +566,7 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
 {
     MemOpIdx orig_oi;
     TCGv_i64 ext_addr = NULL;
+    TCGTemp *addr_new;
 
     check_max_alignment(memop_alignment_bits(memop));
     tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
@@ -557,8 +594,10 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
         hi = TCGV128_HIGH(val);
     }
 
+    addr_new = tci_extend_addr(addr);
     gen_ldst2(INDEX_op_qemu_ld2, TCG_TYPE_I128, tcgv_i64_temp(lo),
-              tcgv_i64_temp(hi), addr, oi);
+              tcgv_i64_temp(hi), addr_new, oi);
+    maybe_free_addr(addr, addr_new);
 
     if (need_bswap) {
         tcg_gen_bswap64_i64(lo, lo);
@@ -586,7 +625,9 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
             y = TCGV128_LOW(val);
         }
 
-        gen_ld_i64(x, addr, make_memop_idx(mop[0], idx));
+        addr_new = tci_extend_addr(addr);
+        gen_ld_i64(x, addr_new, make_memop_idx(mop[0], idx));
+        maybe_free_addr(addr, addr_new);
 
         if (need_bswap) {
             tcg_gen_bswap64_i64(x, x);
@@ -602,7 +643,9 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
             addr_p8 = tcgv_i64_temp(t);
         }
 
-        gen_ld_i64(y, addr_p8, make_memop_idx(mop[1], idx));
+        addr_new = tci_extend_addr(addr_p8);
+        gen_ld_i64(y, addr_new, make_memop_idx(mop[1], idx));
+        maybe_free_addr(addr_p8, addr_new);
         tcg_temp_free_internal(addr_p8);
 
         if (need_bswap) {
@@ -636,6 +679,7 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
 {
     MemOpIdx orig_oi;
     TCGv_i64 ext_addr = NULL;
+    TCGTemp *addr_new;
 
     check_max_alignment(memop_alignment_bits(memop));
     tcg_gen_req_mo(TCG_MO_ST_LD | TCG_MO_ST_ST);
@@ -666,8 +710,10 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
         hi = TCGV128_HIGH(val);
     }
 
+    addr_new = tci_extend_addr(addr);
     gen_ldst2(INDEX_op_qemu_st2, TCG_TYPE_I128,
-              tcgv_i64_temp(lo), tcgv_i64_temp(hi), addr, oi);
+              tcgv_i64_temp(lo), tcgv_i64_temp(hi), addr_new, oi);
+    maybe_free_addr(addr, addr_new);
 
     if (need_bswap) {
         tcg_temp_free_i64(lo);
@@ -694,7 +740,9 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
             x = b;
         }
 
-        gen_st_i64(x, addr, make_memop_idx(mop[0], idx));
+        addr_new = tci_extend_addr(addr);
+        gen_st_i64(x, addr_new, make_memop_idx(mop[0], idx));
+        maybe_free_addr(addr, addr_new);
 
         if (tcg_ctx->addr_type == TCG_TYPE_I32) {
             TCGv_i32 t = tcg_temp_ebb_new_i32();
@@ -706,13 +754,15 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
             addr_p8 = tcgv_i64_temp(t);
         }
 
+        addr_new = tci_extend_addr(addr_p8);
         if (b) {
             tcg_gen_bswap64_i64(b, y);
-            gen_st_i64(b, addr_p8, make_memop_idx(mop[1], idx));
+            gen_st_i64(b, addr_new, make_memop_idx(mop[1], idx));
             tcg_temp_free_i64(b);
         } else {
-            gen_st_i64(y, addr_p8, make_memop_idx(mop[1], idx));
+            gen_st_i64(y, addr_new, make_memop_idx(mop[1], idx));
         }
+        maybe_free_addr(addr_p8, addr_new);
         tcg_temp_free_internal(addr_p8);
     } else {
         if (tcg_ctx->addr_type == TCG_TYPE_I32) {
--
2.43.0