From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: ale@rev.ng, philmd@linaro.org, marcel.apfelbaum@gmail.com,
wangyanan55@huawei.com, anjo@rev.ng
Subject: [PATCH 09/84] tcg: Reduce copies for plugin_gen_mem_callbacks
Date: Wed, 3 May 2023 08:21:03 +0100
Message-ID: <20230503072221.1746802-19-richard.henderson@linaro.org>
In-Reply-To: <20230503072221.1746802-1-richard.henderson@linaro.org>
We only need to make a copy of the address for loads, where the
destination may overlap the address. For now, only eliminate the
copy for stores and 128-bit loads.
Rename plugin_prep_mem_callbacks to plugin_maybe_preserve_addr,
returning NULL if no copy is made.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/tcg-op-ldst.c | 38 ++++++++++++++++++++------------------
1 file changed, 20 insertions(+), 18 deletions(-)
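For context, the copy matters because a load may write its result into
the very temp that currently holds the address, clobbering it before
the plugin callback can record it. Here is a small standalone C sketch
of that hazard (illustration only, not QEMU code; mem_callback, reg and
copy_addr are made-up stand-ins for the plugin callback and the TCG
temps involved):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the plugin memory callback: it only needs the address. */
static void mem_callback(uintptr_t vaddr)
{
    printf("callback saw address 0x%" PRIxPTR "\n", vaddr);
}

int main(void)
{
    uint64_t mem = UINT64_C(0xdeadbeef);

    /* A "register" that holds the address and is also the load
       destination, i.e. the destination overlaps the address. */
    uintptr_t reg = (uintptr_t)&mem;

    /* Preserve the address before the load, as
       plugin_maybe_preserve_addr does on the load paths. */
    uintptr_t copy_addr = reg;

    /* The load overwrites the "register" that held the address... */
    reg = (uintptr_t)*(uint64_t *)reg;

    /* ...but the callback still sees the original address.  Without
       the copy it would have seen the loaded data instead. */
    mem_callback(copy_addr);

    (void)reg;
    return 0;
}

On the store and 128-bit paths the patch passes NULL for copy_addr:
a store never overwrites its address operand, and an i128 value lives
in a separate temp that cannot alias a TCGv address, so the copy can
be dropped there as well.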
diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c
index 17fe35b93c..cbd85f793c 100644
--- a/tcg/tcg-op-ldst.c
+++ b/tcg/tcg-op-ldst.c
@@ -114,7 +114,8 @@ static void tcg_gen_req_mo(TCGBar type)
}
}
-static inline TCGv plugin_prep_mem_callbacks(TCGv vaddr)
+/* Only required for loads, where value might overlap addr. */
+static TCGv plugin_maybe_preserve_addr(TCGv vaddr)
{
#ifdef CONFIG_PLUGIN
if (tcg_ctx->plugin_insn != NULL) {
@@ -124,17 +125,20 @@ static inline TCGv plugin_prep_mem_callbacks(TCGv vaddr)
return temp;
}
#endif
- return vaddr;
+ return NULL;
}
-static void plugin_gen_mem_callbacks(TCGv vaddr, MemOpIdx oi,
- enum qemu_plugin_mem_rw rw)
+static void
+plugin_gen_mem_callbacks(TCGv copy_addr, TCGv orig_addr, MemOpIdx oi,
+ enum qemu_plugin_mem_rw rw)
{
#ifdef CONFIG_PLUGIN
if (tcg_ctx->plugin_insn != NULL) {
qemu_plugin_meminfo_t info = make_plugin_meminfo(oi, rw);
- plugin_gen_empty_mem_callback(vaddr, info);
- tcg_temp_free(vaddr);
+ plugin_gen_empty_mem_callback(copy_addr ? : orig_addr, info);
+ if (copy_addr) {
+ tcg_temp_free(copy_addr);
+ }
}
#endif
}
@@ -143,6 +147,7 @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
{
MemOp orig_memop;
MemOpIdx oi;
+ TCGv copy_addr;
tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
memop = tcg_canonicalize_memop(memop, 0, 0);
@@ -157,9 +162,9 @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
}
}
- addr = plugin_prep_mem_callbacks(addr);
+ copy_addr = plugin_maybe_preserve_addr(addr);
gen_ldst_i32(INDEX_op_qemu_ld_i32, val, addr, memop, idx);
- plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_R);
+ plugin_gen_mem_callbacks(copy_addr, addr, oi, QEMU_PLUGIN_MEM_R);
if ((orig_memop ^ memop) & MO_BSWAP) {
switch (orig_memop & MO_SIZE) {
@@ -202,13 +207,12 @@ void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
memop &= ~MO_BSWAP;
}
- addr = plugin_prep_mem_callbacks(addr);
if (TCG_TARGET_HAS_qemu_st8_i32 && (memop & MO_SIZE) == MO_8) {
gen_ldst_i32(INDEX_op_qemu_st8_i32, val, addr, memop, idx);
} else {
gen_ldst_i32(INDEX_op_qemu_st_i32, val, addr, memop, idx);
}
- plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_W);
+ plugin_gen_mem_callbacks(NULL, addr, oi, QEMU_PLUGIN_MEM_W);
if (swap) {
tcg_temp_free_i32(swap);
@@ -219,6 +223,7 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
{
MemOp orig_memop;
MemOpIdx oi;
+ TCGv copy_addr;
if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop);
@@ -243,9 +248,9 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
}
}
- addr = plugin_prep_mem_callbacks(addr);
+ copy_addr = plugin_maybe_preserve_addr(addr);
gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, memop, idx);
- plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_R);
+ plugin_gen_mem_callbacks(copy_addr, addr, oi, QEMU_PLUGIN_MEM_R);
if ((orig_memop ^ memop) & MO_BSWAP) {
int flags = (orig_memop & MO_SIGN
@@ -300,9 +305,8 @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
memop &= ~MO_BSWAP;
}
- addr = plugin_prep_mem_callbacks(addr);
gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, memop, idx);
- plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_W);
+ plugin_gen_mem_callbacks(NULL, addr, oi, QEMU_PLUGIN_MEM_W);
if (swap) {
tcg_temp_free_i64(swap);
@@ -430,7 +434,6 @@ void tcg_gen_qemu_ld_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop)
tcg_debug_assert((memop & MO_SIGN) == 0);
tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
- addr = plugin_prep_mem_callbacks(addr);
/* TODO: For now, force 32-bit hosts to use the helper. */
if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) {
@@ -501,7 +504,7 @@ void tcg_gen_qemu_ld_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop)
maybe_free_addr64(a64);
}
- plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_R);
+ plugin_gen_mem_callbacks(NULL, addr, oi, QEMU_PLUGIN_MEM_R);
}
void tcg_gen_qemu_st_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop)
@@ -512,7 +515,6 @@ void tcg_gen_qemu_st_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop)
tcg_debug_assert((memop & MO_SIGN) == 0);
tcg_gen_req_mo(TCG_MO_ST_LD | TCG_MO_ST_ST);
- addr = plugin_prep_mem_callbacks(addr);
/* TODO: For now, force 32-bit hosts to use the helper. */
@@ -583,7 +585,7 @@ void tcg_gen_qemu_st_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop)
maybe_free_addr64(a64);
}
- plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_W);
+ plugin_gen_mem_callbacks(NULL, addr, oi, QEMU_PLUGIN_MEM_W);
}
static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, MemOp opc)
--
2.34.1