From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: "Philippe Mathieu-Daudé" <philmd@linaro.org>
Subject: [PATCH v2 09/30] tcg/tci: Use cpu_{ld,st}_mmu
Date: Wed, 15 Feb 2023 16:57:18 -1000
Message-ID: <20230216025739.1211680-10-richard.henderson@linaro.org>
In-Reply-To: <20230216025739.1211680-1-richard.henderson@linaro.org>

Unify the softmmu and the user-only paths by using the
official memory interface.  Avoid logging memory operations
to plugins twice by relying on the logging already done
inside the cpu_*_mmu functions.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg-op.c |   9 +++-
 tcg/tci.c    | 127 ++++++++-------------------------------------------
 2 files changed, 26 insertions(+), 110 deletions(-)
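
The cpu_ld*_mmu accessors return zero-extended values, which is why
the signed cases in tci_qemu_ld below add explicit
(int8_t)/(int16_t)/(int32_t) casts: they recover the sign extension
that the replaced helper_*_lds*_mmu helpers performed internally.
A minimal standalone sketch of that conversion rule in plain C,
using a hypothetical stand-in loader rather than QEMU code:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for an unsigned byte load such as cpu_ldb_mmu(). */
static uint64_t load_byte(void)
{
    return 0xff;
}

int main(void)
{
    /* MO_UB case: keep the zero-extended value, 0x00000000000000ff. */
    uint64_t ub = load_byte();

    /* MO_SB case: cast to int8_t first; the implicit conversion back
     * to uint64_t then sign-extends, giving 0xffffffffffffffff. */
    uint64_t sb = (int8_t)load_byte();

    printf("MO_UB -> %016" PRIx64 "\n", ub);
    printf("MO_SB -> %016" PRIx64 "\n", sb);
    return 0;
}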

diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index c581ae77c4..da312dcf7e 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -2916,7 +2916,12 @@ static void tcg_gen_req_mo(TCGBar type)
 
 static inline TCGv plugin_prep_mem_callbacks(TCGv vaddr)
 {
-#ifdef CONFIG_PLUGIN
+    /*
+     * With TCI, we get memory tracing via cpu_{ld,st}_mmu.
+     * No need to instrument memory operations inline, and
+     * we don't want to log the same memory operation twice.
+     */
+#if defined(CONFIG_PLUGIN) && !defined(CONFIG_TCG_INTERPRETER)
     if (tcg_ctx->plugin_insn != NULL) {
         /* Save a copy of the vaddr for use after a load.  */
         TCGv temp = tcg_temp_new();
@@ -2930,7 +2935,7 @@ static inline TCGv plugin_prep_mem_callbacks(TCGv vaddr)
 static void plugin_gen_mem_callbacks(TCGv vaddr, MemOpIdx oi,
                                      enum qemu_plugin_mem_rw rw)
 {
-#ifdef CONFIG_PLUGIN
+#if defined(CONFIG_PLUGIN) && !defined(CONFIG_TCG_INTERPRETER)
     if (tcg_ctx->plugin_insn != NULL) {
         qemu_plugin_meminfo_t info = make_plugin_meminfo(oi, rw);
         plugin_gen_empty_mem_callback(vaddr, info);
diff --git a/tcg/tci.c b/tcg/tci.c
index fc67e7e767..170dcf1262 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -292,87 +292,34 @@ static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr,
     MemOp mop = get_memop(oi);
     uintptr_t ra = (uintptr_t)tb_ptr;
 
-#ifdef CONFIG_SOFTMMU
     switch (mop & (MO_BSWAP | MO_SSIZE)) {
     case MO_UB:
-        return helper_ret_ldub_mmu(env, taddr, oi, ra);
+        return cpu_ldb_mmu(env, taddr, oi, ra);
     case MO_SB:
-        return helper_ret_ldsb_mmu(env, taddr, oi, ra);
+        return (int8_t)cpu_ldb_mmu(env, taddr, oi, ra);
     case MO_LEUW:
-        return helper_le_lduw_mmu(env, taddr, oi, ra);
+        return cpu_ldw_le_mmu(env, taddr, oi, ra);
     case MO_LESW:
-        return helper_le_ldsw_mmu(env, taddr, oi, ra);
+        return (int16_t)cpu_ldw_le_mmu(env, taddr, oi, ra);
     case MO_LEUL:
-        return helper_le_ldul_mmu(env, taddr, oi, ra);
+        return cpu_ldl_le_mmu(env, taddr, oi, ra);
     case MO_LESL:
-        return helper_le_ldsl_mmu(env, taddr, oi, ra);
+        return (int32_t)cpu_ldl_le_mmu(env, taddr, oi, ra);
     case MO_LEUQ:
-        return helper_le_ldq_mmu(env, taddr, oi, ra);
+        return cpu_ldq_le_mmu(env, taddr, oi, ra);
     case MO_BEUW:
-        return helper_be_lduw_mmu(env, taddr, oi, ra);
+        return cpu_ldw_be_mmu(env, taddr, oi, ra);
     case MO_BESW:
-        return helper_be_ldsw_mmu(env, taddr, oi, ra);
+        return (int16_t)cpu_ldw_be_mmu(env, taddr, oi, ra);
     case MO_BEUL:
-        return helper_be_ldul_mmu(env, taddr, oi, ra);
+        return cpu_ldl_be_mmu(env, taddr, oi, ra);
     case MO_BESL:
-        return helper_be_ldsl_mmu(env, taddr, oi, ra);
+        return (int32_t)cpu_ldl_be_mmu(env, taddr, oi, ra);
     case MO_BEUQ:
-        return helper_be_ldq_mmu(env, taddr, oi, ra);
+        return cpu_ldq_be_mmu(env, taddr, oi, ra);
     default:
         g_assert_not_reached();
     }
-#else
-    void *haddr = g2h(env_cpu(env), taddr);
-    unsigned a_mask = (1u << get_alignment_bits(mop)) - 1;
-    uint64_t ret;
-
-    set_helper_retaddr(ra);
-    if (taddr & a_mask) {
-        helper_unaligned_ld(env, taddr);
-    }
-    switch (mop & (MO_BSWAP | MO_SSIZE)) {
-    case MO_UB:
-        ret = ldub_p(haddr);
-        break;
-    case MO_SB:
-        ret = ldsb_p(haddr);
-        break;
-    case MO_LEUW:
-        ret = lduw_le_p(haddr);
-        break;
-    case MO_LESW:
-        ret = ldsw_le_p(haddr);
-        break;
-    case MO_LEUL:
-        ret = (uint32_t)ldl_le_p(haddr);
-        break;
-    case MO_LESL:
-        ret = (int32_t)ldl_le_p(haddr);
-        break;
-    case MO_LEUQ:
-        ret = ldq_le_p(haddr);
-        break;
-    case MO_BEUW:
-        ret = lduw_be_p(haddr);
-        break;
-    case MO_BESW:
-        ret = ldsw_be_p(haddr);
-        break;
-    case MO_BEUL:
-        ret = (uint32_t)ldl_be_p(haddr);
-        break;
-    case MO_BESL:
-        ret = (int32_t)ldl_be_p(haddr);
-        break;
-    case MO_BEUQ:
-        ret = ldq_be_p(haddr);
-        break;
-    default:
-        g_assert_not_reached();
-    }
-    clear_helper_retaddr();
-    return ret;
-#endif
 }
 
 static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val,
@@ -381,67 +328,31 @@ static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val,
     MemOp mop = get_memop(oi);
     uintptr_t ra = (uintptr_t)tb_ptr;
 
-#ifdef CONFIG_SOFTMMU
     switch (mop & (MO_BSWAP | MO_SIZE)) {
     case MO_UB:
-        helper_ret_stb_mmu(env, taddr, val, oi, ra);
+        cpu_stb_mmu(env, taddr, val, oi, ra);
         break;
     case MO_LEUW:
-        helper_le_stw_mmu(env, taddr, val, oi, ra);
+        cpu_stw_le_mmu(env, taddr, val, oi, ra);
         break;
     case MO_LEUL:
-        helper_le_stl_mmu(env, taddr, val, oi, ra);
+        cpu_stl_le_mmu(env, taddr, val, oi, ra);
         break;
     case MO_LEUQ:
-        helper_le_stq_mmu(env, taddr, val, oi, ra);
+        cpu_stq_le_mmu(env, taddr, val, oi, ra);
         break;
     case MO_BEUW:
-        helper_be_stw_mmu(env, taddr, val, oi, ra);
+        cpu_stw_be_mmu(env, taddr, val, oi, ra);
         break;
     case MO_BEUL:
-        helper_be_stl_mmu(env, taddr, val, oi, ra);
+        cpu_stl_be_mmu(env, taddr, val, oi, ra);
         break;
     case MO_BEUQ:
-        helper_be_stq_mmu(env, taddr, val, oi, ra);
+        cpu_stq_be_mmu(env, taddr, val, oi, ra);
         break;
     default:
         g_assert_not_reached();
     }
-#else
-    void *haddr = g2h(env_cpu(env), taddr);
-    unsigned a_mask = (1u << get_alignment_bits(mop)) - 1;
-
-    set_helper_retaddr(ra);
-    if (taddr & a_mask) {
-        helper_unaligned_st(env, taddr);
-    }
-    switch (mop & (MO_BSWAP | MO_SIZE)) {
-    case MO_UB:
-        stb_p(haddr, val);
-        break;
-    case MO_LEUW:
-        stw_le_p(haddr, val);
-        break;
-    case MO_LEUL:
-        stl_le_p(haddr, val);
-        break;
-    case MO_LEUQ:
-        stq_le_p(haddr, val);
-        break;
-    case MO_BEUW:
-        stw_be_p(haddr, val);
-        break;
-    case MO_BEUL:
-        stl_be_p(haddr, val);
-        break;
-    case MO_BEUQ:
-        stq_be_p(haddr, val);
-        break;
-    default:
-        g_assert_not_reached();
-    }
-    clear_helper_retaddr();
-#endif
 }
 
 #if TCG_TARGET_REG_BITS == 64
-- 
2.34.1
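
The switch statements in tci_qemu_ld and tci_qemu_st above dispatch on
a MemOp operand that packs access size, signedness and byte order into
one small bitfield; masking with MO_BSWAP | MO_SSIZE (MO_BSWAP | MO_SIZE
for stores) selects the matching accessor.  A standalone sketch of that
dispatch style, using made-up flag values rather than QEMU's real MemOp
encoding:

#include <stdint.h>
#include <stdio.h>

/* Illustrative flags only; QEMU's actual MemOp bit layout differs. */
enum {
    OP_SIZE_8     = 0x0,
    OP_SIZE_16    = 0x1,
    OP_SIZE_MASK  = 0x3,
    OP_SIGN       = 0x4,  /* sign-extend the loaded value */
    OP_BSWAP      = 0x8,  /* big-endian access            */
    OP_SSIZE_MASK = OP_SIZE_MASK | OP_SIGN,
};

static uint64_t do_load(const uint8_t *p, unsigned op)
{
    switch (op & (OP_BSWAP | OP_SSIZE_MASK)) {
    case OP_SIZE_8:
        return p[0];
    case OP_SIZE_8 | OP_SIGN:
        return (int8_t)p[0];
    case OP_SIZE_16:                      /* little-endian */
        return p[0] | (p[1] << 8);
    case OP_SIZE_16 | OP_SIGN:
        return (int16_t)(p[0] | (p[1] << 8));
    case OP_SIZE_16 | OP_BSWAP:           /* big-endian */
        return (p[0] << 8) | p[1];
    default:
        /* The real code covers every combination and asserts otherwise. */
        return 0;
    }
}

int main(void)
{
    const uint8_t buf[2] = { 0x34, 0x12 };
    printf("%x\n", (unsigned)do_load(buf, OP_SIZE_16));             /* 1234 */
    printf("%x\n", (unsigned)do_load(buf, OP_SIZE_16 | OP_BSWAP));  /* 3412 */
    return 0;
}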



Thread overview: 42+ messages
2023-02-16  2:57 [PATCH v2 00/30] tcg: Improve atomicity support Richard Henderson
2023-02-16  2:57 ` [PATCH v2 01/30] include/qemu/cpuid: Introduce xgetbv_low Richard Henderson
2023-02-16  2:57 ` [PATCH v2 02/30] include/exec/memop: Add bits describing atomicity Richard Henderson
2023-02-28 17:56   ` Alex Bennée
2023-03-15 17:13   ` Philippe Mathieu-Daudé
2023-02-16  2:57 ` [PATCH v2 03/30] accel/tcg: Add cpu_in_serial_context Richard Henderson
2023-02-28 17:57   ` Alex Bennée
2023-03-15 16:07   ` Philippe Mathieu-Daudé
2023-02-16  2:57 ` [PATCH v2 04/30] accel/tcg: Introduce tlb_read_idx Richard Henderson
2023-02-28 17:59   ` Alex Bennée
2023-02-16  2:57 ` [PATCH v2 05/30] accel/tcg: Reorg system mode load helpers Richard Henderson
2023-02-28 18:08   ` Alex Bennée
2023-02-16  2:57 ` [PATCH v2 06/30] accel/tcg: Reorg system mode store helpers Richard Henderson
2023-02-16  2:57 ` [PATCH v2 07/30] accel/tcg: Honor atomicity of loads Richard Henderson
2023-02-28 18:19   ` Alex Bennée
2023-02-16  2:57 ` [PATCH v2 08/30] accel/tcg: Honor atomicity of stores Richard Henderson
2023-02-16  2:57 ` Richard Henderson [this message]
2023-02-16  2:57 ` [PATCH v2 10/30] tcg: Unify helper_{be,le}_{ld,st}* Richard Henderson
2023-02-16  2:57 ` [PATCH v2 11/30] accel/tcg: Implement helper_{ld, st}*_mmu for user-only Richard Henderson
2023-02-16  2:57 ` [PATCH v2 12/30] tcg: Add 128-bit guest memory primitives Richard Henderson
2023-02-16  2:57 ` [PATCH v2 13/30] meson: Detect atomic128 support with optimization Richard Henderson
2023-02-16  2:57 ` [PATCH v2 14/30] tcg/i386: Add have_atomic16 Richard Henderson
2023-03-15 17:06   ` Philippe Mathieu-Daudé
2023-02-16  2:57 ` [PATCH v2 15/30] accel/tcg: Use have_atomic16 in ldst_atomicity.c.inc Richard Henderson
2023-02-16  2:57 ` [PATCH v2 16/30] accel/tcg: Add aarch64 specific support in ldst_atomicity Richard Henderson
2023-02-16  2:57 ` [PATCH v2 17/30] tcg/aarch64: Detect have_lse, have_lse2 for linux Richard Henderson
2023-03-15 17:00   ` Philippe Mathieu-Daudé
2023-02-16  2:57 ` [PATCH v2 18/30] tcg/aarch64: Detect have_lse, have_lse2 for darwin Richard Henderson
2023-03-15 16:59   ` Philippe Mathieu-Daudé
2023-02-16  2:57 ` [PATCH v2 19/30] accel/tcg: Add have_lse2 support in ldst_atomicity Richard Henderson
2023-02-16  2:57 ` [PATCH v2 20/30] tcg: Introduce TCG_OPF_TYPE_MASK Richard Henderson
2023-02-16  2:57 ` [PATCH v2 21/30] tcg: Add INDEX_op_qemu_{ld,st}_i128 Richard Henderson
2023-02-16  2:57 ` [PATCH v2 22/30] tcg/i386: Introduce tcg_out_mov2 Richard Henderson
2023-02-16  2:57 ` [PATCH v2 23/30] tcg/i386: Introduce tcg_out_testi Richard Henderson
2023-02-16  2:57 ` [PATCH v2 24/30] tcg/i386: Use full load/store helpers in user-only mode Richard Henderson
2023-02-16  2:57 ` [PATCH v2 25/30] tcg/i386: Replace is64 with type in qemu_ld/st routines Richard Henderson
2023-02-16  2:57 ` [PATCH v2 26/30] tcg/i386: Mark Win64 call-saved vector regs as reserved Richard Henderson
2023-02-16  2:57 ` [PATCH v2 27/30] tcg/i386: Examine MemOp for atomicity and alignment Richard Henderson
2023-02-16  2:57 ` [PATCH v2 28/30] tcg/i386: Support 128-bit load/store with have_atomic16 Richard Henderson
2023-02-16  2:57 ` [PATCH v2 29/30] tcg/i386: Add vex_v argument to tcg_out_vex_modrm_pool Richard Henderson
2023-03-15 16:43   ` Philippe Mathieu-Daudé
2023-02-16  2:57 ` [PATCH v2 30/30] tcg/i386: Honor 64-bit atomicity in 32-bit mode Richard Henderson
