From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: qemu-arm@nongnu.org, Peter Maydell <peter.maydell@linaro.org>
Subject: [PATCH v2 08/20] target/arm: Sink gen_mte_check1 into load/store_exclusive
Date: Thu, 25 May 2023 16:25:46 -0700
Message-ID: <20230525232558.1758967-9-richard.henderson@linaro.org>
In-Reply-To: <20230525232558.1758967-1-richard.henderson@linaro.org>

No need to duplicate this check across multiple call sites.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/tcg/translate-a64.c | 44 ++++++++++++++++------------------
 1 file changed, 21 insertions(+), 23 deletions(-)
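
The shape of the change, condensed: each call site used to compute
clean_addr = gen_mte_check1(...) itself and pass the cleaned address into
gen_load_exclusive()/gen_store_exclusive(); after this patch the helpers take
the register number rn and perform the check internally, once per helper, as
seen in the hunks below. As a minimal, standalone C sketch of the same
"sink the check into the callee" pattern (all names here are hypothetical,
not QEMU APIs, with a toy clean_address() standing in for gen_mte_check1()):

    /* Toy illustration of the refactoring pattern only; nothing here is
     * a real QEMU API. */
    #include <inttypes.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for gen_mte_check1(): strip "tag" bits from a dirty address. */
    static uint64_t clean_address(uint64_t dirty_addr, bool is_write)
    {
        (void)is_write;
        /* pretend the tag lives in the top byte */
        return dirty_addr & 0x00ffffffffffffffULL;
    }

    /* After the refactor, the callee performs the check itself... */
    static void load_exclusive(uint64_t dirty_addr)
    {
        uint64_t clean = clean_address(dirty_addr, false);
        printf("exclusive load from 0x%016" PRIx64 "\n", clean);
    }

    static void store_exclusive(uint64_t dirty_addr)
    {
        uint64_t clean = clean_address(dirty_addr, true);
        printf("exclusive store to  0x%016" PRIx64 "\n", clean);
    }

    int main(void)
    {
        /* ...so call sites no longer repeat clean_address() first. */
        load_exclusive(0xab00000000001000ULL);
        store_exclusive(0xab00000000001000ULL);
        return 0;
    }

In the real patch each helper now computes dirty_addr = cpu_reg_sp(s, rn) and
calls gen_mte_check1() on it exactly once (is_write false in the load helper,
true in the store helper), and s->is_ldex is set inside gen_load_exclusive()
rather than at each LDXR/LDXP call site.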

diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index 51f9d227e7..19f0f20896 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -2381,11 +2381,16 @@ static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
  * races in multi-threaded linux-user and when MTTCG softmmu is
  * enabled.
  */
-static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
-                               TCGv_i64 addr, int size, bool is_pair)
+static void gen_load_exclusive(DisasContext *s, int rt, int rt2, int rn,
+                               int size, bool is_pair)
 {
     int idx = get_mem_index(s);
     MemOp memop;
+    TCGv_i64 dirty_addr, clean_addr;
+
+    s->is_ldex = true;
+    dirty_addr = cpu_reg_sp(s, rn);
+    clean_addr = gen_mte_check1(s, dirty_addr, false, rn != 31, size);
 
     g_assert(size <= 3);
     if (is_pair) {
@@ -2393,7 +2398,7 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
         if (size == 2) {
             /* The pair must be single-copy atomic for the doubleword.  */
             memop = finalize_memop(s, MO_64 | MO_ALIGN);
-            tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
+            tcg_gen_qemu_ld_i64(cpu_exclusive_val, clean_addr, idx, memop);
             if (s->be_data == MO_LE) {
                 tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 0, 32);
                 tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 32, 32);
@@ -2412,7 +2417,7 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
 
             memop = finalize_memop_atom(s, MO_128 | MO_ALIGN_16,
                                         MO_ATOM_IFALIGN_PAIR);
-            tcg_gen_qemu_ld_i128(t16, addr, idx, memop);
+            tcg_gen_qemu_ld_i128(t16, clean_addr, idx, memop);
 
             if (s->be_data == MO_LE) {
                 tcg_gen_extr_i128_i64(cpu_exclusive_val,
@@ -2426,14 +2431,14 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
         }
     } else {
         memop = finalize_memop(s, size | MO_ALIGN);
-        tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
+        tcg_gen_qemu_ld_i64(cpu_exclusive_val, clean_addr, idx, memop);
         tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
     }
-    tcg_gen_mov_i64(cpu_exclusive_addr, addr);
+    tcg_gen_mov_i64(cpu_exclusive_addr, clean_addr);
 }
 
 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
-                                TCGv_i64 addr, int size, int is_pair)
+                                int rn, int size, int is_pair)
 {
     /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]
      *     && (!is_pair || env->exclusive_high == [addr + datasize])) {
@@ -2449,9 +2454,12 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
      */
     TCGLabel *fail_label = gen_new_label();
     TCGLabel *done_label = gen_new_label();
-    TCGv_i64 tmp;
+    TCGv_i64 tmp, dirty_addr, clean_addr;
 
-    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
+    dirty_addr = cpu_reg_sp(s, rn);
+    clean_addr = gen_mte_check1(s, dirty_addr, true, rn != 31, size);
+
+    tcg_gen_brcond_i64(TCG_COND_NE, clean_addr, cpu_exclusive_addr, fail_label);
 
     tmp = tcg_temp_new_i64();
     if (is_pair) {
@@ -2639,9 +2647,7 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
         if (is_lasr) {
             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
         }
-        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
-                                    true, rn != 31, size);
-        gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, false);
+        gen_store_exclusive(s, rs, rt, rt2, rn, size, false);
         return;
 
     case 0x4: /* LDXR */
@@ -2649,10 +2655,7 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
         if (rn == 31) {
             gen_check_sp_alignment(s);
         }
-        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
-                                    false, rn != 31, size);
-        s->is_ldex = true;
-        gen_load_exclusive(s, rt, rt2, clean_addr, size, false);
+        gen_load_exclusive(s, rt, rt2, rn, size, false);
         if (is_lasr) {
             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
         }
@@ -2704,9 +2707,7 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
             if (is_lasr) {
                 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
             }
-            clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
-                                        true, rn != 31, size);
-            gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, true);
+            gen_store_exclusive(s, rs, rt, rt2, rn, size, true);
             return;
         }
         if (rt2 == 31
@@ -2723,10 +2724,7 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
             if (rn == 31) {
                 gen_check_sp_alignment(s);
             }
-            clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
-                                        false, rn != 31, size);
-            s->is_ldex = true;
-            gen_load_exclusive(s, rt, rt2, clean_addr, size, true);
+            gen_load_exclusive(s, rt, rt2, rn, size, true);
             if (is_lasr) {
                 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
             }
-- 
2.34.1


