From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: qemu-arm@nongnu.org
Subject: [PATCH v1 06/19] target/arm: Sink gen_mte_check1 into load/store_exclusive
Date: Wed, 15 Feb 2023 17:08:41 -1000
Message-ID: <20230216030854.1212208-7-richard.henderson@linaro.org>
In-Reply-To: <20230216030854.1212208-1-richard.henderson@linaro.org>

No need to duplicate this check across multiple call sites.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/arm/translate-a64.c | 44 ++++++++++++++++++--------------------
1 file changed, 21 insertions(+), 23 deletions(-)
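
For orientation, a condensed before/after view of the LDXR call site, taken
from the hunks below (the store-exclusive sites are analogous, minus is_ldex;
this is an excerpt, not standalone code):

    /* Before: each caller performed the MTE check (and, for loads, set
     * is_ldex) before calling the generator. */
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), false, rn != 31, size);
    s->is_ldex = true;
    gen_load_exclusive(s, rt, rt2, clean_addr, size, false);

    /* After: callers pass the register number and gen_load_exclusive does
     * the check (and sets is_ldex) internally, so the logic lives in one
     * place for all four LDXR/LDXP/STXR/STXP call sites. */
    gen_load_exclusive(s, rt, rt2, rn, size, false);
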
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index b42e5848cc..cd86597172 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -2514,11 +2514,16 @@ static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
* races in multi-threaded linux-user and when MTTCG softmmu is
* enabled.
*/
-static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
- TCGv_i64 addr, int size, bool is_pair)
+static void gen_load_exclusive(DisasContext *s, int rt, int rt2, int rn,
+ int size, bool is_pair)
{
int idx = get_mem_index(s);
MemOp memop = s->be_data;
+ TCGv_i64 dirty_addr, clean_addr;
+
+ s->is_ldex = true;
+ dirty_addr = cpu_reg_sp(s, rn);
+ clean_addr = gen_mte_check1(s, dirty_addr, false, rn != 31, size);
g_assert(size <= 3);
if (is_pair) {
@@ -2526,7 +2531,7 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
if (size == 2) {
/* The pair must be single-copy atomic for the doubleword. */
memop |= MO_64 | MO_ALIGN;
- tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
+ tcg_gen_qemu_ld_i64(cpu_exclusive_val, clean_addr, idx, memop);
if (s->be_data == MO_LE) {
tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 0, 32);
tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 32, 32);
@@ -2544,7 +2549,7 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
TCGv_i128 t16 = tcg_temp_new_i128();
memop |= MO_128 | MO_ALIGN_16 | MO_ATMAX_8;
- tcg_gen_qemu_ld_i128(t16, addr, idx, memop);
+ tcg_gen_qemu_ld_i128(t16, clean_addr, idx, memop);
tcg_gen_extr_i128_i64(cpu_exclusive_val, cpu_exclusive_high, t16);
tcg_temp_free_i128(t16);
@@ -2559,14 +2564,14 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
}
} else {
memop |= size | MO_ALIGN;
- tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
+ tcg_gen_qemu_ld_i64(cpu_exclusive_val, clean_addr, idx, memop);
tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
}
- tcg_gen_mov_i64(cpu_exclusive_addr, addr);
+ tcg_gen_mov_i64(cpu_exclusive_addr, clean_addr);
}
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
- TCGv_i64 addr, int size, int is_pair)
+ int rn, int size, int is_pair)
{
/* if (env->exclusive_addr == addr && env->exclusive_val == [addr]
* && (!is_pair || env->exclusive_high == [addr + datasize])) {
@@ -2582,9 +2587,12 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
*/
TCGLabel *fail_label = gen_new_label();
TCGLabel *done_label = gen_new_label();
- TCGv_i64 tmp;
+ TCGv_i64 tmp, dirty_addr, clean_addr;
- tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
+ dirty_addr = cpu_reg_sp(s, rn);
+ clean_addr = gen_mte_check1(s, dirty_addr, true, rn != 31, size);
+
+ tcg_gen_brcond_i64(TCG_COND_NE, clean_addr, cpu_exclusive_addr, fail_label);
tmp = tcg_temp_new_i64();
if (is_pair) {
@@ -2774,9 +2782,7 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
if (is_lasr) {
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
}
- clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
- true, rn != 31, size);
- gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, false);
+ gen_store_exclusive(s, rs, rt, rt2, rn, size, false);
return;
case 0x4: /* LDXR */
@@ -2784,10 +2790,7 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
if (rn == 31) {
gen_check_sp_alignment(s);
}
- clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
- false, rn != 31, size);
- s->is_ldex = true;
- gen_load_exclusive(s, rt, rt2, clean_addr, size, false);
+ gen_load_exclusive(s, rt, rt2, rn, size, false);
if (is_lasr) {
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
}
@@ -2839,9 +2842,7 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
if (is_lasr) {
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
}
- clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
- true, rn != 31, size);
- gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, true);
+ gen_store_exclusive(s, rs, rt, rt2, rn, size, true);
return;
}
if (rt2 == 31
@@ -2858,10 +2859,7 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
if (rn == 31) {
gen_check_sp_alignment(s);
}
- clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
- false, rn != 31, size);
- s->is_ldex = true;
- gen_load_exclusive(s, rt, rt2, clean_addr, size, true);
+ gen_load_exclusive(s, rt, rt2, rn, size, true);
if (is_lasr) {
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
}
--
2.34.1