From: Peter Maydell <peter.maydell@linaro.org>
To: qemu-arm@nongnu.org, qemu-devel@nongnu.org
Subject: [PATCH 20/20] target/arm: Convert load/store tags insns to decodetree
Date: Fri, 2 Jun 2023 16:52:23 +0100
Message-ID: <20230602155223.2040685-21-peter.maydell@linaro.org>
In-Reply-To: <20230602155223.2040685-1-peter.maydell@linaro.org>

Convert the instructions in the load/store memory tags instruction
group (STG, STZG, ST2G, STZ2G, LDG, STGM, STZGM and LDGM) to
decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/tcg/a64.decode | 25 +++
target/arm/tcg/translate-a64.c | 352 ++++++++++++++++-----------------
2 files changed, 195 insertions(+), 182 deletions(-)
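
Notes for reviewers (these sit below the "---" cut, so git-am drops
them):

The imm9 field in these encodings is a signed offset in units of the
16-byte tag granule; the new scale_by_log2_tag_granule() hook applies
the scaling while decodetree extracts the field. A standalone sketch
of the arithmetic (plain C, not QEMU code; sextract9() stands in for
the generated sextract32(insn, 12, 9) call, and LOG2_TAG_GRANULE is 4
as in target/arm/internals.h):

    #include <stdio.h>
    #include <stdint.h>

    #define LOG2_TAG_GRANULE 4   /* TAG_GRANULE == 16 bytes */

    /* Sign-extend the low 9 bits of x. */
    static int32_t sextract9(uint32_t x)
    {
        return (int32_t)(x << 23) >> 23;
    }

    int main(void)
    {
        uint32_t fields[] = { 0x000, 0x001, 0x1ff, 0x100 };
        for (int i = 0; i < 4; i++) {
            int32_t imm = sextract9(fields[i]);
            printf("imm9=0x%03x -> offset %d bytes\n",
                   fields[i], imm << LOG2_TAG_GRANULE);
        }
        return 0;   /* prints offsets 0, 16, -16, -4096 */
    }

The p and w flags in &ldst_tag replace the old tri-state 'index'
variable. A minimal sketch of the convention the trans functions
implement (hypothetical helper, not QEMU code):

    #include <stdint.h>

    /*
     * p=0 w=0: signed offset   addr = base + imm
     * p=0 w=1: pre-index       addr = base + imm, then base = addr
     * p=1 w=1: post-index      addr = base, then base = base + imm
     */
    static uint64_t tag_insn_address(uint64_t *base, int64_t imm,
                                     int p, int w)
    {
        uint64_t addr = *base;
        if (!p) {
            addr += imm;                   /* pre-index or signed offset */
        }
        if (w) {
            *base = addr + (p ? imm : 0);  /* writeback */
        }
        return addr;
    }

The TRANS_FEAT() invocations at the end expand, roughly (see the macro
in target/arm/tcg/translate.h), to a trans_* function that gates on
the feature bit before calling the shared body; for STG approximately:

    static bool trans_STG(DisasContext *s, arg_ldst_tag *a)
    {
        return dc_isar_feature(aa64_mte_insn_reg, s)
               && do_STG(s, a, false, false);
    }
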
diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode
index 48461a0540e..62ead5554fc 100644
--- a/target/arm/tcg/a64.decode
+++ b/target/arm/tcg/a64.decode
@@ -531,3 +531,28 @@ LD_single 0 . 001101 . 1 . ..... 10 . s:1 00 ..... ..... @ldst_single scal
LD_single 0 . 001101 . 1 . ..... 10 . 0 01 ..... ..... @ldst_single scale=3 sz=1 s=0
# Replicating load case
LD_single_repl 0 . 001101 . 1 . ..... 11 . 0 .. ..... ..... @ldst_single_repl s=0
+
+%tag_offset 12:s9 !function=scale_by_log2_tag_granule
+&ldst_tag rn rt imm p w
+@ldst_tag ........ .. . ......... .. rn:5 rt:5 &ldst_tag imm=%tag_offset
+@ldst_tag_mult ........ .. . 000000000 .. rn:5 rt:5 &ldst_tag imm=0
+
+STZGM 11011001 00 1 ......... 00 ..... ..... @ldst_tag_mult p=0 w=0
+STG 11011001 00 1 ......... 01 ..... ..... @ldst_tag p=1 w=1
+STG 11011001 00 1 ......... 10 ..... ..... @ldst_tag p=0 w=0
+STG 11011001 00 1 ......... 11 ..... ..... @ldst_tag p=0 w=1
+
+LDG 11011001 01 1 ......... 00 ..... ..... @ldst_tag p=0 w=0
+STZG 11011001 01 1 ......... 01 ..... ..... @ldst_tag p=1 w=1
+STZG 11011001 01 1 ......... 10 ..... ..... @ldst_tag p=0 w=0
+STZG 11011001 01 1 ......... 11 ..... ..... @ldst_tag p=0 w=1
+
+STGM 11011001 10 1 ......... 00 ..... ..... @ldst_tag_mult p=0 w=0
+ST2G 11011001 10 1 ......... 01 ..... ..... @ldst_tag p=1 w=1
+ST2G 11011001 10 1 ......... 10 ..... ..... @ldst_tag p=0 w=0
+ST2G 11011001 10 1 ......... 11 ..... ..... @ldst_tag p=0 w=1
+
+LDGM 11011001 11 1 ......... 00 ..... ..... @ldst_tag_mult p=0 w=0
+STZ2G 11011001 11 1 ......... 01 ..... ..... @ldst_tag p=1 w=1
+STZ2G 11011001 11 1 ......... 10 ..... ..... @ldst_tag p=0 w=0
+STZ2G 11011001 11 1 ......... 11 ..... ..... @ldst_tag p=0 w=1
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index 128c2b8b4b5..6ca598291a7 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -83,6 +83,12 @@ static int uimm_scaled(DisasContext *s, int x)
return imm << scale;
}
+/* For load/store memory tags: scale offset by LOG2_TAG_GRANULE */
+static int scale_by_log2_tag_granule(DisasContext *s, int x)
+{
+ return x << LOG2_TAG_GRANULE;
+}
+
/*
* Include the generated decoders.
*/
@@ -3538,181 +3544,180 @@ static bool trans_LD_single_repl(DisasContext *s, arg_ldst_single *a)
return true;
}
-/*
- * Load/Store memory tags
- *
- * 31 30 29 24 22 21 12 10 5 0
- * +-----+-------------+-----+---+------+-----+------+------+
- * | 1 1 | 0 1 1 0 0 1 | op1 | 1 | imm9 | op2 | Rn | Rt |
- * +-----+-------------+-----+---+------+-----+------+------+
- */
-static void disas_ldst_tag(DisasContext *s, uint32_t insn)
+static bool trans_STZGM(DisasContext *s, arg_ldst_tag *a)
{
- int rt = extract32(insn, 0, 5);
- int rn = extract32(insn, 5, 5);
- uint64_t offset = sextract64(insn, 12, 9) << LOG2_TAG_GRANULE;
- int op2 = extract32(insn, 10, 2);
- int op1 = extract32(insn, 22, 2);
- bool is_load = false, is_pair = false, is_zero = false, is_mult = false;
- int index = 0;
TCGv_i64 addr, clean_addr, tcg_rt;
+ int size = 4 << s->dcz_blocksize;
- /* We checked insn bits [29:24,21] in the caller. */
- if (extract32(insn, 30, 2) != 3) {
- goto do_unallocated;
+ if (!dc_isar_feature(aa64_mte, s)) {
+ return false;
+ }
+ if (s->current_el == 0) {
+ return false;
}
- /*
- * @index is a tri-state variable which has 3 states:
- * < 0 : post-index, writeback
- * = 0 : signed offset
- * > 0 : pre-index, writeback
- */
- switch (op1) {
- case 0:
- if (op2 != 0) {
- /* STG */
- index = op2 - 2;
- } else {
- /* STZGM */
- if (s->current_el == 0 || offset != 0) {
- goto do_unallocated;
- }
- is_mult = is_zero = true;
- }
- break;
- case 1:
- if (op2 != 0) {
- /* STZG */
- is_zero = true;
- index = op2 - 2;
- } else {
- /* LDG */
- is_load = true;
- }
- break;
- case 2:
- if (op2 != 0) {
- /* ST2G */
- is_pair = true;
- index = op2 - 2;
- } else {
- /* STGM */
- if (s->current_el == 0 || offset != 0) {
- goto do_unallocated;
- }
- is_mult = true;
- }
- break;
- case 3:
- if (op2 != 0) {
- /* STZ2G */
- is_pair = is_zero = true;
- index = op2 - 2;
- } else {
- /* LDGM */
- if (s->current_el == 0 || offset != 0) {
- goto do_unallocated;
- }
- is_mult = is_load = true;
- }
- break;
-
- default:
- do_unallocated:
- unallocated_encoding(s);
- return;
- }
-
- if (is_mult
- ? !dc_isar_feature(aa64_mte, s)
- : !dc_isar_feature(aa64_mte_insn_reg, s)) {
- goto do_unallocated;
- }
-
- if (rn == 31) {
+ if (a->rn == 31) {
gen_check_sp_alignment(s);
}
- addr = read_cpu_reg_sp(s, rn, true);
- if (index >= 0) {
+ addr = read_cpu_reg_sp(s, a->rn, true);
+ tcg_gen_addi_i64(addr, addr, a->imm);
+ tcg_rt = cpu_reg(s, a->rt);
+
+ if (s->ata) {
+ gen_helper_stzgm_tags(cpu_env, addr, tcg_rt);
+ }
+ /*
+ * The non-tags portion of STZGM is mostly like DC_ZVA,
+ * except the alignment happens before the access.
+ */
+ clean_addr = clean_data_tbi(s, addr);
+ tcg_gen_andi_i64(clean_addr, clean_addr, -size);
+ gen_helper_dc_zva(cpu_env, clean_addr);
+ return true;
+}
+
+static bool trans_STGM(DisasContext *s, arg_ldst_tag *a)
+{
+ TCGv_i64 addr, clean_addr, tcg_rt;
+
+ if (!dc_isar_feature(aa64_mte, s)) {
+ return false;
+ }
+ if (s->current_el == 0) {
+ return false;
+ }
+
+ if (a->rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+
+ addr = read_cpu_reg_sp(s, a->rn, true);
+ tcg_gen_addi_i64(addr, addr, a->imm);
+ tcg_rt = cpu_reg(s, a->rt);
+
+ if (s->ata) {
+ gen_helper_stgm(cpu_env, addr, tcg_rt);
+ } else {
+ MMUAccessType acc = MMU_DATA_STORE;
+ int size = 4 << GMID_EL1_BS;
+
+ clean_addr = clean_data_tbi(s, addr);
+ tcg_gen_andi_i64(clean_addr, clean_addr, -size);
+ gen_probe_access(s, clean_addr, acc, size);
+ }
+ return true;
+}
+
+static bool trans_LDGM(DisasContext *s, arg_ldst_tag *a)
+{
+ TCGv_i64 addr, clean_addr, tcg_rt;
+
+ if (!dc_isar_feature(aa64_mte, s)) {
+ return false;
+ }
+ if (s->current_el == 0) {
+ return false;
+ }
+
+ if (a->rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+
+ addr = read_cpu_reg_sp(s, a->rn, true);
+ tcg_gen_addi_i64(addr, addr, a->imm);
+ tcg_rt = cpu_reg(s, a->rt);
+
+ if (s->ata) {
+ gen_helper_ldgm(tcg_rt, cpu_env, addr);
+ } else {
+ MMUAccessType acc = MMU_DATA_LOAD;
+ int size = 4 << GMID_EL1_BS;
+
+ clean_addr = clean_data_tbi(s, addr);
+ tcg_gen_andi_i64(clean_addr, clean_addr, -size);
+ gen_probe_access(s, clean_addr, acc, size);
+ /* The result tags are zeros. */
+ tcg_gen_movi_i64(tcg_rt, 0);
+ }
+ return true;
+}
+
+static bool trans_LDG(DisasContext *s, arg_ldst_tag *a)
+{
+ TCGv_i64 addr, clean_addr, tcg_rt;
+
+ if (!dc_isar_feature(aa64_mte_insn_reg, s)) {
+ return false;
+ }
+
+ if (a->rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+
+ addr = read_cpu_reg_sp(s, a->rn, true);
+ if (!a->p) {
/* pre-index or signed offset */
- tcg_gen_addi_i64(addr, addr, offset);
+ tcg_gen_addi_i64(addr, addr, a->imm);
}
- if (is_mult) {
- tcg_rt = cpu_reg(s, rt);
+ tcg_gen_andi_i64(addr, addr, -TAG_GRANULE);
+ tcg_rt = cpu_reg(s, a->rt);
+ if (s->ata) {
+ gen_helper_ldg(tcg_rt, cpu_env, addr, tcg_rt);
+ } else {
+ clean_addr = clean_data_tbi(s, addr);
+ gen_probe_access(s, clean_addr, MMU_DATA_LOAD, MO_8);
+ gen_address_with_allocation_tag0(tcg_rt, addr);
+ }
- if (is_zero) {
- int size = 4 << s->dcz_blocksize;
-
- if (s->ata) {
- gen_helper_stzgm_tags(cpu_env, addr, tcg_rt);
- }
- /*
- * The non-tags portion of STZGM is mostly like DC_ZVA,
- * except the alignment happens before the access.
- */
- clean_addr = clean_data_tbi(s, addr);
- tcg_gen_andi_i64(clean_addr, clean_addr, -size);
- gen_helper_dc_zva(cpu_env, clean_addr);
- } else if (s->ata) {
- if (is_load) {
- gen_helper_ldgm(tcg_rt, cpu_env, addr);
- } else {
- gen_helper_stgm(cpu_env, addr, tcg_rt);
- }
- } else {
- MMUAccessType acc = is_load ? MMU_DATA_LOAD : MMU_DATA_STORE;
- int size = 4 << GMID_EL1_BS;
-
- clean_addr = clean_data_tbi(s, addr);
- tcg_gen_andi_i64(clean_addr, clean_addr, -size);
- gen_probe_access(s, clean_addr, acc, size);
-
- if (is_load) {
- /* The result tags are zeros. */
- tcg_gen_movi_i64(tcg_rt, 0);
- }
+ if (a->w) {
+ /* pre-index or post-index */
+ if (a->p) {
+ /* post-index */
+ tcg_gen_addi_i64(addr, addr, a->imm);
}
- return;
+ tcg_gen_mov_i64(cpu_reg_sp(s, a->rn), addr);
+ }
+ return true;
+}
+
+static bool do_STG(DisasContext *s, arg_ldst_tag *a, bool is_zero, bool is_pair)
+{
+ TCGv_i64 addr, tcg_rt;
+
+ if (a->rn == 31) {
+ gen_check_sp_alignment(s);
}
- if (is_load) {
- tcg_gen_andi_i64(addr, addr, -TAG_GRANULE);
- tcg_rt = cpu_reg(s, rt);
- if (s->ata) {
- gen_helper_ldg(tcg_rt, cpu_env, addr, tcg_rt);
+ addr = read_cpu_reg_sp(s, a->rn, true);
+ if (!a->p) {
+ /* pre-index or signed offset */
+ tcg_gen_addi_i64(addr, addr, a->imm);
+ }
+ tcg_rt = cpu_reg_sp(s, a->rt);
+ if (!s->ata) {
+ /*
+ * For STG and ST2G, we need to check alignment and probe memory.
+ * TODO: For STZG and STZ2G, we could rely on the stores below,
+ * at least for system mode; user-only won't enforce alignment.
+ */
+ if (is_pair) {
+ gen_helper_st2g_stub(cpu_env, addr);
} else {
- clean_addr = clean_data_tbi(s, addr);
- gen_probe_access(s, clean_addr, MMU_DATA_LOAD, MO_8);
- gen_address_with_allocation_tag0(tcg_rt, addr);
+ gen_helper_stg_stub(cpu_env, addr);
+ }
+ } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
+ if (is_pair) {
+ gen_helper_st2g_parallel(cpu_env, addr, tcg_rt);
+ } else {
+ gen_helper_stg_parallel(cpu_env, addr, tcg_rt);
}
} else {
- tcg_rt = cpu_reg_sp(s, rt);
- if (!s->ata) {
- /*
- * For STG and ST2G, we need to check alignment and probe memory.
- * TODO: For STZG and STZ2G, we could rely on the stores below,
- * at least for system mode; user-only won't enforce alignment.
- */
- if (is_pair) {
- gen_helper_st2g_stub(cpu_env, addr);
- } else {
- gen_helper_stg_stub(cpu_env, addr);
- }
- } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
- if (is_pair) {
- gen_helper_st2g_parallel(cpu_env, addr, tcg_rt);
- } else {
- gen_helper_stg_parallel(cpu_env, addr, tcg_rt);
- }
+ if (is_pair) {
+ gen_helper_st2g(cpu_env, addr, tcg_rt);
} else {
- if (is_pair) {
- gen_helper_st2g(cpu_env, addr, tcg_rt);
- } else {
- gen_helper_stg(cpu_env, addr, tcg_rt);
- }
+ gen_helper_stg(cpu_env, addr, tcg_rt);
}
}
@@ -3730,32 +3735,21 @@ static void disas_ldst_tag(DisasContext *s, uint32_t insn)
}
}
- if (index != 0) {
+ if (a->w) {
/* pre-index or post-index */
- if (index < 0) {
+ if (a->p) {
/* post-index */
- tcg_gen_addi_i64(addr, addr, offset);
+ tcg_gen_addi_i64(addr, addr, a->imm);
}
- tcg_gen_mov_i64(cpu_reg_sp(s, rn), addr);
+ tcg_gen_mov_i64(cpu_reg_sp(s, a->rn), addr);
}
+ return true;
}
-/* Loads and stores */
-static void disas_ldst(DisasContext *s, uint32_t insn)
-{
- switch (extract32(insn, 24, 6)) {
- case 0x19:
- if (extract32(insn, 21, 1) != 0) {
- disas_ldst_tag(s, insn);
- } else {
- unallocated_encoding(s);
- }
- break;
- default:
- unallocated_encoding(s);
- break;
- }
-}
+TRANS_FEAT(STG, aa64_mte_insn_reg, do_STG, a, false, false)
+TRANS_FEAT(STZG, aa64_mte_insn_reg, do_STG, a, true, false)
+TRANS_FEAT(ST2G, aa64_mte_insn_reg, do_STG, a, false, true)
+TRANS_FEAT(STZ2G, aa64_mte_insn_reg, do_STG, a, true, true)
typedef void ArithTwoOp(TCGv_i64, TCGv_i64, TCGv_i64);
@@ -13651,12 +13645,6 @@ static bool btype_destination_ok(uint32_t insn, bool bt, int btype)
static void disas_a64_legacy(DisasContext *s, uint32_t insn)
{
switch (extract32(insn, 25, 4)) {
- case 0x4:
- case 0x6:
- case 0xc:
- case 0xe: /* Loads and stores */
- disas_ldst(s, insn);
- break;
case 0x5:
case 0xd: /* Data processing - register */
disas_data_proc_reg(s, insn);
--
2.34.1