From: Gabriel Brookman <brookmangabriel@gmail.com>
To: qemu-devel@nongnu.org
Cc: Peter Maydell <peter.maydell@linaro.org>,
Gustavo Romero <gustavo.romero@linaro.org>,
Richard Henderson <richard.henderson@linaro.org>,
qemu-arm@nongnu.org, Laurent Vivier <laurent@vivier.eu>,
Pierrick Bouvier <pierrick.bouvier@linaro.org>,
Gabriel Brookman <brookmangabriel@gmail.com>
Subject: [PATCH v4 06/13] target/arm: add canonical tag check logic
Date: Mon, 09 Mar 2026 17:59:38 -0400 [thread overview]
Message-ID: <20260309-feat-mte4-v4-6-daaf0375620d@gmail.com> (raw)
In-Reply-To: <20260309-feat-mte4-v4-0-daaf0375620d@gmail.com>
This feature causes tag checks that occur in a canonically tagged memory
region to compare logical address tags against their canonical form,
rather than against allocation tags. Described in the ARM ARM
section "Logical Address Tagging".
Signed-off-by: Gabriel Brookman <brookmangabriel@gmail.com>
---
target/arm/cpu-features.h | 5 +++++
target/arm/cpu.h | 1 +
target/arm/internals.h | 31 ++++++++++++++++++++++++++++++-
target/arm/tcg/hflags.c | 4 ++++
target/arm/tcg/mte_helper.c | 21 +++++++++++++++++++++
target/arm/tcg/translate-a64.c | 7 +++++++
target/arm/tcg/translate.h | 1 +
7 files changed, 69 insertions(+), 1 deletion(-)
diff --git a/target/arm/cpu-features.h b/target/arm/cpu-features.h
index 38fc56b52e..5e3dc5256f 100644
--- a/target/arm/cpu-features.h
+++ b/target/arm/cpu-features.h
@@ -1154,6 +1154,11 @@ static inline bool isar_feature_aa64_mte_store_only(const ARMISARegisters *id)
return FIELD_EX64_IDREG(id, ID_AA64PFR2, MTESTOREONLY) == 1;
}
+static inline bool isar_feature_aa64_mte_mtx(const ARMISARegisters *id)
+{
+ return FIELD_EX64_IDREG(id, ID_AA64PFR1, MTEX) == 1;
+}
+
static inline bool isar_feature_aa64_sme(const ARMISARegisters *id)
{
return FIELD_EX64_IDREG(id, ID_AA64PFR1, SME) != 0;
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 7911912c3e..1f33c0d163 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -2527,6 +2527,7 @@ FIELD(TBFLAG_A64, GCS_RVCEN, 42, 1)
FIELD(TBFLAG_A64, GCSSTR_EL, 43, 2)
FIELD(TBFLAG_A64, MTE_STORE_ONLY, 45, 1)
FIELD(TBFLAG_A64, MTE0_STORE_ONLY, 46, 1)
+FIELD(TBFLAG_A64, MTX, 47, 2)
/*
* Helpers for using the above. Note that only the A64 accessors use
diff --git a/target/arm/internals.h b/target/arm/internals.h
index a45119caa2..52597a351c 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -1630,6 +1630,12 @@ static inline bool mtx_check(uint32_t desc, int bit55)
return (desc >> (R_MTEDESC_MTX_SHIFT + bit55)) & 1;
}
+/* Return whether or not the second nibble of a VA matches bit 55. */
+static inline bool tag_is_canonical(int ptr_tag, int bit55)
+{
+ return ((ptr_tag + bit55) & 0xf) == 0;
+}
+
/* Return true if tcma bits mean that the access is unchecked. */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
@@ -1637,11 +1643,34 @@ static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
* We had extracted bit55 and ptr_tag for other reasons, so fold
* (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
*/
- bool match = ((ptr_tag + bit55) & 0xf) == 0;
+ bool match = tag_is_canonical(ptr_tag, bit55);
bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
return tcma && match;
}
+/* Return true if Canonical Tagging is enabled. */
+static inline bool canonical_tagging_enabled(CPUARMState *env, bool selector)
+{
+ int mmu_idx;
+ uint64_t tcr, mtx_bit;
+
+ /* If mte4 is not implemented, then mtx is by definition not enabled */
+ if (!cpu_isar_feature(aa64_mte_mtx, env_archcpu(env))) {
+ return false;
+ }
+
+ mmu_idx = arm_mmu_idx_el(env, arm_current_el(env));
+ tcr = regime_tcr(env, mmu_idx);
+
+ /*
+ * In two-range regimes, mtx is governed by bit 60 or 61 of TCR, and in
+ * one-range regimes, bit 33 is used.
+ */
+ mtx_bit = regime_has_2_ranges(mmu_idx) ? 60 + selector : 33;
+
+ return extract64(tcr, mtx_bit, 1);
+}
+
/*
* For TBI, ideally, we would do nothing. Proper behaviour on fault is
* for the tag to be present in the FAR_ELx register. But for user-only
diff --git a/target/arm/tcg/hflags.c b/target/arm/tcg/hflags.c
index e753124c4c..40a934a8af 100644
--- a/target/arm/tcg/hflags.c
+++ b/target/arm/tcg/hflags.c
@@ -460,6 +460,10 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
}
/* Cache TCMA as well as TBI. */
DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx));
+ /* Cache MTX. */
+ if (cpu_isar_feature(aa64_mte_mtx, env_archcpu(env))) {
+ DP_TBFLAG_A64(flags, MTX, mtx);
+ }
}
if (cpu_isar_feature(aa64_gcs, env_archcpu(env))) {
diff --git a/target/arm/tcg/mte_helper.c b/target/arm/tcg/mte_helper.c
index 1484087a19..b54fbd11c0 100644
--- a/target/arm/tcg/mte_helper.c
+++ b/target/arm/tcg/mte_helper.c
@@ -854,6 +854,13 @@ static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, sizem1 + 1,
MMU_DATA_LOAD, ra);
if (!mem1) {
+ /*
+ * If mtx is enabled, then the access is MemTag_CanonicallyTagged,
+ * otherwise it is Untagged. See AArch64.CheckTag.
+ */
+ if (mtx_check(desc, bit55)) {
+ return tag_is_canonical(ptr_tag, bit55);
+ }
return 1;
}
/* Perform all of the comparisons. */
@@ -867,6 +874,12 @@ static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
ptr_last - next_page + 1,
MMU_DATA_LOAD, ra);
+ /* If either region is canonically tagged, do a canonical tag check */
+ if (mtx_check(desc, bit55) && (!mem1 || !mem2)
+ && (!tag_is_canonical(ptr_tag, bit55))) {
+ return 0;
+ }
+
/*
* Perform all of the comparisons.
* Note the possible but unlikely case of the operation spanning
@@ -974,6 +987,7 @@ uint64_t HELPER(mte_check_zva)(CPUARMState *env, uint32_t desc, uint64_t ptr)
goto done;
}
+
/*
* In arm_cpu_realizefn, we asserted that dcz > LOG2_TAG_GRANULE+1,
* i.e. 32 bytes, which is an unreasonably small dcz anyway, to make
@@ -995,6 +1009,13 @@ uint64_t HELPER(mte_check_zva)(CPUARMState *env, uint32_t desc, uint64_t ptr)
mem = allocation_tag_mem(env, mmu_idx, align_ptr, MMU_DATA_STORE,
dcz_bytes, MMU_DATA_LOAD, ra);
if (!mem) {
+ /*
+ * If mtx is enabled, then the access is MemTag_CanonicallyTagged,
+ * otherwise it is Untagged. See AArch64.CheckTag.
+ */
+ if (mtx_check(desc, bit55) && !tag_is_canonical(ptr_tag, bit55)) {
+ mte_check_fail(env, desc, ptr, ra);
+ }
goto done;
}
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index 874174a15b..366830f7f0 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -311,6 +311,7 @@ static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr,
desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
desc = FIELD_DP32(desc, MTEDESC, ALIGN, memop_alignment_bits(memop));
+ desc = FIELD_DP32(desc, MTEDESC, MTX, s->mtx);
desc = FIELD_DP32(desc, MTEDESC, SIZEM1, memop_size(memop) - 1);
ret = tcg_temp_new_i64();
@@ -344,6 +345,7 @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
desc = FIELD_DP32(desc, MTEDESC, ALIGN, memop_alignment_bits(single_mop));
+ desc = FIELD_DP32(desc, MTEDESC, MTX, s->mtx);
desc = FIELD_DP32(desc, MTEDESC, SIZEM1, total_size - 1);
ret = tcg_temp_new_i64();
@@ -3002,6 +3004,7 @@ static void handle_sys(DisasContext *s, bool isread,
desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
+ desc = FIELD_DP32(desc, MTEDESC, MTX, s->mtx);
tcg_rt = tcg_temp_new_i64();
gen_helper_mte_check_zva(tcg_rt, tcg_env,
@@ -4872,6 +4875,7 @@ static bool do_SET(DisasContext *s, arg_set *a, bool is_epilogue,
desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
desc = FIELD_DP32(desc, MTEDESC, WRITE, true);
+ desc = FIELD_DP32(desc, MTEDESC, MTX, s->mtx);
/* SIZEM1 and ALIGN we leave 0 (byte write) */
}
/* The helper function always needs the memidx even with MTE disabled */
@@ -4926,11 +4930,13 @@ static bool do_CPY(DisasContext *s, arg_cpy *a, bool is_epilogue, CpyFn fn)
if (s->mte_active[runpriv]) {
rdesc = FIELD_DP32(rdesc, MTEDESC, TBI, s->tbid);
rdesc = FIELD_DP32(rdesc, MTEDESC, TCMA, s->tcma);
+ rdesc = FIELD_DP32(rdesc, MTEDESC, MTX, s->mtx);
}
if (s->mte_active[wunpriv]) {
wdesc = FIELD_DP32(wdesc, MTEDESC, TBI, s->tbid);
wdesc = FIELD_DP32(wdesc, MTEDESC, TCMA, s->tcma);
wdesc = FIELD_DP32(wdesc, MTEDESC, WRITE, true);
+ wdesc = FIELD_DP32(wdesc, MTEDESC, MTX, s->mtx);
}
/* The helper function needs these parts of the descriptor regardless */
rdesc = FIELD_DP32(rdesc, MTEDESC, MIDX, rmemidx);
@@ -10700,6 +10706,7 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE);
dc->mte_store_only[0] = EX_TBFLAG_A64(tb_flags, MTE_STORE_ONLY);
dc->mte_store_only[1] = EX_TBFLAG_A64(tb_flags, MTE0_STORE_ONLY);
+ dc->mtx = EX_TBFLAG_A64(tb_flags, MTX);
dc->pstate_sm = EX_TBFLAG_A64(tb_flags, PSTATE_SM);
dc->pstate_za = EX_TBFLAG_A64(tb_flags, PSTATE_ZA);
dc->sme_trap_nonstreaming = EX_TBFLAG_A64(tb_flags, SME_TRAP_NONSTREAMING);
diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h
index 74143161f4..846e383c70 100644
--- a/target/arm/tcg/translate.h
+++ b/target/arm/tcg/translate.h
@@ -82,6 +82,7 @@ typedef struct DisasContext {
uint8_t tbii; /* TBI1|TBI0 for insns */
uint8_t tbid; /* TBI1|TBI0 for data */
uint8_t tcma; /* TCMA1|TCMA0 for MTE */
+ uint8_t mtx; /* MTX1|MTX0 for MTE */
bool ns; /* Use non-secure CPREG bank on access */
int fp_excp_el; /* FP exception EL or 0 if enabled */
int sve_excp_el; /* SVE exception EL or 0 if enabled */
--
2.52.0
next prev parent reply other threads:[~2026-03-09 22:02 UTC|newest]
Thread overview: 23+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-09 21:59 [PATCH v4 00/13] target/arm: add support for MTE4 Gabriel Brookman
2026-03-09 21:59 ` [PATCH v4 01/13] target/arm: implement MTE_PERM Gabriel Brookman
2026-04-04 23:17 ` Richard Henderson
2026-03-09 21:59 ` [PATCH v4 02/13] target/arm: add TCSO bitmasks to SCTLR Gabriel Brookman
2026-04-04 23:27 ` Richard Henderson
2026-03-09 21:59 ` [PATCH v4 03/13] target/arm: mte_check unemitted on STORE_ONLY load Gabriel Brookman
2026-04-04 23:37 ` Richard Henderson
2026-03-09 21:59 ` [PATCH v4 04/13] linux-user: add MTE_STORE_ONLY to prctl Gabriel Brookman
2026-04-04 23:39 ` Richard Henderson
2026-03-09 21:59 ` [PATCH v4 05/13] target/arm: tag check emitted when MTX and not TBI Gabriel Brookman
2026-04-05 0:31 ` Richard Henderson
2026-03-09 21:59 ` Gabriel Brookman [this message]
2026-04-05 21:46 ` [PATCH v4 06/13] target/arm: add canonical tag check logic Richard Henderson
2026-03-09 21:59 ` [PATCH v4 07/13] target/arm: ldg on canonical tag loads the tag Gabriel Brookman
2026-04-05 22:20 ` Richard Henderson
2026-03-09 21:59 ` [PATCH v4 08/13] target/arm: storing to canonical tag faults Gabriel Brookman
2026-04-05 22:37 ` Richard Henderson
2026-03-09 21:59 ` [PATCH v4 09/13] target/arm: with MTX, no tag bit bounds check Gabriel Brookman
2026-03-09 21:59 ` [PATCH v4 10/13] target/arm: with MTX, tag is not a part of PAuth Gabriel Brookman
2026-03-09 21:59 ` [PATCH v4 11/13] docs: add MTE4 features to docs Gabriel Brookman
2026-03-09 21:59 ` [PATCH v4 12/13] tests/tcg: add test for MTE FAR Gabriel Brookman
2026-03-09 21:59 ` [PATCH v4 13/13] tests/tcg: add test for MTE_STORE_ONLY Gabriel Brookman
2026-04-04 1:20 ` [PATCH v4 00/13] target/arm: add support for MTE4 Gabriel Brookman
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260309-feat-mte4-v4-6-daaf0375620d@gmail.com \
--to=brookmangabriel@gmail.com \
--cc=gustavo.romero@linaro.org \
--cc=laurent@vivier.eu \
--cc=peter.maydell@linaro.org \
--cc=pierrick.bouvier@linaro.org \
--cc=qemu-arm@nongnu.org \
--cc=qemu-devel@nongnu.org \
--cc=richard.henderson@linaro.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.