All of lore.kernel.org
 help / color / mirror / Atom feed
From: Gabriel Brookman <brookmangabriel@gmail.com>
To: qemu-devel@nongnu.org
Cc: Peter Maydell <peter.maydell@linaro.org>,
	 Gustavo Romero <gustavo.romero@linaro.org>,
	 Richard Henderson <richard.henderson@linaro.org>,
	qemu-arm@nongnu.org,  Laurent Vivier <laurent@vivier.eu>,
	 Pierrick Bouvier <pierrick.bouvier@linaro.org>,
	 Gabriel Brookman <brookmangabriel@gmail.com>
Subject: [PATCH v4 05/13] target/arm: tag check emitted when MTX and not TBI
Date: Mon, 09 Mar 2026 17:59:37 -0400	[thread overview]
Message-ID: <20260309-feat-mte4-v4-5-daaf0375620d@gmail.com> (raw)
In-Reply-To: <20260309-feat-mte4-v4-0-daaf0375620d@gmail.com>

Previously, the TBI bit was used to mediate whether tag checks happened.
With MTE4, if the MTX bits are enabled, then tag checking happens even
if TBI is disabled. See AccessIsTagChecked.

Signed-off-by: Gabriel Brookman <brookmangabriel@gmail.com>
---
 target/arm/helper.c         | 10 ++++++++++
 target/arm/internals.h      | 10 +++++++++-
 target/arm/tcg/helper-a64.c |  9 +++++----
 target/arm/tcg/hflags.c     |  9 +++++----
 target/arm/tcg/mte_helper.c |  9 ++++++---
 5 files changed, 35 insertions(+), 12 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index 987539524a..56858367fd 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -9613,6 +9613,16 @@ uint64_t arm_sctlr(CPUARMState *env, int el)
     return env->cp15.sctlr_el[el];
 }
 
+int aa64_va_parameter_mtx(uint64_t tcr, ARMMMUIdx mmu_idx)
+{
+    if (regime_has_2_ranges(mmu_idx)) {
+        return extract64(tcr, 60, 2);
+    } else {
+        /* Replicate the single MTX bit so we always have 2 bits.  */
+        return extract64(tcr, 33, 1) * 3;
+    }
+}
+
 int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx)
 {
     if (regime_has_2_ranges(mmu_idx)) {
diff --git a/target/arm/internals.h b/target/arm/internals.h
index 8ec2750847..a45119caa2 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -1411,6 +1411,7 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                    ARMMMUIdx mmu_idx, bool data,
                                    bool el1_is_aa32);
 
+int aa64_va_parameter_mtx(uint64_t tcr, ARMMMUIdx mmu_idx);
 int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
 int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx);
 int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx);
@@ -1546,7 +1547,8 @@ FIELD(MTEDESC, TBI,   4, 2)
 FIELD(MTEDESC, TCMA,  6, 2)
 FIELD(MTEDESC, WRITE, 8, 1)
 FIELD(MTEDESC, ALIGN, 9, 3)
-FIELD(MTEDESC, SIZEM1, 12, 32 - 12)  /* size - 1 */
+FIELD(MTEDESC, MTX,   12, 2)
+FIELD(MTEDESC, SIZEM1, 14, 32 - 14)  /* size - 1 */
 
 bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
 uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);
@@ -1622,6 +1624,12 @@ static inline bool tbi_check(uint32_t desc, int bit55)
     return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
 }
 
+/* Return true if mtx bits mean that the access is canonically checked.  */
+static inline bool mtx_check(uint32_t desc, int bit55)
+{
+    return (desc >> (R_MTEDESC_MTX_SHIFT + bit55)) & 1;
+}
+
 /* Return true if tcma bits mean that the access is unchecked.  */
 static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
 {
diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c
index 2dec587d38..5f739d999c 100644
--- a/target/arm/tcg/helper-a64.c
+++ b/target/arm/tcg/helper-a64.c
@@ -1054,7 +1054,7 @@ static int mops_sizereg(uint32_t syndrome)
 }
 
 /*
- * Return true if TCMA and TBI bits mean we need to do MTE checks.
+ * Return true if the TCMA, TBI, and MTX bits mean we need to do MTE checks.
  * We only need to do this once per MOPS insn, not for every page.
  */
 static bool mte_checks_needed(uint64_t ptr, uint32_t desc)
@@ -1062,12 +1062,13 @@ static bool mte_checks_needed(uint64_t ptr, uint32_t desc)
     int bit55 = extract64(ptr, 55, 1);
 
     /*
-     * Note that tbi_check() returns true for "access checked" but
-     * tcma_check() returns true for "access unchecked".
+     * Note that tbi_check() and mtx_check() return true for "access checked",
+     * but tcma_check() returns true for "access unchecked".
      */
-    if (!tbi_check(desc, bit55)) {
+    if (!tbi_check(desc, bit55) && !mtx_check(desc, bit55)) {
         return false;
     }
+
     return !tcma_check(desc, bit55, allocation_tag_from_addr(ptr));
 }
 
diff --git a/target/arm/tcg/hflags.c b/target/arm/tcg/hflags.c
index 75c55b1a6d..e753124c4c 100644
--- a/target/arm/tcg/hflags.c
+++ b/target/arm/tcg/hflags.c
@@ -245,13 +245,14 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
     uint64_t tcr = regime_tcr(env, mmu_idx);
     uint64_t hcr = arm_hcr_el2_eff(env);
     uint64_t sctlr;
-    int tbii, tbid;
+    int tbii, tbid, mtx;
 
     DP_TBFLAG_ANY(flags, AARCH64_STATE, 1);
 
     /* Get control bits for tagged addresses.  */
     tbid = aa64_va_parameter_tbi(tcr, mmu_idx);
     tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx);
+    mtx = aa64_va_parameter_mtx(tcr, mmu_idx);
 
     DP_TBFLAG_A64(flags, TBII, tbii);
     DP_TBFLAG_A64(flags, TBID, tbid);
@@ -403,14 +404,14 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
         /*
          * Set MTE_ACTIVE if any access may be Checked, and leave clear
          * if all accesses must be Unchecked:
-         * 1) If no TBI, then there are no tags in the address to check,
+         * 1) If TBI and MTX are both unset, accesses are Unchecked,
          * 2) If Tag Check Override, then all accesses are Unchecked,
          * 3) If Tag Check Fail == 0, then Checked access have no effect,
          * 4) If no Allocation Tag Access, then all accesses are Unchecked.
          */
         if (allocation_tag_access_enabled(env, el, sctlr)) {
             DP_TBFLAG_A64(flags, ATA, 1);
-            if (tbid
+            if ((tbid || mtx)
                 && !(env->pstate & PSTATE_TCO)
                 && (sctlr & (el == 0 ? SCTLR_TCF0 : SCTLR_TCF))) {
                 DP_TBFLAG_A64(flags, MTE_ACTIVE, 1);
@@ -436,7 +437,7 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
         }
         /* And again for unprivileged accesses, if required.  */
         if (EX_TBFLAG_A64(flags, UNPRIV)
-            && tbid
+            && (tbid || mtx)
             && !(env->pstate & PSTATE_TCO)
             && (sctlr & SCTLR_TCF0)
             && allocation_tag_access_enabled(env, 0, sctlr)) {
diff --git a/target/arm/tcg/mte_helper.c b/target/arm/tcg/mte_helper.c
index 4deec80208..1484087a19 100644
--- a/target/arm/tcg/mte_helper.c
+++ b/target/arm/tcg/mte_helper.c
@@ -819,8 +819,11 @@ static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
     bit55 = extract64(ptr, 55, 1);
     *fault = ptr;
 
-    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
-    if (unlikely(!tbi_check(desc, bit55))) {
+    /*
+     * If TBI and MTX are disabled, the access is unchecked, and ptr is not
+     * dirty.
+     */
+    if (unlikely(!tbi_check(desc, bit55) && !mtx_check(desc, bit55))) {
         return -1;
     }
 
@@ -961,7 +964,7 @@ uint64_t HELPER(mte_check_zva)(CPUARMState *env, uint32_t desc, uint64_t ptr)
     bit55 = extract64(ptr, 55, 1);
 
-    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
-    if (unlikely(!tbi_check(desc, bit55))) {
+    /* If TBI and MTX are disabled, the access is unchecked; ptr not dirty. */
+    if (unlikely(!tbi_check(desc, bit55) && !mtx_check(desc, bit55))) {
         return ptr;
     }
 

-- 
2.52.0



  parent reply	other threads:[~2026-03-09 22:02 UTC|newest]

Thread overview: 23+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-03-09 21:59 [PATCH v4 00/13] target/arm: add support for MTE4 Gabriel Brookman
2026-03-09 21:59 ` [PATCH v4 01/13] target/arm: implement MTE_PERM Gabriel Brookman
2026-04-04 23:17   ` Richard Henderson
2026-03-09 21:59 ` [PATCH v4 02/13] target/arm: add TCSO bitmasks to SCTLR Gabriel Brookman
2026-04-04 23:27   ` Richard Henderson
2026-03-09 21:59 ` [PATCH v4 03/13] target/arm: mte_check unemitted on STORE_ONLY load Gabriel Brookman
2026-04-04 23:37   ` Richard Henderson
2026-03-09 21:59 ` [PATCH v4 04/13] linux-user: add MTE_STORE_ONLY to prctl Gabriel Brookman
2026-04-04 23:39   ` Richard Henderson
2026-03-09 21:59 ` Gabriel Brookman [this message]
2026-04-05  0:31   ` [PATCH v4 05/13] target/arm: tag check emitted when MTX and not TBI Richard Henderson
2026-03-09 21:59 ` [PATCH v4 06/13] target/arm: add canonical tag check logic Gabriel Brookman
2026-04-05 21:46   ` Richard Henderson
2026-03-09 21:59 ` [PATCH v4 07/13] target/arm: ldg on canonical tag loads the tag Gabriel Brookman
2026-04-05 22:20   ` Richard Henderson
2026-03-09 21:59 ` [PATCH v4 08/13] target/arm: storing to canonical tag faults Gabriel Brookman
2026-04-05 22:37   ` Richard Henderson
2026-03-09 21:59 ` [PATCH v4 09/13] target/arm: with MTX, no tag bit bounds check Gabriel Brookman
2026-03-09 21:59 ` [PATCH v4 10/13] target/arm: with MTX, tag is not a part of PAuth Gabriel Brookman
2026-03-09 21:59 ` [PATCH v4 11/13] docs: add MTE4 features to docs Gabriel Brookman
2026-03-09 21:59 ` [PATCH v4 12/13] tests/tcg: add test for MTE FAR Gabriel Brookman
2026-03-09 21:59 ` [PATCH v4 13/13] tests/tcg: add test for MTE_STORE_ONLY Gabriel Brookman
2026-04-04  1:20 ` [PATCH v4 00/13] target/arm: add support for MTE4 Gabriel Brookman

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260309-feat-mte4-v4-5-daaf0375620d@gmail.com \
    --to=brookmangabriel@gmail.com \
    --cc=gustavo.romero@linaro.org \
    --cc=laurent@vivier.eu \
    --cc=peter.maydell@linaro.org \
    --cc=pierrick.bouvier@linaro.org \
    --cc=qemu-arm@nongnu.org \
    --cc=qemu-devel@nongnu.org \
    --cc=richard.henderson@linaro.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.