From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: qemu-arm@nongnu.org, Pierrick Bouvier <pierrick.bouvier@linaro.org>
Subject: [PATCH v7 27/73] target/arm: Introduce mmu indexes for GCS
Date: Wed, 8 Oct 2025 14:55:27 -0700
Message-ID: <20251008215613.300150-28-richard.henderson@linaro.org>
In-Reply-To: <20251008215613.300150-1-richard.henderson@linaro.org>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
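As a quick cross-check of the index-encoding change (not part of the patch, and not using any QEMU headers): with 22 A-profile indexes the core index needs five bits, so the core-index mask grows from 0xf to 0x1f and the ARM_MMU_IDX_A/NOTLB/M type flags move up to 0x20/0x40/0x80. A minimal standalone sketch, with the constant values copied from the hunks below:

/* Standalone illustration: the core-index field must now hold values
 * up to 21 (Phys_Realm), so the type flags sit at bit 5 and above. */
#include <assert.h>
#include <stdio.h>

#define ARM_MMU_IDX_A            0x20  /* A profile */
#define ARM_MMU_IDX_NOTLB        0x40  /* does not have a TLB */
#define ARM_MMU_IDX_M            0x80  /* M profile */
#define ARM_MMU_IDX_COREIDX_MASK 0x1f

enum { NB_MMU_MODES = 22 };

int main(void)
{
    /* The highest allocated A-profile core index is 21 (Phys_Realm). */
    int highest = 21 | ARM_MMU_IDX_A;

    /* Core-index extraction strips the type flag. */
    assert((highest & ARM_MMU_IDX_COREIDX_MASK) == 21);
    assert((highest & ARM_MMU_IDX_COREIDX_MASK) < NB_MMU_MODES);

    /* Type flags stay clear of the 5-bit core-index field. */
    assert((ARM_MMU_IDX_A & ARM_MMU_IDX_COREIDX_MASK) == 0);
    assert((ARM_MMU_IDX_NOTLB & ARM_MMU_IDX_COREIDX_MASK) == 0);
    assert((ARM_MMU_IDX_M & ARM_MMU_IDX_COREIDX_MASK) == 0);

    printf("core index of Phys_Realm: %d\n",
           highest & ARM_MMU_IDX_COREIDX_MASK);
    return 0;
}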
include/hw/core/cpu.h | 5 ++-
target/arm/mmuidx-internal.h | 8 ++++
target/arm/mmuidx.h | 71 ++++++++++++++++++++++++------------
target/arm/helper.c | 15 ++++++--
target/arm/mmuidx.c | 9 +++++
target/arm/ptw.c | 20 ++++++++++
target/arm/tcg/tlb-insns.c | 47 ++++++++++++++++--------
7 files changed, 132 insertions(+), 43 deletions(-)
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index 60d3f01900..bef702799a 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -198,9 +198,10 @@ struct CPUClass {
};
/*
- * Fix the number of mmu modes to 16.
+ * Fix the number of mmu modes across all targets.
+ * Current maximum is target/arm/.
*/
-#define NB_MMU_MODES 16
+#define NB_MMU_MODES 22
typedef uint32_t MMUIdxMap;
/* Use a fully associative victim tlb of 8 entries. */
diff --git a/target/arm/mmuidx-internal.h b/target/arm/mmuidx-internal.h
index 1d948aa6f4..f494ec348d 100644
--- a/target/arm/mmuidx-internal.h
+++ b/target/arm/mmuidx-internal.h
@@ -20,6 +20,7 @@ FIELD(MMUIDXINFO, PAN, 7, 1)
FIELD(MMUIDXINFO, USER, 8, 1)
FIELD(MMUIDXINFO, STAGE1, 9, 1)
FIELD(MMUIDXINFO, STAGE2, 10, 1)
+FIELD(MMUIDXINFO, GCS, 11, 1)
extern const uint32_t arm_mmuidx_table[ARM_MMU_IDX_M + 8];
@@ -92,4 +93,11 @@ static inline bool regime_is_stage2(ARMMMUIdx idx)
return FIELD_EX32(arm_mmuidx_table[idx], MMUIDXINFO, STAGE2);
}
+/* Return true if this mmu index implies AccessType_GCS. */
+static inline bool regime_is_gcs(ARMMMUIdx idx)
+{
+ tcg_debug_assert(arm_mmuidx_is_valid(idx));
+ return FIELD_EX32(arm_mmuidx_table[idx], MMUIDXINFO, GCS);
+}
+
#endif /* TARGET_ARM_MMUIDX_INTERNAL_H */
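For reference, the new GCS attribute is just bit 11 of the per-index descriptor word, so regime_is_gcs() reduces to one table load and a mask. A standalone sketch of that extraction, re-implementing the FIELD_EX32 lookup by hand rather than pulling in QEMU's registerfields.h; the two-entry table is illustrative only (the real arm_mmuidx_table is filled in mmuidx.c below):

/* Standalone sketch: GCS occupies bit 11 of the descriptor word,
 * as declared by FIELD(MMUIDXINFO, GCS, 11, 1) above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MMUIDXINFO_GCS_SHIFT 11
#define MMUIDXINFO_GCS_MASK  (1u << MMUIDXINFO_GCS_SHIFT)

static const uint32_t demo_table[2] = {
    [0] = 0,                    /* e.g. a plain load/store index */
    [1] = MMUIDXINFO_GCS_MASK,  /* e.g. its _GCS counterpart */
};

/* Mirrors regime_is_gcs(): one load, one mask, no switch. */
static bool demo_regime_is_gcs(int idx)
{
    return demo_table[idx] & MMUIDXINFO_GCS_MASK;
}

int main(void)
{
    printf("idx 0: %d, idx 1: %d\n",
           demo_regime_is_gcs(0), demo_regime_is_gcs(1));
    return 0;
}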
diff --git a/target/arm/mmuidx.h b/target/arm/mmuidx.h
index 5b9b4bc84f..8d8d27337e 100644
--- a/target/arm/mmuidx.h
+++ b/target/arm/mmuidx.h
@@ -58,24 +58,31 @@
* already heavyweight.
* 8. we cannot fold together Stage 2 Secure and Stage 2 NonSecure,
* because both are in use simultaneously for Secure EL2.
+ * 9. we need separate indexes for handling AccessType_GCS.
*
* This gives us the following list of cases:
*
* EL0 EL1&0 stage 1+2 (aka NS PL0 PL1&0 stage 1+2)
+ * EL0 EL1&0 stage 1+2 +GCS
* EL1 EL1&0 stage 1+2 (aka NS PL1 PL1&0 stage 1+2)
* EL1 EL1&0 stage 1+2 +PAN (aka NS PL1 P1&0 stage 1+2 +PAN)
+ * EL1 EL1&0 stage 1+2 +GCS
* EL0 EL2&0
+ * EL0 EL2&0 +GCS
* EL2 EL2&0
* EL2 EL2&0 +PAN
+ * EL2 EL2&0 +GCS
* EL2 (aka NS PL2)
+ * EL2 +GCS
* EL3 (aka AArch32 S PL1 PL1&0)
+ * EL3 +GCS
* AArch32 S PL0 PL1&0 (we call this EL30_0)
* AArch32 S PL1 PL1&0 +PAN (we call this EL30_3_PAN)
* Stage2 Secure
* Stage2 NonSecure
* plus one TLB per Physical address space: S, NS, Realm, Root
*
- * for a total of 16 different mmu_idx.
+ * for a total of 22 different mmu_idx.
*
* R profile CPUs have an MPU, but can use the same set of MMU indexes
* as A profile. They only need to distinguish EL0 and EL1 (and
@@ -114,9 +121,9 @@
* For M profile we arrange them to have a bit for priv, a bit for negpri
* and a bit for secure.
*/
-#define ARM_MMU_IDX_A 0x10 /* A profile */
-#define ARM_MMU_IDX_NOTLB 0x20 /* does not have a TLB */
-#define ARM_MMU_IDX_M 0x40 /* M profile */
+#define ARM_MMU_IDX_A 0x20 /* A profile */
+#define ARM_MMU_IDX_NOTLB 0x40 /* does not have a TLB */
+#define ARM_MMU_IDX_M 0x80 /* M profile */
/* Meanings of the bits for M profile mmu idx values */
#define ARM_MMU_IDX_M_PRIV 0x1
@@ -125,22 +132,32 @@
#define ARM_MMU_IDX_TYPE_MASK \
(ARM_MMU_IDX_A | ARM_MMU_IDX_M | ARM_MMU_IDX_NOTLB)
-#define ARM_MMU_IDX_COREIDX_MASK 0xf
+#define ARM_MMU_IDX_COREIDX_MASK 0x1f
typedef enum ARMMMUIdx {
/*
* A-profile.
*/
- ARMMMUIdx_E10_0 = 0 | ARM_MMU_IDX_A,
- ARMMMUIdx_E20_0 = 1 | ARM_MMU_IDX_A,
- ARMMMUIdx_E10_1 = 2 | ARM_MMU_IDX_A,
- ARMMMUIdx_E20_2 = 3 | ARM_MMU_IDX_A,
- ARMMMUIdx_E10_1_PAN = 4 | ARM_MMU_IDX_A,
- ARMMMUIdx_E20_2_PAN = 5 | ARM_MMU_IDX_A,
- ARMMMUIdx_E2 = 6 | ARM_MMU_IDX_A,
- ARMMMUIdx_E3 = 7 | ARM_MMU_IDX_A,
- ARMMMUIdx_E30_0 = 8 | ARM_MMU_IDX_A,
- ARMMMUIdx_E30_3_PAN = 9 | ARM_MMU_IDX_A,
+
+ ARMMMUIdx_E10_0 = 0 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E10_0_GCS = 1 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E10_1 = 2 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E10_1_PAN = 3 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E10_1_GCS = 4 | ARM_MMU_IDX_A,
+
+ ARMMMUIdx_E20_0 = 5 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E20_0_GCS = 6 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E20_2 = 7 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E20_2_PAN = 8 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E20_2_GCS = 9 | ARM_MMU_IDX_A,
+
+ ARMMMUIdx_E2 = 10 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E2_GCS = 11 | ARM_MMU_IDX_A,
+
+ ARMMMUIdx_E3 = 12 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E3_GCS = 13 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E30_0 = 14 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E30_3_PAN = 15 | ARM_MMU_IDX_A,
/*
* Used for second stage of an S12 page table walk, or for descriptor
@@ -148,14 +165,14 @@ typedef enum ARMMMUIdx {
* are in use simultaneously for SecureEL2: the security state for
* the S2 ptw is selected by the NS bit from the S1 ptw.
*/
- ARMMMUIdx_Stage2_S = 10 | ARM_MMU_IDX_A,
- ARMMMUIdx_Stage2 = 11 | ARM_MMU_IDX_A,
+ ARMMMUIdx_Stage2_S = 16 | ARM_MMU_IDX_A,
+ ARMMMUIdx_Stage2 = 17 | ARM_MMU_IDX_A,
/* TLBs with 1-1 mapping to the physical address spaces. */
- ARMMMUIdx_Phys_S = 12 | ARM_MMU_IDX_A,
- ARMMMUIdx_Phys_NS = 13 | ARM_MMU_IDX_A,
- ARMMMUIdx_Phys_Root = 14 | ARM_MMU_IDX_A,
- ARMMMUIdx_Phys_Realm = 15 | ARM_MMU_IDX_A,
+ ARMMMUIdx_Phys_S = 18 | ARM_MMU_IDX_A,
+ ARMMMUIdx_Phys_NS = 19 | ARM_MMU_IDX_A,
+ ARMMMUIdx_Phys_Root = 20 | ARM_MMU_IDX_A,
+ ARMMMUIdx_Phys_Realm = 21 | ARM_MMU_IDX_A,
/*
* These are not allocated TLBs and are used only for AT system
@@ -164,6 +181,8 @@ typedef enum ARMMMUIdx {
ARMMMUIdx_Stage1_E0 = 0 | ARM_MMU_IDX_NOTLB,
ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB,
ARMMMUIdx_Stage1_E1_PAN = 2 | ARM_MMU_IDX_NOTLB,
+ ARMMMUIdx_Stage1_E0_GCS = 3 | ARM_MMU_IDX_NOTLB,
+ ARMMMUIdx_Stage1_E1_GCS = 4 | ARM_MMU_IDX_NOTLB,
/*
* M-profile.
@@ -187,13 +206,19 @@ typedef enum ARMMMUIdx {
typedef enum ARMMMUIdxBit {
TO_CORE_BIT(E10_0),
- TO_CORE_BIT(E20_0),
+ TO_CORE_BIT(E10_0_GCS),
TO_CORE_BIT(E10_1),
TO_CORE_BIT(E10_1_PAN),
- TO_CORE_BIT(E2),
+ TO_CORE_BIT(E10_1_GCS),
+ TO_CORE_BIT(E20_0),
+ TO_CORE_BIT(E20_0_GCS),
TO_CORE_BIT(E20_2),
TO_CORE_BIT(E20_2_PAN),
+ TO_CORE_BIT(E20_2_GCS),
+ TO_CORE_BIT(E2),
+ TO_CORE_BIT(E2_GCS),
TO_CORE_BIT(E3),
+ TO_CORE_BIT(E3_GCS),
TO_CORE_BIT(E30_0),
TO_CORE_BIT(E30_3_PAN),
TO_CORE_BIT(Stage2),
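The ARMMMUIdxBit_* values above are one-hot masks derived from the core index (assuming the existing TO_CORE_BIT definition, 1 << core index), which is what lets the TLB-flush paths below simply OR the plain and _GCS variants together, as vae3_tlbmask() does for EL3. A small standalone sketch of that pattern, with the bit positions taken from the renumbered enum above:

/* Standalone sketch: with one-hot ARMMMUIdxBit_* values, the flush
 * mask for the EL3 regime plus its GCS variant is the OR of two bits.
 * Core indexes 12 and 13 are taken from the renumbered enum above. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

enum {
    DEMO_MMUIdxBit_E3     = 1 << 12,
    DEMO_MMUIdxBit_E3_GCS = 1 << 13,
};

/* Mirrors vae3_tlbmask() from the tlb-insns.c hunk below. */
static int demo_vae3_tlbmask(void)
{
    return DEMO_MMUIdxBit_E3 | DEMO_MMUIdxBit_E3_GCS;
}

int main(void)
{
    uint32_t mask = demo_vae3_tlbmask();

    /* Two TLBs flushed, and both bits fit in the 22 allocated modes. */
    assert(__builtin_popcount(mask) == 2);
    assert(mask < (1u << 22));

    printf("EL3 flush mask: 0x%x\n", mask);
    return 0;
}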
diff --git a/target/arm/helper.c b/target/arm/helper.c
index aed245fd86..6642cae0cc 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -420,7 +420,9 @@ int alle1_tlbmask(CPUARMState *env)
*/
return (ARMMMUIdxBit_E10_1 |
ARMMMUIdxBit_E10_1_PAN |
+ ARMMMUIdxBit_E10_1_GCS |
ARMMMUIdxBit_E10_0 |
+ ARMMMUIdxBit_E10_0_GCS |
ARMMMUIdxBit_Stage2 |
ARMMMUIdxBit_Stage2_S);
}
@@ -808,12 +810,17 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
*/
if (changed & (SCR_NS | SCR_NSE)) {
tlb_flush_by_mmuidx(env_cpu(env), (ARMMMUIdxBit_E10_0 |
+ ARMMMUIdxBit_E10_0_GCS |
ARMMMUIdxBit_E20_0 |
+ ARMMMUIdxBit_E20_0_GCS |
ARMMMUIdxBit_E10_1 |
- ARMMMUIdxBit_E20_2 |
ARMMMUIdxBit_E10_1_PAN |
+ ARMMMUIdxBit_E10_1_GCS |
+ ARMMMUIdxBit_E20_2 |
ARMMMUIdxBit_E20_2_PAN |
- ARMMMUIdxBit_E2));
+ ARMMMUIdxBit_E20_2_GCS |
+ ARMMMUIdxBit_E2 |
+ ARMMMUIdxBit_E2_GCS));
}
}
@@ -2787,7 +2794,9 @@ static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
(arm_hcr_el2_eff(env) & HCR_E2H)) {
uint16_t mask = ARMMMUIdxBit_E20_2 |
ARMMMUIdxBit_E20_2_PAN |
- ARMMMUIdxBit_E20_0;
+ ARMMMUIdxBit_E20_2_GCS |
+ ARMMMUIdxBit_E20_0 |
+ ARMMMUIdxBit_E20_0_GCS;
tlb_flush_by_mmuidx(env_cpu(env), mask);
}
raw_write(env, ri, value);
diff --git a/target/arm/mmuidx.c b/target/arm/mmuidx.c
index 61a682e655..42b003db9c 100644
--- a/target/arm/mmuidx.c
+++ b/target/arm/mmuidx.c
@@ -15,22 +15,29 @@
#define USER R_MMUIDXINFO_USER_MASK
#define S1 R_MMUIDXINFO_STAGE1_MASK
#define S2 R_MMUIDXINFO_STAGE2_MASK
+#define GCS R_MMUIDXINFO_GCS_MASK
const uint32_t arm_mmuidx_table[ARM_MMU_IDX_M + 8] = {
/*
* A-profile.
*/
[ARMMMUIdx_E10_0] = EL(0) | REL(1) | R2,
+ [ARMMMUIdx_E10_0_GCS] = EL(0) | REL(1) | R2 | GCS,
[ARMMMUIdx_E10_1] = EL(1) | REL(1) | R2,
[ARMMMUIdx_E10_1_PAN] = EL(1) | REL(1) | R2 | PAN,
+ [ARMMMUIdx_E10_1_GCS] = EL(1) | REL(1) | R2 | GCS,
[ARMMMUIdx_E20_0] = EL(0) | REL(2) | R2,
+ [ARMMMUIdx_E20_0_GCS] = EL(0) | REL(2) | R2 | GCS,
[ARMMMUIdx_E20_2] = EL(2) | REL(2) | R2,
[ARMMMUIdx_E20_2_PAN] = EL(2) | REL(2) | R2 | PAN,
+ [ARMMMUIdx_E20_2_GCS] = EL(2) | REL(2) | R2 | GCS,
[ARMMMUIdx_E2] = EL(2) | REL(2),
+ [ARMMMUIdx_E2_GCS] = EL(2) | REL(2) | GCS,
[ARMMMUIdx_E3] = EL(3) | REL(3),
+ [ARMMMUIdx_E3_GCS] = EL(3) | REL(3) | GCS,
[ARMMMUIdx_E30_0] = EL(0) | REL(3),
[ARMMMUIdx_E30_3_PAN] = EL(3) | REL(3) | PAN,
@@ -38,8 +45,10 @@ const uint32_t arm_mmuidx_table[ARM_MMU_IDX_M + 8] = {
[ARMMMUIdx_Stage2] = REL(2) | S2,
[ARMMMUIdx_Stage1_E0] = REL(1) | R2 | S1 | USER,
+ [ARMMMUIdx_Stage1_E0_GCS] = REL(1) | R2 | S1 | USER | GCS,
[ARMMMUIdx_Stage1_E1] = REL(1) | R2 | S1,
[ARMMMUIdx_Stage1_E1_PAN] = REL(1) | R2 | S1 | PAN,
+ [ARMMMUIdx_Stage1_E1_GCS] = REL(1) | R2 | S1 | GCS,
/*
* M-profile.
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index d6d4bf3b23..6c52ed1ad0 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -169,6 +169,10 @@ ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
return ARMMMUIdx_Stage1_E1;
case ARMMMUIdx_E10_1_PAN:
return ARMMMUIdx_Stage1_E1_PAN;
+ case ARMMMUIdx_E10_0_GCS:
+ return ARMMMUIdx_Stage1_E0_GCS;
+ case ARMMMUIdx_E10_1_GCS:
+ return ARMMMUIdx_Stage1_E1_GCS;
default:
return mmu_idx;
}
@@ -276,8 +280,10 @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
return (hcr_el2 & (HCR_DC | HCR_VM)) == 0;
case ARMMMUIdx_E10_0:
+ case ARMMMUIdx_E10_0_GCS:
case ARMMMUIdx_E10_1:
case ARMMMUIdx_E10_1_PAN:
+ case ARMMMUIdx_E10_1_GCS:
/* TGE means that EL0/1 act as if SCTLR_EL1.M is zero */
hcr_el2 = arm_hcr_el2_eff_secstate(env, space);
if (hcr_el2 & HCR_TGE) {
@@ -286,8 +292,10 @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
break;
case ARMMMUIdx_Stage1_E0:
+ case ARMMMUIdx_Stage1_E0_GCS:
case ARMMMUIdx_Stage1_E1:
case ARMMMUIdx_Stage1_E1_PAN:
+ case ARMMMUIdx_Stage1_E1_GCS:
/* HCR.DC means SCTLR_EL1.M behaves as 0 */
hcr_el2 = arm_hcr_el2_eff_secstate(env, space);
if (hcr_el2 & HCR_DC) {
@@ -296,10 +304,14 @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
break;
case ARMMMUIdx_E20_0:
+ case ARMMMUIdx_E20_0_GCS:
case ARMMMUIdx_E20_2:
case ARMMMUIdx_E20_2_PAN:
+ case ARMMMUIdx_E20_2_GCS:
case ARMMMUIdx_E2:
+ case ARMMMUIdx_E2_GCS:
case ARMMMUIdx_E3:
+ case ARMMMUIdx_E3_GCS:
case ARMMMUIdx_E30_0:
case ARMMMUIdx_E30_3_PAN:
break;
@@ -3799,15 +3811,22 @@ arm_mmu_idx_to_security_space(CPUARMState *env, ARMMMUIdx mmu_idx)
switch (mmu_idx) {
case ARMMMUIdx_E10_0:
+ case ARMMMUIdx_E10_0_GCS:
case ARMMMUIdx_E10_1:
case ARMMMUIdx_E10_1_PAN:
+ case ARMMMUIdx_E10_1_GCS:
case ARMMMUIdx_E20_0:
+ case ARMMMUIdx_E20_0_GCS:
case ARMMMUIdx_E20_2:
case ARMMMUIdx_E20_2_PAN:
+ case ARMMMUIdx_E20_2_GCS:
case ARMMMUIdx_Stage1_E0:
+ case ARMMMUIdx_Stage1_E0_GCS:
case ARMMMUIdx_Stage1_E1:
case ARMMMUIdx_Stage1_E1_PAN:
+ case ARMMMUIdx_Stage1_E1_GCS:
case ARMMMUIdx_E2:
+ case ARMMMUIdx_E2_GCS:
ss = arm_security_space_below_el3(env);
break;
case ARMMMUIdx_Stage2:
@@ -3836,6 +3855,7 @@ arm_mmu_idx_to_security_space(CPUARMState *env, ARMMMUIdx mmu_idx)
ss = ARMSS_Secure;
break;
case ARMMMUIdx_E3:
+ case ARMMMUIdx_E3_GCS:
case ARMMMUIdx_E30_0:
case ARMMMUIdx_E30_3_PAN:
if (arm_feature(env, ARM_FEATURE_AARCH64) &&
diff --git a/target/arm/tcg/tlb-insns.c b/target/arm/tcg/tlb-insns.c
index 95c26c6d46..1a0a332583 100644
--- a/target/arm/tcg/tlb-insns.c
+++ b/target/arm/tcg/tlb-insns.c
@@ -149,7 +149,8 @@ static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
CPUState *cs = env_cpu(env);
uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
- tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
+ tlb_flush_page_by_mmuidx(cs, pageaddr,
+ ARMMMUIdxBit_E2 | ARMMMUIdxBit_E2_GCS);
}
static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -159,7 +160,8 @@ static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
- ARMMMUIdxBit_E2);
+ ARMMMUIdxBit_E2 |
+ ARMMMUIdxBit_E2_GCS);
}
static void tlbiipas2_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -202,7 +204,7 @@ static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
CPUState *cs = env_cpu(env);
- tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
+ tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2 | ARMMMUIdxBit_E2_GCS);
}
static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -210,7 +212,8 @@ static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
CPUState *cs = env_cpu(env);
- tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
+ tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2 |
+ ARMMMUIdxBit_E2_GCS);
}
/*
@@ -228,12 +231,16 @@ static int vae1_tlbmask(CPUARMState *env)
if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
mask = ARMMMUIdxBit_E20_2 |
ARMMMUIdxBit_E20_2_PAN |
- ARMMMUIdxBit_E20_0;
+ ARMMMUIdxBit_E20_2_GCS |
+ ARMMMUIdxBit_E20_0 |
+ ARMMMUIdxBit_E20_0_GCS;
} else {
/* This is AArch64 only, so we don't need to touch the EL30_x TLBs */
mask = ARMMMUIdxBit_E10_1 |
ARMMMUIdxBit_E10_1_PAN |
- ARMMMUIdxBit_E10_0;
+ ARMMMUIdxBit_E10_1_GCS |
+ ARMMMUIdxBit_E10_0 |
+ ARMMMUIdxBit_E10_0_GCS;
}
return mask;
}
@@ -246,13 +253,20 @@ static int vae2_tlbmask(CPUARMState *env)
if (hcr & HCR_E2H) {
mask = ARMMMUIdxBit_E20_2 |
ARMMMUIdxBit_E20_2_PAN |
- ARMMMUIdxBit_E20_0;
+ ARMMMUIdxBit_E20_2_GCS |
+ ARMMMUIdxBit_E20_0 |
+ ARMMMUIdxBit_E20_0_GCS;
} else {
- mask = ARMMMUIdxBit_E2;
+ mask = ARMMMUIdxBit_E2 | ARMMMUIdxBit_E2_GCS;
}
return mask;
}
+static int vae3_tlbmask(void)
+{
+ return ARMMMUIdxBit_E3 | ARMMMUIdxBit_E3_GCS;
+}
+
/* Return 56 if TBI is enabled, 64 otherwise. */
static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx,
uint64_t addr)
@@ -325,9 +339,12 @@ static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
static int e2_tlbmask(CPUARMState *env)
{
return (ARMMMUIdxBit_E20_0 |
+ ARMMMUIdxBit_E20_0_GCS |
ARMMMUIdxBit_E20_2 |
ARMMMUIdxBit_E20_2_PAN |
- ARMMMUIdxBit_E2);
+ ARMMMUIdxBit_E20_2_GCS |
+ ARMMMUIdxBit_E2 |
+ ARMMMUIdxBit_E2_GCS);
}
static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -354,7 +371,7 @@ static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
ARMCPU *cpu = env_archcpu(env);
CPUState *cs = CPU(cpu);
- tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E3);
+ tlb_flush_by_mmuidx(cs, vae3_tlbmask());
}
static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -380,7 +397,7 @@ static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
CPUState *cs = env_cpu(env);
- tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E3);
+ tlb_flush_by_mmuidx_all_cpus_synced(cs, vae3_tlbmask());
}
static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -411,7 +428,7 @@ static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
CPUState *cs = CPU(cpu);
uint64_t pageaddr = sextract64(value << 12, 0, 56);
- tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E3);
+ tlb_flush_page_by_mmuidx(cs, pageaddr, vae3_tlbmask());
}
static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -465,7 +482,7 @@ static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
int bits = tlbbits_for_regime(env, ARMMMUIdx_E3, pageaddr);
tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
- ARMMMUIdxBit_E3, bits);
+ vae3_tlbmask(), bits);
}
static int ipas2e1_tlbmask(CPUARMState *env, int64_t value)
@@ -963,7 +980,7 @@ static void tlbi_aa64_rvae3_write(CPUARMState *env,
* flush-last-level-only.
*/
- do_rvae_write(env, value, ARMMMUIdxBit_E3, tlb_force_broadcast(env));
+ do_rvae_write(env, value, vae3_tlbmask(), tlb_force_broadcast(env));
}
static void tlbi_aa64_rvae3is_write(CPUARMState *env,
@@ -977,7 +994,7 @@ static void tlbi_aa64_rvae3is_write(CPUARMState *env,
* flush-last-level-only or inner/outer specific flushes.
*/
- do_rvae_write(env, value, ARMMMUIdxBit_E3, true);
+ do_rvae_write(env, value, vae3_tlbmask(), true);
}
static void tlbi_aa64_ripas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
--
2.43.0