public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH 0/2] arm64/mm: Drop TTBR_CNP_BIT and TTBR_ASID_MASK
@ 2026-02-25  3:51 Anshuman Khandual
  2026-02-25  3:51 ` [PATCH 1/2] arm64/mm: Directly use TTBRx_EL1_ASID_MASK Anshuman Khandual
  2026-02-25  3:51 ` [PATCH 2/2] arm64/mm: Directly use TTBRx_EL1_CnP Anshuman Khandual
  0 siblings, 2 replies; 6+ messages in thread
From: Anshuman Khandual @ 2026-02-25  3:51 UTC (permalink / raw)
  To: linux-arm-kernel
  Cc: Anshuman Khandual, Catalin Marinas, Will Deacon, Ryan Roberts,
	Mark Rutland, Marc Zyngier, Oliver Upton, linux-kernel, kvmarm

Directly use the existing tools sysreg format field macros TTBRx_EL1_CnP/
TTBRx_EL1_ASID_MASK, while also dropping the now-redundant custom macros
TTBR_CNP_BIT and TTBR_ASID_MASK. With this change in place, there are no
more TTBR_EL1 register based custom macros left in the tree.

This was discussed and suggested earlier.

https://lore.kernel.org/linux-arm-kernel/aRb8ezhQd0c0jp9G@J2N7QTR9R3/

Series applies on v7.0-rc1.

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Oliver Upton <oupton@kernel.org>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Cc: kvmarm@lists.linux.dev

Anshuman Khandual (2):
  arm64/mm: Directly use TTBRx_EL1_ASID_MASK
  arm64/mm: Directly use TTBRx_EL1_CnP

 arch/arm64/include/asm/asm-uaccess.h   | 2 +-
 arch/arm64/include/asm/mmu.h           | 1 -
 arch/arm64/include/asm/mmu_context.h   | 2 +-
 arch/arm64/include/asm/pgtable-hwdef.h | 2 --
 arch/arm64/include/asm/uaccess.h       | 6 +++---
 arch/arm64/kernel/entry.S              | 2 +-
 arch/arm64/kernel/mte.c                | 4 ++--
 arch/arm64/kvm/at.c                    | 2 +-
 arch/arm64/kvm/hyp/nvhe/hyp-init.S     | 4 ++--
 arch/arm64/kvm/nested.c                | 4 ++--
 arch/arm64/mm/context.c                | 8 ++++----
 arch/arm64/mm/mmu.c                    | 2 +-
 12 files changed, 18 insertions(+), 21 deletions(-)

-- 
2.30.2


^ permalink raw reply	[flat|nested] 6+ messages in thread

* [PATCH 1/2] arm64/mm: Directly use TTBRx_EL1_ASID_MASK
  2026-02-25  3:51 [PATCH 0/2] arm64/mm: Drop TTBR_CNP_BIT and TTBR_ASID_MASK Anshuman Khandual
@ 2026-02-25  3:51 ` Anshuman Khandual
  2026-02-25  9:23   ` Marc Zyngier
  2026-02-25  3:51 ` [PATCH 2/2] arm64/mm: Directly use TTBRx_EL1_CnP Anshuman Khandual
  1 sibling, 1 reply; 6+ messages in thread
From: Anshuman Khandual @ 2026-02-25  3:51 UTC (permalink / raw)
  To: linux-arm-kernel
  Cc: Anshuman Khandual, Catalin Marinas, Will Deacon, Ryan Roberts,
	Mark Rutland, Marc Zyngier, Oliver Upton, linux-kernel, kvmarm

Replace all TTBR_ASID_MASK macro instances with TTBRx_EL1_ASID_MASK which
is a standard field mask from tools sysreg format. Drop the now redundant
custom macro TTBR_ASID_MASK. No functional change.

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Oliver Upton <oupton@kernel.org>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Cc: kvmarm@lists.linux.dev
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
---
 arch/arm64/include/asm/asm-uaccess.h | 2 +-
 arch/arm64/include/asm/mmu.h         | 1 -
 arch/arm64/include/asm/mmu_context.h | 2 +-
 arch/arm64/include/asm/uaccess.h     | 6 +++---
 arch/arm64/kernel/entry.S            | 2 +-
 arch/arm64/kvm/at.c                  | 2 +-
 arch/arm64/kvm/nested.c              | 4 ++--
 arch/arm64/mm/context.c              | 6 +++---
 8 files changed, 12 insertions(+), 13 deletions(-)

diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
index 9148f5a31968..12aa6a283249 100644
--- a/arch/arm64/include/asm/asm-uaccess.h
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -15,7 +15,7 @@
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
 	.macro	__uaccess_ttbr0_disable, tmp1
 	mrs	\tmp1, ttbr1_el1			// swapper_pg_dir
-	bic	\tmp1, \tmp1, #TTBR_ASID_MASK
+	bic	\tmp1, \tmp1, #TTBRx_EL1_ASID_MASK
 	sub	\tmp1, \tmp1, #RESERVED_SWAPPER_OFFSET	// reserved_pg_dir
 	msr	ttbr0_el1, \tmp1			// set reserved TTBR0_EL1
 	add	\tmp1, \tmp1, #RESERVED_SWAPPER_OFFSET
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 137a173df1ff..019b36cda380 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -10,7 +10,6 @@
 #define MMCF_AARCH32	0x1	/* mm context flag for AArch32 executables */
 #define USER_ASID_BIT	48
 #define USER_ASID_FLAG	(UL(1) << USER_ASID_BIT)
-#define TTBR_ASID_MASK	(UL(0xffff) << 48)
 
 #ifndef __ASSEMBLER__
 
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index cc80af59c69e..5b1ecde9f14b 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -210,7 +210,7 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
 	if (mm == &init_mm)
 		ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
 	else
-		ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << 48;
+		ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << TTBRx_EL1_ASID_SHIFT;
 
 	WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
 }
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 9810106a3f66..86dfc356ee6e 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -62,7 +62,7 @@ static inline void __uaccess_ttbr0_disable(void)
 
 	local_irq_save(flags);
 	ttbr = read_sysreg(ttbr1_el1);
-	ttbr &= ~TTBR_ASID_MASK;
+	ttbr &= ~TTBRx_EL1_ASID_MASK;
 	/* reserved_pg_dir placed before swapper_pg_dir */
 	write_sysreg(ttbr - RESERVED_SWAPPER_OFFSET, ttbr0_el1);
 	/* Set reserved ASID */
@@ -85,8 +85,8 @@ static inline void __uaccess_ttbr0_enable(void)
 
 	/* Restore active ASID */
 	ttbr1 = read_sysreg(ttbr1_el1);
-	ttbr1 &= ~TTBR_ASID_MASK;		/* safety measure */
-	ttbr1 |= ttbr0 & TTBR_ASID_MASK;
+	ttbr1 &= ~TTBRx_EL1_ASID_MASK;		/* safety measure */
+	ttbr1 |= ttbr0 & TTBRx_EL1_ASID_MASK;
 	write_sysreg(ttbr1, ttbr1_el1);
 
 	/* Restore user page table */
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index f8018b5c1f9a..9e1bcc821a16 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -473,7 +473,7 @@ alternative_else_nop_endif
 	 */
 SYM_CODE_START_LOCAL(__swpan_entry_el1)
 	mrs	x21, ttbr0_el1
-	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
+	tst	x21, #TTBRx_EL1_ASID_MASK	// Check for the reserved ASID
 	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
 	b.eq	1f				// TTBR0 access already disabled
 	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
diff --git a/arch/arm64/kvm/at.c b/arch/arm64/kvm/at.c
index 885bd5bb2f41..d5c342ccf0f9 100644
--- a/arch/arm64/kvm/at.c
+++ b/arch/arm64/kvm/at.c
@@ -560,7 +560,7 @@ static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
 			BUG();
 		}
 
-		wr->asid = FIELD_GET(TTBR_ASID_MASK, asid_ttbr);
+		wr->asid = FIELD_GET(TTBRx_EL1_ASID_MASK, asid_ttbr);
 		if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
 		    !(tcr & TCR_ASID16))
 			wr->asid &= GENMASK(7, 0);
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index 620126d1f0dc..82558fb2685f 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -1343,7 +1343,7 @@ static bool kvm_vncr_tlb_lookup(struct kvm_vcpu *vcpu)
 			    vcpu_read_sys_reg(vcpu, TTBR0_EL2));
 		u16 asid;
 
-		asid = FIELD_GET(TTBR_ASID_MASK, ttbr);
+		asid = FIELD_GET(TTBRx_EL1_ASID_MASK, ttbr);
 		if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
 		    !(tcr & TCR_ASID16))
 			asid &= GENMASK(7, 0);
@@ -1459,7 +1459,7 @@ static void kvm_map_l1_vncr(struct kvm_vcpu *vcpu)
 			    vcpu_read_sys_reg(vcpu, TTBR0_EL2));
 		u16 asid;
 
-		asid = FIELD_GET(TTBR_ASID_MASK, ttbr);
+		asid = FIELD_GET(TTBRx_EL1_ASID_MASK, ttbr);
 		if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
 		    !(tcr & TCR_ASID16))
 			asid &= GENMASK(7, 0);
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index b2ac06246327..718c495832d0 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -358,11 +358,11 @@ void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm)
 
 	/* SW PAN needs a copy of the ASID in TTBR0 for entry */
 	if (IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN))
-		ttbr0 |= FIELD_PREP(TTBR_ASID_MASK, asid);
+		ttbr0 |= FIELD_PREP(TTBRx_EL1_ASID_MASK, asid);
 
 	/* Set ASID in TTBR1 since TCR.A1 is set */
-	ttbr1 &= ~TTBR_ASID_MASK;
-	ttbr1 |= FIELD_PREP(TTBR_ASID_MASK, asid);
+	ttbr1 &= ~TTBRx_EL1_ASID_MASK;
+	ttbr1 |= FIELD_PREP(TTBRx_EL1_ASID_MASK, asid);
 
 	cpu_set_reserved_ttbr0_nosync();
 	write_sysreg(ttbr1, ttbr1_el1);
-- 
2.30.2


^ permalink raw reply related	[flat|nested] 6+ messages in thread

* [PATCH 2/2] arm64/mm: Directly use TTBRx_EL1_CnP
  2026-02-25  3:51 [PATCH 0/2] arm64/mm: Drop TTBR_CNP_BIT and TTBR_ASID_MASK Anshuman Khandual
  2026-02-25  3:51 ` [PATCH 1/2] arm64/mm: Directly use TTBRx_EL1_ASID_MASK Anshuman Khandual
@ 2026-02-25  3:51 ` Anshuman Khandual
  1 sibling, 0 replies; 6+ messages in thread
From: Anshuman Khandual @ 2026-02-25  3:51 UTC (permalink / raw)
  To: linux-arm-kernel
  Cc: Anshuman Khandual, Catalin Marinas, Will Deacon, Ryan Roberts,
	Mark Rutland, Marc Zyngier, Oliver Upton, linux-kernel, kvmarm

Replace all TTBR_CNP_BIT macro instances with TTBRx_EL1_CnP which is a
standard field macro from the tools sysreg format. Drop the now-redundant
custom macro TTBR_CNP_BIT. No functional change.

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Oliver Upton <oupton@kernel.org>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Cc: kvmarm@lists.linux.dev
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
---
 arch/arm64/include/asm/pgtable-hwdef.h | 2 --
 arch/arm64/kernel/mte.c                | 4 ++--
 arch/arm64/kvm/hyp/nvhe/hyp-init.S     | 4 ++--
 arch/arm64/mm/context.c                | 2 +-
 arch/arm64/mm/mmu.c                    | 2 +-
 5 files changed, 6 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index d49180bb7cb3..5e6809a462c7 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -223,8 +223,6 @@
  */
 #define S1_TABLE_AP		(_AT(pmdval_t, 3) << 61)
 
-#define TTBR_CNP_BIT		(UL(1) << 0)
-
 /*
  * TCR flags.
  */
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index 32148bf09c1d..eceead1686f2 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -315,8 +315,8 @@ void mte_cpu_setup(void)
 	 * CnP is not a boot feature so MTE gets enabled before CnP, but let's
 	 * make sure that is the case.
 	 */
-	BUG_ON(read_sysreg(ttbr0_el1) & TTBR_CNP_BIT);
-	BUG_ON(read_sysreg(ttbr1_el1) & TTBR_CNP_BIT);
+	BUG_ON(read_sysreg(ttbr0_el1) & TTBRx_EL1_CnP);
+	BUG_ON(read_sysreg(ttbr1_el1) & TTBRx_EL1_CnP);
 
 	/* Normal Tagged memory type at the corresponding MAIR index */
 	sysreg_clear_set(mair_el1,
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
index 0d42eedc7167..445eb0743af2 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
@@ -130,7 +130,7 @@ SYM_CODE_START_LOCAL(___kvm_hyp_init)
 	ldr	x1, [x0, #NVHE_INIT_PGD_PA]
 	phys_to_ttbr x2, x1
 alternative_if ARM64_HAS_CNP
-	orr	x2, x2, #TTBR_CNP_BIT
+	orr	x2, x2, #TTBRx_EL1_CnP
 alternative_else_nop_endif
 	msr	ttbr0_el2, x2
 
@@ -291,7 +291,7 @@ SYM_TYPED_FUNC_START(__pkvm_init_switch_pgd)
 	/* Install the new pgtables */
 	phys_to_ttbr x5, x0
 alternative_if ARM64_HAS_CNP
-	orr	x5, x5, #TTBR_CNP_BIT
+	orr	x5, x5, #TTBRx_EL1_CnP
 alternative_else_nop_endif
 	msr	ttbr0_el2, x5
 
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 718c495832d0..0f4a28b87469 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -354,7 +354,7 @@ void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm)
 
 	/* Skip CNP for the reserved ASID */
 	if (system_supports_cnp() && asid)
-		ttbr0 |= TTBR_CNP_BIT;
+		ttbr0 |= TTBRx_EL1_CnP;
 
 	/* SW PAN needs a copy of the ASID in TTBR0 for entry */
 	if (IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN))
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index a6a00accf4f9..c22678769c37 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -2188,7 +2188,7 @@ void __cpu_replace_ttbr1(pgd_t *pgdp, bool cnp)
 	phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));
 
 	if (cnp)
-		ttbr1 |= TTBR_CNP_BIT;
+		ttbr1 |= TTBRx_EL1_CnP;
 
 	replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);
 
-- 
2.30.2


^ permalink raw reply related	[flat|nested] 6+ messages in thread

* Re: [PATCH 1/2] arm64/mm: Directly use TTBRx_EL1_ASID_MASK
  2026-02-25  3:51 ` [PATCH 1/2] arm64/mm: Directly use TTBRx_EL1_ASID_MASK Anshuman Khandual
@ 2026-02-25  9:23   ` Marc Zyngier
  2026-02-25 10:40     ` Anshuman Khandual
  2026-02-25 10:50     ` Marc Zyngier
  0 siblings, 2 replies; 6+ messages in thread
From: Marc Zyngier @ 2026-02-25  9:23 UTC (permalink / raw)
  To: Anshuman Khandual
  Cc: linux-arm-kernel, Catalin Marinas, Will Deacon, Ryan Roberts,
	Mark Rutland, Oliver Upton, linux-kernel, kvmarm

On Wed, 25 Feb 2026 03:51:56 +0000,
Anshuman Khandual <anshuman.khandual@arm.com> wrote:
> 
> Replace all TTBR_ASID_MASK macro instances with TTBRx_EL1_ASID_MASK which
> is a standard field mask from tools sysreg format. Drop the now redundant
> custom macro TTBR_ASID_MASK. No functional change.
> 
> Cc: Catalin Marinas <catalin.marinas@arm.com>
> Cc: Will Deacon <will@kernel.org>
> Cc: Marc Zyngier <maz@kernel.org>
> Cc: Oliver Upton <oupton@kernel.org>
> Cc: linux-arm-kernel@lists.infradead.org
> Cc: linux-kernel@vger.kernel.org
> Cc: kvmarm@lists.linux.dev
> Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
> ---
>  arch/arm64/include/asm/asm-uaccess.h | 2 +-
>  arch/arm64/include/asm/mmu.h         | 1 -
>  arch/arm64/include/asm/mmu_context.h | 2 +-
>  arch/arm64/include/asm/uaccess.h     | 6 +++---
>  arch/arm64/kernel/entry.S            | 2 +-
>  arch/arm64/kvm/at.c                  | 2 +-
>  arch/arm64/kvm/nested.c              | 4 ++--
>  arch/arm64/mm/context.c              | 6 +++---
>  8 files changed, 12 insertions(+), 13 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
> index 9148f5a31968..12aa6a283249 100644
> --- a/arch/arm64/include/asm/asm-uaccess.h
> +++ b/arch/arm64/include/asm/asm-uaccess.h
> @@ -15,7 +15,7 @@
>  #ifdef CONFIG_ARM64_SW_TTBR0_PAN
>  	.macro	__uaccess_ttbr0_disable, tmp1
>  	mrs	\tmp1, ttbr1_el1			// swapper_pg_dir
> -	bic	\tmp1, \tmp1, #TTBR_ASID_MASK
> +	bic	\tmp1, \tmp1, #TTBRx_EL1_ASID_MASK
>  	sub	\tmp1, \tmp1, #RESERVED_SWAPPER_OFFSET	// reserved_pg_dir
>  	msr	ttbr0_el1, \tmp1			// set reserved TTBR0_EL1
>  	add	\tmp1, \tmp1, #RESERVED_SWAPPER_OFFSET
> diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
> index 137a173df1ff..019b36cda380 100644
> --- a/arch/arm64/include/asm/mmu.h
> +++ b/arch/arm64/include/asm/mmu.h
> @@ -10,7 +10,6 @@
>  #define MMCF_AARCH32	0x1	/* mm context flag for AArch32 executables */
>  #define USER_ASID_BIT	48
>  #define USER_ASID_FLAG	(UL(1) << USER_ASID_BIT)
> -#define TTBR_ASID_MASK	(UL(0xffff) << 48)
>  
>  #ifndef __ASSEMBLER__
>  
> diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
> index cc80af59c69e..5b1ecde9f14b 100644
> --- a/arch/arm64/include/asm/mmu_context.h
> +++ b/arch/arm64/include/asm/mmu_context.h
> @@ -210,7 +210,7 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
>  	if (mm == &init_mm)
>  		ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
>  	else
> -		ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << 48;
> +		ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << TTBRx_EL1_ASID_SHIFT;
>

Could you please use FIELD_PREP() for this sort of constructs?

[...]

> diff --git a/arch/arm64/kvm/at.c b/arch/arm64/kvm/at.c
> index 885bd5bb2f41..d5c342ccf0f9 100644
> --- a/arch/arm64/kvm/at.c
> +++ b/arch/arm64/kvm/at.c
> @@ -560,7 +560,7 @@ static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
>  			BUG();
>  		}
>  
> -		wr->asid = FIELD_GET(TTBR_ASID_MASK, asid_ttbr);
> +		wr->asid = FIELD_GET(TTBRx_EL1_ASID_MASK, asid_ttbr);
>  		if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
>  		    !(tcr & TCR_ASID16))
>  			wr->asid &= GENMASK(7, 0);
> diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
> index 620126d1f0dc..82558fb2685f 100644
> --- a/arch/arm64/kvm/nested.c
> +++ b/arch/arm64/kvm/nested.c
> @@ -1343,7 +1343,7 @@ static bool kvm_vncr_tlb_lookup(struct kvm_vcpu *vcpu)
>  			    vcpu_read_sys_reg(vcpu, TTBR0_EL2));
>  		u16 asid;
>  
> -		asid = FIELD_GET(TTBR_ASID_MASK, ttbr);
> +		asid = FIELD_GET(TTBRx_EL1_ASID_MASK, ttbr);
>  		if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
>  		    !(tcr & TCR_ASID16))
>  			asid &= GENMASK(7, 0);
> @@ -1459,7 +1459,7 @@ static void kvm_map_l1_vncr(struct kvm_vcpu *vcpu)
>  			    vcpu_read_sys_reg(vcpu, TTBR0_EL2));
>  		u16 asid;
>  
> -		asid = FIELD_GET(TTBR_ASID_MASK, ttbr);
> +		asid = FIELD_GET(TTBRx_EL1_ASID_MASK, ttbr);
>  		if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
>  		    !(tcr & TCR_ASID16))
>  			asid &= GENMASK(7, 0);

Given the 3 hunks above, there is clearly a better approach.

Thanks,

	M.

-- 
Without deviation from the norm, progress is not possible.

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH 1/2] arm64/mm: Directly use TTBRx_EL1_ASID_MASK
  2026-02-25  9:23   ` Marc Zyngier
@ 2026-02-25 10:40     ` Anshuman Khandual
  2026-02-25 10:50     ` Marc Zyngier
  1 sibling, 0 replies; 6+ messages in thread
From: Anshuman Khandual @ 2026-02-25 10:40 UTC (permalink / raw)
  To: Marc Zyngier
  Cc: linux-arm-kernel, Catalin Marinas, Will Deacon, Ryan Roberts,
	Mark Rutland, Oliver Upton, linux-kernel, kvmarm

On 25/02/26 2:53 PM, Marc Zyngier wrote:
> On Wed, 25 Feb 2026 03:51:56 +0000,
> Anshuman Khandual <anshuman.khandual@arm.com> wrote:
>>
>> Replace all TTBR_ASID_MASK macro instances with TTBRx_EL1_ASID_MASK which
>> is a standard field mask from tools sysreg format. Drop the now redundant
>> custom macro TTBR_ASID_MASK. No functional change.
>>
>> Cc: Catalin Marinas <catalin.marinas@arm.com>
>> Cc: Will Deacon <will@kernel.org>
>> Cc: Marc Zyngier <maz@kernel.org>
>> Cc: Oliver Upton <oupton@kernel.org>
>> Cc: linux-arm-kernel@lists.infradead.org
>> Cc: linux-kernel@vger.kernel.org
>> Cc: kvmarm@lists.linux.dev
>> Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
>> ---
>>  arch/arm64/include/asm/asm-uaccess.h | 2 +-
>>  arch/arm64/include/asm/mmu.h         | 1 -
>>  arch/arm64/include/asm/mmu_context.h | 2 +-
>>  arch/arm64/include/asm/uaccess.h     | 6 +++---
>>  arch/arm64/kernel/entry.S            | 2 +-
>>  arch/arm64/kvm/at.c                  | 2 +-
>>  arch/arm64/kvm/nested.c              | 4 ++--
>>  arch/arm64/mm/context.c              | 6 +++---
>>  8 files changed, 12 insertions(+), 13 deletions(-)
>>
>> diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
>> index 9148f5a31968..12aa6a283249 100644
>> --- a/arch/arm64/include/asm/asm-uaccess.h
>> +++ b/arch/arm64/include/asm/asm-uaccess.h
>> @@ -15,7 +15,7 @@
>>  #ifdef CONFIG_ARM64_SW_TTBR0_PAN
>>  	.macro	__uaccess_ttbr0_disable, tmp1
>>  	mrs	\tmp1, ttbr1_el1			// swapper_pg_dir
>> -	bic	\tmp1, \tmp1, #TTBR_ASID_MASK
>> +	bic	\tmp1, \tmp1, #TTBRx_EL1_ASID_MASK
>>  	sub	\tmp1, \tmp1, #RESERVED_SWAPPER_OFFSET	// reserved_pg_dir
>>  	msr	ttbr0_el1, \tmp1			// set reserved TTBR0_EL1
>>  	add	\tmp1, \tmp1, #RESERVED_SWAPPER_OFFSET
>> diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
>> index 137a173df1ff..019b36cda380 100644
>> --- a/arch/arm64/include/asm/mmu.h
>> +++ b/arch/arm64/include/asm/mmu.h
>> @@ -10,7 +10,6 @@
>>  #define MMCF_AARCH32	0x1	/* mm context flag for AArch32 executables */
>>  #define USER_ASID_BIT	48
>>  #define USER_ASID_FLAG	(UL(1) << USER_ASID_BIT)
>> -#define TTBR_ASID_MASK	(UL(0xffff) << 48)
>>  
>>  #ifndef __ASSEMBLER__
>>  
>> diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
>> index cc80af59c69e..5b1ecde9f14b 100644
>> --- a/arch/arm64/include/asm/mmu_context.h
>> +++ b/arch/arm64/include/asm/mmu_context.h
>> @@ -210,7 +210,7 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
>>  	if (mm == &init_mm)
>>  		ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
>>  	else
>> -		ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << 48;
>> +		ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << TTBRx_EL1_ASID_SHIFT;
>>
> 
> Could you please use FIELD_PREP() for this sort of constructs?

Will replace with something like the following.

--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -210,7 +210,8 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
        if (mm == &init_mm)
                ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
        else
-               ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << TTBRx_EL1_ASID_SHIFT;
+               ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) |
+                      FIELD_PREP(TTBRx_EL1_ASID_MASK, ASID(mm));

        WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
 }

> 
> [...]
> 
>> diff --git a/arch/arm64/kvm/at.c b/arch/arm64/kvm/at.c
>> index 885bd5bb2f41..d5c342ccf0f9 100644
>> --- a/arch/arm64/kvm/at.c
>> +++ b/arch/arm64/kvm/at.c
>> @@ -560,7 +560,7 @@ static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
>>  			BUG();
>>  		}
>>  
>> -		wr->asid = FIELD_GET(TTBR_ASID_MASK, asid_ttbr);
>> +		wr->asid = FIELD_GET(TTBRx_EL1_ASID_MASK, asid_ttbr);
>>  		if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
>>  		    !(tcr & TCR_ASID16))
>>  			wr->asid &= GENMASK(7, 0);
>> diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
>> index 620126d1f0dc..82558fb2685f 100644
>> --- a/arch/arm64/kvm/nested.c
>> +++ b/arch/arm64/kvm/nested.c
>> @@ -1343,7 +1343,7 @@ static bool kvm_vncr_tlb_lookup(struct kvm_vcpu *vcpu)
>>  			    vcpu_read_sys_reg(vcpu, TTBR0_EL2));
>>  		u16 asid;
>>  
>> -		asid = FIELD_GET(TTBR_ASID_MASK, ttbr);
>> +		asid = FIELD_GET(TTBRx_EL1_ASID_MASK, ttbr);
>>  		if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
>>  		    !(tcr & TCR_ASID16))
>>  			asid &= GENMASK(7, 0);
>> @@ -1459,7 +1459,7 @@ static void kvm_map_l1_vncr(struct kvm_vcpu *vcpu)
>>  			    vcpu_read_sys_reg(vcpu, TTBR0_EL2));
>>  		u16 asid;
>>  
>> -		asid = FIELD_GET(TTBR_ASID_MASK, ttbr);
>> +		asid = FIELD_GET(TTBRx_EL1_ASID_MASK, ttbr);
>>  		if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
>>  		    !(tcr & TCR_ASID16))
>>  			asid &= GENMASK(7, 0);
> 
> Given the 3 hunks above, there is clearly a better approach.

Agreed.

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH 1/2] arm64/mm: Directly use TTBRx_EL1_ASID_MASK
  2026-02-25  9:23   ` Marc Zyngier
  2026-02-25 10:40     ` Anshuman Khandual
@ 2026-02-25 10:50     ` Marc Zyngier
  1 sibling, 0 replies; 6+ messages in thread
From: Marc Zyngier @ 2026-02-25 10:50 UTC (permalink / raw)
  To: Anshuman Khandual
  Cc: linux-arm-kernel, Catalin Marinas, Will Deacon, Ryan Roberts,
	Mark Rutland, Oliver Upton, linux-kernel, kvmarm

On Wed, 25 Feb 2026 09:23:29 +0000,
Marc Zyngier <maz@kernel.org> wrote:
> 
> On Wed, 25 Feb 2026 03:51:56 +0000,
> Anshuman Khandual <anshuman.khandual@arm.com> wrote:
> > 
> > diff --git a/arch/arm64/kvm/at.c b/arch/arm64/kvm/at.c
> > index 885bd5bb2f41..d5c342ccf0f9 100644
> > --- a/arch/arm64/kvm/at.c
> > +++ b/arch/arm64/kvm/at.c
> > @@ -560,7 +560,7 @@ static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
> >  			BUG();
> >  		}
> >  
> > -		wr->asid = FIELD_GET(TTBR_ASID_MASK, asid_ttbr);
> > +		wr->asid = FIELD_GET(TTBRx_EL1_ASID_MASK, asid_ttbr);
> >  		if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
> >  		    !(tcr & TCR_ASID16))
> >  			wr->asid &= GENMASK(7, 0);
> > diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
> > index 620126d1f0dc..82558fb2685f 100644
> > --- a/arch/arm64/kvm/nested.c
> > +++ b/arch/arm64/kvm/nested.c
> > @@ -1343,7 +1343,7 @@ static bool kvm_vncr_tlb_lookup(struct kvm_vcpu *vcpu)
> >  			    vcpu_read_sys_reg(vcpu, TTBR0_EL2));
> >  		u16 asid;
> >  
> > -		asid = FIELD_GET(TTBR_ASID_MASK, ttbr);
> > +		asid = FIELD_GET(TTBRx_EL1_ASID_MASK, ttbr);
> >  		if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
> >  		    !(tcr & TCR_ASID16))
> >  			asid &= GENMASK(7, 0);
> > @@ -1459,7 +1459,7 @@ static void kvm_map_l1_vncr(struct kvm_vcpu *vcpu)
> >  			    vcpu_read_sys_reg(vcpu, TTBR0_EL2));
> >  		u16 asid;
> >  
> > -		asid = FIELD_GET(TTBR_ASID_MASK, ttbr);
> > +		asid = FIELD_GET(TTBRx_EL1_ASID_MASK, ttbr);
> >  		if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
> >  		    !(tcr & TCR_ASID16))
> >  			asid &= GENMASK(7, 0);
> 
> Given the 3 hunks above, there is clearly a better approach.

https://lore.kernel.org/r/20260225104718.14209-1-maz@kernel.org

	M.

-- 
Without deviation from the norm, progress is not possible.

^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2026-02-25 10:50 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2026-02-25  3:51 [PATCH 0/2] arm64/mm: Drop TTBR_CNP_BIT and TTBR_ASID_MASK Anshuman Khandual
2026-02-25  3:51 ` [PATCH 1/2] arm64/mm: Directly use TTBRx_EL1_ASID_MASK Anshuman Khandual
2026-02-25  9:23   ` Marc Zyngier
2026-02-25 10:40     ` Anshuman Khandual
2026-02-25 10:50     ` Marc Zyngier
2026-02-25  3:51 ` [PATCH 2/2] arm64/mm: Directly use TTBRx_EL1_CnP Anshuman Khandual

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox