linux-arm-kernel.lists.infradead.org archive mirror
* [PATCH 0/6] arm64/mm: TTBRx_EL1 related changes
@ 2025-11-03  5:26 Anshuman Khandual
  2025-11-03  5:26 ` [PATCH 1/6] arm64/mm: Directly use TTBRx_EL1_ASID_MASK Anshuman Khandual
                   ` (6 more replies)
  0 siblings, 7 replies; 12+ messages in thread
From: Anshuman Khandual @ 2025-11-03  5:26 UTC (permalink / raw)
  To: linux-arm-kernel
  Cc: Anshuman Khandual, Catalin Marinas, Will Deacon, Ryan Roberts,
	Ard Biesheuvel, linux-kernel

This series contains some TTBRx_EL1 related changes aimed at standardizing
TTBRx_EL1 register field accesses via the tools sysreg format, and also
explains the 52-bit PA specific handling via a new macro and in-code comments.

This series applies on v6.18-rc4

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org

Anshuman Khandual (6):
  arm64/mm: Directly use TTBRx_EL1_ASID_MASK
  arm64/mm: Directly use TTBRx_EL1_CnP
  arm64/mm: Represent TTBR_BADDR_MASK_52 with TTBRx_EL1_BADDR_MASK
  arm64/mm: Ensure correct 48 bit PA gets into TTBRx_EL1
  arm64/mm: Describe 52 PA folding into TTBRx_EL1
  arm64/mm: Describe TTBR1_BADDR_4852_OFFSET

 arch/arm64/include/asm/asm-uaccess.h   |  2 +-
 arch/arm64/include/asm/assembler.h     |  3 ++-
 arch/arm64/include/asm/mmu_context.h   |  2 +-
 arch/arm64/include/asm/pgtable-hwdef.h | 23 ++++++++++++++++++++---
 arch/arm64/include/asm/pgtable.h       |  5 +++--
 arch/arm64/include/asm/uaccess.h       |  6 +++---
 arch/arm64/kernel/entry.S              |  2 +-
 arch/arm64/kernel/mte.c                |  4 ++--
 arch/arm64/mm/context.c                |  8 ++++----
 arch/arm64/mm/mmu.c                    |  2 +-
 10 files changed, 38 insertions(+), 19 deletions(-)

-- 
2.30.2



^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH 1/6] arm64/mm: Directly use TTBRx_EL1_ASID_MASK
  2025-11-03  5:26 [PATCH 0/6] arm64/mm: TTBRx_EL1 related changes Anshuman Khandual
@ 2025-11-03  5:26 ` Anshuman Khandual
  2025-11-03  5:26 ` [PATCH 2/6] arm64/mm: Directly use TTBRx_EL1_CnP Anshuman Khandual
                   ` (5 subsequent siblings)
  6 siblings, 0 replies; 12+ messages in thread
From: Anshuman Khandual @ 2025-11-03  5:26 UTC (permalink / raw)
  To: linux-arm-kernel
  Cc: Anshuman Khandual, Catalin Marinas, Will Deacon, Ryan Roberts,
	Ard Biesheuvel, linux-kernel

Replace all TTBR_ASID_MASK macro instances with TTBRx_EL1_ASID_MASK, which
is the standard field mask from the tools sysreg format. No functional change.
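
A quick stand-alone check of that equivalence (illustrative only, not part of
the patch; the legacy value and the GENMASK_ULL(63, 48) form of the generated
macro are assumptions for this example, not verified against the tree):

	#include <assert.h>
	#include <stdint.h>

	#define TTBR_ASID_MASK		(0xffffULL << 48)		/* assumed legacy definition */
	#define TTBRx_EL1_ASID_MASK	(((1ULL << 16) - 1) << 48)	/* assumed GENMASK_ULL(63, 48) */
	#define TTBRx_EL1_ASID_SHIFT	48

	int main(void)
	{
		uint64_t asid = 0x1234;

		/* Same mask, and the open-coded '<< 48' matches the named shift */
		assert(TTBR_ASID_MASK == TTBRx_EL1_ASID_MASK);
		assert((asid << 48) == (asid << TTBRx_EL1_ASID_SHIFT));
		return 0;
	}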

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
---
 arch/arm64/include/asm/asm-uaccess.h | 2 +-
 arch/arm64/include/asm/mmu_context.h | 2 +-
 arch/arm64/include/asm/uaccess.h     | 6 +++---
 arch/arm64/kernel/entry.S            | 2 +-
 arch/arm64/mm/context.c              | 6 +++---
 5 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
index 9148f5a31968..12aa6a283249 100644
--- a/arch/arm64/include/asm/asm-uaccess.h
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -15,7 +15,7 @@
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
 	.macro	__uaccess_ttbr0_disable, tmp1
 	mrs	\tmp1, ttbr1_el1			// swapper_pg_dir
-	bic	\tmp1, \tmp1, #TTBR_ASID_MASK
+	bic	\tmp1, \tmp1, #TTBRx_EL1_ASID_MASK
 	sub	\tmp1, \tmp1, #RESERVED_SWAPPER_OFFSET	// reserved_pg_dir
 	msr	ttbr0_el1, \tmp1			// set reserved TTBR0_EL1
 	add	\tmp1, \tmp1, #RESERVED_SWAPPER_OFFSET
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 0dbe3b29049b..59f8b9b40184 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -218,7 +218,7 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
 	if (mm == &init_mm)
 		ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
 	else
-		ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << 48;
+		ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << TTBRx_EL1_ASID_SHIFT;
 
 	WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
 }
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 1aa4ecb73429..52daf9c2ba42 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -62,7 +62,7 @@ static inline void __uaccess_ttbr0_disable(void)
 
 	local_irq_save(flags);
 	ttbr = read_sysreg(ttbr1_el1);
-	ttbr &= ~TTBR_ASID_MASK;
+	ttbr &= ~TTBRx_EL1_ASID_MASK;
 	/* reserved_pg_dir placed before swapper_pg_dir */
 	write_sysreg(ttbr - RESERVED_SWAPPER_OFFSET, ttbr0_el1);
 	/* Set reserved ASID */
@@ -85,8 +85,8 @@ static inline void __uaccess_ttbr0_enable(void)
 
 	/* Restore active ASID */
 	ttbr1 = read_sysreg(ttbr1_el1);
-	ttbr1 &= ~TTBR_ASID_MASK;		/* safety measure */
-	ttbr1 |= ttbr0 & TTBR_ASID_MASK;
+	ttbr1 &= ~TTBRx_EL1_ASID_MASK;		/* safety measure */
+	ttbr1 |= ttbr0 & TTBRx_EL1_ASID_MASK;
 	write_sysreg(ttbr1, ttbr1_el1);
 
 	/* Restore user page table */
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index f8018b5c1f9a..9e1bcc821a16 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -473,7 +473,7 @@ alternative_else_nop_endif
 	 */
 SYM_CODE_START_LOCAL(__swpan_entry_el1)
 	mrs	x21, ttbr0_el1
-	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
+	tst	x21, #TTBRx_EL1_ASID_MASK	// Check for the reserved ASID
 	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
 	b.eq	1f				// TTBR0 access already disabled
 	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index b2ac06246327..718c495832d0 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -358,11 +358,11 @@ void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm)
 
 	/* SW PAN needs a copy of the ASID in TTBR0 for entry */
 	if (IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN))
-		ttbr0 |= FIELD_PREP(TTBR_ASID_MASK, asid);
+		ttbr0 |= FIELD_PREP(TTBRx_EL1_ASID_MASK, asid);
 
 	/* Set ASID in TTBR1 since TCR.A1 is set */
-	ttbr1 &= ~TTBR_ASID_MASK;
-	ttbr1 |= FIELD_PREP(TTBR_ASID_MASK, asid);
+	ttbr1 &= ~TTBRx_EL1_ASID_MASK;
+	ttbr1 |= FIELD_PREP(TTBRx_EL1_ASID_MASK, asid);
 
 	cpu_set_reserved_ttbr0_nosync();
 	write_sysreg(ttbr1, ttbr1_el1);
-- 
2.30.2



^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [PATCH 2/6] arm64/mm: Directly use TTBRx_EL1_CnP
  2025-11-03  5:26 [PATCH 0/6] arm64/mm: TTBRx_EL1 related changes Anshuman Khandual
  2025-11-03  5:26 ` [PATCH 1/6] arm64/mm: Directly use TTBRx_EL1_ASID_MASK Anshuman Khandual
@ 2025-11-03  5:26 ` Anshuman Khandual
  2025-11-03  5:26 ` [PATCH 3/6] arm64/mm: Represent TTBR_BADDR_MASK_52 with TTBRx_EL1_BADDR_MASK Anshuman Khandual
                   ` (4 subsequent siblings)
  6 siblings, 0 replies; 12+ messages in thread
From: Anshuman Khandual @ 2025-11-03  5:26 UTC (permalink / raw)
  To: linux-arm-kernel
  Cc: Anshuman Khandual, Catalin Marinas, Will Deacon, Ryan Roberts,
	Ard Biesheuvel, linux-kernel

Replace all TTBR_CNP_BIT macro instances with TTBRx_EL1_CnP, which is the
standard field definition from the tools sysreg format. No functional change.

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
---
 arch/arm64/kernel/mte.c | 4 ++--
 arch/arm64/mm/context.c | 2 +-
 arch/arm64/mm/mmu.c     | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index 43f7a2f39403..ba2e3e9d1f73 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -315,8 +315,8 @@ void mte_cpu_setup(void)
 	 * CnP is not a boot feature so MTE gets enabled before CnP, but let's
 	 * make sure that is the case.
 	 */
-	BUG_ON(read_sysreg(ttbr0_el1) & TTBR_CNP_BIT);
-	BUG_ON(read_sysreg(ttbr1_el1) & TTBR_CNP_BIT);
+	BUG_ON(read_sysreg(ttbr0_el1) & TTBRx_EL1_CnP);
+	BUG_ON(read_sysreg(ttbr1_el1) & TTBRx_EL1_CnP);
 
 	/* Normal Tagged memory type at the corresponding MAIR index */
 	sysreg_clear_set(mair_el1,
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 718c495832d0..0f4a28b87469 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -354,7 +354,7 @@ void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm)
 
 	/* Skip CNP for the reserved ASID */
 	if (system_supports_cnp() && asid)
-		ttbr0 |= TTBR_CNP_BIT;
+		ttbr0 |= TTBRx_EL1_CnP;
 
 	/* SW PAN needs a copy of the ASID in TTBR0 for entry */
 	if (IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN))
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index b8d37eb037fc..e80bb623ef53 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -2084,7 +2084,7 @@ void __cpu_replace_ttbr1(pgd_t *pgdp, bool cnp)
 	phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));
 
 	if (cnp)
-		ttbr1 |= TTBR_CNP_BIT;
+		ttbr1 |= TTBRx_EL1_CnP;
 
 	replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);
 
-- 
2.30.2



^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [PATCH 3/6] arm64/mm: Represent TTBR_BADDR_MASK_52 with TTBRx_EL1_BADDR_MASK
  2025-11-03  5:26 [PATCH 0/6] arm64/mm: TTBRx_EL1 related changes Anshuman Khandual
  2025-11-03  5:26 ` [PATCH 1/6] arm64/mm: Directly use TTBRx_EL1_ASID_MASK Anshuman Khandual
  2025-11-03  5:26 ` [PATCH 2/6] arm64/mm: Directly use TTBRx_EL1_CnP Anshuman Khandual
@ 2025-11-03  5:26 ` Anshuman Khandual
  2025-11-03  5:26 ` [PATCH 4/6] arm64/mm: Ensure correct 48 bit PA gets into TTBRx_EL1 Anshuman Khandual
                   ` (3 subsequent siblings)
  6 siblings, 0 replies; 12+ messages in thread
From: Anshuman Khandual @ 2025-11-03  5:26 UTC (permalink / raw)
  To: linux-arm-kernel
  Cc: Anshuman Khandual, Catalin Marinas, Will Deacon, Ryan Roberts,
	Ard Biesheuvel, linux-kernel

TTBR_BADDR_MASK_52 discards bit[1], which is RES0 when the TTBRx_EL1 register
holds a 52-bit PA. Let's just keep the custom macro but redefine it in terms
of the tools sysreg field mask TTBRx_EL1_BADDR_MASK.
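
For reference, a minimal sketch of the intended equivalence (illustrative only,
not part of the patch; it assumes the tools sysreg description has BADDR as
bits [47:1], so that TTBRx_EL1_BADDR_MASK is GENMASK_ULL(47, 1)):

	#include <assert.h>

	#define GENMASK_ULL(h, l)	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

	#define TTBRx_EL1_BADDR_MASK	GENMASK_ULL(47, 1)	/* assumed generated value */
	#define TTBR_BADDR_MASK_52_OLD	GENMASK_ULL(47, 2)	/* previous open-coded mask */
	#define TTBR_BADDR_MASK_52_NEW	(TTBRx_EL1_BADDR_MASK & ~GENMASK_ULL(1, 1))

	int main(void)
	{
		/* Dropping the RES0 bit[1] from BADDR yields the old GENMASK_ULL(47, 2) */
		assert(TTBR_BADDR_MASK_52_OLD == TTBR_BADDR_MASK_52_NEW);
		return 0;
	}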

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
---
 arch/arm64/include/asm/pgtable-hwdef.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index f3b77deedfa2..e192c4dc624b 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -332,7 +332,7 @@
 /*
  * TTBR_ELx[1] is RES0 in this configuration.
  */
-#define TTBR_BADDR_MASK_52	GENMASK_ULL(47, 2)
+#define TTBR_BADDR_MASK_52	(TTBRx_EL1_BADDR_MASK & ~GENMASK(1, 1))
 #endif
 
 #ifdef CONFIG_ARM64_VA_BITS_52
-- 
2.30.2



^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [PATCH 4/6] arm64/mm: Ensure correct 48 bit PA gets into TTBRx_EL1
  2025-11-03  5:26 [PATCH 0/6] arm64/mm: TTBRx_EL1 related changes Anshuman Khandual
                   ` (2 preceding siblings ...)
  2025-11-03  5:26 ` [PATCH 3/6] arm64/mm: Represent TTBR_BADDR_MASK_52 with TTBRx_EL1_BADDR_MASK Anshuman Khandual
@ 2025-11-03  5:26 ` Anshuman Khandual
  2025-11-04 15:17   ` Mark Rutland
  2025-11-03  5:26 ` [PATCH 5/6] arm64/mm: Describe 52 PA folding " Anshuman Khandual
                   ` (2 subsequent siblings)
  6 siblings, 1 reply; 12+ messages in thread
From: Anshuman Khandual @ 2025-11-03  5:26 UTC (permalink / raw)
  To: linux-arm-kernel
  Cc: Anshuman Khandual, Catalin Marinas, Will Deacon, Ryan Roberts,
	Ard Biesheuvel, linux-kernel

Even though the 48-bit PA representation in TTBRx_EL1 does not involve
shifting partial bits like the 52-bit variant does, it still needs to be
masked properly for correctness. Hence mask the 48-bit PA with
TTBRx_EL1_BADDR_MASK.

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
---
 arch/arm64/include/asm/assembler.h | 1 +
 arch/arm64/include/asm/pgtable.h   | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 23be85d93348..d5eb09fc5f8a 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -609,6 +609,7 @@ alternative_endif
 	and	\ttbr, \ttbr, #TTBR_BADDR_MASK_52
 #else
 	mov	\ttbr, \phys
+	and	\ttbr, \ttbr, #TTBRx_EL1_BADDR_MASK
 #endif
 	.endm
 
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 0944e296dd4a..c3110040c137 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -1604,7 +1604,7 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
 #ifdef CONFIG_ARM64_PA_BITS_52
 #define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
 #else
-#define phys_to_ttbr(addr)	(addr)
+#define phys_to_ttbr(addr)	(addr & TTBRx_EL1_BADDR_MASK)
 #endif
 
 /*
-- 
2.30.2



^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [PATCH 5/6] arm64/mm: Describe 52 PA folding into TTBRx_EL1
  2025-11-03  5:26 [PATCH 0/6] arm64/mm: TTBRx_EL1 related changes Anshuman Khandual
                   ` (3 preceding siblings ...)
  2025-11-03  5:26 ` [PATCH 4/6] arm64/mm: Ensure correct 48 bit PA gets into TTBRx_EL1 Anshuman Khandual
@ 2025-11-03  5:26 ` Anshuman Khandual
  2025-11-03  5:26 ` [PATCH 6/6] arm64/mm: Describe TTBR1_BADDR_4852_OFFSET Anshuman Khandual
  2025-11-13  9:18 ` [PATCH 0/6] arm64/mm: TTBRx_EL1 related changes Anshuman Khandual
  6 siblings, 0 replies; 12+ messages in thread
From: Anshuman Khandual @ 2025-11-03  5:26 UTC (permalink / raw)
  To: linux-arm-kernel
  Cc: Anshuman Khandual, Catalin Marinas, Will Deacon, Ryan Roberts,
	Ard Biesheuvel, linux-kernel

A 52-bit physical address gets stored into the TTBRx_EL1 bits covered by
TTBR_BADDR_MASK_52 in a folded manner. Shifting PA[51:0] rightward by 46
bits aligns PA[51:48] into TTBRx_EL1[5:2], which then gets ORed into the
final TTBRx_EL1 encoding.

Define TTBR_BADDR_52_PA_PIVOT to describe the inflection point where this
right shift is done, thus bringing some clarity to the 52-bit PA folding
process in TTBRx_EL1.
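
A minimal user-space sketch of the folding described above (illustrative only,
not part of the patch; the sample address is made up):

	#include <stdio.h>
	#include <stdint.h>

	#define TTBR_BADDR_52_PA_PIVOT	46
	#define TTBR_BADDR_MASK_52	(((1ULL << 46) - 1) << 2)	/* GENMASK_ULL(47, 2) */

	static uint64_t phys_to_ttbr_52(uint64_t addr)
	{
		/* PA[51:48] lands in bits [5:2]; PA[47:46] from the shift is masked off */
		return (addr | (addr >> TTBR_BADDR_52_PA_PIVOT)) & TTBR_BADDR_MASK_52;
	}

	int main(void)
	{
		uint64_t pa = 0x000f0000dead0000ULL;	/* hypothetical 52-bit, 64K aligned PA */

		/* Prints 0x00000000dead003c: PA[47:16] stays in place, PA[51:48] = 0xf lands in bits [5:2] */
		printf("0x%016llx\n", (unsigned long long)phys_to_ttbr_52(pa));
		return 0;
	}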

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
---
 arch/arm64/include/asm/assembler.h     |  2 +-
 arch/arm64/include/asm/pgtable-hwdef.h | 14 ++++++++++++++
 arch/arm64/include/asm/pgtable.h       |  3 ++-
 3 files changed, 17 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index d5eb09fc5f8a..731b29d0506c 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -605,7 +605,7 @@ alternative_endif
  */
 	.macro	phys_to_ttbr, ttbr, phys
 #ifdef CONFIG_ARM64_PA_BITS_52
-	orr	\ttbr, \phys, \phys, lsr #46
+	orr	\ttbr, \phys, \phys, lsr #TTBR_BADDR_52_PA_PIVOT
 	and	\ttbr, \ttbr, #TTBR_BADDR_MASK_52
 #else
 	mov	\ttbr, \phys
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index e192c4dc624b..fb9f651375a9 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -333,6 +333,20 @@
  * TTBR_ELx[1] is RES0 in this configuration.
  */
 #define TTBR_BADDR_MASK_52	(TTBRx_EL1_BADDR_MASK & ~GENMASK(1, 1))
+
+/*
+ * A 52-bit physical address gets stored in TTBR_BADDR_MASK_52, i.e.
+ * GENMASK(47, 2), in a folded manner. Shifting PA[51:0] rightward
+ * by 46 bits aligns PA[51:48] into TTBRx_EL1[5:2], which gets ORed
+ * subsequently into the final TTBRx_EL1 encoding.
+ *
+ * 47                                              5          2  0
+ * +----------------------------------------------+-----------+--+
+ * |                      PA[47:X]                | PA[51:48] |  |
+ * +----------------------------------------------+-----------+--+
+ *
+ */
+#define TTBR_BADDR_52_PA_PIVOT 46
 #endif
 
 #ifdef CONFIG_ARM64_VA_BITS_52
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index c3110040c137..3457045c1045 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -1602,7 +1602,8 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
 #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
 
 #ifdef CONFIG_ARM64_PA_BITS_52
-#define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
+#define phys_to_ttbr(addr)	(((addr) | ((addr) >> TTBR_BADDR_52_PA_PIVOT)) & \
+				 TTBR_BADDR_MASK_52)
 #else
 #define phys_to_ttbr(addr)	(addr & TTBRx_EL1_BADDR_MASK)
 #endif
-- 
2.30.2



^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [PATCH 6/6] arm64/mm: Describe TTBR1_BADDR_4852_OFFSET
  2025-11-03  5:26 [PATCH 0/6] arm64/mm: TTBRx_EL1 related changes Anshuman Khandual
                   ` (4 preceding siblings ...)
  2025-11-03  5:26 ` [PATCH 5/6] arm64/mm: Describe 52 PA folding " Anshuman Khandual
@ 2025-11-03  5:26 ` Anshuman Khandual
  2025-11-13  9:18 ` [PATCH 0/6] arm64/mm: TTBRx_EL1 related changes Anshuman Khandual
  6 siblings, 0 replies; 12+ messages in thread
From: Anshuman Khandual @ 2025-11-03  5:26 UTC (permalink / raw)
  To: linux-arm-kernel
  Cc: Anshuman Khandual, Catalin Marinas, Will Deacon, Ryan Roberts,
	Ard Biesheuvel, linux-kernel

TTBR1_BADDR_4852_OFFSET is a constant offset which gets added to the kernel
page table physical address programmed into TTBR1_EL1 when the kernel is
built for 52-bit VA but finds itself running on a system that is only 48-bit
VA capable. However, there is no explanation of how the macro value is
computed.

Describe the TTBR1_BADDR_4852_OFFSET computation in detail by deriving it
from all the parameters involved, thus improving clarity and readability.
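
With 64K pages, where PGDIR_SHIFT is 42 and a page table descriptor is 8 bytes
(PTDESC_ORDER = 3), the macro works out to (1024 - 64) * 8 = 7680 bytes, i.e.
0x1e00 - the byte offset of the 64-entry slice of the 52-bit VA sized PGD that
a 48-bit VA configuration actually indexes. A small stand-alone check of that
arithmetic (illustrative only, not part of the patch; the PGDIR_SHIFT and
PTDESC_ORDER values are assumptions for this example):

	#include <stdio.h>

	#define PGDIR_SHIFT	42	/* assumed: 64K pages, 3 levels */
	#define PTDESC_ORDER	3	/* assumed: log2 of an 8-byte descriptor */

	#define PTRS_PER_PGD_52_VA	(1UL << (52 - PGDIR_SHIFT))	/* 1024 */
	#define PTRS_PER_PGD_48_VA	(1UL << (48 - PGDIR_SHIFT))	/*   64 */
	#define PTRS_PER_PGD_EXTRA	(PTRS_PER_PGD_52_VA - PTRS_PER_PGD_48_VA)

	int main(void)
	{
		/* Prints 7680 (0x1e00), which is also 64-byte aligned as required */
		printf("%lu\n", PTRS_PER_PGD_EXTRA << PTDESC_ORDER);
		return 0;
	}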

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
---
 arch/arm64/include/asm/pgtable-hwdef.h | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index fb9f651375a9..e3d070fdae6a 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -350,9 +350,12 @@
 #endif
 
 #ifdef CONFIG_ARM64_VA_BITS_52
+#define PTRS_PER_PGD_52_VA (UL(1) << (52 - PGDIR_SHIFT))
+#define PTRS_PER_PGD_48_VA (UL(1) << (48 - PGDIR_SHIFT))
+#define PTRS_PER_PGD_EXTRA (PTRS_PER_PGD_52_VA - PTRS_PER_PGD_48_VA)
+
 /* Must be at least 64-byte aligned to prevent corruption of the TTBR */
-#define TTBR1_BADDR_4852_OFFSET	(((UL(1) << (52 - PGDIR_SHIFT)) - \
-				 (UL(1) << (48 - PGDIR_SHIFT))) * 8)
+#define TTBR1_BADDR_4852_OFFSET (PTRS_PER_PGD_EXTRA << PTDESC_ORDER)
 #endif
 
 #endif
-- 
2.30.2



^ permalink raw reply related	[flat|nested] 12+ messages in thread

* Re: [PATCH 4/6] arm64/mm: Ensure correct 48 bit PA gets into TTBRx_EL1
  2025-11-03  5:26 ` [PATCH 4/6] arm64/mm: Ensure correct 48 bit PA gets into TTBRx_EL1 Anshuman Khandual
@ 2025-11-04 15:17   ` Mark Rutland
  2025-11-05  3:35     ` Anshuman Khandual
  0 siblings, 1 reply; 12+ messages in thread
From: Mark Rutland @ 2025-11-04 15:17 UTC (permalink / raw)
  To: Anshuman Khandual
  Cc: linux-arm-kernel, Catalin Marinas, Will Deacon, Ryan Roberts,
	Ard Biesheuvel, linux-kernel

On Mon, Nov 03, 2025 at 05:26:16AM +0000, Anshuman Khandual wrote:
> Even though the 48-bit PA representation in TTBRx_EL1 does not involve
> shifting partial bits like the 52-bit variant does, it still needs to be
> masked properly for correctness. Hence mask the 48-bit PA with
> TTBRx_EL1_BADDR_MASK.

There is no need for the address "to be masked properly for
correctness".

We added masking for 52-bit PAs due to the need to shuffle the bits
around. There is no need for that when using 48-bit PAs, since the
address must be below 2^48, and the address must be suitably aligned.

If any bits are set outside of this mask, that is a bug in the caller.

Mark.

> Cc: Catalin Marinas <catalin.marinas@arm.com>
> Cc: Will Deacon <will@kernel.org>
> Cc: linux-arm-kernel@lists.infradead.org
> Cc: linux-kernel@vger.kernel.org
> Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
> ---
>  arch/arm64/include/asm/assembler.h | 1 +
>  arch/arm64/include/asm/pgtable.h   | 2 +-
>  2 files changed, 2 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
> index 23be85d93348..d5eb09fc5f8a 100644
> --- a/arch/arm64/include/asm/assembler.h
> +++ b/arch/arm64/include/asm/assembler.h
> @@ -609,6 +609,7 @@ alternative_endif
>  	and	\ttbr, \ttbr, #TTBR_BADDR_MASK_52
>  #else
>  	mov	\ttbr, \phys
> +	and	\ttbr, \ttbr, #TTBRx_EL1_BADDR_MASK
>  #endif
>  	.endm
>  
> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
> index 0944e296dd4a..c3110040c137 100644
> --- a/arch/arm64/include/asm/pgtable.h
> +++ b/arch/arm64/include/asm/pgtable.h
> @@ -1604,7 +1604,7 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
>  #ifdef CONFIG_ARM64_PA_BITS_52
>  #define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
>  #else
> -#define phys_to_ttbr(addr)	(addr)
> +#define phys_to_ttbr(addr)	(addr & TTBRx_EL1_BADDR_MASK)
>  #endif
>  
>  /*
> -- 
> 2.30.2
> 


^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 4/6] arm64/mm: Ensure correct 48 bit PA gets into TTBRx_EL1
  2025-11-04 15:17   ` Mark Rutland
@ 2025-11-05  3:35     ` Anshuman Khandual
  0 siblings, 0 replies; 12+ messages in thread
From: Anshuman Khandual @ 2025-11-05  3:35 UTC (permalink / raw)
  To: Mark Rutland
  Cc: linux-arm-kernel, Catalin Marinas, Will Deacon, Ryan Roberts,
	Ard Biesheuvel, linux-kernel



On 04/11/25 8:47 PM, Mark Rutland wrote:
> On Mon, Nov 03, 2025 at 05:26:16AM +0000, Anshuman Khandual wrote:
>> Even though the 48-bit PA representation in TTBRx_EL1 does not involve
>> shifting partial bits like the 52-bit variant does, it still needs to be
>> masked properly for correctness. Hence mask the 48-bit PA with
>> TTBRx_EL1_BADDR_MASK.
> 
> There is no need for the address "to be masked properly for
> correctness".
> 
> We added masking for 52-bit PAs due to the need to shuffle the bits
> around. There is no need for that when using 48-bit PAs, since the
> address must be below 2^48, and the address must be suitably aligned.
> 
> If any bits are set outside of this mask, that is a bug in the caller.
> 
> Mark.

Agreed - we probably should not be masking a wrong address passed in by the
caller just to proceed further with TTBRx_EL1, only for it to cause a problem
down the line.
> 
>> Cc: Catalin Marinas <catalin.marinas@arm.com>
>> Cc: Will Deacon <will@kernel.org>
>> Cc: linux-arm-kernel@lists.infradead.org
>> Cc: linux-kernel@vger.kernel.org
>> Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
>> ---
>>  arch/arm64/include/asm/assembler.h | 1 +
>>  arch/arm64/include/asm/pgtable.h   | 2 +-
>>  2 files changed, 2 insertions(+), 1 deletion(-)
>>
>> diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
>> index 23be85d93348..d5eb09fc5f8a 100644
>> --- a/arch/arm64/include/asm/assembler.h
>> +++ b/arch/arm64/include/asm/assembler.h
>> @@ -609,6 +609,7 @@ alternative_endif
>>  	and	\ttbr, \ttbr, #TTBR_BADDR_MASK_52
>>  #else
>>  	mov	\ttbr, \phys
>> +	and	\ttbr, \ttbr, #TTBRx_EL1_BADDR_MASK
>>  #endif
>>  	.endm
>>  
>> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
>> index 0944e296dd4a..c3110040c137 100644
>> --- a/arch/arm64/include/asm/pgtable.h
>> +++ b/arch/arm64/include/asm/pgtable.h
>> @@ -1604,7 +1604,7 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
>>  #ifdef CONFIG_ARM64_PA_BITS_52
>>  #define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
>>  #else
>> -#define phys_to_ttbr(addr)	(addr)
>> +#define phys_to_ttbr(addr)	(addr & TTBRx_EL1_BADDR_MASK)
>>  #endif
>>  
>>  /*
>> -- 
>> 2.30.2
>>



^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 0/6] arm64/mm: TTBRx_EL1 related changes
  2025-11-03  5:26 [PATCH 0/6] arm64/mm: TTBRx_EL1 related changes Anshuman Khandual
                   ` (5 preceding siblings ...)
  2025-11-03  5:26 ` [PATCH 6/6] arm64/mm: Describe TTBR1_BADDR_4852_OFFSET Anshuman Khandual
@ 2025-11-13  9:18 ` Anshuman Khandual
  2025-11-14  9:55   ` Mark Rutland
  6 siblings, 1 reply; 12+ messages in thread
From: Anshuman Khandual @ 2025-11-13  9:18 UTC (permalink / raw)
  To: linux-arm-kernel
  Cc: Catalin Marinas, Will Deacon, Ryan Roberts, Ard Biesheuvel,
	linux-kernel

On 03/11/25 10:56 AM, Anshuman Khandual wrote:
> This series contains some TTBRx_EL1 related changes aimed at standardizing
> TTBRx_EL1 register field accesses via the tools sysreg format, and also
> explains the 52-bit PA specific handling via a new macro and in-code comments.
> 
> This series applies on v6.18-rc4
> 
> Cc: Catalin Marinas <catalin.marinas@arm.com>
> Cc: Will Deacon <will@kernel.org>
> Cc: Ryan Roberts <ryan.roberts@arm.com>
> Cc: Ard Biesheuvel <ardb@kernel.org>
> Cc: linux-arm-kernel@lists.infradead.org
> Cc: linux-kernel@vger.kernel.org
> 
> Anshuman Khandual (6):
>   arm64/mm: Directly use TTBRx_EL1_ASID_MASK
>   arm64/mm: Directly use TTBRx_EL1_CnP
>   arm64/mm: Represent TTBR_BADDR_MASK_52 with TTBRx_EL1_BADDR_MASK
>   arm64/mm: Ensure correct 48 bit PA gets into TTBRx_EL1
>   arm64/mm: Describe 52 PA folding into TTBRx_EL1
>   arm64/mm: Describe TTBR1_BADDR_4852_OFFSET
> 
>  arch/arm64/include/asm/asm-uaccess.h   |  2 +-
>  arch/arm64/include/asm/assembler.h     |  3 ++-
>  arch/arm64/include/asm/mmu_context.h   |  2 +-
>  arch/arm64/include/asm/pgtable-hwdef.h | 23 ++++++++++++++++++++---
>  arch/arm64/include/asm/pgtable.h       |  5 +++--
>  arch/arm64/include/asm/uaccess.h       |  6 +++---
>  arch/arm64/kernel/entry.S              |  2 +-
>  arch/arm64/kernel/mte.c                |  4 ++--
>  arch/arm64/mm/context.c                |  8 ++++----
>  arch/arm64/mm/mmu.c                    |  2 +-
>  10 files changed, 38 insertions(+), 19 deletions(-)

Gentle ping. Besides [PATCH 4/6] (which can be dropped as indicated by Mark),
are there any concerns regarding the rest of these changes here?


^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 0/6] arm64/mm: TTBRx_EL1 related changes
  2025-11-13  9:18 ` [PATCH 0/6] arm64/mm: TTBRx_EL1 related changes Anshuman Khandual
@ 2025-11-14  9:55   ` Mark Rutland
  2025-11-19  1:07     ` Anshuman Khandual
  0 siblings, 1 reply; 12+ messages in thread
From: Mark Rutland @ 2025-11-14  9:55 UTC (permalink / raw)
  To: Anshuman Khandual
  Cc: linux-arm-kernel, Catalin Marinas, Will Deacon, Ryan Roberts,
	Ard Biesheuvel, linux-kernel

On Thu, Nov 13, 2025 at 02:48:44PM +0530, Anshuman Khandual wrote:
> On 03/11/25 10:56 AM, Anshuman Khandual wrote:
> > This series contains some TTBRx_EL1 related changes aimed at standardizing
> > TTBRx_EL1 register field accesses via the tools sysreg format, and also
> > explains the 52-bit PA specific handling via a new macro and in-code comments.
> > 
> > This series applies on v6.18-rc4
> > 
> > Cc: Catalin Marinas <catalin.marinas@arm.com>
> > Cc: Will Deacon <will@kernel.org>
> > Cc: Ryan Roberts <ryan.roberts@arm.com>
> > Cc: Ard Biesheuvel <ardb@kernel.org>
> > Cc: linux-arm-kernel@lists.infradead.org
> > Cc: linux-kernel@vger.kernel.org
> > 
> > Anshuman Khandual (6):
> >   arm64/mm: Directly use TTBRx_EL1_ASID_MASK
> >   arm64/mm: Directly use TTBRx_EL1_CnP
> >   arm64/mm: Represent TTBR_BADDR_MASK_52 with TTBRx_EL1_BADDR_MASK
> >   arm64/mm: Ensure correct 48 bit PA gets into TTBRx_EL1
> >   arm64/mm: Describe 52 PA folding into TTBRx_EL1
> >   arm64/mm: Describe TTBR1_BADDR_4852_OFFSET
> > 
> >  arch/arm64/include/asm/asm-uaccess.h   |  2 +-
> >  arch/arm64/include/asm/assembler.h     |  3 ++-
> >  arch/arm64/include/asm/mmu_context.h   |  2 +-
> >  arch/arm64/include/asm/pgtable-hwdef.h | 23 ++++++++++++++++++++---
> >  arch/arm64/include/asm/pgtable.h       |  5 +++--
> >  arch/arm64/include/asm/uaccess.h       |  6 +++---
> >  arch/arm64/kernel/entry.S              |  2 +-
> >  arch/arm64/kernel/mte.c                |  4 ++--
> >  arch/arm64/mm/context.c                |  8 ++++----
> >  arch/arm64/mm/mmu.c                    |  2 +-
> >  10 files changed, 38 insertions(+), 19 deletions(-)
> 
> Gentle ping. Besides [PATCH 4/6] (which can be dropped as indicated by Mark),
> are there any concerns regarding the rest of these changes here?

Overall I don't think this series actually improves anything; it just
shuffles things around, and leaves conversions half-done. I don't think
we need to take this as-is.

For patches 1 and 2, the changes would be fine if we were also getting
rid of TTBR_ASID_MASK and TTBR_CNP_BIT, but we aren't, apparently because
those are still used by KVM. It feels like those two patches should be
split into a separate series that *only* moves code over to the generated
sysreg definitions, also updates KVM, and removes the unused legacy
definitions.

For patch 3, I think the change makes the code harder to read, and
harder to understand, because there's no context to explain why we're
masking out a single bit. I don't think this is actually an improvement.
See below for related notes for patch 5.

For patch 4, as above, I think the patch can be dropped.

For patch 5, this could be OK, but we should define
TTBR_BADDR_52_PA_PIVOT as (51 - 5) and avoid the magic number entirely.
IMO it'd be nicer to just extract and re-insert the bits; I think our
current logic is unnecessarily micro-optimized so that this can be
implemented with a shifted-OR + AND, whereas I think we could burn a
temporary register and use BFX + BFI + AND, and that would be clearer as
to *which* bits we're trying to move.
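
A rough C rendering of that extract/re-insert idea (purely illustrative, not a
tested or proposed implementation) would be something like:

	#include <stdint.h>

	uint64_t phys_to_ttbr_52_alt(uint64_t phys)
	{
		uint64_t high = (phys >> 48) & 0xf;			/* UBFX: pull out PA[51:48] */
		uint64_t ttbr = phys & (((1ULL << 46) - 1) << 2);	/* AND: keep PA[47:2] in place */

		return ttbr | (high << 2);				/* BFI: place PA[51:48] in bits [5:2] */
	}

which makes it explicit that only PA[51:48] moves, into TTBR bits [5:2].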

For patch 6, I guess this is fine; I don't have a strong feeling either
way.

Mark.


^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 0/6] arm64/mm: TTBRx_EL1 related changes
  2025-11-14  9:55   ` Mark Rutland
@ 2025-11-19  1:07     ` Anshuman Khandual
  0 siblings, 0 replies; 12+ messages in thread
From: Anshuman Khandual @ 2025-11-19  1:07 UTC (permalink / raw)
  To: Mark Rutland
  Cc: linux-arm-kernel, Catalin Marinas, Will Deacon, Ryan Roberts,
	Ard Biesheuvel, linux-kernel

On 14/11/25 3:25 PM, Mark Rutland wrote:
> On Thu, Nov 13, 2025 at 02:48:44PM +0530, Anshuman Khandual wrote:
>> On 03/11/25 10:56 AM, Anshuman Khandual wrote:
>>> This series contains some TTBRx_EL1 related changes aimed at standardizing
>>> TTBRx_EL1 register field accesses via the tools sysreg format, and also
>>> explains the 52-bit PA specific handling via a new macro and in-code comments.
>>>
>>> This series applies on v6.18-rc4
>>>
>>> Cc: Catalin Marinas <catalin.marinas@arm.com>
>>> Cc: Will Deacon <will@kernel.org>
>>> Cc: Ryan Roberts <ryan.roberts@arm.com>
>>> Cc: Ard Biesheuvel <ardb@kernel.org>
>>> Cc: linux-arm-kernel@lists.infradead.org
>>> Cc: linux-kernel@vger.kernel.org
>>>
>>> Anshuman Khandual (6):
>>>   arm64/mm: Directly use TTBRx_EL1_ASID_MASK
>>>   arm64/mm: Directly use TTBRx_EL1_CnP
>>>   arm64/mm: Represent TTBR_BADDR_MASK_52 with TTBRx_EL1_BADDR_MASK
>>>   arm64/mm: Ensure correct 48 bit PA gets into TTBRx_EL1
>>>   arm64/mm: Describe 52 PA folding into TTBRx_EL1
>>>   arm64/mm: Describe TTBR1_BADDR_4852_OFFSET
>>>
>>>  arch/arm64/include/asm/asm-uaccess.h   |  2 +-
>>>  arch/arm64/include/asm/assembler.h     |  3 ++-
>>>  arch/arm64/include/asm/mmu_context.h   |  2 +-
>>>  arch/arm64/include/asm/pgtable-hwdef.h | 23 ++++++++++++++++++++---
>>>  arch/arm64/include/asm/pgtable.h       |  5 +++--
>>>  arch/arm64/include/asm/uaccess.h       |  6 +++---
>>>  arch/arm64/kernel/entry.S              |  2 +-
>>>  arch/arm64/kernel/mte.c                |  4 ++--
>>>  arch/arm64/mm/context.c                |  8 ++++----
>>>  arch/arm64/mm/mmu.c                    |  2 +-
>>>  10 files changed, 38 insertions(+), 19 deletions(-)
>>
>> Gentle ping. Besides [PATCH 4/6] (which can be dropped as indicated by Mark),
>> are there any concerns regarding the rest of these changes here?
> 
> Overall I don;t think this series actually improves anything; it just
> shuffles things around, and leaves conversions half-done. I don't think
> we must take this as-is.
> 
> For patches 1 and 2, the changes would be fine if we were also getting
> rid of TTBR_ASID_MASK and TTBR_CNP_BIT, but we don't, apparently because
> those are still used by KVM. It feels like those two patches should be
> split into a separate series that *only* moves code over to generate
> sysreg definitions, also updates KVM, and removes the unused legacy
> definitions.

Sure thing. I will spin out a separate series as suggested.
> 
> For patch 3, I think the change makes the code harder to read, and
> harder to understand, because there's no context to explain why we're
> masking out a single bit. I don't think this is actually an improvement.

We are masking out a single bit here as per ARM DDI 0487 L.B,
page D24-8540.

------------------------------------------------------------
When TTBR0_EL1.BADDR represents a 52-bit addresses, all of the following apply:
• Bits A[51:48] of the stage 1 translation table base address bits are in register bits[5:2].
• Register bit[1] is RES0.
• The smallest permitted value of x is 6.
• When x>6, register bits[(x-1):6] are RES0.
------------------------------------------------------------

Isn't deriving the applicable address mask in the 52-bit PA context
from the original base address mask better than the hard-coded mask
GENMASK_ULL(47, 2)? Just wondering - would it be better to add some
more context in the comment above it?
> See below for related notes for patch 5.
> 
> For patch 4, as above, I think the patch can be dropped.

Agreed.
> 
> For patch 5, this could be OK, but we should define
> TTBR_BADDR_52_PA_PIVOT as (51 - 5) and avoid the magic number entirely.

Agreed.
> IMO it'd be nicer to just extract and re-insert the bits; I think our
> current logic is unnecessarily micro-optimized so that this can be
> implemented with a shifted-OR + AND, whereas I think we could burn a
> temporary register and use BFX + BFI + AND, and that would be clearer as
> to *which* bits we're trying to move.

Will that only be applicable to phys_to_ttbr()'s assembly variant,
while the macro definition in <asm/pgtable.h>, which does the right
shifted OR followed by AND, still remains unchanged?
> 
> For patch 6, I guess this is fine; I don't have a strong feeling either
> way.

IMHO it would be great to have this patch included.


^ permalink raw reply	[flat|nested] 12+ messages in thread

end of thread, other threads:[~2025-11-19  1:07 UTC | newest]

Thread overview: 12+ messages
2025-11-03  5:26 [PATCH 0/6] arm64/mm: TTBRx_EL1 related changes Anshuman Khandual
2025-11-03  5:26 ` [PATCH 1/6] arm64/mm: Directly use TTBRx_EL1_ASID_MASK Anshuman Khandual
2025-11-03  5:26 ` [PATCH 2/6] arm64/mm: Directly use TTBRx_EL1_CnP Anshuman Khandual
2025-11-03  5:26 ` [PATCH 3/6] arm64/mm: Represent TTBR_BADDR_MASK_52 with TTBRx_EL1_BADDR_MASK Anshuman Khandual
2025-11-03  5:26 ` [PATCH 4/6] arm64/mm: Ensure correct 48 bit PA gets into TTBRx_EL1 Anshuman Khandual
2025-11-04 15:17   ` Mark Rutland
2025-11-05  3:35     ` Anshuman Khandual
2025-11-03  5:26 ` [PATCH 5/6] arm64/mm: Describe 52 PA folding " Anshuman Khandual
2025-11-03  5:26 ` [PATCH 6/6] arm64/mm: Describe TTBR1_BADDR_4852_OFFSET Anshuman Khandual
2025-11-13  9:18 ` [PATCH 0/6] arm64/mm: TTBRx_EL1 related changes Anshuman Khandual
2025-11-14  9:55   ` Mark Rutland
2025-11-19  1:07     ` Anshuman Khandual
