linux-arm-kernel.lists.infradead.org archive mirror
* [PATCH v4 0/3] fix wrong usage of memory allocation APIs under PREEMPT_RT in arm64
@ 2026-01-02 15:07 Yeoreum Yun
  2026-01-02 15:07 ` [PATCH v4 1/3] arm64: mmu: introduce pgtable_alloc_t Yeoreum Yun
                   ` (2 more replies)
  0 siblings, 3 replies; 9+ messages in thread
From: Yeoreum Yun @ 2026-01-02 15:07 UTC (permalink / raw)
  To: catalin.marinas, will, ryan.roberts, akpm, david, kevin.brodsky,
	quic_zhenhuah, dev.jain, yang, chaitanyas.prakash, bigeasy,
	clrkwllms, rostedt, lorenzo.stoakes, ardb, jackmanb, vbabka,
	mhocko
  Cc: linux-arm-kernel, linux-kernel, linux-rt-devel, Yeoreum Yun

Under PREEMPT_RT, calling the generic memory allocation/free APIs
(e.g. __get_free_pages(), pgtable_alloc(), free_pages()) with
preemption disabled is not allowed; only the _nolock() variants may
be used there. The generic paths may acquire a spin lock, which
becomes a sleeping lock on RT, potentially causing a sleep during
page allocation (see Documentation/core-api/real-time/differences.rst,
"Memory allocation" section).

However, on arm64, linear_map_split_to_ptes() and
__kpti_install_ng_mappings(), which are called by the stopper thread
via stop_machine(), use the generic memory allocation/free APIs.
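
As a rough sketch of the problematic pattern (hypothetical names,
condensed; not the actual call chain):

#include <linux/gfp.h>
#include <linux/stop_machine.h>

/*
 * stop_machine() callbacks run in stopper context with preemption
 * disabled on every CPU. On PREEMPT_RT the page allocator's internal
 * spin_lock() is a sleeping lock, so even a GFP_ATOMIC allocation may
 * sleep here.
 */
static int split_cb(void *unused)
{
	unsigned long page = __get_free_pages(GFP_ATOMIC | __GFP_ZERO, 0);

	if (!page)
		return -ENOMEM;
	/* ... use the page as a newly installed page table ... */
	free_pages(page, 0);	/* freeing takes the same locks */
	return 0;
}

/* ... stop_machine(split_cb, NULL, cpu_online_mask); ... */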

This patchset fixes the problem and is based on v6.19-rc1.

Patch History
==============
from v3 to v4:
  - rebased to v6.19-rc3
  - introduced pgtable_alloc_t
  - addressed several suggestions from Ryan Roberts
  - https://lore.kernel.org/all/20251218194750.395301-1-yeoreum.yun@arm.com/

from v2 to v3:
  - removed split-mode and split_args;
    pass the proper function pointer while splitting.
  - renamed functions.
  - https://lore.kernel.org/all/20251217182007.2345700-1-yeoreum.yun@arm.com/

from v1 to v2:
  - dropped pagetable_alloc_nolock(),
    following Ryan Roberts' suggestion.
  - https://lore.kernel.org/all/20251212161832.2067134-1-yeoreum.yun@arm.com/


Yeoreum Yun (3):
  arm64: mmu: introduce pgtable_alloc_t
  arm64: mmu: avoid allocating pages while splitting the linear mapping
  arm64: mmu: avoid allocating pages while installing ng-mapping for
    KPTI

 arch/arm64/mm/mmu.c | 243 ++++++++++++++++++++++++++++++++++----------
 1 file changed, 188 insertions(+), 55 deletions(-)





* [PATCH v4 1/3] arm64: mmu: introduce pgtable_alloc_t
  2026-01-02 15:07 [PATCH v4 0/3] fix wrong usage of memory allocation APIs under PREEMPT_RT in arm64 Yeoreum Yun
@ 2026-01-02 15:07 ` Yeoreum Yun
  2026-01-02 15:28   ` Ryan Roberts
  2026-01-02 15:07 ` [PATCH v4 2/3] arm64: mmu: avoid allocating pages while splitting the linear mapping Yeoreum Yun
  2026-01-02 15:07 ` [PATCH v4 3/3] arm64: mmu: avoid allocating pages while installing ng-mapping for KPTI Yeoreum Yun
  2 siblings, 1 reply; 9+ messages in thread
From: Yeoreum Yun @ 2026-01-02 15:07 UTC (permalink / raw)
  To: catalin.marinas, will, ryan.roberts, akpm, david, kevin.brodsky,
	quic_zhenhuah, dev.jain, yang, chaitanyas.prakash, bigeasy,
	clrkwllms, rostedt, lorenzo.stoakes, ardb, jackmanb, vbabka,
	mhocko
  Cc: linux-arm-kernel, linux-kernel, linux-rt-devel, Yeoreum Yun

This is a preparation patch for using preallocated page tables
in linear_map_split_to_ptes().

Define the pgtable_alloc_t type for the callback used by
create_XXX_mapping().

Signed-off-by: Yeoreum Yun <yeoreum.yun@arm.com>
---
 arch/arm64/mm/mmu.c | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 8e1d80a7033e..4b4908ae189b 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -74,6 +74,8 @@ EXPORT_SYMBOL(empty_zero_page);
 static DEFINE_SPINLOCK(swapper_pgdir_lock);
 static DEFINE_MUTEX(fixmap_lock);
 
+typedef phys_addr_t (pgtable_alloc_t)(enum pgtable_type);
+
 void noinstr set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
 {
 	pgd_t *fixmap_pgdp;
@@ -197,7 +199,7 @@ static void init_pte(pte_t *ptep, unsigned long addr, unsigned long end,
 static int alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
 			       unsigned long end, phys_addr_t phys,
 			       pgprot_t prot,
-			       phys_addr_t (*pgtable_alloc)(enum pgtable_type),
+			       pgtable_alloc_t pgtable_alloc,
 			       int flags)
 {
 	unsigned long next;
@@ -252,7 +254,7 @@ static int alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
 
 static int init_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end,
 		    phys_addr_t phys, pgprot_t prot,
-		    phys_addr_t (*pgtable_alloc)(enum pgtable_type), int flags)
+		    pgtable_alloc_t pgtable_alloc, int flags)
 {
 	unsigned long next;
 
@@ -292,7 +294,7 @@ static int init_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end,
 static int alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
 			       unsigned long end, phys_addr_t phys,
 			       pgprot_t prot,
-			       phys_addr_t (*pgtable_alloc)(enum pgtable_type),
+			       pgtable_alloc_t pgtable_alloc,
 			       int flags)
 {
 	int ret;
@@ -349,7 +351,7 @@ static int alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
 
 static int alloc_init_pud(p4d_t *p4dp, unsigned long addr, unsigned long end,
 			  phys_addr_t phys, pgprot_t prot,
-			  phys_addr_t (*pgtable_alloc)(enum pgtable_type),
+			  pgtable_alloc_t pgtable_alloc,
 			  int flags)
 {
 	int ret = 0;
@@ -415,7 +417,7 @@ static int alloc_init_pud(p4d_t *p4dp, unsigned long addr, unsigned long end,
 
 static int alloc_init_p4d(pgd_t *pgdp, unsigned long addr, unsigned long end,
 			  phys_addr_t phys, pgprot_t prot,
-			  phys_addr_t (*pgtable_alloc)(enum pgtable_type),
+			  pgtable_alloc_t pgtable_alloc,
 			  int flags)
 {
 	int ret;
@@ -467,7 +469,7 @@ static int alloc_init_p4d(pgd_t *pgdp, unsigned long addr, unsigned long end,
 static int __create_pgd_mapping_locked(pgd_t *pgdir, phys_addr_t phys,
 				       unsigned long virt, phys_addr_t size,
 				       pgprot_t prot,
-				       phys_addr_t (*pgtable_alloc)(enum pgtable_type),
+				       pgtable_alloc_t pgtable_alloc,
 				       int flags)
 {
 	int ret;
@@ -500,7 +502,7 @@ static int __create_pgd_mapping_locked(pgd_t *pgdir, phys_addr_t phys,
 static int __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
 				unsigned long virt, phys_addr_t size,
 				pgprot_t prot,
-				phys_addr_t (*pgtable_alloc)(enum pgtable_type),
+				pgtable_alloc_t pgtable_alloc,
 				int flags)
 {
 	int ret;
@@ -516,7 +518,7 @@ static int __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
 static void early_create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
 				     unsigned long virt, phys_addr_t size,
 				     pgprot_t prot,
-				     phys_addr_t (*pgtable_alloc)(enum pgtable_type),
+				     pgtable_alloc_t pgtable_alloc,
 				     int flags)
 {
 	int ret;




* [PATCH v4 2/3] arm64: mmu: avoid allocating pages while splitting the linear mapping
  2026-01-02 15:07 [PATCH v4 0/3] fix wrong usage of memory allocation APIs under PREEMPT_RT in arm64 Yeoreum Yun
  2026-01-02 15:07 ` [PATCH v4 1/3] arm64: mmu: introduce pgtable_alloc_t Yeoreum Yun
@ 2026-01-02 15:07 ` Yeoreum Yun
  2026-01-02 15:44   ` Ryan Roberts
  2026-01-02 15:07 ` [PATCH v4 3/3] arm64: mmu: avoid allocating pages while installing ng-mapping for KPTI Yeoreum Yun
  2 siblings, 1 reply; 9+ messages in thread
From: Yeoreum Yun @ 2026-01-02 15:07 UTC (permalink / raw)
  To: catalin.marinas, will, ryan.roberts, akpm, david, kevin.brodsky,
	quic_zhenhuah, dev.jain, yang, chaitanyas.prakash, bigeasy,
	clrkwllms, rostedt, lorenzo.stoakes, ardb, jackmanb, vbabka,
	mhocko
  Cc: linux-arm-kernel, linux-kernel, linux-rt-devel, Yeoreum Yun

linear_map_split_to_ptes() currently allocates page tables with
GFP_ATOMIC while splitting the linear mapping into PTEs under
stop_machine().

This is fine for non-PREEMPT_RT configurations. However, it becomes
problematic on PREEMPT_RT, because the generic memory allocation/free
APIs (e.g. pgtable_alloc(), __get_free_pages()) cannot be called from
a non-preemptible context, except for the _nolock() variants. The
generic allocation/free paths may sleep on PREEMPT_RT because they
rely on spin_lock(), which becomes a sleeping lock there.

In other words, even calling pgtable_alloc() with GFP_ATOMIC is not
permitted in linear_map_split_to_ptes() when it is executed by the
stopper thread, where preemption is disabled on PREEMPT_RT.

To address this, the required number of page tables is first counted
and preallocated, and the preallocated page tables are then consumed
while splitting the linear mapping in linear_map_split_to_ptes().
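
In outline, the resulting flow is (condensed from the diff below):

  linear_map_maybe_split_to_ptes()
    -> linear_map_prealloc_split_pgtables()   /* may sleep; GFP_KERNEL */
    -> stop_machine(linear_map_split_to_ptes, ...)
         -> range_split_to_ptes(..., pgd_pgtable_get_preallocated)
            /* only consumes split_pgtables[]; never allocates */
    -> linear_map_free_split_pgtables()       /* frees any leftovers */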

Fixes: 3df6979d222b ("arm64: mm: split linear mapping if BBML2 unsupported on secondary CPUs")
Signed-off-by: Yeoreum Yun <yeoreum.yun@arm.com>
---
 arch/arm64/mm/mmu.c | 204 +++++++++++++++++++++++++++++++++++---------
 1 file changed, 166 insertions(+), 38 deletions(-)

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 4b4908ae189b..cc086e91a506 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -529,18 +529,14 @@ static void early_create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
 		panic("Failed to create page tables\n");
 }
 
-static phys_addr_t __pgd_pgtable_alloc(struct mm_struct *mm, gfp_t gfp,
-				       enum pgtable_type pgtable_type)
-{
-	/* Page is zeroed by init_clear_pgtable() so don't duplicate effort. */
-	struct ptdesc *ptdesc = pagetable_alloc(gfp & ~__GFP_ZERO, 0);
-	phys_addr_t pa;
-
-	if (!ptdesc)
-		return INVALID_PHYS_ADDR;
-
-	pa = page_to_phys(ptdesc_page(ptdesc));
+static struct ptdesc **split_pgtables;
+static unsigned long split_pgtables_count;
+static unsigned long split_pgtables_idx;
 
+static __always_inline void __pgd_pgtable_init(struct mm_struct *mm,
+					       struct ptdesc *ptdesc,
+					       enum pgtable_type pgtable_type)
+{
 	switch (pgtable_type) {
 	case TABLE_PTE:
 		BUG_ON(!pagetable_pte_ctor(mm, ptdesc));
@@ -555,26 +551,49 @@ static phys_addr_t __pgd_pgtable_alloc(struct mm_struct *mm, gfp_t gfp,
 		pagetable_p4d_ctor(ptdesc);
 		break;
 	}
-
-	return pa;
 }
 
-static phys_addr_t
-pgd_pgtable_alloc_init_mm_gfp(enum pgtable_type pgtable_type, gfp_t gfp)
+static phys_addr_t __pgd_pgtable_alloc(struct mm_struct *mm,
+				       enum pgtable_type pgtable_type)
 {
-	return __pgd_pgtable_alloc(&init_mm, gfp, pgtable_type);
+	/* Page is zeroed by init_clear_pgtable() so don't duplicate effort. */
+	struct ptdesc *ptdesc = pagetable_alloc(GFP_PGTABLE_KERNEL & ~__GFP_ZERO, 0);
+
+	if (!ptdesc)
+		return INVALID_PHYS_ADDR;
+
+	__pgd_pgtable_init(mm, ptdesc, pgtable_type);
+
+	return page_to_phys(ptdesc_page(ptdesc));
 }
 
-static phys_addr_t __maybe_unused
+static phys_addr_t
 pgd_pgtable_alloc_init_mm(enum pgtable_type pgtable_type)
 {
-	return pgd_pgtable_alloc_init_mm_gfp(pgtable_type, GFP_PGTABLE_KERNEL);
+	return __pgd_pgtable_alloc(&init_mm, pgtable_type);
 }
 
 static phys_addr_t
 pgd_pgtable_alloc_special_mm(enum pgtable_type pgtable_type)
 {
-	return  __pgd_pgtable_alloc(NULL, GFP_PGTABLE_KERNEL, pgtable_type);
+	return  __pgd_pgtable_alloc(NULL, pgtable_type);
+}
+
+static phys_addr_t
+pgd_pgtable_get_preallocated(enum pgtable_type pgtable_type)
+{
+	struct ptdesc *ptdesc;
+
+	if (WARN_ON(split_pgtables_idx >= split_pgtables_count))
+		return INVALID_PHYS_ADDR;
+
+	ptdesc = split_pgtables[split_pgtables_idx++];
+	if (!ptdesc)
+		return INVALID_PHYS_ADDR;
+
+	__pgd_pgtable_init(&init_mm, ptdesc, pgtable_type);
+
+	return page_to_phys(ptdesc_page(ptdesc));
 }
 
 static void split_contpte(pte_t *ptep)
@@ -586,7 +605,9 @@ static void split_contpte(pte_t *ptep)
 		__set_pte(ptep, pte_mknoncont(__ptep_get(ptep)));
 }
 
-static int split_pmd(pmd_t *pmdp, pmd_t pmd, gfp_t gfp, bool to_cont)
+static int split_pmd(pmd_t *pmdp, pmd_t pmd,
+		     pgtable_alloc_t pgtable_alloc,
+		     bool to_cont)
 {
 	pmdval_t tableprot = PMD_TYPE_TABLE | PMD_TABLE_UXN | PMD_TABLE_AF;
 	unsigned long pfn = pmd_pfn(pmd);
@@ -595,7 +616,7 @@ static int split_pmd(pmd_t *pmdp, pmd_t pmd, gfp_t gfp, bool to_cont)
 	pte_t *ptep;
 	int i;
 
-	pte_phys = pgd_pgtable_alloc_init_mm_gfp(TABLE_PTE, gfp);
+	pte_phys = pgtable_alloc(TABLE_PTE);
 	if (pte_phys == INVALID_PHYS_ADDR)
 		return -ENOMEM;
 	ptep = (pte_t *)phys_to_virt(pte_phys);
@@ -630,7 +651,9 @@ static void split_contpmd(pmd_t *pmdp)
 		set_pmd(pmdp, pmd_mknoncont(pmdp_get(pmdp)));
 }
 
-static int split_pud(pud_t *pudp, pud_t pud, gfp_t gfp, bool to_cont)
+static int split_pud(pud_t *pudp, pud_t pud,
+		     pgtable_alloc_t pgtable_alloc,
+		     bool to_cont)
 {
 	pudval_t tableprot = PUD_TYPE_TABLE | PUD_TABLE_UXN | PUD_TABLE_AF;
 	unsigned int step = PMD_SIZE >> PAGE_SHIFT;
@@ -640,7 +663,7 @@ static int split_pud(pud_t *pudp, pud_t pud, gfp_t gfp, bool to_cont)
 	pmd_t *pmdp;
 	int i;
 
-	pmd_phys = pgd_pgtable_alloc_init_mm_gfp(TABLE_PMD, gfp);
+	pmd_phys = pgtable_alloc(TABLE_PMD);
 	if (pmd_phys == INVALID_PHYS_ADDR)
 		return -ENOMEM;
 	pmdp = (pmd_t *)phys_to_virt(pmd_phys);
@@ -709,7 +732,7 @@ static int split_kernel_leaf_mapping_locked(unsigned long addr)
 	if (!pud_present(pud))
 		goto out;
 	if (pud_leaf(pud)) {
-		ret = split_pud(pudp, pud, GFP_PGTABLE_KERNEL, true);
+		ret = split_pud(pudp, pud, pgd_pgtable_alloc_init_mm, true);
 		if (ret)
 			goto out;
 	}
@@ -734,7 +757,7 @@ static int split_kernel_leaf_mapping_locked(unsigned long addr)
 		 */
 		if (ALIGN_DOWN(addr, PMD_SIZE) == addr)
 			goto out;
-		ret = split_pmd(pmdp, pmd, GFP_PGTABLE_KERNEL, true);
+		ret = split_pmd(pmdp, pmd, pgd_pgtable_alloc_init_mm, true);
 		if (ret)
 			goto out;
 	}
@@ -832,12 +855,12 @@ int split_kernel_leaf_mapping(unsigned long start, unsigned long end)
 static int split_to_ptes_pud_entry(pud_t *pudp, unsigned long addr,
 				   unsigned long next, struct mm_walk *walk)
 {
-	gfp_t gfp = *(gfp_t *)walk->private;
+	pgtable_alloc_t *pgtable_alloc = walk->private;
 	pud_t pud = pudp_get(pudp);
 	int ret = 0;
 
 	if (pud_leaf(pud))
-		ret = split_pud(pudp, pud, gfp, false);
+		ret = split_pud(pudp, pud, pgtable_alloc, false);
 
 	return ret;
 }
@@ -845,14 +868,14 @@ static int split_to_ptes_pud_entry(pud_t *pudp, unsigned long addr,
 static int split_to_ptes_pmd_entry(pmd_t *pmdp, unsigned long addr,
 				   unsigned long next, struct mm_walk *walk)
 {
-	gfp_t gfp = *(gfp_t *)walk->private;
+	pgtable_alloc_t *pgtable_alloc = walk->private;
 	pmd_t pmd = pmdp_get(pmdp);
 	int ret = 0;
 
 	if (pmd_leaf(pmd)) {
 		if (pmd_cont(pmd))
 			split_contpmd(pmdp);
-		ret = split_pmd(pmdp, pmd, gfp, false);
+		ret = split_pmd(pmdp, pmd, pgtable_alloc, false);
 
 		/*
 		 * We have split the pmd directly to ptes so there is no need to
@@ -881,13 +904,15 @@ static const struct mm_walk_ops split_to_ptes_ops = {
 	.pte_entry	= split_to_ptes_pte_entry,
 };
 
-static int range_split_to_ptes(unsigned long start, unsigned long end, gfp_t gfp)
+static int range_split_to_ptes(unsigned long start, unsigned long end,
+			       pgtable_alloc_t pgtable_alloc)
 {
 	int ret;
 
 	arch_enter_lazy_mmu_mode();
 	ret = walk_kernel_page_table_range_lockless(start, end,
-					&split_to_ptes_ops, NULL, &gfp);
+						    &split_to_ptes_ops, NULL,
+						    pgtable_alloc);
 	arch_leave_lazy_mmu_mode();
 
 	return ret;
@@ -904,6 +929,103 @@ static void __init init_idmap_kpti_bbml2_flag(void)
 	smp_mb();
 }
 
+static int __init
+collect_to_split_pud_entry(pud_t *pudp, unsigned long addr,
+			   unsigned long next, struct mm_walk *walk)
+{
+	pud_t pud = pudp_get(pudp);
+
+	if (pud_leaf(pud)) {
+		split_pgtables_count += 1 + PTRS_PER_PMD;
+		walk->action = ACTION_CONTINUE;
+	}
+
+	return 0;
+}
+
+static int __init
+collect_to_split_pmd_entry(pmd_t *pmdp, unsigned long addr,
+			   unsigned long next, struct mm_walk *walk)
+{
+	pmd_t pmd = pmdp_get(pmdp);
+
+	if (pmd_leaf(pmd))
+		split_pgtables_count++;
+
+	walk->action = ACTION_CONTINUE;
+
+	return 0;
+}
+
+static void __init linear_map_free_split_pgtables(void)
+{
+	int i;
+
+	if (!split_pgtables_count || !split_pgtables)
+		goto skip_free;
+
+	for (i = split_pgtables_idx; i < split_pgtables_count; i++) {
+		if (split_pgtables[i])
+			pagetable_free(split_pgtables[i]);
+	}
+
+	kvfree(split_pgtables);
+
+skip_free:
+	split_pgtables = NULL;
+	split_pgtables_count = 0;
+	split_pgtables_idx = 0;
+}
+
+static int __init linear_map_prealloc_split_pgtables(void)
+{
+	int ret, i;
+	unsigned long lstart = _PAGE_OFFSET(vabits_actual);
+	unsigned long lend = PAGE_END;
+	unsigned long kstart = (unsigned long)lm_alias(_stext);
+	unsigned long kend = (unsigned long)lm_alias(__init_begin);
+
+	const struct mm_walk_ops collect_to_split_ops = {
+		.pud_entry	= collect_to_split_pud_entry,
+		.pmd_entry	= collect_to_split_pmd_entry
+	};
+
+	split_pgtables_idx = 0;
+	split_pgtables_count = 0;
+
+	ret = walk_kernel_page_table_range_lockless(lstart, kstart,
+						    &collect_to_split_ops,
+						    NULL, NULL);
+	if (!ret)
+		ret = walk_kernel_page_table_range_lockless(kend, lend,
+							    &collect_to_split_ops,
+							    NULL, NULL);
+	if (ret || !split_pgtables_count)
+		goto error;
+
+	ret = -ENOMEM;
+
+	split_pgtables = kvmalloc(split_pgtables_count * sizeof(struct ptdesc *),
+				  GFP_KERNEL | __GFP_ZERO);
+	if (!split_pgtables)
+		goto error;
+
+	for (i = 0; i < split_pgtables_count; i++) {
+		/* The page table will be filled during splitting, so zeroing it is unnecessary. */
+		split_pgtables[i] = pagetable_alloc(GFP_PGTABLE_KERNEL & ~__GFP_ZERO, 0);
+		if (!split_pgtables[i])
+			goto error;
+	}
+
+	ret = 0;
+
+error:
+	if (ret)
+		linear_map_free_split_pgtables();
+
+	return ret;
+}
+
 static int __init linear_map_split_to_ptes(void *__unused)
 {
 	/*
@@ -929,9 +1051,9 @@ static int __init linear_map_split_to_ptes(void *__unused)
 		 * PTE. The kernel alias remains static throughout runtime so
 		 * can continue to be safely mapped with large mappings.
 		 */
-		ret = range_split_to_ptes(lstart, kstart, GFP_ATOMIC);
+		ret = range_split_to_ptes(lstart, kstart, pgd_pgtable_get_preallocated);
 		if (!ret)
-			ret = range_split_to_ptes(kend, lend, GFP_ATOMIC);
+			ret = range_split_to_ptes(kend, lend, pgd_pgtable_get_preallocated);
 		if (ret)
 			panic("Failed to split linear map\n");
 		flush_tlb_kernel_range(lstart, lend);
@@ -964,10 +1086,16 @@ static int __init linear_map_split_to_ptes(void *__unused)
 
 void __init linear_map_maybe_split_to_ptes(void)
 {
-	if (linear_map_requires_bbml2 && !system_supports_bbml2_noabort()) {
-		init_idmap_kpti_bbml2_flag();
-		stop_machine(linear_map_split_to_ptes, NULL, cpu_online_mask);
-	}
+	if (!linear_map_requires_bbml2 || system_supports_bbml2_noabort())
+		return;
+
+	if (linear_map_prealloc_split_pgtables())
+		panic("Failed to split linear map\n");
+
+	init_idmap_kpti_bbml2_flag();
+	stop_machine(linear_map_split_to_ptes, NULL, cpu_online_mask);
+
+	linear_map_free_split_pgtables();
 }
 
 /*
@@ -1098,7 +1226,7 @@ bool arch_kfence_init_pool(void)
 		return true;
 
 	mutex_lock(&pgtable_split_lock);
-	ret = range_split_to_ptes(start, end, GFP_PGTABLE_KERNEL);
+	ret = range_split_to_ptes(start, end, pgd_pgtable_alloc_init_mm);
 	mutex_unlock(&pgtable_split_lock);
 
 	/*




* [PATCH v4 3/3] arm64: mmu: avoid allocating pages while installing ng-mapping for KPTI
  2026-01-02 15:07 [PATCH v4 0/3] fix wrong usage of memory allocation APIs under PREEMPT_RT in arm64 Yeoreum Yun
  2026-01-02 15:07 ` [PATCH v4 1/3] arm64: mmu: introduce pgtable_alloc_t Yeoreum Yun
  2026-01-02 15:07 ` [PATCH v4 2/3] arm64: mmu: avoid allocating pages while splitting the linear mapping Yeoreum Yun
@ 2026-01-02 15:07 ` Yeoreum Yun
  2 siblings, 0 replies; 9+ messages in thread
From: Yeoreum Yun @ 2026-01-02 15:07 UTC (permalink / raw)
  To: catalin.marinas, will, ryan.roberts, akpm, david, kevin.brodsky,
	quic_zhenhuah, dev.jain, yang, chaitanyas.prakash, bigeasy,
	clrkwllms, rostedt, lorenzo.stoakes, ardb, jackmanb, vbabka,
	mhocko
  Cc: linux-arm-kernel, linux-kernel, linux-rt-devel, Yeoreum Yun

__kpti_install_ng_mappings() currently allocates a temporary PGD
with GFP_ATOMIC while installing the nG mappings for KPTI under
stop_machine().

This is fine in the non-PREEMPT_RT case. However, it becomes a problem
under PREEMPT_RT because the generic memory allocation/free APIs
(e.g. pgtable_alloc(), __get_free_pages()) cannot be invoked in a
non-preemptible context, except for the *_nolock() variants. These
generic allocators may sleep due to their use of spin_lock(), which
becomes a sleeping lock on RT.

In other words, calling __get_free_pages(), even with GFP_ATOMIC,
is not allowed in __kpti_install_ng_mappings(), which is executed by
the stopper thread where preemption is disabled under PREEMPT_RT.

To address this, preallocate the page needed for the temporary PGD
before invoking __kpti_install_ng_mappings() via stop_machine().

Fixes: 47546a1912fc ("arm64: mm: install KPTI nG mappings with MMU enabled")
Signed-off-by: Yeoreum Yun <yeoreum.yun@arm.com>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
---
 arch/arm64/mm/mmu.c | 21 ++++++++++++---------
 1 file changed, 12 insertions(+), 9 deletions(-)

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index cc086e91a506..7a7804dba381 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1362,7 +1362,7 @@ static phys_addr_t __init kpti_ng_pgd_alloc(enum pgtable_type type)
 	return kpti_ng_temp_alloc;
 }
 
-static int __init __kpti_install_ng_mappings(void *__unused)
+static int __init __kpti_install_ng_mappings(void *data)
 {
 	typedef void (kpti_remap_fn)(int, int, phys_addr_t, unsigned long);
 	extern kpti_remap_fn idmap_kpti_install_ng_mappings;
@@ -1370,10 +1370,9 @@ static int __init __kpti_install_ng_mappings(void *__unused)
 
 	int cpu = smp_processor_id();
 	int levels = CONFIG_PGTABLE_LEVELS;
-	int order = order_base_2(levels);
 	u64 kpti_ng_temp_pgd_pa = 0;
 	pgd_t *kpti_ng_temp_pgd;
-	u64 alloc = 0;
+	u64 alloc = *(u64 *)data;
 
 	if (levels == 5 && !pgtable_l5_enabled())
 		levels = 4;
@@ -1384,8 +1383,6 @@ static int __init __kpti_install_ng_mappings(void *__unused)
 
 	if (!cpu) {
 		int ret;
-
-		alloc = __get_free_pages(GFP_ATOMIC | __GFP_ZERO, order);
 		kpti_ng_temp_pgd = (pgd_t *)(alloc + (levels - 1) * PAGE_SIZE);
 		kpti_ng_temp_alloc = kpti_ng_temp_pgd_pa = __pa(kpti_ng_temp_pgd);
 
@@ -1416,16 +1413,16 @@ static int __init __kpti_install_ng_mappings(void *__unused)
 	remap_fn(cpu, num_online_cpus(), kpti_ng_temp_pgd_pa, KPTI_NG_TEMP_VA);
 	cpu_uninstall_idmap();
 
-	if (!cpu) {
-		free_pages(alloc, order);
+	if (!cpu)
 		arm64_use_ng_mappings = true;
-	}
 
 	return 0;
 }
 
 void __init kpti_install_ng_mappings(void)
 {
+	int order = order_base_2(CONFIG_PGTABLE_LEVELS);
+	u64 alloc;
 	/* Check whether KPTI is going to be used */
 	if (!arm64_kernel_unmapped_at_el0())
 		return;
@@ -1438,8 +1435,14 @@ void __init kpti_install_ng_mappings(void)
 	if (arm64_use_ng_mappings)
 		return;
 
+	alloc = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+	if (!alloc)
+		panic("Failed to alloc page tables\n");
+
 	init_idmap_kpti_bbml2_flag();
-	stop_machine(__kpti_install_ng_mappings, NULL, cpu_online_mask);
+	stop_machine(__kpti_install_ng_mappings, &alloc, cpu_online_mask);
+
+	free_pages(alloc, order);
 }
 
 static pgprot_t __init kernel_exec_prot(void)




* Re: [PATCH v4 1/3] arm64: mmu: introduce pgtable_alloc_t
  2026-01-02 15:07 ` [PATCH v4 1/3] arm64: mmu: introduce pgtable_alloc_t Yeoreum Yun
@ 2026-01-02 15:28   ` Ryan Roberts
  2026-01-02 16:16     ` Yeoreum Yun
  0 siblings, 1 reply; 9+ messages in thread
From: Ryan Roberts @ 2026-01-02 15:28 UTC (permalink / raw)
  To: Yeoreum Yun, catalin.marinas, will, akpm, david, kevin.brodsky,
	quic_zhenhuah, dev.jain, yang, chaitanyas.prakash, bigeasy,
	clrkwllms, rostedt, lorenzo.stoakes, ardb, jackmanb, vbabka,
	mhocko
  Cc: linux-arm-kernel, linux-kernel, linux-rt-devel

On 02/01/2026 15:07, Yeoreum Yun wrote:
> This is a preparation patch for using preallocated page tables
> in linear_map_split_to_ptes().
>
> Define the pgtable_alloc_t type for the callback used by
> create_XXX_mapping().
> 
> Signed-off-by: Yeoreum Yun <yeoreum.yun@arm.com>

Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>

> ---
>  arch/arm64/mm/mmu.c | 18 ++++++++++--------
>  1 file changed, 10 insertions(+), 8 deletions(-)
> 
> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index 8e1d80a7033e..4b4908ae189b 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -74,6 +74,8 @@ EXPORT_SYMBOL(empty_zero_page);
>  static DEFINE_SPINLOCK(swapper_pgdir_lock);
>  static DEFINE_MUTEX(fixmap_lock);
>  
> +typedef phys_addr_t (pgtable_alloc_t)(enum pgtable_type);
> +
>  void noinstr set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
>  {
>  	pgd_t *fixmap_pgdp;
> @@ -197,7 +199,7 @@ static void init_pte(pte_t *ptep, unsigned long addr, unsigned long end,
>  static int alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
>  			       unsigned long end, phys_addr_t phys,
>  			       pgprot_t prot,
> -			       phys_addr_t (*pgtable_alloc)(enum pgtable_type),
> +			       pgtable_alloc_t pgtable_alloc,
>  			       int flags)
>  {
>  	unsigned long next;
> @@ -252,7 +254,7 @@ static int alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
>  
>  static int init_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end,
>  		    phys_addr_t phys, pgprot_t prot,
> -		    phys_addr_t (*pgtable_alloc)(enum pgtable_type), int flags)
> +		    pgtable_alloc_t pgtable_alloc, int flags)
>  {
>  	unsigned long next;
>  
> @@ -292,7 +294,7 @@ static int init_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end,
>  static int alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
>  			       unsigned long end, phys_addr_t phys,
>  			       pgprot_t prot,
> -			       phys_addr_t (*pgtable_alloc)(enum pgtable_type),
> +			       pgtable_alloc_t pgtable_alloc,
>  			       int flags)
>  {
>  	int ret;
> @@ -349,7 +351,7 @@ static int alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
>  
>  static int alloc_init_pud(p4d_t *p4dp, unsigned long addr, unsigned long end,
>  			  phys_addr_t phys, pgprot_t prot,
> -			  phys_addr_t (*pgtable_alloc)(enum pgtable_type),
> +			  pgtable_alloc_t pgtable_alloc,
>  			  int flags)
>  {
>  	int ret = 0;
> @@ -415,7 +417,7 @@ static int alloc_init_pud(p4d_t *p4dp, unsigned long addr, unsigned long end,
>  
>  static int alloc_init_p4d(pgd_t *pgdp, unsigned long addr, unsigned long end,
>  			  phys_addr_t phys, pgprot_t prot,
> -			  phys_addr_t (*pgtable_alloc)(enum pgtable_type),
> +			  pgtable_alloc_t pgtable_alloc,
>  			  int flags)
>  {
>  	int ret;
> @@ -467,7 +469,7 @@ static int alloc_init_p4d(pgd_t *pgdp, unsigned long addr, unsigned long end,
>  static int __create_pgd_mapping_locked(pgd_t *pgdir, phys_addr_t phys,
>  				       unsigned long virt, phys_addr_t size,
>  				       pgprot_t prot,
> -				       phys_addr_t (*pgtable_alloc)(enum pgtable_type),
> +				       pgtable_alloc_t pgtable_alloc,
>  				       int flags)
>  {
>  	int ret;
> @@ -500,7 +502,7 @@ static int __create_pgd_mapping_locked(pgd_t *pgdir, phys_addr_t phys,
>  static int __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
>  				unsigned long virt, phys_addr_t size,
>  				pgprot_t prot,
> -				phys_addr_t (*pgtable_alloc)(enum pgtable_type),
> +				pgtable_alloc_t pgtable_alloc,
>  				int flags)
>  {
>  	int ret;
> @@ -516,7 +518,7 @@ static int __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
>  static void early_create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
>  				     unsigned long virt, phys_addr_t size,
>  				     pgprot_t prot,
> -				     phys_addr_t (*pgtable_alloc)(enum pgtable_type),
> +				     pgtable_alloc_t pgtable_alloc,
>  				     int flags)
>  {
>  	int ret;




* Re: [PATCH v4 2/3] arm64: mmu: avoid allocating pages while splitting the linear mapping
  2026-01-02 15:07 ` [PATCH v4 2/3] arm64: mmu: avoid allocating pages while splitting the linear mapping Yeoreum Yun
@ 2026-01-02 15:44   ` Ryan Roberts
  2026-01-02 16:14     ` Yeoreum Yun
  0 siblings, 1 reply; 9+ messages in thread
From: Ryan Roberts @ 2026-01-02 15:44 UTC (permalink / raw)
  To: Yeoreum Yun, catalin.marinas, will, akpm, david, kevin.brodsky,
	quic_zhenhuah, dev.jain, yang, chaitanyas.prakash, bigeasy,
	clrkwllms, rostedt, lorenzo.stoakes, ardb, jackmanb, vbabka,
	mhocko
  Cc: linux-arm-kernel, linux-kernel, linux-rt-devel

On 02/01/2026 15:07, Yeoreum Yun wrote:
> linear_map_split_to_ptes() currently allocates page tables with
> GFP_ATOMIC while splitting the linear mapping into PTEs under
> stop_machine().
>
> This is fine for non-PREEMPT_RT configurations. However, it becomes
> problematic on PREEMPT_RT, because the generic memory allocation/free
> APIs (e.g. pgtable_alloc(), __get_free_pages()) cannot be called from
> a non-preemptible context, except for the _nolock() variants. The
> generic allocation/free paths may sleep on PREEMPT_RT because they
> rely on spin_lock(), which becomes a sleeping lock there.
>
> In other words, even calling pgtable_alloc() with GFP_ATOMIC is not
> permitted in linear_map_split_to_ptes() when it is executed by the
> stopper thread, where preemption is disabled on PREEMPT_RT.
>
> To address this, the required number of page tables is first counted
> and preallocated, and the preallocated page tables are then consumed
> while splitting the linear mapping in linear_map_split_to_ptes().
> 
> Fixes: 3df6979d222b ("arm64: mm: split linear mapping if BBML2 unsupported on secondary CPUs")
> Signed-off-by: Yeoreum Yun <yeoreum.yun@arm.com>

Looks good from my perspective.

I have a couple more small comments below. With those addressed:

Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>

> ---
>  arch/arm64/mm/mmu.c | 204 +++++++++++++++++++++++++++++++++++---------
>  1 file changed, 166 insertions(+), 38 deletions(-)
> 
> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index 4b4908ae189b..cc086e91a506 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -529,18 +529,14 @@ static void early_create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
>  		panic("Failed to create page tables\n");
>  }
>  
> -static phys_addr_t __pgd_pgtable_alloc(struct mm_struct *mm, gfp_t gfp,
> -				       enum pgtable_type pgtable_type)
> -{
> -	/* Page is zeroed by init_clear_pgtable() so don't duplicate effort. */
> -	struct ptdesc *ptdesc = pagetable_alloc(gfp & ~__GFP_ZERO, 0);
> -	phys_addr_t pa;
> -
> -	if (!ptdesc)
> -		return INVALID_PHYS_ADDR;
> -
> -	pa = page_to_phys(ptdesc_page(ptdesc));
> +static struct ptdesc **split_pgtables;
> +static unsigned long split_pgtables_count;
> +static unsigned long split_pgtables_idx;

I think these could all be __initdata, if you make
pgd_pgtable_get_preallocated() __init (see below) ?

>  
> +static __always_inline void __pgd_pgtable_init(struct mm_struct *mm,

Is there a reason for __always_inline? If not, I think it's preferable to just
leave it static and let the compiler decide.

> +					       struct ptdesc *ptdesc,
> +					       enum pgtable_type pgtable_type)
> +{
>  	switch (pgtable_type) {
>  	case TABLE_PTE:
>  		BUG_ON(!pagetable_pte_ctor(mm, ptdesc));
> @@ -555,26 +551,49 @@ static phys_addr_t __pgd_pgtable_alloc(struct mm_struct *mm, gfp_t gfp,
>  		pagetable_p4d_ctor(ptdesc);
>  		break;
>  	}
> -
> -	return pa;
>  }
>  
> -static phys_addr_t
> -pgd_pgtable_alloc_init_mm_gfp(enum pgtable_type pgtable_type, gfp_t gfp)
> +static phys_addr_t __pgd_pgtable_alloc(struct mm_struct *mm,
> +				       enum pgtable_type pgtable_type)
>  {
> -	return __pgd_pgtable_alloc(&init_mm, gfp, pgtable_type);
> +	/* Page is zeroed by init_clear_pgtable() so don't duplicate effort. */
> +	struct ptdesc *ptdesc = pagetable_alloc(GFP_PGTABLE_KERNEL & ~__GFP_ZERO, 0);
> +
> +	if (!ptdesc)
> +		return INVALID_PHYS_ADDR;
> +
> +	__pgd_pgtable_init(mm, ptdesc, pgtable_type);
> +
> +	return page_to_phys(ptdesc_page(ptdesc));
>  }
>  
> -static phys_addr_t __maybe_unused
> +static phys_addr_t
>  pgd_pgtable_alloc_init_mm(enum pgtable_type pgtable_type)
>  {
> -	return pgd_pgtable_alloc_init_mm_gfp(pgtable_type, GFP_PGTABLE_KERNEL);
> +	return __pgd_pgtable_alloc(&init_mm, pgtable_type);
>  }
>  
>  static phys_addr_t
>  pgd_pgtable_alloc_special_mm(enum pgtable_type pgtable_type)
>  {
> -	return  __pgd_pgtable_alloc(NULL, GFP_PGTABLE_KERNEL, pgtable_type);
> +	return  __pgd_pgtable_alloc(NULL, pgtable_type);
> +}
> +
> +static phys_addr_t
> +pgd_pgtable_get_preallocated(enum pgtable_type pgtable_type)

I think this could probably be __init?

> +{
> +	struct ptdesc *ptdesc;
> +
> +	if (WARN_ON(split_pgtables_idx >= split_pgtables_count))
> +		return INVALID_PHYS_ADDR;
> +
> +	ptdesc = split_pgtables[split_pgtables_idx++];
> +	if (!ptdesc)
> +		return INVALID_PHYS_ADDR;
> +
> +	__pgd_pgtable_init(&init_mm, ptdesc, pgtable_type);
> +
> +	return page_to_phys(ptdesc_page(ptdesc));
>  }
>  
>  static void split_contpte(pte_t *ptep)
> @@ -586,7 +605,9 @@ static void split_contpte(pte_t *ptep)
>  		__set_pte(ptep, pte_mknoncont(__ptep_get(ptep)));
>  }
>  
> -static int split_pmd(pmd_t *pmdp, pmd_t pmd, gfp_t gfp, bool to_cont)
> +static int split_pmd(pmd_t *pmdp, pmd_t pmd,
> +		     pgtable_alloc_t pgtable_alloc,
> +		     bool to_cont)

nit: this will easily fit in 2 lines and still be within 80 chars:

static int split_pmd(pmd_t *pmdp, pmd_t pmd, pgtable_alloc_t pgtable_alloc,
		     bool to_cont)

>  {
>  	pmdval_t tableprot = PMD_TYPE_TABLE | PMD_TABLE_UXN | PMD_TABLE_AF;
>  	unsigned long pfn = pmd_pfn(pmd);
> @@ -595,7 +616,7 @@ static int split_pmd(pmd_t *pmdp, pmd_t pmd, gfp_t gfp, bool to_cont)
>  	pte_t *ptep;
>  	int i;
>  
> -	pte_phys = pgd_pgtable_alloc_init_mm_gfp(TABLE_PTE, gfp);
> +	pte_phys = pgtable_alloc(TABLE_PTE);
>  	if (pte_phys == INVALID_PHYS_ADDR)
>  		return -ENOMEM;
>  	ptep = (pte_t *)phys_to_virt(pte_phys);
> @@ -630,7 +651,9 @@ static void split_contpmd(pmd_t *pmdp)
>  		set_pmd(pmdp, pmd_mknoncont(pmdp_get(pmdp)));
>  }
>  
> -static int split_pud(pud_t *pudp, pud_t pud, gfp_t gfp, bool to_cont)
> +static int split_pud(pud_t *pudp, pud_t pud,
> +		     pgtable_alloc_t pgtable_alloc,
> +		     bool to_cont)

nit: same comment.

Thanks,
Ryan

>  {
>  	pudval_t tableprot = PUD_TYPE_TABLE | PUD_TABLE_UXN | PUD_TABLE_AF;
>  	unsigned int step = PMD_SIZE >> PAGE_SHIFT;
> @@ -640,7 +663,7 @@ static int split_pud(pud_t *pudp, pud_t pud, gfp_t gfp, bool to_cont)
>  	pmd_t *pmdp;
>  	int i;
>  
> -	pmd_phys = pgd_pgtable_alloc_init_mm_gfp(TABLE_PMD, gfp);
> +	pmd_phys = pgtable_alloc(TABLE_PMD);
>  	if (pmd_phys == INVALID_PHYS_ADDR)
>  		return -ENOMEM;
>  	pmdp = (pmd_t *)phys_to_virt(pmd_phys);
> @@ -709,7 +732,7 @@ static int split_kernel_leaf_mapping_locked(unsigned long addr)
>  	if (!pud_present(pud))
>  		goto out;
>  	if (pud_leaf(pud)) {
> -		ret = split_pud(pudp, pud, GFP_PGTABLE_KERNEL, true);
> +		ret = split_pud(pudp, pud, pgd_pgtable_alloc_init_mm, true);
>  		if (ret)
>  			goto out;
>  	}
> @@ -734,7 +757,7 @@ static int split_kernel_leaf_mapping_locked(unsigned long addr)
>  		 */
>  		if (ALIGN_DOWN(addr, PMD_SIZE) == addr)
>  			goto out;
> -		ret = split_pmd(pmdp, pmd, GFP_PGTABLE_KERNEL, true);
> +		ret = split_pmd(pmdp, pmd, pgd_pgtable_alloc_init_mm, true);
>  		if (ret)
>  			goto out;
>  	}
> @@ -832,12 +855,12 @@ int split_kernel_leaf_mapping(unsigned long start, unsigned long end)
>  static int split_to_ptes_pud_entry(pud_t *pudp, unsigned long addr,
>  				   unsigned long next, struct mm_walk *walk)
>  {
> -	gfp_t gfp = *(gfp_t *)walk->private;
> +	pgtable_alloc_t *pgtable_alloc = walk->private;
>  	pud_t pud = pudp_get(pudp);
>  	int ret = 0;
>  
>  	if (pud_leaf(pud))
> -		ret = split_pud(pudp, pud, gfp, false);
> +		ret = split_pud(pudp, pud, pgtable_alloc, false);
>  
>  	return ret;
>  }
> @@ -845,14 +868,14 @@ static int split_to_ptes_pud_entry(pud_t *pudp, unsigned long addr,
>  static int split_to_ptes_pmd_entry(pmd_t *pmdp, unsigned long addr,
>  				   unsigned long next, struct mm_walk *walk)
>  {
> -	gfp_t gfp = *(gfp_t *)walk->private;
> +	pgtable_alloc_t *pgtable_alloc = walk->private;
>  	pmd_t pmd = pmdp_get(pmdp);
>  	int ret = 0;
>  
>  	if (pmd_leaf(pmd)) {
>  		if (pmd_cont(pmd))
>  			split_contpmd(pmdp);
> -		ret = split_pmd(pmdp, pmd, gfp, false);
> +		ret = split_pmd(pmdp, pmd, pgtable_alloc, false);
>  
>  		/*
>  		 * We have split the pmd directly to ptes so there is no need to
> @@ -881,13 +904,15 @@ static const struct mm_walk_ops split_to_ptes_ops = {
>  	.pte_entry	= split_to_ptes_pte_entry,
>  };
>  
> -static int range_split_to_ptes(unsigned long start, unsigned long end, gfp_t gfp)
> +static int range_split_to_ptes(unsigned long start, unsigned long end,
> +			       pgtable_alloc_t pgtable_alloc)
>  {
>  	int ret;
>  
>  	arch_enter_lazy_mmu_mode();
>  	ret = walk_kernel_page_table_range_lockless(start, end,
> -					&split_to_ptes_ops, NULL, &gfp);
> +						    &split_to_ptes_ops, NULL,
> +						    pgtable_alloc);
>  	arch_leave_lazy_mmu_mode();
>  
>  	return ret;
> @@ -904,6 +929,103 @@ static void __init init_idmap_kpti_bbml2_flag(void)
>  	smp_mb();
>  }
>  
> +static int __init
> +collect_to_split_pud_entry(pud_t *pudp, unsigned long addr,
> +			   unsigned long next, struct mm_walk *walk)
> +{
> +	pud_t pud = pudp_get(pudp);
> +
> +	if (pud_leaf(pud)) {
> +		split_pgtables_count += 1 + PTRS_PER_PMD;
> +		walk->action = ACTION_CONTINUE;
> +	}
> +
> +	return 0;
> +}
> +
> +static int __init
> +collect_to_split_pmd_entry(pmd_t *pmdp, unsigned long addr,
> +			   unsigned long next, struct mm_walk *walk)
> +{
> +	pmd_t pmd = pmdp_get(pmdp);
> +
> +	if (pmd_leaf(pmd))
> +		split_pgtables_count++;
> +
> +	walk->action = ACTION_CONTINUE;
> +
> +	return 0;
> +}
> +
> +static void __init linear_map_free_split_pgtables(void)
> +{
> +	int i;
> +
> +	if (!split_pgtables_count || !split_pgtables)
> +		goto skip_free;
> +
> +	for (i = split_pgtables_idx; i < split_pgtables_count; i++) {
> +		if (split_pgtables[i])
> +			pagetable_free(split_pgtables[i]);
> +	}
> +
> +	kvfree(split_pgtables);
> +
> +skip_free:
> +	split_pgtables = NULL;
> +	split_pgtables_count = 0;
> +	split_pgtables_idx = 0;
> +}
> +
> +static int __init linear_map_prealloc_split_pgtables(void)
> +{
> +	int ret, i;
> +	unsigned long lstart = _PAGE_OFFSET(vabits_actual);
> +	unsigned long lend = PAGE_END;
> +	unsigned long kstart = (unsigned long)lm_alias(_stext);
> +	unsigned long kend = (unsigned long)lm_alias(__init_begin);
> +
> +	const struct mm_walk_ops collect_to_split_ops = {
> +		.pud_entry	= collect_to_split_pud_entry,
> +		.pmd_entry	= collect_to_split_pmd_entry
> +	};
> +
> +	split_pgtables_idx = 0;
> +	split_pgtables_count = 0;
> +
> +	ret = walk_kernel_page_table_range_lockless(lstart, kstart,
> +						    &collect_to_split_ops,
> +						    NULL, NULL);
> +	if (!ret)
> +		ret = walk_kernel_page_table_range_lockless(kend, lend,
> +							    &collect_to_split_ops,
> +							    NULL, NULL);
> +	if (ret || !split_pgtables_count)
> +		goto error;
> +
> +	ret = -ENOMEM;
> +
> +	split_pgtables = kvmalloc(split_pgtables_count * sizeof(struct ptdesc *),
> +				  GFP_KERNEL | __GFP_ZERO);
> +	if (!split_pgtables)
> +		goto error;
> +
> +	for (i = 0; i < split_pgtables_count; i++) {
> +		/* The page table will be filled during splitting, so zeroing it is unnecessary. */
> +		split_pgtables[i] = pagetable_alloc(GFP_PGTABLE_KERNEL & ~__GFP_ZERO, 0);
> +		if (!split_pgtables[i])
> +			goto error;
> +	}
> +
> +	ret = 0;
> +
> +error:
> +	if (ret)
> +		linear_map_free_split_pgtables();
> +
> +	return ret;
> +}
> +
>  static int __init linear_map_split_to_ptes(void *__unused)
>  {
>  	/*
> @@ -929,9 +1051,9 @@ static int __init linear_map_split_to_ptes(void *__unused)
>  		 * PTE. The kernel alias remains static throughout runtime so
>  		 * can continue to be safely mapped with large mappings.
>  		 */
> -		ret = range_split_to_ptes(lstart, kstart, GFP_ATOMIC);
> +		ret = range_split_to_ptes(lstart, kstart, pgd_pgtable_get_preallocated);
>  		if (!ret)
> -			ret = range_split_to_ptes(kend, lend, GFP_ATOMIC);
> +			ret = range_split_to_ptes(kend, lend, pgd_pgtable_get_preallocated);
>  		if (ret)
>  			panic("Failed to split linear map\n");
>  		flush_tlb_kernel_range(lstart, lend);
> @@ -964,10 +1086,16 @@ static int __init linear_map_split_to_ptes(void *__unused)
>  
>  void __init linear_map_maybe_split_to_ptes(void)
>  {
> -	if (linear_map_requires_bbml2 && !system_supports_bbml2_noabort()) {
> -		init_idmap_kpti_bbml2_flag();
> -		stop_machine(linear_map_split_to_ptes, NULL, cpu_online_mask);
> -	}
> +	if (!linear_map_requires_bbml2 || system_supports_bbml2_noabort())
> +		return;
> +
> +	if (linear_map_prealloc_split_pgtables())
> +		panic("Failed to split linear map\n");
> +
> +	init_idmap_kpti_bbml2_flag();
> +	stop_machine(linear_map_split_to_ptes, NULL, cpu_online_mask);
> +
> +	linear_map_free_split_pgtables();
>  }
>  
>  /*
> @@ -1098,7 +1226,7 @@ bool arch_kfence_init_pool(void)
>  		return true;
>  
>  	mutex_lock(&pgtable_split_lock);
> -	ret = range_split_to_ptes(start, end, GFP_PGTABLE_KERNEL);
> +	ret = range_split_to_ptes(start, end, pgd_pgtable_alloc_init_mm);
>  	mutex_unlock(&pgtable_split_lock);
>  
>  	/*




* Re: [PATCH v4 2/3] arm64: mmu: avoid allocating pages while splitting the linear mapping
  2026-01-02 15:44   ` Ryan Roberts
@ 2026-01-02 16:14     ` Yeoreum Yun
  2026-01-02 17:04       ` Ryan Roberts
  0 siblings, 1 reply; 9+ messages in thread
From: Yeoreum Yun @ 2026-01-02 16:14 UTC (permalink / raw)
  To: Ryan Roberts
  Cc: catalin.marinas, will, akpm, david, kevin.brodsky, quic_zhenhuah,
	dev.jain, yang, chaitanyas.prakash, bigeasy, clrkwllms, rostedt,
	lorenzo.stoakes, ardb, jackmanb, vbabka, mhocko, linux-arm-kernel,
	linux-kernel, linux-rt-devel

Hi Ryan,

> > linear_map_split_to_ptes() currently allocates page tables with
> > GFP_ATOMIC while splitting the linear mapping into PTEs under
> > stop_machine().
> >
> > This is fine for non-PREEMPT_RT configurations. However, it becomes
> > problematic on PREEMPT_RT, because the generic memory allocation/free
> > APIs (e.g. pgtable_alloc(), __get_free_pages()) cannot be called from
> > a non-preemptible context, except for the _nolock() variants. The
> > generic allocation/free paths may sleep on PREEMPT_RT because they
> > rely on spin_lock(), which becomes a sleeping lock there.
> >
> > In other words, even calling pgtable_alloc() with GFP_ATOMIC is not
> > permitted in linear_map_split_to_ptes() when it is executed by the
> > stopper thread, where preemption is disabled on PREEMPT_RT.
> >
> > To address this, the required number of page tables is first counted
> > and preallocated, and the preallocated page tables are then consumed
> > while splitting the linear mapping in linear_map_split_to_ptes().
> >
> > Fixes: 3df6979d222b ("arm64: mm: split linear mapping if BBML2 unsupported on secondary CPUs")
> > Signed-off-by: Yeoreum Yun <yeoreum.yun@arm.com>
>
> Looks good from my perspective.
>
> I have a couple more small comments below. With those addressed:
>
> Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
>
> > ---
> >  arch/arm64/mm/mmu.c | 204 +++++++++++++++++++++++++++++++++++---------
> >  1 file changed, 166 insertions(+), 38 deletions(-)
> >
> > diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> > index 4b4908ae189b..cc086e91a506 100644
> > --- a/arch/arm64/mm/mmu.c
> > +++ b/arch/arm64/mm/mmu.c
> > @@ -529,18 +529,14 @@ static void early_create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
> >  		panic("Failed to create page tables\n");
> >  }
> >
> > -static phys_addr_t __pgd_pgtable_alloc(struct mm_struct *mm, gfp_t gfp,
> > -				       enum pgtable_type pgtable_type)
> > -{
> > -	/* Page is zeroed by init_clear_pgtable() so don't duplicate effort. */
> > -	struct ptdesc *ptdesc = pagetable_alloc(gfp & ~__GFP_ZERO, 0);
> > -	phys_addr_t pa;
> > -
> > -	if (!ptdesc)
> > -		return INVALID_PHYS_ADDR;
> > -
> > -	pa = page_to_phys(ptdesc_page(ptdesc));
> > +static struct ptdesc **split_pgtables;
> > +static unsigned long split_pgtables_count;
> > +static unsigned long split_pgtables_idx;
>
> I think these could all be __initdata, if you make
> pgd_pgtable_get_preallocated() __init (see below) ?

I don't think so, since range_split_to_ptes() can't be __init.
That's why the following warning appears while compiling:

  WARNING: modpost: vmlinux: section mismatch in reference: range_split_to_ptes+0x3c (section: .text) -> pgd_pgtable_get_preallocated (section: .init.text)
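
(For illustration only, with hypothetical names, this is the pattern
modpost complains about: a .text function keeping a reference to a
.init.text function, which is freed once boot completes.)

/* Placed in .init.text; this memory is freed after boot. */
static int __init freed_early(void)
{
	return 0;
}

/* Placed in .text and kept after boot; modpost flags the call below
 * because freed_early() no longer exists once init memory is freed.
 */
static int keeper(void)
{
	return freed_early();
}
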
>
> >
> > +static __always_inline void __pgd_pgtable_init(struct mm_struct *mm,
>
> Is there a reason for __always_inline? If not, I think it's preferable to just
> leave it static and let the compiler decide.

Okay. I'll remove __always_inline. Thanks.

>
> > +					       struct ptdesc *ptdesc,
> > +					       enum pgtable_type pgtable_type)
> > +{
> >  	switch (pgtable_type) {
> >  	case TABLE_PTE:
> >  		BUG_ON(!pagetable_pte_ctor(mm, ptdesc));
> > @@ -555,26 +551,49 @@ static phys_addr_t __pgd_pgtable_alloc(struct mm_struct *mm, gfp_t gfp,
> >  		pagetable_p4d_ctor(ptdesc);
> >  		break;
> >  	}
> > -
> > -	return pa;
> >  }
> >
> > -static phys_addr_t
> > -pgd_pgtable_alloc_init_mm_gfp(enum pgtable_type pgtable_type, gfp_t gfp)
> > +static phys_addr_t __pgd_pgtable_alloc(struct mm_struct *mm,
> > +				       enum pgtable_type pgtable_type)
> >  {
> > -	return __pgd_pgtable_alloc(&init_mm, gfp, pgtable_type);
> > +	/* Page is zeroed by init_clear_pgtable() so don't duplicate effort. */
> > +	struct ptdesc *ptdesc = pagetable_alloc(GFP_PGTABLE_KERNEL & ~__GFP_ZERO, 0);
> > +
> > +	if (!ptdesc)
> > +		return INVALID_PHYS_ADDR;
> > +
> > +	__pgd_pgtable_init(mm, ptdesc, pgtable_type);
> > +
> > +	return page_to_phys(ptdesc_page(ptdesc));
> >  }
> >
> > -static phys_addr_t __maybe_unused
> > +static phys_addr_t
> >  pgd_pgtable_alloc_init_mm(enum pgtable_type pgtable_type)
> >  {
> > -	return pgd_pgtable_alloc_init_mm_gfp(pgtable_type, GFP_PGTABLE_KERNEL);
> > +	return __pgd_pgtable_alloc(&init_mm, pgtable_type);
> >  }
> >
> >  static phys_addr_t
> >  pgd_pgtable_alloc_special_mm(enum pgtable_type pgtable_type)
> >  {
> > -	return  __pgd_pgtable_alloc(NULL, GFP_PGTABLE_KERNEL, pgtable_type);
> > +	return  __pgd_pgtable_alloc(NULL, pgtable_type);
> > +}
> > +
> > +static phys_addr_t
> > +pgd_pgtable_get_preallocated(enum pgtable_type pgtable_type)
>
> I think this could probably be __init?

See above.

>
> > +{
> > +	struct ptdesc *ptdesc;
> > +
> > +	if (WARN_ON(split_pgtables_idx >= split_pgtables_count))
> > +		return INVALID_PHYS_ADDR;
> > +
> > +	ptdesc = split_pgtables[split_pgtables_idx++];
> > +	if (!ptdesc)
> > +		return INVALID_PHYS_ADDR;
> > +
> > +	__pgd_pgtable_init(&init_mm, ptdesc, pgtable_type);
> > +
> > +	return page_to_phys(ptdesc_page(ptdesc));
> >  }
> >
> >  static void split_contpte(pte_t *ptep)
> > @@ -586,7 +605,9 @@ static void split_contpte(pte_t *ptep)
> >  		__set_pte(ptep, pte_mknoncont(__ptep_get(ptep)));
> >  }
> >
> > -static int split_pmd(pmd_t *pmdp, pmd_t pmd, gfp_t gfp, bool to_cont)
> > +static int split_pmd(pmd_t *pmdp, pmd_t pmd,
> > +		     pgtable_alloc_t pgtable_alloc,
> > +		     bool to_cont)
>
> nit: this will easily fit in 2 lines and still be within 80 chars:

Okay. I'll change it.

>
> static int split_pmd(pmd_t *pmdp, pmd_t pmd, pgtable_alloc_t pgtable_alloc,
> 		     bool to_cont)
>
> >  {
> >  	pmdval_t tableprot = PMD_TYPE_TABLE | PMD_TABLE_UXN | PMD_TABLE_AF;
> >  	unsigned long pfn = pmd_pfn(pmd);
> > @@ -595,7 +616,7 @@ static int split_pmd(pmd_t *pmdp, pmd_t pmd, gfp_t gfp, bool to_cont)
> >  	pte_t *ptep;
> >  	int i;
> >
> > -	pte_phys = pgd_pgtable_alloc_init_mm_gfp(TABLE_PTE, gfp);
> > +	pte_phys = pgtable_alloc(TABLE_PTE);
> >  	if (pte_phys == INVALID_PHYS_ADDR)
> >  		return -ENOMEM;
> >  	ptep = (pte_t *)phys_to_virt(pte_phys);
> > @@ -630,7 +651,9 @@ static void split_contpmd(pmd_t *pmdp)
> >  		set_pmd(pmdp, pmd_mknoncont(pmdp_get(pmdp)));
> >  }
> >
> > -static int split_pud(pud_t *pudp, pud_t pud, gfp_t gfp, bool to_cont)
> > +static int split_pud(pud_t *pudp, pud_t pud,
> > +		     pgtable_alloc_t pgtable_alloc,
> > +		     bool to_cont)
>
> nit: same comment.

Thanks for your review :D
BTW, aside from the __init-related comments, may I add your R-b tag
after fixing the others?

--
Sincerely,
Yeoreum Yun



* Re: [PATCH v4 1/3] arm64: mmu: introduce pgtable_alloc_t
  2026-01-02 15:28   ` Ryan Roberts
@ 2026-01-02 16:16     ` Yeoreum Yun
  0 siblings, 0 replies; 9+ messages in thread
From: Yeoreum Yun @ 2026-01-02 16:16 UTC (permalink / raw)
  To: Ryan Roberts
  Cc: catalin.marinas, will, akpm, david, kevin.brodsky, quic_zhenhuah,
	dev.jain, yang, chaitanyas.prakash, bigeasy, clrkwllms, rostedt,
	lorenzo.stoakes, ardb, jackmanb, vbabka, mhocko, linux-arm-kernel,
	linux-kernel, linux-rt-devel

> On 02/01/2026 15:07, Yeoreum Yun wrote:
> > This is a preparation patch for using preallocated page tables
> > in linear_map_split_to_ptes().
> >
> > Define the pgtable_alloc_t type for the callback used by
> > create_XXX_mapping().
> >
> > Signed-off-by: Yeoreum Yun <yeoreum.yun@arm.com>
>
> Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>

Thanks ;)


>
> > ---
> >  arch/arm64/mm/mmu.c | 18 ++++++++++--------
> >  1 file changed, 10 insertions(+), 8 deletions(-)
> >
> > diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> > index 8e1d80a7033e..4b4908ae189b 100644
> > --- a/arch/arm64/mm/mmu.c
> > +++ b/arch/arm64/mm/mmu.c
> > @@ -74,6 +74,8 @@ EXPORT_SYMBOL(empty_zero_page);
> >  static DEFINE_SPINLOCK(swapper_pgdir_lock);
> >  static DEFINE_MUTEX(fixmap_lock);
> >
> > +typedef phys_addr_t (pgtable_alloc_t)(enum pgtable_type);
> > +
> >  void noinstr set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
> >  {
> >  	pgd_t *fixmap_pgdp;
> > @@ -197,7 +199,7 @@ static void init_pte(pte_t *ptep, unsigned long addr, unsigned long end,
> >  static int alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
> >  			       unsigned long end, phys_addr_t phys,
> >  			       pgprot_t prot,
> > -			       phys_addr_t (*pgtable_alloc)(enum pgtable_type),
> > +			       pgtable_alloc_t pgtable_alloc,
> >  			       int flags)
> >  {
> >  	unsigned long next;
> > @@ -252,7 +254,7 @@ static int alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
> >
> >  static int init_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end,
> >  		    phys_addr_t phys, pgprot_t prot,
> > -		    phys_addr_t (*pgtable_alloc)(enum pgtable_type), int flags)
> > +		    pgtable_alloc_t pgtable_alloc, int flags)
> >  {
> >  	unsigned long next;
> >
> > @@ -292,7 +294,7 @@ static int init_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end,
> >  static int alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
> >  			       unsigned long end, phys_addr_t phys,
> >  			       pgprot_t prot,
> > -			       phys_addr_t (*pgtable_alloc)(enum pgtable_type),
> > +			       pgtable_alloc_t pgtable_alloc,
> >  			       int flags)
> >  {
> >  	int ret;
> > @@ -349,7 +351,7 @@ static int alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
> >
> >  static int alloc_init_pud(p4d_t *p4dp, unsigned long addr, unsigned long end,
> >  			  phys_addr_t phys, pgprot_t prot,
> > -			  phys_addr_t (*pgtable_alloc)(enum pgtable_type),
> > +			  pgtable_alloc_t pgtable_alloc,
> >  			  int flags)
> >  {
> >  	int ret = 0;
> > @@ -415,7 +417,7 @@ static int alloc_init_pud(p4d_t *p4dp, unsigned long addr, unsigned long end,
> >
> >  static int alloc_init_p4d(pgd_t *pgdp, unsigned long addr, unsigned long end,
> >  			  phys_addr_t phys, pgprot_t prot,
> > -			  phys_addr_t (*pgtable_alloc)(enum pgtable_type),
> > +			  pgtable_alloc_t pgtable_alloc,
> >  			  int flags)
> >  {
> >  	int ret;
> > @@ -467,7 +469,7 @@ static int alloc_init_p4d(pgd_t *pgdp, unsigned long addr, unsigned long end,
> >  static int __create_pgd_mapping_locked(pgd_t *pgdir, phys_addr_t phys,
> >  				       unsigned long virt, phys_addr_t size,
> >  				       pgprot_t prot,
> > -				       phys_addr_t (*pgtable_alloc)(enum pgtable_type),
> > +				       pgtable_alloc_t pgtable_alloc,
> >  				       int flags)
> >  {
> >  	int ret;
> > @@ -500,7 +502,7 @@ static int __create_pgd_mapping_locked(pgd_t *pgdir, phys_addr_t phys,
> >  static int __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
> >  				unsigned long virt, phys_addr_t size,
> >  				pgprot_t prot,
> > -				phys_addr_t (*pgtable_alloc)(enum pgtable_type),
> > +				pgtable_alloc_t pgtable_alloc,
> >  				int flags)
> >  {
> >  	int ret;
> > @@ -516,7 +518,7 @@ static int __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
> >  static void early_create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
> >  				     unsigned long virt, phys_addr_t size,
> >  				     pgprot_t prot,
> > -				     phys_addr_t (*pgtable_alloc)(enum pgtable_type),
> > +				     pgtable_alloc_t pgtable_alloc,
> >  				     int flags)
> >  {
> >  	int ret;
>

--
Sincerely,
Yeoreum Yun



* Re: [PATCH v4 2/3] arm64: mmu: avoid allocating pages while splitting the linear mapping
  2026-01-02 16:14     ` Yeoreum Yun
@ 2026-01-02 17:04       ` Ryan Roberts
  0 siblings, 0 replies; 9+ messages in thread
From: Ryan Roberts @ 2026-01-02 17:04 UTC (permalink / raw)
  To: Yeoreum Yun
  Cc: catalin.marinas, will, akpm, david, kevin.brodsky, quic_zhenhuah,
	dev.jain, yang, chaitanyas.prakash, bigeasy, clrkwllms, rostedt,
	lorenzo.stoakes, ardb, jackmanb, vbabka, mhocko, linux-arm-kernel,
	linux-kernel, linux-rt-devel

On 02/01/2026 16:14, Yeoreum Yun wrote:
> Hi Ryan,
> 
>>> linear_map_split_to_ptes() currently allocates page tables with
>>> GFP_ATOMIC while splitting the linear mapping into PTEs under
>>> stop_machine().
>>>
>>> This is fine for non-PREEMPT_RT configurations. However, it becomes
>>> problematic on PREEMPT_RT, because the generic memory allocation/free
>>> APIs (e.g. pgtable_alloc(), __get_free_pages()) cannot be called from
>>> a non-preemptible context, except for the _nolock() variants. The
>>> generic allocation/free paths may sleep on PREEMPT_RT because they
>>> rely on spin_lock(), which becomes a sleeping lock there.
>>>
>>> In other words, even calling pgtable_alloc() with GFP_ATOMIC is not
>>> permitted in linear_map_split_to_ptes() when it is executed by the
>>> stopper thread, where preemption is disabled on PREEMPT_RT.
>>>
>>> To address this, the required number of page tables is first counted
>>> and preallocated, and the preallocated page tables are then consumed
>>> while splitting the linear mapping in linear_map_split_to_ptes().
>>>
>>> Fixes: 3df6979d222b ("arm64: mm: split linear mapping if BBML2 unsupported on secondary CPUs")
>>> Signed-off-by: Yeoreum Yun <yeoreum.yun@arm.com>
>>
>> Looks good from my perspective.
>>
>> I have a couple more small comments below. With those addressed:
>>
>> Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
>>
>>> ---
>>>  arch/arm64/mm/mmu.c | 204 +++++++++++++++++++++++++++++++++++---------
>>>  1 file changed, 166 insertions(+), 38 deletions(-)
>>>
>>> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
>>> index 4b4908ae189b..cc086e91a506 100644
>>> --- a/arch/arm64/mm/mmu.c
>>> +++ b/arch/arm64/mm/mmu.c
>>> @@ -529,18 +529,14 @@ static void early_create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
>>>  		panic("Failed to create page tables\n");
>>>  }
>>>
>>> -static phys_addr_t __pgd_pgtable_alloc(struct mm_struct *mm, gfp_t gfp,
>>> -				       enum pgtable_type pgtable_type)
>>> -{
>>> -	/* Page is zeroed by init_clear_pgtable() so don't duplicate effort. */
>>> -	struct ptdesc *ptdesc = pagetable_alloc(gfp & ~__GFP_ZERO, 0);
>>> -	phys_addr_t pa;
>>> -
>>> -	if (!ptdesc)
>>> -		return INVALID_PHYS_ADDR;
>>> -
>>> -	pa = page_to_phys(ptdesc_page(ptdesc));
>>> +static struct ptdesc **split_pgtables;
>>> +static unsigned long split_pgtables_count;
>>> +static unsigned long split_pgtables_idx;
>>
>> I think these could all be __initdata, if you make
>> pgd_pgtable_get_preallocated() __init (see below) ?
> 
> I don't think so, since range_split_to_ptes() can't be __init.
> That's why the following warning appears while compiling:
> 
>   WARNING: modpost: vmlinux: section mismatch in reference: range_split_to_ptes+0x3c (section: .text) -> pgd_pgtable_get_preallocated (section: .init.text)

Ahh ok, the compiler beats me :)

>>
>>>
>>> +static __always_inline void __pgd_pgtable_init(struct mm_struct *mm,
>>
>> Is there a reason for __always_inline? If not, I think it's preferable to just
>> leave it static and let the compiler decide.
> 
> Okay. I'll remove __always_inline. Thanks.
> 
>>
>>> +					       struct ptdesc *ptdesc,
>>> +					       enum pgtable_type pgtable_type)
>>> +{
>>>  	switch (pgtable_type) {
>>>  	case TABLE_PTE:
>>>  		BUG_ON(!pagetable_pte_ctor(mm, ptdesc));
>>> @@ -555,26 +551,49 @@ static phys_addr_t __pgd_pgtable_alloc(struct mm_struct *mm, gfp_t gfp,
>>>  		pagetable_p4d_ctor(ptdesc);
>>>  		break;
>>>  	}
>>> -
>>> -	return pa;
>>>  }
>>>
>>> -static phys_addr_t
>>> -pgd_pgtable_alloc_init_mm_gfp(enum pgtable_type pgtable_type, gfp_t gfp)
>>> +static phys_addr_t __pgd_pgtable_alloc(struct mm_struct *mm,
>>> +				       enum pgtable_type pgtable_type)
>>>  {
>>> -	return __pgd_pgtable_alloc(&init_mm, gfp, pgtable_type);
>>> +	/* Page is zeroed by init_clear_pgtable() so don't duplicate effort. */
>>> +	struct ptdesc *ptdesc = pagetable_alloc(GFP_PGTABLE_KERNEL & ~__GFP_ZERO, 0);
>>> +
>>> +	if (!ptdesc)
>>> +		return INVALID_PHYS_ADDR;
>>> +
>>> +	__pgd_pgtable_init(mm, ptdesc, pgtable_type);
>>> +
>>> +	return page_to_phys(ptdesc_page(ptdesc));
>>>  }
>>>
>>> -static phys_addr_t __maybe_unused
>>> +static phys_addr_t
>>>  pgd_pgtable_alloc_init_mm(enum pgtable_type pgtable_type)
>>>  {
>>> -	return pgd_pgtable_alloc_init_mm_gfp(pgtable_type, GFP_PGTABLE_KERNEL);
>>> +	return __pgd_pgtable_alloc(&init_mm, pgtable_type);
>>>  }
>>>
>>>  static phys_addr_t
>>>  pgd_pgtable_alloc_special_mm(enum pgtable_type pgtable_type)
>>>  {
>>> -	return  __pgd_pgtable_alloc(NULL, GFP_PGTABLE_KERNEL, pgtable_type);
>>> +	return  __pgd_pgtable_alloc(NULL, pgtable_type);
>>> +}
>>> +
>>> +static phys_addr_t
>>> +pgd_pgtable_get_preallocated(enum pgtable_type pgtable_type)
>>
>> I think this could probably be __init?
> 
> See above.
> 
>>
>>> +{
>>> +	struct ptdesc *ptdesc;
>>> +
>>> +	if (WARN_ON(split_pgtables_idx >= split_pgtables_count))
>>> +		return INVALID_PHYS_ADDR;
>>> +
>>> +	ptdesc = split_pgtables[split_pgtables_idx++];
>>> +	if (!ptdesc)
>>> +		return INVALID_PHYS_ADDR;
>>> +
>>> +	__pgd_pgtable_init(&init_mm, ptdesc, pgtable_type);
>>> +
>>> +	return page_to_phys(ptdesc_page(ptdesc));
>>>  }
>>>
>>>  static void split_contpte(pte_t *ptep)
>>> @@ -586,7 +605,9 @@ static void split_contpte(pte_t *ptep)
>>>  		__set_pte(ptep, pte_mknoncont(__ptep_get(ptep)));
>>>  }
>>>
>>> -static int split_pmd(pmd_t *pmdp, pmd_t pmd, gfp_t gfp, bool to_cont)
>>> +static int split_pmd(pmd_t *pmdp, pmd_t pmd,
>>> +		     pgtable_alloc_t pgtable_alloc,
>>> +		     bool to_cont)
>>
>> nit: this will easily fit in 2 lines and still be within 80 chars:
> 
> Okay. I'll change it.
> 
>>
>> static int split_pmd(pmd_t *pmdp, pmd_t pmd, pgtable_alloc_t pgtable_alloc,
>> 		     bool to_cont)
>>
>>>  {
>>>  	pmdval_t tableprot = PMD_TYPE_TABLE | PMD_TABLE_UXN | PMD_TABLE_AF;
>>>  	unsigned long pfn = pmd_pfn(pmd);
>>> @@ -595,7 +616,7 @@ static int split_pmd(pmd_t *pmdp, pmd_t pmd, gfp_t gfp, bool to_cont)
>>>  	pte_t *ptep;
>>>  	int i;
>>>
>>> -	pte_phys = pgd_pgtable_alloc_init_mm_gfp(TABLE_PTE, gfp);
>>> +	pte_phys = pgtable_alloc(TABLE_PTE);
>>>  	if (pte_phys == INVALID_PHYS_ADDR)
>>>  		return -ENOMEM;
>>>  	ptep = (pte_t *)phys_to_virt(pte_phys);
>>> @@ -630,7 +651,9 @@ static void split_contpmd(pmd_t *pmdp)
>>>  		set_pmd(pmdp, pmd_mknoncont(pmdp_get(pmdp)));
>>>  }
>>>
>>> -static int split_pud(pud_t *pudp, pud_t pud, gfp_t gfp, bool to_cont)
>>> +static int split_pud(pud_t *pudp, pud_t pud,
>>> +		     pgtable_alloc_t pgtable_alloc,
>>> +		     bool to_cont)
>>
>> nit: same comment.
> 
> Thanks for your review :D
> BTW, aside from the __init-related comments, may I add your R-b tag
> after fixing the others?

Yes, please add my R-b.

> 
> --
> Sincerely,
> Yeoreum Yun





Thread overview: 9+ messages
2026-01-02 15:07 [PATCH v4 0/3] fix wrong usage of memory allocation APIs under PREEMPT_RT in arm64 Yeoreum Yun
2026-01-02 15:07 ` [PATCH v4 1/3] arm64: mmu: introduce pgtable_alloc_t Yeoreum Yun
2026-01-02 15:28   ` Ryan Roberts
2026-01-02 16:16     ` Yeoreum Yun
2026-01-02 15:07 ` [PATCH v4 2/3] arm64: mmu: avoid allocating pages while splitting the linear mapping Yeoreum Yun
2026-01-02 15:44   ` Ryan Roberts
2026-01-02 16:14     ` Yeoreum Yun
2026-01-02 17:04       ` Ryan Roberts
2026-01-02 15:07 ` [PATCH v4 3/3] arm64: mmu: avoid allocating pages while installing ng-mapping for KPTI Yeoreum Yun
