From: Yin Tirui <yintirui@huawei.com>
To: <akpm@linux-foundation.org>, <david@redhat.com>,
<lorenzo.stoakes@oracle.com>, <Liam.Howlett@oracle.com>,
<vbabka@suse.cz>, <rppt@kernel.org>, <surenb@google.com>,
<mhocko@suse.com>, <ziy@nvidia.com>,
<baolin.wang@linux.alibaba.com>, <npache@redhat.com>,
<ryan.roberts@arm.com>, <dev.jain@arm.com>, <baohua@kernel.org>,
<catalin.marinas@arm.com>, <will@kernel.org>,
<paul.walmsley@sifive.com>, <palmer@dabbelt.com>,
<aou@eecs.berkeley.edu>, <alex@ghiti.fr>,
<anshuman.khandual@arm.com>, <yangyicong@hisilicon.com>,
<ardb@kernel.org>, <willy@infradead.org>, <apopple@nvidia.com>,
<samuel.holland@sifive.com>, <luxu.kernel@bytedance.com>,
<abrestic@rivosinc.com>, <yongxuan.wang@sifive.com>,
<linux-mm@kvack.org>, <linux-kernel@vger.kernel.org>,
<linux-arm-kernel@lists.infradead.org>,
<linux-riscv@lists.infradead.org>
Cc: <wangkefeng.wang@huawei.com>, <chenjun102@huawei.com>,
<yintirui@huawei.com>
Subject: [PATCH RFC 2/2] mm: add PMD-level huge page support for remap_pfn_range()
Date: Tue, 23 Sep 2025 21:31:04 +0800
Message-ID: <20250923133104.926672-3-yintirui@huawei.com>
In-Reply-To: <20250923133104.926672-1-yintirui@huawei.com>

Add PMD-level huge page support to remap_pfn_range(), automatically
creating huge mappings when the prerequisites (size, alignment,
architecture support, etc.) are satisfied, and falling back to normal
page mappings otherwise.
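
Callers need no changes to benefit. A minimal, hypothetical driver
sketch (demo_phys_base is an assumed PMD_SIZE-aligned MMIO base, not
part of this series):

	/* Hypothetical ->mmap handler: unchanged by this series. */
	static int demo_mmap(struct file *file, struct vm_area_struct *vma)
	{
		unsigned long size = vma->vm_end - vma->vm_start;

		/*
		 * Each PMD-aligned, PMD-sized chunk of the range is now
		 * mapped with a single huge PMD; the remainder falls
		 * back to normal PTE mappings.
		 */
		return remap_pfn_range(vma, vma->vm_start,
				       demo_phys_base >> PAGE_SHIFT,
				       size, vma->vm_page_prot);
	}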

Implement splitting of special huge PMDs by using the pgtable deposit/
withdraw mechanism. When a split is needed, the deposited pgtable is
withdrawn and populated with individual PTEs derived from the original
huge mapping, using pte_clrhuge() to clear the huge-page attribute.
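
In outline (a condensed sketch of the mm/huge_memory.c hunk below, with
error handling trimmed):

	old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
	/* Withdraw the pgtable deposited when the mapping was created. */
	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pmd_populate(mm, &_pmd, pgtable);
	pte = pte_offset_map(&_pmd, haddr);
	/* Rebuild HPAGE_PMD_NR PTEs over consecutive pfns, each with the
	 * huge attribute cleared. */
	entry = pte_clrhuge(pfn_pte(pmd_pfn(old_pmd), pmd_pgprot(old_pmd)));
	set_ptes(mm, haddr, pte, entry, HPAGE_PMD_NR);
	pte_unmap(pte);
	smp_wmb();	/* make the PTEs visible before the PMD */
	pmd_populate(mm, pmd, pgtable);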

Update arch_needs_pgtable_deposit() to return true when PMD pfnmap
support is enabled, so that a pgtable page is always deposited for huge
pfnmap mappings and is available when the mapping has to be split.

Introduce a pfnmap_max_page_shift parameter to cap the mapping page
size, and a "nohugepfnmap" boot option to disable huge pfnmap entirely.
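
For example, huge pfnmap can be turned off by appending the new option
to the kernel command line:

	nohugepfnmap

On architectures without CONFIG_ARCH_SUPPORTS_HUGE_PFNMAP,
pfnmap_max_page_shift is a constant PAGE_SHIFT, so the huge path is
compiled out.
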
Signed-off-by: Yin Tirui <yintirui@huawei.com>
---
 include/linux/pgtable.h |  6 +++-
 mm/huge_memory.c        | 22 ++++++++----
 mm/memory.c             | 74 ++++++++++++++++++++++++++++++++++++-----
 3 files changed, 85 insertions(+), 17 deletions(-)
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 4c035637eeb7..4028318552ca 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1025,7 +1025,11 @@ extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
 #endif
 
 #ifndef arch_needs_pgtable_deposit
-#define arch_needs_pgtable_deposit() (false)
+#define arch_needs_pgtable_deposit arch_needs_pgtable_deposit
+static inline bool arch_needs_pgtable_deposit(void)
+{
+	return IS_ENABLED(CONFIG_ARCH_SUPPORTS_PMD_PFNMAP);
+}
 #endif
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9c38a95e9f09..9f20adcbbb55 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2857,14 +2857,22 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 
 	if (!vma_is_anonymous(vma)) {
 		old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
-		/*
-		 * We are going to unmap this huge page. So
-		 * just go ahead and zap it
-		 */
-		if (arch_needs_pgtable_deposit())
-			zap_deposited_table(mm, pmd);
-		if (!vma_is_dax(vma) && vma_is_special_huge(vma))
+		if (!vma_is_dax(vma) && vma_is_special_huge(vma)) {
+			pte_t entry;
+
+			pgtable = pgtable_trans_huge_withdraw(mm, pmd);
+			if (unlikely(!pgtable))
+				return;
+			pmd_populate(mm, &_pmd, pgtable);
+			pte = pte_offset_map(&_pmd, haddr);
+			entry = pte_clrhuge(pfn_pte(pmd_pfn(old_pmd), pmd_pgprot(old_pmd)));
+			set_ptes(mm, haddr, pte, entry, HPAGE_PMD_NR);
+			pte_unmap(pte);
+
+			smp_wmb(); /* make pte visible before pmd */
+			pmd_populate(mm, pmd, pgtable);
 			return;
+		}
 		if (unlikely(is_pmd_migration_entry(old_pmd))) {
 			swp_entry_t entry;
 
diff --git a/mm/memory.c b/mm/memory.c
index 0ba4f6b71847..c4aaf3bd9cad 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2674,6 +2674,19 @@ vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
 	return __vm_insert_mixed(vma, addr, pfn, true);
 }
 
+#ifdef CONFIG_ARCH_SUPPORTS_HUGE_PFNMAP
+static unsigned int __ro_after_init pfnmap_max_page_shift = BITS_PER_LONG - 1;
+
+static int __init set_nohugepfnmap(char *str)
+{
+	pfnmap_max_page_shift = PAGE_SHIFT;
+	return 0;
+}
+early_param("nohugepfnmap", set_nohugepfnmap);
+#else /* CONFIG_ARCH_SUPPORTS_HUGE_PFNMAP */
+static const unsigned int pfnmap_max_page_shift = PAGE_SHIFT;
+#endif /* CONFIG_ARCH_SUPPORTS_HUGE_PFNMAP */
+
 /*
  * maps a range of physical memory into the requested pages. the old
  * mappings are removed. any references to nonexistent pages results
@@ -2705,9 +2718,47 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
 	return err;
 }
 
+#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
+static int remap_try_huge_pmd(struct mm_struct *mm, pmd_t *pmd,
+			      unsigned long addr, unsigned long end,
+			      unsigned long pfn, pgprot_t prot,
+			      unsigned int page_shift)
+{
+	pgtable_t pgtable;
+	spinlock_t *ptl;
+
+	if (page_shift < PMD_SHIFT)
+		return 0;
+
+	if ((end - addr) != PMD_SIZE)
+		return 0;
+
+	if (!IS_ALIGNED(addr, PMD_SIZE))
+		return 0;
+
+	if (!IS_ALIGNED(pfn, 1 << (PMD_SHIFT - PAGE_SHIFT)))
+		return 0;
+
+	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
+		return 0;
+
+	set_pmd_at(mm, addr, pmd, pmd_mkspecial(pmd_mkhuge(pfn_pmd(pfn, prot))));
+
+	pgtable = pte_alloc_one(mm);
+	if (unlikely(!pgtable))
+		return 1;
+	mm_inc_nr_ptes(mm);
+	ptl = pmd_lock(mm, pmd);
+	pgtable_trans_huge_deposit(mm, pmd, pgtable);
+	spin_unlock(ptl);
+
+	return 1;
+}
+#endif
+
 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
 			unsigned long addr, unsigned long end,
-			unsigned long pfn, pgprot_t prot)
+			unsigned long pfn, pgprot_t prot, unsigned int max_page_shift)
 {
 	pmd_t *pmd;
 	unsigned long next;
@@ -2720,6 +2771,12 @@ static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
 	VM_BUG_ON(pmd_trans_huge(*pmd));
 	do {
 		next = pmd_addr_end(addr, end);
+#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
+		if (remap_try_huge_pmd(mm, pmd, addr, next,
+				       pfn + (addr >> PAGE_SHIFT), prot, max_page_shift)) {
+			continue;
+		}
+#endif
 		err = remap_pte_range(mm, pmd, addr, next,
 				pfn + (addr >> PAGE_SHIFT), prot);
 		if (err)
@@ -2730,7 +2787,7 @@ static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
 
 static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
 			unsigned long addr, unsigned long end,
-			unsigned long pfn, pgprot_t prot)
+			unsigned long pfn, pgprot_t prot, unsigned int max_page_shift)
 {
 	pud_t *pud;
 	unsigned long next;
@@ -2743,7 +2800,7 @@ static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
 	do {
 		next = pud_addr_end(addr, end);
 		err = remap_pmd_range(mm, pud, addr, next,
-				pfn + (addr >> PAGE_SHIFT), prot);
+				pfn + (addr >> PAGE_SHIFT), prot, max_page_shift);
 		if (err)
 			return err;
 	} while (pud++, addr = next, addr != end);
@@ -2752,7 +2809,7 @@ static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
 
 static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
 			unsigned long addr, unsigned long end,
-			unsigned long pfn, pgprot_t prot)
+			unsigned long pfn, pgprot_t prot, unsigned int max_page_shift)
 {
 	p4d_t *p4d;
 	unsigned long next;
@@ -2765,7 +2822,7 @@ static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
 	do {
 		next = p4d_addr_end(addr, end);
 		err = remap_pud_range(mm, p4d, addr, next,
-				pfn + (addr >> PAGE_SHIFT), prot);
+				pfn + (addr >> PAGE_SHIFT), prot, max_page_shift);
 		if (err)
 			return err;
 	} while (p4d++, addr = next, addr != end);
@@ -2773,7 +2830,7 @@ static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
 }
 
 static int remap_pfn_range_internal(struct vm_area_struct *vma, unsigned long addr,
-		unsigned long pfn, unsigned long size, pgprot_t prot)
+		unsigned long pfn, unsigned long size, pgprot_t prot, unsigned int max_page_shift)
 {
 	pgd_t *pgd;
 	unsigned long next;
@@ -2817,7 +2874,7 @@ static int remap_pfn_range_internal(struct vm_area_struct *vma, unsigned long ad
 	do {
 		next = pgd_addr_end(addr, end);
 		err = remap_p4d_range(mm, pgd, addr, next,
-				pfn + (addr >> PAGE_SHIFT), prot);
+				pfn + (addr >> PAGE_SHIFT), prot, max_page_shift);
 		if (err)
 			return err;
 	} while (pgd++, addr = next, addr != end);
@@ -2832,8 +2889,7 @@ static int remap_pfn_range_internal(struct vm_area_struct *vma, unsigned long ad
 int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
 		unsigned long pfn, unsigned long size, pgprot_t prot)
 {
-	int error = remap_pfn_range_internal(vma, addr, pfn, size, prot);
-
+	int error = remap_pfn_range_internal(vma, addr, pfn, size, prot, pfnmap_max_page_shift);
 	if (!error)
 		return 0;
 
--
2.43.0