From: Yang Shi <yang@os.amperecomputing.com>
To: cl@gentwo.org, dennis@kernel.org, tj@kernel.org,
urezki@gmail.com, catalin.marinas@arm.com, will@kernel.org,
ryan.roberts@arm.com, david@kernel.org,
akpm@linux-foundation.org, hca@linux.ibm.com, gor@linux.ibm.com,
agordeev@linux.ibm.com
Cc: yang@os.amperecomputing.com, linux-mm@kvack.org,
linux-arm-kernel@lists.infradead.org,
linux-kernel@vger.kernel.org
Subject: [PATCH 08/11] vmalloc: pass in pgd pointer for vmap{__vunmap}_range_noflush()
Date: Wed, 29 Apr 2026 10:04:36 -0700 [thread overview]
Message-ID: <20260429170758.3018959-9-yang@os.amperecomputing.com> (raw)
In-Reply-To: <20260429170758.3018959-1-yang@os.amperecomputing.com>
vmap_range_noflush() and __vunmap_range_noflush() assume they manipulate
the init_mm pgd. The following patch will map the percpu local mapping
into the percpu page table by calling them, so that assumption will no
longer hold. Make them take a pgd pointer as a parameter.
Also make vmap_range_noflush() non-static; it will be called from
outside vmalloc in the following patch.
There is no functional change.
Signed-off-by: Yang Shi <yang@os.amperecomputing.com>
---
mm/internal.h | 5 ++++-
mm/kmsan/hooks.c | 14 +++++++-------
mm/vmalloc.c | 25 +++++++++++++------------
3 files changed, 24 insertions(+), 20 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index 5a2ddcf68e0b..1e54945f8750 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1553,10 +1553,13 @@ void clear_vm_uninitialized_flag(struct vm_struct *vm);
int __must_check __vmap_pages_range_noflush(unsigned long addr,
unsigned long end, pgprot_t prot,
struct page **pages, unsigned int page_shift);
+int __must_check vmap_range_noflush(pgd_t *pgdir, unsigned long addr,
+ unsigned long end, phys_addr_t phys_addr,
+ pgprot_t prot, unsigned int max_page_shift);
void vunmap_range_noflush(unsigned long start, unsigned long end);
-void __vunmap_range_noflush(unsigned long start, unsigned long end);
+void __vunmap_range_noflush(pgd_t *pgdir, unsigned long start, unsigned long end);
static inline bool vma_is_single_threaded_private(struct vm_area_struct *vma)
{
diff --git a/mm/kmsan/hooks.c b/mm/kmsan/hooks.c
index 8f22d1f22981..e2a0faf344b9 100644
--- a/mm/kmsan/hooks.c
+++ b/mm/kmsan/hooks.c
@@ -135,8 +135,8 @@ static unsigned long vmalloc_origin(unsigned long addr)
void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end)
{
- __vunmap_range_noflush(vmalloc_shadow(start), vmalloc_shadow(end));
- __vunmap_range_noflush(vmalloc_origin(start), vmalloc_origin(end));
+ __vunmap_range_noflush(init_mm.pgd, vmalloc_shadow(start), vmalloc_shadow(end));
+ __vunmap_range_noflush(init_mm.pgd, vmalloc_origin(start), vmalloc_origin(end));
flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
}
@@ -181,7 +181,7 @@ int kmsan_ioremap_page_range(unsigned long start, unsigned long end,
vmalloc_origin(start + off + PAGE_SIZE), prot, &origin,
PAGE_SHIFT);
if (mapped) {
- __vunmap_range_noflush(
+ __vunmap_range_noflush(init_mm.pgd,
vmalloc_shadow(start + off),
vmalloc_shadow(start + off + PAGE_SIZE));
err = mapped;
@@ -203,10 +203,10 @@ int kmsan_ioremap_page_range(unsigned long start, unsigned long end,
__free_pages(shadow, 1);
if (origin)
__free_pages(origin, 1);
- __vunmap_range_noflush(
+ __vunmap_range_noflush(init_mm.pgd,
vmalloc_shadow(start),
vmalloc_shadow(start + clean * PAGE_SIZE));
- __vunmap_range_noflush(
+ __vunmap_range_noflush(init_mm.pgd,
vmalloc_origin(start),
vmalloc_origin(start + clean * PAGE_SIZE));
}
@@ -233,8 +233,8 @@ void kmsan_iounmap_page_range(unsigned long start, unsigned long end)
i++, v_shadow += PAGE_SIZE, v_origin += PAGE_SIZE) {
shadow = kmsan_vmalloc_to_page_or_null((void *)v_shadow);
origin = kmsan_vmalloc_to_page_or_null((void *)v_origin);
- __vunmap_range_noflush(v_shadow, vmalloc_shadow(end));
- __vunmap_range_noflush(v_origin, vmalloc_origin(end));
+ __vunmap_range_noflush(init_mm.pgd, v_shadow, vmalloc_shadow(end));
+ __vunmap_range_noflush(init_mm.pgd, v_origin, vmalloc_origin(end));
if (shadow)
__free_pages(shadow, 1);
if (origin)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 068a6709062d..8ef7d9987e18 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -295,9 +295,9 @@ static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
return err;
}
-static int vmap_range_noflush(unsigned long addr, unsigned long end,
- phys_addr_t phys_addr, pgprot_t prot,
- unsigned int max_page_shift)
+int vmap_range_noflush(pgd_t *pgdir, unsigned long addr, unsigned long end,
+ phys_addr_t phys_addr, pgprot_t prot,
+ unsigned int max_page_shift)
{
pgd_t *pgd;
unsigned long start;
@@ -314,7 +314,7 @@ static int vmap_range_noflush(unsigned long addr, unsigned long end,
BUG_ON(addr >= end);
start = addr;
- pgd = pgd_offset_k(addr);
+ pgd = pgd_offset_pgd(pgdir, addr);
do {
next = pgd_addr_end(addr, end);
err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
@@ -334,8 +334,8 @@ int vmap_page_range(unsigned long addr, unsigned long end,
{
int err;
- err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
- ioremap_max_page_shift);
+ err = vmap_range_noflush(init_mm.pgd, addr, end, phys_addr,
+ pgprot_nx(prot), ioremap_max_page_shift);
flush_cache_vmap(addr, end);
if (!err)
err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
@@ -478,7 +478,7 @@ static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
*
* This is an internal function only. Do not use outside mm/.
*/
-void __vunmap_range_noflush(unsigned long start, unsigned long end)
+void __vunmap_range_noflush(pgd_t *pgdir, unsigned long start, unsigned long end)
{
unsigned long next;
pgd_t *pgd;
@@ -486,7 +486,7 @@ void __vunmap_range_noflush(unsigned long start, unsigned long end)
pgtbl_mod_mask mask = 0;
BUG_ON(addr >= end);
- pgd = pgd_offset_k(addr);
+ pgd = pgd_offset_pgd(pgdir, addr);
do {
next = pgd_addr_end(addr, end);
if (pgd_bad(*pgd))
@@ -503,7 +503,7 @@ void __vunmap_range_noflush(unsigned long start, unsigned long end)
void vunmap_range_noflush(unsigned long start, unsigned long end)
{
kmsan_vunmap_range_noflush(start, end);
- __vunmap_range_noflush(start, end);
+ __vunmap_range_noflush(init_mm.pgd, start, end);
}
/**
@@ -670,9 +670,10 @@ int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
int err;
- err = vmap_range_noflush(addr, addr + (1UL << page_shift),
- page_to_phys(pages[i]), prot,
- page_shift);
+ err = vmap_range_noflush(init_mm.pgd, addr,
+ addr + (1UL << page_shift),
+ page_to_phys(pages[i]), prot,
+ page_shift);
if (err)
return err;
--
2.47.0
next prev parent reply other threads:[~2026-04-29 17:09 UTC|newest]
Thread overview: 13+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-29 17:04 [RFC v1 PATCH 0/11] Optimize this_cpu_*() ops for non-x86 (ARM64 for this series) Yang Shi
2026-04-29 17:04 ` [PATCH 01/11] arm64: mm: enable percpu kernel page table Yang Shi
2026-04-29 17:04 ` [PATCH 02/11] arm64: mm: define percpu virtual space area Yang Shi
2026-04-29 17:04 ` [PATCH 03/11] arm64: smp: define setup_per_cpu_areas() Yang Shi
2026-04-29 17:04 ` [PATCH 04/11] mm: percpu: prepare to use dedicated percpu area Yang Shi
2026-04-29 17:04 ` [PATCH 05/11] arm64: mm: map local percpu first chunk Yang Shi
2026-04-29 17:04 ` [PATCH 06/11] mm: percpu: set up first chunk and reserve chunk Yang Shi
2026-04-29 17:04 ` [PATCH 07/11] arm64: mm: introduce __per_cpu_local_off Yang Shi
2026-04-29 17:04 ` Yang Shi [this message]
2026-04-29 17:04 ` [PATCH 09/11] mm: percpu: allocate and free local percpu vm area Yang Shi
2026-04-29 17:04 ` [PATCH 10/11] arm64: kconfig: select HAVE_LOCAL_PER_CPU_MAP Yang Shi
2026-04-29 17:04 ` [PATCH 11/11] arm64: percpu: use local percpu for this_cpu_*() APIs Yang Shi
2026-04-30 19:02 ` [RFC v1 PATCH 0/11] Optimize this_cpu_*() ops for non-x86 (ARM64 for this series) Yang Shi
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260429170758.3018959-9-yang@os.amperecomputing.com \
--to=yang@os.amperecomputing.com \
--cc=agordeev@linux.ibm.com \
--cc=akpm@linux-foundation.org \
--cc=catalin.marinas@arm.com \
--cc=cl@gentwo.org \
--cc=david@kernel.org \
--cc=dennis@kernel.org \
--cc=gor@linux.ibm.com \
--cc=hca@linux.ibm.com \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=ryan.roberts@arm.com \
--cc=tj@kernel.org \
--cc=urezki@gmail.com \
--cc=will@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox