From: Uladzislau Rezki <urezki@gmail.com>
To: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: linux-mm@kvack.org, Andrew Morton <akpm@linux-foundation.org>,
Michal Hocko <mhocko@kernel.org>, Baoquan He <bhe@redhat.com>,
LKML <linux-kernel@vger.kernel.org>,
stable@vger.kernel.org
Subject: Re: [PATCH] mm/vmalloc, mm/kasan: respect gfp mask in kasan_populate_vmalloc()
Date: Sun, 31 Aug 2025 14:12:23 +0200 [thread overview]
Message-ID: <aLQ8J2vuYi2POPsE@pc636> (raw)
In-Reply-To: <20250831121058.92971-1-urezki@gmail.com>
On Sun, Aug 31, 2025 at 02:10:58PM +0200, Uladzislau Rezki (Sony) wrote:
> kasan_populate_vmalloc() and its helpers ignore the caller's gfp_mask
> and always allocate memory using the hardcoded GFP_KERNEL flag. This
> makes them inconsistent with vmalloc(), which was recently extended to
> support GFP_NOFS and GFP_NOIO allocations.
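>
> For illustration, a hypothetical caller in a context that must not
> recurse into the filesystem might look like this; without this patch,
> the KASAN shadow population behind it still allocates with GFP_KERNEL:
>
> 	/*
> 	 * Hypothetical example: __vmalloc() has honoured GFP_NOFS
> 	 * since commit 451769ebb7e7, but the shadow population it
> 	 * triggers did not.
> 	 */
> 	void *buf = __vmalloc(size, GFP_NOFS);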
>
> Page table allocations performed during shadow population also ignore
> the external gfp_mask. To preserve the intended semantics of GFP_NOFS
> and GFP_NOIO, wrap the apply_to_page_range() calls in the appropriate
> memalloc scope.
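>
> In minimal sketch form, the scope API pattern applied around the
> page-table walk is the following (the actual change below also handles
> the GFP_NOIO case):
>
> 	unsigned int flags = memalloc_nofs_save();
> 	/*
> 	 * Any allocation inside the section, including page-table
> 	 * allocations done by apply_to_page_range(), drops __GFP_FS.
> 	 */
> 	ret = apply_to_page_range(&init_mm, start, size,
> 				  kasan_populate_vmalloc_pte, &data);
> 	memalloc_nofs_restore(flags);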
>
> This patch:
> - Extends kasan_populate_vmalloc() and helpers to take gfp_mask;
> - Passes gfp_mask down to alloc_pages_bulk() and __get_free_page();
> - Enforces GFP_NOFS/NOIO semantics with memalloc_*_save()/restore()
> around apply_to_page_range();
> - Updates vmalloc.c and percpu allocator call sites accordingly.
>
> To: Andrey Ryabinin <ryabinin.a.a@gmail.com>
> Cc: <stable@vger.kernel.org>
> Fixes: 451769ebb7e7 ("mm/vmalloc: alloc GFP_NO{FS,IO} for vmalloc")
> Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
> ---
> include/linux/kasan.h | 6 +++---
> mm/kasan/shadow.c | 31 ++++++++++++++++++++++++-------
> mm/vmalloc.c | 8 ++++----
> 3 files changed, 31 insertions(+), 14 deletions(-)
>
> diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> index 890011071f2b..fe5ce9215821 100644
> --- a/include/linux/kasan.h
> +++ b/include/linux/kasan.h
> @@ -562,7 +562,7 @@ static inline void kasan_init_hw_tags(void) { }
> #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
>
> void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
> -int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
> +int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask);
> void kasan_release_vmalloc(unsigned long start, unsigned long end,
> unsigned long free_region_start,
> unsigned long free_region_end,
> @@ -574,7 +574,7 @@ static inline void kasan_populate_early_vm_area_shadow(void *start,
> unsigned long size)
> { }
> static inline int kasan_populate_vmalloc(unsigned long start,
> - unsigned long size)
> + unsigned long size, gfp_t gfp_mask)
> {
> return 0;
> }
> @@ -610,7 +610,7 @@ static __always_inline void kasan_poison_vmalloc(const void *start,
> static inline void kasan_populate_early_vm_area_shadow(void *start,
> unsigned long size) { }
> static inline int kasan_populate_vmalloc(unsigned long start,
> - unsigned long size)
> + unsigned long size, gfp_t gfp_mask)
> {
> return 0;
> }
> diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
> index d2c70cd2afb1..c7c0be119173 100644
> --- a/mm/kasan/shadow.c
> +++ b/mm/kasan/shadow.c
> @@ -335,13 +335,13 @@ static void ___free_pages_bulk(struct page **pages, int nr_pages)
> }
> }
>
> -static int ___alloc_pages_bulk(struct page **pages, int nr_pages)
> +static int ___alloc_pages_bulk(struct page **pages, int nr_pages, gfp_t gfp_mask)
> {
> unsigned long nr_populated, nr_total = nr_pages;
> struct page **page_array = pages;
>
> while (nr_pages) {
> - nr_populated = alloc_pages_bulk(GFP_KERNEL, nr_pages, pages);
> + nr_populated = alloc_pages_bulk(gfp_mask, nr_pages, pages);
> if (!nr_populated) {
> ___free_pages_bulk(page_array, nr_total - nr_pages);
> return -ENOMEM;
> @@ -353,25 +353,42 @@ static int ___alloc_pages_bulk(struct page **pages, int nr_pages)
> return 0;
> }
>
> -static int __kasan_populate_vmalloc(unsigned long start, unsigned long end)
> +static int __kasan_populate_vmalloc(unsigned long start, unsigned long end, gfp_t gfp_mask)
> {
> unsigned long nr_pages, nr_total = PFN_UP(end - start);
> struct vmalloc_populate_data data;
> + unsigned int flags;
> int ret = 0;
>
> - data.pages = (struct page **)__get_free_page(GFP_KERNEL | __GFP_ZERO);
> + data.pages = (struct page **)__get_free_page(gfp_mask | __GFP_ZERO);
> if (!data.pages)
> return -ENOMEM;
>
> while (nr_total) {
> nr_pages = min(nr_total, PAGE_SIZE / sizeof(data.pages[0]));
> - ret = ___alloc_pages_bulk(data.pages, nr_pages);
> + ret = ___alloc_pages_bulk(data.pages, nr_pages, gfp_mask);
> if (ret)
> break;
>
> data.start = start;
> +
> + /*
> + * Page table allocations ignore the external gfp mask; enforce
> + * it via the scope API.
> + */
> + if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
> + flags = memalloc_nofs_save();
> + else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
> + flags = memalloc_noio_save();
> +
> ret = apply_to_page_range(&init_mm, start, nr_pages * PAGE_SIZE,
> kasan_populate_vmalloc_pte, &data);
> +
> + if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
> + memalloc_nofs_restore(flags);
> + else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
> + memalloc_noio_restore(flags);
> +
> ___free_pages_bulk(data.pages, nr_pages);
> if (ret)
> break;
> @@ -385,7 +402,7 @@ static int __kasan_populate_vmalloc(unsigned long start, unsigned long end)
> return ret;
> }
>
> -int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
> +int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask)
> {
> unsigned long shadow_start, shadow_end;
> int ret;
> @@ -414,7 +431,7 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
> shadow_start = PAGE_ALIGN_DOWN(shadow_start);
> shadow_end = PAGE_ALIGN(shadow_end);
>
> - ret = __kasan_populate_vmalloc(shadow_start, shadow_end);
> + ret = __kasan_populate_vmalloc(shadow_start, shadow_end, gfp_mask);
> if (ret)
> return ret;
>
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index 6dbcdceecae1..5edd536ba9d2 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -2026,6 +2026,8 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
> if (unlikely(!vmap_initialized))
> return ERR_PTR(-EBUSY);
>
> + /* Only reclaim behaviour flags are relevant. */
> + gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
> might_sleep();
>
> /*
> @@ -2038,8 +2040,6 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
> */
> va = node_alloc(size, align, vstart, vend, &addr, &vn_id);
> if (!va) {
> - gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
> -
> va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
> if (unlikely(!va))
> return ERR_PTR(-ENOMEM);
> @@ -2089,7 +2089,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
> BUG_ON(va->va_start < vstart);
> BUG_ON(va->va_end > vend);
>
> - ret = kasan_populate_vmalloc(addr, size);
> + ret = kasan_populate_vmalloc(addr, size, gfp_mask);
> if (ret) {
> free_vmap_area(va);
> return ERR_PTR(ret);
> @@ -4826,7 +4826,7 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
>
> /* populate the kasan shadow space */
> for (area = 0; area < nr_vms; area++) {
> - if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
> + if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area], GFP_KERNEL))
> goto err_free_shadow;
> }
>
> --
> 2.47.2
>
+ Andrey Ryabinin <ryabinin.a.a@gmail.com>
--
Uladzislau Rezki