linux-mm.kvack.org archive mirror
* [PATCH] mm/vmalloc, mm/kasan: respect gfp mask in kasan_populate_vmalloc()
@ 2025-08-31 12:10 Uladzislau Rezki (Sony)
  2025-08-31 12:12 ` Uladzislau Rezki
                   ` (2 more replies)
  0 siblings, 3 replies; 7+ messages in thread
From: Uladzislau Rezki (Sony) @ 2025-08-31 12:10 UTC (permalink / raw)
  To: linux-mm, Andrew Morton
  Cc: Michal Hocko, Baoquan He, LKML, Uladzislau Rezki, stable

kasan_populate_vmalloc() and its helpers ignore the caller's gfp_mask
and always allocate memory using the hardcoded GFP_KERNEL flag. This
makes them inconsistent with vmalloc(), which was recently extended to
support GFP_NOFS and GFP_NOIO allocations.

Page table allocations performed during shadow population also ignore
the external gfp_mask. To preserve the intended semantics of GFP_NOFS
and GFP_NOIO, wrap the apply_to_page_range() calls into the appropriate
memalloc scope.
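
As a rough illustration of the scope pattern relied on here (a minimal
sketch only, not code from this patch; populate_shadow_range() is a
made-up stand-in for the shadow population path, which allocates page
tables with GFP_KERNEL internally):

#include <linux/gfp.h>
#include <linux/sched/mm.h>

/* Hypothetical placeholder for the shadow population path. */
static int populate_shadow_range(void);

static int populate_with_scope(gfp_t gfp_mask)
{
	unsigned int flags;
	int ret;

	if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO) {
		/* GFP_NOFS caller: nested allocations must not enter FS reclaim. */
		flags = memalloc_nofs_save();
		ret = populate_shadow_range();
		memalloc_nofs_restore(flags);
	} else if (!(gfp_mask & (__GFP_FS | __GFP_IO))) {
		/* GFP_NOIO caller: nested allocations must not enter FS or IO reclaim. */
		flags = memalloc_noio_save();
		ret = populate_shadow_range();
		memalloc_noio_restore(flags);
	} else {
		/* GFP_KERNEL-like callers need no scope. */
		ret = populate_shadow_range();
	}

	return ret;
}

Within such a scope, any GFP_KERNEL allocation made deeper in the call
chain is implicitly treated as GFP_NOFS or GFP_NOIO, which is exactly
what the page table allocations need.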

This patch:
 - Extends kasan_populate_vmalloc() and helpers to take gfp_mask;
 - Passes gfp_mask down to alloc_pages_bulk() and __get_free_page();
 - Enforces GFP_NOFS/NOIO semantics with memalloc_*_save()/restore()
   around apply_to_page_range();
 - Updates vmalloc.c and percpu allocator call sites accordingly.

To: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: <stable@vger.kernel.org>
Fixes: 451769ebb7e7 ("mm/vmalloc: alloc GFP_NO{FS,IO} for vmalloc")
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
---
 include/linux/kasan.h |  6 +++---
 mm/kasan/shadow.c     | 31 ++++++++++++++++++++++++-------
 mm/vmalloc.c          |  8 ++++----
 3 files changed, 31 insertions(+), 14 deletions(-)

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 890011071f2b..fe5ce9215821 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -562,7 +562,7 @@ static inline void kasan_init_hw_tags(void) { }
 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 
 void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
-int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
+int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask);
 void kasan_release_vmalloc(unsigned long start, unsigned long end,
 			   unsigned long free_region_start,
 			   unsigned long free_region_end,
@@ -574,7 +574,7 @@ static inline void kasan_populate_early_vm_area_shadow(void *start,
 						       unsigned long size)
 { }
 static inline int kasan_populate_vmalloc(unsigned long start,
-					unsigned long size)
+					unsigned long size, gfp_t gfp_mask)
 {
 	return 0;
 }
@@ -610,7 +610,7 @@ static __always_inline void kasan_poison_vmalloc(const void *start,
 static inline void kasan_populate_early_vm_area_shadow(void *start,
 						       unsigned long size) { }
 static inline int kasan_populate_vmalloc(unsigned long start,
-					unsigned long size)
+					unsigned long size, gfp_t gfp_mask)
 {
 	return 0;
 }
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index d2c70cd2afb1..c7c0be119173 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -335,13 +335,13 @@ static void ___free_pages_bulk(struct page **pages, int nr_pages)
 	}
 }
 
-static int ___alloc_pages_bulk(struct page **pages, int nr_pages)
+static int ___alloc_pages_bulk(struct page **pages, int nr_pages, gfp_t gfp_mask)
 {
 	unsigned long nr_populated, nr_total = nr_pages;
 	struct page **page_array = pages;
 
 	while (nr_pages) {
-		nr_populated = alloc_pages_bulk(GFP_KERNEL, nr_pages, pages);
+		nr_populated = alloc_pages_bulk(gfp_mask, nr_pages, pages);
 		if (!nr_populated) {
 			___free_pages_bulk(page_array, nr_total - nr_pages);
 			return -ENOMEM;
@@ -353,25 +353,42 @@ static int ___alloc_pages_bulk(struct page **pages, int nr_pages)
 	return 0;
 }
 
-static int __kasan_populate_vmalloc(unsigned long start, unsigned long end)
+static int __kasan_populate_vmalloc(unsigned long start, unsigned long end, gfp_t gfp_mask)
 {
 	unsigned long nr_pages, nr_total = PFN_UP(end - start);
 	struct vmalloc_populate_data data;
+	unsigned int flags;
 	int ret = 0;
 
-	data.pages = (struct page **)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+	data.pages = (struct page **)__get_free_page(gfp_mask | __GFP_ZERO);
 	if (!data.pages)
 		return -ENOMEM;
 
 	while (nr_total) {
 		nr_pages = min(nr_total, PAGE_SIZE / sizeof(data.pages[0]));
-		ret = ___alloc_pages_bulk(data.pages, nr_pages);
+		ret = ___alloc_pages_bulk(data.pages, nr_pages, gfp_mask);
 		if (ret)
 			break;
 
 		data.start = start;
+
+		/*
+		 * page tables allocations ignore external gfp mask, enforce it
+		 * by the scope API
+		 */
+		if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
+			flags = memalloc_nofs_save();
+		else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
+			flags = memalloc_noio_save();
+
 		ret = apply_to_page_range(&init_mm, start, nr_pages * PAGE_SIZE,
 					  kasan_populate_vmalloc_pte, &data);
+
+		if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
+			memalloc_nofs_restore(flags);
+		else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
+			memalloc_noio_restore(flags);
+
 		___free_pages_bulk(data.pages, nr_pages);
 		if (ret)
 			break;
@@ -385,7 +402,7 @@ static int __kasan_populate_vmalloc(unsigned long start, unsigned long end)
 	return ret;
 }
 
-int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
+int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask)
 {
 	unsigned long shadow_start, shadow_end;
 	int ret;
@@ -414,7 +431,7 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
 	shadow_start = PAGE_ALIGN_DOWN(shadow_start);
 	shadow_end = PAGE_ALIGN(shadow_end);
 
-	ret = __kasan_populate_vmalloc(shadow_start, shadow_end);
+	ret = __kasan_populate_vmalloc(shadow_start, shadow_end, gfp_mask);
 	if (ret)
 		return ret;
 
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 6dbcdceecae1..5edd536ba9d2 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2026,6 +2026,8 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	if (unlikely(!vmap_initialized))
 		return ERR_PTR(-EBUSY);
 
+	/* Only reclaim behaviour flags are relevant. */
+	gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
 	might_sleep();
 
 	/*
@@ -2038,8 +2040,6 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	 */
 	va = node_alloc(size, align, vstart, vend, &addr, &vn_id);
 	if (!va) {
-		gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
-
 		va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
 		if (unlikely(!va))
 			return ERR_PTR(-ENOMEM);
@@ -2089,7 +2089,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	BUG_ON(va->va_start < vstart);
 	BUG_ON(va->va_end > vend);
 
-	ret = kasan_populate_vmalloc(addr, size);
+	ret = kasan_populate_vmalloc(addr, size, gfp_mask);
 	if (ret) {
 		free_vmap_area(va);
 		return ERR_PTR(ret);
@@ -4826,7 +4826,7 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 
 	/* populate the kasan shadow space */
 	for (area = 0; area < nr_vms; area++) {
-		if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
+		if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area], GFP_KERNEL))
 			goto err_free_shadow;
 	}
 
-- 
2.47.2



* Re: [PATCH] mm/vmalloc, mm/kasan: respect gfp mask in kasan_populate_vmalloc()
  2025-08-31 12:10 [PATCH] mm/vmalloc, mm/kasan: respect gfp mask in kasan_populate_vmalloc() Uladzislau Rezki (Sony)
@ 2025-08-31 12:12 ` Uladzislau Rezki
  2025-08-31 19:24 ` Andrew Morton
  2025-09-01 10:16 ` Baoquan He
  2 siblings, 0 replies; 7+ messages in thread
From: Uladzislau Rezki @ 2025-08-31 12:12 UTC (permalink / raw)
  To: Andrey Ryabinin
  Cc: linux-mm, Andrew Morton, Michal Hocko, Baoquan He, LKML, stable

On Sun, Aug 31, 2025 at 02:10:58PM +0200, Uladzislau Rezki (Sony) wrote:
> kasan_populate_vmalloc() and its helpers ignore the caller's gfp_mask
> and always allocate memory using the hardcoded GFP_KERNEL flag. This
> makes them inconsistent with vmalloc(), which was recently extended to
> support GFP_NOFS and GFP_NOIO allocations.
> 
> [ ... full patch snipped ... ]
> 
+ Andrey Ryabinin <ryabinin.a.a@gmail.com>

--
Uladzislau Rezki


* Re: [PATCH] mm/vmalloc, mm/kasan: respect gfp mask in kasan_populate_vmalloc()
  2025-08-31 12:10 [PATCH] mm/vmalloc, mm/kasan: respect gfp mask in kasan_populate_vmalloc() Uladzislau Rezki (Sony)
  2025-08-31 12:12 ` Uladzislau Rezki
@ 2025-08-31 19:24 ` Andrew Morton
  2025-09-01 10:00   ` Uladzislau Rezki
  2025-09-01 10:16 ` Baoquan He
  2 siblings, 1 reply; 7+ messages in thread
From: Andrew Morton @ 2025-08-31 19:24 UTC (permalink / raw)
  To: Uladzislau Rezki (Sony); +Cc: linux-mm, Michal Hocko, Baoquan He, LKML, stable

On Sun, 31 Aug 2025 14:10:58 +0200 "Uladzislau Rezki (Sony)" <urezki@gmail.com> wrote:

> kasan_populate_vmalloc() and its helpers ignore the caller's gfp_mask
> and always allocate memory using the hardcoded GFP_KERNEL flag. This
> makes them inconsistent with vmalloc(), which was recently extended to
> support GFP_NOFS and GFP_NOIO allocations.
> 
> Page table allocations performed during shadow population also ignore
> the external gfp_mask. To preserve the intended semantics of GFP_NOFS
> and GFP_NOIO, wrap the apply_to_page_range() calls into the appropriate
> memalloc scope.
> 
> This patch:
>  - Extends kasan_populate_vmalloc() and helpers to take gfp_mask;
>  - Passes gfp_mask down to alloc_pages_bulk() and __get_free_page();
>  - Enforces GFP_NOFS/NOIO semantics with memalloc_*_save()/restore()
>    around apply_to_page_range();
>  - Updates vmalloc.c and percpu allocator call sites accordingly.
> 
> To: Andrey Ryabinin <ryabinin.a.a@gmail.com>
> Cc: <stable@vger.kernel.org>
> Fixes: 451769ebb7e7 ("mm/vmalloc: alloc GFP_NO{FS,IO} for vmalloc")
> Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>

Why cc:stable?

To justify this we'll need a description of the userspace visible
effects of the bug please.  We should always provide this information
when fixing something.  Or when adding something.  Basically, all the
time ;)

Thanks.


* Re: [PATCH] mm/vmalloc, mm/kasan: respect gfp mask in kasan_populate_vmalloc()
  2025-08-31 19:24 ` Andrew Morton
@ 2025-09-01 10:00   ` Uladzislau Rezki
  0 siblings, 0 replies; 7+ messages in thread
From: Uladzislau Rezki @ 2025-09-01 10:00 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Uladzislau Rezki (Sony), linux-mm, Michal Hocko, Baoquan He, LKML,
	stable

On Sun, Aug 31, 2025 at 12:24:10PM -0700, Andrew Morton wrote:
> On Sun, 31 Aug 2025 14:10:58 +0200 "Uladzislau Rezki (Sony)" <urezki@gmail.com> wrote:
> 
> > [ changelog snipped ]
> > 
> > To: Andrey Ryabinin <ryabinin.a.a@gmail.com>
> > Cc: <stable@vger.kernel.org>
> > Fixes: 451769ebb7e7 ("mm/vmalloc: alloc GFP_NO{FS,IO} for vmalloc")
> > Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
> 
> Why cc:stable?
> 
> To justify this we'll need a description of the userspace visible
> effects of the bug please.  We should always provide this information
> when fixing something.  Or when adding something.  Basically, all the
> time ;)
> 
Yes, I am not aware of any report. I was thinking more along the lines
that "mm/vmalloc: alloc GFP_NO{FS,IO} for vmalloc" was incomplete and
is thus a good candidate for stable.

We can drop the stable tag until there are some reports from people.
If there are any :)

Thanks!

--
Uladzislau Rezki


* Re: [PATCH] mm/vmalloc, mm/kasan: respect gfp mask in kasan_populate_vmalloc()
  2025-08-31 12:10 [PATCH] mm/vmalloc, mm/kasan: respect gfp mask in kasan_populate_vmalloc() Uladzislau Rezki (Sony)
  2025-08-31 12:12 ` Uladzislau Rezki
  2025-08-31 19:24 ` Andrew Morton
@ 2025-09-01 10:16 ` Baoquan He
  2025-09-01 10:23   ` Uladzislau Rezki
  2 siblings, 1 reply; 7+ messages in thread
From: Baoquan He @ 2025-09-01 10:16 UTC (permalink / raw)
  To: Uladzislau Rezki (Sony)
  Cc: linux-mm, Andrew Morton, Michal Hocko, LKML, stable

Hi Uladzislau,

On 08/31/25 at 02:10pm, Uladzislau Rezki (Sony) wrote:
> kasan_populate_vmalloc() and its helpers ignore the caller's gfp_mask
> and always allocate memory using the hardcoded GFP_KERNEL flag. This
> makes them inconsistent with vmalloc(), which was recently extended to
> support GFP_NOFS and GFP_NOIO allocations.

Is this patch on top of your patchset "[PATCH 0/8] __vmalloc() and no-block
support"? Or it is a replacement of "[PATCH 5/8] mm/kasan, mm/vmalloc: Respect
GFP flags in kasan_populate_vmalloc()" in the patchset?

I may not be getting their relationship clearly.

Thanks
Baoquan



* Re: [PATCH] mm/vmalloc, mm/kasan: respect gfp mask in kasan_populate_vmalloc()
  2025-09-01 10:16 ` Baoquan He
@ 2025-09-01 10:23   ` Uladzislau Rezki
  2025-09-01 15:33     ` Baoquan He
  0 siblings, 1 reply; 7+ messages in thread
From: Uladzislau Rezki @ 2025-09-01 10:23 UTC (permalink / raw)
  To: Baoquan He
  Cc: Uladzislau Rezki (Sony), linux-mm, Andrew Morton, Michal Hocko,
	LKML, stable

On Mon, Sep 01, 2025 at 06:16:41PM +0800, Baoquan He wrote:
> Hi Uladzislau,
> 
> On 08/31/25 at 02:10pm, Uladzislau Rezki (Sony) wrote:
> > kasan_populate_vmalloc() and its helpers ignore the caller's gfp_mask
> > and always allocate memory using the hardcoded GFP_KERNEL flag. This
> > makes them inconsistent with vmalloc(), which was recently extended to
> > support GFP_NOFS and GFP_NOIO allocations.
> 
> Is this patch on top of your patchset "[PATCH 0/8] __vmalloc() and no-block
> support"? Or it is a replacement of "[PATCH 5/8] mm/kasan, mm/vmalloc: Respect
> GFP flags in kasan_populate_vmalloc()" in the patchset?
> 
> I may not be getting their relationship clearly.
> 
It is separate from the series I posted to support no-block for vmalloc.
I will base a new version of that series on this patch, because this
one is rather a fix.

It addresses and completes GFP_NOFS/GFP_NOIO support for vmalloc.
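
As a rough caller-side illustration (a sketch under assumptions, not
code from the series; alloc_fs_buffer() is a made-up example):

#include <linux/vmalloc.h>

static void *alloc_fs_buffer(unsigned long nr_bytes)
{
	/*
	 * A filesystem path that must not recurse into FS reclaim.
	 * With the fix above, the KASAN shadow population triggered by
	 * this call also honours GFP_NOFS instead of using GFP_KERNEL.
	 */
	return __vmalloc(nr_bytes, GFP_NOFS | __GFP_ZERO);
}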

--
Uladzislau Rezki


* Re: [PATCH] mm/vmalloc, mm/kasan: respect gfp mask in kasan_populate_vmalloc()
  2025-09-01 10:23   ` Uladzislau Rezki
@ 2025-09-01 15:33     ` Baoquan He
  0 siblings, 0 replies; 7+ messages in thread
From: Baoquan He @ 2025-09-01 15:33 UTC (permalink / raw)
  To: Uladzislau Rezki; +Cc: linux-mm, Andrew Morton, Michal Hocko, LKML, stable

On 09/01/25 at 12:23pm, Uladzislau Rezki wrote:
> On Mon, Sep 01, 2025 at 06:16:41PM +0800, Baoquan He wrote:
> > Hi Uladzislau,
> > 
> > On 08/31/25 at 02:10pm, Uladzislau Rezki (Sony) wrote:
> > > kasan_populate_vmalloc() and its helpers ignore the caller's gfp_mask
> > > and always allocate memory using the hardcoded GFP_KERNEL flag. This
> > > makes them inconsistent with vmalloc(), which was recently extended to
> > > support GFP_NOFS and GFP_NOIO allocations.
> > 
> > Is this patch on top of your patchset "[PATCH 0/8] __vmalloc() and no-block
> > support"? Or it is a replacement of "[PATCH 5/8] mm/kasan, mm/vmalloc: Respect
> > GFP flags in kasan_populate_vmalloc()" in the patchset?
> > 
> > I may not be getting their relationship clearly.
> > 
> It is separate from the series I posted to support no-block for vmalloc.
> I will base a new version of that series on this patch, because this
> one is rather a fix.
> 
> It addresses and completes GFP_NOFS/GFP_NOIO support for vmalloc.

I got it now, thanks a lot.


