linux-mm.kvack.org archive mirror
* [PATCH v4] mm: slub: avoid wake up kswapd in set_track_prepare
@ 2025-08-30  2:09 yangshiguang1011
  2025-09-01  7:50 ` David Rientjes
  0 siblings, 1 reply; 7+ messages in thread
From: yangshiguang1011 @ 2025-08-30  2:09 UTC (permalink / raw)
  To: harry.yoo
  Cc: vbabka, akpm, cl, rientjes, roman.gushchin, glittao, linux-mm,
	linux-kernel, yangshiguang, stable

From: yangshiguang <yangshiguang@xiaomi.com>

From: yangshiguang <yangshiguang@xiaomi.com>

set_track_prepare() can incur lock recursion.
The issue is that it is called from hrtimer_start_range_ns()
while holding per_cpu(hrtimer_bases)[n].lock; with
CONFIG_DEBUG_OBJECTS_TIMERS enabled, it may wake up kswapd in
set_track_prepare() and then try to take the same
per_cpu(hrtimer_bases)[n].lock.

Avoid the deadlock caused by implicitly waking up kswapd by
passing in allocation flags. The slab caller context has
preemption disabled, so __GFP_KSWAPD_RECLAIM must not appear in gfp_flags.
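
Condensed, the recursive lock acquisition in the trace below is
(flow abridged, function names as in the oops):

  hrtimer_start_range_ns()          <- takes per_cpu(hrtimer_bases)[n].lock
    debug_object_activate()
      debug_objects_fill_pool()
        kmem_cache_alloc_noprof()
          ___slab_alloc()
            set_track_prepare()     <- used GFP_NOWAIT before this patch
              stack_depot_save()
                wakeup_kswapd()
                  try_to_wake_up()
                    ...
                      hrtimer_try_to_cancel()
                        _raw_spin_lock_irqsave()  <- same lock again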

The oops looks something like:

BUG: spinlock recursion on CPU#3, swapper/3/0
 lock: 0xffffff8a4bf29c80, .magic: dead4ead, .owner: swapper/3/0, .owner_cpu: 3
Hardware name: Qualcomm Technologies, Inc. Popsicle based on SM8850 (DT)
Call trace:
spin_bug+0x0
_raw_spin_lock_irqsave+0x80
hrtimer_try_to_cancel+0x94
task_contending+0x10c
enqueue_dl_entity+0x2a4
dl_server_start+0x74
enqueue_task_fair+0x568
enqueue_task+0xac
do_activate_task+0x14c
ttwu_do_activate+0xcc
try_to_wake_up+0x6c8
default_wake_function+0x20
autoremove_wake_function+0x1c
__wake_up+0xac
wakeup_kswapd+0x19c
wake_all_kswapds+0x78
__alloc_pages_slowpath+0x1ac
__alloc_pages_noprof+0x298
stack_depot_save_flags+0x6b0
stack_depot_save+0x14
set_track_prepare+0x5c
___slab_alloc+0xccc
__kmalloc_cache_noprof+0x470
__set_page_owner+0x2bc
post_alloc_hook[jt]+0x1b8
prep_new_page+0x28
get_page_from_freelist+0x1edc
__alloc_pages_noprof+0x13c
alloc_slab_page+0x244
allocate_slab+0x7c
___slab_alloc+0x8e8
kmem_cache_alloc_noprof+0x450
debug_objects_fill_pool+0x22c
debug_object_activate+0x40
enqueue_hrtimer[jt]+0xdc
hrtimer_start_range_ns+0x5f8
...

Signed-off-by: yangshiguang <yangshiguang@xiaomi.com>
Fixes: 5cf909c553e9 ("mm/slub: use stackdepot to save stack trace in objects")
Cc: stable@vger.kernel.org
---

v1 -> v2:
    propagate gfp flags to set_track_prepare()
v2 -> v3:
    Remove the gfp restriction in set_track_prepare()
v3 -> v4:
    Re-describe the comments in set_track_prepare.

[1]https://lore.kernel.org/all/20250801065121.876793-1-yangshiguang1011@163.com/
[2]https://lore.kernel.org/all/20250814111641.380629-2-yangshiguang1011@163.com/
[3]https://lore.kernel.org/all/20250825121737.2535732-1-yangshiguang1011@163.com/
---
 mm/slub.c | 26 ++++++++++++++++----------
 1 file changed, 16 insertions(+), 10 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 30003763d224..b0af51a5321b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -962,19 +962,25 @@ static struct track *get_track(struct kmem_cache *s, void *object,
 }
 
 #ifdef CONFIG_STACKDEPOT
-static noinline depot_stack_handle_t set_track_prepare(void)
+static noinline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags)
 {
 	depot_stack_handle_t handle;
 	unsigned long entries[TRACK_ADDRS_COUNT];
 	unsigned int nr_entries;
+	/*
+	 * Preemption is disabled in ___slab_alloc() so we need to disallow
+	 * blocking. The flags are further adjusted by gfp_nested_mask() in
+	 * stack_depot itself.
+	 */
+	gfp_flags &= ~(__GFP_DIRECT_RECLAIM);
 
 	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
-	handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
+	handle = stack_depot_save(entries, nr_entries, gfp_flags);
 
 	return handle;
 }
 #else
-static inline depot_stack_handle_t set_track_prepare(void)
+static inline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags)
 {
 	return 0;
 }
@@ -996,9 +1002,9 @@ static void set_track_update(struct kmem_cache *s, void *object,
 }
 
 static __always_inline void set_track(struct kmem_cache *s, void *object,
-				      enum track_item alloc, unsigned long addr)
+				      enum track_item alloc, unsigned long addr, gfp_t gfp_flags)
 {
-	depot_stack_handle_t handle = set_track_prepare();
+	depot_stack_handle_t handle = set_track_prepare(gfp_flags);
 
 	set_track_update(s, object, alloc, addr, handle);
 }
@@ -1921,9 +1927,9 @@ static inline bool free_debug_processing(struct kmem_cache *s,
 static inline void slab_pad_check(struct kmem_cache *s, struct slab *slab) {}
 static inline int check_object(struct kmem_cache *s, struct slab *slab,
 			void *object, u8 val) { return 1; }
-static inline depot_stack_handle_t set_track_prepare(void) { return 0; }
+static inline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags) { return 0; }
 static inline void set_track(struct kmem_cache *s, void *object,
-			     enum track_item alloc, unsigned long addr) {}
+			     enum track_item alloc, unsigned long addr, gfp_t gfp_flags) {}
 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
 					struct slab *slab) {}
 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
@@ -3878,7 +3884,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 			 * tracking info and return the object.
 			 */
 			if (s->flags & SLAB_STORE_USER)
-				set_track(s, freelist, TRACK_ALLOC, addr);
+				set_track(s, freelist, TRACK_ALLOC, addr, gfpflags);
 
 			return freelist;
 		}
@@ -3910,7 +3916,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 			goto new_objects;
 
 		if (s->flags & SLAB_STORE_USER)
-			set_track(s, freelist, TRACK_ALLOC, addr);
+			set_track(s, freelist, TRACK_ALLOC, addr, gfpflags);
 
 		return freelist;
 	}
@@ -4422,7 +4428,7 @@ static noinline void free_to_partial_list(
 	depot_stack_handle_t handle = 0;
 
 	if (s->flags & SLAB_STORE_USER)
-		handle = set_track_prepare();
+		handle = set_track_prepare(__GFP_NOWARN);
 
 	spin_lock_irqsave(&n->list_lock, flags);
 
-- 
2.43.0




* Re: [PATCH v4] mm: slub: avoid wake up kswapd in set_track_prepare
  2025-08-30  2:09 [PATCH v4] mm: slub: avoid wake up kswapd in set_track_prepare yangshiguang1011
@ 2025-09-01  7:50 ` David Rientjes
  2025-09-01  8:15   ` Vlastimil Babka
  0 siblings, 1 reply; 7+ messages in thread
From: David Rientjes @ 2025-09-01  7:50 UTC (permalink / raw)
  To: yangshiguang1011
  Cc: harry.yoo, vbabka, akpm, cl, roman.gushchin, glittao, linux-mm,
	linux-kernel, yangshiguang, stable

On Sat, 30 Aug 2025, yangshiguang1011@163.com wrote:

> From: yangshiguang <yangshiguang@xiaomi.com>
> 
> From: yangshiguang <yangshiguang@xiaomi.com>
> 

Duplicate lines.

> set_track_prepare() can incur lock recursion.
> The issue is that it is called from hrtimer_start_range_ns()
> while holding per_cpu(hrtimer_bases)[n].lock; with
> CONFIG_DEBUG_OBJECTS_TIMERS enabled, it may wake up kswapd in
> set_track_prepare() and then try to take the same
> per_cpu(hrtimer_bases)[n].lock.
> 
> Avoid the deadlock caused by implicitly waking up kswapd by
> passing in allocation flags. The slab caller context has
> preemption disabled, so __GFP_KSWAPD_RECLAIM must not appear in gfp_flags.
> 

This mentions __GFP_KSWAPD_RECLAIM, but the patch actually masks off
__GFP_DIRECT_RECLAIM, which would be a heavier-weight operation.
Disabling direct reclaim does not necessarily imply that kswapd will be
disabled as well.

Are you meaning to clear __GFP_RECLAIM in set_track_prepare()?
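
For readers keeping the flag names straight, the reclaim bits compose as
below. This is an illustrative sketch only: the authoritative
definitions are in include/linux/gfp_types.h, and the helper names and
bit values here are made up for the example.

	typedef unsigned int gfp_t;	/* illustrative stand-in */

	#define __GFP_DIRECT_RECLAIM	0x400u	/* caller may block in reclaim */
	#define __GFP_KSWAPD_RECLAIM	0x800u	/* caller may wake kswapd */
	#define __GFP_RECLAIM		(__GFP_DIRECT_RECLAIM | __GFP_KSWAPD_RECLAIM)

	/* What the patch does: forbid blocking, still allow kswapd wakeups. */
	static inline gfp_t mask_direct_reclaim(gfp_t flags)
	{
		return flags & ~__GFP_DIRECT_RECLAIM;
	}

	/* What clearing __GFP_RECLAIM instead would do: forbid both. */
	static inline gfp_t mask_all_reclaim(gfp_t flags)
	{
		return flags & ~__GFP_RECLAIM;
	}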

> The oops looks something like:
> 
> BUG: spinlock recursion on CPU#3, swapper/3/0
>  lock: 0xffffff8a4bf29c80, .magic: dead4ead, .owner: swapper/3/0, .owner_cpu: 3
> Hardware name: Qualcomm Technologies, Inc. Popsicle based on SM8850 (DT)
> Call trace:
> spin_bug+0x0
> _raw_spin_lock_irqsave+0x80
> hrtimer_try_to_cancel+0x94
> task_contending+0x10c
> enqueue_dl_entity+0x2a4
> dl_server_start+0x74
> enqueue_task_fair+0x568
> enqueue_task+0xac
> do_activate_task+0x14c
> ttwu_do_activate+0xcc
> try_to_wake_up+0x6c8
> default_wake_function+0x20
> autoremove_wake_function+0x1c
> __wake_up+0xac
> wakeup_kswapd+0x19c
> wake_all_kswapds+0x78
> __alloc_pages_slowpath+0x1ac
> __alloc_pages_noprof+0x298
> stack_depot_save_flags+0x6b0
> stack_depot_save+0x14
> set_track_prepare+0x5c
> ___slab_alloc+0xccc
> __kmalloc_cache_noprof+0x470
> __set_page_owner+0x2bc
> post_alloc_hook[jt]+0x1b8
> prep_new_page+0x28
> get_page_from_freelist+0x1edc
> __alloc_pages_noprof+0x13c
> alloc_slab_page+0x244
> allocate_slab+0x7c
> ___slab_alloc+0x8e8
> kmem_cache_alloc_noprof+0x450
> debug_objects_fill_pool+0x22c
> debug_object_activate+0x40
> enqueue_hrtimer[jt]+0xdc
> hrtimer_start_range_ns+0x5f8
> ...
> 
> Signed-off-by: yangshiguang <yangshiguang@xiaomi.com>
> Fixes: 5cf909c553e9 ("mm/slub: use stackdepot to save stack trace in objects")
> Cc: stable@vger.kernel.org
> ---
> 
> v1 -> v2:
>     propagate gfp flags to set_track_prepare()
> v2 -> v3:
>     Remove the gfp restriction in set_track_prepare()
> v3 -> v4:
>     Re-describe the comments in set_track_prepare.
> 
> [1]https://lore.kernel.org/all/20250801065121.876793-1-yangshiguang1011@163.com/
> [2]https://lore.kernel.org/all/20250814111641.380629-2-yangshiguang1011@163.com/
> [3]https://lore.kernel.org/all/20250825121737.2535732-1-yangshiguang1011@163.com/
> ---
>  mm/slub.c | 26 ++++++++++++++++----------
>  1 file changed, 16 insertions(+), 10 deletions(-)
> 
> diff --git a/mm/slub.c b/mm/slub.c
> index 30003763d224..b0af51a5321b 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -962,19 +962,25 @@ static struct track *get_track(struct kmem_cache *s, void *object,
>  }
>  
>  #ifdef CONFIG_STACKDEPOT
> -static noinline depot_stack_handle_t set_track_prepare(void)
> +static noinline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags)
>  {
>  	depot_stack_handle_t handle;
>  	unsigned long entries[TRACK_ADDRS_COUNT];
>  	unsigned int nr_entries;
> +	/*
> +	 * Preemption is disabled in ___slab_alloc() so we need to disallow
> +	 * blocking. The flags are further adjusted by gfp_nested_mask() in
> +	 * stack_depot itself.
> +	 */
> +	gfp_flags &= ~(__GFP_DIRECT_RECLAIM);
>  
>  	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
> -	handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
> +	handle = stack_depot_save(entries, nr_entries, gfp_flags);
>  
>  	return handle;
>  }
>  #else
> -static inline depot_stack_handle_t set_track_prepare(void)
> +static inline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags)
>  {
>  	return 0;
>  }
> @@ -996,9 +1002,9 @@ static void set_track_update(struct kmem_cache *s, void *object,
>  }
>  
>  static __always_inline void set_track(struct kmem_cache *s, void *object,
> -				      enum track_item alloc, unsigned long addr)
> +				      enum track_item alloc, unsigned long addr, gfp_t gfp_flags)
>  {
> -	depot_stack_handle_t handle = set_track_prepare();
> +	depot_stack_handle_t handle = set_track_prepare(gfp_flags);
>  
>  	set_track_update(s, object, alloc, addr, handle);
>  }
> @@ -1921,9 +1927,9 @@ static inline bool free_debug_processing(struct kmem_cache *s,
>  static inline void slab_pad_check(struct kmem_cache *s, struct slab *slab) {}
>  static inline int check_object(struct kmem_cache *s, struct slab *slab,
>  			void *object, u8 val) { return 1; }
> -static inline depot_stack_handle_t set_track_prepare(void) { return 0; }
> +static inline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags) { return 0; }
>  static inline void set_track(struct kmem_cache *s, void *object,
> -			     enum track_item alloc, unsigned long addr) {}
> +			     enum track_item alloc, unsigned long addr, gfp_t gfp_flags) {}
>  static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
>  					struct slab *slab) {}
>  static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
> @@ -3878,7 +3884,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
>  			 * tracking info and return the object.
>  			 */
>  			if (s->flags & SLAB_STORE_USER)
> -				set_track(s, freelist, TRACK_ALLOC, addr);
> +				set_track(s, freelist, TRACK_ALLOC, addr, gfpflags);
>  
>  			return freelist;
>  		}
> @@ -3910,7 +3916,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
>  			goto new_objects;
>  
>  		if (s->flags & SLAB_STORE_USER)
> -			set_track(s, freelist, TRACK_ALLOC, addr);
> +			set_track(s, freelist, TRACK_ALLOC, addr, gfpflags);
>  
>  		return freelist;
>  	}
> @@ -4422,7 +4428,7 @@ static noinline void free_to_partial_list(
>  	depot_stack_handle_t handle = 0;
>  
>  	if (s->flags & SLAB_STORE_USER)
> -		handle = set_track_prepare();
> +		handle = set_track_prepare(__GFP_NOWARN);
>  
>  	spin_lock_irqsave(&n->list_lock, flags);
>  
> -- 
> 2.43.0
> 
> 



* Re: [PATCH v4] mm: slub: avoid wake up kswapd in set_track_prepare
  2025-09-01  7:50 ` David Rientjes
@ 2025-09-01  8:15   ` Vlastimil Babka
  2025-09-01  8:29     ` yangshiguang
  0 siblings, 1 reply; 7+ messages in thread
From: Vlastimil Babka @ 2025-09-01  8:15 UTC (permalink / raw)
  To: David Rientjes, yangshiguang1011
  Cc: harry.yoo, akpm, cl, roman.gushchin, glittao, linux-mm,
	linux-kernel, yangshiguang, stable

On 9/1/25 09:50, David Rientjes wrote:
> On Sat, 30 Aug 2025, yangshiguang1011@163.com wrote:
> 
>> From: yangshiguang <yangshiguang@xiaomi.com>
>> 
>> From: yangshiguang <yangshiguang@xiaomi.com>
>> 
> 
> Duplicate lines.
> 
>> set_track_prepare() can incur lock recursion.
>> The issue is that it is called from hrtimer_start_range_ns()
>> while holding per_cpu(hrtimer_bases)[n].lock; with
>> CONFIG_DEBUG_OBJECTS_TIMERS enabled, it may wake up kswapd in
>> set_track_prepare() and then try to take the same
>> per_cpu(hrtimer_bases)[n].lock.
>> 
>> Avoid the deadlock caused by implicitly waking up kswapd by
>> passing in allocation flags. The slab caller context has
>> preemption disabled, so __GFP_KSWAPD_RECLAIM must not appear in gfp_flags.
>> 
> 
> This mentions __GFP_KSWAPD_RECLAIM, but the patch actually masks off
> __GFP_DIRECT_RECLAIM, which would be a heavier-weight operation.
> Disabling direct reclaim does not necessarily imply that kswapd will be
> disabled as well.

Yeah I think the changelog should say __GFP_DIRECT_RECLAIM.

> Are you meaning to clear __GFP_RECLAIM in set_track_prepare()?

No, because if the calling context (e.g. the hrtimers) can't support
__GFP_KSWAPD_RECLAIM, it won't have it in gfp_flags, and we now pass
those flags to set_track_prepare(), so it already won't be there.
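
Concretely, the propagation looks like this (a sketch; the exact gfp
value used by fill_pool() in lib/debugobjects.c has varied across
kernel versions, so treat the flags below as an assumption):

	fill_pool()                           gfp = __GFP_HIGH | __GFP_NOWARN
	  kmem_cache_zalloc(obj_cache, gfp)   <- no reclaim bits set
	    ___slab_alloc(s, gfp, ...)
	      set_track(..., gfp)             <- this patch forwards the caller's flags
	        stack_depot_save(entries, nr_entries, gfp)  <- cannot wake kswapd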




* Re: [PATCH v4] mm: slub: avoid wake up kswapd in set_track_prepare
  2025-09-01  8:15   ` Vlastimil Babka
@ 2025-09-01  8:29     ` yangshiguang
  2025-09-01  8:46       ` Vlastimil Babka
  0 siblings, 1 reply; 7+ messages in thread
From: yangshiguang @ 2025-09-01  8:29 UTC (permalink / raw)
  To: Vlastimil Babka
  Cc: David Rientjes, harry.yoo, akpm, cl, roman.gushchin, glittao,
	linux-mm, linux-kernel, yangshiguang, stable



At 2025-09-01 16:15:04, "Vlastimil Babka" <vbabka@suse.cz> wrote:
>On 9/1/25 09:50, David Rientjes wrote:
>> On Sat, 30 Aug 2025, yangshiguang1011@163.com wrote:
>> 
>>> From: yangshiguang <yangshiguang@xiaomi.com>
>>> 
>>> From: yangshiguang <yangshiguang@xiaomi.com>
>>> 
>> 
>> Duplicate lines.
>> 
>>> set_track_prepare() can incur lock recursion.
>>> The issue is that it is called from hrtimer_start_range_ns()
>>> while holding per_cpu(hrtimer_bases)[n].lock; with
>>> CONFIG_DEBUG_OBJECTS_TIMERS enabled, it may wake up kswapd in
>>> set_track_prepare() and then try to take the same
>>> per_cpu(hrtimer_bases)[n].lock.
>>> 
>>> Avoid the deadlock caused by implicitly waking up kswapd by
>>> passing in allocation flags. The slab caller context has
>>> preemption disabled, so __GFP_KSWAPD_RECLAIM must not appear in gfp_flags.
>>> 
>> 
>> This mentions __GFP_KSWAPD_RECLAIM, but the patch actually masks off
>> __GFP_DIRECT_RECLAIM, which would be a heavier-weight operation.
>> Disabling direct reclaim does not necessarily imply that kswapd will be
>> disabled as well.
>
>Yeah I think the changelog should say __GFP_DIRECT_RECLAIM.
>
>> Are you meaning to clear __GFP_RECLAIM in set_track_prepare()?
>
>No, because if the calling context (e.g. the hrtimers) can't support
>__GFP_KSWAPD_RECLAIM, it won't have it in gfp_flags, and we now pass
>those flags to set_track_prepare(), so it already won't be there.


Sorry, it should be __GFP_DIRECT_RECLAIM. I will resend the patch.


* Re: [PATCH v4] mm: slub: avoid wake up kswapd in set_track_prepare
  2025-09-01  8:29     ` yangshiguang
@ 2025-09-01  8:46       ` Vlastimil Babka
  2025-09-01  9:00         ` yangshiguang
  0 siblings, 1 reply; 7+ messages in thread
From: Vlastimil Babka @ 2025-09-01  8:46 UTC (permalink / raw)
  To: yangshiguang
  Cc: David Rientjes, harry.yoo, akpm, cl, roman.gushchin, glittao,
	linux-mm, linux-kernel, yangshiguang, stable

On 9/1/25 10:29, yangshiguang wrote:
> 
> 
> At 2025-09-01 16:15:04, "Vlastimil Babka" <vbabka@suse.cz> wrote:
>>On 9/1/25 09:50, David Rientjes wrote:
>>> On Sat, 30 Aug 2025, yangshiguang1011@163.com wrote:
>>> 
>>>> From: yangshiguang <yangshiguang@xiaomi.com>
>>>> 
>>>> From: yangshiguang <yangshiguang@xiaomi.com>
>>>> 
>>> 
>>> Duplicate lines.
>>> 
>>>> set_track_prepare() can incur lock recursion.
>>>> The issue is that it is called from hrtimer_start_range_ns()
>>>> while holding per_cpu(hrtimer_bases)[n].lock; with
>>>> CONFIG_DEBUG_OBJECTS_TIMERS enabled, it may wake up kswapd in
>>>> set_track_prepare() and then try to take the same
>>>> per_cpu(hrtimer_bases)[n].lock.
>>>> 
>>>> Avoid the deadlock caused by implicitly waking up kswapd by
>>>> passing in allocation flags. The slab caller context has
>>>> preemption disabled, so __GFP_KSWAPD_RECLAIM must not appear in gfp_flags.
>>>> 
>>> 
>>> This mentions __GFP_KSWAPD_RECLAIM, but the patch actually masks off
>>> __GFP_DIRECT_RECLAIM, which would be a heavier-weight operation.
>>> Disabling direct reclaim does not necessarily imply that kswapd will be
>>> disabled as well.
>>
>>Yeah I think the changelog should say __GFP_DIRECT_RECLAIM.
>>
>>> Are you meaning to clear __GFP_RECLAIM in set_track_prepare()?
>>
>>No, because if the calling context (e.g. the hrtimers) can't support
>>__GFP_KSWAPD_RECLAIM, it won't have it in gfp_flags, and we now pass
>>those flags to set_track_prepare(), so it already won't be there.
> 
> 
> Sorry, it should be __GFP_DIRECT_RECLAIM. I will resend the patch.

I have adjusted it locally already. I also moved the masking of
__GFP_DIRECT_RECLAIM into ___slab_alloc() itself, as that's where
preemption is disabled, so it's more obvious.

Does the result look good to you?

commit 1b7052bc536650f8ca29b4f6f8682dc9f5692d16
Author: yangshiguang <yangshiguang@xiaomi.com>
Date:   Sat Aug 30 10:09:46 2025 +0800

    mm: slub: avoid wake up kswapd in set_track_prepare
    
    set_track_prepare() can incur lock recursion.
    The issue is that it is called from hrtimer_start_range_ns()
    while holding per_cpu(hrtimer_bases)[n].lock; with
    CONFIG_DEBUG_OBJECTS_TIMERS enabled, it may wake up kswapd in
    set_track_prepare() and then try to take the same
    per_cpu(hrtimer_bases)[n].lock.
    
    Avoid the deadlock caused by implicitly waking up kswapd by passing in
    allocation flags, which do not contain __GFP_KSWAPD_RECLAIM in the
    debug_objects_fill_pool() case.
    Since ___slab_alloc() has preemption disabled and thus was using
    GFP_NOWAIT, we instead mask out __GFP_DIRECT_RECLAIM from the flags.
    
    The oops looks something like:
    
    BUG: spinlock recursion on CPU#3, swapper/3/0
     lock: 0xffffff8a4bf29c80, .magic: dead4ead, .owner: swapper/3/0, .owner_cpu: 3
    Hardware name: Qualcomm Technologies, Inc. Popsicle based on SM8850 (DT)
    Call trace:
    spin_bug+0x0
    _raw_spin_lock_irqsave+0x80
    hrtimer_try_to_cancel+0x94
    task_contending+0x10c
    enqueue_dl_entity+0x2a4
    dl_server_start+0x74
    enqueue_task_fair+0x568
    enqueue_task+0xac
    do_activate_task+0x14c
    ttwu_do_activate+0xcc
    try_to_wake_up+0x6c8
    default_wake_function+0x20
    autoremove_wake_function+0x1c
    __wake_up+0xac
    wakeup_kswapd+0x19c
    wake_all_kswapds+0x78
    __alloc_pages_slowpath+0x1ac
    __alloc_pages_noprof+0x298
    stack_depot_save_flags+0x6b0
    stack_depot_save+0x14
    set_track_prepare+0x5c
    ___slab_alloc+0xccc
    __kmalloc_cache_noprof+0x470
    __set_page_owner+0x2bc
    post_alloc_hook[jt]+0x1b8
    prep_new_page+0x28
    get_page_from_freelist+0x1edc
    __alloc_pages_noprof+0x13c
    alloc_slab_page+0x244
    allocate_slab+0x7c
    ___slab_alloc+0x8e8
    kmem_cache_alloc_noprof+0x450
    debug_objects_fill_pool+0x22c
    debug_object_activate+0x40
    enqueue_hrtimer[jt]+0xdc
    hrtimer_start_range_ns+0x5f8
    ...
    
    Signed-off-by: yangshiguang <yangshiguang@xiaomi.com>
    Fixes: 5cf909c553e9 ("mm/slub: use stackdepot to save stack trace in objects")
    Cc: stable@vger.kernel.org
    Signed-off-by: Vlastimil Babka <vbabka@suse.cz>

diff --git a/mm/slub.c b/mm/slub.c
index 1787e4d51e48..d257141896c9 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -962,19 +962,19 @@ static struct track *get_track(struct kmem_cache *s, void *object,
 }
 
 #ifdef CONFIG_STACKDEPOT
-static noinline depot_stack_handle_t set_track_prepare(void)
+static noinline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags)
 {
 	depot_stack_handle_t handle;
 	unsigned long entries[TRACK_ADDRS_COUNT];
 	unsigned int nr_entries;
 
 	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
-	handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
+	handle = stack_depot_save(entries, nr_entries, gfp_flags);
 
 	return handle;
 }
 #else
-static inline depot_stack_handle_t set_track_prepare(void)
+static inline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags)
 {
 	return 0;
 }
@@ -996,9 +996,9 @@ static void set_track_update(struct kmem_cache *s, void *object,
 }
 
 static __always_inline void set_track(struct kmem_cache *s, void *object,
-				      enum track_item alloc, unsigned long addr)
+				      enum track_item alloc, unsigned long addr, gfp_t gfp_flags)
 {
-	depot_stack_handle_t handle = set_track_prepare();
+	depot_stack_handle_t handle = set_track_prepare(gfp_flags);
 
 	set_track_update(s, object, alloc, addr, handle);
 }
@@ -1926,9 +1926,9 @@ static inline bool free_debug_processing(struct kmem_cache *s,
 static inline void slab_pad_check(struct kmem_cache *s, struct slab *slab) {}
 static inline int check_object(struct kmem_cache *s, struct slab *slab,
 			void *object, u8 val) { return 1; }
-static inline depot_stack_handle_t set_track_prepare(void) { return 0; }
+static inline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags) { return 0; }
 static inline void set_track(struct kmem_cache *s, void *object,
-			     enum track_item alloc, unsigned long addr) {}
+			     enum track_item alloc, unsigned long addr, gfp_t gfp_flags) {}
 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
 					struct slab *slab) {}
 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
@@ -3881,9 +3881,14 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 			 * For debug caches here we had to go through
 			 * alloc_single_from_partial() so just store the
 			 * tracking info and return the object.
+			 *
+			 * Due to disabled preemption we need to disallow
+			 * blocking. The flags are further adjusted by
+			 * gfp_nested_mask() in stack_depot itself.
 			 */
 			if (s->flags & SLAB_STORE_USER)
-				set_track(s, freelist, TRACK_ALLOC, addr);
+				set_track(s, freelist, TRACK_ALLOC, addr,
+					  gfpflags & ~(__GFP_DIRECT_RECLAIM));
 
 			return freelist;
 		}
@@ -3915,7 +3920,8 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 			goto new_objects;
 
 		if (s->flags & SLAB_STORE_USER)
-			set_track(s, freelist, TRACK_ALLOC, addr);
+			set_track(s, freelist, TRACK_ALLOC, addr,
+				  gfpflags & ~(__GFP_DIRECT_RECLAIM));
 
 		return freelist;
 	}
@@ -4426,8 +4432,12 @@ static noinline void free_to_partial_list(
 	unsigned long flags;
 	depot_stack_handle_t handle = 0;
 
+	/*
+	 * We cannot use GFP_NOWAIT as there are callsites where waking up
+	 * kswapd could deadlock
+	 */
 	if (s->flags & SLAB_STORE_USER)
-		handle = set_track_prepare();
+		handle = set_track_prepare(__GFP_NOWARN);
 
 	spin_lock_irqsave(&n->list_lock, flags);
 




* Re: [PATCH v4] mm: slub: avoid wake up kswapd in set_track_prepare
  2025-09-01  8:46       ` Vlastimil Babka
@ 2025-09-01  9:00         ` yangshiguang
  2025-09-01  9:14           ` Vlastimil Babka
  0 siblings, 1 reply; 7+ messages in thread
From: yangshiguang @ 2025-09-01  9:00 UTC (permalink / raw)
  To: Vlastimil Babka
  Cc: David Rientjes, harry.yoo, akpm, cl, roman.gushchin, glittao,
	linux-mm, linux-kernel, yangshiguang, stable



At 2025-09-01 16:46:13, "Vlastimil Babka" <vbabka@suse.cz> wrote:
>On 9/1/25 10:29, yangshiguang wrote:
>> 
>> 
>> At 2025-09-01 16:15:04, "Vlastimil Babka" <vbabka@suse.cz> wrote:
>>>On 9/1/25 09:50, David Rientjes wrote:
>>>> On Sat, 30 Aug 2025, yangshiguang1011@163.com wrote:
>>>> 
>>>>> From: yangshiguang <yangshiguang@xiaomi.com>
>>>>> 
>>>>> From: yangshiguang <yangshiguang@xiaomi.com>
>>>>> 
>>>> 
>>>> Duplicate lines.
>>>> 
>>>>> set_track_prepare() can incur lock recursion.
>>>>> The issue is that it is called from hrtimer_start_range_ns()
>>>>> while holding per_cpu(hrtimer_bases)[n].lock; with
>>>>> CONFIG_DEBUG_OBJECTS_TIMERS enabled, it may wake up kswapd in
>>>>> set_track_prepare() and then try to take the same
>>>>> per_cpu(hrtimer_bases)[n].lock.
>>>>> 
>>>>> Avoid the deadlock caused by implicitly waking up kswapd by
>>>>> passing in allocation flags. The slab caller context has
>>>>> preemption disabled, so __GFP_KSWAPD_RECLAIM must not appear in gfp_flags.
>>>>> 
>>>> 
>>>> This mentions __GFP_KSWAPD_RECLAIM, but the patch actually masks off
>>>> __GFP_DIRECT_RECLAIM, which would be a heavier-weight operation.
>>>> Disabling direct reclaim does not necessarily imply that kswapd will be
>>>> disabled as well.
>>>
>>>Yeah I think the changelog should say __GFP_DIRECT_RECLAIM.
>>>
>>>> Are you meaning to clear __GFP_RECLAIM in set_track_prepare()?
>>>
>>>No, because if the calling context (e.g. the hrtimers) can't support
>>>__GFP_KSWAPD_RECLAIM, it won't have it in gfp_flags, and we now pass
>>>those flags to set_track_prepare(), so it already won't be there.
>> 
>> 
>> Sorry, it should be __GFP_DIRECT_RECLAIM. I will resend the patch.
>
>I have adjusted it locally already. I also moved the masking of
>__GFP_DIRECT_RECLAIM into ___slab_alloc() itself, as that's where
>preemption is disabled, so it's more obvious.
>
>Does the result look good to you?

This looks good.
Currently, ___slab_alloc() is the only caller of set_track() that runs
with preemption disabled. In the future, not all callers will disable
preemption.
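
As a hypothetical illustration of that point (this call site is
invented, not part of the patch): a future set_track() user running
with preemption enabled could forward its flags unmasked:

	/* Hypothetical caller with preemption enabled -- illustrative only. */
	if (s->flags & SLAB_STORE_USER)
		set_track(s, object, TRACK_FREE, addr, gfpflags);	/* may block */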


>
>commit 1b7052bc536650f8ca29b4f6f8682dc9f5692d16
>Author: yangshiguang <yangshiguang@xiaomi.com>
>Date:   Sat Aug 30 10:09:46 2025 +0800
>
>    mm: slub: avoid wake up kswapd in set_track_prepare
>    
>    set_track_prepare() can incur lock recursion.
>    The issue is that it is called from hrtimer_start_range_ns()
>    while holding per_cpu(hrtimer_bases)[n].lock; with
>    CONFIG_DEBUG_OBJECTS_TIMERS enabled, it may wake up kswapd in
>    set_track_prepare() and then try to take the same
>    per_cpu(hrtimer_bases)[n].lock.
>    
>    Avoid the deadlock caused by implicitly waking up kswapd by passing in
>    allocation flags, which do not contain __GFP_KSWAPD_RECLAIM in the
>    debug_objects_fill_pool() case.
>    Since ___slab_alloc() has preemption disabled and thus was using
>    GFP_NOWAIT, we instead mask out __GFP_DIRECT_RECLAIM from the flags.
>    
>    The oops looks something like:
>    
>    BUG: spinlock recursion on CPU#3, swapper/3/0
>     lock: 0xffffff8a4bf29c80, .magic: dead4ead, .owner: swapper/3/0, .owner_cpu: 3
>    Hardware name: Qualcomm Technologies, Inc. Popsicle based on SM8850 (DT)
>    Call trace:
>    spin_bug+0x0
>    _raw_spin_lock_irqsave+0x80
>    hrtimer_try_to_cancel+0x94
>    task_contending+0x10c
>    enqueue_dl_entity+0x2a4
>    dl_server_start+0x74
>    enqueue_task_fair+0x568
>    enqueue_task+0xac
>    do_activate_task+0x14c
>    ttwu_do_activate+0xcc
>    try_to_wake_up+0x6c8
>    default_wake_function+0x20
>    autoremove_wake_function+0x1c
>    __wake_up+0xac
>    wakeup_kswapd+0x19c
>    wake_all_kswapds+0x78
>    __alloc_pages_slowpath+0x1ac
>    __alloc_pages_noprof+0x298
>    stack_depot_save_flags+0x6b0
>    stack_depot_save+0x14
>    set_track_prepare+0x5c
>    ___slab_alloc+0xccc
>    __kmalloc_cache_noprof+0x470
>    __set_page_owner+0x2bc
>    post_alloc_hook[jt]+0x1b8
>    prep_new_page+0x28
>    get_page_from_freelist+0x1edc
>    __alloc_pages_noprof+0x13c
>    alloc_slab_page+0x244
>    allocate_slab+0x7c
>    ___slab_alloc+0x8e8
>    kmem_cache_alloc_noprof+0x450
>    debug_objects_fill_pool+0x22c
>    debug_object_activate+0x40
>    enqueue_hrtimer[jt]+0xdc
>    hrtimer_start_range_ns+0x5f8
>    ...
>    
>    Signed-off-by: yangshiguang <yangshiguang@xiaomi.com>
>    Fixes: 5cf909c553e9 ("mm/slub: use stackdepot to save stack trace in objects")
>    Cc: stable@vger.kernel.org
>    Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
>
>diff --git a/mm/slub.c b/mm/slub.c
>index 1787e4d51e48..d257141896c9 100644
>--- a/mm/slub.c
>+++ b/mm/slub.c
>@@ -962,19 +962,19 @@ static struct track *get_track(struct kmem_cache *s, void *object,
> }
> 
> #ifdef CONFIG_STACKDEPOT
>-static noinline depot_stack_handle_t set_track_prepare(void)
>+static noinline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags)
> {
> 	depot_stack_handle_t handle;
> 	unsigned long entries[TRACK_ADDRS_COUNT];
> 	unsigned int nr_entries;
> 
> 	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
>-	handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
>+	handle = stack_depot_save(entries, nr_entries, gfp_flags);
> 
> 	return handle;
> }
> #else
>-static inline depot_stack_handle_t set_track_prepare(void)
>+static inline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags)
> {
> 	return 0;
> }
>@@ -996,9 +996,9 @@ static void set_track_update(struct kmem_cache *s, void *object,
> }
> 
> static __always_inline void set_track(struct kmem_cache *s, void *object,
>-				      enum track_item alloc, unsigned long addr)
>+				      enum track_item alloc, unsigned long addr, gfp_t gfp_flags)
> {
>-	depot_stack_handle_t handle = set_track_prepare();
>+	depot_stack_handle_t handle = set_track_prepare(gfp_flags);
> 
> 	set_track_update(s, object, alloc, addr, handle);
> }
>@@ -1926,9 +1926,9 @@ static inline bool free_debug_processing(struct kmem_cache *s,
> static inline void slab_pad_check(struct kmem_cache *s, struct slab *slab) {}
> static inline int check_object(struct kmem_cache *s, struct slab *slab,
> 			void *object, u8 val) { return 1; }
>-static inline depot_stack_handle_t set_track_prepare(void) { return 0; }
>+static inline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags) { return 0; }
> static inline void set_track(struct kmem_cache *s, void *object,
>-			     enum track_item alloc, unsigned long addr) {}
>+			     enum track_item alloc, unsigned long addr, gfp_t gfp_flags) {}
> static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
> 					struct slab *slab) {}
> static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
>@@ -3881,9 +3881,14 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
> 			 * For debug caches here we had to go through
> 			 * alloc_single_from_partial() so just store the
> 			 * tracking info and return the object.
>+			 *
>+			 * Due to disabled preemption we need to disallow
>+			 * blocking. The flags are further adjusted by
>+			 * gfp_nested_mask() in stack_depot itself.
> 			 */
> 			if (s->flags & SLAB_STORE_USER)
>-				set_track(s, freelist, TRACK_ALLOC, addr);
>+				set_track(s, freelist, TRACK_ALLOC, addr,
>+					  gfpflags & ~(__GFP_DIRECT_RECLAIM));
> 
> 			return freelist;
> 		}
>@@ -3915,7 +3920,8 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
> 			goto new_objects;
> 
> 		if (s->flags & SLAB_STORE_USER)
>-			set_track(s, freelist, TRACK_ALLOC, addr);
>+			set_track(s, freelist, TRACK_ALLOC, addr,
>+				  gfpflags & ~(__GFP_DIRECT_RECLAIM));
> 
> 		return freelist;
> 	}
>@@ -4426,8 +4432,12 @@ static noinline void free_to_partial_list(
> 	unsigned long flags;
> 	depot_stack_handle_t handle = 0;
> 
>+	/*
>+	 * We cannot use GFP_NOWAIT as there are callsites where waking up
>+	 * kswapd could deadlock
>+	 */
> 	if (s->flags & SLAB_STORE_USER)
>-		handle = set_track_prepare();
>+		handle = set_track_prepare(__GFP_NOWARN);
> 
> 	spin_lock_irqsave(&n->list_lock, flags);
> 


* Re: [PATCH v4] mm: slub: avoid wake up kswapd in set_track_prepare
  2025-09-01  9:00         ` yangshiguang
@ 2025-09-01  9:14           ` Vlastimil Babka
  0 siblings, 0 replies; 7+ messages in thread
From: Vlastimil Babka @ 2025-09-01  9:14 UTC (permalink / raw)
  To: yangshiguang
  Cc: David Rientjes, harry.yoo, akpm, cl, roman.gushchin, glittao,
	linux-mm, linux-kernel, yangshiguang, stable

On 9/1/25 11:00, yangshiguang wrote:
> 
> 
> At 2025-09-01 16:46:13, "Vlastimil Babka" <vbabka@suse.cz> wrote:
>>On 9/1/25 10:29, yangshiguang wrote:
>>> 
>>> 
>>> At 2025-09-01 16:15:04, "Vlastimil Babka" <vbabka@suse.cz> wrote:
>>>>On 9/1/25 09:50, David Rientjes wrote:
>>>>> On Sat, 30 Aug 2025, yangshiguang1011@163.com wrote:
>>>>> 
>>>>>> From: yangshiguang <yangshiguang@xiaomi.com>
>>>>>> 
>>>>>> From: yangshiguang <yangshiguang@xiaomi.com>
>>>>>> 
>>>>> 
>>>>> Duplicate lines.
>>>>> 
>>>>>> set_track_prepare() can incur lock recursion.
>>>>>> The issue is that it is called from hrtimer_start_range_ns()
>>>>>> while holding per_cpu(hrtimer_bases)[n].lock; with
>>>>>> CONFIG_DEBUG_OBJECTS_TIMERS enabled, it may wake up kswapd in
>>>>>> set_track_prepare() and then try to take the same
>>>>>> per_cpu(hrtimer_bases)[n].lock.
>>>>>> 
>>>>>> Avoid the deadlock caused by implicitly waking up kswapd by
>>>>>> passing in allocation flags. The slab caller context has
>>>>>> preemption disabled, so __GFP_KSWAPD_RECLAIM must not appear in gfp_flags.
>>>>>> 
>>>>> 
>>>>> This mentions __GFP_KSWAPD_RECLAIM, but the patch actually masks off
>>>>> __GFP_DIRECT_RECLAIM, which would be a heavier-weight operation.
>>>>> Disabling direct reclaim does not necessarily imply that kswapd will be
>>>>> disabled as well.
>>>>
>>>>Yeah I think the changelog should say __GFP_DIRECT_RECLAIM.
>>>>
>>>>> Are you meaning to clear __GFP_RECLAIM in set_track_prepare()?
>>>>
>>>>No, because if the calling context (e.g. the hrtimers) can't support
>>>>__GFP_KSWAPD_RECLAIM, it won't have it in gfp_flags, and we now pass
>>>>those flags to set_track_prepare(), so it already won't be there.
>>> 
>>> 
>>> Sorry, it should be __GFP_DIRECT_RECLAIM. I will resend the patch.
>>
>>I have adjusted it locally already. I also moved the masking of
>>__GFP_DIRECT_RECLAIM into ___slab_alloc() itself, as that's where
>>preemption is disabled, so it's more obvious.
>>
>>Does the result look good to you?
> 
> This looks good.
> Currently, ___slab_alloc() is the only caller of set_track() that runs
> with preemption disabled. In the future, not all callers will disable
> preemption.

Great, added to slab/for-next-fixes. Thanks!




end of thread

Thread overview: 7+ messages
2025-08-30  2:09 [PATCH v4] mm: slub: avoid wake up kswapd in set_track_prepare yangshiguang1011
2025-09-01  7:50 ` David Rientjes
2025-09-01  8:15   ` Vlastimil Babka
2025-09-01  8:29     ` yangshiguang
2025-09-01  8:46       ` Vlastimil Babka
2025-09-01  9:00         ` yangshiguang
2025-09-01  9:14           ` Vlastimil Babka
