public inbox for linux-mm@kvack.org
 help / color / mirror / Atom feed
* [PATCH v3] mm/vmalloc: Use dedicated unbound workqueues for vmap drain
@ 2026-03-31 20:23 Uladzislau Rezki (Sony)
  2026-03-31 22:40 ` Andrew Morton
                   ` (2 more replies)
  0 siblings, 3 replies; 7+ messages in thread
From: Uladzislau Rezki (Sony) @ 2026-03-31 20:23 UTC (permalink / raw)
  To: linux-mm, Andrew Morton
  Cc: Baoquan He, LKML, Uladzislau Rezki, stable, lirongqing

drain_vmap_area_work() function can take >10ms to complete
when there are many accumulated vmap areas in a system with
high CPU count, causing workqueue watchdog warnings when run
via schedule_work():

  workqueue: drain_vmap_area_work hogged CPU for >10000us

Move the top-level drain work to a dedicated WQ_UNBOUND
workqueue so the scheduler can run this background work
on any available CPU, improving responsiveness. Use the
WQ_MEM_RECLAIM to ensure forward progress under memory
pressure.

Move purge helpers to a separate WQ_UNBOUND | WQ_MEM_RECLAIM
workqueue. This allows drain_vmap_work to wait for helper
completion without creating a dependency on the same rescuer
thread, avoiding a potential parent/child deadlock.

Simplify purge helper scheduling by replacing the cpumask-based
iteration with direct iteration over the vmap nodes, checking
each node's work_queued state.

Cc: stable@vger.kernel.org
Cc: lirongqing <lirongqing@baidu.com>
Fixes: 72210662c5a2 ("mm: vmalloc: offload free_vmap_area_lock lock")
Link: https://lore.kernel.org/all/20260319074307.2325-1-lirongqing@baidu.com/
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
---
 mm/vmalloc.c | 79 ++++++++++++++++++++++++++++++++++------------------
 1 file changed, 52 insertions(+), 27 deletions(-)

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 61caa55a4402..0fa1208a910b 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -949,6 +949,7 @@ static struct vmap_node {
 	struct list_head purge_list;
 	struct work_struct purge_work;
 	unsigned long nr_purged;
+	bool work_queued;
 } single;
 
 /*
@@ -1067,6 +1068,8 @@ static void reclaim_and_purge_vmap_areas(void);
 static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
 static void drain_vmap_area_work(struct work_struct *work);
 static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work);
+static struct workqueue_struct *drain_vmap_helpers_wq;
+static struct workqueue_struct *drain_vmap_wq;
 
 static __cacheline_aligned_in_smp atomic_long_t nr_vmalloc_pages;
 static __cacheline_aligned_in_smp atomic_long_t vmap_lazy_nr;
@@ -2335,6 +2338,16 @@ static void purge_vmap_node(struct work_struct *work)
 	reclaim_list_global(&local_list);
 }
 
+static bool
+schedule_drain_vmap_work(struct workqueue_struct *wq,
+		struct work_struct *work)
+{
+	if (wq)
+		return queue_work(wq, work);
+
+	return false;
+}
+
 /*
  * Purges all lazily-freed vmap areas.
  */
@@ -2342,19 +2355,12 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end,
 		bool full_pool_decay)
 {
 	unsigned long nr_purged_areas = 0;
+	unsigned int nr_purge_nodes = 0;
 	unsigned int nr_purge_helpers;
-	static cpumask_t purge_nodes;
-	unsigned int nr_purge_nodes;
 	struct vmap_node *vn;
-	int i;
 
 	lockdep_assert_held(&vmap_purge_lock);
 
-	/*
-	 * Use cpumask to mark which node has to be processed.
-	 */
-	purge_nodes = CPU_MASK_NONE;
-
 	for_each_vmap_node(vn) {
 		INIT_LIST_HEAD(&vn->purge_list);
 		vn->skip_populate = full_pool_decay;
@@ -2374,10 +2380,9 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end,
 		end = max(end, list_last_entry(&vn->purge_list,
 			struct vmap_area, list)->va_end);
 
-		cpumask_set_cpu(node_to_id(vn), &purge_nodes);
+		nr_purge_nodes++;
 	}
 
-	nr_purge_nodes = cpumask_weight(&purge_nodes);
 	if (nr_purge_nodes > 0) {
 		flush_tlb_kernel_range(start, end);
 
@@ -2385,29 +2390,31 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end,
 		nr_purge_helpers = atomic_long_read(&vmap_lazy_nr) / lazy_max_pages();
 		nr_purge_helpers = clamp(nr_purge_helpers, 1U, nr_purge_nodes) - 1;
 
-		for_each_cpu(i, &purge_nodes) {
-			vn = &vmap_nodes[i];
+		for_each_vmap_node(vn) {
+			vn->work_queued = false;
+
+			if (list_empty(&vn->purge_list))
+				continue;
 
 			if (nr_purge_helpers > 0) {
 				INIT_WORK(&vn->purge_work, purge_vmap_node);
+				vn->work_queued = schedule_drain_vmap_work(
+					READ_ONCE(drain_vmap_helpers_wq), &vn->purge_work);
 
-				if (cpumask_test_cpu(i, cpu_online_mask))
-					schedule_work_on(i, &vn->purge_work);
-				else
-					schedule_work(&vn->purge_work);
-
-				nr_purge_helpers--;
-			} else {
-				vn->purge_work.func = NULL;
-				purge_vmap_node(&vn->purge_work);
-				nr_purged_areas += vn->nr_purged;
+				if (vn->work_queued) {
+					nr_purge_helpers--;
+					continue;
+				}
 			}
-		}
 
-		for_each_cpu(i, &purge_nodes) {
-			vn = &vmap_nodes[i];
+			/* Sync path. Process locally. */
+			purge_vmap_node(&vn->purge_work);
+			nr_purged_areas += vn->nr_purged;
+		}
 
-			if (vn->purge_work.func) {
+		/* Wait for completion if queued any. */
+		for_each_vmap_node(vn) {
+			if (vn->work_queued) {
 				flush_work(&vn->purge_work);
 				nr_purged_areas += vn->nr_purged;
 			}
@@ -2471,7 +2478,8 @@ static void free_vmap_area_noflush(struct vmap_area *va)
 
 	/* After this point, we may free va at any time */
 	if (unlikely(nr_lazy > nr_lazy_max))
-		schedule_work(&drain_vmap_work);
+		schedule_drain_vmap_work(READ_ONCE(drain_vmap_wq),
+			&drain_vmap_work);
 }
 
 /*
@@ -5483,3 +5491,20 @@ void __init vmalloc_init(void)
 	vmap_node_shrinker->scan_objects = vmap_node_shrink_scan;
 	shrinker_register(vmap_node_shrinker);
 }
+
+static int __init vmalloc_init_workqueue(void)
+{
+	struct workqueue_struct *drain_wq, *helpers_wq;
+	unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM;
+
+	drain_wq = alloc_workqueue("vmap_drain", flags, 0);
+	WARN_ON_ONCE(drain_wq == NULL);
+	WRITE_ONCE(drain_vmap_wq, drain_wq);
+
+	helpers_wq = alloc_workqueue("vmap_drain_helpers", flags, 0);
+	WARN_ON_ONCE(helpers_wq == NULL);
+	WRITE_ONCE(drain_vmap_helpers_wq, helpers_wq);
+
+	return 0;
+}
+early_initcall(vmalloc_init_workqueue);
-- 
2.47.3



^ permalink raw reply related	[flat|nested] 7+ messages in thread

* Re: [PATCH v3] mm/vmalloc: Use dedicated unbound workqueues for vmap drain
  2026-03-31 20:23 [PATCH v3] mm/vmalloc: Use dedicated unbound workqueues for vmap drain Uladzislau Rezki (Sony)
@ 2026-03-31 22:40 ` Andrew Morton
  2026-04-01  9:47 ` Baoquan He
  2026-04-02  0:23 ` Baoquan He
  2 siblings, 0 replies; 7+ messages in thread
From: Andrew Morton @ 2026-03-31 22:40 UTC (permalink / raw)
  To: Uladzislau Rezki (Sony); +Cc: linux-mm, Baoquan He, LKML, stable, lirongqing

On Tue, 31 Mar 2026 22:23:52 +0200 "Uladzislau Rezki (Sony)" <urezki@gmail.com> wrote:

> drain_vmap_area_work() function can take >10ms to complete
> when there are many accumulated vmap areas in a system with
> high CPU count, causing workqueue watchdog warnings when run
> via schedule_work():
> 
>   workqueue: drain_vmap_area_work hogged CPU for >10000us
> 
> Move the top-level drain work to a dedicated WQ_UNBOUND
> workqueue so the scheduler can run this background work
> on any available CPU, improving responsiveness. Use the
> WQ_MEM_RECLAIM to ensure forward progress under memory
> pressure.
> 
> Move purge helpers to separate WQ_UNBOUND | WQ_MEM_RECLAIM
> workqueue. This allows drain_vmap_work to wait for helpers
> completion without creating dependency on the same rescuer
> thread and avoid a potential parent/child deadlock.
> 
> Simplify purge helper scheduling by removing cpumask-based
> iteration to iterating directly over vmap nodes checking
> work_queued state.

Great, thanks.

> Fixes: 72210662c5a2 ("mm: vmalloc: offload free_vmap_area_lock lock")

That was a couple of years ago so I see no need to rush this into
mainline.  I added it to the next-merge-window pile - it'll trickle
back into -stable kernels later on.



^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v3] mm/vmalloc: Use dedicated unbound workqueues for vmap drain
  2026-03-31 20:23 [PATCH v3] mm/vmalloc: Use dedicated unbound workqueues for vmap drain Uladzislau Rezki (Sony)
  2026-03-31 22:40 ` Andrew Morton
@ 2026-04-01  9:47 ` Baoquan He
  2026-04-02  0:22   ` Baoquan He
  2026-04-02  0:23 ` Baoquan He
  2 siblings, 1 reply; 7+ messages in thread
From: Baoquan He @ 2026-04-01  9:47 UTC (permalink / raw)
  To: Uladzislau Rezki (Sony); +Cc: linux-mm, Andrew Morton, LKML, stable, lirongqing

On 03/31/26 at 10:23pm, Uladzislau Rezki (Sony) wrote:
> drain_vmap_area_work() function can take >10ms to complete
> when there are many accumulated vmap areas in a system with
> high CPU count, causing workqueue watchdog warnings when run
> via schedule_work():
> 
>   workqueue: drain_vmap_area_work hogged CPU for >10000us
> 
> Move the top-level drain work to a dedicated WQ_UNBOUND
> workqueue so the scheduler can run this background work
> on any available CPU, improving responsiveness. Use the
> WQ_MEM_RECLAIM to ensure forward progress under memory
> pressure.
> 
> Move purge helpers to separate WQ_UNBOUND | WQ_MEM_RECLAIM
> workqueue. This allows drain_vmap_work to wait for helpers
> completion without creating dependency on the same rescuer
> thread and avoid a potential parent/child deadlock.
...snip...  
> @@ -2385,29 +2390,31 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end,
>  		nr_purge_helpers = atomic_long_read(&vmap_lazy_nr) / lazy_max_pages();
>  		nr_purge_helpers = clamp(nr_purge_helpers, 1U, nr_purge_nodes) - 1;
>  
> -		for_each_cpu(i, &purge_nodes) {
> -			vn = &vmap_nodes[i];
> +		for_each_vmap_node(vn) {
> +			vn->work_queued = false;
> +
> +			if (list_empty(&vn->purge_list))
> +				continue;
>  
>  			if (nr_purge_helpers > 0) {
>  				INIT_WORK(&vn->purge_work, purge_vmap_node);
> +				vn->work_queued = schedule_drain_vmap_work(
> +					READ_ONCE(drain_vmap_helpers_wq), &vn->purge_work);

The new schedule_drain_vmap_work() could submit all purge_work on one
CPU, do we need use queue_work_on(cpu, wq, work) instead?

>  
> -				if (cpumask_test_cpu(i, cpu_online_mask))
> -					schedule_work_on(i, &vn->purge_work);
> -				else
> -					schedule_work(&vn->purge_work);
> -
> -				nr_purge_helpers--;
> -			} else {
> -				vn->purge_work.func = NULL;
> -				purge_vmap_node(&vn->purge_work);
> -				nr_purged_areas += vn->nr_purged;
> +				if (vn->work_queued) {
> +					nr_purge_helpers--;
> +					continue;
> +				}
>  			}
> -		}
>  
> -		for_each_cpu(i, &purge_nodes) {
> -			vn = &vmap_nodes[i];
> +			/* Sync path. Process locally. */
> +			purge_vmap_node(&vn->purge_work);
> +			nr_purged_areas += vn->nr_purged;
> +		}
>  
> -			if (vn->purge_work.func) {
> +		/* Wait for completion if queued any. */
> +		for_each_vmap_node(vn) {
> +			if (vn->work_queued) {
>  				flush_work(&vn->purge_work);
>  				nr_purged_areas += vn->nr_purged;
>  			}
...snip...
> +
> +static int __init vmalloc_init_workqueue(void)
> +{
> +	struct workqueue_struct *drain_wq, *helpers_wq;

Maybe there's one local variable is enough like below:

	struct workqueue_struct *wq;
	unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM;

	wq = alloc_workqueue("vmap_drain", flags, 0);
	WARN_ON_ONCE(wq == NULL);
	WRITE_ONCE(drain_vmap_wq, wq);

	wq = alloc_workqueue("vmap_drain_helpers", flags, 0);
	WARN_ON_ONCE(wq == NULL);
	WRITE_ONCE(drain_vmap_helpers_wq, wq);

	return 0;
}

Just a personal-preference nitpick, not a strong opinion.



^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v3] mm/vmalloc: Use dedicated unbound workqueues for vmap drain
  2026-04-01  9:47 ` Baoquan He
@ 2026-04-02  0:22   ` Baoquan He
  2026-04-02 16:05     ` Uladzislau Rezki
  0 siblings, 1 reply; 7+ messages in thread
From: Baoquan He @ 2026-04-02  0:22 UTC (permalink / raw)
  To: Uladzislau Rezki (Sony); +Cc: linux-mm, Andrew Morton, LKML, stable, lirongqing

On 04/01/26 at 05:47pm, Baoquan He wrote:
> On 03/31/26 at 10:23pm, Uladzislau Rezki (Sony) wrote:
> > drain_vmap_area_work() function can take >10ms to complete
> > when there are many accumulated vmap areas in a system with
> > high CPU count, causing workqueue watchdog warnings when run
> > via schedule_work():
> > 
> >   workqueue: drain_vmap_area_work hogged CPU for >10000us
> > 
> > Move the top-level drain work to a dedicated WQ_UNBOUND
> > workqueue so the scheduler can run this background work
> > on any available CPU, improving responsiveness. Use the
> > WQ_MEM_RECLAIM to ensure forward progress under memory
> > pressure.
> > 
> > Move purge helpers to separate WQ_UNBOUND | WQ_MEM_RECLAIM
> > workqueue. This allows drain_vmap_work to wait for helpers
> > completion without creating dependency on the same rescuer
> > thread and avoid a potential parent/child deadlock.
> ...snip...  
> > @@ -2385,29 +2390,31 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end,
> >  		nr_purge_helpers = atomic_long_read(&vmap_lazy_nr) / lazy_max_pages();
> >  		nr_purge_helpers = clamp(nr_purge_helpers, 1U, nr_purge_nodes) - 1;
> >  
> > -		for_each_cpu(i, &purge_nodes) {
> > -			vn = &vmap_nodes[i];
> > +		for_each_vmap_node(vn) {
> > +			vn->work_queued = false;
> > +
> > +			if (list_empty(&vn->purge_list))
> > +				continue;
> >  
> >  			if (nr_purge_helpers > 0) {
> >  				INIT_WORK(&vn->purge_work, purge_vmap_node);
> > +				vn->work_queued = schedule_drain_vmap_work(
> > +					READ_ONCE(drain_vmap_helpers_wq), &vn->purge_work);
> 
> The new schedule_drain_vmap_work() could submit all purge_work on one
> CPU, do we need use queue_work_on(cpu, wq, work) instead?

I forgot about the WQ_UNBOUND specified on alloc_workqueue(), sorry for
the noise. With that, this patch looks great to me.

> 
> >  
> > -				if (cpumask_test_cpu(i, cpu_online_mask))
> > -					schedule_work_on(i, &vn->purge_work);
> > -				else
> > -					schedule_work(&vn->purge_work);
> > -
> > -				nr_purge_helpers--;
> > -			} else {
> > -				vn->purge_work.func = NULL;
> > -				purge_vmap_node(&vn->purge_work);
> > -				nr_purged_areas += vn->nr_purged;
> > +				if (vn->work_queued) {
> > +					nr_purge_helpers--;
> > +					continue;
> > +				}
> >  			}
> > -		}
> >  
> > -		for_each_cpu(i, &purge_nodes) {
> > -			vn = &vmap_nodes[i];
> > +			/* Sync path. Process locally. */
> > +			purge_vmap_node(&vn->purge_work);
> > +			nr_purged_areas += vn->nr_purged;
> > +		}
> >  
> > -			if (vn->purge_work.func) {
> > +		/* Wait for completion if queued any. */
> > +		for_each_vmap_node(vn) {
> > +			if (vn->work_queued) {
> >  				flush_work(&vn->purge_work);
> >  				nr_purged_areas += vn->nr_purged;
> >  			}
> ...snip...
> > +
> > +static int __init vmalloc_init_workqueue(void)
> > +{
> > +	struct workqueue_struct *drain_wq, *helpers_wq;
> 
> Maybe there's one local variable is enough like below:
> 
> 	struct workqueue_struct *wq;
> 	unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM;
> 
> 	wq = alloc_workqueue("vmap_drain", flags, 0);
> 	WARN_ON_ONCE(wq == NULL);
> 	WRITE_ONCE(drain_vmap_wq, wq);
> 
> 	wq = alloc_workqueue("vmap_drain_helpers", flags, 0);
> 	WARN_ON_ONCE(wq == NULL);
> 	WRITE_ONCE(drain_vmap_helpers_wq, wq);
> 
> 	return 0;
> }
> 
> Just personal preference on nitpick, not strong opionion.
> 



^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v3] mm/vmalloc: Use dedicated unbound workqueues for vmap drain
  2026-03-31 20:23 [PATCH v3] mm/vmalloc: Use dedicated unbound workqueues for vmap drain Uladzislau Rezki (Sony)
  2026-03-31 22:40 ` Andrew Morton
  2026-04-01  9:47 ` Baoquan He
@ 2026-04-02  0:23 ` Baoquan He
  2026-04-02 16:06   ` Uladzislau Rezki
  2 siblings, 1 reply; 7+ messages in thread
From: Baoquan He @ 2026-04-02  0:23 UTC (permalink / raw)
  To: Uladzislau Rezki (Sony); +Cc: linux-mm, Andrew Morton, LKML, stable, lirongqing

On 03/31/26 at 10:23pm, Uladzislau Rezki (Sony) wrote:
> drain_vmap_area_work() function can take >10ms to complete
> when there are many accumulated vmap areas in a system with
> high CPU count, causing workqueue watchdog warnings when run
> via schedule_work():
> 
>   workqueue: drain_vmap_area_work hogged CPU for >10000us
> 
> Move the top-level drain work to a dedicated WQ_UNBOUND
> workqueue so the scheduler can run this background work
> on any available CPU, improving responsiveness. Use the
> WQ_MEM_RECLAIM to ensure forward progress under memory
> pressure.
> 
> Move purge helpers to separate WQ_UNBOUND | WQ_MEM_RECLAIM
> workqueue. This allows drain_vmap_work to wait for helpers
> completion without creating dependency on the same rescuer
> thread and avoid a potential parent/child deadlock.
> 
> Simplify purge helper scheduling by removing cpumask-based
> iteration to iterating directly over vmap nodes checking
> work_queued state.
> 
> Cc: stable@vger.kernel.org
> Cc: lirongqing <lirongqing@baidu.com>
> Fixes: 72210662c5a2 ("mm: vmalloc: offload free_vmap_area_lock lock")
> Link: https://lore.kernel.org/all/20260319074307.2325-1-lirongqing@baidu.com/
> Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
> ---
>  mm/vmalloc.c | 79 ++++++++++++++++++++++++++++++++++------------------
>  1 file changed, 52 insertions(+), 27 deletions(-)

LGTM,

Reviewed-by: Baoquan He <bhe@redhat.com>

> 
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index 61caa55a4402..0fa1208a910b 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -949,6 +949,7 @@ static struct vmap_node {
>  	struct list_head purge_list;
>  	struct work_struct purge_work;
>  	unsigned long nr_purged;
> +	bool work_queued;
>  } single;
>  
>  /*
> @@ -1067,6 +1068,8 @@ static void reclaim_and_purge_vmap_areas(void);
>  static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
>  static void drain_vmap_area_work(struct work_struct *work);
>  static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work);
> +static struct workqueue_struct *drain_vmap_helpers_wq;
> +static struct workqueue_struct *drain_vmap_wq;
>  
>  static __cacheline_aligned_in_smp atomic_long_t nr_vmalloc_pages;
>  static __cacheline_aligned_in_smp atomic_long_t vmap_lazy_nr;
> @@ -2335,6 +2338,16 @@ static void purge_vmap_node(struct work_struct *work)
>  	reclaim_list_global(&local_list);
>  }
>  
> +static bool
> +schedule_drain_vmap_work(struct workqueue_struct *wq,
> +		struct work_struct *work)
> +{
> +	if (wq)
> +		return queue_work(wq, work);
> +
> +	return false;
> +}
> +
>  /*
>   * Purges all lazily-freed vmap areas.
>   */
> @@ -2342,19 +2355,12 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end,
>  		bool full_pool_decay)
>  {
>  	unsigned long nr_purged_areas = 0;
> +	unsigned int nr_purge_nodes = 0;
>  	unsigned int nr_purge_helpers;
> -	static cpumask_t purge_nodes;
> -	unsigned int nr_purge_nodes;
>  	struct vmap_node *vn;
> -	int i;
>  
>  	lockdep_assert_held(&vmap_purge_lock);
>  
> -	/*
> -	 * Use cpumask to mark which node has to be processed.
> -	 */
> -	purge_nodes = CPU_MASK_NONE;
> -
>  	for_each_vmap_node(vn) {
>  		INIT_LIST_HEAD(&vn->purge_list);
>  		vn->skip_populate = full_pool_decay;
> @@ -2374,10 +2380,9 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end,
>  		end = max(end, list_last_entry(&vn->purge_list,
>  			struct vmap_area, list)->va_end);
>  
> -		cpumask_set_cpu(node_to_id(vn), &purge_nodes);
> +		nr_purge_nodes++;
>  	}
>  
> -	nr_purge_nodes = cpumask_weight(&purge_nodes);
>  	if (nr_purge_nodes > 0) {
>  		flush_tlb_kernel_range(start, end);
>  
> @@ -2385,29 +2390,31 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end,
>  		nr_purge_helpers = atomic_long_read(&vmap_lazy_nr) / lazy_max_pages();
>  		nr_purge_helpers = clamp(nr_purge_helpers, 1U, nr_purge_nodes) - 1;
>  
> -		for_each_cpu(i, &purge_nodes) {
> -			vn = &vmap_nodes[i];
> +		for_each_vmap_node(vn) {
> +			vn->work_queued = false;
> +
> +			if (list_empty(&vn->purge_list))
> +				continue;
>  
>  			if (nr_purge_helpers > 0) {
>  				INIT_WORK(&vn->purge_work, purge_vmap_node);
> +				vn->work_queued = schedule_drain_vmap_work(
> +					READ_ONCE(drain_vmap_helpers_wq), &vn->purge_work);
>  
> -				if (cpumask_test_cpu(i, cpu_online_mask))
> -					schedule_work_on(i, &vn->purge_work);
> -				else
> -					schedule_work(&vn->purge_work);
> -
> -				nr_purge_helpers--;
> -			} else {
> -				vn->purge_work.func = NULL;
> -				purge_vmap_node(&vn->purge_work);
> -				nr_purged_areas += vn->nr_purged;
> +				if (vn->work_queued) {
> +					nr_purge_helpers--;
> +					continue;
> +				}
>  			}
> -		}
>  
> -		for_each_cpu(i, &purge_nodes) {
> -			vn = &vmap_nodes[i];
> +			/* Sync path. Process locally. */
> +			purge_vmap_node(&vn->purge_work);
> +			nr_purged_areas += vn->nr_purged;
> +		}
>  
> -			if (vn->purge_work.func) {
> +		/* Wait for completion if queued any. */
> +		for_each_vmap_node(vn) {
> +			if (vn->work_queued) {
>  				flush_work(&vn->purge_work);
>  				nr_purged_areas += vn->nr_purged;
>  			}
> @@ -2471,7 +2478,8 @@ static void free_vmap_area_noflush(struct vmap_area *va)
>  
>  	/* After this point, we may free va at any time */
>  	if (unlikely(nr_lazy > nr_lazy_max))
> -		schedule_work(&drain_vmap_work);
> +		schedule_drain_vmap_work(READ_ONCE(drain_vmap_wq),
> +			&drain_vmap_work);
>  }
>  
>  /*
> @@ -5483,3 +5491,20 @@ void __init vmalloc_init(void)
>  	vmap_node_shrinker->scan_objects = vmap_node_shrink_scan;
>  	shrinker_register(vmap_node_shrinker);
>  }
> +
> +static int __init vmalloc_init_workqueue(void)
> +{
> +	struct workqueue_struct *drain_wq, *helpers_wq;
> +	unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM;
> +
> +	drain_wq = alloc_workqueue("vmap_drain", flags, 0);
> +	WARN_ON_ONCE(drain_wq == NULL);
> +	WRITE_ONCE(drain_vmap_wq, drain_wq);
> +
> +	helpers_wq = alloc_workqueue("vmap_drain_helpers", flags, 0);
> +	WARN_ON_ONCE(helpers_wq == NULL);
> +	WRITE_ONCE(drain_vmap_helpers_wq, helpers_wq);
> +
> +	return 0;
> +}
> +early_initcall(vmalloc_init_workqueue);
> -- 
> 2.47.3
> 



^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v3] mm/vmalloc: Use dedicated unbound workqueues for vmap drain
  2026-04-02  0:22   ` Baoquan He
@ 2026-04-02 16:05     ` Uladzislau Rezki
  0 siblings, 0 replies; 7+ messages in thread
From: Uladzislau Rezki @ 2026-04-02 16:05 UTC (permalink / raw)
  To: Baoquan He
  Cc: Uladzislau Rezki (Sony), linux-mm, Andrew Morton, LKML, stable,
	lirongqing

On Thu, Apr 02, 2026 at 08:22:36AM +0800, Baoquan He wrote:
> On 04/01/26 at 05:47pm, Baoquan He wrote:
> > On 03/31/26 at 10:23pm, Uladzislau Rezki (Sony) wrote:
> > > drain_vmap_area_work() function can take >10ms to complete
> > > when there are many accumulated vmap areas in a system with
> > > high CPU count, causing workqueue watchdog warnings when run
> > > via schedule_work():
> > > 
> > >   workqueue: drain_vmap_area_work hogged CPU for >10000us
> > > 
> > > Move the top-level drain work to a dedicated WQ_UNBOUND
> > > workqueue so the scheduler can run this background work
> > > on any available CPU, improving responsiveness. Use the
> > > WQ_MEM_RECLAIM to ensure forward progress under memory
> > > pressure.
> > > 
> > > Move purge helpers to separate WQ_UNBOUND | WQ_MEM_RECLAIM
> > > workqueue. This allows drain_vmap_work to wait for helpers
> > > completion without creating dependency on the same rescuer
> > > thread and avoid a potential parent/child deadlock.
> > ...snip...  
> > > @@ -2385,29 +2390,31 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end,
> > >  		nr_purge_helpers = atomic_long_read(&vmap_lazy_nr) / lazy_max_pages();
> > >  		nr_purge_helpers = clamp(nr_purge_helpers, 1U, nr_purge_nodes) - 1;
> > >  
> > > -		for_each_cpu(i, &purge_nodes) {
> > > -			vn = &vmap_nodes[i];
> > > +		for_each_vmap_node(vn) {
> > > +			vn->work_queued = false;
> > > +
> > > +			if (list_empty(&vn->purge_list))
> > > +				continue;
> > >  
> > >  			if (nr_purge_helpers > 0) {
> > >  				INIT_WORK(&vn->purge_work, purge_vmap_node);
> > > +				vn->work_queued = schedule_drain_vmap_work(
> > > +					READ_ONCE(drain_vmap_helpers_wq), &vn->purge_work);
> > 
> > The new schedule_drain_vmap_work() could submit all purge_work on one
> > CPU, do we need use queue_work_on(cpu, wq, work) instead?
> 
> Forgot the specified WQ_UNBOUND on alloc_workqueue(), sorry for the
> noise. Then this patch looks great to me.
> 
Right. When a worker is created for UNBOUND queue, its cpumask is
updated so it can be awaken on any CPU. Scheduler decides.

--
Uladzislau Rezki


^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v3] mm/vmalloc: Use dedicated unbound workqueues for vmap drain
  2026-04-02  0:23 ` Baoquan He
@ 2026-04-02 16:06   ` Uladzislau Rezki
  0 siblings, 0 replies; 7+ messages in thread
From: Uladzislau Rezki @ 2026-04-02 16:06 UTC (permalink / raw)
  To: Baoquan He
  Cc: Uladzislau Rezki (Sony), linux-mm, Andrew Morton, LKML, stable,
	lirongqing

On Thu, Apr 02, 2026 at 08:23:10AM +0800, Baoquan He wrote:
> On 03/31/26 at 10:23pm, Uladzislau Rezki (Sony) wrote:
> > drain_vmap_area_work() function can take >10ms to complete
> > when there are many accumulated vmap areas in a system with
> > high CPU count, causing workqueue watchdog warnings when run
> > via schedule_work():
> > 
> >   workqueue: drain_vmap_area_work hogged CPU for >10000us
> > 
> > Move the top-level drain work to a dedicated WQ_UNBOUND
> > workqueue so the scheduler can run this background work
> > on any available CPU, improving responsiveness. Use the
> > WQ_MEM_RECLAIM to ensure forward progress under memory
> > pressure.
> > 
> > Move purge helpers to separate WQ_UNBOUND | WQ_MEM_RECLAIM
> > workqueue. This allows drain_vmap_work to wait for helpers
> > completion without creating dependency on the same rescuer
> > thread and avoid a potential parent/child deadlock.
> > 
> > Simplify purge helper scheduling by removing cpumask-based
> > iteration to iterating directly over vmap nodes checking
> > work_queued state.
> > 
> > Cc: stable@vger.kernel.org
> > Cc: lirongqing <lirongqing@baidu.com>
> > Fixes: 72210662c5a2 ("mm: vmalloc: offload free_vmap_area_lock lock")
> > Link: https://lore.kernel.org/all/20260319074307.2325-1-lirongqing@baidu.com/
> > Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
> > ---
> >  mm/vmalloc.c | 79 ++++++++++++++++++++++++++++++++++------------------
> >  1 file changed, 52 insertions(+), 27 deletions(-)
> 
> LGTM,
> 
> Reviewed-by: Baoquan He <bhe@redhat.com>
> 
Thanks!

--
Uladzislau Rezki


^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2026-04-02 16:06 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2026-03-31 20:23 [PATCH v3] mm/vmalloc: Use dedicated unbound workqueues for vmap drain Uladzislau Rezki (Sony)
2026-03-31 22:40 ` Andrew Morton
2026-04-01  9:47 ` Baoquan He
2026-04-02  0:22   ` Baoquan He
2026-04-02 16:05     ` Uladzislau Rezki
2026-04-02  0:23 ` Baoquan He
2026-04-02 16:06   ` Uladzislau Rezki

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox