Intel-XE Archive on lore.kernel.org
 help / color / mirror / Atom feed
From: Rodrigo Vivi <rodrigo.vivi@intel.com>
To: Marco Crivellari <marco.crivellari@suse.com>
Cc: <linux-kernel@vger.kernel.org>, <intel-xe@lists.freedesktop.org>,
	<dri-devel@lists.freedesktop.org>, Tejun Heo <tj@kernel.org>,
	Lai Jiangshan <jiangshanlai@gmail.com>,
	Frederic Weisbecker <frederic@kernel.org>,
	Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
	Michal Hocko <mhocko@suse.com>,
	Thomas Hellstrom <thomas.hellstrom@linux.intel.com>,
	"David Airlie" <airlied@gmail.com>,
	Simona Vetter <simona@ffwll.ch>
Subject: Re: [PATCH v3 1/2] drm/xe: replace use of system_unbound_wq with system_dfl_wq
Date: Fri, 30 Jan 2026 17:03:56 -0500	[thread overview]
Message-ID: <aX0qzAHieOEAyD5w@intel.com> (raw)
In-Reply-To: <20260124145401.44992-2-marco.crivellari@suse.com>

On Sat, Jan 24, 2026 at 03:54:00PM +0100, Marco Crivellari wrote:
> This patch continues the effort to refactor workqueue APIs, which began
> with the changes introducing new workqueues and a new alloc_workqueue flag:
> 
>    commit 128ea9f6ccfb ("workqueue: Add system_percpu_wq and system_dfl_wq")
>    commit 930c2ea566af ("workqueue: Add new WQ_PERCPU flag")
> 
> The point of the refactoring is to eventually alter the default behavior of
> workqueues to become unbound by default so that their workload placement is
> optimized by the scheduler.
> 
> Before that can happen, workqueue users must be converted to the better-named
> new workqueues with no intended behavior changes:
> 
>    system_wq -> system_percpu_wq
>    system_unbound_wq -> system_dfl_wq
> 
> This way the old obsolete workqueues (system_wq, system_unbound_wq) can be
> removed in the future.

Thanks for improving the message.

Could you please rebase on top of latest drm-tip. It is not applying as is.

Thanks,
Rodrigo.

> 
> Link: https://lore.kernel.org/all/20250221112003.1dSuoGyc@linutronix.de/
> Suggested-by: Tejun Heo <tj@kernel.org>
> Signed-off-by: Marco Crivellari <marco.crivellari@suse.com>

> ---
>  drivers/gpu/drm/xe/xe_devcoredump.c | 2 +-
>  drivers/gpu/drm/xe/xe_execlist.c    | 2 +-
>  drivers/gpu/drm/xe/xe_guc_ct.c      | 4 ++--
>  drivers/gpu/drm/xe/xe_oa.c          | 2 +-
>  drivers/gpu/drm/xe/xe_vm.c          | 4 ++--
>  5 files changed, 7 insertions(+), 7 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
> index d444eda65ca6..6b47aaf8cc9f 100644
> --- a/drivers/gpu/drm/xe/xe_devcoredump.c
> +++ b/drivers/gpu/drm/xe/xe_devcoredump.c
> @@ -362,7 +362,7 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
>  
>  	xe_engine_snapshot_capture_for_queue(q);
>  
> -	queue_work(system_unbound_wq, &ss->work);
> +	queue_work(system_dfl_wq, &ss->work);
>  
>  	xe_force_wake_put(gt_to_fw(q->gt), fw_ref);
>  	dma_fence_end_signalling(cookie);
> diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
> index 769d05517f93..730b600a5803 100644
> --- a/drivers/gpu/drm/xe/xe_execlist.c
> +++ b/drivers/gpu/drm/xe/xe_execlist.c
> @@ -422,7 +422,7 @@ static void execlist_exec_queue_kill(struct xe_exec_queue *q)
>  static void execlist_exec_queue_destroy(struct xe_exec_queue *q)
>  {
>  	INIT_WORK(&q->execlist->destroy_async, execlist_exec_queue_destroy_async);
> -	queue_work(system_unbound_wq, &q->execlist->destroy_async);
> +	queue_work(system_dfl_wq, &q->execlist->destroy_async);
>  }
>  
>  static int execlist_exec_queue_set_priority(struct xe_exec_queue *q,
> diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
> index a5019d1e741b..351c9986f6cf 100644
> --- a/drivers/gpu/drm/xe/xe_guc_ct.c
> +++ b/drivers/gpu/drm/xe/xe_guc_ct.c
> @@ -558,7 +558,7 @@ static int __xe_guc_ct_start(struct xe_guc_ct *ct, bool needs_register)
>  	spin_lock_irq(&ct->dead.lock);
>  	if (ct->dead.reason) {
>  		ct->dead.reason |= (1 << CT_DEAD_STATE_REARM);
> -		queue_work(system_unbound_wq, &ct->dead.worker);
> +		queue_work(system_dfl_wq, &ct->dead.worker);
>  	}
>  	spin_unlock_irq(&ct->dead.lock);
>  #endif
> @@ -2093,7 +2093,7 @@ static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reaso
>  
>  	spin_unlock_irqrestore(&ct->dead.lock, flags);
>  
> -	queue_work(system_unbound_wq, &(ct)->dead.worker);
> +	queue_work(system_dfl_wq, &(ct)->dead.worker);
>  }
>  
>  static void ct_dead_print(struct xe_dead_ct *dead)
> diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
> index f8bb28ab8124..c8e65e38081c 100644
> --- a/drivers/gpu/drm/xe/xe_oa.c
> +++ b/drivers/gpu/drm/xe/xe_oa.c
> @@ -969,7 +969,7 @@ static void xe_oa_config_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
>  	struct xe_oa_fence *ofence = container_of(cb, typeof(*ofence), cb);
>  
>  	INIT_DELAYED_WORK(&ofence->work, xe_oa_fence_work_fn);
> -	queue_delayed_work(system_unbound_wq, &ofence->work,
> +	queue_delayed_work(system_dfl_wq, &ofence->work,
>  			   usecs_to_jiffies(NOA_PROGRAM_ADDITIONAL_DELAY_US));
>  	dma_fence_put(fence);
>  }
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index 095bb197e8b0..ddf0a9567614 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -1091,7 +1091,7 @@ static void vma_destroy_cb(struct dma_fence *fence,
>  	struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
>  
>  	INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
> -	queue_work(system_unbound_wq, &vma->destroy_work);
> +	queue_work(system_dfl_wq, &vma->destroy_work);
>  }
>  
>  static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
> @@ -1854,7 +1854,7 @@ static void xe_vm_free(struct drm_gpuvm *gpuvm)
>  	struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
>  
>  	/* To destroy the VM we need to be able to sleep */
> -	queue_work(system_unbound_wq, &vm->destroy_work);
> +	queue_work(system_dfl_wq, &vm->destroy_work);
>  }
>  
>  struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
> -- 
> 2.52.0
> 

  reply	other threads:[~2026-01-30 22:04 UTC|newest]

Thread overview: 5+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-01-24 14:53 [PATCH v3 0/2] replace system_unbound_wq, add WQ_PERCPU to alloc_workqueue Marco Crivellari
2026-01-24 14:54 ` [PATCH v3 1/2] drm/xe: replace use of system_unbound_wq with system_dfl_wq Marco Crivellari
2026-01-30 22:03   ` Rodrigo Vivi [this message]
2026-02-02  9:16     ` Marco Crivellari
2026-01-24 14:54 ` [PATCH v3 2/2] drm/xe: add WQ_PERCPU to alloc_workqueue users Marco Crivellari

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=aX0qzAHieOEAyD5w@intel.com \
    --to=rodrigo.vivi@intel.com \
    --cc=airlied@gmail.com \
    --cc=bigeasy@linutronix.de \
    --cc=dri-devel@lists.freedesktop.org \
    --cc=frederic@kernel.org \
    --cc=intel-xe@lists.freedesktop.org \
    --cc=jiangshanlai@gmail.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=marco.crivellari@suse.com \
    --cc=mhocko@suse.com \
    --cc=simona@ffwll.ch \
    --cc=thomas.hellstrom@linux.intel.com \
    --cc=tj@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox