From: Matthew Brost <matthew.brost@intel.com>
To: <intel-xe@lists.freedesktop.org>
Cc: lucas.demarchi@intel.com, Matthew Brost <matthew.brost@intel.com>
Subject: [PATCH v2 2/3] drm/xe: Use device, gt ordered work queues for resource cleanup
Date: Mon, 1 Apr 2024 15:19:12 -0700 [thread overview]
Message-ID: <20240401221913.139672-3-matthew.brost@intel.com> (raw)
In-Reply-To: <20240401221913.139672-1-matthew.brost@intel.com>
Resource cleanup is a device-private operation with no expectation of
performance. Use device and gt ordered work queues to clean up resources to
avoid grabbing locks on shared work queues.
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
drivers/gpu/drm/xe/xe_device_types.h | 5 ++++-
drivers/gpu/drm/xe/xe_execlist.c | 2 +-
drivers/gpu/drm/xe/xe_gt_types.h | 5 ++++-
drivers/gpu/drm/xe/xe_guc_submit.c | 2 +-
drivers/gpu/drm/xe/xe_vm.c | 4 ++--
5 files changed, 12 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index c710cec835a7..d696aa2de8cc 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -366,7 +366,10 @@ struct xe_device {
/** @preempt_fence_wq: used to serialize preempt fences */
struct workqueue_struct *preempt_fence_wq;
- /** @ordered_wq: used to serialize compute mode resume */
+ /**
+ * @ordered_wq: used to serialize compute mode resume, cleanup
+ * resources
+ */
struct workqueue_struct *ordered_wq;
/** @unordered_wq: used to serialize unordered work, mostly display */
diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
index dece2785933c..1ae922509f05 100644
--- a/drivers/gpu/drm/xe/xe_execlist.c
+++ b/drivers/gpu/drm/xe/xe_execlist.c
@@ -393,7 +393,7 @@ static void execlist_exec_queue_kill(struct xe_exec_queue *q)
static void execlist_exec_queue_fini(struct xe_exec_queue *q)
{
INIT_WORK(&q->execlist->fini_async, execlist_exec_queue_fini_async);
- queue_work(system_unbound_wq, &q->execlist->fini_async);
+ queue_work(q->gt->ordered_wq, &q->execlist->fini_async);
}
static int execlist_exec_queue_set_priority(struct xe_exec_queue *q,
diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
index 2143dffcaf11..cd22ad6e881a 100644
--- a/drivers/gpu/drm/xe/xe_gt_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_types.h
@@ -268,7 +268,10 @@ struct xe_gt {
} acc_queue[NUM_ACC_QUEUE];
} usm;
- /** @ordered_wq: used to serialize GT resets and TDRs */
+ /**
+ * @ordered_wq: used to serialize GT resets and TDRs, clean up
+ * resources
+ */
struct workqueue_struct *ordered_wq;
/** @uc: micro controllers on the GT */
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 13b7e195c7b5..e30ad9fccf6c 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -1033,7 +1033,7 @@ static void guc_exec_queue_fini_async(struct xe_exec_queue *q)
if (q->flags & EXEC_QUEUE_FLAG_PERMANENT)
__guc_exec_queue_fini_async(&q->guc->fini_async);
else
- queue_work(system_wq, &q->guc->fini_async);
+ queue_work(q->gt->ordered_wq, &q->guc->fini_async);
}
static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q)
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 8b32aa5003df..7808b540c013 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1005,7 +1005,7 @@ static void vma_destroy_cb(struct dma_fence *fence,
struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
- queue_work(system_unbound_wq, &vma->destroy_work);
+ queue_work(xe_vma_vm(vma)->xe->ordered_wq, &vma->destroy_work);
}
static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
@@ -1625,7 +1625,7 @@ static void xe_vm_free(struct drm_gpuvm *gpuvm)
struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
/* To destroy the VM we need to be able to sleep */
- queue_work(system_unbound_wq, &vm->destroy_work);
+ queue_work(vm->xe->ordered_wq, &vm->destroy_work);
}
struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
--
2.34.1
next prev parent reply other threads:[~2024-04-01 22:18 UTC|newest]
Thread overview: 13+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-04-01 22:19 [PATCH v2 0/3] Rework work queue usage Matthew Brost
2024-04-01 22:19 ` [PATCH v2 1/3] drm/xe: Use ordered wq for preempt fence waiting Matthew Brost
2024-04-02 5:14 ` Lucas De Marchi
2024-04-03 14:13 ` Lucas De Marchi
2024-04-01 22:19 ` Matthew Brost [this message]
2024-04-01 22:19 ` [PATCH v2 3/3] drm/xe: Use ordered WQ for TLB invalidation fences Matthew Brost
2024-04-01 22:24 ` ✓ CI.Patch_applied: success for Rework work queue usage Patchwork
2024-04-01 22:24 ` ✓ CI.checkpatch: " Patchwork
2024-04-01 22:25 ` ✓ CI.KUnit: " Patchwork
2024-04-01 22:36 ` ✓ CI.Build: " Patchwork
2024-04-01 22:39 ` ✓ CI.Hooks: " Patchwork
2024-04-01 22:41 ` ✓ CI.checksparse: " Patchwork
2024-04-01 23:17 ` ✓ CI.BAT: " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240401221913.139672-3-matthew.brost@intel.com \
--to=matthew.brost@intel.com \
--cc=intel-xe@lists.freedesktop.org \
--cc=lucas.demarchi@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox