From: Matthew Brost <matthew.brost@intel.com>
To: intel-xe@lists.freedesktop.org
Cc: dri-devel@lists.freedesktop.org
Subject: [RFC PATCH 05/12] drm/xe: Return fence from xe_sched_job_arm and adjust job references
Date: Sun, 15 Mar 2026 21:32:48 -0700 [thread overview]
Message-ID: <20260316043255.226352-6-matthew.brost@intel.com> (raw)
In-Reply-To: <20260316043255.226352-1-matthew.brost@intel.com>
Update xe_sched_job_arm to return a fence that the caller can use to
install dma-resv fences, signal syncobjs, or wait for job completion.
The caller is responsible for dropping the fence reference when it is
finished with it.
Update xe_sched_job_push to take a job reference on behalf of the
scheduler (i.e., the reference it takes is owned by the scheduler). The
caller is now responsible for dropping the initial job creation
reference.
These semantics better align with those of the DRM dep queue layer.
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
drivers/gpu/drm/xe/tests/xe_migrate.c | 10 ++++++----
drivers/gpu/drm/xe/xe_exec.c | 12 ++++++------
drivers/gpu/drm/xe/xe_gsc.c | 5 +++--
drivers/gpu/drm/xe/xe_gsc_submit.c | 5 +++--
drivers/gpu/drm/xe/xe_gt.c | 5 +++--
drivers/gpu/drm/xe/xe_migrate.c | 25 +++++++++++++++----------
drivers/gpu/drm/xe/xe_oa.c | 5 +++--
drivers/gpu/drm/xe/xe_pxp_submit.c | 10 ++++++----
drivers/gpu/drm/xe/xe_sched_job.c | 5 +++--
drivers/gpu/drm/xe/xe_sched_job.h | 2 +-
10 files changed, 49 insertions(+), 35 deletions(-)
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c
index 34e2f0f4631f..257b373fcaec 100644
--- a/drivers/gpu/drm/xe/tests/xe_migrate.c
+++ b/drivers/gpu/drm/xe/tests/xe_migrate.c
@@ -50,9 +50,10 @@ static int run_sanity_job(struct xe_migrate *m, struct xe_device *xe,
return PTR_ERR(job);
}
- xe_sched_job_arm(job);
- fence = dma_fence_get(&job->drm.s_fence->finished);
+ fence = xe_sched_job_arm(job);
+ dma_fence_get(fence);
xe_sched_job_push(job);
+ xe_sched_job_put(job);
if (sanity_fence_failed(xe, fence, str, test))
return -ETIMEDOUT;
@@ -463,10 +464,11 @@ static struct dma_fence *blt_copy(struct xe_tile *tile,
xe_sched_job_add_migrate_flush(job, flush_flags);
mutex_lock(&m->job_mutex);
- xe_sched_job_arm(job);
dma_fence_put(fence);
- fence = dma_fence_get(&job->drm.s_fence->finished);
+ fence = xe_sched_job_arm(job);
+ dma_fence_get(fence);
xe_sched_job_push(job);
+ xe_sched_job_put(job);
dma_fence_put(m->fence);
m->fence = dma_fence_get(fence);
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index e05dabfcd43c..0c0ddf500981 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -124,6 +124,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
struct xe_validation_ctx ctx;
struct xe_sched_job *job;
struct xe_vm *vm;
+ struct dma_fence *fence;
bool write_locked;
int err = 0;
struct xe_hw_engine_group *group;
@@ -320,19 +321,19 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
* Point of no return, if we error after this point just set an error on
* the job and let the DRM scheduler / backend clean up the job.
*/
- xe_sched_job_arm(job);
+ fence = xe_sched_job_arm(job);
if (!xe_vm_in_lr_mode(vm))
- drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, &job->drm.s_fence->finished,
+ drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, fence,
DMA_RESV_USAGE_BOOKKEEP,
DMA_RESV_USAGE_BOOKKEEP);
for (i = 0; i < num_syncs; i++) {
- xe_sync_entry_signal(&syncs[i], &job->drm.s_fence->finished);
+ xe_sync_entry_signal(&syncs[i], fence);
xe_sched_job_init_user_fence(job, &syncs[i]);
}
if (!xe_vm_in_lr_mode(vm))
- xe_exec_queue_last_fence_set(q, vm, &job->drm.s_fence->finished);
+ xe_exec_queue_last_fence_set(q, vm, fence);
xe_sched_job_push(job);
xe_vm_reactivate_rebind(vm);
@@ -349,8 +350,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (!xe_vm_in_lr_mode(vm))
xe_svm_notifier_unlock(vm);
err_put_job:
- if (err)
- xe_sched_job_put(job);
+ xe_sched_job_put(job);
err_exec:
if (!xe_vm_in_lr_mode(vm))
xe_validation_ctx_fini(&ctx);
diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c
index e5c234f3d795..10168c27fc22 100644
--- a/drivers/gpu/drm/xe/xe_gsc.c
+++ b/drivers/gpu/drm/xe/xe_gsc.c
@@ -92,9 +92,10 @@ static int emit_gsc_upload(struct xe_gsc *gsc)
return PTR_ERR(job);
}
- xe_sched_job_arm(job);
- fence = dma_fence_get(&job->drm.s_fence->finished);
+ fence = xe_sched_job_arm(job);
+ dma_fence_get(fence);
xe_sched_job_push(job);
+ xe_sched_job_put(job);
timeout = dma_fence_wait_timeout(fence, false, HZ);
dma_fence_put(fence);
diff --git a/drivers/gpu/drm/xe/xe_gsc_submit.c b/drivers/gpu/drm/xe/xe_gsc_submit.c
index 08082b596501..2d506fb66017 100644
--- a/drivers/gpu/drm/xe/xe_gsc_submit.c
+++ b/drivers/gpu/drm/xe/xe_gsc_submit.c
@@ -202,9 +202,10 @@ int xe_gsc_pkt_submit_kernel(struct xe_gsc *gsc, u64 addr_in, u32 size_in,
return PTR_ERR(job);
}
- xe_sched_job_arm(job);
- fence = dma_fence_get(&job->drm.s_fence->finished);
+ fence = xe_sched_job_arm(job);
+ dma_fence_get(fence);
xe_sched_job_push(job);
+ xe_sched_job_put(job);
timeout = dma_fence_wait_timeout(fence, false, HZ);
dma_fence_put(fence);
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 79f4128fe325..10885ba7c2d2 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -182,9 +182,10 @@ static int emit_job_sync(struct xe_exec_queue *q, struct xe_bb *bb,
if (IS_ERR(job))
return PTR_ERR(job);
- xe_sched_job_arm(job);
- fence = dma_fence_get(&job->drm.s_fence->finished);
+ fence = xe_sched_job_arm(job);
+ dma_fence_get(fence);
xe_sched_job_push(job);
+ xe_sched_job_put(job);
timeout = dma_fence_wait_timeout(fence, false, timeout_jiffies);
dma_fence_put(fence);
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index fc918b4fba54..519f7c70abfb 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -1022,10 +1022,11 @@ static struct dma_fence *__xe_migrate_copy(struct xe_migrate *m,
}
mutex_lock(&m->job_mutex);
- xe_sched_job_arm(job);
dma_fence_put(fence);
- fence = dma_fence_get(&job->drm.s_fence->finished);
+ fence = xe_sched_job_arm(job);
+ dma_fence_get(fence);
xe_sched_job_push(job);
+ xe_sched_job_put(job);
dma_fence_put(m->fence);
m->fence = dma_fence_get(fence);
@@ -1438,10 +1439,11 @@ struct dma_fence *xe_migrate_vram_copy_chunk(struct xe_bo *vram_bo, u64 vram_off
DMA_RESV_USAGE_BOOKKEEP));
scoped_guard(mutex, &m->job_mutex) {
- xe_sched_job_arm(job);
dma_fence_put(fence);
- fence = dma_fence_get(&job->drm.s_fence->finished);
+ fence = xe_sched_job_arm(job);
+ dma_fence_get(fence);
xe_sched_job_push(job);
+ xe_sched_job_put(job);
dma_fence_put(m->fence);
m->fence = dma_fence_get(fence);
@@ -1676,10 +1678,11 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
}
mutex_lock(&m->job_mutex);
- xe_sched_job_arm(job);
dma_fence_put(fence);
- fence = dma_fence_get(&job->drm.s_fence->finished);
+ fence = xe_sched_job_arm(job);
+ dma_fence_get(fence);
xe_sched_job_push(job);
+ xe_sched_job_put(job);
dma_fence_put(m->fence);
m->fence = dma_fence_get(fence);
@@ -1996,9 +1999,10 @@ __xe_migrate_update_pgtables(struct xe_migrate *m,
if (is_migrate)
mutex_lock(&m->job_mutex);
- xe_sched_job_arm(job);
- fence = dma_fence_get(&job->drm.s_fence->finished);
+ fence = xe_sched_job_arm(job);
+ dma_fence_get(fence);
xe_sched_job_push(job);
+ xe_sched_job_put(job);
if (is_migrate)
mutex_unlock(&m->job_mutex);
@@ -2282,9 +2286,10 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
}
mutex_lock(&m->job_mutex);
- xe_sched_job_arm(job);
- fence = dma_fence_get(&job->drm.s_fence->finished);
+ fence = xe_sched_job_arm(job);
+ dma_fence_get(fence);
xe_sched_job_push(job);
+ xe_sched_job_put(job);
dma_fence_put(m->fence);
m->fence = dma_fence_get(fence);
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index c176a61febb2..8de8952a2ca7 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -653,9 +653,10 @@ static struct dma_fence *xe_oa_submit_bb(struct xe_oa_stream *stream, enum xe_oa
}
}
- xe_sched_job_arm(job);
- fence = dma_fence_get(&job->drm.s_fence->finished);
+ fence = xe_sched_job_arm(job);
+ dma_fence_get(fence);
xe_sched_job_push(job);
+ xe_sched_job_put(job);
xe_oa_unlock_vma(q);
diff --git a/drivers/gpu/drm/xe/xe_pxp_submit.c b/drivers/gpu/drm/xe/xe_pxp_submit.c
index e60526e30030..5af384acae82 100644
--- a/drivers/gpu/drm/xe/xe_pxp_submit.c
+++ b/drivers/gpu/drm/xe/xe_pxp_submit.c
@@ -315,9 +315,10 @@ int xe_pxp_submit_session_termination(struct xe_pxp *pxp, u32 id)
if (IS_ERR(job))
return PTR_ERR(job);
- xe_sched_job_arm(job);
- fence = dma_fence_get(&job->drm.s_fence->finished);
+ fence = xe_sched_job_arm(job);
+ dma_fence_get(fence);
xe_sched_job_push(job);
+ xe_sched_job_put(job);
timeout = dma_fence_wait_timeout(fence, false, HZ);
@@ -378,9 +379,10 @@ static int pxp_pkt_submit(struct xe_exec_queue *q, u64 batch_addr)
if (IS_ERR(job))
return PTR_ERR(job);
- xe_sched_job_arm(job);
- fence = dma_fence_get(&job->drm.s_fence->finished);
+ fence = xe_sched_job_arm(job);
+ dma_fence_get(fence);
xe_sched_job_push(job);
+ xe_sched_job_put(job);
timeout = dma_fence_wait_timeout(fence, false, HZ);
dma_fence_put(fence);
diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
index ae5b38b2a884..99f11bb4d2b9 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.c
+++ b/drivers/gpu/drm/xe/xe_sched_job.c
@@ -243,7 +243,7 @@ bool xe_sched_job_completed(struct xe_sched_job *job)
xe_lrc_seqno(lrc));
}
-void xe_sched_job_arm(struct xe_sched_job *job)
+struct dma_fence *xe_sched_job_arm(struct xe_sched_job *job)
{
struct xe_exec_queue *q = job->q;
struct dma_fence *fence, *prev;
@@ -288,6 +288,8 @@ void xe_sched_job_arm(struct xe_sched_job *job)
job->fence = dma_fence_get(fence); /* Pairs with put in scheduler */
drm_sched_job_arm(&job->drm);
+
+ return &job->drm.s_fence->finished;
}
void xe_sched_job_push(struct xe_sched_job *job)
@@ -295,7 +297,6 @@ void xe_sched_job_push(struct xe_sched_job *job)
xe_sched_job_get(job);
trace_xe_sched_job_exec(job);
drm_sched_entity_push_job(&job->drm);
- xe_sched_job_put(job);
}
/**
diff --git a/drivers/gpu/drm/xe/xe_sched_job.h b/drivers/gpu/drm/xe/xe_sched_job.h
index 1c1cb44216c3..a39cc4ab980b 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.h
+++ b/drivers/gpu/drm/xe/xe_sched_job.h
@@ -55,7 +55,7 @@ static inline bool xe_sched_job_is_error(struct xe_sched_job *job)
bool xe_sched_job_started(struct xe_sched_job *job);
bool xe_sched_job_completed(struct xe_sched_job *job);
-void xe_sched_job_arm(struct xe_sched_job *job);
+struct dma_fence *xe_sched_job_arm(struct xe_sched_job *job);
void xe_sched_job_push(struct xe_sched_job *job);
void xe_sched_job_init_user_fence(struct xe_sched_job *job,
--
2.34.1
next prev parent reply other threads:[~2026-03-16 4:33 UTC|newest]
Thread overview: 65+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-16 4:32 [RFC PATCH 00/12] Introduce DRM dep queue Matthew Brost
2026-03-16 4:32 ` [RFC PATCH 01/12] workqueue: Add interface to teach lockdep to warn on reclaim violations Matthew Brost
2026-03-25 15:59 ` Tejun Heo
2026-03-26 1:49 ` Matthew Brost
2026-03-26 2:19 ` Tejun Heo
2026-03-27 4:33 ` Matthew Brost
2026-03-27 17:25 ` Tejun Heo
2026-03-16 4:32 ` [RFC PATCH 02/12] drm/dep: Add DRM dependency queue layer Matthew Brost
2026-03-16 9:16 ` Boris Brezillon
2026-03-17 5:22 ` Matthew Brost
2026-03-17 8:48 ` Boris Brezillon
2026-03-16 10:25 ` Danilo Krummrich
2026-03-17 5:10 ` Matthew Brost
2026-03-17 12:19 ` Danilo Krummrich
2026-03-18 23:02 ` Matthew Brost
2026-03-17 2:47 ` Daniel Almeida
2026-03-17 5:45 ` Matthew Brost
2026-03-17 7:17 ` Miguel Ojeda
2026-03-17 8:26 ` Matthew Brost
2026-03-17 12:04 ` Daniel Almeida
2026-03-17 19:41 ` Miguel Ojeda
2026-03-23 17:31 ` Matthew Brost
2026-03-23 17:42 ` Miguel Ojeda
2026-03-17 18:14 ` Matthew Brost
2026-03-17 19:48 ` Daniel Almeida
2026-03-17 20:43 ` Boris Brezillon
2026-03-18 22:40 ` Matthew Brost
2026-03-19 9:57 ` Boris Brezillon
2026-03-22 6:43 ` Matthew Brost
2026-03-23 7:58 ` Matthew Brost
2026-03-23 10:06 ` Boris Brezillon
2026-03-23 17:11 ` Matthew Brost
2026-03-17 12:31 ` Danilo Krummrich
2026-03-17 14:25 ` Daniel Almeida
2026-03-17 14:33 ` Danilo Krummrich
2026-03-18 22:50 ` Matthew Brost
2026-03-17 8:47 ` Christian König
2026-03-17 14:55 ` Boris Brezillon
2026-03-18 23:28 ` Matthew Brost
2026-03-19 9:11 ` Boris Brezillon
2026-03-23 4:50 ` Matthew Brost
2026-03-23 9:55 ` Boris Brezillon
2026-03-23 17:08 ` Matthew Brost
2026-03-23 18:38 ` Matthew Brost
2026-03-24 9:23 ` Boris Brezillon
2026-03-24 16:06 ` Matthew Brost
2026-03-25 2:33 ` Matthew Brost
2026-03-24 8:49 ` Boris Brezillon
2026-03-24 16:51 ` Matthew Brost
2026-03-17 16:30 ` Shashank Sharma
2026-03-16 4:32 ` [RFC PATCH 03/12] drm/xe: Use WQ_MEM_WARN_ON_RECLAIM on all workqueues in the reclaim path Matthew Brost
2026-03-16 4:32 ` [RFC PATCH 04/12] drm/xe: Issue GGTT invalidation under lock in ggtt_node_remove Matthew Brost
2026-03-26 5:45 ` Bhadane, Dnyaneshwar
2026-03-16 4:32 ` Matthew Brost [this message]
2026-03-16 4:32 ` [RFC PATCH 06/12] drm/xe: Convert to DRM dep queue scheduler layer Matthew Brost
2026-03-16 4:32 ` [RFC PATCH 07/12] drm/xe: Make scheduler message lock IRQ-safe Matthew Brost
2026-03-16 4:32 ` [RFC PATCH 08/12] drm/xe: Rework exec queue object on top of DRM dep Matthew Brost
2026-03-16 4:32 ` [RFC PATCH 09/12] drm/xe: Enable IRQ job put in " Matthew Brost
2026-03-16 4:32 ` [RFC PATCH 10/12] drm/xe: Use DRM dep queue kill semantics Matthew Brost
2026-03-16 4:32 ` [RFC PATCH 11/12] accel/amdxdna: Convert to drm_dep scheduler layer Matthew Brost
2026-03-16 4:32 ` [RFC PATCH 12/12] drm/panthor: " Matthew Brost
2026-03-16 4:52 ` ✗ CI.checkpatch: warning for Introduce DRM dep queue Patchwork
2026-03-16 4:53 ` ✓ CI.KUnit: success " Patchwork
2026-03-16 5:28 ` ✓ Xe.CI.BAT: " Patchwork
2026-03-16 8:09 ` ✗ Xe.CI.FULL: failure " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260316043255.226352-6-matthew.brost@intel.com \
--to=matthew.brost@intel.com \
--cc=dri-devel@lists.freedesktop.org \
--cc=intel-xe@lists.freedesktop.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox