From: Rodrigo Vivi <rodrigo.vivi@intel.com>
To: Matthew Brost <matthew.brost@intel.com>
Cc: <intel-xe@lists.freedesktop.org>,
<daniele.ceraolospurio@intel.com>, <carlos.santa@intel.com>
Subject: Re: [PATCH v2 03/22] drm/xe: Store exec queue in hardware fence
Date: Thu, 5 Feb 2026 11:02:38 -0500 [thread overview]
Message-ID: <aYS_Hk5sHqcA9r4i@intel.com> (raw)
In-Reply-To: <20260105040237.1307873-4-matthew.brost@intel.com>
On Sun, Jan 04, 2026 at 08:02:18PM -0800, Matthew Brost wrote:
> Enable hardware fences to set deadlines for exec queues.
probably worth expanding this message to explicitly say that
this is to be used by follow-up work that is introducing
the deadlines...
the patch itself looks good
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
>
> v2:
> - Fix kernel doc (CI)
>
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> ---
> drivers/gpu/drm/xe/xe_hw_fence.c | 4 +++-
> drivers/gpu/drm/xe/xe_hw_fence.h | 2 +-
> drivers/gpu/drm/xe/xe_hw_fence_types.h | 6 ++++++
> drivers/gpu/drm/xe/xe_lrc.c | 6 ++++--
> drivers/gpu/drm/xe/xe_lrc.h | 3 ++-
> drivers/gpu/drm/xe/xe_sched_job.c | 2 +-
> 6 files changed, 17 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_hw_fence.c b/drivers/gpu/drm/xe/xe_hw_fence.c
> index f6057456e460..5995bf095843 100644
> --- a/drivers/gpu/drm/xe/xe_hw_fence.c
> +++ b/drivers/gpu/drm/xe/xe_hw_fence.c
> @@ -242,6 +242,7 @@ void xe_hw_fence_free(struct dma_fence *fence)
> * xe_hw_fence_init() - Initialize an hw fence.
> * @fence: Pointer to the fence to initialize.
> * @ctx: Pointer to the struct xe_hw_fence_ctx fence context.
> + * @q: Pointer to exec queue tied to the fence.
> * @seqno_map: Pointer to the map into where the seqno is blitted.
> *
> * Initializes a pre-allocated hw fence.
> @@ -249,12 +250,13 @@ void xe_hw_fence_free(struct dma_fence *fence)
> * dma-fence refcounting.
> */
> void xe_hw_fence_init(struct dma_fence *fence, struct xe_hw_fence_ctx *ctx,
> - struct iosys_map seqno_map)
> + struct xe_exec_queue *q, struct iosys_map seqno_map)
> {
> struct xe_hw_fence *hw_fence =
> container_of(fence, typeof(*hw_fence), dma);
>
> hw_fence->xe = gt_to_xe(ctx->gt);
> + hw_fence->q = q;
> snprintf(hw_fence->name, sizeof(hw_fence->name), "%s", ctx->name);
> hw_fence->seqno_map = seqno_map;
> INIT_LIST_HEAD(&hw_fence->irq_link);
> diff --git a/drivers/gpu/drm/xe/xe_hw_fence.h b/drivers/gpu/drm/xe/xe_hw_fence.h
> index f13a1c4982c7..7a8678c881d8 100644
> --- a/drivers/gpu/drm/xe/xe_hw_fence.h
> +++ b/drivers/gpu/drm/xe/xe_hw_fence.h
> @@ -29,5 +29,5 @@ struct dma_fence *xe_hw_fence_alloc(void);
> void xe_hw_fence_free(struct dma_fence *fence);
>
> void xe_hw_fence_init(struct dma_fence *fence, struct xe_hw_fence_ctx *ctx,
> - struct iosys_map seqno_map);
> + struct xe_exec_queue *q, struct iosys_map seqno_map);
> #endif
> diff --git a/drivers/gpu/drm/xe/xe_hw_fence_types.h b/drivers/gpu/drm/xe/xe_hw_fence_types.h
> index 58a8d09afe5c..052bbab1fad6 100644
> --- a/drivers/gpu/drm/xe/xe_hw_fence_types.h
> +++ b/drivers/gpu/drm/xe/xe_hw_fence_types.h
> @@ -13,6 +13,7 @@
> #include <linux/spinlock.h>
>
> struct xe_device;
> +struct xe_exec_queue;
> struct xe_gt;
>
> /**
> @@ -64,6 +65,11 @@ struct xe_hw_fence {
> struct dma_fence dma;
> /** @xe: Xe device for hw fence driver name */
> struct xe_device *xe;
> + /**
> + * @q: Exec queue which fence is tied to, not ref counted, lookup
> + * protected by fence lock.
> + */
> + struct xe_exec_queue *q;
> /** @name: name of hardware fence context */
> char name[MAX_FENCE_NAME_LEN];
> /** @seqno_map: I/O map for seqno */
> diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
> index 70eae7d03a27..eccc7f2642bf 100644
> --- a/drivers/gpu/drm/xe/xe_lrc.c
> +++ b/drivers/gpu/drm/xe/xe_lrc.c
> @@ -1783,15 +1783,17 @@ void xe_lrc_free_seqno_fence(struct dma_fence *fence)
> /**
> * xe_lrc_init_seqno_fence() - Initialize an lrc seqno fence.
> * @lrc: Pointer to the lrc.
> + * @q: Pointer to exec queue.
> * @fence: Pointer to the fence to initialize.
> *
> * Initializes a pre-allocated lrc seqno fence.
> * After initialization, the fence is subject to normal
> * dma-fence refcounting.
> */
> -void xe_lrc_init_seqno_fence(struct xe_lrc *lrc, struct dma_fence *fence)
> +void xe_lrc_init_seqno_fence(struct xe_lrc *lrc, struct xe_exec_queue *q,
> + struct dma_fence *fence)
> {
> - xe_hw_fence_init(fence, &lrc->fence_ctx, __xe_lrc_seqno_map(lrc));
> + xe_hw_fence_init(fence, &lrc->fence_ctx, q, __xe_lrc_seqno_map(lrc));
> }
>
> s32 xe_lrc_seqno(struct xe_lrc *lrc)
> diff --git a/drivers/gpu/drm/xe/xe_lrc.h b/drivers/gpu/drm/xe/xe_lrc.h
> index 8acf85273c1a..3d72b4c0da8e 100644
> --- a/drivers/gpu/drm/xe/xe_lrc.h
> +++ b/drivers/gpu/drm/xe/xe_lrc.h
> @@ -118,7 +118,8 @@ u64 xe_lrc_descriptor(struct xe_lrc *lrc);
> u32 xe_lrc_seqno_ggtt_addr(struct xe_lrc *lrc);
> struct dma_fence *xe_lrc_alloc_seqno_fence(void);
> void xe_lrc_free_seqno_fence(struct dma_fence *fence);
> -void xe_lrc_init_seqno_fence(struct xe_lrc *lrc, struct dma_fence *fence);
> +void xe_lrc_init_seqno_fence(struct xe_lrc *lrc, struct xe_exec_queue *q,
> + struct dma_fence *fence);
> s32 xe_lrc_seqno(struct xe_lrc *lrc);
>
> u32 xe_lrc_start_seqno_ggtt_addr(struct xe_lrc *lrc);
> diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
> index cb674a322113..6099b4445835 100644
> --- a/drivers/gpu/drm/xe/xe_sched_job.c
> +++ b/drivers/gpu/drm/xe/xe_sched_job.c
> @@ -270,7 +270,7 @@ void xe_sched_job_arm(struct xe_sched_job *job)
> struct dma_fence_chain *chain;
>
> fence = job->ptrs[i].lrc_fence;
> - xe_lrc_init_seqno_fence(q->lrc[i], fence);
> + xe_lrc_init_seqno_fence(q->lrc[i], q, fence);
> job->ptrs[i].lrc_fence = NULL;
> if (!i) {
> job->lrc_seqno = fence->seqno;
> --
> 2.34.1
>
next prev parent reply other threads:[~2026-02-05 16:03 UTC|newest]
Thread overview: 33+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-01-05 4:02 [PATCH v2 00/22] Fence deadlines in Xe Matthew Brost
2026-01-05 4:02 ` [PATCH v2 01/22] drm/xe: Add dedicated message lock Matthew Brost
2026-01-05 4:02 ` [PATCH v2 02/22] drm/xe: Add EXEC_QUEUE_FLAG_CAP_SYS_NICE Matthew Brost
2026-02-05 16:00 ` Rodrigo Vivi
2026-01-05 4:02 ` [PATCH v2 03/22] drm/xe: Store exec queue in hardware fence Matthew Brost
2026-02-05 16:02 ` Rodrigo Vivi [this message]
2026-01-05 4:02 ` [PATCH v2 04/22] drm/xe: Add deadline exec queue vfuncs Matthew Brost
2026-02-05 16:03 ` Rodrigo Vivi
2026-01-05 4:02 ` [PATCH v2 05/22] drm/xe: Export to_xe_hw_fence Matthew Brost
2026-01-05 4:02 ` [PATCH v2 06/22] drm/xe: Export xe_hw_fence_signaled Matthew Brost
2026-01-05 4:02 ` [PATCH v2 07/22] drm/xe: Implement deadline manager Matthew Brost
2026-01-05 4:02 ` [PATCH v2 08/22] drm/xe: Initialize deadline manager on exec queues Matthew Brost
2026-01-05 4:02 ` [PATCH v2 09/22] drm/xe: Stub out execlists deadline vfuncs as NOPs Matthew Brost
2026-01-05 4:02 ` [PATCH v2 10/22] drm/xe: Make scheduler message lock IRQ-safe Matthew Brost
2026-01-05 4:02 ` [PATCH v2 11/22] drm/xe: Support unstable opcodes for static scheduler messages Matthew Brost
2026-01-05 4:02 ` [PATCH v2 12/22] drm/xe: Implement GuC submission backend ops for deadlines Matthew Brost
2026-01-10 10:48 ` kernel test robot
2026-01-05 4:02 ` [PATCH v2 13/22] drm/xe: Enable deadlines on hardware fences Matthew Brost
2026-01-05 4:02 ` [PATCH v2 14/22] drm/xe: Fix Kconfig.profile newlines Matthew Brost
2026-02-05 16:06 ` Rodrigo Vivi
2026-01-05 4:02 ` [PATCH v2 15/22] drm/xe: Add deadline Kconfig options Matthew Brost
2026-01-05 4:02 ` [PATCH v2 16/22] drm/xe: Add exec queue deadline trace points Matthew Brost
2026-01-05 4:02 ` [PATCH v2 17/22] drm/xe: Add hw fence " Matthew Brost
2026-01-05 4:02 ` [PATCH v2 18/22] drm/xe: Add timestamp_ms to LRC snapshot Matthew Brost
2026-01-05 4:02 ` [PATCH v2 19/22] drm/xe: Enforce GuC static message defines Matthew Brost
2026-01-05 4:02 ` [PATCH v2 20/22] drm/xe: Document the deadline manager Matthew Brost
2026-01-05 4:02 ` [PATCH v2 21/22] drm/atomic: Export fence deadline helper for atomic commits Matthew Brost
2026-01-05 4:02 ` [PATCH v2 22/22] drm/i915/display: Use atomic helper to set plane fence deadlines Matthew Brost
2026-01-05 4:09 ` ✗ CI.checkpatch: warning for Fence deadlines in Xe (rev2) Patchwork
2026-01-05 4:10 ` ✓ CI.KUnit: success " Patchwork
2026-01-05 4:26 ` ✗ CI.checksparse: warning " Patchwork
2026-01-05 5:07 ` ✓ Xe.CI.BAT: success " Patchwork
2026-01-05 6:51 ` ✗ Xe.CI.Full: failure " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=aYS_Hk5sHqcA9r4i@intel.com \
--to=rodrigo.vivi@intel.com \
--cc=carlos.santa@intel.com \
--cc=daniele.ceraolospurio@intel.com \
--cc=intel-xe@lists.freedesktop.org \
--cc=matthew.brost@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox