From: Matthew Brost <matthew.brost@intel.com>
To: intel-xe@lists.freedesktop.org
Cc: daniele.ceraolospurio@intel.com, carlos.santa@intel.com
Subject: [PATCH v2 12/22] drm/xe: Implement GuC submission backend ops for deadlines
Date: Sun, 4 Jan 2026 20:02:27 -0800 [thread overview]
Message-ID: <20260105040237.1307873-13-matthew.brost@intel.com> (raw)
In-Reply-To: <20260105040237.1307873-1-matthew.brost@intel.com>
Implement GuC submission backend ops for deadlines, which dynamically
raise or lower the priority of user queues with CAP_SYS_NICE and adjust
queue frequency upon deadline state change. The idea is that if a fence
on a queue is at risk of missing a deadline, we try to ensure this fence
completes as soon as possible.
v2:
- Disallow parallel / multi-q
- Tie removal of deadline to job's refcount
- Remove exit_deadline, rather use enum for control
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
drivers/gpu/drm/xe/xe_guc_exec_queue_types.h | 2 +-
drivers/gpu/drm/xe/xe_guc_submit.c | 133 +++++++++++++++++--
drivers/gpu/drm/xe/xe_sched_job.c | 3 +
3 files changed, 126 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
index a3b034e4b205..83dfb15aa4bd 100644
--- a/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
@@ -31,7 +31,7 @@ struct xe_guc_exec_queue {
* a message needs to sent through the GPU scheduler but memory
* allocations are not allowed.
*/
-#define MAX_STATIC_MSG_TYPE 3
+#define MAX_STATIC_MSG_TYPE 4
struct xe_sched_msg static_msgs[MAX_STATIC_MSG_TYPE];
/** @lr_tdr: long running TDR worker */
struct work_struct lr_tdr;
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 26cd9fa6e2b3..1aca444faf8b 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -20,6 +20,8 @@
#include "regs/xe_lrc_layout.h"
#include "xe_assert.h"
#include "xe_bo.h"
+#include "xe_deadline_mgr.h"
+#include "xe_deadline_mgr_types.h"
#include "xe_devcoredump.h"
#include "xe_device.h"
#include "xe_exec_queue.h"
@@ -552,6 +554,35 @@ static const int xe_exec_queue_prio_to_guc[] = {
[XE_EXEC_QUEUE_PRIORITY_KERNEL] = GUC_CLIENT_PRIORITY_KMD_HIGH,
};
+/*
+ * deadline_policies() - Push updated GuC scheduling policies for a queue
+ * based on its current deadline-boost state.
+ *
+ * Priority is raised to HIGH only in the PRIO_BOOST state and only if the
+ * queue was created with CAP_SYS_NICE; otherwise the queue's configured
+ * priority is kept. The SLPC compute-frequency request is asserted for any
+ * boost state, or unconditionally for LOW_LATENCY queues (matching the
+ * non-deadline policy setup).
+ *
+ * Caller must hold whatever serialization the message processing path
+ * provides; the queue must be registered and must not be a multi-queue
+ * secondary (asserted below).
+ */
+static void deadline_policies(struct xe_guc *guc, struct xe_exec_queue *q,
+ enum xe_deadline_mgr_state state)
+{
+ struct exec_queue_policy policy;
+ enum xe_exec_queue_priority prio = q->sched_props.priority;
+ u32 slpc_exec_queue_freq_req = 0;
+
+ xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q) &&
+ !xe_exec_queue_is_multi_queue_secondary(q));
+ xe_gt_assert(guc_to_gt(guc), state !=
+ XE_DEADLINE_MGR_STATE_UNSUPPORTED);
+
+ /* Priority boost is gated on the queue holding CAP_SYS_NICE */
+ if (state == XE_DEADLINE_MGR_STATE_PRIO_BOOST &&
+ (q->flags & EXEC_QUEUE_FLAG_CAP_SYS_NICE))
+ prio = XE_EXEC_QUEUE_PRIORITY_HIGH;
+
+ /* Any boost state (FREQ or PRIO), or a LOW_LATENCY queue, bumps freq */
+ if (state != XE_DEADLINE_MGR_STATE_NO_BOOST ||
+ (q->flags & EXEC_QUEUE_FLAG_LOW_LATENCY))
+ slpc_exec_queue_freq_req |= SLPC_CTX_FREQ_REQ_IS_COMPUTE;
+
+ __guc_exec_queue_policy_start_klv(&policy, q->guc->id);
+ __guc_exec_queue_policy_add_priority(&policy, xe_exec_queue_prio_to_guc[prio]);
+ __guc_exec_queue_policy_add_slpc_exec_queue_freq_req(&policy,
+ slpc_exec_queue_freq_req);
+
+ /* NOTE(review): CT send result ignored — best-effort by design? confirm */
+ xe_guc_ct_send(&guc->ct, (u32 *)&policy.h2g,
+ __guc_exec_queue_policy_action_size(&policy), 0, 0);
+}
+
static void init_policies(struct xe_guc *guc, struct xe_exec_queue *q)
{
struct exec_queue_policy policy;
@@ -1863,6 +1894,18 @@ static void __guc_exec_queue_destroy(struct xe_guc *guc, struct xe_exec_queue *q
guc_exec_queue_destroy_async(q);
}
+#define CLEANUP 1 /* Non-zero values to catch uninitialized msg */
+#define SET_SCHED_PROPS 2
+#define SUSPEND 3
+#define RESUME 4
+#define SET_MULTI_QUEUE_PRIORITY 5
+#define ENTER_DEADLINE_FREQ 6
+#define ENTER_DEADLINE_PRIO 7
+#define EXIT_DEADLINE 8
+#define OPCODE_MASK 0xf
+#define MSG_LOCKED BIT(8)
+#define MSG_HEAD BIT(9)
+
static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg)
{
struct xe_exec_queue *q = msg->private_data;
@@ -2037,14 +2080,24 @@ static void __guc_exec_queue_process_msg_set_multi_queue_priority(struct xe_sche
kfree(msg);
}
-#define CLEANUP 1 /* Non-zero values to catch uninitialized msg */
-#define SET_SCHED_PROPS 2
-#define SUSPEND 3
-#define RESUME 4
-#define SET_MULTI_QUEUE_PRIORITY 5
-#define OPCODE_MASK 0xf
-#define MSG_LOCKED BIT(8)
-#define MSG_HEAD BIT(9)
+/*
+ * __guc_exec_queue_process_msg_set_deadline_state() - Process a queued
+ * deadline state-change message.
+ *
+ * Translates the scheduler message opcode (ENTER_DEADLINE_FREQ /
+ * ENTER_DEADLINE_PRIO / EXIT_DEADLINE) back into a deadline manager state
+ * and reprograms the queue's GuC policies accordingly.
+ *
+ * Unlike the dynamically allocated multi-queue-priority message handler,
+ * @msg is not freed here — it appears to be the static
+ * STATIC_MSG_SET_DEADLINE_STATE slot owned by the exec queue.
+ */
+static void
+__guc_exec_queue_process_msg_set_deadline_state(struct xe_sched_msg *msg,
+ unsigned int opcode)
+{
+ struct xe_exec_queue *q = msg->private_data;
+ struct xe_guc *guc = exec_queue_to_guc(q);
+ enum xe_deadline_mgr_state state;
+
+ if (opcode == EXIT_DEADLINE)
+ state = XE_DEADLINE_MGR_STATE_NO_BOOST;
+ else if (opcode == ENTER_DEADLINE_FREQ)
+ state = XE_DEADLINE_MGR_STATE_FREQ_BOOST;
+ else
+ state = XE_DEADLINE_MGR_STATE_PRIO_BOOST;
+
+ /* Skip if the queue is banned/killed/unregistered etc. */
+ if (guc_exec_queue_allowed_to_change_state(q))
+ deadline_policies(guc, q, state);
+}
static void guc_exec_queue_process_msg(struct xe_sched_msg *msg,
unsigned int opcode)
@@ -2069,6 +2122,11 @@ static void guc_exec_queue_process_msg(struct xe_sched_msg *msg,
case SET_MULTI_QUEUE_PRIORITY:
__guc_exec_queue_process_msg_set_multi_queue_priority(msg);
break;
+ case ENTER_DEADLINE_FREQ:
+ case ENTER_DEADLINE_PRIO:
+ case EXIT_DEADLINE:
+ __guc_exec_queue_process_msg_set_deadline_state(msg, opcode);
+ break;
default:
XE_WARN_ON("Unknown message type");
}
@@ -2232,9 +2290,11 @@ static bool guc_exec_queue_try_add_msg(struct xe_exec_queue *q,
return true;
}
-#define STATIC_MSG_CLEANUP 0
-#define STATIC_MSG_SUSPEND 1
-#define STATIC_MSG_RESUME 2
+#define STATIC_MSG_CLEANUP 0
+#define STATIC_MSG_SUSPEND 1
+#define STATIC_MSG_RESUME 2
+#define STATIC_MSG_SET_DEADLINE_STATE 3
+
static void guc_exec_queue_destroy(struct xe_exec_queue *q)
{
struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_CLEANUP;
@@ -2402,6 +2462,55 @@ static bool guc_exec_queue_reset_status(struct xe_exec_queue *q)
return exec_queue_reset(q) || exec_queue_killed_or_banned_or_wedged(q);
}
+/*
+ * guc_exec_queue_set_deadline() - Backend op: record a fence deadline.
+ *
+ * Thin forwarding wrapper to the queue's deadline manager; state changes
+ * resulting from the new deadline are delivered later via
+ * guc_exec_queue_set_deadline_state().
+ */
+static void guc_exec_queue_set_deadline(struct xe_exec_queue *q,
+ struct dma_fence *fence,
+ ktime_t deadline)
+{
+ xe_deadline_mgr_add_deadline(&q->deadline_mgr, fence, deadline);
+}
+
+/*
+ * guc_exec_queue_set_deadline_state() - Backend op: queue a deadline
+ * state-change message to the GPU scheduler.
+ *
+ * Maps the deadline manager state to a scheduler message opcode and adds
+ * the queue's static STATIC_MSG_SET_DEADLINE_STATE message. Only the most
+ * recent state matters: if a previous, unprocessed state-change message is
+ * still pending, it is removed and replaced with the new opcode.
+ */
+static void guc_exec_queue_set_deadline_state(struct xe_exec_queue *q,
+ enum xe_deadline_mgr_state state)
+{
+ struct xe_gpu_scheduler *sched = &q->guc->sched;
+ struct xe_sched_msg *msg = q->guc->static_msgs +
+ STATIC_MSG_SET_DEADLINE_STATE;
+ struct xe_guc *guc = exec_queue_to_guc(q);
+ unsigned int opcode;
+
+ xe_gt_assert(guc_to_gt(guc), state !=
+ XE_DEADLINE_MGR_STATE_UNSUPPORTED);
+
+ switch (state) {
+ case XE_DEADLINE_MGR_STATE_NO_BOOST:
+ opcode = EXIT_DEADLINE;
+ break;
+ case XE_DEADLINE_MGR_STATE_FREQ_BOOST:
+ opcode = ENTER_DEADLINE_FREQ;
+ break;
+ case XE_DEADLINE_MGR_STATE_PRIO_BOOST:
+ opcode = ENTER_DEADLINE_PRIO;
+ break;
+ default:
+ drm_warn(&guc_to_xe(guc)->drm, "NOT POSSIBLE");
+ /* Must bail out — falling through would queue a msg with an
+ * uninitialized opcode (undefined behavior).
+ */
+ return;
+ }
+
+ xe_sched_msg_scoped_guard(sched) {
+ if (!guc_exec_queue_try_add_msg(q, msg, opcode)) {
+ bool added;
+
+ /*
+ * A deadline state change has yet to be processed,
+ * remove it and re-add with the latest opcode.
+ */
+ list_del_init(&msg->link);
+
+ added = guc_exec_queue_try_add_msg(q, msg, opcode);
+ xe_gt_assert(guc_to_gt(guc), added);
+ }
+ }
+}
+
/*
* All of these functions are an abstraction layer which other parts of Xe can
* use to trap into the GuC backend. All of these functions, aside from init,
@@ -2421,6 +2530,8 @@ static const struct xe_exec_queue_ops guc_exec_queue_ops = {
.suspend_wait = guc_exec_queue_suspend_wait,
.resume = guc_exec_queue_resume,
.reset_status = guc_exec_queue_reset_status,
+ .set_deadline = guc_exec_queue_set_deadline,
+ .set_deadline_state = guc_exec_queue_set_deadline_state,
};
static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
index 6099b4445835..3d02f02ae9bb 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.c
+++ b/drivers/gpu/drm/xe/xe_sched_job.c
@@ -9,6 +9,7 @@
#include <linux/dma-fence-chain.h>
#include <linux/slab.h>
+#include "xe_deadline_mgr.h"
#include "xe_device.h"
#include "xe_exec_queue.h"
#include "xe_gt.h"
@@ -174,6 +175,8 @@ void xe_sched_job_destroy(struct kref *ref)
struct xe_device *xe = job_to_xe(job);
struct xe_exec_queue *q = job->q;
+ if (job->fence)
+ xe_deadline_mgr_remove_deadline(&q->deadline_mgr, job->fence);
xe_sched_job_free_fences(job);
dma_fence_put(job->fence);
drm_sched_job_cleanup(&job->drm);
--
2.34.1
next prev parent reply other threads:[~2026-01-05 4:02 UTC|newest]
Thread overview: 33+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-01-05 4:02 [PATCH v2 00/22] Fence deadlines in Xe Matthew Brost
2026-01-05 4:02 ` [PATCH v2 01/22] drm/xe: Add dedicated message lock Matthew Brost
2026-01-05 4:02 ` [PATCH v2 02/22] drm/xe: Add EXEC_QUEUE_FLAG_CAP_SYS_NICE Matthew Brost
2026-02-05 16:00 ` Rodrigo Vivi
2026-01-05 4:02 ` [PATCH v2 03/22] drm/xe: Store exec queue in hardware fence Matthew Brost
2026-02-05 16:02 ` Rodrigo Vivi
2026-01-05 4:02 ` [PATCH v2 04/22] drm/xe: Add deadline exec queue vfuncs Matthew Brost
2026-02-05 16:03 ` Rodrigo Vivi
2026-01-05 4:02 ` [PATCH v2 05/22] drm/xe: Export to_xe_hw_fence Matthew Brost
2026-01-05 4:02 ` [PATCH v2 06/22] drm/xe: Export xe_hw_fence_signaled Matthew Brost
2026-01-05 4:02 ` [PATCH v2 07/22] drm/xe: Implement deadline manager Matthew Brost
2026-01-05 4:02 ` [PATCH v2 08/22] drm/xe: Initialize deadline manager on exec queues Matthew Brost
2026-01-05 4:02 ` [PATCH v2 09/22] drm/xe: Stub out execlists deadline vfuncs as NOPs Matthew Brost
2026-01-05 4:02 ` [PATCH v2 10/22] drm/xe: Make scheduler message lock IRQ-safe Matthew Brost
2026-01-05 4:02 ` [PATCH v2 11/22] drm/xe: Support unstable opcodes for static scheduler messages Matthew Brost
2026-01-05 4:02 ` Matthew Brost [this message]
2026-01-10 10:48 ` [PATCH v2 12/22] drm/xe: Implement GuC submission backend ops for deadlines kernel test robot
2026-01-05 4:02 ` [PATCH v2 13/22] drm/xe: Enable deadlines on hardware fences Matthew Brost
2026-01-05 4:02 ` [PATCH v2 14/22] drm/xe: Fix Kconfig.profile newlines Matthew Brost
2026-02-05 16:06 ` Rodrigo Vivi
2026-01-05 4:02 ` [PATCH v2 15/22] drm/xe: Add deadline Kconfig options Matthew Brost
2026-01-05 4:02 ` [PATCH v2 16/22] drm/xe: Add exec queue deadline trace points Matthew Brost
2026-01-05 4:02 ` [PATCH v2 17/22] drm/xe: Add hw fence " Matthew Brost
2026-01-05 4:02 ` [PATCH v2 18/22] drm/xe: Add timestamp_ms to LRC snapshot Matthew Brost
2026-01-05 4:02 ` [PATCH v2 19/22] drm/xe: Enforce GuC static message defines Matthew Brost
2026-01-05 4:02 ` [PATCH v2 20/22] drm/xe: Document the deadline manager Matthew Brost
2026-01-05 4:02 ` [PATCH v2 21/22] drm/atomic: Export fence deadline helper for atomic commits Matthew Brost
2026-01-05 4:02 ` [PATCH v2 22/22] drm/i915/display: Use atomic helper to set plane fence deadlines Matthew Brost
2026-01-05 4:09 ` ✗ CI.checkpatch: warning for Fence deadlines in Xe (rev2) Patchwork
2026-01-05 4:10 ` ✓ CI.KUnit: success " Patchwork
2026-01-05 4:26 ` ✗ CI.checksparse: warning " Patchwork
2026-01-05 5:07 ` ✓ Xe.CI.BAT: success " Patchwork
2026-01-05 6:51 ` ✗ Xe.CI.Full: failure " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260105040237.1307873-13-matthew.brost@intel.com \
--to=matthew.brost@intel.com \
--cc=carlos.santa@intel.com \
--cc=daniele.ceraolospurio@intel.com \
--cc=intel-xe@lists.freedesktop.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox