From: Matthew Brost <matthew.brost@intel.com>
To: intel-xe@lists.freedesktop.org
Cc: daniele.ceraolospurio@intel.com, carlos.santa@intel.com
Subject: [RFC PATCH 10/13] drm/xe: Implement GuC submission backend ops for deadlines
Date: Wed, 24 Dec 2025 17:17:31 -0800 [thread overview]
Message-ID: <20251225011734.341683-11-matthew.brost@intel.com> (raw)
In-Reply-To: <20251225011734.341683-1-matthew.brost@intel.com>
Implement GuC submission backend ops for deadlines: on deadline entry,
raise the scheduling priority of user queues created with CAP_SYS_NICE
and apply a queue frequency hint; on deadline exit, restore the queue's
original scheduling policies. The idea is that if a fence on a queue is
at risk of missing a deadline, we try to ensure this fence completes as
soon as possible.
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
drivers/gpu/drm/xe/xe_guc_exec_queue_types.h | 2 +-
drivers/gpu/drm/xe/xe_guc_submit.c | 110 ++++++++++++++++++-
2 files changed, 108 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
index a3b034e4b205..fcc7bca2405a 100644
--- a/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
@@ -31,7 +31,7 @@ struct xe_guc_exec_queue {
* a message needs to sent through the GPU scheduler but memory
* allocations are not allowed.
*/
-#define MAX_STATIC_MSG_TYPE 3
+#define MAX_STATIC_MSG_TYPE 5
struct xe_sched_msg static_msgs[MAX_STATIC_MSG_TYPE];
/** @lr_tdr: long running TDR worker */
struct work_struct lr_tdr;
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 76460b8ab407..791c64d6397f 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -20,6 +20,7 @@
#include "regs/xe_lrc_layout.h"
#include "xe_assert.h"
#include "xe_bo.h"
+#include "xe_deadline_mgr.h"
#include "xe_devcoredump.h"
#include "xe_device.h"
#include "xe_exec_queue.h"
@@ -552,6 +553,26 @@ static const int xe_exec_queue_prio_to_guc[] = {
[XE_EXEC_QUEUE_PRIORITY_KERNEL] = GUC_CLIENT_PRIORITY_KMD_HIGH,
};
+static void deadline_policies(struct xe_guc *guc, struct xe_exec_queue *q)
+{
+ struct exec_queue_policy policy;
+ enum xe_exec_queue_priority prio =
+ q->flags & EXEC_QUEUE_FLAG_CAP_SYS_NICE ?
+ XE_EXEC_QUEUE_PRIORITY_HIGH : q->sched_props.priority;
+ u32 slpc_exec_queue_freq_req = SLPC_CTX_FREQ_REQ_IS_COMPUTE;
+
+ xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q) &&
+ !xe_exec_queue_is_multi_queue_secondary(q));
+
+ __guc_exec_queue_policy_start_klv(&policy, q->guc->id);
+ __guc_exec_queue_policy_add_priority(&policy, xe_exec_queue_prio_to_guc[prio]);
+ __guc_exec_queue_policy_add_slpc_exec_queue_freq_req(&policy,
+ slpc_exec_queue_freq_req);
+
+ xe_guc_ct_send(&guc->ct, (u32 *)&policy.h2g,
+ __guc_exec_queue_policy_action_size(&policy), 0, 0);
+}
+
static void init_policies(struct xe_guc *guc, struct xe_exec_queue *q)
{
struct exec_queue_policy policy;
@@ -1249,6 +1270,7 @@ static void guc_exec_queue_free_job(struct drm_sched_job *drm_job)
struct xe_sched_job *job = to_xe_sched_job(drm_job);
trace_xe_sched_job_free(job);
+ xe_deadline_mgr_remove_deadline(&job->q->deadline_mgr, job->fence);
xe_sched_job_put(job);
}
@@ -2037,11 +2059,39 @@ static void __guc_exec_queue_process_msg_set_multi_queue_priority(struct xe_sche
kfree(msg);
}
+static void __guc_exec_queue_process_msg_enter_deadline(struct xe_sched_msg *msg)
+{
+ struct xe_exec_queue *q = msg->private_data;
+ struct xe_guc *guc = exec_queue_to_guc(q);
+
+ /* XXX: Rethink multi-q implementation */
+ if (xe_exec_queue_is_multi_queue_secondary(q))
+ q = xe_exec_queue_multi_queue_primary(q);
+
+ if (guc_exec_queue_allowed_to_change_state(q))
+ deadline_policies(guc, q);
+}
+
+static void __guc_exec_queue_process_msg_exit_deadline(struct xe_sched_msg *msg)
+{
+ struct xe_exec_queue *q = msg->private_data;
+ struct xe_guc *guc = exec_queue_to_guc(q);
+
+ /* XXX: Rethink multi-q implementation */
+ if (xe_exec_queue_is_multi_queue_secondary(q))
+ q = xe_exec_queue_multi_queue_primary(q);
+
+ if (guc_exec_queue_allowed_to_change_state(q))
+ init_policies(guc, q);
+}
+
#define CLEANUP 1 /* Non-zero values to catch uninitialized msg */
#define SET_SCHED_PROPS 2
#define SUSPEND 3
#define RESUME 4
#define SET_MULTI_QUEUE_PRIORITY 5
+#define ENTER_DEADLINE 6
+#define EXIT_DEADLINE 7
#define OPCODE_MASK 0xf
#define MSG_LOCKED BIT(8)
#define MSG_HEAD BIT(9)
@@ -2068,6 +2118,12 @@ static void guc_exec_queue_process_msg(struct xe_sched_msg *msg)
case SET_MULTI_QUEUE_PRIORITY:
__guc_exec_queue_process_msg_set_multi_queue_priority(msg);
break;
+ case ENTER_DEADLINE:
+ __guc_exec_queue_process_msg_enter_deadline(msg);
+ break;
+ case EXIT_DEADLINE:
+ __guc_exec_queue_process_msg_exit_deadline(msg);
+ break;
default:
XE_WARN_ON("Unknown message type");
}
@@ -2231,9 +2287,11 @@ static bool guc_exec_queue_try_add_msg(struct xe_exec_queue *q,
return true;
}
-#define STATIC_MSG_CLEANUP 0
-#define STATIC_MSG_SUSPEND 1
-#define STATIC_MSG_RESUME 2
+#define STATIC_MSG_CLEANUP 0
+#define STATIC_MSG_SUSPEND 1
+#define STATIC_MSG_RESUME 2
+#define STATIC_MSG_ENTER_DEADLINE 3
+#define STATIC_MSG_EXIT_DEADLINE 4
static void guc_exec_queue_destroy(struct xe_exec_queue *q)
{
struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_CLEANUP;
@@ -2401,6 +2459,49 @@ static bool guc_exec_queue_reset_status(struct xe_exec_queue *q)
return exec_queue_reset(q) || exec_queue_killed_or_banned_or_wedged(q);
}
+static void guc_exec_queue_set_deadline(struct xe_exec_queue *q,
+ struct dma_fence *fence,
+ ktime_t deadline)
+{
+ xe_deadline_mgr_add_deadline(&q->deadline_mgr, fence, deadline);
+}
+
+static void guc_exec_queue_enter_deadline(struct xe_exec_queue *q)
+{
+ struct xe_gpu_scheduler *sched = &q->guc->sched;
+ struct xe_sched_msg *msg = q->guc->static_msgs +
+ STATIC_MSG_ENTER_DEADLINE;
+
+ xe_sched_msg_lock(sched) {
+ if (!guc_exec_queue_try_add_msg(q, msg, ENTER_DEADLINE)) {
+ /*
+ * Corner case where a deadline enter + exit are in
+ * message list, delete the exit deadline message.
+ */
+ msg = q->guc->static_msgs + STATIC_MSG_EXIT_DEADLINE;
+ list_del_init(&msg->link);
+ }
+ }
+}
+
+static void guc_exec_queue_exit_deadline(struct xe_exec_queue *q)
+{
+ struct xe_gpu_scheduler *sched = &q->guc->sched;
+ struct xe_sched_msg *msg = q->guc->static_msgs +
+ STATIC_MSG_EXIT_DEADLINE;
+
+ xe_sched_msg_lock(sched) {
+ if (!guc_exec_queue_try_add_msg(q, msg, EXIT_DEADLINE)) {
+ /*
+ * Corner case where a deadline exit + enter are in
+ * message list, delete the enter deadline message.
+ */
+ msg = q->guc->static_msgs + STATIC_MSG_ENTER_DEADLINE;
+ list_del_init(&msg->link);
+ }
+ }
+}
+
/*
* All of these functions are an abstraction layer which other parts of Xe can
* use to trap into the GuC backend. All of these functions, aside from init,
@@ -2420,6 +2521,9 @@ static const struct xe_exec_queue_ops guc_exec_queue_ops = {
.suspend_wait = guc_exec_queue_suspend_wait,
.resume = guc_exec_queue_resume,
.reset_status = guc_exec_queue_reset_status,
+ .set_deadline = guc_exec_queue_set_deadline,
+ .enter_deadline = guc_exec_queue_enter_deadline,
+ .exit_deadline = guc_exec_queue_exit_deadline,
};
static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
--
2.34.1
next prev parent reply other threads:[~2025-12-25 1:18 UTC|newest]
Thread overview: 18+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-12-25 1:17 [RFC PATCH 00/13] Fence deadlines in Xe Matthew Brost
2025-12-25 1:17 ` [RFC PATCH 01/13] drm/xe: Add dedicated message lock Matthew Brost
2025-12-25 1:17 ` [RFC PATCH 02/13] drm/xe: Add EXEC_QUEUE_FLAG_CAP_SYS_NICE Matthew Brost
2025-12-25 1:17 ` [RFC PATCH 03/13] drm/xe: Store exec queue in hardware fence Matthew Brost
2025-12-25 1:17 ` [RFC PATCH 04/13] drm/xe: Add deadline exec queue vfuncs Matthew Brost
2025-12-25 1:17 ` [RFC PATCH 05/13] drm/xe: Export to_xe_hw_fence Matthew Brost
2025-12-25 1:17 ` [RFC PATCH 06/13] drm/xe: Add deadline manager Matthew Brost
2025-12-25 1:17 ` [RFC PATCH 07/13] drm/xe: Add deadline manager to user exec queues Matthew Brost
2025-12-25 1:17 ` [RFC PATCH 08/13] drm/xe: Stub out execlists deadline vfuncs as NOPs Matthew Brost
2025-12-25 1:17 ` [RFC PATCH 09/13] drm/xe: Make scheduler message lock IRQ-safe Matthew Brost
2025-12-25 1:17 ` Matthew Brost [this message]
2025-12-25 1:17 ` [RFC PATCH 11/13] drm/xe: Enable deadlines on hardware fences Matthew Brost
2025-12-25 1:17 ` [RFC PATCH 12/13] drm/xe: Add CONFIG_DRM_XE_DEADLINE_WINDOW Matthew Brost
2025-12-25 1:17 ` [RFC PATCH 13/13] drm/xe: Add exec queue deadline trace points Matthew Brost
2025-12-25 1:25 ` ✗ CI.checkpatch: warning for Fence deadlines in Xe Patchwork
2025-12-25 1:26 ` ✓ CI.KUnit: success " Patchwork
2025-12-25 2:01 ` ✗ Xe.CI.BAT: failure " Patchwork
2025-12-25 3:02 ` ✓ Xe.CI.Full: success " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20251225011734.341683-11-matthew.brost@intel.com \
--to=matthew.brost@intel.com \
--cc=carlos.santa@intel.com \
--cc=daniele.ceraolospurio@intel.com \
--cc=intel-xe@lists.freedesktop.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox