Intel-XE Archive on lore.kernel.org
From: Matthew Brost <matthew.brost@intel.com>
To: intel-xe@lists.freedesktop.org
Cc: daniele.ceraolospurio@intel.com, carlos.santa@intel.com
Subject: [PATCH v2 10/22] drm/xe: Make scheduler message lock IRQ-safe
Date: Sun,  4 Jan 2026 20:02:25 -0800
Message-ID: <20260105040237.1307873-11-matthew.brost@intel.com>
In-Reply-To: <20260105040237.1307873-1-matthew.brost@intel.com>

It is legal to modify deadlines from IRQ context (e.g., from an hrtimer
callback), and setting a deadline can add a scheduler message, so the
scheduler message lock needs to be IRQ-safe. Replace the
xe_sched_msg_lock()/xe_sched_msg_unlock() helpers with a scoped_guard()
based macro that takes the lock with IRQs saved/disabled
(spinlock_irqsave), making it IRQ-safe.
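
As a rough sketch (illustrative only, not the exact expansion of the
kernel's scoped_guard() macro), the new xe_sched_msg_scoped_guard() block
behaves like an irqsave lock/unlock pair around its body:

	/* Illustrative equivalent of xe_sched_msg_scoped_guard(sched) { ... } */
	unsigned long flags;

	spin_lock_irqsave(&sched->msg_lock, flags);
	/* ... guarded body, e.g. peek at or modify sched->msgs ... */
	spin_unlock_irqrestore(&sched->msg_lock, flags);

with the unlock also happening automatically if the guarded scope is left
early (e.g. via return).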

v2:
 - Fix macro warnings (CI)
 - Rename macro to use the 'scoped_guard' naming

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
 drivers/gpu/drm/xe/xe_gpu_scheduler.c | 28 +++++++++++++--------------
 drivers/gpu/drm/xe/xe_gpu_scheduler.h | 17 ++++++++--------
 drivers/gpu/drm/xe/xe_guc_submit.c    | 23 ++++++++++------------
 3 files changed, 32 insertions(+), 36 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.c b/drivers/gpu/drm/xe/xe_gpu_scheduler.c
index f4f23317191f..8ea5480a517d 100644
--- a/drivers/gpu/drm/xe/xe_gpu_scheduler.c
+++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.c
@@ -15,11 +15,12 @@ static void xe_sched_process_msg_queue_if_ready(struct xe_gpu_scheduler *sched)
 {
 	struct xe_sched_msg *msg;
 
-	xe_sched_msg_lock(sched);
-	msg = list_first_entry_or_null(&sched->msgs, struct xe_sched_msg, link);
-	if (msg)
-		xe_sched_process_msg_queue(sched);
-	xe_sched_msg_unlock(sched);
+	xe_sched_msg_scoped_guard(sched) {
+		msg = list_first_entry_or_null(&sched->msgs,
+					       struct xe_sched_msg, link);
+		if (msg)
+			xe_sched_process_msg_queue(sched);
+	}
 }
 
 static struct xe_sched_msg *
@@ -27,12 +28,12 @@ xe_sched_get_msg(struct xe_gpu_scheduler *sched)
 {
 	struct xe_sched_msg *msg;
 
-	xe_sched_msg_lock(sched);
-	msg = list_first_entry_or_null(&sched->msgs,
-				       struct xe_sched_msg, link);
-	if (msg)
-		list_del_init(&msg->link);
-	xe_sched_msg_unlock(sched);
+	xe_sched_msg_scoped_guard(sched) {
+		msg = list_first_entry_or_null(&sched->msgs,
+					       struct xe_sched_msg, link);
+		if (msg)
+			list_del_init(&msg->link);
+	}
 
 	return msg;
 }
@@ -110,9 +111,8 @@ void xe_sched_submission_resume_tdr(struct xe_gpu_scheduler *sched)
 void xe_sched_add_msg(struct xe_gpu_scheduler *sched,
 		      struct xe_sched_msg *msg)
 {
-	xe_sched_msg_lock(sched);
-	xe_sched_add_msg_locked(sched, msg);
-	xe_sched_msg_unlock(sched);
+	xe_sched_msg_scoped_guard(sched)
+		xe_sched_add_msg_locked(sched, msg);
 }
 
 void xe_sched_add_msg_locked(struct xe_gpu_scheduler *sched,
diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.h b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
index dceb2cd0ee5b..269508c62b8c 100644
--- a/drivers/gpu/drm/xe/xe_gpu_scheduler.h
+++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
@@ -31,15 +31,14 @@ void xe_sched_add_msg_locked(struct xe_gpu_scheduler *sched,
 void xe_sched_add_msg_head(struct xe_gpu_scheduler *sched,
 			   struct xe_sched_msg *msg);
 
-static inline void xe_sched_msg_lock(struct xe_gpu_scheduler *sched)
-{
-	spin_lock(&sched->msg_lock);
-}
-
-static inline void xe_sched_msg_unlock(struct xe_gpu_scheduler *sched)
-{
-	spin_unlock(&sched->msg_lock);
-}
+/**
+ * xe_sched_msg_scoped_guard() - Scoped guard for scheduler message lock
+ * @__sched: xe_gpu_scheduler object
+ *
+ * IRQ-safe scoped guard for scheduler message lock
+ */
+#define xe_sched_msg_scoped_guard(__sched)	\
+	scoped_guard(spinlock_irqsave, &(__sched)->msg_lock)
 
 static inline void xe_sched_stop(struct xe_gpu_scheduler *sched)
 {
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 7a4218f76024..07ffab338e4a 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -2329,10 +2329,10 @@ static int guc_exec_queue_suspend(struct xe_exec_queue *q)
 	if (exec_queue_killed_or_banned_or_wedged(q))
 		return -EINVAL;
 
-	xe_sched_msg_lock(sched);
-	if (guc_exec_queue_try_add_msg(q, msg, SUSPEND))
-		q->guc->suspend_pending = true;
-	xe_sched_msg_unlock(sched);
+	xe_sched_msg_scoped_guard(sched) {
+		if (guc_exec_queue_try_add_msg(q, msg, SUSPEND))
+			q->guc->suspend_pending = true;
+	}
 
 	return 0;
 }
@@ -2388,9 +2388,8 @@ static void guc_exec_queue_resume(struct xe_exec_queue *q)
 
 	xe_gt_assert(guc_to_gt(guc), !q->guc->suspend_pending);
 
-	xe_sched_msg_lock(sched);
-	guc_exec_queue_try_add_msg(q, msg, RESUME);
-	xe_sched_msg_unlock(sched);
+	xe_sched_msg_scoped_guard(sched)
+		guc_exec_queue_try_add_msg(q, msg, RESUME);
 }
 
 static bool guc_exec_queue_reset_status(struct xe_exec_queue *q)
@@ -2810,9 +2809,8 @@ static void guc_exec_queue_replay_pending_state_change(struct xe_exec_queue *q)
 	if (q->guc->needs_suspend) {
 		msg = q->guc->static_msgs + STATIC_MSG_SUSPEND;
 
-		xe_sched_msg_lock(sched);
-		guc_exec_queue_try_add_msg_head(q, msg, SUSPEND);
-		xe_sched_msg_unlock(sched);
+		xe_sched_msg_scoped_guard(sched)
+			guc_exec_queue_try_add_msg_head(q, msg, SUSPEND);
 
 		q->guc->needs_suspend = false;
 	}
@@ -2825,9 +2823,8 @@ static void guc_exec_queue_replay_pending_state_change(struct xe_exec_queue *q)
 	if (q->guc->needs_resume) {
 		msg = q->guc->static_msgs + STATIC_MSG_RESUME;
 
-		xe_sched_msg_lock(sched);
-		guc_exec_queue_try_add_msg_head(q, msg, RESUME);
-		xe_sched_msg_unlock(sched);
+		xe_sched_msg_scoped_guard(sched)
+			guc_exec_queue_try_add_msg_head(q, msg, RESUME);
 
 		q->guc->needs_resume = false;
 	}
-- 
2.34.1



Thread overview: 33+ messages
2026-01-05  4:02 [PATCH v2 00/22] Fence deadlines in Xe Matthew Brost
2026-01-05  4:02 ` [PATCH v2 01/22] drm/xe: Add dedicated message lock Matthew Brost
2026-01-05  4:02 ` [PATCH v2 02/22] drm/xe: Add EXEC_QUEUE_FLAG_CAP_SYS_NICE Matthew Brost
2026-02-05 16:00   ` Rodrigo Vivi
2026-01-05  4:02 ` [PATCH v2 03/22] drm/xe: Store exec queue in hardware fence Matthew Brost
2026-02-05 16:02   ` Rodrigo Vivi
2026-01-05  4:02 ` [PATCH v2 04/22] drm/xe: Add deadline exec queue vfuncs Matthew Brost
2026-02-05 16:03   ` Rodrigo Vivi
2026-01-05  4:02 ` [PATCH v2 05/22] drm/xe: Export to_xe_hw_fence Matthew Brost
2026-01-05  4:02 ` [PATCH v2 06/22] drm/xe: Export xe_hw_fence_signaled Matthew Brost
2026-01-05  4:02 ` [PATCH v2 07/22] drm/xe: Implement deadline manager Matthew Brost
2026-01-05  4:02 ` [PATCH v2 08/22] drm/xe: Initialize deadline manager on exec queues Matthew Brost
2026-01-05  4:02 ` [PATCH v2 09/22] drm/xe: Stub out execlists deadline vfuncs as NOPs Matthew Brost
2026-01-05  4:02 ` Matthew Brost [this message]
2026-01-05  4:02 ` [PATCH v2 11/22] drm/xe: Support unstable opcodes for static scheduler messages Matthew Brost
2026-01-05  4:02 ` [PATCH v2 12/22] drm/xe: Implement GuC submission backend ops for deadlines Matthew Brost
2026-01-10 10:48   ` kernel test robot
2026-01-05  4:02 ` [PATCH v2 13/22] drm/xe: Enable deadlines on hardware fences Matthew Brost
2026-01-05  4:02 ` [PATCH v2 14/22] drm/xe: Fix Kconfig.profile newlines Matthew Brost
2026-02-05 16:06   ` Rodrigo Vivi
2026-01-05  4:02 ` [PATCH v2 15/22] drm/xe: Add deadline Kconfig options Matthew Brost
2026-01-05  4:02 ` [PATCH v2 16/22] drm/xe: Add exec queue deadline trace points Matthew Brost
2026-01-05  4:02 ` [PATCH v2 17/22] drm/xe: Add hw fence " Matthew Brost
2026-01-05  4:02 ` [PATCH v2 18/22] drm/xe: Add timestamp_ms to LRC snapshot Matthew Brost
2026-01-05  4:02 ` [PATCH v2 19/22] drm/xe: Enforce GuC static message defines Matthew Brost
2026-01-05  4:02 ` [PATCH v2 20/22] drm/xe: Document the deadline manager Matthew Brost
2026-01-05  4:02 ` [PATCH v2 21/22] drm/atomic: Export fence deadline helper for atomic commits Matthew Brost
2026-01-05  4:02 ` [PATCH v2 22/22] drm/i915/display: Use atomic helper to set plane fence deadlines Matthew Brost
2026-01-05  4:09 ` ✗ CI.checkpatch: warning for Fence deadlines in Xe (rev2) Patchwork
2026-01-05  4:10 ` ✓ CI.KUnit: success " Patchwork
2026-01-05  4:26 ` ✗ CI.checksparse: warning " Patchwork
2026-01-05  5:07 ` ✓ Xe.CI.BAT: success " Patchwork
2026-01-05  6:51 ` ✗ Xe.CI.Full: failure " Patchwork
