Intel-XE Archive on lore.kernel.org
 help / color / mirror / Atom feed
From: Matt Roper <matthew.d.roper@intel.com>
To: intel-xe@lists.freedesktop.org
Cc: matthew.d.roper@intel.com, Gustavo Sousa <gustavo.sousa@intel.com>
Subject: [PATCH v3 05/27] drm/xe/guc: Use scope-based cleanup
Date: Fri, 14 Nov 2025 13:43:41 -0800	[thread overview]
Message-ID: <20251114214335.2388972-34-matthew.d.roper@intel.com> (raw)
In-Reply-To: <20251114214335.2388972-29-matthew.d.roper@intel.com>

Use scope-based cleanup for forcewake and runtime PM.

Reviewed-by: Gustavo Sousa <gustavo.sousa@intel.com>
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
---
 drivers/gpu/drm/xe/xe_guc.c           | 13 ++++---------
 drivers/gpu/drm/xe/xe_guc_log.c       | 10 ++++------
 drivers/gpu/drm/xe/xe_guc_submit.c    | 11 +++--------
 drivers/gpu/drm/xe/xe_guc_tlb_inval.c |  4 +---
 4 files changed, 12 insertions(+), 26 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index a686b04879d6..cf92de1c88a7 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -660,11 +660,9 @@ static void guc_fini_hw(void *arg)
 {
 	struct xe_guc *guc = arg;
 	struct xe_gt *gt = guc_to_gt(guc);
-	unsigned int fw_ref;
 
-	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
-	xe_uc_sanitize_reset(&guc_to_gt(guc)->uc);
-	xe_force_wake_put(gt_to_fw(gt), fw_ref);
+	xe_with_force_wake(fw_ref, gt_to_fw(gt), XE_FORCEWAKE_ALL)
+		xe_uc_sanitize_reset(&guc_to_gt(guc)->uc);
 
 	guc_g2g_fini(guc);
 }
@@ -1621,15 +1619,14 @@ int xe_guc_start(struct xe_guc *guc)
 void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p)
 {
 	struct xe_gt *gt = guc_to_gt(guc);
-	unsigned int fw_ref;
 	u32 status;
 	int i;
 
 	xe_uc_fw_print(&guc->fw, p);
 
 	if (!IS_SRIOV_VF(gt_to_xe(gt))) {
-		fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
-		if (!fw_ref)
+		CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
+		if (!fw_ref.domains)
 			return;
 
 		status = xe_mmio_read32(&gt->mmio, GUC_STATUS);
@@ -1649,8 +1646,6 @@ void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p)
 			drm_printf(p, "\t%2d: \t0x%x\n",
 				   i, xe_mmio_read32(&gt->mmio, SOFT_SCRATCH(i)));
 		}
-
-		xe_force_wake_put(gt_to_fw(gt), fw_ref);
 	}
 
 	drm_puts(p, "\n");
diff --git a/drivers/gpu/drm/xe/xe_guc_log.c b/drivers/gpu/drm/xe/xe_guc_log.c
index c01ccb35dc75..0c704a11078a 100644
--- a/drivers/gpu/drm/xe/xe_guc_log.c
+++ b/drivers/gpu/drm/xe/xe_guc_log.c
@@ -145,7 +145,6 @@ struct xe_guc_log_snapshot *xe_guc_log_snapshot_capture(struct xe_guc_log *log,
 	struct xe_device *xe = log_to_xe(log);
 	struct xe_guc *guc = log_to_guc(log);
 	struct xe_gt *gt = log_to_gt(log);
-	unsigned int fw_ref;
 	size_t remain;
 	int i;
 
@@ -165,13 +164,12 @@ struct xe_guc_log_snapshot *xe_guc_log_snapshot_capture(struct xe_guc_log *log,
 		remain -= size;
 	}
 
-	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
-	if (!fw_ref) {
+	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
+	if (!fw_ref.domains)
 		snapshot->stamp = ~0ULL;
-	} else {
+	else
 		snapshot->stamp = xe_mmio_read64_2x32(&gt->mmio, GUC_PMTIMESTAMP_LO);
-		xe_force_wake_put(gt_to_fw(gt), fw_ref);
-	}
+
 	snapshot->ktime = ktime_get_boottime_ns();
 	snapshot->level = log->level;
 	snapshot->ver_found = guc->fw.versions.found[XE_UC_FW_VER_RELEASE];
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index d4ffdb71ef3d..7e0882074a99 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -1225,7 +1225,6 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
 	struct xe_guc *guc = exec_queue_to_guc(q);
 	const char *process_name = "no process";
 	struct xe_device *xe = guc_to_xe(guc);
-	unsigned int fw_ref;
 	int err = -ETIME;
 	pid_t pid = -1;
 	int i = 0;
@@ -1258,13 +1257,11 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
 	if (!exec_queue_killed(q) && !xe->devcoredump.captured &&
 	    !xe_guc_capture_get_matching_and_lock(q)) {
 		/* take force wake before engine register manual capture */
-		fw_ref = xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
-		if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
+		CLASS(xe_force_wake, fw_ref)(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
+		if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FORCEWAKE_ALL))
 			xe_gt_info(q->gt, "failed to get forcewake for coredump capture\n");
 
 		xe_engine_snapshot_capture_for_queue(q);
-
-		xe_force_wake_put(gt_to_fw(q->gt), fw_ref);
 	}
 
 	/*
@@ -1455,7 +1452,7 @@ static void __guc_exec_queue_destroy_async(struct work_struct *w)
 	struct xe_exec_queue *q = ge->q;
 	struct xe_guc *guc = exec_queue_to_guc(q);
 
-	xe_pm_runtime_get(guc_to_xe(guc));
+	guard(xe_pm_runtime)(guc_to_xe(guc));
 	trace_xe_exec_queue_destroy(q);
 
 	if (xe_exec_queue_is_lr(q))
@@ -1464,8 +1461,6 @@ static void __guc_exec_queue_destroy_async(struct work_struct *w)
 	cancel_delayed_work_sync(&ge->sched.base.work_tdr);
 
 	xe_exec_queue_fini(q);
-
-	xe_pm_runtime_put(guc_to_xe(guc));
 }
 
 static void guc_exec_queue_destroy_async(struct xe_exec_queue *q)
diff --git a/drivers/gpu/drm/xe/xe_guc_tlb_inval.c b/drivers/gpu/drm/xe/xe_guc_tlb_inval.c
index a80175c7c478..848d3493df10 100644
--- a/drivers/gpu/drm/xe/xe_guc_tlb_inval.c
+++ b/drivers/gpu/drm/xe/xe_guc_tlb_inval.c
@@ -71,12 +71,11 @@ static int send_tlb_inval_ggtt(struct xe_tlb_inval *tlb_inval, u32 seqno)
 		return send_tlb_inval(guc, action, ARRAY_SIZE(action));
 	} else if (xe_device_uc_enabled(xe) && !xe_device_wedged(xe)) {
 		struct xe_mmio *mmio = &gt->mmio;
-		unsigned int fw_ref;
 
 		if (IS_SRIOV_VF(xe))
 			return -ECANCELED;
 
-		fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+		CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
 		if (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20) {
 			xe_mmio_write32(mmio, PVC_GUC_TLB_INV_DESC1,
 					PVC_GUC_TLB_INV_DESC1_INVALIDATE);
@@ -86,7 +85,6 @@ static int send_tlb_inval_ggtt(struct xe_tlb_inval *tlb_inval, u32 seqno)
 			xe_mmio_write32(mmio, GUC_TLB_INV_CR,
 					GUC_TLB_INV_CR_INVALIDATE);
 		}
-		xe_force_wake_put(gt_to_fw(gt), fw_ref);
 	}
 
 	return -ECANCELED;
-- 
2.51.1


  parent reply	other threads:[~2025-11-14 21:43 UTC|newest]

Thread overview: 39+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-11-14 21:43 [PATCH v3 00/27] Scope-based forcewake and runtime PM Matt Roper
2025-11-14 21:43 ` [PATCH v3 01/27] drm/xe/forcewake: Add scope-based cleanup for forcewake Matt Roper
2025-11-17 22:03   ` Gustavo Sousa
2025-11-17 22:17     ` Gustavo Sousa
2025-11-14 21:43 ` [PATCH v3 02/27] drm/xe/pm: Add scope-based cleanup helper for runtime PM Matt Roper
2025-11-17 22:04   ` Gustavo Sousa
2025-11-14 21:43 ` [PATCH v3 03/27] drm/xe/gt: Use scope-based cleanup Matt Roper
2025-11-14 21:43 ` [PATCH v3 04/27] drm/xe/gt_idle: " Matt Roper
2025-11-14 21:43 ` Matt Roper [this message]
2025-11-14 21:43 ` [PATCH v3 06/27] drm/xe/guc_pc: " Matt Roper
2025-11-14 21:43 ` [PATCH v3 07/27] drm/xe/mocs: " Matt Roper
2025-11-14 21:43 ` [PATCH v3 08/27] drm/xe/pat: Use scope-based forcewake Matt Roper
2025-11-14 21:43 ` [PATCH v3 09/27] drm/xe/pxp: Use scope-based cleanup Matt Roper
2025-11-14 21:43 ` [PATCH v3 10/27] drm/xe/gsc: " Matt Roper
2025-11-14 21:43 ` [PATCH v3 11/27] drm/xe/device: " Matt Roper
2025-11-14 21:43 ` [PATCH v3 12/27] drm/xe/devcoredump: " Matt Roper
2025-11-17 22:09   ` Gustavo Sousa
2025-11-14 21:43 ` [PATCH v3 13/27] drm/xe/display: Use scoped-cleanup Matt Roper
2025-11-17 22:11   ` Gustavo Sousa
2025-11-14 21:43 ` [PATCH v3 14/27] drm/xe: Return forcewake reference type from force_wake_get_any_engine() Matt Roper
2025-11-17 22:19   ` Gustavo Sousa
2025-11-14 21:43 ` [PATCH v3 15/27] drm/xe/drm_client: Use scope-based cleanup Matt Roper
2025-11-17 22:28   ` Gustavo Sousa
2025-11-14 21:43 ` [PATCH v3 16/27] drm/xe/gt_debugfs: " Matt Roper
2025-11-14 21:43 ` [PATCH v3 17/27] drm/xe/huc: Use scope-based forcewake Matt Roper
2025-11-14 21:43 ` [PATCH v3 18/27] drm/xe/query: " Matt Roper
2025-11-14 21:43 ` [PATCH v3 19/27] drm/xe/reg_sr: " Matt Roper
2025-11-14 21:43 ` [PATCH v3 20/27] drm/xe/vram: " Matt Roper
2025-11-14 21:43 ` [PATCH v3 21/27] drm/xe/bo: Use scope-based runtime PM Matt Roper
2025-11-14 21:43 ` [PATCH v3 22/27] drm/xe/ggtt: Use scope-based runtime pm Matt Roper
2025-11-14 21:43 ` [PATCH v3 23/27] drm/xe/hwmon: Use scope-based runtime PM Matt Roper
2025-11-14 21:44 ` [PATCH v3 24/27] drm/xe/sriov: " Matt Roper
2025-11-14 21:44 ` [PATCH v3 25/27] drm/xe/tests: " Matt Roper
2025-11-14 21:44 ` [PATCH v3 26/27] drm/xe/sysfs: Use scope-based runtime power management Matt Roper
2025-11-14 21:44 ` [PATCH v3 27/27] drm/xe/debugfs: Use scope-based runtime PM Matt Roper
2025-11-14 23:22 ` ✗ CI.checkpatch: warning for Scope-based forcewake and runtime PM (rev4) Patchwork
2025-11-14 23:23 ` ✓ CI.KUnit: success " Patchwork
2025-11-15  0:14 ` ✓ Xe.CI.BAT: " Patchwork
2025-11-15 11:18 ` ✗ Xe.CI.Full: failure " Patchwork

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20251114214335.2388972-34-matthew.d.roper@intel.com \
    --to=matthew.d.roper@intel.com \
    --cc=gustavo.sousa@intel.com \
    --cc=intel-xe@lists.freedesktop.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox; see the mirroring instructions
for how to clone and mirror all data and code used for this inbox.