From: Matt Roper <matthew.d.roper@intel.com>
To: intel-xe@lists.freedesktop.org
Cc: matthew.d.roper@intel.com
Subject: [CI 03/27] drm/xe/gt: Use scope-based cleanup
Date: Tue, 18 Nov 2025 08:43:42 -0800 [thread overview]
Message-ID: <20251118164338.3572146-32-matthew.d.roper@intel.com> (raw)
In-Reply-To: <20251118164338.3572146-29-matthew.d.roper@intel.com>
Using scope-based cleanup for forcewake and runtime PM allows us to
reduce or eliminate some of the goto-based error handling and simplify
several functions.
v2:
- Drop changes to do_gt_restart(). This function still has goto-based
logic, making scope-based cleanup unsafe for now. (Gustavo)
Reviewed-by: Gustavo Sousa <gustavo.sousa@intel.com>
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
---
drivers/gpu/drm/xe/xe_gt.c | 130 ++++++++++++-------------------------
1 file changed, 41 insertions(+), 89 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index dbb5e7a9bc6a..86e7b35ef1b4 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -103,14 +103,13 @@ void xe_gt_sanitize(struct xe_gt *gt)
static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
{
- unsigned int fw_ref;
u32 reg;
if (!XE_GT_WA(gt, 16023588340))
return;
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (!fw_ref)
+ CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref.domains)
return;
if (xe_gt_is_main_type(gt)) {
@@ -120,12 +119,10 @@ static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
}
xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0xF);
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
static void xe_gt_disable_host_l2_vram(struct xe_gt *gt)
{
- unsigned int fw_ref;
u32 reg;
if (!XE_GT_WA(gt, 16023588340))
@@ -134,15 +131,13 @@ static void xe_gt_disable_host_l2_vram(struct xe_gt *gt)
if (xe_gt_is_media_type(gt))
return;
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (!fw_ref)
+ CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref.domains)
return;
reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
reg &= ~CG_DIS_CNTLBUS;
xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);
-
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
static void gt_reset_worker(struct work_struct *w);
@@ -389,7 +384,6 @@ int xe_gt_record_default_lrcs(struct xe_gt *gt)
int xe_gt_init_early(struct xe_gt *gt)
{
- unsigned int fw_ref;
int err;
if (IS_SRIOV_PF(gt_to_xe(gt))) {
@@ -436,13 +430,12 @@ int xe_gt_init_early(struct xe_gt *gt)
if (err)
return err;
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (!fw_ref)
+ CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref.domains)
return -ETIMEDOUT;
xe_gt_mcr_init_early(gt);
xe_pat_init(gt);
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
return 0;
}
@@ -460,16 +453,15 @@ static void dump_pat_on_error(struct xe_gt *gt)
static int gt_init_with_gt_forcewake(struct xe_gt *gt)
{
- unsigned int fw_ref;
int err;
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (!fw_ref)
+ CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref.domains)
return -ETIMEDOUT;
err = xe_uc_init(&gt->uc);
if (err)
- goto err_force_wake;
+ return err;
xe_gt_topology_init(gt);
xe_gt_mcr_init(gt);
@@ -478,7 +470,7 @@ static int gt_init_with_gt_forcewake(struct xe_gt *gt)
if (xe_gt_is_main_type(gt)) {
err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt);
if (err)
- goto err_force_wake;
+ return err;
if (IS_SRIOV_PF(gt_to_xe(gt)))
xe_lmtt_init(&gt_to_tile(gt)->sriov.pf.lmtt);
}
@@ -492,17 +484,17 @@ static int gt_init_with_gt_forcewake(struct xe_gt *gt)
err = xe_hw_engines_init_early(gt);
if (err) {
dump_pat_on_error(gt);
- goto err_force_wake;
+ return err;
}
err = xe_hw_engine_class_sysfs_init(gt);
if (err)
- goto err_force_wake;
+ return err;
/* Initialize CCS mode sysfs after early initialization of HW engines */
err = xe_gt_ccs_mode_sysfs_init(gt);
if (err)
- goto err_force_wake;
+ return err;
/*
* Stash hardware-reported version. Since this register does not exist
@@ -510,25 +502,16 @@ static int gt_init_with_gt_forcewake(struct xe_gt *gt)
*/
gt->info.gmdid = xe_mmio_read32(&gt->mmio, GMD_ID);
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
return 0;
-
-err_force_wake:
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
-
- return err;
}
static int gt_init_with_all_forcewake(struct xe_gt *gt)
{
- unsigned int fw_ref;
int err;
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
- err = -ETIMEDOUT;
- goto err_force_wake;
- }
+ CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FORCEWAKE_ALL))
+ return -ETIMEDOUT;
xe_gt_mcr_set_implicit_defaults(gt);
xe_wa_process_gt(gt);
@@ -537,20 +520,20 @@ static int gt_init_with_all_forcewake(struct xe_gt *gt)
err = xe_gt_clock_init(gt);
if (err)
- goto err_force_wake;
+ return err;
xe_mocs_init(gt);
err = xe_execlist_init(gt);
if (err)
- goto err_force_wake;
+ return err;
err = xe_hw_engines_init(gt);
if (err)
- goto err_force_wake;
+ return err;
err = xe_uc_init_post_hwconfig(&gt->uc);
if (err)
- goto err_force_wake;
+ return err;
if (xe_gt_is_main_type(gt)) {
/*
@@ -561,10 +544,8 @@ static int gt_init_with_all_forcewake(struct xe_gt *gt)
gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt),
IS_DGFX(xe) ? SZ_1M : SZ_512K, 16);
- if (IS_ERR(gt->usm.bb_pool)) {
- err = PTR_ERR(gt->usm.bb_pool);
- goto err_force_wake;
- }
+ if (IS_ERR(gt->usm.bb_pool))
+ return PTR_ERR(gt->usm.bb_pool);
}
}
@@ -573,12 +554,12 @@ static int gt_init_with_all_forcewake(struct xe_gt *gt)
err = xe_migrate_init(tile->migrate);
if (err)
- goto err_force_wake;
+ return err;
}
err = xe_uc_load_hw(&gt->uc);
if (err)
- goto err_force_wake;
+ return err;
/* Configure default CCS mode of 1 engine with all resources */
if (xe_gt_ccs_mode_enabled(gt)) {
@@ -592,14 +573,7 @@ static int gt_init_with_all_forcewake(struct xe_gt *gt)
if (IS_SRIOV_PF(gt_to_xe(gt)))
xe_gt_sriov_pf_init_hw(gt);
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
-
return 0;
-
-err_force_wake:
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
-
- return err;
}
static void xe_gt_fini(void *arg)
@@ -901,56 +875,42 @@ void xe_gt_reset_async(struct xe_gt *gt)
void xe_gt_suspend_prepare(struct xe_gt *gt)
{
- unsigned int fw_ref;
-
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
-
+ CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FORCEWAKE_ALL);
xe_uc_suspend_prepare(&gt->uc);
-
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
int xe_gt_suspend(struct xe_gt *gt)
{
- unsigned int fw_ref;
int err;
xe_gt_dbg(gt, "suspending\n");
xe_gt_sanitize(gt);
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
- goto err_msg;
+ CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FORCEWAKE_ALL)) {
+ xe_gt_err(gt, "suspend failed (%pe)\n", ERR_PTR(-ETIMEDOUT));
+ return -ETIMEDOUT;
+ }
err = xe_uc_suspend(&gt->uc);
- if (err)
- goto err_force_wake;
+ if (err) {
+ xe_gt_err(gt, "suspend failed (%pe)\n", ERR_PTR(err));
+ return err;
+ }
xe_gt_idle_disable_pg(gt);
xe_gt_disable_host_l2_vram(gt);
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
xe_gt_dbg(gt, "suspended\n");
return 0;
-
-err_msg:
- err = -ETIMEDOUT;
-err_force_wake:
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
- xe_gt_err(gt, "suspend failed (%pe)\n", ERR_PTR(err));
-
- return err;
}
void xe_gt_shutdown(struct xe_gt *gt)
{
- unsigned int fw_ref;
-
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FORCEWAKE_ALL);
do_gt_reset(gt);
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
/**
@@ -975,32 +935,24 @@ int xe_gt_sanitize_freq(struct xe_gt *gt)
int xe_gt_resume(struct xe_gt *gt)
{
- unsigned int fw_ref;
int err;
xe_gt_dbg(gt, "resuming\n");
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
- goto err_msg;
+ CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FORCEWAKE_ALL)) {
+ xe_gt_err(gt, "resume failed (%pe)\n", ERR_PTR(-ETIMEDOUT));
+ return -ETIMEDOUT;
+ }
err = do_gt_restart(gt);
if (err)
- goto err_force_wake;
+ return err;
xe_gt_idle_enable_pg(gt);
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
xe_gt_dbg(gt, "resumed\n");
return 0;
-
-err_msg:
- err = -ETIMEDOUT;
-err_force_wake:
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
- xe_gt_err(gt, "resume failed (%pe)\n", ERR_PTR(err));
-
- return err;
}
struct xe_hw_engine *xe_gt_hw_engine(struct xe_gt *gt,
--
2.51.1
next prev parent reply other threads:[~2025-11-18 16:44 UTC|newest]
Thread overview: 36+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-11-18 16:43 [CI 00/27] Scope-based forcewake and runtime PM Matt Roper
2025-11-18 16:43 ` [CI 01/27] drm/xe/forcewake: Add scope-based cleanup for forcewake Matt Roper
2025-11-18 16:43 ` [CI 02/27] drm/xe/pm: Add scope-based cleanup helper for runtime PM Matt Roper
2025-11-18 16:43 ` Matt Roper [this message]
2025-11-18 16:43 ` [CI 04/27] drm/xe/gt_idle: Use scope-based cleanup Matt Roper
2025-11-18 16:43 ` [CI 05/27] drm/xe/guc: " Matt Roper
2025-11-18 16:43 ` [CI 06/27] drm/xe/guc_pc: " Matt Roper
2025-11-18 16:43 ` [CI 07/27] drm/xe/mocs: " Matt Roper
2025-11-18 16:43 ` [CI 08/27] drm/xe/pat: Use scope-based forcewake Matt Roper
2025-11-18 16:43 ` [CI 09/27] drm/xe/pxp: Use scope-based cleanup Matt Roper
2025-11-18 16:43 ` [CI 10/27] drm/xe/gsc: " Matt Roper
2025-11-18 16:43 ` [CI 11/27] drm/xe/device: " Matt Roper
2025-11-18 16:43 ` [CI 12/27] drm/xe/devcoredump: " Matt Roper
2025-11-18 16:43 ` [CI 13/27] drm/xe/display: Use scoped-cleanup Matt Roper
2025-11-18 16:43 ` [CI 14/27] drm/xe: Return forcewake reference type from force_wake_get_any_engine() Matt Roper
2025-11-18 16:43 ` [CI 15/27] drm/xe/drm_client: Use scope-based cleanup Matt Roper
2025-11-18 16:43 ` [CI 16/27] drm/xe/gt_debugfs: " Matt Roper
2025-11-18 16:43 ` [CI 17/27] drm/xe/huc: Use scope-based forcewake Matt Roper
2025-11-18 16:43 ` [CI 18/27] drm/xe/query: " Matt Roper
2025-11-18 16:43 ` [CI 19/27] drm/xe/reg_sr: " Matt Roper
2025-11-18 16:43 ` [CI 20/27] drm/xe/vram: " Matt Roper
2025-11-18 16:44 ` [CI 21/27] drm/xe/bo: Use scope-based runtime PM Matt Roper
2025-11-18 16:44 ` [CI 22/27] drm/xe/ggtt: Use scope-based runtime pm Matt Roper
2025-11-18 16:44 ` [CI 23/27] drm/xe/hwmon: Use scope-based runtime PM Matt Roper
2025-11-18 16:44 ` [CI 24/27] drm/xe/sriov: " Matt Roper
2025-11-18 16:44 ` [CI 25/27] drm/xe/tests: " Matt Roper
2025-11-18 16:44 ` [CI 26/27] drm/xe/sysfs: Use scope-based runtime power management Matt Roper
2025-11-18 16:44 ` [CI 27/27] drm/xe/debugfs: Use scope-based runtime PM Matt Roper
2025-11-18 18:08 ` ✗ CI.checkpatch: warning for Scope-based forcewake and runtime PM (rev5) Patchwork
2025-11-18 18:10 ` ✓ CI.KUnit: success " Patchwork
2025-11-18 18:47 ` ✓ Xe.CI.BAT: " Patchwork
2025-11-19 16:19 ` ✗ CI.checkpatch: warning for Scope-based forcewake and runtime PM (rev6) Patchwork
2025-11-19 16:20 ` ✓ CI.KUnit: success " Patchwork
2025-11-19 17:31 ` ✓ Xe.CI.BAT: " Patchwork
2025-11-19 19:57 ` ✓ Xe.CI.Full: " Patchwork
2025-11-19 20:06 ` Matt Roper
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20251118164338.3572146-32-matthew.d.roper@intel.com \
--to=matthew.d.roper@intel.com \
--cc=intel-xe@lists.freedesktop.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox