Intel-XE Archive on lore.kernel.org
 help / color / mirror / Atom feed
From: Gustavo Sousa <gustavo.sousa@intel.com>
To: Matt Roper <matthew.d.roper@intel.com>, <intel-xe@lists.freedesktop.org>
Cc: <matthew.d.roper@intel.com>
Subject: Re: [PATCH v2 15/30] drm/xe/devcoredump: Use scope-based cleanup
Date: Thu, 13 Nov 2025 11:14:04 -0300	[thread overview]
Message-ID: <176304324487.3698.16197271036502887154@intel.com> (raw)
In-Reply-To: <20251110232017.1475869-47-matthew.d.roper@intel.com>

Quoting Matt Roper (2025-11-10 20:20:33-03:00)
>Use scope-based cleanup for forcewake and runtime PM in the devcoredump
>code.  This eliminates some goto-based error handling and slightly
>simplifies other functions.
>
>Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
>---
> drivers/gpu/drm/xe/xe_devcoredump.c | 26 ++++++++++----------------
> 1 file changed, 10 insertions(+), 16 deletions(-)
>
>diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
>index 203e3038cc81..599c886c865b 100644
>--- a/drivers/gpu/drm/xe/xe_devcoredump.c
>+++ b/drivers/gpu/drm/xe/xe_devcoredump.c
>@@ -276,7 +276,6 @@ static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
>         struct xe_devcoredump_snapshot *ss = container_of(work, typeof(*ss), work);
>         struct xe_devcoredump *coredump = container_of(ss, typeof(*coredump), snapshot);
>         struct xe_device *xe = coredump_to_xe(coredump);
>-        unsigned int fw_ref;
> 
>         /*
>          * NB: Despite passing a GFP_ flags parameter here, more allocations are done
>@@ -287,15 +286,15 @@ static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
>                               xe_devcoredump_read, xe_devcoredump_free,
>                               XE_COREDUMP_TIMEOUT_JIFFIES);
> 
>-        xe_pm_runtime_get(xe);
>+        guard(xe_pm_runtime)(xe);
> 
>         /* keep going if fw fails as we still want to save the memory and SW data */
>-        fw_ref = xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL);
>-        if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
>-                xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n");
>-        xe_vm_snapshot_capture_delayed(ss->vm);
>-        xe_guc_exec_queue_snapshot_capture_delayed(ss->ge);
>-        xe_force_wake_put(gt_to_fw(ss->gt), fw_ref);
>+        xe_with_force_wake(fw_ref, gt_to_fw(ss->gt), XE_FORCEWAKE_ALL) {
>+                if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FORCEWAKE_ALL))
>+                        xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n");
>+                xe_vm_snapshot_capture_delayed(ss->vm);
>+                xe_guc_exec_queue_snapshot_capture_delayed(ss->ge);
>+        }
> 
>         ss->read.chunk_position = 0;
> 
>@@ -306,7 +305,7 @@ static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
>                 ss->read.buffer = kvmalloc(XE_DEVCOREDUMP_CHUNK_MAX,
>                                            GFP_USER);
>                 if (!ss->read.buffer)
>-                        goto put_pm;
>+                        return;
> 
>                 __xe_devcoredump_read(ss->read.buffer,
>                                       XE_DEVCOREDUMP_CHUNK_MAX,
>@@ -314,15 +313,12 @@ static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
>         } else {
>                 ss->read.buffer = kvmalloc(ss->read.size, GFP_USER);
>                 if (!ss->read.buffer)
>-                        goto put_pm;
>+                        return;
> 
>                 __xe_devcoredump_read(ss->read.buffer, ss->read.size, 0,
>                                       coredump);
>                 xe_devcoredump_snapshot_free(ss);
>         }
>-
>-put_pm:
>-        xe_pm_runtime_put(xe);
> }
> 
> static void devcoredump_snapshot(struct xe_devcoredump *coredump,
>@@ -332,7 +328,6 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
>         struct xe_devcoredump_snapshot *ss = &coredump->snapshot;
>         struct xe_guc *guc = exec_queue_to_guc(q);
>         const char *process_name = "no process";
>-        unsigned int fw_ref;
>         bool cookie;
> 
>         ss->snapshot_time = ktime_get_real();
>@@ -351,7 +346,7 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
>         cookie = dma_fence_begin_signalling();
> 
>         /* keep going if fw fails as we still want to save the memory and SW data */
>-        fw_ref = xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
>+        CLASS(xe_force_wake, fw_ref)(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);

Should we move this to happen before dma_fence_begin_signalling(), just
so we keep the LIFO cleanup order at the end of the function?

--
Gustavo Sousa

> 
>         ss->guc.log = xe_guc_log_snapshot_capture(&guc->log, true);
>         ss->guc.ct = xe_guc_ct_snapshot_capture(&guc->ct);
>@@ -364,7 +359,6 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
> 
>         queue_work(system_unbound_wq, &ss->work);
> 
>-        xe_force_wake_put(gt_to_fw(q->gt), fw_ref);
>         dma_fence_end_signalling(cookie);
> }
> 
>-- 
>2.51.1
>

  reply	other threads:[~2025-11-13 14:14 UTC|newest]

Thread overview: 74+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-11-10 23:20 [PATCH v2 00/30] Scope-based forcewake and runtime PM Matt Roper
2025-11-10 23:20 ` [PATCH v2 01/30] drm/xe/forcewake: Improve kerneldoc Matt Roper
2025-11-12 14:04   ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 02/30] drm/xe/eustall: Store forcewake reference in stream structure Matt Roper
2025-11-12 15:36   ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 03/30] drm/xe/oa: " Matt Roper
2025-11-12 16:11   ` Gustavo Sousa
2025-11-13 17:10   ` Dixit, Ashutosh
2025-11-10 23:20 ` [PATCH v2 04/30] drm/xe/forcewake: Add scope-based cleanup for forcewake Matt Roper
2025-11-12 20:00   ` Gustavo Sousa
2025-11-12 21:01     ` Matt Roper
2025-11-12 21:16     ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 05/30] drm/xe/pm: Add scope-based cleanup helper for runtime PM Matt Roper
2025-11-12 19:53   ` Michal Wajdeczko
2025-11-12 21:48     ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 06/30] drm/xe/gt: Use scope-based cleanup Matt Roper
2025-11-13 12:26   ` Gustavo Sousa
2025-11-13 22:58     ` Matt Roper
2025-11-10 23:20 ` [PATCH v2 07/30] drm/xe/gt_idle: " Matt Roper
2025-11-13 12:39   ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 08/30] drm/xe/guc: " Matt Roper
2025-11-13 12:46   ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 09/30] drm/xe/guc_pc: " Matt Roper
2025-11-13 13:00   ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 10/30] drm/xe/mocs: " Matt Roper
2025-11-13 13:30   ` Gustavo Sousa
2025-11-13 23:28     ` Matt Roper
2025-11-10 23:20 ` [PATCH v2 11/30] drm/xe/pat: Use scope-based forcewake Matt Roper
2025-11-13 13:37   ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 12/30] drm/xe/pxp: Use scope-based cleanup Matt Roper
2025-11-13 13:40   ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 13/30] drm/xe/gsc: " Matt Roper
2025-11-13 13:46   ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 14/30] drm/xe/device: " Matt Roper
2025-11-13 14:04   ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 15/30] drm/xe/devcoredump: " Matt Roper
2025-11-13 14:14   ` Gustavo Sousa [this message]
2025-11-10 23:20 ` [PATCH v2 16/30] drm/xe/display: Use scoped-cleanup Matt Roper
2025-11-13 14:25   ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 17/30] drm/xe: Create scoped cleanup class for force_wake_get_any_engine() Matt Roper
2025-11-13 17:39   ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 18/30] drm/xe/drm_client: Use scope-based cleanup Matt Roper
2025-11-10 23:20 ` [PATCH v2 19/30] drm/xe/gt_debugfs: " Matt Roper
2025-11-13 17:45   ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 20/30] drm/xe/huc: Use scope-based forcewake Matt Roper
2025-11-13 17:46   ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 21/30] drm/xe/query: " Matt Roper
2025-11-13 17:50   ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 22/30] drm/xe/reg_sr: " Matt Roper
2025-11-13 17:51   ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 23/30] drm/xe/vram: " Matt Roper
2025-11-10 23:57   ` [PATCH v2.1 " Matt Roper
2025-11-13 17:52     ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 24/30] drm/xe/bo: Use scope-based runtime PM Matt Roper
2025-11-13 17:54   ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 25/30] drm/xe/ggtt: Use scope-based runtime pm Matt Roper
2025-11-13 17:55   ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 26/30] drm/xe/hwmon: Use scope-based runtime PM Matt Roper
2025-11-13 18:01   ` Gustavo Sousa
2025-11-13 18:05     ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 27/30] drm/xe/sriov: " Matt Roper
2025-11-13 18:09   ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 28/30] drm/xe/tests: " Matt Roper
2025-11-13 18:15   ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 29/30] drm/xe/sysfs: Use scope-based runtime power management Matt Roper
2025-11-13 18:25   ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 30/30] drm/xe/debugfs: Use scope-based runtime PM Matt Roper
2025-11-13 18:30   ` Gustavo Sousa
2025-11-11  0:20 ` ✓ CI.KUnit: success for Scope-based forcewake and runtime PM (rev3) Patchwork
2025-11-11  0:57 ` ✓ Xe.CI.BAT: " Patchwork
2025-11-11 10:50 ` ✗ Xe.CI.Full: failure " Patchwork
2025-11-11 10:57 ` [PATCH v2 00/30] Scope-based forcewake and runtime PM Jani Nikula
2025-11-12 16:01   ` Matt Roper
2025-11-13 22:11 ` Matt Roper

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=176304324487.3698.16197271036502887154@intel.com \
    --to=gustavo.sousa@intel.com \
    --cc=intel-xe@lists.freedesktop.org \
    --cc=matthew.d.roper@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox; see the mirroring instructions
for how to clone and mirror all data and code used for this inbox.