From: Gustavo Sousa <gustavo.sousa@intel.com>
To: Matt Roper <matthew.d.roper@intel.com>, <intel-xe@lists.freedesktop.org>
Cc: <matthew.d.roper@intel.com>
Subject: Re: [PATCH v2 16/30] drm/xe/display: Use scoped-cleanup
Date: Thu, 13 Nov 2025 11:25:48 -0300 [thread overview]
Message-ID: <176304394887.3698.6245959522925522278@intel.com> (raw)
In-Reply-To: <20251110232017.1475869-48-matthew.d.roper@intel.com>
Quoting Matt Roper (2025-11-10 20:20:34-03:00)
>Eliminate some goto-based cleanup by utilizing scoped cleanup helpers.
>
>Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
>---
> drivers/gpu/drm/xe/display/xe_fb_pin.c | 23 +++++++++-------------
> drivers/gpu/drm/xe/display/xe_hdcp_gsc.c | 25 ++++++++----------------
> 2 files changed, 17 insertions(+), 31 deletions(-)
>
>diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c
>index 1fd4a815e784..6a935a75f2a4 100644
>--- a/drivers/gpu/drm/xe/display/xe_fb_pin.c
>+++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c
>@@ -210,10 +210,11 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
> /* TODO: Consider sharing framebuffer mapping?
> * embed i915_vma inside intel_framebuffer
> */
>- xe_pm_runtime_get_noresume(xe);
>- ret = mutex_lock_interruptible(&ggtt->lock);
>+ guard(xe_pm_runtime_noresume)(xe);
>+ ACQUIRE(mutex_intr, lock)(&ggtt->lock);
>+ ret = ACQUIRE_ERR(mutex_intr, &lock);
> if (ret)
>- goto out;
>+ return ret;
>
> align = XE_PAGE_SIZE;
> if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
>@@ -223,15 +224,13 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
> vma->node = bo->ggtt_node[tile0->id];
> } else if (view->type == I915_GTT_VIEW_NORMAL) {
> vma->node = xe_ggtt_node_init(ggtt);
>- if (IS_ERR(vma->node)) {
>- ret = PTR_ERR(vma->node);
>- goto out_unlock;
>- }
>+ if (IS_ERR(vma->node))
>+ return PTR_ERR(vma->node);
>
> ret = xe_ggtt_node_insert_locked(vma->node, xe_bo_size(bo), align, 0);
> if (ret) {
> xe_ggtt_node_fini(vma->node);
>- goto out_unlock;
>+ return ret;
> }
>
> xe_ggtt_map_bo(ggtt, vma->node, bo, xe->pat.idx[XE_CACHE_NONE]);
>@@ -245,13 +244,13 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
> vma->node = xe_ggtt_node_init(ggtt);
> if (IS_ERR(vma->node)) {
> ret = PTR_ERR(vma->node);
>- goto out_unlock;
>+ return ret;
> }
>
> ret = xe_ggtt_node_insert_locked(vma->node, size, align, 0);
> if (ret) {
> xe_ggtt_node_fini(vma->node);
>- goto out_unlock;
>+ return ret;
> }
>
> ggtt_ofs = vma->node->base.start;
>@@ -265,10 +264,6 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
> rot_info->plane[i].dst_stride);
> }
>
>-out_unlock:
>- mutex_unlock(&ggtt->lock);
>-out:
>- xe_pm_runtime_put(xe);
> return ret;
> }
>
>diff --git a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
>index 4ae847b628e2..084baddb160e 100644
>--- a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
>+++ b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
>@@ -37,7 +37,6 @@ bool intel_hdcp_gsc_check_status(struct drm_device *drm)
> struct xe_gt *gt = tile->media_gt;
> struct xe_gsc *gsc = &gt->uc.gsc;
> bool ret = true;
>- unsigned int fw_ref;
>
> if (!gsc || !xe_uc_fw_is_enabled(&gsc->fw)) {
> drm_dbg_kms(&xe->drm,
>@@ -45,21 +44,17 @@ bool intel_hdcp_gsc_check_status(struct drm_device *drm)
> return false;
> }
>
>- xe_pm_runtime_get(xe);
>- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
>- if (!fw_ref) {
>+ guard(xe_pm_runtime)(xe);
>+ CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GSC);
>+ if (!fw_ref.domains) {
> drm_dbg_kms(&xe->drm,
> "failed to get forcewake to check proxy status\n");
>- ret = false;
>- goto out;
>+ return false;
> }
>
> if (!xe_gsc_proxy_init_done(gsc))
> ret = false;
We don't need ret anymore, right?
I think we can just return xe_gsc_proxy_init_done(gsc).
>
>- xe_force_wake_put(gt_to_fw(gt), fw_ref);
>-out:
>- xe_pm_runtime_put(xe);
> return ret;
> }
>
>@@ -166,17 +161,15 @@ ssize_t intel_hdcp_gsc_msg_send(struct intel_hdcp_gsc_context *gsc_context,
> u32 addr_out_off, addr_in_wr_off = 0;
> int ret, tries = 0;
>
>- if (msg_in_len > max_msg_size || msg_out_len > max_msg_size) {
>- ret = -ENOSPC;
>- goto out;
>- }
>+ if (msg_in_len > max_msg_size || msg_out_len > max_msg_size)
>+ return -ENOSPC;
Huh.. Did we just fix a bug in this function?
It appears this function was putting without getting when the "if"
condition evaluates to true.
--
Gustavo Sousa
>
> msg_size_in = msg_in_len + HDCP_GSC_HEADER_SIZE;
> msg_size_out = msg_out_len + HDCP_GSC_HEADER_SIZE;
> addr_out_off = PAGE_SIZE;
>
> host_session_id = xe_gsc_create_host_session_id();
>- xe_pm_runtime_get_noresume(xe);
>+ guard(xe_pm_runtime_noresume)(xe);
> addr_in_wr_off = xe_gsc_emit_header(xe, &gsc_context->hdcp_bo->vmap,
> addr_in_wr_off, HECI_MEADDRESS_HDCP,
> host_session_id, msg_in_len);
>@@ -201,13 +194,11 @@ ssize_t intel_hdcp_gsc_msg_send(struct intel_hdcp_gsc_context *gsc_context,
> } while (++tries < 20);
>
> if (ret)
>- goto out;
>+ return ret;
>
> xe_map_memcpy_from(xe, msg_out, &gsc_context->hdcp_bo->vmap,
> addr_out_off + HDCP_GSC_HEADER_SIZE,
> msg_out_len);
>
>-out:
>- xe_pm_runtime_put(xe);
> return ret;
> }
>--
>2.51.1
>
next prev parent reply other threads:[~2025-11-13 14:26 UTC|newest]
Thread overview: 74+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-11-10 23:20 [PATCH v2 00/30] Scope-based forcewake and runtime PM Matt Roper
2025-11-10 23:20 ` [PATCH v2 01/30] drm/xe/forcewake: Improve kerneldoc Matt Roper
2025-11-12 14:04 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 02/30] drm/xe/eustall: Store forcewake reference in stream structure Matt Roper
2025-11-12 15:36 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 03/30] drm/xe/oa: " Matt Roper
2025-11-12 16:11 ` Gustavo Sousa
2025-11-13 17:10 ` Dixit, Ashutosh
2025-11-10 23:20 ` [PATCH v2 04/30] drm/xe/forcewake: Add scope-based cleanup for forcewake Matt Roper
2025-11-12 20:00 ` Gustavo Sousa
2025-11-12 21:01 ` Matt Roper
2025-11-12 21:16 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 05/30] drm/xe/pm: Add scope-based cleanup helper for runtime PM Matt Roper
2025-11-12 19:53 ` Michal Wajdeczko
2025-11-12 21:48 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 06/30] drm/xe/gt: Use scope-based cleanup Matt Roper
2025-11-13 12:26 ` Gustavo Sousa
2025-11-13 22:58 ` Matt Roper
2025-11-10 23:20 ` [PATCH v2 07/30] drm/xe/gt_idle: " Matt Roper
2025-11-13 12:39 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 08/30] drm/xe/guc: " Matt Roper
2025-11-13 12:46 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 09/30] drm/xe/guc_pc: " Matt Roper
2025-11-13 13:00 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 10/30] drm/xe/mocs: " Matt Roper
2025-11-13 13:30 ` Gustavo Sousa
2025-11-13 23:28 ` Matt Roper
2025-11-10 23:20 ` [PATCH v2 11/30] drm/xe/pat: Use scope-based forcewake Matt Roper
2025-11-13 13:37 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 12/30] drm/xe/pxp: Use scope-based cleanup Matt Roper
2025-11-13 13:40 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 13/30] drm/xe/gsc: " Matt Roper
2025-11-13 13:46 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 14/30] drm/xe/device: " Matt Roper
2025-11-13 14:04 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 15/30] drm/xe/devcoredump: " Matt Roper
2025-11-13 14:14 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 16/30] drm/xe/display: Use scoped-cleanup Matt Roper
2025-11-13 14:25 ` Gustavo Sousa [this message]
2025-11-10 23:20 ` [PATCH v2 17/30] drm/xe: Create scoped cleanup class for force_wake_get_any_engine() Matt Roper
2025-11-13 17:39 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 18/30] drm/xe/drm_client: Use scope-based cleanup Matt Roper
2025-11-10 23:20 ` [PATCH v2 19/30] drm/xe/gt_debugfs: " Matt Roper
2025-11-13 17:45 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 20/30] drm/xe/huc: Use scope-based forcewake Matt Roper
2025-11-13 17:46 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 21/30] drm/xe/query: " Matt Roper
2025-11-13 17:50 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 22/30] drm/xe/reg_sr: " Matt Roper
2025-11-13 17:51 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 23/30] drm/xe/vram: " Matt Roper
2025-11-10 23:57 ` [PATCH v2.1 " Matt Roper
2025-11-13 17:52 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 24/30] drm/xe/bo: Use scope-based runtime PM Matt Roper
2025-11-13 17:54 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 25/30] drm/xe/ggtt: Use scope-based runtime pm Matt Roper
2025-11-13 17:55 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 26/30] drm/xe/hwmon: Use scope-based runtime PM Matt Roper
2025-11-13 18:01 ` Gustavo Sousa
2025-11-13 18:05 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 27/30] drm/xe/sriov: " Matt Roper
2025-11-13 18:09 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 28/30] drm/xe/tests: " Matt Roper
2025-11-13 18:15 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 29/30] drm/xe/sysfs: Use scope-based runtime power management Matt Roper
2025-11-13 18:25 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 30/30] drm/xe/debugfs: Use scope-based runtime PM Matt Roper
2025-11-13 18:30 ` Gustavo Sousa
2025-11-11 0:20 ` ✓ CI.KUnit: success for Scope-based forcewake and runtime PM (rev3) Patchwork
2025-11-11 0:57 ` ✓ Xe.CI.BAT: " Patchwork
2025-11-11 10:50 ` ✗ Xe.CI.Full: failure " Patchwork
2025-11-11 10:57 ` [PATCH v2 00/30] Scope-based forcewake and runtime PM Jani Nikula
2025-11-12 16:01 ` Matt Roper
2025-11-13 22:11 ` Matt Roper
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=176304394887.3698.6245959522925522278@intel.com \
--to=gustavo.sousa@intel.com \
--cc=intel-xe@lists.freedesktop.org \
--cc=matthew.d.roper@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox