From: Gustavo Sousa <gustavo.sousa@intel.com>
To: Matt Roper <matthew.d.roper@intel.com>, <intel-xe@lists.freedesktop.org>
Cc: <matthew.d.roper@intel.com>
Subject: Re: [PATCH v3 13/27] drm/xe/display: Use scoped-cleanup
Date: Mon, 17 Nov 2025 19:11:39 -0300 [thread overview]
Message-ID: <176341749971.5989.17296693505198711220@intel.com> (raw)
In-Reply-To: <20251114214335.2388972-42-matthew.d.roper@intel.com>
Quoting Matt Roper (2025-11-14 18:43:49-03:00)
>Eliminate some goto-based cleanup by utilizing scoped cleanup helpers.
>
>v2:
> - Eliminate unnecessary 'ret' variable in intel_hdcp_gsc_check_status()
> (Gustavo)
>
>Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Reviewed-by: Gustavo Sousa <gustavo.sousa@intel.com>
>---
> drivers/gpu/drm/xe/display/xe_fb_pin.c | 23 +++++++-----------
> drivers/gpu/drm/xe/display/xe_hdcp_gsc.c | 31 +++++++-----------------
> 2 files changed, 18 insertions(+), 36 deletions(-)
>
>diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c
>index 1fd4a815e784..6a935a75f2a4 100644
>--- a/drivers/gpu/drm/xe/display/xe_fb_pin.c
>+++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c
>@@ -210,10 +210,11 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
> /* TODO: Consider sharing framebuffer mapping?
> * embed i915_vma inside intel_framebuffer
> */
>- xe_pm_runtime_get_noresume(xe);
>- ret = mutex_lock_interruptible(&ggtt->lock);
>+ guard(xe_pm_runtime_noresume)(xe);
>+ ACQUIRE(mutex_intr, lock)(&ggtt->lock);
>+ ret = ACQUIRE_ERR(mutex_intr, &lock);
> if (ret)
>- goto out;
>+ return ret;
>
> align = XE_PAGE_SIZE;
> if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
>@@ -223,15 +224,13 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
> vma->node = bo->ggtt_node[tile0->id];
> } else if (view->type == I915_GTT_VIEW_NORMAL) {
> vma->node = xe_ggtt_node_init(ggtt);
>- if (IS_ERR(vma->node)) {
>- ret = PTR_ERR(vma->node);
>- goto out_unlock;
>- }
>+ if (IS_ERR(vma->node))
>+ return PTR_ERR(vma->node);
>
> ret = xe_ggtt_node_insert_locked(vma->node, xe_bo_size(bo), align, 0);
> if (ret) {
> xe_ggtt_node_fini(vma->node);
>- goto out_unlock;
>+ return ret;
> }
>
> xe_ggtt_map_bo(ggtt, vma->node, bo, xe->pat.idx[XE_CACHE_NONE]);
>@@ -245,13 +244,13 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
> vma->node = xe_ggtt_node_init(ggtt);
> if (IS_ERR(vma->node)) {
> ret = PTR_ERR(vma->node);
>- goto out_unlock;
>+ return ret;
> }
>
> ret = xe_ggtt_node_insert_locked(vma->node, size, align, 0);
> if (ret) {
> xe_ggtt_node_fini(vma->node);
>- goto out_unlock;
>+ return ret;
> }
>
> ggtt_ofs = vma->node->base.start;
>@@ -265,10 +264,6 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
> rot_info->plane[i].dst_stride);
> }
>
>-out_unlock:
>- mutex_unlock(&ggtt->lock);
>-out:
>- xe_pm_runtime_put(xe);
> return ret;
> }
>
>diff --git a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
>index 4ae847b628e2..71d21fde1736 100644
>--- a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
>+++ b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
>@@ -36,8 +36,6 @@ bool intel_hdcp_gsc_check_status(struct drm_device *drm)
> struct xe_tile *tile = xe_device_get_root_tile(xe);
> struct xe_gt *gt = tile->media_gt;
> struct xe_gsc *gsc = &gt->uc.gsc;
>- bool ret = true;
>- unsigned int fw_ref;
>
> if (!gsc || !xe_uc_fw_is_enabled(&gsc->fw)) {
> drm_dbg_kms(&xe->drm,
>@@ -45,22 +43,15 @@ bool intel_hdcp_gsc_check_status(struct drm_device *drm)
> return false;
> }
>
>- xe_pm_runtime_get(xe);
>- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
>- if (!fw_ref) {
>+ guard(xe_pm_runtime)(xe);
>+ CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GSC);
>+ if (!fw_ref.domains) {
> drm_dbg_kms(&xe->drm,
> "failed to get forcewake to check proxy status\n");
>- ret = false;
>- goto out;
>+ return false;
> }
>
>- if (!xe_gsc_proxy_init_done(gsc))
>- ret = false;
>-
>- xe_force_wake_put(gt_to_fw(gt), fw_ref);
>-out:
>- xe_pm_runtime_put(xe);
>- return ret;
>+ return xe_gsc_proxy_init_done(gsc);
> }
>
> /*This function helps allocate memory for the command that we will send to gsc cs */
>@@ -166,17 +157,15 @@ ssize_t intel_hdcp_gsc_msg_send(struct intel_hdcp_gsc_context *gsc_context,
> u32 addr_out_off, addr_in_wr_off = 0;
> int ret, tries = 0;
>
>- if (msg_in_len > max_msg_size || msg_out_len > max_msg_size) {
>- ret = -ENOSPC;
>- goto out;
>- }
>+ if (msg_in_len > max_msg_size || msg_out_len > max_msg_size)
>+ return -ENOSPC;
>
> msg_size_in = msg_in_len + HDCP_GSC_HEADER_SIZE;
> msg_size_out = msg_out_len + HDCP_GSC_HEADER_SIZE;
> addr_out_off = PAGE_SIZE;
>
> host_session_id = xe_gsc_create_host_session_id();
>- xe_pm_runtime_get_noresume(xe);
>+ guard(xe_pm_runtime_noresume)(xe);
> addr_in_wr_off = xe_gsc_emit_header(xe, &gsc_context->hdcp_bo->vmap,
> addr_in_wr_off, HECI_MEADDRESS_HDCP,
> host_session_id, msg_in_len);
>@@ -201,13 +190,11 @@ ssize_t intel_hdcp_gsc_msg_send(struct intel_hdcp_gsc_context *gsc_context,
> } while (++tries < 20);
>
> if (ret)
>- goto out;
>+ return ret;
>
> xe_map_memcpy_from(xe, msg_out, &gsc_context->hdcp_bo->vmap,
> addr_out_off + HDCP_GSC_HEADER_SIZE,
> msg_out_len);
>
>-out:
>- xe_pm_runtime_put(xe);
> return ret;
> }
>--
>2.51.1
>
next prev parent reply other threads:[~2025-11-17 22:11 UTC|newest]
Thread overview: 39+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-11-14 21:43 [PATCH v3 00/27] Scope-based forcewake and runtime PM Matt Roper
2025-11-14 21:43 ` [PATCH v3 01/27] drm/xe/forcewake: Add scope-based cleanup for forcewake Matt Roper
2025-11-17 22:03 ` Gustavo Sousa
2025-11-17 22:17 ` Gustavo Sousa
2025-11-14 21:43 ` [PATCH v3 02/27] drm/xe/pm: Add scope-based cleanup helper for runtime PM Matt Roper
2025-11-17 22:04 ` Gustavo Sousa
2025-11-14 21:43 ` [PATCH v3 03/27] drm/xe/gt: Use scope-based cleanup Matt Roper
2025-11-14 21:43 ` [PATCH v3 04/27] drm/xe/gt_idle: " Matt Roper
2025-11-14 21:43 ` [PATCH v3 05/27] drm/xe/guc: " Matt Roper
2025-11-14 21:43 ` [PATCH v3 06/27] drm/xe/guc_pc: " Matt Roper
2025-11-14 21:43 ` [PATCH v3 07/27] drm/xe/mocs: " Matt Roper
2025-11-14 21:43 ` [PATCH v3 08/27] drm/xe/pat: Use scope-based forcewake Matt Roper
2025-11-14 21:43 ` [PATCH v3 09/27] drm/xe/pxp: Use scope-based cleanup Matt Roper
2025-11-14 21:43 ` [PATCH v3 10/27] drm/xe/gsc: " Matt Roper
2025-11-14 21:43 ` [PATCH v3 11/27] drm/xe/device: " Matt Roper
2025-11-14 21:43 ` [PATCH v3 12/27] drm/xe/devcoredump: " Matt Roper
2025-11-17 22:09 ` Gustavo Sousa
2025-11-14 21:43 ` [PATCH v3 13/27] drm/xe/display: Use scoped-cleanup Matt Roper
2025-11-17 22:11 ` Gustavo Sousa [this message]
2025-11-14 21:43 ` [PATCH v3 14/27] drm/xe: Return forcewake reference type from force_wake_get_any_engine() Matt Roper
2025-11-17 22:19 ` Gustavo Sousa
2025-11-14 21:43 ` [PATCH v3 15/27] drm/xe/drm_client: Use scope-based cleanup Matt Roper
2025-11-17 22:28 ` Gustavo Sousa
2025-11-14 21:43 ` [PATCH v3 16/27] drm/xe/gt_debugfs: " Matt Roper
2025-11-14 21:43 ` [PATCH v3 17/27] drm/xe/huc: Use scope-based forcewake Matt Roper
2025-11-14 21:43 ` [PATCH v3 18/27] drm/xe/query: " Matt Roper
2025-11-14 21:43 ` [PATCH v3 19/27] drm/xe/reg_sr: " Matt Roper
2025-11-14 21:43 ` [PATCH v3 20/27] drm/xe/vram: " Matt Roper
2025-11-14 21:43 ` [PATCH v3 21/27] drm/xe/bo: Use scope-based runtime PM Matt Roper
2025-11-14 21:43 ` [PATCH v3 22/27] drm/xe/ggtt: Use scope-based runtime pm Matt Roper
2025-11-14 21:43 ` [PATCH v3 23/27] drm/xe/hwmon: Use scope-based runtime PM Matt Roper
2025-11-14 21:44 ` [PATCH v3 24/27] drm/xe/sriov: " Matt Roper
2025-11-14 21:44 ` [PATCH v3 25/27] drm/xe/tests: " Matt Roper
2025-11-14 21:44 ` [PATCH v3 26/27] drm/xe/sysfs: Use scope-based runtime power management Matt Roper
2025-11-14 21:44 ` [PATCH v3 27/27] drm/xe/debugfs: Use scope-based runtime PM Matt Roper
2025-11-14 23:22 ` ✗ CI.checkpatch: warning for Scope-based forcewake and runtime PM (rev4) Patchwork
2025-11-14 23:23 ` ✓ CI.KUnit: success " Patchwork
2025-11-15 0:14 ` ✓ Xe.CI.BAT: " Patchwork
2025-11-15 11:18 ` ✗ Xe.CI.Full: failure " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=176341749971.5989.17296693505198711220@intel.com \
--to=gustavo.sousa@intel.com \
--cc=intel-xe@lists.freedesktop.org \
--cc=matthew.d.roper@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox