From: Gustavo Sousa <gustavo.sousa@intel.com>
To: Matt Roper <matthew.d.roper@intel.com>, <intel-xe@lists.freedesktop.org>
Cc: <matthew.d.roper@intel.com>
Subject: Re: [PATCH v2 29/30] drm/xe/sysfs: Use scope-based runtime power management
Date: Thu, 13 Nov 2025 15:25:40 -0300 [thread overview]
Message-ID: <176305834030.3698.9234090232327711230@intel.com> (raw)
In-Reply-To: <20251110232017.1475869-61-matthew.d.roper@intel.com>
Quoting Matt Roper (2025-11-10 20:20:47-03:00)
>Switch sysfs to use scope-based runtime power management to slightly
>simplify the code.
>
>Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
>---
> drivers/gpu/drm/xe/xe_device_sysfs.c | 33 ++++++++-----------
> drivers/gpu/drm/xe/xe_gt_freq.c | 27 +++++----------
> drivers/gpu/drm/xe/xe_gt_throttle.c | 3 +-
> drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c | 6 ++--
> 4 files changed, 25 insertions(+), 44 deletions(-)
>
>diff --git a/drivers/gpu/drm/xe/xe_device_sysfs.c b/drivers/gpu/drm/xe/xe_device_sysfs.c
>index ec9c06b06fb5..a73e0e957cb0 100644
>--- a/drivers/gpu/drm/xe/xe_device_sysfs.c
>+++ b/drivers/gpu/drm/xe/xe_device_sysfs.c
>@@ -57,9 +57,8 @@ vram_d3cold_threshold_store(struct device *dev, struct device_attribute *attr,
>
> drm_dbg(&xe->drm, "vram_d3cold_threshold: %u\n", vram_d3cold_threshold);
>
>- xe_pm_runtime_get(xe);
>+ guard(xe_pm_runtime)(xe);
> ret = xe_pm_set_vram_threshold(xe, vram_d3cold_threshold);
>- xe_pm_runtime_put(xe);
>
> return ret ?: count;
> }
>@@ -84,33 +83,31 @@ lb_fan_control_version_show(struct device *dev, struct device_attribute *attr, c
> u16 major = 0, minor = 0, hotfix = 0, build = 0;
> int ret;
>
>- xe_pm_runtime_get(xe);
>+ guard(xe_pm_runtime)(xe);
>
> ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0),
> &cap, NULL);
> if (ret)
>- goto out;
>+ return ret;
>
> if (REG_FIELD_GET(V1_FAN_PROVISIONED, cap)) {
> ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_LOW, 0),
> &ver_low, NULL);
> if (ret)
>- goto out;
>+ return ret;
>
> ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_HIGH, 0),
> &ver_high, NULL);
> if (ret)
>- goto out;
>+ return ret;
>
> major = REG_FIELD_GET(MAJOR_VERSION_MASK, ver_low);
> minor = REG_FIELD_GET(MINOR_VERSION_MASK, ver_low);
> hotfix = REG_FIELD_GET(HOTFIX_VERSION_MASK, ver_high);
> build = REG_FIELD_GET(BUILD_VERSION_MASK, ver_high);
> }
>-out:
>- xe_pm_runtime_put(xe);
>
>- return ret ?: sysfs_emit(buf, "%u.%u.%u.%u\n", major, minor, hotfix, build);
>+ return sysfs_emit(buf, "%u.%u.%u.%u\n", major, minor, hotfix, build);
> }
> static DEVICE_ATTR_ADMIN_RO(lb_fan_control_version);
>
>@@ -123,33 +120,31 @@ lb_voltage_regulator_version_show(struct device *dev, struct device_attribute *a
> u16 major = 0, minor = 0, hotfix = 0, build = 0;
> int ret;
>
>- xe_pm_runtime_get(xe);
>+ guard(xe_pm_runtime)(xe);
>
> ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0),
> &cap, NULL);
> if (ret)
>- goto out;
>+ return ret;
>
> if (REG_FIELD_GET(VR_PARAMS_PROVISIONED, cap)) {
> ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_LOW, 0),
> &ver_low, NULL);
> if (ret)
>- goto out;
>+ return ret;
>
> ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_HIGH, 0),
> &ver_high, NULL);
> if (ret)
>- goto out;
>+ return ret;
>
> major = REG_FIELD_GET(MAJOR_VERSION_MASK, ver_low);
> minor = REG_FIELD_GET(MINOR_VERSION_MASK, ver_low);
> hotfix = REG_FIELD_GET(HOTFIX_VERSION_MASK, ver_high);
> build = REG_FIELD_GET(BUILD_VERSION_MASK, ver_high);
> }
>-out:
>- xe_pm_runtime_put(xe);
>
>- return ret ?: sysfs_emit(buf, "%u.%u.%u.%u\n", major, minor, hotfix, build);
>+ return sysfs_emit(buf, "%u.%u.%u.%u\n", major, minor, hotfix, build);
> }
> static DEVICE_ATTR_ADMIN_RO(lb_voltage_regulator_version);
>
>@@ -233,9 +228,8 @@ auto_link_downgrade_capable_show(struct device *dev, struct device_attribute *at
> struct xe_device *xe = pdev_to_xe_device(pdev);
> u32 cap, val;
>
>- xe_pm_runtime_get(xe);
>+ guard(xe_pm_runtime)(xe);
> val = xe_mmio_read32(xe_root_tile_mmio(xe), BMG_PCIE_CAP);
>- xe_pm_runtime_put(xe);
>
> cap = REG_FIELD_GET(LINK_DOWNGRADE, val);
> return sysfs_emit(buf, "%u\n", cap == DOWNGRADE_CAPABLE);
>@@ -251,11 +245,10 @@ auto_link_downgrade_status_show(struct device *dev, struct device_attribute *att
> u32 val = 0;
> int ret;
>
>- xe_pm_runtime_get(xe);
>+ guard(xe_pm_runtime)(xe);
> ret = xe_pcode_read(xe_device_get_root_tile(xe),
> PCODE_MBOX(DGFX_PCODE_STATUS, DGFX_GET_INIT_STATUS, 0),
> &val, NULL);
>- xe_pm_runtime_put(xe);
>
> return ret ?: sysfs_emit(buf, "%u\n", REG_FIELD_GET(DGFX_LINK_DOWNGRADE_STATUS, val));
> }
>diff --git a/drivers/gpu/drm/xe/xe_gt_freq.c b/drivers/gpu/drm/xe/xe_gt_freq.c
>index 849ea6c86e8e..6284a4daf00a 100644
>--- a/drivers/gpu/drm/xe/xe_gt_freq.c
>+++ b/drivers/gpu/drm/xe/xe_gt_freq.c
>@@ -70,9 +70,8 @@ static ssize_t act_freq_show(struct kobject *kobj,
> struct xe_guc_pc *pc = dev_to_pc(dev);
> u32 freq;
>
>- xe_pm_runtime_get(dev_to_xe(dev));
>+ guard(xe_pm_runtime)(dev_to_xe(dev));
> freq = xe_guc_pc_get_act_freq(pc);
>- xe_pm_runtime_put(dev_to_xe(dev));
>
> return sysfs_emit(buf, "%d\n", freq);
> }
>@@ -86,9 +85,8 @@ static ssize_t cur_freq_show(struct kobject *kobj,
> u32 freq;
> ssize_t ret;
>
>- xe_pm_runtime_get(dev_to_xe(dev));
>+ guard(xe_pm_runtime)(dev_to_xe(dev));
> ret = xe_guc_pc_get_cur_freq(pc, &freq);
>- xe_pm_runtime_put(dev_to_xe(dev));
> if (ret)
> return ret;
>
>@@ -113,9 +111,8 @@ static ssize_t rpe_freq_show(struct kobject *kobj,
> struct xe_guc_pc *pc = dev_to_pc(dev);
> u32 freq;
>
>- xe_pm_runtime_get(dev_to_xe(dev));
>+ guard(xe_pm_runtime)(dev_to_xe(dev));
> freq = xe_guc_pc_get_rpe_freq(pc);
>- xe_pm_runtime_put(dev_to_xe(dev));
>
> return sysfs_emit(buf, "%d\n", freq);
> }
>@@ -128,9 +125,8 @@ static ssize_t rpa_freq_show(struct kobject *kobj,
> struct xe_guc_pc *pc = dev_to_pc(dev);
> u32 freq;
>
>- xe_pm_runtime_get(dev_to_xe(dev));
>+ guard(xe_pm_runtime)(dev_to_xe(dev));
> freq = xe_guc_pc_get_rpa_freq(pc);
>- xe_pm_runtime_put(dev_to_xe(dev));
>
> return sysfs_emit(buf, "%d\n", freq);
> }
>@@ -154,9 +150,8 @@ static ssize_t min_freq_show(struct kobject *kobj,
> u32 freq;
> ssize_t ret;
>
>- xe_pm_runtime_get(dev_to_xe(dev));
>+ guard(xe_pm_runtime)(dev_to_xe(dev));
> ret = xe_guc_pc_get_min_freq(pc, &freq);
>- xe_pm_runtime_put(dev_to_xe(dev));
> if (ret)
> return ret;
>
>@@ -175,9 +170,8 @@ static ssize_t min_freq_store(struct kobject *kobj,
> if (ret)
> return ret;
>
>- xe_pm_runtime_get(dev_to_xe(dev));
>+ guard(xe_pm_runtime)(dev_to_xe(dev));
> ret = xe_guc_pc_set_min_freq(pc, freq);
>- xe_pm_runtime_put(dev_to_xe(dev));
> if (ret)
> return ret;
>
>@@ -193,9 +187,8 @@ static ssize_t max_freq_show(struct kobject *kobj,
> u32 freq;
> ssize_t ret;
>
>- xe_pm_runtime_get(dev_to_xe(dev));
>+ guard(xe_pm_runtime)(dev_to_xe(dev));
> ret = xe_guc_pc_get_max_freq(pc, &freq);
>- xe_pm_runtime_put(dev_to_xe(dev));
> if (ret)
> return ret;
>
>@@ -214,9 +207,8 @@ static ssize_t max_freq_store(struct kobject *kobj,
> if (ret)
> return ret;
>
>- xe_pm_runtime_get(dev_to_xe(dev));
>+ guard(xe_pm_runtime)(dev_to_xe(dev));
> ret = xe_guc_pc_set_max_freq(pc, freq);
>- xe_pm_runtime_put(dev_to_xe(dev));
> if (ret)
> return ret;
>
>@@ -243,9 +235,8 @@ static ssize_t power_profile_store(struct kobject *kobj,
> struct xe_guc_pc *pc = dev_to_pc(dev);
> int err;
>
>- xe_pm_runtime_get(dev_to_xe(dev));
>+ guard(xe_pm_runtime)(dev_to_xe(dev));
> err = xe_guc_pc_set_power_profile(pc, buff);
>- xe_pm_runtime_put(dev_to_xe(dev));
>
> return err ?: count;
> }
>diff --git a/drivers/gpu/drm/xe/xe_gt_throttle.c b/drivers/gpu/drm/xe/xe_gt_throttle.c
>index 82c5fbcdfbe3..0ee288389e71 100644
>--- a/drivers/gpu/drm/xe/xe_gt_throttle.c
>+++ b/drivers/gpu/drm/xe/xe_gt_throttle.c
>@@ -97,9 +97,8 @@ u32 xe_gt_throttle_get_limit_reasons(struct xe_gt *gt)
> else
> mask = GT0_PERF_LIMIT_REASONS_MASK;
>
>- xe_pm_runtime_get(xe);
>+ guard(xe_pm_runtime)(xe);
> val = xe_mmio_read32(&gt->mmio, reg) & mask;
>- xe_pm_runtime_put(xe);
>
> return val;
We can drop the variable val and return the expression directly.
> }
>diff --git a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
>index 640950172088..1d3511d0d025 100644
>--- a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
>+++ b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
>@@ -47,9 +47,8 @@ static ssize_t xe_hw_engine_class_sysfs_attr_show(struct kobject *kobj,
>
> kattr = container_of(attr, struct kobj_attribute, attr);
> if (kattr->show) {
>- xe_pm_runtime_get(xe);
>+ guard(xe_pm_runtime)(xe);
> ret = kattr->show(kobj, kattr, buf);
>- xe_pm_runtime_put(xe);
> }
>
> return ret;
I think we can drop the variable ret by returning directly in the
two places.
>@@ -66,9 +65,8 @@ static ssize_t xe_hw_engine_class_sysfs_attr_store(struct kobject *kobj,
>
> kattr = container_of(attr, struct kobj_attribute, attr);
> if (kattr->store) {
>- xe_pm_runtime_get(xe);
>+ guard(xe_pm_runtime)(xe);
> ret = kattr->store(kobj, kattr, buf, count);
>- xe_pm_runtime_put(xe);
> }
>
> return ret;
Same applies here.
Reviewed-by: Gustavo Sousa <gustavo.sousa@intel.com>
>--
>2.51.1
>
next prev parent reply other threads:[~2025-11-13 18:26 UTC|newest]
Thread overview: 74+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-11-10 23:20 [PATCH v2 00/30] Scope-based forcewake and runtime PM Matt Roper
2025-11-10 23:20 ` [PATCH v2 01/30] drm/xe/forcewake: Improve kerneldoc Matt Roper
2025-11-12 14:04 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 02/30] drm/xe/eustall: Store forcewake reference in stream structure Matt Roper
2025-11-12 15:36 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 03/30] drm/xe/oa: " Matt Roper
2025-11-12 16:11 ` Gustavo Sousa
2025-11-13 17:10 ` Dixit, Ashutosh
2025-11-10 23:20 ` [PATCH v2 04/30] drm/xe/forcewake: Add scope-based cleanup for forcewake Matt Roper
2025-11-12 20:00 ` Gustavo Sousa
2025-11-12 21:01 ` Matt Roper
2025-11-12 21:16 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 05/30] drm/xe/pm: Add scope-based cleanup helper for runtime PM Matt Roper
2025-11-12 19:53 ` Michal Wajdeczko
2025-11-12 21:48 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 06/30] drm/xe/gt: Use scope-based cleanup Matt Roper
2025-11-13 12:26 ` Gustavo Sousa
2025-11-13 22:58 ` Matt Roper
2025-11-10 23:20 ` [PATCH v2 07/30] drm/xe/gt_idle: " Matt Roper
2025-11-13 12:39 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 08/30] drm/xe/guc: " Matt Roper
2025-11-13 12:46 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 09/30] drm/xe/guc_pc: " Matt Roper
2025-11-13 13:00 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 10/30] drm/xe/mocs: " Matt Roper
2025-11-13 13:30 ` Gustavo Sousa
2025-11-13 23:28 ` Matt Roper
2025-11-10 23:20 ` [PATCH v2 11/30] drm/xe/pat: Use scope-based forcewake Matt Roper
2025-11-13 13:37 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 12/30] drm/xe/pxp: Use scope-based cleanup Matt Roper
2025-11-13 13:40 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 13/30] drm/xe/gsc: " Matt Roper
2025-11-13 13:46 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 14/30] drm/xe/device: " Matt Roper
2025-11-13 14:04 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 15/30] drm/xe/devcoredump: " Matt Roper
2025-11-13 14:14 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 16/30] drm/xe/display: Use scoped-cleanup Matt Roper
2025-11-13 14:25 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 17/30] drm/xe: Create scoped cleanup class for force_wake_get_any_engine() Matt Roper
2025-11-13 17:39 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 18/30] drm/xe/drm_client: Use scope-based cleanup Matt Roper
2025-11-10 23:20 ` [PATCH v2 19/30] drm/xe/gt_debugfs: " Matt Roper
2025-11-13 17:45 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 20/30] drm/xe/huc: Use scope-based forcewake Matt Roper
2025-11-13 17:46 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 21/30] drm/xe/query: " Matt Roper
2025-11-13 17:50 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 22/30] drm/xe/reg_sr: " Matt Roper
2025-11-13 17:51 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 23/30] drm/xe/vram: " Matt Roper
2025-11-10 23:57 ` [PATCH v2.1 " Matt Roper
2025-11-13 17:52 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 24/30] drm/xe/bo: Use scope-based runtime PM Matt Roper
2025-11-13 17:54 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 25/30] drm/xe/ggtt: Use scope-based runtime pm Matt Roper
2025-11-13 17:55 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 26/30] drm/xe/hwmon: Use scope-based runtime PM Matt Roper
2025-11-13 18:01 ` Gustavo Sousa
2025-11-13 18:05 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 27/30] drm/xe/sriov: " Matt Roper
2025-11-13 18:09 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 28/30] drm/xe/tests: " Matt Roper
2025-11-13 18:15 ` Gustavo Sousa
2025-11-10 23:20 ` [PATCH v2 29/30] drm/xe/sysfs: Use scope-based runtime power management Matt Roper
2025-11-13 18:25 ` Gustavo Sousa [this message]
2025-11-10 23:20 ` [PATCH v2 30/30] drm/xe/debugfs: Use scope-based runtime PM Matt Roper
2025-11-13 18:30 ` Gustavo Sousa
2025-11-11 0:20 ` ✓ CI.KUnit: success for Scope-based forcewake and runtime PM (rev3) Patchwork
2025-11-11 0:57 ` ✓ Xe.CI.BAT: " Patchwork
2025-11-11 10:50 ` ✗ Xe.CI.Full: failure " Patchwork
2025-11-11 10:57 ` [PATCH v2 00/30] Scope-based forcewake and runtime PM Jani Nikula
2025-11-12 16:01 ` Matt Roper
2025-11-13 22:11 ` Matt Roper
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=176305834030.3698.9234090232327711230@intel.com \
--to=gustavo.sousa@intel.com \
--cc=intel-xe@lists.freedesktop.org \
--cc=matthew.d.roper@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox