From: Darren Powell <darren.powell@amd.com>
To: <amd-gfx@lists.freedesktop.org>
Cc: Darren Powell <darren.powell@amd.com>
Subject: [PATCH v3 3/3] amdgpu/pm: Linked emit_clock_levels to use cases amdgpu_get_pp_{dpm_clock, od_clk_voltage}
Date: Tue, 25 Jan 2022 23:54:41 -0500
Message-ID: <20220126045441.3891-4-darren.powell@amd.com>
In-Reply-To: <20220126045441.3891-1-darren.powell@amd.com>
(v3)
Rewrote patchset to order patches as (API, hw impl, use case)
- modified amdgpu_get_pp_od_clk_voltage to try amdgpu_dpm_emit_clock_levels and
fall back to amdgpu_dpm_print_clock_levels if emit is not implemented (see the
sketch below)
- modified amdgpu_get_pp_dpm_clock to try amdgpu_dpm_emit_clock_levels and
fall back to amdgpu_dpm_print_clock_levels if emit is not implemented
- A newline is printed to buf if no output is produced
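For reference, both use cases now share the control flow sketched below. This
sketch is not part of the diff; show_clock_levels is a hypothetical name used
only for illustration, while the emit/print calls and their signatures are the
ones exercised in the hunks that follow:

    static int show_clock_levels(struct amdgpu_device *adev,
                                 enum pp_clock_type type, char *buf)
    {
            int size = 0;
            int ret;

            /* Try the new emit interface first; it appends at *size */
            ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size);
            if (ret == -ENOENT)     /* hw backend has no emit hook */
                    size = amdgpu_dpm_print_clock_levels(adev, type, buf);

            if (size == 0)          /* no output produced at all */
                    size = sysfs_emit(buf, "\n");

            return size;
    }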
== Test ==
LOGFILE=pp_clk.test.log
AMDGPU_PCI_ADDR=$(lspci -nn | grep "VGA\|Display" | cut -d " " -f 1)
AMDGPU_HWMON=$(ls -la /sys/class/hwmon | grep "$AMDGPU_PCI_ADDR" | awk '{print $9}')
HWMON_DIR=/sys/class/hwmon/${AMDGPU_HWMON}
lspci -nn | grep "VGA\|Display" > $LOGFILE
FILES="pp_od_clk_voltage
pp_dpm_sclk
pp_dpm_mclk
pp_dpm_pcie
pp_dpm_socclk
pp_dpm_fclk
pp_dpm_dcefclk
pp_dpm_vclk
pp_dpm_dclk "
for f in $FILES
do
echo "=== $f ===" >> $LOGFILE
cat $HWMON_DIR/device/$f >> $LOGFILE
done
cat $LOGFILE
Signed-off-by: Darren Powell <darren.powell@amd.com>
---
drivers/gpu/drm/amd/pm/amdgpu_pm.c | 49 +++++++++++++++++++++---------
1 file changed, 35 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index d2823aaeca09..a11def0ee761 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -832,8 +832,17 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- ssize_t size;
+ int size = 0;
int ret;
+ enum pp_clock_type od_clocks[6] = {
+ OD_SCLK,
+ OD_MCLK,
+ OD_VDDC_CURVE,
+ OD_RANGE,
+ OD_VDDGFX_OFFSET,
+ OD_CCLK,
+ };
+ uint clk_index;
if (amdgpu_in_reset(adev))
return -EPERM;
@@ -846,16 +855,25 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
return ret;
}
- size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
- if (size > 0) {
- size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
- size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
- size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf+size);
- size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
- size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf+size);
- } else {
- size = sysfs_emit(buf, "\n");
+ for(clk_index = 0 ; clk_index < 6 ; clk_index++) {
+ ret = amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
+ if (ret)
+ break;
+ }
+ if (ret == -ENOENT) {
+ size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
+ if (size > 0) {
+ size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
+ size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
+ size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf+size);
+ size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
+ size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf+size);
+ }
}
+
+ if (size == 0)
+ size = sysfs_emit(buf, "\n");
+
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -980,8 +998,8 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- ssize_t size;
- int ret;
+ int size = 0;
+ int ret = 0;
if (amdgpu_in_reset(adev))
return -EPERM;
@@ -994,8 +1012,11 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
return ret;
}
- size = amdgpu_dpm_print_clock_levels(adev, type, buf);
- if (size <= 0)
+ ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size);
+ if (ret == -ENOENT)
+ size = amdgpu_dpm_print_clock_levels(adev, type, buf);
+
+ if (size == 0)
size = sysfs_emit(buf, "\n");
pm_runtime_mark_last_busy(ddev->dev);
--
2.34.1