From: Sumit Gupta <sumitg@nvidia.com>
To: <rafael@kernel.org>, <viresh.kumar@linaro.org>, <lenb@kernel.org>,
<robert.moore@intel.com>, <corbet@lwn.net>,
<linux-pm@vger.kernel.org>, <linux-acpi@vger.kernel.org>,
<linux-doc@vger.kernel.org>, <acpica-devel@lists.linux.dev>,
<linux-kernel@vger.kernel.org>
Cc: <linux-tegra@vger.kernel.org>, <treding@nvidia.com>,
<jonathanh@nvidia.com>, <sashal@nvidia.com>, <vsethi@nvidia.com>,
<ksitaraman@nvidia.com>, <sanjayc@nvidia.com>, <bbasu@nvidia.com>,
<sumitg@nvidia.com>
Subject: [Patch 3/5] ACPI: CPPC: support updating epp, auto_sel and {min|max_perf} from sysfs
Date: Tue, 11 Feb 2025 16:07:35 +0530 [thread overview]
Message-ID: <20250211103737.447704-4-sumitg@nvidia.com> (raw)
In-Reply-To: <20250211103737.447704-1-sumitg@nvidia.com>
Add support to update the CPC registers used for Autonomous
Performance Level Selection from acpi_cppc sysfs store nodes.
Registers supported for update are:
- Energy Performance Preference (EPP): energy_perf
- Autonomous Selection: auto_sel
- Maximum Performance: max_perf
- Minimum Performance: min_perf
Also, enable show nodes to read the following CPC registers:
- Performance Limited: perf_limited
- Autonomous Activity Window: auto_activity_window
Signed-off-by: Sumit Gupta <sumitg@nvidia.com>
---
drivers/acpi/cppc_acpi.c | 191 ++++++++++++++++++++++++++++++++++++---
include/acpi/cppc_acpi.h | 5 +
2 files changed, 183 insertions(+), 13 deletions(-)
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index cc2bf958e84f..c60ad66ece85 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -170,6 +170,133 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
} \
define_one_cppc(member_name, mode)
+static ssize_t store_min_perf(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
+ struct cpufreq_policy *policy;
+ struct cppc_cpudata *cpu_data;
+ u32 min_perf, input;
+ int ret;
+
+ policy = cpufreq_cpu_get(cpc_ptr->cpu_id);
+ cpu_data = policy->driver_data;
+
+ if (kstrtouint(buf, 10, &input))
+ return -EINVAL;
+
+ if (input > cpu_data->perf_ctrls.max_perf)
+ return -EINVAL;
+
+ input = clamp(input, cpu_data->perf_caps.lowest_perf, cpu_data->perf_caps.highest_perf);
+
+ min_perf = cpu_data->perf_ctrls.min_perf;
+ cpu_data->perf_ctrls.min_perf = input;
+
+ ret = cppc_set_perf_ctrls(cpc_ptr->cpu_id, &cpu_data->perf_ctrls);
+ if (ret) {
+ pr_debug("Err writing CPU%d perf ctrls: ret:%d\n", cpc_ptr->cpu_id, ret);
+ cpu_data->perf_ctrls.min_perf = min_perf;
+ return ret;
+ }
+ cpufreq_cpu_put(policy);
+
+ return count;
+}
+
+static ssize_t store_max_perf(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
+ struct cpufreq_policy *policy;
+ struct cppc_cpudata *cpu_data;
+ u32 max_perf, input;
+ int ret;
+
+ policy = cpufreq_cpu_get(cpc_ptr->cpu_id);
+ cpu_data = policy->driver_data;
+
+ if (kstrtouint(buf, 10, &input))
+ return -EINVAL;
+
+ if (input < cpu_data->perf_ctrls.min_perf)
+ return -EINVAL;
+
+ input = clamp(input, cpu_data->perf_caps.lowest_perf, cpu_data->perf_caps.highest_perf);
+
+ max_perf = cpu_data->perf_ctrls.max_perf;
+ cpu_data->perf_ctrls.max_perf = input;
+
+ ret = cppc_set_perf_ctrls(cpc_ptr->cpu_id, &cpu_data->perf_ctrls);
+ if (ret) {
+ pr_debug("Err writing CPU%d perf ctrls: ret:%d\n", cpc_ptr->cpu_id, ret);
+ cpu_data->perf_ctrls.max_perf = max_perf;
+ return ret;
+ }
+ cpufreq_cpu_put(policy);
+
+ return count;
+}
+
+static ssize_t store_energy_perf(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
+ struct cpufreq_policy *policy;
+ struct cppc_cpudata *cpu_data;
+ u64 epp, input;
+ int ret;
+
+ policy = cpufreq_cpu_get(cpc_ptr->cpu_id);
+ cpu_data = policy->driver_data;
+
+ if (kstrtou64(buf, 10, &input))
+ return -EINVAL;
+
+ input = clamp(input, CPPC_EPP_PERFORMANCE_PREF, CPPC_EPP_ENERGY_EFFICIENCY_PREF);
+
+ epp = cpu_data->perf_ctrls.energy_perf;
+ cpu_data->perf_ctrls.energy_perf = input;
+
+ ret = cppc_set_epp_perf(cpc_ptr->cpu_id, &cpu_data->perf_ctrls,
+ cpu_data->perf_caps.auto_sel);
+ if (ret) {
+ pr_debug("failed to set energy perf value (%d)\n", ret);
+ cpu_data->perf_ctrls.energy_perf = epp;
+ return ret;
+ }
+ cpufreq_cpu_put(policy);
+
+ return count;
+}
+
+static ssize_t store_auto_sel(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
+ struct cpufreq_policy *policy;
+ struct cppc_cpudata *cpu_data;
+ bool input = false;
+ int ret;
+
+ policy = cpufreq_cpu_get(cpc_ptr->cpu_id);
+ cpu_data = policy->driver_data;
+
+ if (kstrtobool(buf, &input))
+ return -EINVAL;
+
+ ret = cppc_set_auto_sel(cpc_ptr->cpu_id, input);
+ if (ret) {
+ pr_info("failed to set autonomous selection (%d)\n", ret);
+ return ret;
+ }
+ cpu_data->perf_caps.auto_sel = input;
+
+ cpufreq_cpu_put(policy);
+
+ return count;
+}
+
sysfs_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf, ro);
sysfs_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf, ro);
sysfs_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf, ro);
@@ -177,9 +304,16 @@ sysfs_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf, ro);
sysfs_cppc_data(cppc_get_perf_caps, cppc_perf_caps, guaranteed_perf, ro);
sysfs_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq, ro);
sysfs_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq, ro);
+sysfs_cppc_data(cppc_get_perf_caps, cppc_perf_caps, auto_sel, rw);
sysfs_cppc_data(cppc_get_perf_fb_ctrs, cppc_perf_fb_ctrs, reference_perf, ro);
sysfs_cppc_data(cppc_get_perf_fb_ctrs, cppc_perf_fb_ctrs, wraparound_time, ro);
+sysfs_cppc_data(cppc_get_perf_fb_ctrs, cppc_perf_fb_ctrs, perf_limited, ro);
+
+sysfs_cppc_data(cppc_get_perf_ctrls, cppc_perf_ctrls, min_perf, rw);
+sysfs_cppc_data(cppc_get_perf_ctrls, cppc_perf_ctrls, max_perf, rw);
+sysfs_cppc_data(cppc_get_perf_ctrls, cppc_perf_ctrls, energy_perf, rw);
+sysfs_cppc_data(cppc_get_perf_ctrls, cppc_perf_ctrls, auto_activity_window, ro);
/* Check for valid access_width, otherwise, fallback to using bit_width */
#define GET_BIT_WIDTH(reg) ((reg)->access_width ? (8 << ((reg)->access_width - 1)) : (reg)->bit_width)
@@ -218,6 +352,12 @@ static struct attribute *cppc_attrs[] = {
&nominal_perf.attr,
&nominal_freq.attr,
&lowest_freq.attr,
+ &auto_sel.attr,
+ &max_perf.attr,
+ &min_perf.attr,
+ &perf_limited.attr,
+ &auto_activity_window.attr,
+ &energy_perf.attr,
NULL
};
ATTRIBUTE_GROUPS(cppc);
@@ -1286,8 +1426,8 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
struct cpc_register_resource *highest_reg, *lowest_reg,
*lowest_non_linear_reg, *nominal_reg, *guaranteed_reg,
- *low_freq_reg = NULL, *nom_freq_reg = NULL;
- u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0;
+ *low_freq_reg = NULL, *nom_freq_reg = NULL, *auto_sel_reg = NULL;
+ u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0, auto_sel = 0;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
struct cppc_pcc_data *pcc_ss_data = NULL;
int ret = 0, regs_in_pcc = 0;
@@ -1304,11 +1444,12 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];
guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF];
+ auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
/* Are any of the regs PCC ?*/
if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) ||
- CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) {
+ CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg) || CPC_IN_PCC(auto_sel_reg)) {
if (pcc_ss_id < 0) {
pr_debug("Invalid pcc_ss_id\n");
return -ENODEV;
@@ -1356,6 +1497,9 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
perf_caps->lowest_freq = low_f;
perf_caps->nominal_freq = nom_f;
+ if (CPC_SUPPORTED(auto_sel_reg))
+ cpc_read(cpunum, auto_sel_reg, &auto_sel);
+ perf_caps->auto_sel = (bool)auto_sel;
out_err:
if (regs_in_pcc)
@@ -1535,8 +1679,22 @@ int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
CPC_SUPPORTED(epp_set_reg) && CPC_IN_FFH(epp_set_reg)) {
ret = cpc_write(cpu, epp_set_reg, perf_ctrls->energy_perf);
} else {
- ret = -ENOTSUPP;
- pr_debug("_CPC in PCC and _CPC in FFH are not supported\n");
+ if (CPC_SUPPORTED(auto_sel_reg) && CPC_SUPPORTED(epp_set_reg)) {
+ ret = cpc_write(cpu, auto_sel_reg, enable);
+ if (ret) {
+ pr_debug("Error in writing auto_sel for CPU:%d\n", cpu);
+ return ret;
+ }
+
+ ret = cpc_write(cpu, epp_set_reg, perf_ctrls->energy_perf);
+ if (ret) {
+ pr_debug("Error in writing energy_perf for CPU:%d\n", cpu);
+ return ret;
+ }
+ } else {
+ ret = -EOPNOTSUPP;
+ pr_debug("_CPC in PCC and _CPC in FFH are not supported\n");
+ }
}
return ret;
@@ -1553,6 +1711,7 @@ int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps)
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
struct cpc_register_resource *auto_sel_reg;
u64 auto_sel;
+ int ret = 0;
if (!cpc_desc) {
pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
@@ -1561,13 +1720,9 @@ int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps)
auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
- if (!CPC_SUPPORTED(auto_sel_reg))
- pr_warn_once("Autonomous mode is not unsupported!\n");
-
if (CPC_IN_PCC(auto_sel_reg)) {
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
struct cppc_pcc_data *pcc_ss_data = NULL;
- int ret = 0;
if (pcc_ss_id < 0)
return -ENODEV;
@@ -1588,7 +1743,15 @@ int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps)
return ret;
}
- return 0;
+ if (CPC_SUPPORTED(auto_sel_reg)) {
+ cpc_read(cpunum, auto_sel_reg, &auto_sel);
+ } else {
+ pr_debug("Autonomous mode is not unsupported!\n");
+ ret = -EOPNOTSUPP;
+ }
+ perf_caps->auto_sel = (bool)auto_sel;
+
+ return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_auto_sel_caps);
@@ -1630,11 +1793,13 @@ int cppc_set_auto_sel(int cpu, bool enable)
/* after writing CPC, transfer the ownership of PCC to platform */
ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
up_write(&pcc_ss_data->pcc_lock);
- } else {
- ret = -ENOTSUPP;
- pr_debug("_CPC in PCC is not supported\n");
+
+ return ret;
}
+ if (CPC_SUPPORTED(auto_sel_reg))
+ ret = cpc_write(cpu, auto_sel_reg, enable);
+
return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_auto_sel);
diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
index 31f4fd288b65..b072ef11f128 100644
--- a/include/acpi/cppc_acpi.h
+++ b/include/acpi/cppc_acpi.h
@@ -32,6 +32,9 @@
#define CMD_READ 0
#define CMD_WRITE 1
+#define CPPC_EPP_PERFORMANCE_PREF 0x00
+#define CPPC_EPP_ENERGY_EFFICIENCY_PREF 0xFF
+
/* Each register has the folowing format. */
struct cpc_reg {
u8 descriptor;
@@ -118,6 +121,7 @@ struct cppc_perf_ctrls {
u32 min_perf;
u32 desired_perf;
u32 energy_perf;
+ u32 auto_activity_window;
};
struct cppc_perf_fb_ctrs {
@@ -125,6 +129,7 @@ struct cppc_perf_fb_ctrs {
u64 delivered;
u64 reference_perf;
u64 wraparound_time;
+ u32 perf_limited;
};
/* Per CPU container for runtime CPPC management. */
--
2.25.1
next prev parent reply other threads:[~2025-02-11 10:38 UTC|newest]
Thread overview: 25+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-02-11 10:37 [Patch 0/5] Support Autonomous Selection mode in cppc_cpufreq Sumit Gupta
2025-02-11 10:37 ` [Patch 1/5] ACPI: CPPC: add read perf ctrls api and rename few existing Sumit Gupta
2025-02-12 8:03 ` kernel test robot
2025-02-12 8:25 ` kernel test robot
2025-02-11 10:37 ` [Patch 2/5] ACPI: CPPC: expand macro to create store acpi_cppc sysfs node Sumit Gupta
2025-02-11 10:37 ` Sumit Gupta [this message]
2025-02-24 10:24 ` [Patch 3/5] ACPI: CPPC: support updating epp, auto_sel and {min|max_perf} from sysfs Pierre Gondois
2025-03-14 13:11 ` Sumit Gupta
2025-02-11 10:37 ` [Patch 4/5] Documentation: ACPI: add autonomous mode ctrls info in cppc_sysfs.txt Sumit Gupta
2025-02-11 10:37 ` [Patch 5/5] cpufreq: CPPC: Add cppc_cpufreq_epp instance for Autonomous mode Sumit Gupta
2025-02-12 9:27 ` kernel test robot
2025-02-11 10:44 ` [Patch 0/5] Support Autonomous Selection mode in cppc_cpufreq Viresh Kumar
2025-02-11 12:01 ` zhenglifeng (A)
2025-02-11 14:08 ` Sumit Gupta
2025-02-12 10:52 ` zhenglifeng (A)
2025-02-14 7:08 ` Sumit Gupta
2025-02-18 19:23 ` Rafael J. Wysocki
2025-02-21 13:14 ` Sumit Gupta
2025-02-22 10:06 ` zhenglifeng (A)
2025-02-26 10:22 ` zhenglifeng (A)
2025-03-14 12:48 ` Sumit Gupta
2025-04-01 13:56 ` zhenglifeng (A)
2025-04-19 7:44 ` zhenglifeng (A)
2025-04-27 6:23 ` zhenglifeng (A)
2025-04-30 15:00 ` Sumit Gupta
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250211103737.447704-4-sumitg@nvidia.com \
--to=sumitg@nvidia.com \
--cc=acpica-devel@lists.linux.dev \
--cc=bbasu@nvidia.com \
--cc=corbet@lwn.net \
--cc=jonathanh@nvidia.com \
--cc=ksitaraman@nvidia.com \
--cc=lenb@kernel.org \
--cc=linux-acpi@vger.kernel.org \
--cc=linux-doc@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-pm@vger.kernel.org \
--cc=linux-tegra@vger.kernel.org \
--cc=rafael@kernel.org \
--cc=robert.moore@intel.com \
--cc=sanjayc@nvidia.com \
--cc=sashal@nvidia.com \
--cc=treding@nvidia.com \
--cc=viresh.kumar@linaro.org \
--cc=vsethi@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).