From: Sumit Gupta <sumitg@nvidia.com>
To: <rafael@kernel.org>, <viresh.kumar@linaro.org>, <lenb@kernel.org>,
<robert.moore@intel.com>, <corbet@lwn.net>,
<pierre.gondois@arm.com>, <zhenglifeng1@huawei.com>,
<rdunlap@infradead.org>, <ray.huang@amd.com>,
<gautham.shenoy@amd.com>, <mario.limonciello@amd.com>,
<perry.yuan@amd.com>, <linux-pm@vger.kernel.org>,
<linux-acpi@vger.kernel.org>, <linux-doc@vger.kernel.org>,
<acpica-devel@lists.linux.dev>, <linux-kernel@vger.kernel.org>
Cc: <linux-tegra@vger.kernel.org>, <treding@nvidia.com>,
<jonathanh@nvidia.com>, <vsethi@nvidia.com>,
<ksitaraman@nvidia.com>, <sanjayc@nvidia.com>, <bbasu@nvidia.com>,
<sumitg@nvidia.com>
Subject: [PATCH v3 3/8] ACPI: CPPC: Extend APIs to support auto_sel and epp
Date: Wed, 1 Oct 2025 20:30:59 +0530
Message-ID: <20251001150104.1275188-4-sumitg@nvidia.com>
In-Reply-To: <20251001150104.1275188-1-sumitg@nvidia.com>

- Add support for reading auto_sel in cppc_get_perf_caps().
- Add writing of both auto_sel and energy_perf in cppc_set_epp_perf().
- Remove the redundant energy_perf field from 'struct cppc_perf_caps',
  since the same value is already available in 'struct cppc_perf_ctrls',
  which is the structure actually used (see the usage sketch below).
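
For illustration only, a minimal sketch of how a caller might combine the
two extended APIs; the helper name example_epp_update() and its EPP value
handling are hypothetical and not part of this patch:

#include <acpi/cppc_acpi.h>

/*
 * Hypothetical helper (illustrative only): read the capabilities,
 * including the newly populated auto_sel flag, then program an EPP
 * value while preserving the current autonomous-selection state.
 */
static int example_epp_update(int cpu, u32 epp_value)
{
	struct cppc_perf_caps caps;
	struct cppc_perf_ctrls ctrls = { 0 };
	int ret;

	/* auto_sel is now filled in from the AUTO_SEL_ENABLE register. */
	ret = cppc_get_perf_caps(cpu, &caps);
	if (ret)
		return ret;

	/* energy_perf lives in cppc_perf_ctrls, not in cppc_perf_caps. */
	ctrls.energy_perf = epp_value;

	/* One call now writes both AUTO_SEL_ENABLE and ENERGY_PERF. */
	return cppc_set_epp_perf(cpu, &ctrls, caps.auto_sel);
}

The enable argument of cppc_set_epp_perf() is written to the
AUTO_SEL_ENABLE register, so passing caps.auto_sel back in preserves the
platform's current autonomous-selection setting.
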
Signed-off-by: Sumit Gupta <sumitg@nvidia.com>
---
 drivers/acpi/cppc_acpi.c | 30 ++++++++++++++++++++++++++----
 include/acpi/cppc_acpi.h |  1 -
 2 files changed, 26 insertions(+), 5 deletions(-)

diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index ab8dd5cdb13b..12b2516b971c 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -1344,8 +1344,8 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
 	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
 	struct cpc_register_resource *highest_reg, *lowest_reg,
 		*lowest_non_linear_reg, *nominal_reg, *guaranteed_reg,
-		*low_freq_reg = NULL, *nom_freq_reg = NULL;
-	u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0;
+		*low_freq_reg = NULL, *nom_freq_reg = NULL, *auto_sel_reg = NULL;
+	u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0, auto_sel = 0;
 	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
 	struct cppc_pcc_data *pcc_ss_data = NULL;
 	int ret = 0, regs_in_pcc = 0;
@@ -1362,11 +1362,12 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
 	low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
 	nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];
 	guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF];
+	auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
 
 	/* Are any of the regs PCC ?*/
 	if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
 		CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) ||
-		CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) {
+		CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg) || CPC_IN_PCC(auto_sel_reg)) {
 		if (pcc_ss_id < 0) {
 			pr_debug("Invalid pcc_ss_id\n");
 			return -ENODEV;
@@ -1414,6 +1415,9 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
 	perf_caps->lowest_freq = low_f;
 	perf_caps->nominal_freq = nom_f;
 
+	if (CPC_SUPPORTED(auto_sel_reg))
+		cpc_read(cpunum, auto_sel_reg, &auto_sel);
+	perf_caps->auto_sel = (bool)auto_sel;
 out_err:
 	if (regs_in_pcc)
 		up_write(&pcc_ss_data->pcc_lock);
@@ -1555,6 +1559,8 @@ int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
 	struct cpc_register_resource *auto_sel_reg;
 	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
 	struct cppc_pcc_data *pcc_ss_data = NULL;
+	bool autosel_support_in_ffh_or_sysmem;
+	bool epp_support_in_ffh_or_sysmem;
 	int ret;
 
 	if (!cpc_desc) {
@@ -1565,6 +1571,11 @@ int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
 	auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
 	epp_set_reg = &cpc_desc->cpc_regs[ENERGY_PERF];
 
+	epp_support_in_ffh_or_sysmem = CPC_SUPPORTED(epp_set_reg) &&
+		(CPC_IN_FFH(epp_set_reg) || CPC_IN_SYSTEM_MEMORY(epp_set_reg));
+	autosel_support_in_ffh_or_sysmem = CPC_SUPPORTED(auto_sel_reg) &&
+		(CPC_IN_FFH(auto_sel_reg) || CPC_IN_SYSTEM_MEMORY(auto_sel_reg));
+
 	if (CPC_IN_PCC(epp_set_reg) || CPC_IN_PCC(auto_sel_reg)) {
 		if (pcc_ss_id < 0) {
 			pr_debug("Invalid pcc_ss_id for CPU:%d\n", cpu);
@@ -1590,8 +1601,19 @@ int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
 		ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
 		up_write(&pcc_ss_data->pcc_lock);
 	} else if (osc_cpc_flexible_adr_space_confirmed &&
-		   CPC_SUPPORTED(epp_set_reg) && CPC_IN_FFH(epp_set_reg)) {
+		   epp_support_in_ffh_or_sysmem && autosel_support_in_ffh_or_sysmem) {
+		ret = cpc_write(cpu, auto_sel_reg, enable);
+		if (ret) {
+			pr_debug("Failed to write auto_sel=%d for CPU:%d\n", enable, cpu);
+			return ret;
+		}
+
 		ret = cpc_write(cpu, epp_set_reg, perf_ctrls->energy_perf);
+		if (ret) {
+			pr_debug("Failed to write energy_perf=%u for CPU:%d\n",
+				 perf_ctrls->energy_perf, cpu);
+			return ret;
+		}
 	} else {
 		ret = -ENOTSUPP;
 		pr_debug("_CPC in PCC and _CPC in FFH are not supported\n");
diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
index 213bd389ec57..3babc6d6e70a 100644
--- a/include/acpi/cppc_acpi.h
+++ b/include/acpi/cppc_acpi.h
@@ -119,7 +119,6 @@ struct cppc_perf_caps {
 	u32 lowest_nonlinear_perf;
 	u32 lowest_freq;
 	u32 nominal_freq;
-	u32 energy_perf;
 	bool auto_sel;
 };
 
--
2.34.1