From: Mario Limonciello <superm1@kernel.org>
To: "Gautham R . Shenoy" <gautham.shenoy@amd.com>,
Perry Yuan <perry.yuan@amd.com>
Cc: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>,
linux-kernel@vger.kernel.org (open list:X86 ARCHITECTURE (32-BIT
AND 64-BIT)),
linux-pm@vger.kernel.org (open list:CPU FREQUENCY SCALING
FRAMEWORK), Mario Limonciello <mario.limonciello@amd.com>,
Dhananjay Ugwekar <dhananjay.ugwekar@amd.com>
Subject: [PATCH v2 13/17] cpufreq/amd-pstate: Move all EPP tracing into *_update_perf and *_set_epp functions
Date: Fri, 14 Feb 2025 18:52:40 -0600
Message-ID: <20250215005244.1212285-14-superm1@kernel.org>
In-Reply-To: <20250215005244.1212285-1-superm1@kernel.org>
From: Mario Limonciello <mario.limonciello@amd.com>
EPP tracing is currently done by the caller, so the trace point has no
visibility into whether the CPPC request actually changed.
Move the tracing into the update_perf and set_epp functions and report
whether the request differs from the previously cached one.
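With the new field an enabled amd_pstate_epp_perf event renders along
these lines (the values below are purely illustrative):

  cpu1: [5<->166]/166, epp=128, boost=1, changed=0

A changed=0 record corresponds to a call that matched the cached
request and was therefore skipped, while changed=1 marks a request
that was actually written out.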
Reviewed-by: Dhananjay Ugwekar <dhananjay.ugwekar@amd.com>
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
---
drivers/cpufreq/amd-pstate-trace.h | 13 +++-
drivers/cpufreq/amd-pstate.c | 119 +++++++++++++++++------------
2 files changed, 81 insertions(+), 51 deletions(-)
diff --git a/drivers/cpufreq/amd-pstate-trace.h b/drivers/cpufreq/amd-pstate-trace.h
index f457d4af2c62e..32e1bdc588c52 100644
--- a/drivers/cpufreq/amd-pstate-trace.h
+++ b/drivers/cpufreq/amd-pstate-trace.h
@@ -90,7 +90,8 @@ TRACE_EVENT(amd_pstate_epp_perf,
u8 epp,
u8 min_perf,
u8 max_perf,
- bool boost
+ bool boost,
+ bool changed
),
TP_ARGS(cpu_id,
@@ -98,7 +99,8 @@ TRACE_EVENT(amd_pstate_epp_perf,
epp,
min_perf,
max_perf,
- boost),
+ boost,
+ changed),
TP_STRUCT__entry(
__field(unsigned int, cpu_id)
@@ -107,6 +109,7 @@ TRACE_EVENT(amd_pstate_epp_perf,
__field(u8, min_perf)
__field(u8, max_perf)
__field(bool, boost)
+ __field(bool, changed)
),
TP_fast_assign(
@@ -116,15 +119,17 @@ TRACE_EVENT(amd_pstate_epp_perf,
__entry->min_perf = min_perf;
__entry->max_perf = max_perf;
__entry->boost = boost;
+ __entry->changed = changed;
),
- TP_printk("cpu%u: [%hhu<->%hhu]/%hhu, epp=%hhu, boost=%u",
+ TP_printk("cpu%u: [%hhu<->%hhu]/%hhu, epp=%hhu, boost=%u, changed=%u",
(unsigned int)__entry->cpu_id,
(u8)__entry->min_perf,
(u8)__entry->max_perf,
(u8)__entry->highest_perf,
(u8)__entry->epp,
- (bool)__entry->boost
+ (bool)__entry->boost,
+ (bool)__entry->changed
)
);
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 9517da9b7e692..1304bdc23e809 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -228,9 +228,10 @@ static u8 shmem_get_epp(struct amd_cpudata *cpudata)
return FIELD_GET(AMD_CPPC_EPP_PERF_MASK, epp);
}
-static int msr_update_perf(struct amd_cpudata *cpudata, u8 min_perf,
+static int msr_update_perf(struct cpufreq_policy *policy, u8 min_perf,
u8 des_perf, u8 max_perf, u8 epp, bool fast_switch)
{
+ struct amd_cpudata *cpudata = policy->driver_data;
u64 value, prev;
value = prev = READ_ONCE(cpudata->cppc_req_cached);
@@ -242,6 +243,18 @@ static int msr_update_perf(struct amd_cpudata *cpudata, u8 min_perf,
value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+
+ trace_amd_pstate_epp_perf(cpudata->cpu,
+ perf.highest_perf,
+ epp,
+ min_perf,
+ max_perf,
+ policy->boost_enabled,
+ value != prev);
+ }
+
if (value == prev)
return 0;
@@ -256,24 +269,26 @@ static int msr_update_perf(struct amd_cpudata *cpudata, u8 min_perf,
}
WRITE_ONCE(cpudata->cppc_req_cached, value);
- WRITE_ONCE(cpudata->epp_cached, epp);
+ if (epp != cpudata->epp_cached)
+ WRITE_ONCE(cpudata->epp_cached, epp);
return 0;
}
DEFINE_STATIC_CALL(amd_pstate_update_perf, msr_update_perf);
-static inline int amd_pstate_update_perf(struct amd_cpudata *cpudata,
+static inline int amd_pstate_update_perf(struct cpufreq_policy *policy,
u8 min_perf, u8 des_perf,
u8 max_perf, u8 epp,
bool fast_switch)
{
- return static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
+ return static_call(amd_pstate_update_perf)(policy, min_perf, des_perf,
max_perf, epp, fast_switch);
}
-static int msr_set_epp(struct amd_cpudata *cpudata, u8 epp)
+static int msr_set_epp(struct cpufreq_policy *policy, u8 epp)
{
+ struct amd_cpudata *cpudata = policy->driver_data;
u64 value, prev;
int ret;
@@ -281,6 +296,19 @@ static int msr_set_epp(struct amd_cpudata *cpudata, u8 epp)
value &= ~AMD_CPPC_EPP_PERF_MASK;
value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ union perf_cached perf = cpudata->perf;
+
+ trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
+ epp,
+ FIELD_GET(AMD_CPPC_MIN_PERF_MASK,
+ cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_MAX_PERF_MASK,
+ cpudata->cppc_req_cached),
+ policy->boost_enabled,
+ value != prev);
+ }
+
if (value == prev)
return 0;
@@ -299,15 +327,29 @@ static int msr_set_epp(struct amd_cpudata *cpudata, u8 epp)
DEFINE_STATIC_CALL(amd_pstate_set_epp, msr_set_epp);
-static inline int amd_pstate_set_epp(struct amd_cpudata *cpudata, u8 epp)
+static inline int amd_pstate_set_epp(struct cpufreq_policy *policy, u8 epp)
{
- return static_call(amd_pstate_set_epp)(cpudata, epp);
+ return static_call(amd_pstate_set_epp)(policy, epp);
}
-static int shmem_set_epp(struct amd_cpudata *cpudata, u8 epp)
+static int shmem_set_epp(struct cpufreq_policy *policy, u8 epp)
{
- int ret;
+ struct amd_cpudata *cpudata = policy->driver_data;
struct cppc_perf_ctrls perf_ctrls;
+ int ret;
+
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ union perf_cached perf = cpudata->perf;
+
+ trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
+ epp,
+ FIELD_GET(AMD_CPPC_MIN_PERF_MASK,
+ cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_MAX_PERF_MASK,
+ cpudata->cppc_req_cached),
+ policy->boost_enabled,
+ epp != cpudata->epp_cached);
+ }
if (epp == cpudata->epp_cached)
return 0;
@@ -339,17 +381,7 @@ static int amd_pstate_set_energy_pref_index(struct cpufreq_policy *policy,
return -EBUSY;
}
- if (trace_amd_pstate_epp_perf_enabled()) {
- union perf_cached perf = READ_ONCE(cpudata->perf);
-
- trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
- epp,
- FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cpudata->cppc_req_cached),
- FIELD_GET(AMD_CPPC_MAX_PERF_MASK, cpudata->cppc_req_cached),
- policy->boost_enabled);
- }
-
- return amd_pstate_set_epp(cpudata, epp);
+ return amd_pstate_set_epp(policy, epp);
}
static inline int msr_cppc_enable(bool enable)
@@ -492,15 +524,16 @@ static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
return static_call(amd_pstate_init_perf)(cpudata);
}
-static int shmem_update_perf(struct amd_cpudata *cpudata, u8 min_perf,
+static int shmem_update_perf(struct cpufreq_policy *policy, u8 min_perf,
u8 des_perf, u8 max_perf, u8 epp, bool fast_switch)
{
+ struct amd_cpudata *cpudata = policy->driver_data;
struct cppc_perf_ctrls perf_ctrls;
u64 value, prev;
int ret;
if (cppc_state == AMD_PSTATE_ACTIVE) {
- int ret = shmem_set_epp(cpudata, epp);
+ int ret = shmem_set_epp(policy, epp);
if (ret)
return ret;
@@ -515,6 +548,18 @@ static int shmem_update_perf(struct amd_cpudata *cpudata, u8 min_perf,
value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+
+ trace_amd_pstate_epp_perf(cpudata->cpu,
+ perf.highest_perf,
+ epp,
+ min_perf,
+ max_perf,
+ policy->boost_enabled,
+ value != prev);
+ }
+
if (value == prev)
return 0;
@@ -592,7 +637,7 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u8 min_perf,
cpudata->cpu, fast_switch);
}
- amd_pstate_update_perf(cpudata, min_perf, des_perf, max_perf, 0, fast_switch);
+ amd_pstate_update_perf(policy, min_perf, des_perf, max_perf, 0, fast_switch);
}
static int amd_pstate_verify(struct cpufreq_policy_data *policy_data)
@@ -1527,7 +1572,7 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
return ret;
WRITE_ONCE(cpudata->cppc_req_cached, value);
}
- ret = amd_pstate_set_epp(cpudata, cpudata->epp_default);
+ ret = amd_pstate_set_epp(policy, cpudata->epp_default);
if (ret)
return ret;
@@ -1568,14 +1613,8 @@ static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
epp = READ_ONCE(cpudata->epp_cached);
perf = READ_ONCE(cpudata->perf);
- if (trace_amd_pstate_epp_perf_enabled()) {
- trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf, epp,
- perf.min_limit_perf,
- perf.max_limit_perf,
- policy->boost_enabled);
- }
- return amd_pstate_update_perf(cpudata, perf.min_limit_perf, 0U,
+ return amd_pstate_update_perf(policy, perf.min_limit_perf, 0U,
perf.max_limit_perf, epp, false);
}
@@ -1615,14 +1654,7 @@ static int amd_pstate_epp_reenable(struct cpufreq_policy *policy)
if (ret)
pr_err("failed to enable amd pstate during resume, return %d\n", ret);
- if (trace_amd_pstate_epp_perf_enabled()) {
- trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
- cpudata->epp_cached,
- FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cpudata->cppc_req_cached),
- perf.highest_perf, policy->boost_enabled);
- }
-
- return amd_pstate_update_perf(cpudata, 0, 0, perf.highest_perf, cpudata->epp_cached, false);
+ return amd_pstate_update_perf(policy, 0, 0, perf.highest_perf, cpudata->epp_cached, false);
}
static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
@@ -1648,14 +1680,7 @@ static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
if (cpudata->suspended)
return 0;
- if (trace_amd_pstate_epp_perf_enabled()) {
- trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
- AMD_CPPC_EPP_BALANCE_POWERSAVE,
- perf.lowest_perf, perf.lowest_perf,
- policy->boost_enabled);
- }
-
- return amd_pstate_update_perf(cpudata, perf.lowest_perf, 0, perf.lowest_perf,
+ return amd_pstate_update_perf(policy, perf.lowest_perf, 0, perf.lowest_perf,
AMD_CPPC_EPP_BALANCE_POWERSAVE, false);
}
--
2.43.0