From: Chun-Tse Shao <ctshao@google.com>
To: linux-kernel@vger.kernel.org
Cc: Chun-Tse Shao <ctshao@google.com>,
peterz@infradead.org, mingo@redhat.com, acme@kernel.org,
namhyung@kernel.org, mark.rutland@arm.com,
alexander.shishkin@linux.intel.com, jolsa@kernel.org,
irogers@google.com, adrian.hunter@intel.com,
james.clark@linaro.org, zide.chen@intel.com,
ravi.bangoria@amd.com, linux-perf-users@vger.kernel.org
Subject: [PATCH v5 1/2] perf pmu intel: Generalize SNC cpumask adjustment for multiple platforms
Date: Tue, 7 Apr 2026 13:38:42 -0700 [thread overview]
Message-ID: <20260407203918.3178481-1-ctshao@google.com> (raw)
Prepare for supporting more Intel platforms with sub-NUMA clustering by
generalizing the GNR-specific logic.
Signed-off-by: Chun-Tse Shao <ctshao@google.com>
---
v5:
Split patch.
v4: lore.kernel.org/20260402205300.1953706-1-ctshao@google.com
Rebase.
v3: lore.kernel.org/20260212223942.3832857-1-ctshao@google.com
Fix a typo.
v2: lore.kernel.org/20260205232220.1980168-1-ctshao@google.com
Split EMR and GNR in the SNC2 IMC cpu map.
v1: lore.kernel.org/20260108184430.1210223-1-ctshao@google.com
tools/perf/arch/x86/util/pmu.c | 44 +++++++++++++++++++++-------------
1 file changed, 27 insertions(+), 17 deletions(-)
diff --git a/tools/perf/arch/x86/util/pmu.c b/tools/perf/arch/x86/util/pmu.c
index 0661e0f0b02d..938be36ec0f7 100644
--- a/tools/perf/arch/x86/util/pmu.c
+++ b/tools/perf/arch/x86/util/pmu.c
@@ -23,20 +23,28 @@
#include "util/env.h"
#include "util/header.h"
-static bool x86__is_intel_graniterapids(void)
+static bool x86__is_snc_supported(void)
{
- static bool checked_if_graniterapids;
- static bool is_graniterapids;
+ static bool checked_if_snc_supported;
+ static bool is_supported;
- if (!checked_if_graniterapids) {
- const char *graniterapids_cpuid = "GenuineIntel-6-A[DE]";
+ if (!checked_if_snc_supported) {
+
+ /* Graniterapids supports SNC configuration. */
+ static const char *const supported_cpuids[] = {
+ "GenuineIntel-6-A[DE]", /* Graniterapids */
+ };
char *cpuid = get_cpuid_str((struct perf_cpu){0});
- is_graniterapids = cpuid && strcmp_cpuid_str(graniterapids_cpuid, cpuid) == 0;
+ for (size_t i = 0; i < ARRAY_SIZE(supported_cpuids); i++) {
+ is_supported = cpuid && strcmp_cpuid_str(supported_cpuids[i], cpuid) == 0;
+ if (is_supported)
+ break;
+ }
free(cpuid);
- checked_if_graniterapids = true;
+ checked_if_snc_supported = true;
}
- return is_graniterapids;
+ return is_supported;
}
static struct perf_cpu_map *read_sysfs_cpu_map(const char *sysfs_path)
@@ -133,8 +141,8 @@ static int uncore_imc_snc(struct perf_pmu *pmu)
// Compute the IMC SNC using lookup tables.
unsigned int imc_num;
int snc_nodes = snc_nodes_per_l3_cache();
- const u8 snc2_map[] = {1, 1, 0, 0, 1, 1, 0, 0};
- const u8 snc3_map[] = {1, 1, 0, 0, 2, 2, 1, 1, 0, 0, 2, 2};
+ const u8 snc2_map[] = {1, 1, 0, 0};
+ const u8 snc3_map[] = {1, 1, 0, 0, 2, 2};
const u8 *snc_map;
size_t snc_map_len;
@@ -157,11 +165,12 @@ static int uncore_imc_snc(struct perf_pmu *pmu)
pr_warning("Unexpected: unable to compute IMC number '%s'\n", pmu->name);
return 0;
}
- if (imc_num >= snc_map_len) {
+ if (imc_num >= snc_map_len * perf_cpu_map__nr(pmu->cpus)) {
pr_warning("Unexpected IMC %d for SNC%d mapping\n", imc_num, snc_nodes);
return 0;
}
- return snc_map[imc_num];
+
+ return snc_map[imc_num % snc_map_len];
}
static int uncore_cha_imc_compute_cpu_adjust(int pmu_snc)
@@ -201,7 +210,7 @@ static int uncore_cha_imc_compute_cpu_adjust(int pmu_snc)
return cpu_adjust[pmu_snc];
}
-static void gnr_uncore_cha_imc_adjust_cpumask_for_snc(struct perf_pmu *pmu, bool cha)
+static void uncore_cha_imc_adjust_cpumask_for_snc(struct perf_pmu *pmu, bool cha)
{
// With sub-NUMA clustering (SNC) there is a NUMA node per SNC in the
// topology. For example, a two socket graniterapids machine may be set
@@ -301,11 +310,12 @@ void perf_pmu__arch_init(struct perf_pmu *pmu)
pmu->mem_events = perf_mem_events_intel_aux;
else
pmu->mem_events = perf_mem_events_intel;
- } else if (x86__is_intel_graniterapids()) {
+ } else if (x86__is_snc_supported()) {
if (strstarts(pmu->name, "uncore_cha_"))
- gnr_uncore_cha_imc_adjust_cpumask_for_snc(pmu, /*cha=*/true);
- else if (strstarts(pmu->name, "uncore_imc_"))
- gnr_uncore_cha_imc_adjust_cpumask_for_snc(pmu, /*cha=*/false);
+ uncore_cha_imc_adjust_cpumask_for_snc(pmu, /*cha=*/true);
+ else if (strstarts(pmu->name, "uncore_imc_") &&
+ !strstarts(pmu->name, "uncore_imc_free_running"))
+ uncore_cha_imc_adjust_cpumask_for_snc(pmu, /*cha=*/false);
}
}
}
--
2.53.0.1213.gd9a14994de-goog
next reply other threads:[~2026-04-07 20:39 UTC|newest]
Thread overview: 2+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-07 20:38 Chun-Tse Shao [this message]
2026-04-07 20:38 ` [PATCH v5 2/2] perf pmu intel: Adjust cpumasks for sub-NUMA clusters on Emeraldrapids Chun-Tse Shao
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260407203918.3178481-1-ctshao@google.com \
--to=ctshao@google.com \
--cc=acme@kernel.org \
--cc=adrian.hunter@intel.com \
--cc=alexander.shishkin@linux.intel.com \
--cc=irogers@google.com \
--cc=james.clark@linaro.org \
--cc=jolsa@kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-perf-users@vger.kernel.org \
--cc=mark.rutland@arm.com \
--cc=mingo@redhat.com \
--cc=namhyung@kernel.org \
--cc=peterz@infradead.org \
--cc=ravi.bangoria@amd.com \
--cc=zide.chen@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox