From: Ian Rogers <irogers@google.com>
To: Adrian Hunter <adrian.hunter@intel.com>,
Alexander Shishkin <alexander.shishkin@linux.intel.com>,
Arnaldo Carvalho de Melo <acme@kernel.org>,
Benjamin Gray <bgray@linux.ibm.com>,
Caleb Biggers <caleb.biggers@intel.com>,
Edward Baker <edward.baker@intel.com>,
Ian Rogers <irogers@google.com>, Ingo Molnar <mingo@redhat.com>,
James Clark <james.clark@linaro.org>,
Jing Zhang <renyu.zj@linux.alibaba.com>,
Jiri Olsa <jolsa@kernel.org>,
John Garry <john.g.garry@oracle.com>, Leo Yan <leo.yan@arm.com>,
Namhyung Kim <namhyung@kernel.org>,
Perry Taylor <perry.taylor@intel.com>,
Peter Zijlstra <peterz@infradead.org>,
Samantha Alt <samantha.alt@intel.com>,
Sandipan Das <sandipan.das@amd.com>,
Thomas Falcon <thomas.falcon@intel.com>,
Weilin Wang <weilin.wang@intel.com>, Xu Yang <xu.yang_2@nxp.com>,
linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org
Subject: [PATCH v9 46/48] perf jevents: Add collection of topdown like metrics for arm64
Date: Tue, 2 Dec 2025 09:50:41 -0800 [thread overview]
Message-ID: <20251202175043.623597-47-irogers@google.com> (raw)
In-Reply-To: <20251202175043.623597-1-irogers@google.com>
Metrics are created using legacy, common and recommended events. Some
events may be absent from a model's JSON, so a TryEvent helper returns
None for missing events and the metrics that depend on them are
skipped. To work around missing JSON events on cortex-a53, sysfs event
encodings are used as a fallback.
Signed-off-by: Ian Rogers <irogers@google.com>
---
An earlier review of this patch by Leo Yan is here:
https://lore.kernel.org/lkml/8168c713-005c-4fd9-a928-66763dab746a@arm.com/
Hopefully all corrections were made.
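
For reference, a usage sketch (not part of the patch): assuming perf is
built with these generated metrics and runs on one of the covered arm64
models, the new groups can be listed and exercised with:

  $ perf list metricgroup | grep lpm_topdown
  $ perf stat -M lpm_topdown_tl -a -- sleep 1

The lpm_topdown* group names come from Arm64Topdown() below; exactly
which metrics are produced depends on the events present in the model's
JSON.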
---
tools/perf/pmu-events/arm64_metrics.py | 145 ++++++++++++++++++++++++-
1 file changed, 142 insertions(+), 3 deletions(-)
diff --git a/tools/perf/pmu-events/arm64_metrics.py b/tools/perf/pmu-events/arm64_metrics.py
index ac717ca3513a..9678253e2e0e 100755
--- a/tools/perf/pmu-events/arm64_metrics.py
+++ b/tools/perf/pmu-events/arm64_metrics.py
@@ -2,13 +2,150 @@
 # SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
 import argparse
 import os
-from metric import (JsonEncodeMetric, JsonEncodeMetricGroupDescriptions, LoadEvents,
-                    MetricGroup)
+from typing import Optional
+from metric import (d_ratio, Event, JsonEncodeMetric, JsonEncodeMetricGroupDescriptions,
+                    LoadEvents, Metric, MetricGroup)
 # Global command line arguments.
 _args = None
+def Arm64Topdown() -> MetricGroup:
+  """Returns a MetricGroup representing ARM64 topdown like metrics."""
+  def TryEvent(name: str) -> Optional[Event]:
+    # Skip an event if not in the json files.
+    try:
+      return Event(name)
+    except:
+      return None
+  # ARM models like a53 lack JSON for INST_RETIRED but have the
+  # architectural standard event in sysfs. Use the PMU name to identify
+  # the sysfs event.
+  pmu_name = f'armv8_{_args.model.replace("-", "_")}'
+  ins = Event("instructions")
+  ins_ret = Event("INST_RETIRED", f"{pmu_name}/inst_retired/")
+  cycles = Event("cpu\\-cycles")
+  stall_fe = TryEvent("STALL_FRONTEND")
+  stall_be = TryEvent("STALL_BACKEND")
+  br_ret = TryEvent("BR_RETIRED")
+  br_mp_ret = TryEvent("BR_MIS_PRED_RETIRED")
+  dtlb_walk = TryEvent("DTLB_WALK")
+  itlb_walk = TryEvent("ITLB_WALK")
+  l1d_tlb = TryEvent("L1D_TLB")
+  l1i_tlb = TryEvent("L1I_TLB")
+  l1d_refill = Event("L1D_CACHE_REFILL", f"{pmu_name}/l1d_cache_refill/")
+  l2d_refill = Event("L2D_CACHE_REFILL", f"{pmu_name}/l2d_cache_refill/")
+  l1i_refill = Event("L1I_CACHE_REFILL", f"{pmu_name}/l1i_cache_refill/")
+  l1d_access = Event("L1D_CACHE", f"{pmu_name}/l1d_cache/")
+  l2d_access = Event("L2D_CACHE", f"{pmu_name}/l2d_cache/")
+  llc_access = TryEvent("LL_CACHE_RD")
+  l1i_access = Event("L1I_CACHE", f"{pmu_name}/l1i_cache/")
+  llc_miss_rd = TryEvent("LL_CACHE_MISS_RD")
+  ase_spec = TryEvent("ASE_SPEC")
+  ld_spec = TryEvent("LD_SPEC")
+  st_spec = TryEvent("ST_SPEC")
+  vfp_spec = TryEvent("VFP_SPEC")
+  dp_spec = TryEvent("DP_SPEC")
+  br_immed_spec = TryEvent("BR_IMMED_SPEC")
+  br_indirect_spec = TryEvent("BR_INDIRECT_SPEC")
+  br_ret_spec = TryEvent("BR_RETURN_SPEC")
+  crypto_spec = TryEvent("CRYPTO_SPEC")
+  inst_spec = TryEvent("INST_SPEC")
+  return MetricGroup("lpm_topdown", [
+      MetricGroup("lpm_topdown_tl", [
+          Metric("lpm_topdown_tl_ipc", "Instructions per cycle", d_ratio(
+              ins, cycles), "insn/cycle"),
+          Metric("lpm_topdown_tl_stall_fe_rate", "Frontend stalls to all cycles",
+                 d_ratio(stall_fe, cycles), "100%") if stall_fe else None,
+          Metric("lpm_topdown_tl_stall_be_rate", "Backend stalls to all cycles",
+                 d_ratio(stall_be, cycles), "100%") if stall_be else None,
+      ]),
+      MetricGroup("lpm_topdown_fe_bound", [
+          MetricGroup("lpm_topdown_fe_br", [
+              Metric("lpm_topdown_fe_br_mp_per_insn",
+                     "Branch mispredicts per instruction retired",
+                     d_ratio(br_mp_ret, ins_ret), "br/insn") if br_mp_ret else None,
+              Metric("lpm_topdown_fe_br_ins_rate",
+                     "Branches per instruction retired", d_ratio(
+                         br_ret, ins_ret), "100%") if br_ret else None,
+              Metric("lpm_topdown_fe_br_mispredict",
+                     "Branch mispredicts per branch instruction",
+                     d_ratio(br_mp_ret, br_ret), "100%") if (br_mp_ret and br_ret) else None,
+          ]),
+          MetricGroup("lpm_topdown_fe_itlb", [
+              Metric("lpm_topdown_fe_itlb_walks", "Itlb walks per insn",
+                     d_ratio(itlb_walk, ins_ret), "walk/insn"),
+              Metric("lpm_topdown_fe_itlb_walk_rate", "Itlb walks per L1I TLB access",
+                     d_ratio(itlb_walk, l1i_tlb) if l1i_tlb else None, "100%"),
+          ]) if itlb_walk else None,
+          MetricGroup("lpm_topdown_fe_icache", [
+              Metric("lpm_topdown_fe_icache_l1i_per_insn",
+                     "L1I cache refills per instruction",
+                     d_ratio(l1i_refill, ins_ret), "l1i/insn"),
+              Metric("lpm_topdown_fe_icache_l1i_miss_rate",
+                     "L1I cache refills per L1I cache access",
+                     d_ratio(l1i_refill, l1i_access), "100%"),
+          ]),
+      ]),
+      MetricGroup("lpm_topdown_be_bound", [
+          MetricGroup("lpm_topdown_be_dtlb", [
+              Metric("lpm_topdown_be_dtlb_walks", "Dtlb walks per instruction",
+                     d_ratio(dtlb_walk, ins_ret), "walk/insn"),
+              Metric("lpm_topdown_be_dtlb_walk_rate", "Dtlb walks per L1D TLB access",
+                     d_ratio(dtlb_walk, l1d_tlb) if l1d_tlb else None, "100%"),
+          ]) if dtlb_walk else None,
+          MetricGroup("lpm_topdown_be_mix", [
+              Metric("lpm_topdown_be_mix_ld", "Percentage of load instructions",
+                     d_ratio(ld_spec, inst_spec), "100%") if ld_spec else None,
+              Metric("lpm_topdown_be_mix_st", "Percentage of store instructions",
+                     d_ratio(st_spec, inst_spec), "100%") if st_spec else None,
+              Metric("lpm_topdown_be_mix_simd", "Percentage of SIMD instructions",
+                     d_ratio(ase_spec, inst_spec), "100%") if ase_spec else None,
+              Metric("lpm_topdown_be_mix_fp",
+                     "Percentage of floating point instructions",
+                     d_ratio(vfp_spec, inst_spec), "100%") if vfp_spec else None,
+              Metric("lpm_topdown_be_mix_dp",
+                     "Percentage of data processing instructions",
+                     d_ratio(dp_spec, inst_spec), "100%") if dp_spec else None,
+ Metric("lpm_topdown_be_mix_crypto",
+ "Percentage of data processing instructions",
+ d_ratio(crypto_spec, inst_spec), "100%") if crypto_spec else None,
+              Metric(
+                  "lpm_topdown_be_mix_br", "Percentage of branch instructions",
+                  d_ratio(br_immed_spec + br_indirect_spec + br_ret_spec,
+                          inst_spec), "100%") if br_immed_spec and br_indirect_spec and br_ret_spec else None,
+          ], description="Breakdown of instructions by type. Counts include both useful and wasted speculative instructions"
+          ) if inst_spec else None,
+          MetricGroup("lpm_topdown_be_dcache", [
+              MetricGroup("lpm_topdown_be_dcache_l1", [
+                  Metric("lpm_topdown_be_dcache_l1_per_insn",
+                         "L1D cache refills per instruction",
+                         d_ratio(l1d_refill, ins_ret), "refills/insn"),
+                  Metric("lpm_topdown_be_dcache_l1_miss_rate",
+                         "L1D cache refills per L1D cache access",
+                         d_ratio(l1d_refill, l1d_access), "100%")
+              ]),
+              MetricGroup("lpm_topdown_be_dcache_l2", [
+                  Metric("lpm_topdown_be_dcache_l2_per_insn",
+                         "L2D cache refills per instruction",
+                         d_ratio(l2d_refill, ins_ret), "refills/insn"),
+                  Metric("lpm_topdown_be_dcache_l2_miss_rate",
+                         "L2D cache refills per L2D cache access",
+                         d_ratio(l2d_refill, l2d_access), "100%")
+              ]),
+              MetricGroup("lpm_topdown_be_dcache_llc", [
+                  Metric("lpm_topdown_be_dcache_llc_per_insn",
+                         "Last level cache misses per instruction",
+                         d_ratio(llc_miss_rd, ins_ret), "miss/insn"),
+                  Metric("lpm_topdown_be_dcache_llc_miss_rate",
+                         "Last level cache misses per last level cache access",
+                         d_ratio(llc_miss_rd, llc_access), "100%")
+              ]) if llc_miss_rd and llc_access else None,
+          ]),
+      ]),
+  ])
+
+
 def main() -> None:
   global _args
@@ -34,7 +171,9 @@ def main() -> None:
   directory = f"{_args.events_path}/arm64/{_args.vendor}/{_args.model}/"
   LoadEvents(directory)
-  all_metrics = MetricGroup("", [])
+  all_metrics = MetricGroup("", [
+      Arm64Topdown(),
+  ])
   if _args.metricgroups:
     print(JsonEncodeMetricGroupDescriptions(all_metrics))
--
2.52.0.158.g65b55ccf14-goog
Thread overview: 55+ messages
2025-12-02 17:49 [PATCH v9 00/48] AMD, ARM, Intel metric generation with Python Ian Rogers
2025-12-02 17:49 ` [PATCH v9 01/48] perf python: Correct copying of metric_leader in an evsel Ian Rogers
2025-12-02 17:49 ` [PATCH v9 02/48] perf ilist: Be tolerant of reading a metric on the wrong CPU Ian Rogers
2025-12-02 17:49 ` [PATCH v9 03/48] perf jevents: Allow multiple metricgroups.json files Ian Rogers
2025-12-02 17:49 ` [PATCH v9 04/48] perf jevents: Update metric constraint support Ian Rogers
2025-12-02 17:50 ` [PATCH v9 05/48] perf jevents: Add descriptions to metricgroup abstraction Ian Rogers
2025-12-02 17:50 ` [PATCH v9 06/48] perf jevents: Allow metric groups not to be named Ian Rogers
2025-12-02 17:50 ` [PATCH v9 07/48] perf jevents: Support parsing negative exponents Ian Rogers
2025-12-02 17:50 ` [PATCH v9 08/48] perf jevents: Term list fix in event parsing Ian Rogers
2025-12-02 17:50 ` [PATCH v9 09/48] perf jevents: Add threshold expressions to Metric Ian Rogers
2025-12-02 17:50 ` [PATCH v9 10/48] perf jevents: Move json encoding to its own functions Ian Rogers
2025-12-02 17:50 ` [PATCH v9 11/48] perf jevents: Drop duplicate pending metrics Ian Rogers
2025-12-02 17:50 ` [PATCH v9 12/48] perf jevents: Skip optional metrics in metric group list Ian Rogers
2025-12-02 17:50 ` [PATCH v9 13/48] perf jevents: Build support for generating metrics from python Ian Rogers
2025-12-02 17:50 ` [PATCH v9 14/48] perf jevents: Add load event json to verify and allow fallbacks Ian Rogers
2025-12-02 17:50 ` [PATCH v9 15/48] perf jevents: Add RAPL event metric for AMD zen models Ian Rogers
2025-12-02 17:50 ` [PATCH v9 16/48] perf jevents: Add idle " Ian Rogers
2025-12-02 17:50 ` [PATCH v9 17/48] perf jevents: Add upc metric for uops per cycle for AMD Ian Rogers
2025-12-08 9:46 ` Sandipan Das
2025-12-02 17:50 ` [PATCH v9 18/48] perf jevents: Add br metric group for branch statistics on AMD Ian Rogers
2025-12-08 12:42 ` Sandipan Das
2025-12-02 17:50 ` [PATCH v9 19/48] perf jevents: Add itlb metric group for AMD Ian Rogers
2025-12-02 17:50 ` [PATCH v9 20/48] perf jevents: Add dtlb " Ian Rogers
2025-12-02 17:50 ` [PATCH v9 21/48] perf jevents: Add uncore l3 " Ian Rogers
2025-12-02 17:50 ` [PATCH v9 22/48] perf jevents: Add load store breakdown metrics ldst " Ian Rogers
2025-12-08 9:21 ` Sandipan Das
2025-12-02 17:50 ` [PATCH v9 23/48] perf jevents: Add context switch metrics " Ian Rogers
2025-12-02 17:50 ` [PATCH v9 24/48] perf jevents: Add RAPL metrics for all Intel models Ian Rogers
2025-12-02 17:50 ` [PATCH v9 25/48] perf jevents: Add idle metric for " Ian Rogers
2025-12-02 17:50 ` [PATCH v9 26/48] perf jevents: Add CheckPmu to see if a PMU is in loaded json events Ian Rogers
2025-12-02 17:50 ` [PATCH v9 27/48] perf jevents: Add smi metric group for Intel models Ian Rogers
2025-12-02 17:50 ` [PATCH v9 28/48] perf jevents: Mark metrics with experimental events as experimental Ian Rogers
2025-12-02 17:50 ` [PATCH v9 29/48] perf jevents: Add tsx metric group for Intel models Ian Rogers
2025-12-02 17:50 ` [PATCH v9 30/48] perf jevents: Add br metric group for branch statistics on Intel Ian Rogers
2025-12-02 17:50 ` [PATCH v9 31/48] perf jevents: Add software prefetch (swpf) metric group for Intel Ian Rogers
2025-12-02 17:50 ` [PATCH v9 32/48] perf jevents: Add ports metric group giving utilization on Intel Ian Rogers
2025-12-02 17:50 ` [PATCH v9 33/48] perf jevents: Add L2 metrics for Intel Ian Rogers
2025-12-02 17:50 ` [PATCH v9 34/48] perf jevents: Add load store breakdown metrics ldst " Ian Rogers
2025-12-02 17:50 ` [PATCH v9 35/48] perf jevents: Add ILP metrics " Ian Rogers
2025-12-02 17:50 ` [PATCH v9 36/48] perf jevents: Add context switch " Ian Rogers
2025-12-02 17:50 ` [PATCH v9 37/48] perf jevents: Add FPU " Ian Rogers
2025-12-02 17:50 ` [PATCH v9 38/48] perf jevents: Add Miss Level Parallelism (MLP) metric " Ian Rogers
2025-12-02 17:50 ` [PATCH v9 39/48] perf jevents: Add mem_bw " Ian Rogers
2025-12-02 17:50 ` [PATCH v9 40/48] perf jevents: Add local/remote "mem" breakdown metrics " Ian Rogers
2025-12-02 17:50 ` [PATCH v9 41/48] perf jevents: Add dir " Ian Rogers
2025-12-02 17:50 ` [PATCH v9 42/48] perf jevents: Add C-State metrics from the PCU PMU " Ian Rogers
2025-12-02 17:50 ` [PATCH v9 43/48] perf jevents: Add local/remote miss latency metrics " Ian Rogers
2025-12-02 17:50 ` [PATCH v9 44/48] perf jevents: Add upi_bw metric " Ian Rogers
2025-12-02 17:50 ` [PATCH v9 45/48] perf jevents: Add mesh bandwidth saturation " Ian Rogers
2025-12-02 17:50 ` Ian Rogers [this message]
2025-12-09 11:31 ` [PATCH v9 46/48] perf jevents: Add collection of topdown like metrics for arm64 James Clark
2025-12-09 21:23 ` Ian Rogers
2025-12-02 17:50 ` [PATCH v9 47/48] perf jevents: Add cycles breakdown metric for arm64/AMD/Intel Ian Rogers
2025-12-02 17:50 ` [PATCH v9 48/48] perf jevents: Validate that all names given an Event Ian Rogers
2025-12-03 17:59 ` [PATCH v9 00/48] AMD, ARM, Intel metric generation with Python Namhyung Kim