linux-perf-users.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Ian Rogers <irogers@google.com>
To: Adrian Hunter <adrian.hunter@intel.com>,
	 Alexander Shishkin <alexander.shishkin@linux.intel.com>,
	 Arnaldo Carvalho de Melo <acme@kernel.org>,
	Benjamin Gray <bgray@linux.ibm.com>,
	 Caleb Biggers <caleb.biggers@intel.com>,
	Edward Baker <edward.baker@intel.com>,
	 Ian Rogers <irogers@google.com>, Ingo Molnar <mingo@redhat.com>,
	 James Clark <james.clark@linaro.org>,
	Jing Zhang <renyu.zj@linux.alibaba.com>,
	 Jiri Olsa <jolsa@kernel.org>,
	John Garry <john.g.garry@oracle.com>, Leo Yan <leo.yan@arm.com>,
	 Namhyung Kim <namhyung@kernel.org>,
	Perry Taylor <perry.taylor@intel.com>,
	 Peter Zijlstra <peterz@infradead.org>,
	Samantha Alt <samantha.alt@intel.com>,
	 Sandipan Das <sandipan.das@amd.com>,
	Thomas Falcon <thomas.falcon@intel.com>,
	 Weilin Wang <weilin.wang@intel.com>, Xu Yang <xu.yang_2@nxp.com>,
	linux-kernel@vger.kernel.org,  linux-perf-users@vger.kernel.org
Subject: [PATCH v8 47/52] perf jevents: Add local/remote miss latency metrics for Intel
Date: Wed, 12 Nov 2025 19:20:35 -0800	[thread overview]
Message-ID: <20251113032040.1994090-48-irogers@google.com> (raw)
In-Reply-To: <20251113032040.1994090-1-irogers@google.com>

Derive the average miss latency from CBOX/CHA TOR occupancy and inserts
events, as described in Intel's uncore performance monitoring reference.

Signed-off-by: Ian Rogers <irogers@google.com>
---
 tools/perf/pmu-events/intel_metrics.py | 70 ++++++++++++++++++++++++--
 1 file changed, 67 insertions(+), 3 deletions(-)

diff --git a/tools/perf/pmu-events/intel_metrics.py b/tools/perf/pmu-events/intel_metrics.py
index 118fe0fc05a3..037f9b2ea1b6 100755
--- a/tools/perf/pmu-events/intel_metrics.py
+++ b/tools/perf/pmu-events/intel_metrics.py
@@ -6,9 +6,10 @@ import math
 import os
 import re
 from typing import Optional
-from metric import (d_ratio, has_event, max, CheckPmu, Event, JsonEncodeMetric,
-                    JsonEncodeMetricGroupDescriptions, Literal, LoadEvents,
-                    Metric, MetricConstraint, MetricGroup, MetricRef, Select)
+from metric import (d_ratio, has_event, max, source_count, CheckPmu, Event,
+                    JsonEncodeMetric, JsonEncodeMetricGroupDescriptions,
+                    Literal, LoadEvents, Metric, MetricConstraint, MetricGroup,
+                    MetricRef, Select)
 
 # Global command line arguments.
 _args = None
@@ -624,6 +625,68 @@ def IntelL2() -> Optional[MetricGroup]:
     ], description="L2 data cache analysis")
 
 
+def IntelMissLat() -> Optional[MetricGroup]:
+    try:
+        ticks = Event("UNC_CHA_CLOCKTICKS", "UNC_C_CLOCKTICKS")
+        data_rd_loc_occ = Event("UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL",
+                                "UNC_CHA_TOR_OCCUPANCY.IA_MISS",
+                                "UNC_C_TOR_OCCUPANCY.MISS_LOCAL_OPCODE",
+                                "UNC_C_TOR_OCCUPANCY.MISS_OPCODE")
+        data_rd_loc_ins = Event("UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL",
+                                "UNC_CHA_TOR_INSERTS.IA_MISS",
+                                "UNC_C_TOR_INSERTS.MISS_LOCAL_OPCODE",
+                                "UNC_C_TOR_INSERTS.MISS_OPCODE")
+        data_rd_rem_occ = Event("UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_REMOTE",
+                                "UNC_CHA_TOR_OCCUPANCY.IA_MISS",
+                                "UNC_C_TOR_OCCUPANCY.MISS_REMOTE_OPCODE",
+                                "UNC_C_TOR_OCCUPANCY.NID_MISS_OPCODE")
+        data_rd_rem_ins = Event("UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE",
+                                "UNC_CHA_TOR_INSERTS.IA_MISS",
+                                "UNC_C_TOR_INSERTS.MISS_REMOTE_OPCODE",
+                                "UNC_C_TOR_INSERTS.NID_MISS_OPCODE")
+    except:
+        return None
+
+    if (data_rd_loc_occ.name == "UNC_C_TOR_OCCUPANCY.MISS_LOCAL_OPCODE" or
+            data_rd_loc_occ.name == "UNC_C_TOR_OCCUPANCY.MISS_OPCODE"):
+        data_rd = 0x182
+        for e in [data_rd_loc_occ, data_rd_loc_ins, data_rd_rem_occ, data_rd_rem_ins]:
+            e.name += f"/filter_opc={hex(data_rd)}/"
+    elif data_rd_loc_occ.name == "UNC_CHA_TOR_OCCUPANCY.IA_MISS":
+        # Demand Data Read - Full cache-line read requests from core for
+        # lines to be cached in S or E, typically for data
+        demand_data_rd = 0x202
+        #  LLC Prefetch Data - Uncore will first look up the line in the
+        #  LLC; for a cache hit, the LRU will be updated, on a miss, the
+        #  DRd will be initiated
+        llc_prefetch_data = 0x25a
+        local_filter = (f"/filter_opc0={hex(demand_data_rd)},"
+                        f"filter_opc1={hex(llc_prefetch_data)},"
+                        "filter_loc,filter_nm,filter_not_nm/")
+        remote_filter = (f"/filter_opc0={hex(demand_data_rd)},"
+                         f"filter_opc1={hex(llc_prefetch_data)},"
+                         "filter_rem,filter_nm,filter_not_nm/")
+        for e in [data_rd_loc_occ, data_rd_loc_ins]:
+            e.name += local_filter
+        for e in [data_rd_rem_occ, data_rd_rem_ins]:
+            e.name += remote_filter
+    else:
+        assert data_rd_loc_occ.name == "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL", data_rd_loc_occ
+
+    ticks_per_cha = ticks / source_count(data_rd_loc_ins)
+    loc_lat = interval_sec * 1e9 * data_rd_loc_occ / \
+        (ticks_per_cha * data_rd_loc_ins)
+    ticks_per_cha = ticks / source_count(data_rd_rem_ins)
+    rem_lat = interval_sec * 1e9 * data_rd_rem_occ / \
+        (ticks_per_cha * data_rd_rem_ins)
+    return MetricGroup("lpm_miss_lat", [
+        Metric("lpm_miss_lat_loc", "Local to a socket miss latency in nanoseconds",
+               loc_lat, "ns"),
+        Metric("lpm_miss_lat_rem", "Remote to a socket miss latency in nanoseconds",
+               rem_lat, "ns"),
+    ])
+
+
 def IntelMlp() -> Optional[Metric]:
     try:
         l1d = Event("L1D_PEND_MISS.PENDING")
@@ -1005,6 +1068,7 @@ def main() -> None:
         IntelIlp(),
         IntelL2(),
         IntelLdSt(),
+        IntelMissLat(),
         IntelMlp(),
         IntelPorts(),
         IntelSwpf(),
-- 
2.51.2.1041.gc1ab5b90ca-goog


  parent reply	other threads:[~2025-11-13  3:22 UTC|newest]

Thread overview: 65+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-11-13  3:19 [PATCH v8 00/52] AMD, ARM, Intel metric generation with Python Ian Rogers
2025-11-13  3:19 ` [PATCH v8 01/52] perf python: Correct copying of metric_leader in an evsel Ian Rogers
2025-11-13  3:19 ` [PATCH v8 02/52] perf ilist: Be tolerant of reading a metric on the wrong CPU Ian Rogers
2025-11-13  3:19 ` [PATCH v8 03/52] perf jevents: Allow multiple metricgroups.json files Ian Rogers
2025-11-13  3:19 ` [PATCH v8 04/52] perf jevents: Update metric constraint support Ian Rogers
2025-11-13  3:19 ` [PATCH v8 05/52] perf jevents: Add descriptions to metricgroup abstraction Ian Rogers
2025-11-13  3:19 ` [PATCH v8 06/52] perf jevents: Allow metric groups not to be named Ian Rogers
2025-11-13  3:19 ` [PATCH v8 07/52] perf jevents: Support parsing negative exponents Ian Rogers
2025-11-13  3:19 ` [PATCH v8 08/52] perf jevents: Term list fix in event parsing Ian Rogers
2025-11-13  3:19 ` [PATCH v8 09/52] perf jevents: Add threshold expressions to Metric Ian Rogers
2025-11-13  3:19 ` [PATCH v8 10/52] perf jevents: Move json encoding to its own functions Ian Rogers
2025-11-13  3:19 ` [PATCH v8 11/52] perf jevents: Drop duplicate pending metrics Ian Rogers
2025-11-13  3:20 ` [PATCH v8 12/52] perf jevents: Skip optional metrics in metric group list Ian Rogers
2025-11-13  3:20 ` [PATCH v8 13/52] perf jevents: Build support for generating metrics from python Ian Rogers
2025-11-13  3:20 ` [PATCH v8 14/52] perf jevents: Add load event json to verify and allow fallbacks Ian Rogers
2025-11-13  3:20 ` [PATCH v8 15/52] perf jevents: Add RAPL event metric for AMD zen models Ian Rogers
2025-11-26  5:05   ` Sandipan Das
2025-11-28  9:20     ` Ian Rogers
2025-11-28 11:33       ` Sandipan Das
2025-11-13  3:20 ` [PATCH v8 16/52] perf jevents: Add idle " Ian Rogers
2025-11-13  3:20 ` [PATCH v8 17/52] perf jevents: Add upc metric for uops per cycle for AMD Ian Rogers
2025-11-13  3:20 ` [PATCH v8 18/52] perf jevents: Add br metric group for branch statistics on AMD Ian Rogers
2025-11-13  3:20 ` [PATCH v8 19/52] perf jevents: Add software prefetch (swpf) metric group for AMD Ian Rogers
2025-11-26 10:05   ` Sandipan Das
2025-11-13  3:20 ` [PATCH v8 20/52] perf jevents: Add hardware prefetch (hwpf) " Ian Rogers
2025-11-26 10:17   ` Sandipan Das
2025-11-13  3:20 ` [PATCH v8 21/52] perf jevents: Add itlb " Ian Rogers
2025-11-13  3:20 ` [PATCH v8 22/52] perf jevents: Add dtlb " Ian Rogers
2025-11-13  3:20 ` [PATCH v8 23/52] perf jevents: Add uncore l3 " Ian Rogers
2025-11-26  5:20   ` Sandipan Das
2025-11-13  3:20 ` [PATCH v8 24/52] perf jevents: Add load store breakdown metrics ldst " Ian Rogers
2025-11-13  3:20 ` [PATCH v8 25/52] perf jevents: Add ILP metrics " Ian Rogers
2025-11-26  6:26   ` Sandipan Das
2025-11-13  3:20 ` [PATCH v8 26/52] perf jevents: Add context switch " Ian Rogers
2025-11-13  3:20 ` [PATCH v8 27/52] perf jevents: Add uop cache hit/miss rates " Ian Rogers
2025-11-26  5:42   ` Sandipan Das
2025-11-13  3:20 ` [PATCH v8 28/52] perf jevents: Add RAPL metrics for all Intel models Ian Rogers
2025-11-13  3:20 ` [PATCH v8 29/52] perf jevents: Add idle metric for " Ian Rogers
2025-11-13  3:20 ` [PATCH v8 30/52] perf jevents: Add CheckPmu to see if a PMU is in loaded json events Ian Rogers
2025-11-13  3:20 ` [PATCH v8 31/52] perf jevents: Add smi metric group for Intel models Ian Rogers
2025-11-13  3:20 ` [PATCH v8 32/52] perf jevents: Mark metrics with experimental events as experimental Ian Rogers
2025-11-13  3:20 ` [PATCH v8 33/52] perf jevents: Add tsx metric group for Intel models Ian Rogers
2025-11-13  3:20 ` [PATCH v8 34/52] perf jevents: Add br metric group for branch statistics on Intel Ian Rogers
2025-11-13  3:20 ` [PATCH v8 35/52] perf jevents: Add software prefetch (swpf) metric group for Intel Ian Rogers
2025-11-13  3:20 ` [PATCH v8 36/52] perf jevents: Add ports metric group giving utilization on Intel Ian Rogers
2025-11-13  3:20 ` [PATCH v8 37/52] perf jevents: Add L2 metrics for Intel Ian Rogers
2025-11-13  3:20 ` [PATCH v8 38/52] perf jevents: Add load store breakdown metrics ldst " Ian Rogers
2025-11-13  3:20 ` [PATCH v8 39/52] perf jevents: Add ILP metrics " Ian Rogers
2025-11-13  3:20 ` [PATCH v8 40/52] perf jevents: Add context switch " Ian Rogers
2025-11-13  3:20 ` [PATCH v8 41/52] perf jevents: Add FPU " Ian Rogers
2025-11-13  3:20 ` [PATCH v8 42/52] perf jevents: Add Miss Level Parallelism (MLP) metric " Ian Rogers
2025-11-13  3:20 ` [PATCH v8 43/52] perf jevents: Add mem_bw " Ian Rogers
2025-11-13  3:20 ` [PATCH v8 44/52] perf jevents: Add local/remote "mem" breakdown metrics " Ian Rogers
2025-11-13  3:20 ` [PATCH v8 45/52] perf jevents: Add dir " Ian Rogers
2025-11-13  3:20 ` [PATCH v8 46/52] perf jevents: Add C-State metrics from the PCU PMU " Ian Rogers
2025-11-13  3:20 ` Ian Rogers [this message]
2025-11-13  3:20 ` [PATCH v8 48/52] perf jevents: Add upi_bw metric " Ian Rogers
2025-11-13  3:20 ` [PATCH v8 49/52] perf jevents: Add mesh bandwidth saturation " Ian Rogers
2025-11-13  3:20 ` [PATCH v8 50/52] perf jevents: Add collection of topdown like metrics for arm64 Ian Rogers
2025-11-13  3:20 ` [PATCH v8 51/52] perf jevents: Add cycles breakdown metric for arm64/AMD/Intel Ian Rogers
2025-11-26  6:32   ` Sandipan Das
2025-11-13  3:20 ` [PATCH v8 52/52] perf jevents: Validate that all names given an Event Ian Rogers
2025-11-19 18:30 ` [PATCH v8 00/52] AMD, ARM, Intel metric generation with Python Ian Rogers
2025-11-20 20:32 ` Falcon, Thomas
2025-11-20 21:10 ` Namhyung Kim

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20251113032040.1994090-48-irogers@google.com \
    --to=irogers@google.com \
    --cc=acme@kernel.org \
    --cc=adrian.hunter@intel.com \
    --cc=alexander.shishkin@linux.intel.com \
    --cc=bgray@linux.ibm.com \
    --cc=caleb.biggers@intel.com \
    --cc=edward.baker@intel.com \
    --cc=james.clark@linaro.org \
    --cc=john.g.garry@oracle.com \
    --cc=jolsa@kernel.org \
    --cc=leo.yan@arm.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-perf-users@vger.kernel.org \
    --cc=mingo@redhat.com \
    --cc=namhyung@kernel.org \
    --cc=perry.taylor@intel.com \
    --cc=peterz@infradead.org \
    --cc=renyu.zj@linux.alibaba.com \
    --cc=samantha.alt@intel.com \
    --cc=sandipan.das@amd.com \
    --cc=thomas.falcon@intel.com \
    --cc=weilin.wang@intel.com \
    --cc=xu.yang_2@nxp.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).