* [PATCH 1/7] perf/x86/intel: Support newly introduced 4 OMR MSRs for DMR & NVL
2025-11-20 5:34 [PATCH 0/7] Enable core PMU for DMR and NVL Dapeng Mi
@ 2025-11-20 5:34 ` Dapeng Mi
2025-11-20 5:34 ` [PATCH 2/7] perf/x86/intel: Add support for PEBS memory auxiliary info field in DMR Dapeng Mi
` (5 subsequent siblings)
6 siblings, 0 replies; 10+ messages in thread
From: Dapeng Mi @ 2025-11-20 5:34 UTC (permalink / raw)
To: Peter Zijlstra, Ingo Molnar, Arnaldo Carvalho de Melo,
Namhyung Kim, Ian Rogers, Adrian Hunter, Alexander Shishkin,
Andi Kleen, Eranian Stephane
Cc: linux-kernel, linux-perf-users, Dapeng Mi, Zide Chen,
Falcon Thomas, Xudong Hao, Dapeng Mi
Diamond Rapids and Nova Lake feature an expanded facility, the
Off-Module Response (OMR) facility, which replaces the Off-Core
Response (OCR) Performance Monitoring of previous processors.
Legacy microarchitectures used the OCR facility to evaluate off-core
and multi-core off-module transactions. The appropriately renamed OMR
facility improves on the OCR capability to provide scalable coverage of
the new memory systems found in multi-core module designs.
Similar to OCR, 4 additional off-module configuration MSRs,
OFFMODULE_RSP_0 ~ OFFMODULE_RSP_3, are introduced to specify the
attributes of off-module transactions.
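As a rough, out-of-tree illustration of the pairing described above (the
MSR address and umask bit positions mirror the hunks below; the program
itself is purely illustrative and touches no real MSRs):

  /*
   * Illustrative user-space sketch: maps an OMR index 0-3 to the EVENTSEL
   * umask bit that selects it and to the OFFMODULE_RSP (OMR) MSR that
   * holds the off-module transaction attributes.
   */
  #include <stdio.h>
  #include <stdint.h>

  #define MSR_OMR_0 0x3e0                 /* OMR_1..OMR_3 follow at 0x3e1..0x3e3 */

  int main(void)
  {
          for (int omr_idx = 0; omr_idx < 4; omr_idx++) {
                  uint64_t umask = 1ULL << (8 + omr_idx); /* umask bit selecting OMR_x */
                  uint32_t msr = MSR_OMR_0 + omr_idx;     /* attribute MSR */

                  printf("OMR_%d: umask bit 0x%04llx, MSR 0x%03x\n",
                         omr_idx, (unsigned long long)umask, msr);
          }
          return 0;
  }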
For more details about OMR, please refer to section 16.1 "OFF-MODULE
RESPONSE (OMR) FACILITY" in ISE documentation.
This patch adds support for these 4 OMR MSRs.
ISE link: https://www.intel.com/content/www/us/en/content-details/869288/intel-architecture-instruction-set-extensions-programming-reference.html
Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
---
arch/x86/events/intel/core.c | 45 +++++++++++++++++++++++---------
arch/x86/events/perf_event.h | 5 ++++
arch/x86/include/asm/msr-index.h | 5 ++++
3 files changed, 42 insertions(+), 13 deletions(-)
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index aad89c9d9514..5970f7c20101 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3529,17 +3529,24 @@ static int intel_alt_er(struct cpu_hw_events *cpuc,
struct extra_reg *extra_regs = hybrid(cpuc->pmu, extra_regs);
int alt_idx = idx;
- if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
- return idx;
-
- if (idx == EXTRA_REG_RSP_0)
- alt_idx = EXTRA_REG_RSP_1;
-
- if (idx == EXTRA_REG_RSP_1)
- alt_idx = EXTRA_REG_RSP_0;
+ if (idx == EXTRA_REG_RSP_0 || idx == EXTRA_REG_RSP_1) {
+ if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
+ return idx;
+ if (++alt_idx > EXTRA_REG_RSP_1)
+ alt_idx = EXTRA_REG_RSP_0;
+ if (config & ~extra_regs[alt_idx].valid_mask)
+ return idx;
+ }
- if (config & ~extra_regs[alt_idx].valid_mask)
- return idx;
+ if (idx >= EXTRA_REG_OMR_0 && idx <= EXTRA_REG_OMR_3) {
+ if (!(x86_pmu.flags & PMU_FL_HAS_OMR))
+ return idx;
+ if (++alt_idx > EXTRA_REG_OMR_3)
+ alt_idx = EXTRA_REG_OMR_0;
+ if (config &
+ ~extra_regs[alt_idx - EXTRA_REG_OMR_0].valid_mask)
+ return idx;
+ }
return alt_idx;
}
@@ -3547,16 +3554,28 @@ static int intel_alt_er(struct cpu_hw_events *cpuc,
static void intel_fixup_er(struct perf_event *event, int idx)
{
struct extra_reg *extra_regs = hybrid(event->pmu, extra_regs);
- event->hw.extra_reg.idx = idx;
+ int omr_idx;
- if (idx == EXTRA_REG_RSP_0) {
+ event->hw.extra_reg.idx = idx;
+ switch (idx) {
+ case EXTRA_REG_RSP_0:
event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
event->hw.config |= extra_regs[EXTRA_REG_RSP_0].event;
event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
- } else if (idx == EXTRA_REG_RSP_1) {
+ break;
+ case EXTRA_REG_RSP_1:
event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
event->hw.config |= extra_regs[EXTRA_REG_RSP_1].event;
event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
+ break;
+ case EXTRA_REG_OMR_0 ... EXTRA_REG_OMR_3:
+ omr_idx = idx - EXTRA_REG_OMR_0;
+ event->hw.config &= ~ARCH_PERFMON_EVENTSEL_UMASK;
+ event->hw.config |= 1ULL << (8 + omr_idx);
+ event->hw.extra_reg.reg = MSR_OMR_0 + omr_idx;
+ break;
+ default:
+ pr_warn("The extra reg idx %d is not supported.\n", idx);
}
}
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 3161ec0a3416..586e3fdfe6d8 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -45,6 +45,10 @@ enum extra_reg_type {
EXTRA_REG_FE = 4, /* fe_* */
EXTRA_REG_SNOOP_0 = 5, /* snoop response 0 */
EXTRA_REG_SNOOP_1 = 6, /* snoop response 1 */
+ EXTRA_REG_OMR_0 = 7, /* OMR 0 */
+ EXTRA_REG_OMR_1 = 8, /* OMR 1 */
+ EXTRA_REG_OMR_2 = 9, /* OMR 2 */
+ EXTRA_REG_OMR_3 = 10, /* OMR 3 */
EXTRA_REG_MAX /* number of entries needed */
};
@@ -1099,6 +1103,7 @@ do { \
#define PMU_FL_RETIRE_LATENCY 0x200 /* Support Retire Latency in PEBS */
#define PMU_FL_BR_CNTR 0x400 /* Support branch counter logging */
#define PMU_FL_DYN_CONSTRAINT 0x800 /* Needs dynamic constraint */
+#define PMU_FL_HAS_OMR 0x1000 /* has 4 equivalent OMR regs */
#define EVENT_VAR(_id) event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 65cc528fbad8..170cece31e3c 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -259,6 +259,11 @@
#define MSR_SNOOP_RSP_0 0x00001328
#define MSR_SNOOP_RSP_1 0x00001329
+#define MSR_OMR_0 0x000003e0
+#define MSR_OMR_1 0x000003e1
+#define MSR_OMR_2 0x000003e2
+#define MSR_OMR_3 0x000003e3
+
#define MSR_LBR_SELECT 0x000001c8
#define MSR_LBR_TOS 0x000001c9
--
2.34.1
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [PATCH 2/7] perf/x86/intel: Add support for PEBS memory auxiliary info field in DMR
2025-11-20 5:34 [PATCH 0/7] Enable core PMU for DMR and NVL Dapeng Mi
2025-11-20 5:34 ` [PATCH 1/7] perf/x86/intel: Support newly introduced 4 OMR MSRs for DMR & NVL Dapeng Mi
@ 2025-11-20 5:34 ` Dapeng Mi
2025-11-20 5:34 ` [PATCH 3/7] perf/x86/intel: Add core PMU support for DMR Dapeng Mi
` (4 subsequent siblings)
6 siblings, 0 replies; 10+ messages in thread
From: Dapeng Mi @ 2025-11-20 5:34 UTC (permalink / raw)
To: Peter Zijlstra, Ingo Molnar, Arnaldo Carvalho de Melo,
Namhyung Kim, Ian Rogers, Adrian Hunter, Alexander Shishkin,
Andi Kleen, Eranian Stephane
Cc: linux-kernel, linux-perf-users, Dapeng Mi, Zide Chen,
Falcon Thomas, Xudong Hao, Dapeng Mi
With the introduction of the OMR feature, the PEBS memory auxiliary info
field for load and store latency events has been restructured for DMR.
The memory auxiliary info field's bit[8] indicates whether an L2 cache
miss occurred for a memory load or store instruction. If bit[8] is 0,
it signifies no L2 cache miss, and bits[7:0] specify the exact cache data
source (up to the L2 cache level). If bit[8] is 1, bits[7:0] represent
the OMR encoding, indicating the specific L3 cache or memory region
involved in the memory access. A significant enhancement of the OMR
encoding is its ability to distinguish up to 8 fine-grained memory
regions in addition to the cache region, offering more detailed insight
into where a memory access was served.
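A minimal user-space sketch of that decode rule (illustrative only, not
the kernel code; the table entries below are abbreviated placeholders):

  /*
   * Sketch of the layout described above: bit[8] of the memory auxiliary
   * info tells whether bits[7:0] hold a cache data source (L2 hit) or an
   * OMR encoding (L2 miss). Only a couple of table entries are filled in.
   */
  #include <stdio.h>
  #include <stdint.h>

  static const char * const l2_hit_source[16] = {
          [0x2] = "L1 hit", [0x4] = "L2 hit clean",       /* ... */
  };
  static const char * const omr_region[16] = {
          [0x2] = "local CA shared cache", [0x8] = "memory region 0", /* ... */
  };

  static const char *decode_mem_aux(uint64_t aux)
  {
          uint8_t dse = aux & 0xff;       /* bits[7:0] */
          int l2_miss = (aux >> 8) & 0x1; /* bit[8] */

          if (!l2_miss)
                  return l2_hit_source[dse & 0xf];        /* cache source up to L2 */
          return omr_region[dse & 0xf];   /* OMR encoding: L3 or memory region */
  }

  int main(void)
  {
          printf("%s\n", decode_mem_aux(0x102));  /* L2 miss, OMR source 0x2 */
          return 0;
  }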
For detailed information on the memory auxiliary info encoding, please
refer to section 16.2 "PEBS LOAD LATENCY AND STORE LATENCY FACILITY" in
the ISE documentation.
This patch ensures that the PEBS memory auxiliary info field is correctly
interpreted and utilized in DMR.
ISE: https://www.intel.com/content/www/us/en/content-details/869288/intel-architecture-instruction-set-extensions-programming-reference.html
Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
---
arch/x86/events/intel/ds.c | 140 ++++++++++++++++++++++++++
arch/x86/events/perf_event.h | 2 +
include/uapi/linux/perf_event.h | 27 ++++-
tools/include/uapi/linux/perf_event.h | 27 ++++-
4 files changed, 190 insertions(+), 6 deletions(-)
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 2e170f2093ac..0b62e1f64f33 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -34,6 +34,17 @@ struct pebs_record_32 {
*/
+union omr_encoding {
+ struct {
+ u8 omr_source : 4;
+ u8 omr_remote : 1;
+ u8 omr_hitm : 1;
+ u8 omr_snoop : 1;
+ u8 omr_promoted : 1;
+ };
+ u8 omr_full;
+};
+
union intel_x86_pebs_dse {
u64 val;
struct {
@@ -73,6 +84,18 @@ union intel_x86_pebs_dse {
unsigned int lnc_addr_blk:1;
unsigned int ld_reserved6:18;
};
+ struct {
+ unsigned int pnc_dse: 8;
+ unsigned int pnc_l2_miss:1;
+ unsigned int pnc_stlb_clean_hit:1;
+ unsigned int pnc_stlb_any_hit:1;
+ unsigned int pnc_stlb_miss:1;
+ unsigned int pnc_locked:1;
+ unsigned int pnc_data_blk:1;
+ unsigned int pnc_addr_blk:1;
+ unsigned int pnc_fb_full:1;
+ unsigned int ld_reserved8:16;
+ };
};
@@ -228,6 +251,85 @@ void __init intel_pmu_pebs_data_source_lnl(void)
__intel_pmu_pebs_data_source_cmt(data_source);
}
+/* Version for Panthercove and later */
+
+/* L2 hit */
+#define PNC_PEBS_DATA_SOURCE_MAX 16
+static u64 pnc_pebs_l2_hit_data_source[PNC_PEBS_DATA_SOURCE_MAX] = {
+ P(OP, LOAD) | P(LVL, NA) | LEVEL(NA) | P(SNOOP, NA), /* 0x00: non-cache access */
+ OP_LH | LEVEL(L0) | P(SNOOP, NONE), /* 0x01: L0 hit */
+ OP_LH | P(LVL, L1) | LEVEL(L1) | P(SNOOP, NONE), /* 0x02: L1 hit */
+ OP_LH | P(LVL, LFB) | LEVEL(LFB) | P(SNOOP, NONE), /* 0x03: L1 Miss Handling Buffer hit */
+ OP_LH | P(LVL, L2) | LEVEL(L2) | P(SNOOP, NONE), /* 0x04: L2 Hit Clean */
+ 0, /* 0x05: Reserved */
+ 0, /* 0x06: Reserved */
+ OP_LH | P(LVL, L2) | LEVEL(L2) | P(SNOOP, HIT), /* 0x07: L2 Hit Snoop HIT */
+ OP_LH | P(LVL, L2) | LEVEL(L2) | P(SNOOP, HITM), /* 0x08: L2 Hit Snoop Hit Modified */
+ OP_LH | P(LVL, L2) | LEVEL(L2) | P(SNOOP, MISS), /* 0x09: Prefetch Promotion */
+ OP_LH | P(LVL, L2) | LEVEL(L2) | P(SNOOP, MISS), /* 0x0a: Cross Core Prefetch Promotion */
+ 0, /* 0x0b: Reserved */
+ 0, /* 0x0c: Reserved */
+ 0, /* 0x0d: Reserved */
+ 0, /* 0x0e: Reserved */
+ OP_LH | P(LVL, UNC) | LEVEL(NA) | P(SNOOP, NONE), /* 0x0f: uncached */
+};
+
+/* L2 miss */
+#define OMR_DATA_SOURCE_MAX 16
+static u64 omr_data_source[OMR_DATA_SOURCE_MAX] = {
+ P(OP, LOAD) | P(LVL, NA) | LEVEL(NA) | P(SNOOP, NA), /* 0x00: invalid */
+ 0, /* 0x01: Reserved */
+ OP_LH | P(LVL, L3) | LEVEL(L3) | P(REGION, L_SHARE), /* 0x02: local CA shared cache */
+ OP_LH | P(LVL, L3) | LEVEL(L3) | P(REGION, L_NON_SHARE),/* 0x03: local CA non-shared cache */
+ OP_LH | P(LVL, L3) | LEVEL(L3) | P(REGION, O_IO), /* 0x04: other CA IO agent */
+ OP_LH | P(LVL, L3) | LEVEL(L3) | P(REGION, O_SHARE), /* 0x05: other CA shared cache */
+ OP_LH | P(LVL, L3) | LEVEL(L3) | P(REGION, O_NON_SHARE),/* 0x06: other CA non-shared cache */
+ OP_LH | LEVEL(RAM) | P(REGION, MMIO), /* 0x07: MMIO */
+ OP_LH | LEVEL(RAM) | P(REGION, MEM0), /* 0x08: Memory region 0 */
+ OP_LH | LEVEL(RAM) | P(REGION, MEM1), /* 0x09: Memory region 1 */
+ OP_LH | LEVEL(RAM) | P(REGION, MEM2), /* 0x0a: Memory region 2 */
+ OP_LH | LEVEL(RAM) | P(REGION, MEM3), /* 0x0b: Memory region 3 */
+ OP_LH | LEVEL(RAM) | P(REGION, MEM4), /* 0x0c: Memory region 4 */
+ OP_LH | LEVEL(RAM) | P(REGION, MEM5), /* 0x0d: Memory region 5 */
+ OP_LH | LEVEL(RAM) | P(REGION, MEM6), /* 0x0e: Memory region 6 */
+ OP_LH | LEVEL(RAM) | P(REGION, MEM7), /* 0x0f: Memory region 7 */
+};
+
+static u64 parse_omr_data_source(u8 dse)
+{
+ union omr_encoding omr;
+ u64 val = 0;
+
+ omr.omr_full = dse;
+ val = omr_data_source[omr.omr_source];
+ if (omr.omr_source > 0x1 && omr.omr_source < 0x7)
+ val |= omr.omr_remote ? P(LVL, REM_CCE1) : 0;
+ else if (omr.omr_source > 0x7)
+ val |= omr.omr_remote ? P(LVL, REM_RAM1) : P(LVL, LOC_RAM);
+
+ if (omr.omr_remote)
+ val |= REM;
+
+ val |= omr.omr_hitm ? P(SNOOP, HITM) : P(SNOOP, HIT);
+
+ if (omr.omr_source == 0x2) {
+ u8 snoop = omr.omr_snoop | omr.omr_promoted;
+
+ if (snoop == 0x0)
+ val |= P(SNOOP, NA);
+ else if (snoop == 0x1)
+ val |= P(SNOOP, MISS);
+ else if (snoop == 0x2)
+ val |= P(SNOOP, HIT);
+ else if (snoop == 0x3)
+ val |= P(SNOOP, NONE);
+ } else if (omr.omr_source > 0x2 && omr.omr_source < 0x7) {
+ val |= omr.omr_snoop ? P(SNOOPX, FWD) : 0;
+ }
+
+ return val;
+}
+
static u64 precise_store_data(u64 status)
{
union intel_x86_pebs_dse dse;
@@ -410,6 +512,44 @@ u64 arl_h_latency_data(struct perf_event *event, u64 status)
return lnl_latency_data(event, status);
}
+u64 pnc_latency_data(struct perf_event *event, u64 status)
+{
+ union intel_x86_pebs_dse dse;
+ union perf_mem_data_src src;
+ u64 val;
+
+ dse.val = status;
+
+ if (!dse.pnc_l2_miss)
+ val = pnc_pebs_l2_hit_data_source[dse.pnc_dse & 0xf];
+ else
+ val = parse_omr_data_source(dse.pnc_dse);
+
+ if (!val)
+ val = P(OP, LOAD) | LEVEL(NA) | P(SNOOP, NA);
+
+ if (dse.pnc_stlb_miss)
+ val |= P(TLB, MISS) | P(TLB, L2);
+ else
+ val |= P(TLB, HIT) | P(TLB, L1) | P(TLB, L2);
+
+ if (dse.pnc_locked)
+ val |= P(LOCK, LOCKED);
+
+ if (dse.pnc_data_blk)
+ val |= P(BLK, DATA);
+ if (dse.pnc_addr_blk)
+ val |= P(BLK, ADDR);
+ if (!dse.pnc_data_blk && !dse.pnc_addr_blk)
+ val |= P(BLK, NA);
+
+ src.val = val;
+ if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW)
+ src.mem_op = P(OP, STORE);
+
+ return src.val;
+}
+
static u64 load_latency_data(struct perf_event *event, u64 status)
{
union intel_x86_pebs_dse dse;
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 586e3fdfe6d8..bd501c2a0f73 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -1664,6 +1664,8 @@ u64 lnl_latency_data(struct perf_event *event, u64 status);
u64 arl_h_latency_data(struct perf_event *event, u64 status);
+u64 pnc_latency_data(struct perf_event *event, u64 status);
+
extern struct event_constraint intel_core2_pebs_event_constraints[];
extern struct event_constraint intel_atom_pebs_event_constraints[];
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index d292f96bc06f..99156e1888f7 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -1328,14 +1328,16 @@ union perf_mem_data_src {
mem_snoopx : 2, /* Snoop mode, ext */
mem_blk : 3, /* Access blocked */
mem_hops : 3, /* Hop level */
- mem_rsvd : 18;
+ mem_region : 5, /* cache/memory regions */
+ mem_rsvd : 13;
};
};
#elif defined(__BIG_ENDIAN_BITFIELD)
union perf_mem_data_src {
__u64 val;
struct {
- __u64 mem_rsvd : 18,
+ __u64 mem_rsvd : 13,
+ mem_region : 5, /* cache/memory regions */
mem_hops : 3, /* Hop level */
mem_blk : 3, /* Access blocked */
mem_snoopx : 2, /* Snoop mode, ext */
@@ -1392,7 +1394,7 @@ union perf_mem_data_src {
#define PERF_MEM_LVLNUM_L4 0x0004 /* L4 */
#define PERF_MEM_LVLNUM_L2_MHB 0x0005 /* L2 Miss Handling Buffer */
#define PERF_MEM_LVLNUM_MSC 0x0006 /* Memory-side Cache */
-/* 0x007 available */
+#define PERF_MEM_LVLNUM_L0 0x0007 /* L0 */
#define PERF_MEM_LVLNUM_UNC 0x0008 /* Uncached */
#define PERF_MEM_LVLNUM_CXL 0x0009 /* CXL */
#define PERF_MEM_LVLNUM_IO 0x000a /* I/O */
@@ -1445,6 +1447,25 @@ union perf_mem_data_src {
/* 5-7 available */
#define PERF_MEM_HOPS_SHIFT 43
+/* Cache/Memory region */
+#define PERF_MEM_REGION_NA 0x0 /* Invalid */
+#define PERF_MEM_REGION_RSVD 0x01 /* Reserved */
+#define PERF_MEM_REGION_L_SHARE 0x02 /* Local CA shared cache */
+#define PERF_MEM_REGION_L_NON_SHARE 0x03 /* Local CA non-shared cache */
+#define PERF_MEM_REGION_O_IO 0x04 /* Other CA IO agent */
+#define PERF_MEM_REGION_O_SHARE 0x05 /* Other CA shared cache */
+#define PERF_MEM_REGION_O_NON_SHARE 0x06 /* Other CA non-shared cache */
+#define PERF_MEM_REGION_MMIO 0x07 /* MMIO */
+#define PERF_MEM_REGION_MEM0 0x08 /* Memory region 0 */
+#define PERF_MEM_REGION_MEM1 0x09 /* Memory region 1 */
+#define PERF_MEM_REGION_MEM2 0x0a /* Memory region 2 */
+#define PERF_MEM_REGION_MEM3 0x0b /* Memory region 3 */
+#define PERF_MEM_REGION_MEM4 0x0c /* Memory region 4 */
+#define PERF_MEM_REGION_MEM5 0x0d /* Memory region 5 */
+#define PERF_MEM_REGION_MEM6 0x0e /* Memory region 6 */
+#define PERF_MEM_REGION_MEM7 0x0f /* Memory region 7 */
+#define PERF_MEM_REGION_SHIFT 46
+
#define PERF_MEM_S(a, s) \
(((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index d292f96bc06f..a8d7c8c8551b 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -1328,14 +1328,16 @@ union perf_mem_data_src {
mem_snoopx : 2, /* Snoop mode, ext */
mem_blk : 3, /* Access blocked */
mem_hops : 3, /* Hop level */
- mem_rsvd : 18;
+ mem_region : 5, /* cache/memory regions */
+ mem_rsvd : 13;
};
};
#elif defined(__BIG_ENDIAN_BITFIELD)
union perf_mem_data_src {
__u64 val;
struct {
- __u64 mem_rsvd : 18,
+ __u64 mem_rsvd : 13,
+ mem_region : 5, /* cache/memory regions */
mem_hops : 3, /* Hop level */
mem_blk : 3, /* Access blocked */
mem_snoopx : 2, /* Snoop mode, ext */
@@ -1392,7 +1394,7 @@ union perf_mem_data_src {
#define PERF_MEM_LVLNUM_L4 0x0004 /* L4 */
#define PERF_MEM_LVLNUM_L2_MHB 0x0005 /* L2 Miss Handling Buffer */
#define PERF_MEM_LVLNUM_MSC 0x0006 /* Memory-side Cache */
-/* 0x007 available */
+#define PERF_MEM_LVLNUM_L0 0x0007 /* L0 */
#define PERF_MEM_LVLNUM_UNC 0x0008 /* Uncached */
#define PERF_MEM_LVLNUM_CXL 0x0009 /* CXL */
#define PERF_MEM_LVLNUM_IO 0x000a /* I/O */
@@ -1445,6 +1447,25 @@ union perf_mem_data_src {
/* 5-7 available */
#define PERF_MEM_HOPS_SHIFT 43
+/* Cache/Memory region */
+#define PERF_MEM_REGION_NA 0x0 /* Invalid */
+#define PERF_MEM_REGION_RSVD 0x01 /* Reserved */
+#define PERF_MEM_REGION_L_SHARE 0x02 /* Local CA shared cache */
+#define PERF_MEM_REGION_L_NON_SHARE 0x03 /* Local CA non-shared cache */
+#define PERF_MEM_REGION_O_IO 0x04 /* Other CA IO agent */
+#define PERF_MEM_REGION_O_SHARE 0x05 /* Other CA shared cache */
+#define PERF_MEM_REGION_O_NON_SHARE 0x06 /* Other CA non-shared cache */
+#define PERF_MEM_REGION_MMIO 0x07 /* MMIO */
+#define PERF_MEM_REGION_MEM0 0x08 /* Memory region 0 */
+#define PERF_MEM_REGION_MEM1 0x09 /* Memory region 1 */
+#define PERF_MEM_REGION_MEM2 0x0a /* Memory region 2 */
+#define PERF_MEM_REGION_MEM3 0x0b /* Memory region 3 */
+#define PERF_MEM_REGION_MEM4 0x0c /* Memory region 4 */
+#define PERF_MEM_REGION_MEM5 0x0d /* Memory region 5 */
+#define PERF_MEM_REGION_MEM6 0x0e /* Memory region 6 */
+#define PERF_MEM_REGION_MEM7 0x0f /* Memory region 7 */
+#define PERF_MEM_REGION_SHIFT 46
+
#define PERF_MEM_S(a, s) \
(((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)
--
2.34.1
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [PATCH 3/7] perf/x86/intel: Add core PMU support for DMR
2025-11-20 5:34 [PATCH 0/7] Enable core PMU for DMR and NVL Dapeng Mi
2025-11-20 5:34 ` [PATCH 1/7] perf/x86/intel: Support newly introduced 4 OMR MSRs for DMR & NVL Dapeng Mi
2025-11-20 5:34 ` [PATCH 2/7] perf/x86/intel: Add support for PEBS memory auxiliary info field in DMR Dapeng Mi
@ 2025-11-20 5:34 ` Dapeng Mi
2025-11-20 5:34 ` [PATCH 4/7] perf/x86/intel: Add support for PEBS memory auxiliary info field in NVL Dapeng Mi
` (3 subsequent siblings)
6 siblings, 0 replies; 10+ messages in thread
From: Dapeng Mi @ 2025-11-20 5:34 UTC (permalink / raw)
To: Peter Zijlstra, Ingo Molnar, Arnaldo Carvalho de Melo,
Namhyung Kim, Ian Rogers, Adrian Hunter, Alexander Shishkin,
Andi Kleen, Eranian Stephane
Cc: linux-kernel, linux-perf-users, Dapeng Mi, Zide Chen,
Falcon Thomas, Xudong Hao, Dapeng Mi
This patch enables core PMU features for DMR (Panther Cove uarch), which
include the PNC specific counter and PEBS constraints, the new cache
event IDs and the OMR registers table.
For detailed information about counter constraints, please refer to
section 16.3 "COUNTER RESTRICTIONS" in the ISE documentation.
ISE: https://www.intel.com/content/www/us/en/content-details/869288/intel-architecture-instruction-set-extensions-programming-reference.html
Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
---
arch/x86/events/intel/core.c | 182 +++++++++++++++++++++++++++++++++++
arch/x86/events/intel/ds.c | 27 ++++++
arch/x86/events/perf_event.h | 2 +
3 files changed, 211 insertions(+)
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 5970f7c20101..9f2a93fe23df 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -435,6 +435,62 @@ static struct extra_reg intel_lnc_extra_regs[] __read_mostly = {
EVENT_EXTRA_END
};
+static struct event_constraint intel_pnc_event_constraints[] = {
+ FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+ FIXED_EVENT_CONSTRAINT(0x0100, 0), /* INST_RETIRED.PREC_DIST */
+ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+ FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
+ FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */
+ FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
+ METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
+ METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
+ METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
+ METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
+ METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_HEAVY_OPS, 4),
+ METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BR_MISPREDICT, 5),
+ METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6),
+ METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7),
+
+ INTEL_EVENT_CONSTRAINT(0x20, 0xf),
+ INTEL_EVENT_CONSTRAINT(0x79, 0xf),
+
+ INTEL_UEVENT_CONSTRAINT(0x0275, 0xf),
+ INTEL_UEVENT_CONSTRAINT(0x0176, 0xf),
+ INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1),
+ INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1),
+ INTEL_UEVENT_CONSTRAINT(0x01cd, 0xfc),
+ INTEL_UEVENT_CONSTRAINT(0x02cd, 0x3),
+
+ INTEL_EVENT_CONSTRAINT(0xd0, 0xf),
+ INTEL_EVENT_CONSTRAINT(0xd1, 0xf),
+ INTEL_EVENT_CONSTRAINT(0xd4, 0xf),
+ INTEL_EVENT_CONSTRAINT(0xd6, 0xf),
+ INTEL_EVENT_CONSTRAINT(0xdf, 0xf),
+ INTEL_EVENT_CONSTRAINT(0xce, 0x1),
+
+ INTEL_UEVENT_CONSTRAINT(0x01b1, 0x8),
+ INTEL_UEVENT_CONSTRAINT(0x0847, 0xf),
+ INTEL_UEVENT_CONSTRAINT(0x0446, 0xf),
+ INTEL_UEVENT_CONSTRAINT(0x0846, 0xf),
+ INTEL_UEVENT_CONSTRAINT(0x0148, 0xf),
+
+ EVENT_CONSTRAINT_END
+};
+
+static struct extra_reg intel_pnc_extra_regs[] __read_mostly = {
+ /* must define OMR_X first, see intel_alt_er() */
+ INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OMR_0, 0x40ffffff0000ffffull, OMR_0),
+ INTEL_UEVENT_EXTRA_REG(0x022a, MSR_OMR_1, 0x40ffffff0000ffffull, OMR_1),
+ INTEL_UEVENT_EXTRA_REG(0x042a, MSR_OMR_2, 0x40ffffff0000ffffull, OMR_2),
+ INTEL_UEVENT_EXTRA_REG(0x082a, MSR_OMR_3, 0x40ffffff0000ffffull, OMR_3),
+ INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
+ INTEL_UEVENT_EXTRA_REG(0x02c6, MSR_PEBS_FRONTEND, 0x9, FE),
+ INTEL_UEVENT_EXTRA_REG(0x03c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
+ INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0xf, FE),
+ INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
+ EVENT_EXTRA_END
+};
+
EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");
@@ -650,6 +706,102 @@ static __initconst const u64 glc_hw_cache_extra_regs
},
};
+static __initconst const u64 pnc_hw_cache_event_ids
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(L1D ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x81d0,
+ [ C(RESULT_MISS) ] = 0xe124,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x82d0,
+ },
+ },
+ [ C(L1I ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_MISS) ] = 0xe424,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(LL ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x12a,
+ [ C(RESULT_MISS) ] = 0x12a,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x12a,
+ [ C(RESULT_MISS) ] = 0x12a,
+ },
+ },
+ [ C(DTLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x81d0,
+ [ C(RESULT_MISS) ] = 0xe12,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x82d0,
+ [ C(RESULT_MISS) ] = 0xe13,
+ },
+ },
+ [ C(ITLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = 0xe11,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(BPU ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x4c4,
+ [ C(RESULT_MISS) ] = 0x4c5,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(NODE) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+};
+
+static __initconst const u64 pnc_hw_cache_extra_regs
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(LL ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x4000000000000001,
+ [ C(RESULT_MISS) ] = 0xFFFFF000000001,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x4000000000000002,
+ [ C(RESULT_MISS) ] = 0xFFFFF000000002,
+ },
+ },
+};
+
/*
* Notes on the events:
* - data reads do not include code reads (comparable to earlier tables)
@@ -7225,6 +7377,20 @@ static __always_inline void intel_pmu_init_lnc(struct pmu *pmu)
hybrid(pmu, extra_regs) = intel_lnc_extra_regs;
}
+static __always_inline void intel_pmu_init_pnc(struct pmu *pmu)
+{
+ intel_pmu_init_glc(pmu);
+ x86_pmu.flags &= ~PMU_FL_HAS_RSP_1;
+ x86_pmu.flags |= PMU_FL_HAS_OMR;
+ memcpy(hybrid_var(pmu, hw_cache_event_ids),
+ pnc_hw_cache_event_ids, sizeof(hw_cache_event_ids));
+ memcpy(hybrid_var(pmu, hw_cache_extra_regs),
+ pnc_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
+ hybrid(pmu, event_constraints) = intel_pnc_event_constraints;
+ hybrid(pmu, pebs_constraints) = intel_pnc_pebs_event_constraints;
+ hybrid(pmu, extra_regs) = intel_pnc_extra_regs;
+}
+
static __always_inline void intel_pmu_init_skt(struct pmu *pmu)
{
intel_pmu_init_grt(pmu);
@@ -7897,6 +8063,22 @@ __init int intel_pmu_init(void)
intel_pmu_pebs_data_source_skl(true);
break;
+ case INTEL_DIAMONDRAPIDS_X:
+ intel_pmu_init_pnc(NULL);
+ x86_pmu.pebs_ept = 1;
+ x86_pmu.hw_config = hsw_hw_config;
+ x86_pmu.pebs_latency_data = pnc_latency_data;
+ x86_pmu.get_event_constraints = glc_get_event_constraints;
+ extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
+ hsw_format_attr : nhm_format_attr;
+ extra_skl_attr = skl_format_attr;
+ mem_attr = glc_events_attrs;
+ td_attr = glc_td_events_attrs;
+ tsx_attr = glc_tsx_events_attrs;
+ pr_cont("Panthercove events, ");
+ name = "panthercove";
+ break;
+
case INTEL_ALDERLAKE:
case INTEL_ALDERLAKE_L:
case INTEL_RAPTORLAKE:
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 0b62e1f64f33..0db6ffc7007e 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1424,6 +1424,33 @@ struct event_constraint intel_lnc_pebs_event_constraints[] = {
EVENT_CONSTRAINT_END
};
+struct event_constraint intel_pnc_pebs_event_constraints[] = {
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x100, 0x100000000ULL), /* INST_RETIRED.PREC_DIST */
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL),
+
+ INTEL_HYBRID_LDLAT_CONSTRAINT(0x1cd, 0xfc),
+ INTEL_HYBRID_STLAT_CONSTRAINT(0x2cd, 0x3),
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */
+
+ INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf),
+
+ INTEL_FLAGS_EVENT_CONSTRAINT(0xd0, 0xf),
+ INTEL_FLAGS_EVENT_CONSTRAINT(0xd6, 0xf),
+
+ /*
+ * Everything else is handled by PMU_FL_PEBS_ALL, because we
+ * need the full constraints from the main table.
+ */
+
+ EVENT_CONSTRAINT_END
+};
+
struct event_constraint *intel_pebs_constraints(struct perf_event *event)
{
struct event_constraint *pebs_constraints = hybrid(event->pmu, pebs_constraints);
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index bd501c2a0f73..cbca1888e8f7 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -1698,6 +1698,8 @@ extern struct event_constraint intel_glc_pebs_event_constraints[];
extern struct event_constraint intel_lnc_pebs_event_constraints[];
+extern struct event_constraint intel_pnc_pebs_event_constraints[];
+
struct event_constraint *intel_pebs_constraints(struct perf_event *event);
void intel_pmu_pebs_add(struct perf_event *event);
--
2.34.1
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [PATCH 4/7] perf/x86/intel: Add support for PEBS memory auxiliary info field in NVL
2025-11-20 5:34 [PATCH 0/7] Enable core PMU for DMR and NVL Dapeng Mi
` (2 preceding siblings ...)
2025-11-20 5:34 ` [PATCH 3/7] perf/x86/intel: Add core PMU support for DMR Dapeng Mi
@ 2025-11-20 5:34 ` Dapeng Mi
2025-11-20 5:34 ` [PATCH 5/7] perf/x86/intel: Add core PMU support for Novalake Dapeng Mi
` (2 subsequent siblings)
6 siblings, 0 replies; 10+ messages in thread
From: Dapeng Mi @ 2025-11-20 5:34 UTC (permalink / raw)
To: Peter Zijlstra, Ingo Molnar, Arnaldo Carvalho de Melo,
Namhyung Kim, Ian Rogers, Adrian Hunter, Alexander Shishkin,
Andi Kleen, Eranian Stephane
Cc: linux-kernel, linux-perf-users, Dapeng Mi, Zide Chen,
Falcon Thomas, Xudong Hao, Dapeng Mi
Similar to DMR (Panther Cove uarch), both P-core (Coyote Cove uarch) and
E-core (Arctic Wolf uarch) of NVL adopt the new PEBS memory auxiliary
info layout.
Coyote Cove microarchitecture shares the same PMU capabilities, including
the memory auxiliary info layout, with Panther Cove. Arctic Wolf
microarchitecture has a similar layout to Panther Cove, with the only
difference being specific data source encoding for L2 hit cases (up to
the L2 cache level). The OMR encoding remains the same as in Panther Cove.
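A self-contained toy sketch of that split (illustrative only; the helper
names and the dispatch function are made up, while the few table entries
shown follow the patch tables):

  /*
   * Toy sketch: the OMR (L2 miss) decode is shared between P-core and
   * E-core, while the L2 hit data source table is core specific.
   */
  #include <stdio.h>
  #include <stdint.h>

  static const char * const pnc_l2_hit[16] = { [0x2] = "L1 hit", [0x4] = "L2 hit clean" };
  static const char * const arw_l2_hit[16] = { [0x1] = "L1 hit", [0x3] = "L2 hit clean" };

  static const char *decode_omr(uint8_t dse)
  {
          (void)dse;                      /* shared OMR decode, simplified away */
          return "OMR region";
  }

  static const char *decode_aux(int is_atom, uint64_t aux)
  {
          uint8_t dse = aux & 0xff;

          if ((aux >> 8) & 0x1)           /* bit[8]: L2 miss */
                  return decode_omr(dse);
          return is_atom ? arw_l2_hit[dse & 0xf] : pnc_l2_hit[dse & 0xf];
  }

  int main(void)
  {
          printf("P-core: %s, E-core: %s\n",
                 decode_aux(0, 0x004), decode_aux(1, 0x003));
          return 0;
  }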
For detailed information on the memory auxiliary info encoding, please
refer to section 16.2 "PEBS LOAD LATENCY AND STORE LATENCY FACILITY" in
the latest ISE documentation.
This patch defines the Arctic Wolf specific data source encoding and then
adds support for the PEBS memory auxiliary info field on NVL.
ISE: https://www.intel.com/content/www/us/en/content-details/869288/intel-architecture-instruction-set-extensions-programming-reference.html
Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
---
arch/x86/events/intel/ds.c | 83 ++++++++++++++++++++++++++++++++++++
arch/x86/events/perf_event.h | 2 +
2 files changed, 85 insertions(+)
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 0db6ffc7007e..d082eb160a10 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -96,6 +96,18 @@ union intel_x86_pebs_dse {
unsigned int pnc_fb_full:1;
unsigned int ld_reserved8:16;
};
+ struct {
+ unsigned int arw_dse:8;
+ unsigned int arw_l2_miss:1;
+ unsigned int arw_xq_promotion:1;
+ unsigned int arw_reissue:1;
+ unsigned int arw_stlb_miss:1;
+ unsigned int arw_locked:1;
+ unsigned int arw_data_blk:1;
+ unsigned int arw_addr_blk:1;
+ unsigned int arw_fb_full:1;
+ unsigned int ld_reserved9:16;
+ };
};
@@ -274,6 +286,29 @@ static u64 pnc_pebs_l2_hit_data_source[PNC_PEBS_DATA_SOURCE_MAX] = {
OP_LH | P(LVL, UNC) | LEVEL(NA) | P(SNOOP, NONE), /* 0x0f: uncached */
};
+/* Version for Arctic Wolf and later */
+
+/* L2 hit */
+#define ARW_PEBS_DATA_SOURCE_MAX 16
+static u64 arw_pebs_l2_hit_data_source[ARW_PEBS_DATA_SOURCE_MAX] = {
+ P(OP, LOAD) | P(LVL, NA) | LEVEL(NA) | P(SNOOP, NA), /* 0x00: non-cache access */
+ OP_LH | P(LVL, L1) | LEVEL(L1) | P(SNOOP, NONE), /* 0x01: L1 hit */
+ OP_LH | P(LVL, LFB) | LEVEL(LFB) | P(SNOOP, NONE), /* 0x02: WCB Hit */
+ OP_LH | P(LVL, L2) | LEVEL(L2) | P(SNOOP, NONE), /* 0x03: L2 Hit Clean */
+ OP_LH | P(LVL, L2) | LEVEL(L2) | P(SNOOP, HIT), /* 0x04: L2 Hit Snoop HIT */
+ OP_LH | P(LVL, L2) | LEVEL(L2) | P(SNOOP, HITM), /* 0x05: L2 Hit Snoop Hit Modified */
+ OP_LH | P(LVL, UNC) | LEVEL(NA) | P(SNOOP, NONE), /* 0x06: uncached */
+ 0, /* 0x07: Reserved */
+ 0, /* 0x08: Reserved */
+ 0, /* 0x09: Reserved */
+ 0, /* 0x0a: Reserved */
+ 0, /* 0x0b: Reserved */
+ 0, /* 0x0c: Reserved */
+ 0, /* 0x0d: Reserved */
+ 0, /* 0x0e: Reserved */
+ 0, /* 0x0f: Reserved */
+};
+
/* L2 miss */
#define OMR_DATA_SOURCE_MAX 16
static u64 omr_data_source[OMR_DATA_SOURCE_MAX] = {
@@ -457,6 +492,44 @@ u64 cmt_latency_data(struct perf_event *event, u64 status)
dse.mtl_fwd_blk);
}
+static u64 arw_latency_data(struct perf_event *event, u64 status)
+{
+ union intel_x86_pebs_dse dse;
+ union perf_mem_data_src src;
+ u64 val;
+
+ dse.val = status;
+
+ if (!dse.arw_l2_miss)
+ val = arw_pebs_l2_hit_data_source[dse.arw_dse & 0xf];
+ else
+ val = parse_omr_data_source(dse.arw_dse);
+
+ if (!val)
+ val = P(OP, LOAD) | LEVEL(NA) | P(SNOOP, NA);
+
+ if (dse.arw_stlb_miss)
+ val |= P(TLB, MISS) | P(TLB, L2);
+ else
+ val |= P(TLB, HIT) | P(TLB, L1) | P(TLB, L2);
+
+ if (dse.arw_locked)
+ val |= P(LOCK, LOCKED);
+
+ if (dse.arw_data_blk)
+ val |= P(BLK, DATA);
+ if (dse.arw_addr_blk)
+ val |= P(BLK, ADDR);
+ if (!dse.arw_data_blk && !dse.arw_addr_blk)
+ val |= P(BLK, NA);
+
+ src.val = val;
+ if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW)
+ src.mem_op = P(OP, STORE);
+
+ return src.val;
+}
+
static u64 lnc_latency_data(struct perf_event *event, u64 status)
{
union intel_x86_pebs_dse dse;
@@ -550,6 +623,16 @@ u64 pnc_latency_data(struct perf_event *event, u64 status)
return src.val;
}
+u64 nvl_latency_data(struct perf_event *event, u64 status)
+{
+ struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
+
+ if (pmu->pmu_type == hybrid_small)
+ return arw_latency_data(event, status);
+
+ return pnc_latency_data(event, status);
+}
+
static u64 load_latency_data(struct perf_event *event, u64 status)
{
union intel_x86_pebs_dse dse;
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index cbca1888e8f7..aedc1a7762c2 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -1666,6 +1666,8 @@ u64 arl_h_latency_data(struct perf_event *event, u64 status);
u64 pnc_latency_data(struct perf_event *event, u64 status);
+u64 nvl_latency_data(struct perf_event *event, u64 status);
+
extern struct event_constraint intel_core2_pebs_event_constraints[];
extern struct event_constraint intel_atom_pebs_event_constraints[];
--
2.34.1
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [PATCH 5/7] perf/x86/intel: Add core PMU support for Novalake
2025-11-20 5:34 [PATCH 0/7] Enable core PMU for DMR and NVL Dapeng Mi
` (3 preceding siblings ...)
2025-11-20 5:34 ` [PATCH 4/7] perf/x86/intel: Add support for PEBS memory auxiliary info field in NVL Dapeng Mi
@ 2025-11-20 5:34 ` Dapeng Mi
2025-11-20 5:34 ` [PATCH 6/7] perf/x86: Replace magic numbers with macros for attr_rdpmc Dapeng Mi
2025-11-20 5:34 ` [PATCH 7/7] perf/x86/intel: Add rdpmc-user-disable support Dapeng Mi
6 siblings, 0 replies; 10+ messages in thread
From: Dapeng Mi @ 2025-11-20 5:34 UTC (permalink / raw)
To: Peter Zijlstra, Ingo Molnar, Arnaldo Carvalho de Melo,
Namhyung Kim, Ian Rogers, Adrian Hunter, Alexander Shishkin,
Andi Kleen, Eranian Stephane
Cc: linux-kernel, linux-perf-users, Dapeng Mi, Zide Chen,
Falcon Thomas, Xudong Hao, Dapeng Mi
This patch enables core PMU support for Novalake, covering both P-core
and E-core. It includes the Arctic Wolf specific counter and PEBS
constraints and the OMR registers table.
Since Coyote Cove shares the same PMU capabilities as Panther Cove, the
existing Panther Cove PMU enabling functions are reused for Coyote Cove.
For detailed information about counter constraints, please refer to
section 16.3 "COUNTER RESTRICTIONS" in the ISE documentation.
ISE: https://www.intel.com/content/www/us/en/content-details/869288/intel-architecture-instruction-set-extensions-programming-reference.html
Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
---
arch/x86/events/intel/core.c | 99 ++++++++++++++++++++++++++++++++++++
arch/x86/events/intel/ds.c | 11 ++++
arch/x86/events/perf_event.h | 2 +
3 files changed, 112 insertions(+)
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 9f2a93fe23df..a3a1e6e670f8 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -232,6 +232,29 @@ static struct event_constraint intel_skt_event_constraints[] __read_mostly = {
EVENT_CONSTRAINT_END
};
+static struct event_constraint intel_arw_event_constraints[] __read_mostly = {
+ FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+ FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
+ FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */
+ FIXED_EVENT_CONSTRAINT(0x0073, 4), /* TOPDOWN_BAD_SPECULATION.ALL */
+ FIXED_EVENT_CONSTRAINT(0x019c, 5), /* TOPDOWN_FE_BOUND.ALL */
+ FIXED_EVENT_CONSTRAINT(0x02c2, 6), /* TOPDOWN_RETIRING.ALL */
+ INTEL_UEVENT_CONSTRAINT(0x01b7, 0x1),
+ INTEL_UEVENT_CONSTRAINT(0x02b7, 0x2),
+ INTEL_UEVENT_CONSTRAINT(0x04b7, 0x4),
+ INTEL_UEVENT_CONSTRAINT(0x08b7, 0x8),
+ INTEL_UEVENT_CONSTRAINT(0x01d4, 0x1),
+ INTEL_UEVENT_CONSTRAINT(0x02d4, 0x2),
+ INTEL_UEVENT_CONSTRAINT(0x04d4, 0x4),
+ INTEL_UEVENT_CONSTRAINT(0x08d4, 0x8),
+ INTEL_UEVENT_CONSTRAINT(0x0175, 0x1),
+ INTEL_UEVENT_CONSTRAINT(0x0275, 0x2),
+ INTEL_UEVENT_CONSTRAINT(0x21d3, 0x1),
+ INTEL_UEVENT_CONSTRAINT(0x22d3, 0x1),
+ EVENT_CONSTRAINT_END
+};
+
static struct event_constraint intel_skl_event_constraints[] = {
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
@@ -2319,6 +2342,26 @@ static __initconst const u64 tnt_hw_cache_extra_regs
},
};
+static __initconst const u64 arw_hw_cache_extra_regs
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ [C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0x4000000000000001,
+ [C(RESULT_MISS)] = 0xFFFFF000000001,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = 0x4000000000000002,
+ [C(RESULT_MISS)] = 0xFFFFF000000002,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = 0x0,
+ [C(RESULT_MISS)] = 0x0,
+ },
+ },
+};
+
EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound_tnt, "event=0x71,umask=0x0");
EVENT_ATTR_STR(topdown-retiring, td_retiring_tnt, "event=0xc2,umask=0x0");
EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec_tnt, "event=0x73,umask=0x6");
@@ -2377,6 +2420,22 @@ static struct extra_reg intel_cmt_extra_regs[] __read_mostly = {
EVENT_EXTRA_END
};
+static struct extra_reg intel_arw_extra_regs[] __read_mostly = {
+ /* must define OMR_X first, see intel_alt_er() */
+ INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OMR_0, 0xc0ffffffffffffffull, OMR_0),
+ INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OMR_1, 0xc0ffffffffffffffull, OMR_1),
+ INTEL_UEVENT_EXTRA_REG(0x04b7, MSR_OMR_2, 0xc0ffffffffffffffull, OMR_2),
+ INTEL_UEVENT_EXTRA_REG(0x08b7, MSR_OMR_3, 0xc0ffffffffffffffull, OMR_3),
+ INTEL_UEVENT_EXTRA_REG(0x01d4, MSR_OMR_0, 0xc0ffffffffffffffull, OMR_0),
+ INTEL_UEVENT_EXTRA_REG(0x02d4, MSR_OMR_1, 0xc0ffffffffffffffull, OMR_1),
+ INTEL_UEVENT_EXTRA_REG(0x04d4, MSR_OMR_2, 0xc0ffffffffffffffull, OMR_2),
+ INTEL_UEVENT_EXTRA_REG(0x08d4, MSR_OMR_3, 0xc0ffffffffffffffull, OMR_3),
+ INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0),
+ INTEL_UEVENT_EXTRA_REG(0x0127, MSR_SNOOP_RSP_0, 0xffffffffffffffffull, SNOOP_0),
+ INTEL_UEVENT_EXTRA_REG(0x0227, MSR_SNOOP_RSP_1, 0xffffffffffffffffull, SNOOP_1),
+ EVENT_EXTRA_END
+};
+
EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound_skt, "event=0x9c,umask=0x01");
EVENT_ATTR_STR(topdown-retiring, td_retiring_skt, "event=0xc2,umask=0x02");
EVENT_ATTR_STR(topdown-be-bound, td_be_bound_skt, "event=0xa4,umask=0x02");
@@ -7399,6 +7458,19 @@ static __always_inline void intel_pmu_init_skt(struct pmu *pmu)
static_call_update(intel_pmu_enable_acr_event, intel_pmu_enable_acr);
}
+static __always_inline void intel_pmu_init_arw(struct pmu *pmu)
+{
+ intel_pmu_init_grt(pmu);
+ x86_pmu.flags &= ~PMU_FL_HAS_RSP_1;
+ x86_pmu.flags |= PMU_FL_HAS_OMR;
+ memcpy(hybrid_var(pmu, hw_cache_extra_regs),
+ arw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
+ hybrid(pmu, event_constraints) = intel_arw_event_constraints;
+ hybrid(pmu, pebs_constraints) = intel_arw_pebs_event_constraints;
+ hybrid(pmu, extra_regs) = intel_arw_extra_regs;
+ static_call_update(intel_pmu_enable_acr_event, intel_pmu_enable_acr);
+}
+
__init int intel_pmu_init(void)
{
struct attribute **extra_skl_attr = &empty_attrs;
@@ -8239,6 +8311,33 @@ __init int intel_pmu_init(void)
name = "arrowlake_h_hybrid";
break;
+ case INTEL_NOVALAKE:
+ case INTEL_NOVALAKE_L:
+ pr_cont("Novalake Hybrid events, ");
+ name = "novalake_hybrid";
+ intel_pmu_init_hybrid(hybrid_big_small);
+
+ x86_pmu.pebs_latency_data = nvl_latency_data;
+ x86_pmu.get_event_constraints = mtl_get_event_constraints;
+ x86_pmu.hw_config = adl_hw_config;
+
+ td_attr = lnl_hybrid_events_attrs;
+ mem_attr = mtl_hybrid_mem_attrs;
+ tsx_attr = adl_hybrid_tsx_attrs;
+ extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
+ mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr;
+
+ /* Initialize big core specific PerfMon capabilities.*/
+ pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
+ intel_pmu_init_pnc(&pmu->pmu);
+
+ /* Initialize Atom core specific PerfMon capabilities.*/
+ pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
+ intel_pmu_init_arw(&pmu->pmu);
+
+ intel_pmu_pebs_data_source_lnl();
+ break;
+
default:
switch (x86_pmu.version) {
case 1:
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index d082eb160a10..c6aec7765c02 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1292,6 +1292,17 @@ struct event_constraint intel_grt_pebs_event_constraints[] = {
EVENT_CONSTRAINT_END
};
+struct event_constraint intel_arw_pebs_event_constraints[] = {
+ /* Allow all events as PEBS with no flags */
+ INTEL_HYBRID_LAT_CONSTRAINT(0x5d0, 0xff),
+ INTEL_HYBRID_LAT_CONSTRAINT(0x6d0, 0xff),
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x01d4, 0x1),
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x02d4, 0x2),
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x04d4, 0x4),
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x08d4, 0x8),
+ EVENT_CONSTRAINT_END
+};
+
struct event_constraint intel_nehalem_pebs_event_constraints[] = {
INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */
INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index aedc1a7762c2..f7caabc5d487 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -1680,6 +1680,8 @@ extern struct event_constraint intel_glp_pebs_event_constraints[];
extern struct event_constraint intel_grt_pebs_event_constraints[];
+extern struct event_constraint intel_arw_pebs_event_constraints[];
+
extern struct event_constraint intel_nehalem_pebs_event_constraints[];
extern struct event_constraint intel_westmere_pebs_event_constraints[];
--
2.34.1
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [PATCH 6/7] perf/x86: Replace magic numbers with macros for attr_rdpmc
2025-11-20 5:34 [PATCH 0/7] Enable core PMU for DMR and NVL Dapeng Mi
` (4 preceding siblings ...)
2025-11-20 5:34 ` [PATCH 5/7] perf/x86/intel: Add core PMU support for Novalake Dapeng Mi
@ 2025-11-20 5:34 ` Dapeng Mi
2025-11-20 6:19 ` Ian Rogers
2025-11-20 5:34 ` [PATCH 7/7] perf/x86/intel: Add rdpmc-user-disable support Dapeng Mi
6 siblings, 1 reply; 10+ messages in thread
From: Dapeng Mi @ 2025-11-20 5:34 UTC (permalink / raw)
To: Peter Zijlstra, Ingo Molnar, Arnaldo Carvalho de Melo,
Namhyung Kim, Ian Rogers, Adrian Hunter, Alexander Shishkin,
Andi Kleen, Eranian Stephane
Cc: linux-kernel, linux-perf-users, Dapeng Mi, Zide Chen,
Falcon Thomas, Xudong Hao, Dapeng Mi
Use macros to replace these attr_rdpmc magic numbers, so that their
meaning is easy for users to understand.
Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
---
arch/x86/events/core.c | 7 ++++---
arch/x86/events/intel/p6.c | 2 +-
arch/x86/events/perf_event.h | 7 +++++++
3 files changed, 12 insertions(+), 4 deletions(-)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 5d0d5e466c62..3d9cc1d7fcfa 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2129,7 +2129,8 @@ static int __init init_hw_perf_events(void)
pr_cont("%s PMU driver.\n", x86_pmu.name);
- x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
+ /* enable userspace RDPMC usage by default */
+ x86_pmu.attr_rdpmc = X86_USER_RDPMC_CONDITIONAL_ENABLE;
for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
quirk->func();
@@ -2609,12 +2610,12 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
*/
if (val == 0)
static_branch_inc(&rdpmc_never_available_key);
- else if (x86_pmu.attr_rdpmc == 0)
+ else if (x86_pmu.attr_rdpmc == X86_USER_RDPMC_NEVER_ENABLE)
static_branch_dec(&rdpmc_never_available_key);
if (val == 2)
static_branch_inc(&rdpmc_always_available_key);
- else if (x86_pmu.attr_rdpmc == 2)
+ else if (x86_pmu.attr_rdpmc == X86_USER_RDPMC_ALWAYS_ENABLE)
static_branch_dec(&rdpmc_always_available_key);
on_each_cpu(cr4_update_pce, NULL, 1);
diff --git a/arch/x86/events/intel/p6.c b/arch/x86/events/intel/p6.c
index 6e41de355bd8..fb991e0ac614 100644
--- a/arch/x86/events/intel/p6.c
+++ b/arch/x86/events/intel/p6.c
@@ -243,7 +243,7 @@ static __init void p6_pmu_rdpmc_quirk(void)
*/
pr_warn("Userspace RDPMC support disabled due to a CPU erratum\n");
x86_pmu.attr_rdpmc_broken = 1;
- x86_pmu.attr_rdpmc = 0;
+ x86_pmu.attr_rdpmc = X86_USER_RDPMC_NEVER_ENABLE;
}
}
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index f7caabc5d487..24a81d2916e9 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -187,6 +187,13 @@ struct amd_nb {
(1ULL << PERF_REG_X86_R14) | \
(1ULL << PERF_REG_X86_R15))
+/* user space rdpmc control values */
+enum {
+ X86_USER_RDPMC_NEVER_ENABLE = 0,
+ X86_USER_RDPMC_CONDITIONAL_ENABLE = 1,
+ X86_USER_RDPMC_ALWAYS_ENABLE = 2,
+};
+
/*
* Per register state.
*/
--
2.34.1
^ permalink raw reply related [flat|nested] 10+ messages in thread
* Re: [PATCH 6/7] perf/x86: Replace magic numbers with macros for attr_rdpmc
2025-11-20 5:34 ` [PATCH 6/7] perf/x86: Replace magic numbers with macros for attr_rdpmc Dapeng Mi
@ 2025-11-20 6:19 ` Ian Rogers
2025-11-20 7:30 ` Mi, Dapeng
0 siblings, 1 reply; 10+ messages in thread
From: Ian Rogers @ 2025-11-20 6:19 UTC (permalink / raw)
To: Dapeng Mi, Falcon Thomas
Cc: Peter Zijlstra, Ingo Molnar, Arnaldo Carvalho de Melo,
Namhyung Kim, Adrian Hunter, Alexander Shishkin, Andi Kleen,
Eranian Stephane, linux-kernel, linux-perf-users, Dapeng Mi,
Zide Chen, Xudong Hao
On Wed, Nov 19, 2025 at 9:37 PM Dapeng Mi <dapeng1.mi@linux.intel.com> wrote:
>
> Use macros to replace these attr_rdpmc magic numbers, so users are easy
> to know their meaning.
>
> Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
I'm reminded that we were having issues with rdpmc on hybrid:
https://lore.kernel.org/lkml/20250614004528.1652860-1-irogers@google.com/
like the enable/disable rdpmc flag being shared across the cpu_core
and cpu_atom PMUs, and needing to force the thread doing the rdpmc to
have affinity matching the CPUs of the PMU it is reading from, as
otherwise things like struct perf_event_mmap_page's index could do
interesting things:
https://web.git.kernel.org/pub/scm/linux/kernel/git/perf/perf-tools-next.git/tree/tools/perf/tests/mmap-basic.c?h=perf-tools-next#n208
Others required the rdpmc to be in a restartable sequence and Peter
proposed fixing this in the kernel:
https://lore.kernel.org/linux-perf-users/20250618084522.GE1613376@noisy.programming.kicks-ass.net/
Also looks like we never merged fixing the documentation:
https://lore.kernel.org/lkml/20220817174909.877139-1-irogers@google.com/
Not specifically a DMR issue but this is reminding me of a bunch of
tech debt - I also wonder if this patch requires the test being
updated.
Thanks,
Ian
> ---
> arch/x86/events/core.c | 7 ++++---
> arch/x86/events/intel/p6.c | 2 +-
> arch/x86/events/perf_event.h | 7 +++++++
> 3 files changed, 12 insertions(+), 4 deletions(-)
>
> diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
> index 5d0d5e466c62..3d9cc1d7fcfa 100644
> --- a/arch/x86/events/core.c
> +++ b/arch/x86/events/core.c
> @@ -2129,7 +2129,8 @@ static int __init init_hw_perf_events(void)
>
> pr_cont("%s PMU driver.\n", x86_pmu.name);
>
> - x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
> + /* enable userspace RDPMC usage by default */
> + x86_pmu.attr_rdpmc = X86_USER_RDPMC_CONDITIONAL_ENABLE;
>
> for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
> quirk->func();
> @@ -2609,12 +2610,12 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
> */
> if (val == 0)
> static_branch_inc(&rdpmc_never_available_key);
> - else if (x86_pmu.attr_rdpmc == 0)
> + else if (x86_pmu.attr_rdpmc == X86_USER_RDPMC_NEVER_ENABLE)
> static_branch_dec(&rdpmc_never_available_key);
>
> if (val == 2)
> static_branch_inc(&rdpmc_always_available_key);
> - else if (x86_pmu.attr_rdpmc == 2)
> + else if (x86_pmu.attr_rdpmc == X86_USER_RDPMC_ALWAYS_ENABLE)
> static_branch_dec(&rdpmc_always_available_key);
>
> on_each_cpu(cr4_update_pce, NULL, 1);
> diff --git a/arch/x86/events/intel/p6.c b/arch/x86/events/intel/p6.c
> index 6e41de355bd8..fb991e0ac614 100644
> --- a/arch/x86/events/intel/p6.c
> +++ b/arch/x86/events/intel/p6.c
> @@ -243,7 +243,7 @@ static __init void p6_pmu_rdpmc_quirk(void)
> */
> pr_warn("Userspace RDPMC support disabled due to a CPU erratum\n");
> x86_pmu.attr_rdpmc_broken = 1;
> - x86_pmu.attr_rdpmc = 0;
> + x86_pmu.attr_rdpmc = X86_USER_RDPMC_NEVER_ENABLE;
> }
> }
>
> diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
> index f7caabc5d487..24a81d2916e9 100644
> --- a/arch/x86/events/perf_event.h
> +++ b/arch/x86/events/perf_event.h
> @@ -187,6 +187,13 @@ struct amd_nb {
> (1ULL << PERF_REG_X86_R14) | \
> (1ULL << PERF_REG_X86_R15))
>
> +/* user space rdpmc control values */
> +enum {
> + X86_USER_RDPMC_NEVER_ENABLE = 0,
> + X86_USER_RDPMC_CONDITIONAL_ENABLE = 1,
> + X86_USER_RDPMC_ALWAYS_ENABLE = 2,
> +};
> +
> /*
> * Per register state.
> */
> --
> 2.34.1
>
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH 6/7] perf/x86: Replace magic numbers with macros for attr_rdpmc
2025-11-20 6:19 ` Ian Rogers
@ 2025-11-20 7:30 ` Mi, Dapeng
0 siblings, 0 replies; 10+ messages in thread
From: Mi, Dapeng @ 2025-11-20 7:30 UTC (permalink / raw)
To: Ian Rogers, Falcon Thomas
Cc: Peter Zijlstra, Ingo Molnar, Arnaldo Carvalho de Melo,
Namhyung Kim, Adrian Hunter, Alexander Shishkin, Andi Kleen,
Eranian Stephane, linux-kernel, linux-perf-users, Dapeng Mi,
Zide Chen, Xudong Hao
On 11/20/2025 2:19 PM, Ian Rogers wrote:
> On Wed, Nov 19, 2025 at 9:37 PM Dapeng Mi <dapeng1.mi@linux.intel.com> wrote:
>> Use macros to replace these attr_rdpmc magic numbers, so users are easy
>> to know their meaning.
>>
>> Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
> I'm reminded that we were having issues with rdpmc on hybrid:
> https://lore.kernel.org/lkml/20250614004528.1652860-1-irogers@google.com/
> like the enable/disable rdpmc flag being shared across the cpu_core
> and cpu_atom PMUs, and needing to force the thread doing the rdpmc to
> have affinity matching the CPUs of the PMU it is reading from, as
> otherwise things like struct perf_event_mmap_page's index could do
> interesting things:
> https://web.git.kernel.org/pub/scm/linux/kernel/git/perf/perf-tools-next.git/tree/tools/perf/tests/mmap-basic.c?h=perf-tools-next#n208
> Others required the rdpmc to be in a restartable sequence and Peter
> proposed fixing this in the kernel:
> https://lore.kernel.org/linux-perf-users/20250618084522.GE1613376@noisy.programming.kicks-ass.net/
>
> Also looks like we never merged fixing the documentation:
> https://lore.kernel.org/lkml/20220817174909.877139-1-irogers@google.com/
>
> Not specifically a DMR issue but this is reminding me of a bunch of
> tech debt - I also wonder if this patch requires the test being
> updated.
Thanks for the reminder. This patch doesn't involve any functional change for
the "rdpmc" attribute, but along with the introduction of per-counter
"rdpmc user disable" in patch 7/7, the rdpmc test can definitely be
enhanced. I will enhance the rdpmc test in the next version.
>
> Thanks,
> Ian
>
>> ---
>> arch/x86/events/core.c | 7 ++++---
>> arch/x86/events/intel/p6.c | 2 +-
>> arch/x86/events/perf_event.h | 7 +++++++
>> 3 files changed, 12 insertions(+), 4 deletions(-)
>>
>> diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
>> index 5d0d5e466c62..3d9cc1d7fcfa 100644
>> --- a/arch/x86/events/core.c
>> +++ b/arch/x86/events/core.c
>> @@ -2129,7 +2129,8 @@ static int __init init_hw_perf_events(void)
>>
>> pr_cont("%s PMU driver.\n", x86_pmu.name);
>>
>> - x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
>> + /* enable userspace RDPMC usage by default */
>> + x86_pmu.attr_rdpmc = X86_USER_RDPMC_CONDITIONAL_ENABLE;
>>
>> for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
>> quirk->func();
>> @@ -2609,12 +2610,12 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
>> */
>> if (val == 0)
>> static_branch_inc(&rdpmc_never_available_key);
>> - else if (x86_pmu.attr_rdpmc == 0)
>> + else if (x86_pmu.attr_rdpmc == X86_USER_RDPMC_NEVER_ENABLE)
>> static_branch_dec(&rdpmc_never_available_key);
>>
>> if (val == 2)
>> static_branch_inc(&rdpmc_always_available_key);
>> - else if (x86_pmu.attr_rdpmc == 2)
>> + else if (x86_pmu.attr_rdpmc == X86_USER_RDPMC_ALWAYS_ENABLE)
>> static_branch_dec(&rdpmc_always_available_key);
>>
>> on_each_cpu(cr4_update_pce, NULL, 1);
>> diff --git a/arch/x86/events/intel/p6.c b/arch/x86/events/intel/p6.c
>> index 6e41de355bd8..fb991e0ac614 100644
>> --- a/arch/x86/events/intel/p6.c
>> +++ b/arch/x86/events/intel/p6.c
>> @@ -243,7 +243,7 @@ static __init void p6_pmu_rdpmc_quirk(void)
>> */
>> pr_warn("Userspace RDPMC support disabled due to a CPU erratum\n");
>> x86_pmu.attr_rdpmc_broken = 1;
>> - x86_pmu.attr_rdpmc = 0;
>> + x86_pmu.attr_rdpmc = X86_USER_RDPMC_NEVER_ENABLE;
>> }
>> }
>>
>> diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
>> index f7caabc5d487..24a81d2916e9 100644
>> --- a/arch/x86/events/perf_event.h
>> +++ b/arch/x86/events/perf_event.h
>> @@ -187,6 +187,13 @@ struct amd_nb {
>> (1ULL << PERF_REG_X86_R14) | \
>> (1ULL << PERF_REG_X86_R15))
>>
>> +/* user space rdpmc control values */
>> +enum {
>> + X86_USER_RDPMC_NEVER_ENABLE = 0,
>> + X86_USER_RDPMC_CONDITIONAL_ENABLE = 1,
>> + X86_USER_RDPMC_ALWAYS_ENABLE = 2,
>> +};
>> +
>> /*
>> * Per register state.
>> */
>> --
>> 2.34.1
>>
^ permalink raw reply [flat|nested] 10+ messages in thread
* [PATCH 7/7] perf/x86/intel: Add rdpmc-user-disable support
2025-11-20 5:34 [PATCH 0/7] Enable core PMU for DMR and NVL Dapeng Mi
` (5 preceding siblings ...)
2025-11-20 5:34 ` [PATCH 6/7] perf/x86: Replace magic numbers with macros for attr_rdpmc Dapeng Mi
@ 2025-11-20 5:34 ` Dapeng Mi
6 siblings, 0 replies; 10+ messages in thread
From: Dapeng Mi @ 2025-11-20 5:34 UTC (permalink / raw)
To: Peter Zijlstra, Ingo Molnar, Arnaldo Carvalho de Melo,
Namhyung Kim, Ian Rogers, Adrian Hunter, Alexander Shishkin,
Andi Kleen, Eranian Stephane
Cc: linux-kernel, linux-perf-users, Dapeng Mi, Zide Chen,
Falcon Thomas, Xudong Hao, Dapeng Mi
Starting with Panther Cove, the rdpmc user disable feature is
supported. This feature gives the perf subsystem the capability to
disable user space rdpmc reads at the counter level.
Currently, when a global counter is active, any user with rdpmc rights
can read it, even though the perf access permissions may forbid it (e.g.
may not allow reading ring 0 counters).
The rdpmc user disable feature mitigates this security concern. The
details are as follows (a rough bit-position sketch follows the list):
- New RDPMC_USR_DISABLE bit in each EVNTSELx[37] MSR to indicate counter
can't be read by RDPMC in ring 3.
- New RDPMC_USR_DISABLE bits in bits 33,37,41,45 in IA32_FIXED_CTR_CTRL
MSR for fixed counters 0-3.
- On RDPMC for counter x, the returned value is selected as:
(!CPL0 && RDPMC_USR_DISABLE[x] == 1) ? 0 : counter_value
- RDPMC_USR_DISABLE is enumerated by CPUID.0x23.0.EBX[2].
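A rough, user-space-only sketch of the bit positions listed above (purely
illustrative; the constants come from the description, and nothing else
is implied about the kernel implementation):

  /*
   * Shows where the RDPMC_USR_DISABLE bits live: bit 37 of each EVNTSELx
   * MSR for GP counters, and bits 33/37/41/45 of IA32_FIXED_CTR_CTRL for
   * fixed counters 0-3 (i.e. bit 33 + 4 * N for fixed counter N).
   */
  #include <stdio.h>
  #include <stdint.h>

  #define GP_RDPMC_USR_DISABLE  (1ULL << 37)

  static uint64_t fixed_rdpmc_usr_disable(int fixed_idx)
  {
          return 1ULL << (33 + 4 * fixed_idx);
  }

  int main(void)
  {
          printf("EVNTSELx mask:      0x%016llx\n",
                 (unsigned long long)GP_RDPMC_USR_DISABLE);
          for (int i = 0; i < 4; i++)
                  printf("FIXED_CTR_CTRL[%d]:  0x%016llx\n", i,
                         (unsigned long long)fixed_rdpmc_usr_disable(i));
          return 0;
  }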
This patch extends the current global user space rdpmc control logic via
the sysfs interface (/sys/devices/cpu/rdpmc) as below.
- rdpmc = 0
global user space rdpmc and counter level's user space rdpmc of all
counters are both disabled.
- rdpmc = 1
global user space rdpmc is enabled in the mmap-enabled time window and
counter level's user space rdpmc is only enabled for non system-wide
events. This won't introduce a counter data leak as the count data is
cleared when the context switches.
- rdpmc = 2
global user space rdpmc and counter level’s user space rdpmc of all
counters are enabled unconditionally.
The new changed rdpmc only affects the new activiated perf events,
current active perf events won't be impacted. This makes code simpler
and cleaner. BTW, the default value of rdpmc is not changed and is still
1.
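To illustrate how the rdpmc = 1 case ties user space reads to the mmap
window, here is a minimal, simplified user space sketch built on the
standard perf self-monitoring interface (perf_event_open() plus the
mmapped struct perf_event_mmap_page). It only checks cap_user_rdpmc and
index; a production reader would also use the lock/offset seqlock
protocol and sign-extend the value with pmc_width.

    #include <linux/perf_event.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static uint64_t rdpmc(uint32_t counter)
    {
        uint32_t lo, hi;

        asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter));
        return ((uint64_t)hi << 32) | lo;
    }

    int main(void)
    {
        struct perf_event_mmap_page *pc;
        struct perf_event_attr attr;
        long page_size = sysconf(_SC_PAGESIZE);
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_INSTRUCTIONS;

        /* A per-task (non system-wide) event: pid = 0, cpu = -1. */
        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
            perror("perf_event_open");
            return 1;
        }

        /* Mapping the ring-buffer header page opens the rdpmc = 1 window. */
        pc = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
        if (pc == MAP_FAILED) {
            perror("mmap");
            return 1;
        }

        if (pc->cap_user_rdpmc && pc->index) {
            /* pc->index is the hardware counter number plus one. */
            uint64_t val = rdpmc(pc->index - 1);

            printf("raw counter value: %llu\n", (unsigned long long)val);
        } else {
            printf("user space rdpmc is not permitted for this event\n");
        }

        munmap(pc, page_size);
        close(fd);
        return 0;
    }

When user space rdpmc is not allowed, the kernel reports
cap_user_rdpmc = 0; on hardware with RDPMC_USR_DISABLE, a ring 3 rdpmc
of a protected counter reads as 0, as described above.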
For more details about rdpmc user disable, please refer to chapter 15
"RDPMC USER DISABLE" in the ISE documentation.
ISE: https://www.intel.com/content/www/us/en/content-details/869288/intel-architecture-instruction-set-extensions-programming-reference.html
Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
---
.../sysfs-bus-event_source-devices-rdpmc | 40 +++++++++++++++++++
arch/x86/events/core.c | 21 ++++++++++
arch/x86/events/intel/core.c | 26 ++++++++++++
arch/x86/events/perf_event.h | 6 +++
arch/x86/include/asm/perf_event.h | 8 +++-
5 files changed, 99 insertions(+), 2 deletions(-)
create mode 100644 Documentation/ABI/testing/sysfs-bus-event_source-devices-rdpmc
diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices-rdpmc b/Documentation/ABI/testing/sysfs-bus-event_source-devices-rdpmc
new file mode 100644
index 000000000000..d004527ab13e
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-event_source-devices-rdpmc
@@ -0,0 +1,40 @@
+What: /sys/bus/event_source/devices/cpu.../rdpmc
+Date: November 2011
+KernelVersion: 3.10
+Contact: Linux kernel mailing list linux-kernel@vger.kernel.org
+Description: The /sys/bus/event_source/devices/cpu.../rdpmc attribute
+ is used to show and manage whether the rdpmc instruction
+ can be executed in user space. This attribute supports 3 values.
+ - rdpmc = 0
+ user space rdpmc is globally disabled for all PMU
+ counters.
+ - rdpmc = 1
+ user space rdpmc is globally enabled only while the event's
+ ring buffer is mmapped. Once the mmap region is unmapped,
+ user space rdpmc is disabled again.
+ - rdpmc = 2
+ user space rdpmc is globally enabled for all PMU
+ counters.
+
+ On Intel platforms supporting the counter level's user
+ space rdpmc disable feature (CPUID.23H.EBX[2] = 1), the
+ meaning of the 3 values is extended to:
+ - rdpmc = 0
+ global user space rdpmc and counter level's user space
+ rdpmc of all counters are both disabled.
+ - rdpmc = 1
+ The behavior of global user space rdpmc is unchanged.
+ Counter level's rdpmc of system-wide events is disabled,
+ but counter level's rdpmc of non-system-wide events is
+ enabled.
+ - rdpmc = 2
+ global user space rdpmc and counter level's user space
+ rdpmc of all counters are both enabled unconditionally.
+
+ The default value of rdpmc is 1.
+
+ Please note that the global user space rdpmc behavior
+ changes immediately when the rdpmc value is changed, but
+ the counter level's user space rdpmc setting of an event
+ does not take effect until the event is reactivated or
+ recreated.
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 3d9cc1d7fcfa..c1969cc2bb0c 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2582,6 +2582,27 @@ static ssize_t get_attr_rdpmc(struct device *cdev,
return snprintf(buf, 40, "%d\n", x86_pmu.attr_rdpmc);
}
+/*
+ * Behaviors of rdpmc value:
+ * - rdpmc = 0
+ * global user space rdpmc and counter level's user space rdpmc of all
+ * counters are both disabled.
+ * - rdpmc = 1
+ * global user space rdpmc is enabled within the mmap-enabled time window
+ * and counter level's user space rdpmc is enabled only for non
+ * system-wide events. Counter level's user space rdpmc of system-wide
+ * events remains disabled by default. This doesn't leak counter data
+ * for non system-wide events since their count data is cleared on
+ * context switch.
+ * - rdpmc = 2
+ * global user space rdpmc and counter level's user space rdpmc of all
+ * counters are enabled unconditionally.
+ *
+ * Assuming the rdpmc value is not changed frequently, don't dynamically
+ * reschedule events to make a new rdpmc value take effect on active perf
+ * events immediately; a new rdpmc value only impacts newly activated
+ * perf events. This keeps the code simpler and cleaner.
+ */
static ssize_t set_attr_rdpmc(struct device *cdev,
struct device_attribute *attr,
const char *buf, size_t count)
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index a3a1e6e670f8..b4344a476a48 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3128,6 +3128,8 @@ static void intel_pmu_enable_fixed(struct perf_event *event)
bits |= INTEL_FIXED_0_USER;
if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
bits |= INTEL_FIXED_0_KERNEL;
+ if (hwc->config & ARCH_PERFMON_EVENTSEL_RDPMC_USER_DISABLE)
+ bits |= INTEL_FIXED_0_RDPMC_USER_DISABLE;
/*
* ANY bit is supported in v3 and up
@@ -3263,6 +3265,26 @@ static void intel_pmu_enable_event_ext(struct perf_event *event)
__intel_pmu_update_event_ext(hwc->idx, ext);
}
+static void intel_pmu_update_rdpmc_user_disable(struct perf_event *event)
+{
+ /*
+ * Counter scope's user-space rdpmc is disabled by default,
+ * except in two cases:
+ * a. rdpmc = 2 (user space rdpmc enabled unconditionally)
+ * b. rdpmc = 1 and the event is not a system-wide event.
+ * The count of non-system-wide events is cleared on context
+ * switch, so no count data is leaked.
+ */
+ if (x86_pmu_has_rdpmc_user_disable(event->pmu)) {
+ if (x86_pmu.attr_rdpmc == X86_USER_RDPMC_ALWAYS_ENABLE ||
+ (x86_pmu.attr_rdpmc == X86_USER_RDPMC_CONDITIONAL_ENABLE &&
+ event->ctx->task))
+ event->hw.config &= ~ARCH_PERFMON_EVENTSEL_RDPMC_USER_DISABLE;
+ else
+ event->hw.config |= ARCH_PERFMON_EVENTSEL_RDPMC_USER_DISABLE;
+ }
+}
+
DEFINE_STATIC_CALL_NULL(intel_pmu_enable_event_ext, intel_pmu_enable_event_ext);
static void intel_pmu_enable_event(struct perf_event *event)
@@ -3271,6 +3293,8 @@ static void intel_pmu_enable_event(struct perf_event *event)
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
+ intel_pmu_update_rdpmc_user_disable(event);
+
if (unlikely(event->attr.precise_ip))
static_call(x86_pmu_pebs_enable)(event);
@@ -5860,6 +5884,8 @@ static void update_pmu_cap(struct pmu *pmu)
hybrid(pmu, config_mask) |= ARCH_PERFMON_EVENTSEL_UMASK2;
if (ebx_0.split.eq)
hybrid(pmu, config_mask) |= ARCH_PERFMON_EVENTSEL_EQ;
+ if (ebx_0.split.rdpmc_user_disable)
+ hybrid(pmu, config_mask) |= ARCH_PERFMON_EVENTSEL_RDPMC_USER_DISABLE;
if (eax_0.split.cntr_subleaf) {
cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_NUM_COUNTER_LEAF,
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 24a81d2916e9..cd337f3ffd01 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -1333,6 +1333,12 @@ static inline u64 x86_pmu_get_event_config(struct perf_event *event)
return event->attr.config & hybrid(event->pmu, config_mask);
}
+static inline bool x86_pmu_has_rdpmc_user_disable(struct pmu *pmu)
+{
+ return !!(hybrid(pmu, config_mask) &
+ ARCH_PERFMON_EVENTSEL_RDPMC_USER_DISABLE);
+}
+
extern struct event_constraint emptyconstraint;
extern struct event_constraint unconstrained;
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 7276ba70c88a..0356c55d7ec1 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -33,6 +33,7 @@
#define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL
#define ARCH_PERFMON_EVENTSEL_BR_CNTR (1ULL << 35)
#define ARCH_PERFMON_EVENTSEL_EQ (1ULL << 36)
+#define ARCH_PERFMON_EVENTSEL_RDPMC_USER_DISABLE (1ULL << 37)
#define ARCH_PERFMON_EVENTSEL_UMASK2 (0xFFULL << 40)
#define INTEL_FIXED_BITS_STRIDE 4
@@ -40,6 +41,7 @@
#define INTEL_FIXED_0_USER (1ULL << 1)
#define INTEL_FIXED_0_ANYTHREAD (1ULL << 2)
#define INTEL_FIXED_0_ENABLE_PMI (1ULL << 3)
+#define INTEL_FIXED_0_RDPMC_USER_DISABLE (1ULL << 33)
#define INTEL_FIXED_3_METRICS_CLEAR (1ULL << 2)
#define HSW_IN_TX (1ULL << 32)
@@ -50,7 +52,7 @@
#define INTEL_FIXED_BITS_MASK \
(INTEL_FIXED_0_KERNEL | INTEL_FIXED_0_USER | \
INTEL_FIXED_0_ANYTHREAD | INTEL_FIXED_0_ENABLE_PMI | \
- ICL_FIXED_0_ADAPTIVE)
+ ICL_FIXED_0_ADAPTIVE | INTEL_FIXED_0_RDPMC_USER_DISABLE)
#define intel_fixed_bits_by_idx(_idx, _bits) \
((_bits) << ((_idx) * INTEL_FIXED_BITS_STRIDE))
@@ -226,7 +228,9 @@ union cpuid35_ebx {
unsigned int umask2:1;
/* EQ-bit Supported */
unsigned int eq:1;
- unsigned int reserved:30;
+ /* rdpmc user disable Supported */
+ unsigned int rdpmc_user_disable:1;
+ unsigned int reserved:29;
} split;
unsigned int full;
};
--
2.34.1