linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH v1 0/3] Improve event groups for topdown, add X event modifier
@ 2025-08-25 21:12 Ian Rogers
  2025-08-25 21:12 ` [PATCH v1 1/3] perf evsel: Give warning for broken Intel topdown event grouping Ian Rogers
                   ` (3 more replies)
  0 siblings, 4 replies; 5+ messages in thread
From: Ian Rogers @ 2025-08-25 21:12 UTC (permalink / raw)
  To: Peter Zijlstra, Ingo Molnar, Arnaldo Carvalho de Melo,
	Namhyung Kim, Mark Rutland, Alexander Shishkin, Jiri Olsa,
	Ian Rogers, Adrian Hunter, Kan Liang, James Clark,
	Greg Kroah-Hartman, Yoshihiro Furudera, Dapeng Mi, Howard Chu,
	Thomas Falcon, Andi Kleen, linux-perf-users, linux-kernel

In:
https://lore.kernel.org/lkml/20250822082233.1850417-1-dapeng1.mi@linux.intel.com/#t
Dapeng Mi and Xudong Hao reported that Intel topdown events have
issues with parsing when events are duplicated. While some of that is
intended, things could be better. These patches:

1) give error messages specific to topdown event grouping constraints,
2) fail groups if non-leaders fail to open (this appears to be old tech debt),
3) add an 'X' event modifier to allow events to opt out of being regrouped.

The 'X' modifier should also give a means to side-step future issues
in parse_events__sort_events_and_fix_groups should they come up.

Ian Rogers (3):
  perf evsel: Give warning for broken Intel topdown event grouping
  perf stat: Don't skip failing group events
  perf parse-events: Add 'X' modifier to exclude an event from being
    regrouped

 tools/perf/Documentation/perf-list.txt |  1 +
 tools/perf/arch/x86/util/evsel.c       | 62 ++++++++++++++++++++++++--
 tools/perf/builtin-stat.c              | 48 +++++++++-----------
 tools/perf/util/evsel.c                |  7 ++-
 tools/perf/util/evsel.h                |  3 +-
 tools/perf/util/parse-events.c         |  5 ++-
 tools/perf/util/parse-events.h         |  1 +
 tools/perf/util/parse-events.l         |  5 ++-
 8 files changed, 94 insertions(+), 38 deletions(-)

-- 
2.51.0.261.g7ce5a0a67e-goog


^ permalink raw reply	[flat|nested] 5+ messages in thread

* [PATCH v1 1/3] perf evsel: Give warning for broken Intel topdown event grouping
  2025-08-25 21:12 [PATCH v1 0/3] Improve event groups for topdown, add X event modifier Ian Rogers
@ 2025-08-25 21:12 ` Ian Rogers
  2025-08-25 21:12 ` [PATCH v1 2/3] perf stat: Don't skip failing group events Ian Rogers
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 5+ messages in thread
From: Ian Rogers @ 2025-08-25 21:12 UTC (permalink / raw)
  To: Peter Zijlstra, Ingo Molnar, Arnaldo Carvalho de Melo,
	Namhyung Kim, Mark Rutland, Alexander Shishkin, Jiri Olsa,
	Ian Rogers, Adrian Hunter, Kan Liang, James Clark,
	Greg Kroah-Hartman, Yoshihiro Furudera, Dapeng Mi, Howard Chu,
	Thomas Falcon, Andi Kleen, linux-perf-users, linux-kernel

Extend arch_evsel__open_strerror from just AMD IBS events to Intel
core PMU events, to give a message when a slots event isn't a group
leader or when a perf metric event is duplicated within an event
group. As generating the warning happens after non-arch-specific
warnings are generated, disable the missing system wide (-a) flag
warning for the core PMU. This assumes core PMU events should support
per-thread/process and system-wide.

Signed-off-by: Ian Rogers <irogers@google.com>
---
 tools/perf/arch/x86/util/evsel.c | 62 +++++++++++++++++++++++++++++---
 tools/perf/util/evsel.c          |  7 ++--
 tools/perf/util/evsel.h          |  2 +-
 3 files changed, 64 insertions(+), 7 deletions(-)

diff --git a/tools/perf/arch/x86/util/evsel.c b/tools/perf/arch/x86/util/evsel.c
index 9bc80fff3aa0..e67701d26f24 100644
--- a/tools/perf/arch/x86/util/evsel.c
+++ b/tools/perf/arch/x86/util/evsel.c
@@ -1,10 +1,14 @@
 // SPDX-License-Identifier: GPL-2.0
+#include <errno.h>
 #include <stdio.h>
 #include <stdlib.h>
+#include "util/evlist.h"
 #include "util/evsel.h"
 #include "util/env.h"
 #include "util/pmu.h"
 #include "util/pmus.h"
+#include "util/stat.h"
+#include "util/strbuf.h"
 #include "linux/string.h"
 #include "topdown.h"
 #include "evsel.h"
@@ -102,13 +106,15 @@ void arch__post_evsel_config(struct evsel *evsel, struct perf_event_attr *attr)
 	}
 }
 
-int arch_evsel__open_strerror(struct evsel *evsel, char *msg, size_t size)
+static int amd_evsel__open_strerror(struct evsel *evsel, char *msg, size_t size)
 {
-	if (!x86__is_amd_cpu())
+	struct perf_pmu *pmu;
+
+	if (evsel->core.attr.precise_ip == 0)
 		return 0;
 
-	if (!evsel->core.attr.precise_ip &&
-	    !(evsel->pmu && !strncmp(evsel->pmu->name, "ibs", 3)))
+	pmu = evsel__find_pmu(evsel);
+	if (!pmu || strncmp(pmu->name, "ibs", 3))
 		return 0;
 
 	/* More verbose IBS errors. */
@@ -118,6 +124,54 @@ int arch_evsel__open_strerror(struct evsel *evsel, char *msg, size_t size)
 		return scnprintf(msg, size, "AMD IBS doesn't support privilege filtering. Try "
 				 "again without the privilege modifiers (like 'k') at the end.");
 	}
+	return 0;
+}
+
+static int intel_evsel__open_strerror(struct evsel *evsel, int err, char *msg, size_t size)
+{
+	struct strbuf sb = STRBUF_INIT;
+	int ret;
+
+	if (err != EINVAL)
+		return 0;
 
+	if (!topdown_sys_has_perf_metrics())
+		return 0;
+
+	if (arch_is_topdown_slots(evsel)) {
+		if (!evsel__is_group_leader(evsel)) {
+			evlist__uniquify_evsel_names(evsel->evlist, &stat_config);
+			evlist__format_evsels(evsel->evlist, &sb, 2048);
+			ret = scnprintf(msg, size, "Topdown slots event can only be group leader "
+					"in '%s'.", sb.buf);
+			strbuf_release(&sb);
+			return ret;
+		}
+	} else if (arch_is_topdown_metrics(evsel)) {
+		struct evsel *pos;
+
+		evlist__for_each_entry(evsel->evlist, pos) {
+			if (pos == evsel || !arch_is_topdown_metrics(pos))
+				continue;
+
+			if (pos->core.attr.config != evsel->core.attr.config)
+				continue;
+
+			evlist__uniquify_evsel_names(evsel->evlist, &stat_config);
+			evlist__format_evsels(evsel->evlist, &sb, 2048);
+			ret = scnprintf(msg, size, "Perf metric event '%s' is duplicated "
+					"in the same group (only one event is allowed) in '%s'.",
+					evsel__name(evsel), sb.buf);
+			strbuf_release(&sb);
+			return ret;
+		}
+	}
 	return 0;
 }
+
+int arch_evsel__open_strerror(struct evsel *evsel, int err, char *msg, size_t size)
+{
+	return x86__is_amd_cpu()
+		? amd_evsel__open_strerror(evsel, msg, size)
+		: intel_evsel__open_strerror(evsel, err, msg, size);
+}
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index d264c143b592..796f3bf35f47 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -3716,6 +3716,7 @@ static int dump_perf_event_processes(char *msg, size_t size)
 }
 
 int __weak arch_evsel__open_strerror(struct evsel *evsel __maybe_unused,
+				     int err __maybe_unused,
 				     char *msg __maybe_unused,
 				     size_t size __maybe_unused)
 {
@@ -3725,6 +3726,7 @@ int __weak arch_evsel__open_strerror(struct evsel *evsel __maybe_unused,
 int evsel__open_strerror(struct evsel *evsel, struct target *target,
 			 int err, char *msg, size_t size)
 {
+	struct perf_pmu *pmu;
 	char sbuf[STRERR_BUFSIZE];
 	int printed = 0, enforced = 0;
 	int ret;
@@ -3840,7 +3842,8 @@ int evsel__open_strerror(struct evsel *evsel, struct target *target,
 			return scnprintf(msg, size, "The 'aux_action' feature is not supported, update the kernel.");
 		if (perf_missing_features.aux_output)
 			return scnprintf(msg, size, "The 'aux_output' feature is not supported, update the kernel.");
-		if (!target__has_cpu(target))
+		pmu = evsel__find_pmu(evsel);
+		if (!pmu->is_core && !target__has_cpu(target))
 			return scnprintf(msg, size,
 	"Invalid event (%s) in per-thread mode, enable system wide with '-a'.",
 					evsel__name(evsel));
@@ -3853,7 +3856,7 @@ int evsel__open_strerror(struct evsel *evsel, struct target *target,
 		break;
 	}
 
-	ret = arch_evsel__open_strerror(evsel, msg, size);
+	ret = arch_evsel__open_strerror(evsel, err, msg, size);
 	if (ret)
 		return ret;
 
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 5797a02e5d6a..e927a3a4fe0e 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -341,7 +341,7 @@ void evsel__set_sample_id(struct evsel *evsel, bool use_sample_identifier);
 
 void arch_evsel__set_sample_weight(struct evsel *evsel);
 void arch__post_evsel_config(struct evsel *evsel, struct perf_event_attr *attr);
-int arch_evsel__open_strerror(struct evsel *evsel, char *msg, size_t size);
+int arch_evsel__open_strerror(struct evsel *evsel, int err, char *msg, size_t size);
 
 int evsel__set_filter(struct evsel *evsel, const char *filter);
 int evsel__append_tp_filter(struct evsel *evsel, const char *filter);
-- 
2.51.0.261.g7ce5a0a67e-goog


^ permalink raw reply related	[flat|nested] 5+ messages in thread

* [PATCH v1 2/3] perf stat: Don't skip failing group events
  2025-08-25 21:12 [PATCH v1 0/3] Improve event groups for topdown, add X event modifier Ian Rogers
  2025-08-25 21:12 ` [PATCH v1 1/3] perf evsel: Give warning for broken Intel topdown event grouping Ian Rogers
@ 2025-08-25 21:12 ` Ian Rogers
  2025-08-25 21:12 ` [PATCH v1 3/3] perf parse-events: Add 'X' modifier to exclude an event from being regrouped Ian Rogers
  2025-08-26  2:30 ` [PATCH v1 0/3] Improve event groups for topdown, add X event modifier Mi, Dapeng
  3 siblings, 0 replies; 5+ messages in thread
From: Ian Rogers @ 2025-08-25 21:12 UTC (permalink / raw)
  To: Peter Zijlstra, Ingo Molnar, Arnaldo Carvalho de Melo,
	Namhyung Kim, Mark Rutland, Alexander Shishkin, Jiri Olsa,
	Ian Rogers, Adrian Hunter, Kan Liang, James Clark,
	Greg Kroah-Hartman, Yoshihiro Furudera, Dapeng Mi, Howard Chu,
	Thomas Falcon, Andi Kleen, linux-perf-users, linux-kernel

Pass errno to stat_handle_error rather than reading errno after it has
potentially been clobbered. Move "skippable" handling first as a
skippable event (from the perf stat default list) should always just
be skipped.

Remove logic to skip rather than fail events in a group when they
aren't the group leader. The original logic was added in commit
cb5ef60067c1 ("perf stat: Error out unsupported group leader
immediately") due to error handling and opening being together and an
assertion being raised. Not failing this case causes broken groups to
not report values, particularly for topdown events.

Reported-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Closes: https://lore.kernel.org/lkml/20250822082233.1850417-1-dapeng1.mi@linux.intel.com/
Signed-off-by: Ian Rogers <irogers@google.com>
---
 tools/perf/builtin-stat.c | 48 +++++++++++++++++----------------------
 1 file changed, 21 insertions(+), 27 deletions(-)

diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 2c38dd98f6ca..ab567919b89a 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -613,33 +613,40 @@ enum counter_recovery {
 	COUNTER_FATAL,
 };
 
-static enum counter_recovery stat_handle_error(struct evsel *counter)
+static enum counter_recovery stat_handle_error(struct evsel *counter, int err)
 {
 	char msg[BUFSIZ];
+
+	if (counter->skippable) {
+		if (verbose > 0) {
+			ui__warning("skipping event %s that kernel failed to open .\n",
+				    evsel__name(counter));
+		}
+		counter->supported = false;
+		counter->errored = true;
+		return COUNTER_SKIP;
+	}
+
 	/*
 	 * PPC returns ENXIO for HW counters until 2.6.37
 	 * (behavior changed with commit b0a873e).
 	 */
-	if (errno == EINVAL || errno == ENOSYS ||
-	    errno == ENOENT || errno == ENXIO) {
-		if (verbose > 0)
+	if (err == EINVAL || err == ENOSYS || err == ENOENT || err == ENXIO) {
+		if (verbose > 0) {
 			ui__warning("%s event is not supported by the kernel.\n",
 				    evsel__name(counter));
+		}
 		counter->supported = false;
 		/*
 		 * errored is a sticky flag that means one of the counter's
 		 * cpu event had a problem and needs to be reexamined.
 		 */
 		counter->errored = true;
-
-		if ((evsel__leader(counter) != counter) ||
-		    !(counter->core.leader->nr_members > 1))
-			return COUNTER_SKIP;
-	} else if (evsel__fallback(counter, &target, errno, msg, sizeof(msg))) {
+	} else if (evsel__fallback(counter, &target, err, msg, sizeof(msg))) {
 		if (verbose > 0)
 			ui__warning("%s\n", msg);
 		return COUNTER_RETRY;
-	} else if (target__has_per_thread(&target) && errno != EOPNOTSUPP &&
+	} else if (target__has_per_thread(&target) && err != EOPNOTSUPP &&
 		   evsel_list->core.threads &&
 		   evsel_list->core.threads->err_thread != -1) {
 		/*
@@ -651,29 +658,16 @@ static enum counter_recovery stat_handle_error(struct evsel *counter)
 			evsel_list->core.threads->err_thread = -1;
 			return COUNTER_RETRY;
 		}
-	} else if (counter->skippable) {
-		if (verbose > 0)
-			ui__warning("skipping event %s that kernel failed to open .\n",
-				    evsel__name(counter));
-		counter->supported = false;
-		counter->errored = true;
-		return COUNTER_SKIP;
-	}
-
-	if (errno == EOPNOTSUPP) {
+	} else if (err == EOPNOTSUPP) {
 		if (verbose > 0) {
 			ui__warning("%s event is not supported by the kernel.\n",
 				    evsel__name(counter));
 		}
 		counter->supported = false;
 		counter->errored = true;
-
-		if ((evsel__leader(counter) != counter) ||
-		    !(counter->core.leader->nr_members > 1))
-			return COUNTER_SKIP;
 	}
 
-	evsel__open_strerror(counter, &target, errno, msg, sizeof(msg));
+	evsel__open_strerror(counter, &target, err, msg, sizeof(msg));
 	ui__error("%s\n", msg);
 
 	if (child_pid != -1)
@@ -761,7 +755,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 				continue;
 			}
 
-			switch (stat_handle_error(counter)) {
+			switch (stat_handle_error(counter, errno)) {
 			case COUNTER_FATAL:
 				err = -1;
 				goto err_out;
@@ -803,7 +797,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 			if (create_perf_stat_counter(counter, &stat_config, &target,
 						     evlist_cpu_itr.cpu_map_idx) < 0) {
 
-				switch (stat_handle_error(counter)) {
+				switch (stat_handle_error(counter, errno)) {
 				case COUNTER_FATAL:
 					err = -1;
 					goto err_out;
-- 
2.51.0.261.g7ce5a0a67e-goog


^ permalink raw reply related	[flat|nested] 5+ messages in thread

* [PATCH v1 3/3] perf parse-events: Add 'X' modifier to exclude an event from being regrouped
  2025-08-25 21:12 [PATCH v1 0/3] Improve event groups for topdown, add X event modifier Ian Rogers
  2025-08-25 21:12 ` [PATCH v1 1/3] perf evsel: Give warning for broken Intel topdown event grouping Ian Rogers
  2025-08-25 21:12 ` [PATCH v1 2/3] perf stat: Don't skip failing group events Ian Rogers
@ 2025-08-25 21:12 ` Ian Rogers
  2025-08-26  2:30 ` [PATCH v1 0/3] Improve event groups for topdown, add X event modifier Mi, Dapeng
  3 siblings, 0 replies; 5+ messages in thread
From: Ian Rogers @ 2025-08-25 21:12 UTC (permalink / raw)
  To: Peter Zijlstra, Ingo Molnar, Arnaldo Carvalho de Melo,
	Namhyung Kim, Mark Rutland, Alexander Shishkin, Jiri Olsa,
	Ian Rogers, Adrian Hunter, Kan Liang, James Clark,
	Greg Kroah-Hartman, Yoshihiro Furudera, Dapeng Mi, Howard Chu,
	Thomas Falcon, Andi Kleen, linux-perf-users, linux-kernel
  Cc: Xudong Hao

The function parse_events__sort_events_and_fix_groups is needed to fix
uncore events like:
```
$ perf stat -e '{data_read,data_write}' ...
```
so that the multiple uncore PMUs have a group each of data_read and
data_write events.

The same function will perform architecture sorting and group fixing,
in particular for Intel topdown/perf-metric events. Grouping multiple
perf metric events together causes perf_event_open to fail as the
group can only support one. This means command lines like:
```
$ perf stat -e 'slots,slots' ...
```
fail as the slots events are forced into a group together to try to
satisfy the perf-metric event constraints.

As the user may know better than
parse_events__sort_events_and_fix_groups, add an 'X' modifier to skip
its regrouping behavior. This allows the following to succeed rather
than fail on the second slots event being opened:
```
$ perf stat -e 'slots,slots:X' -a sleep 1

 Performance counter stats for 'system wide':

     6,834,154,071      cpu_core/slots/                                                         (50.13%)
     5,548,629,453      cpu_core/slots/X                                                        (49.87%)

       1.002634606 seconds time elapsed
```

Reported-by: Xudong Hao <xudong.hao@intel.com>
Reported-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Closes: https://lore.kernel.org/lkml/20250822082233.1850417-1-dapeng1.mi@linux.intel.com/
Signed-off-by: Ian Rogers <irogers@google.com>
---
 tools/perf/Documentation/perf-list.txt | 1 +
 tools/perf/util/evsel.h                | 1 +
 tools/perf/util/parse-events.c         | 5 +++--
 tools/perf/util/parse-events.h         | 1 +
 tools/perf/util/parse-events.l         | 5 +++--
 5 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
index 28215306a78a..a5039d1614f9 100644
--- a/tools/perf/Documentation/perf-list.txt
+++ b/tools/perf/Documentation/perf-list.txt
@@ -73,6 +73,7 @@ counted. The following modifiers exist:
  e - group or event are exclusive and do not share the PMU
  b - use BPF aggregration (see perf stat --bpf-counters)
  R - retire latency value of the event
+ X - don't regroup the event to match PMUs
 
 The 'p' modifier can be used for specifying how precise the instruction
 address should be. The 'p' modifier can be specified multiple times:
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index e927a3a4fe0e..03f9f22e3a0c 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -89,6 +89,7 @@ struct evsel {
 		bool			use_config_name;
 		bool			skippable;
 		bool			retire_lat;
+		bool			dont_regroup;
 		int			bpf_fd;
 		struct bpf_object	*bpf_obj;
 		struct list_head	config_terms;
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 8282ddf68b98..43de19551c81 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -1892,6 +1892,8 @@ static int parse_events__modifier_list(struct parse_events_state *parse_state,
 			evsel->bpf_counter = true;
 		if (mod.retire_lat)
 			evsel->retire_lat = true;
+		if (mod.dont_regroup)
+			evsel->dont_regroup = true;
 	}
 	return 0;
 }
@@ -2188,13 +2190,12 @@ static int parse_events__sort_events_and_fix_groups(struct list_head *list)
 		 * Set the group leader respecting the given groupings and that
 		 * groups can't span PMUs.
 		 */
-		if (!cur_leader) {
+		if (!cur_leader || pos->dont_regroup) {
 			cur_leader = pos;
 			cur_leaders_grp = &pos->core;
 			if (pos_force_grouped)
 				force_grouped_leader = pos;
 		}
-
 		cur_leader_pmu_name = cur_leader->group_pmu_name;
 		if (strcmp(cur_leader_pmu_name, pos_pmu_name)) {
 			/* PMU changed so the group/leader must change. */
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index 62dc7202e3ba..a5c5fc39fd6f 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -216,6 +216,7 @@ struct parse_events_modifier {
 	bool guest : 1;		/* 'G' */
 	bool host : 1;		/* 'H' */
 	bool retire_lat : 1;	/* 'R' */
+	bool dont_regroup : 1;	/* 'X' */
 };
 
 int parse_events__modifier_event(struct parse_events_state *parse_state, void *loc,
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index 2034590eb789..294e943bcdb4 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -206,6 +206,7 @@ static int modifiers(struct parse_events_state *parse_state, yyscan_t scanner)
 		CASE('e', exclusive);
 		CASE('b', bpf);
 		CASE('R', retire_lat);
+		CASE('X', dont_regroup);
 		default:
 			return PE_ERROR;
 		}
@@ -251,10 +252,10 @@ term_name	{name_start}[a-zA-Z0-9_*?.\[\]!\-:]*
 quoted_name	[\']{name_start}[a-zA-Z0-9_*?.\[\]!\-:,\.=]*[\']
 drv_cfg_term	[a-zA-Z0-9_\.]+(=[a-zA-Z0-9_*?\.:]+)?
 /*
- * If you add a modifier you need to update check_modifier().
+ * If you add a modifier you need to update modifiers().
  * Also, the letters in modifier_event must not be in modifier_bp.
  */
-modifier_event	[ukhpPGHSDIWebR]{1,16}
+modifier_event	[ukhpPGHSDIWebRX]{1,17}
 modifier_bp	[rwx]{1,3}
 lc_type 	(L1-dcache|l1-d|l1d|L1-data|L1-icache|l1-i|l1i|L1-instruction|LLC|L2|dTLB|d-tlb|Data-TLB|iTLB|i-tlb|Instruction-TLB|branch|branches|bpu|btb|bpc|node)
 lc_op_result	(load|loads|read|store|stores|write|prefetch|prefetches|speculative-read|speculative-load|refs|Reference|ops|access|misses|miss)
-- 
2.51.0.261.g7ce5a0a67e-goog


^ permalink raw reply related	[flat|nested] 5+ messages in thread

* Re: [PATCH v1 0/3] Improve event groups for topdown, add X event modifier
  2025-08-25 21:12 [PATCH v1 0/3] Improve event groups for topdown, add X event modifier Ian Rogers
                   ` (2 preceding siblings ...)
  2025-08-25 21:12 ` [PATCH v1 3/3] perf parse-events: Add 'X' modifier to exclude an event from being regrouped Ian Rogers
@ 2025-08-26  2:30 ` Mi, Dapeng
  3 siblings, 0 replies; 5+ messages in thread
From: Mi, Dapeng @ 2025-08-26  2:30 UTC (permalink / raw)
  To: Ian Rogers, Peter Zijlstra, Ingo Molnar, Arnaldo Carvalho de Melo,
	Namhyung Kim, Mark Rutland, Alexander Shishkin, Jiri Olsa,
	Adrian Hunter, Kan Liang, James Clark, Greg Kroah-Hartman,
	Yoshihiro Furudera, Howard Chu, Thomas Falcon, Andi Kleen,
	linux-perf-users, linux-kernel


On 8/26/2025 5:12 AM, Ian Rogers wrote:
> In:
> https://lore.kernel.org/lkml/20250822082233.1850417-1-dapeng1.mi@linux.intel.com/#t
> Dapeng Mi and Xudong Hao reported that Intel topdown events have
> issues with parsing when events are duplicated. While some of that is
> intended, things could be better. These patches:
>
> 1) give error messages specific to topdown event grouping constraints,
> 2) fail groups if non-leaders fail to open (this appears to be old tech debt),
> 3) adds an 'X' event modifier to allow events to opt-out of being regrouped.
>
> The 'X' modifier should also give a means to side-step future issues
> in parse_events__sort_events_and_fix_groups should they come up.
>
> Ian Rogers (3):
>   perf evsel: Give warning for broken Intel topdown event grouping
>   perf stat: Don't skip failing group events
>   perf parse-events: Add 'X' modifier to exclude an event from being
>     regrouped
>
>  tools/perf/Documentation/perf-list.txt |  1 +
>  tools/perf/arch/x86/util/evsel.c       | 62 ++++++++++++++++++++++++--
>  tools/perf/builtin-stat.c              | 48 +++++++++-----------
>  tools/perf/util/evsel.c                |  7 ++-
>  tools/perf/util/evsel.h                |  3 +-
>  tools/perf/util/parse-events.c         |  5 ++-
>  tools/perf/util/parse-events.h         |  1 +
>  tools/perf/util/parse-events.l         |  5 ++-
>  8 files changed, 94 insertions(+), 38 deletions(-)

The whole patch-set looks good to me.

Reviewed-by: Dapeng Mi <dapeng1.mi@linux.intel.com>

Tested the patches on Intel Sapphire Rapids and Panther Lake, all results
are expected.

Tested-by: Dapeng Mi <dapeng1.mi@linux.intel.com>

Test results on Sapphire Rapids.

1. sudo ./perf stat -e slots,slots -a sleep 1

WARNING: events were regrouped to match PMUs
Error:
Topdown slots event can only be group leader in '{slots,slots}'.

2. sudo ./perf stat -e slots,slots:X -a sleep 1
 Performance counter stats for 'system wide':

    55,734,800,895      slots                                             
                     (49.99%)
    57,217,900,875      slots:X                                           
                     (50.01%)

       1.009697323 seconds time elapsed

3.  sudo ./perf stat -e slots,topdown-fe-bound,topdown-fe-bound -a sleep 1

WARNING: events were regrouped to match PMUs
Error:
Perf metric event 'topdown-fe-bound' is duplicated in the same group (only
one event is allowed) in '{slots,topdown-fe-bound,topdown-fe-bound}'.

4. Perf stats test

sudo ./perf test 100
100: perf stat tests                                                 : Ok

Test results on Panther Lake.

1. sudo ./perf stat -e slots,slots -a sleep 1
WARNING: events were regrouped to match PMUs
Error:
Topdown slots event can only be group leader in
'{cpu_core/slots/,cpu_core/slots/}'.

2. sudo ./perf stat -e slots,slots:X -a sleep 1

 Performance counter stats for 'system wide':

     1,592,899,478      cpu_core/slots/                                   
                     (49.98%)
     1,682,298,980      cpu_core/slots/X                                   
                    (50.02%)

       1.002821768 seconds time elapsed

3. sudo ./perf stat -e slots,topdown-fe-bound,topdown-fe-bound -a sleep 1
WARNING: events were regrouped to match PMUs
Error:
Perf metric event 'cpu_core/topdown-fe-bound/' is duplicated in the same
group (only one event is allowed) in
'{cpu_core/slots/,cpu_core/topdown-fe-bound/,cpu_core/topdown-fe-bound/},cpu_atom/topdown-fe-bound/,cpu_atom/topdown-fe-bound/'.

4. sudo ./perf test 99
 99: perf stat tests                                                 : Ok


^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2025-08-26  2:30 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2025-08-25 21:12 [PATCH v1 0/3] Improve event groups for topdown, add X event modifier Ian Rogers
2025-08-25 21:12 ` [PATCH v1 1/3] perf evsel: Give warning for broken Intel topdown event grouping Ian Rogers
2025-08-25 21:12 ` [PATCH v1 2/3] perf stat: Don't skip failing group events Ian Rogers
2025-08-25 21:12 ` [PATCH v1 3/3] perf parse-events: Add 'X' modifier to exclude an event from being regrouped Ian Rogers
2025-08-26  2:30 ` [PATCH v1 0/3] Improve event groups for topdown, add X event modifier Mi, Dapeng

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).