linux-perf-users.vger.kernel.org archive mirror
From: Ian Rogers <irogers@google.com>
To: Suzuki K Poulose <suzuki.poulose@arm.com>,
	Mike Leach <mike.leach@linaro.org>, Leo Yan <leo.yan@linaro.org>,
	John Garry <john.g.garry@oracle.com>,
	Will Deacon <will@kernel.org>, James Clark <james.clark@arm.com>,
	Peter Zijlstra <peterz@infradead.org>,
	Ingo Molnar <mingo@redhat.com>,
	Arnaldo Carvalho de Melo <acme@kernel.org>,
	Mark Rutland <mark.rutland@arm.com>,
	Alexander Shishkin <alexander.shishkin@linux.intel.com>,
	Jiri Olsa <jolsa@kernel.org>, Namhyung Kim <namhyung@kernel.org>,
	Ian Rogers <irogers@google.com>,
	Adrian Hunter <adrian.hunter@intel.com>,
	Kajol Jain <kjain@linux.ibm.com>,
	Jing Zhang <renyu.zj@linux.alibaba.com>,
	Kan Liang <kan.liang@linux.intel.com>,
	Zhengjun Xing <zhengjun.xing@linux.intel.com>,
	Ravi Bangoria <ravi.bangoria@amd.com>,
	Madhavan Srinivasan <maddy@linux.ibm.com>,
	Athira Rajeev <atrajeev@linux.vnet.ibm.com>,
	Ming Wang <wangming01@loongson.cn>,
	Huacai Chen <chenhuacai@kernel.org>,
	Sandipan Das <sandipan.das@amd.com>,
	Dmitrii Dolgov <9erthalion6@gmail.com>,
	Sean Christopherson <seanjc@google.com>,
	Raul Silvera <rsilvera@google.com>,
	Ali Saidi <alisaidi@amazon.com>, Rob Herring <robh@kernel.org>,
	Thomas Richter <tmricht@linux.ibm.com>,
	Kang Minchul <tegongkang@gmail.com>,
	linux-kernel@vger.kernel.org, coresight@lists.linaro.org,
	linux-arm-kernel@lists.infradead.org,
	linux-perf-users@vger.kernel.org
Subject: [PATCH v1 21/23] perf pmus: Allow just core PMU scanning
Date: Wed, 17 May 2023 07:58:01 -0700	[thread overview]
Message-ID: <20230517145803.559429-22-irogers@google.com> (raw)
In-Reply-To: <20230517145803.559429-1-irogers@google.com>

Scanning all PMUs is expensive because every PMU's sysfs entries are
loaded; benchmarking shows a full scan costs more than 4x as much as
scanning only the core PMUs:

```
$ perf bench internals pmu-scan -i 1000
Computing performance of sysfs PMU event scan for 1000 times
  Average core PMU scanning took: 989.231 usec (+- 1.535 usec)
  Average PMU scanning took: 4309.425 usec (+- 74.322 usec)
```

Add a new perf_pmus__scan_core routine that scans just core PMUs, and
replace perf_pmus__scan calls with perf_pmus__scan_core in places that
were already ignoring non-core PMUs.
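
For illustration only (not part of the patch), a minimal sketch of the
caller pattern this conversion targets. The count_core_pmus_* helpers
are hypothetical, and the includes assume tools/perf's util/pmu.h and
util/pmus.h headers:

```
#include "util/pmu.h"   /* struct perf_pmu, pmu->is_core */
#include "util/pmus.h"  /* perf_pmus__scan(), perf_pmus__scan_core() */

/* Hypothetical example: old style, scan every PMU and filter on is_core. */
static int count_core_pmus_before(void)
{
	struct perf_pmu *pmu = NULL;
	int n = 0;

	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		if (!pmu->is_core)
			continue;
		n++;
	}
	return n;
}

/* Hypothetical example: new style, only core PMU sysfs entries are read. */
static int count_core_pmus_after(void)
{
	struct perf_pmu *pmu = NULL;
	int n = 0;

	while ((pmu = perf_pmus__scan_core(pmu)) != NULL)
		n++;
	return n;
}
```

The "after" form mirrors the perf_pmus__num_mem_pmus() change in the
diff below.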

Signed-off-by: Ian Rogers <irogers@google.com>
---
 tools/perf/arch/arm64/util/pmu.c     |  5 +--
 tools/perf/arch/x86/util/evlist.c    |  5 +--
 tools/perf/arch/x86/util/perf_regs.c |  8 ++---
 tools/perf/bench/pmu-scan.c          | 50 ++++++++++++++++------------
 tools/perf/tests/pmu-events.c        |  5 +--
 tools/perf/util/cputopo.c            | 10 ++----
 tools/perf/util/header.c             |  5 +--
 tools/perf/util/mem-events.c         | 14 ++------
 tools/perf/util/parse-events.c       | 13 +++-----
 tools/perf/util/pmu.c                | 10 ------
 tools/perf/util/pmu.h                |  2 --
 tools/perf/util/pmus.c               | 28 ++++++++++++----
 tools/perf/util/pmus.h               |  1 +
 tools/perf/util/print-events.c       | 11 +++---
 14 files changed, 73 insertions(+), 94 deletions(-)

diff --git a/tools/perf/arch/arm64/util/pmu.c b/tools/perf/arch/arm64/util/pmu.c
index 2504d43a39a7..561de0cb6b95 100644
--- a/tools/perf/arch/arm64/util/pmu.c
+++ b/tools/perf/arch/arm64/util/pmu.c
@@ -11,10 +11,7 @@ static struct perf_pmu *pmu__find_core_pmu(void)
 {
 	struct perf_pmu *pmu = NULL;
 
-	while ((pmu = perf_pmus__scan(pmu))) {
-		if (!is_pmu_core(pmu->name))
-			continue;
-
+	while ((pmu = perf_pmus__scan_core(pmu))) {
 		/*
 		 * The cpumap should cover all CPUs. Otherwise, some CPUs may
 		 * not support some events or have different event IDs.
diff --git a/tools/perf/arch/x86/util/evlist.c b/tools/perf/arch/x86/util/evlist.c
index 03240c640c7f..8a6a0b98b976 100644
--- a/tools/perf/arch/x86/util/evlist.c
+++ b/tools/perf/arch/x86/util/evlist.c
@@ -33,13 +33,10 @@ static int ___evlist__add_default_attrs(struct evlist *evlist,
 			continue;
 		}
 
-		while ((pmu = perf_pmus__scan(pmu)) != NULL) {
+		while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
 			struct perf_cpu_map *cpus;
 			struct evsel *evsel;
 
-			if (!pmu->is_core)
-				continue;
-
 			evsel = evsel__new(attrs + i);
 			if (evsel == NULL)
 				goto out_delete_partial_list;
diff --git a/tools/perf/arch/x86/util/perf_regs.c b/tools/perf/arch/x86/util/perf_regs.c
index befa7f3659b9..116384f19baf 100644
--- a/tools/perf/arch/x86/util/perf_regs.c
+++ b/tools/perf/arch/x86/util/perf_regs.c
@@ -300,11 +300,9 @@ uint64_t arch__intr_reg_mask(void)
 		 * The same register set is supported among different hybrid PMUs.
 		 * Only check the first available one.
 		 */
-		while ((pmu = perf_pmus__scan(pmu)) != NULL) {
-			if (pmu->is_core) {
-				type = pmu->type;
-				break;
-			}
+		while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
+			type = pmu->type;
+			break;
 		}
 		attr.config |= type << PERF_PMU_TYPE_SHIFT;
 	}
diff --git a/tools/perf/bench/pmu-scan.c b/tools/perf/bench/pmu-scan.c
index 51cae2d03353..c7d207f8e13c 100644
--- a/tools/perf/bench/pmu-scan.c
+++ b/tools/perf/bench/pmu-scan.c
@@ -22,6 +22,7 @@ struct pmu_scan_result {
 	int nr_aliases;
 	int nr_formats;
 	int nr_caps;
+	bool is_core;
 };
 
 static const struct option options[] = {
@@ -53,6 +54,7 @@ static int save_result(void)
 		r = results + nr_pmus;
 
 		r->name = strdup(pmu->name);
+		r->is_core = pmu->is_core;
 		r->nr_caps = pmu->nr_caps;
 
 		r->nr_aliases = 0;
@@ -72,7 +74,7 @@ static int save_result(void)
 	return 0;
 }
 
-static int check_result(void)
+static int check_result(bool core_only)
 {
 	struct pmu_scan_result *r;
 	struct perf_pmu *pmu;
@@ -81,6 +83,9 @@ static int check_result(void)
 
 	for (int i = 0; i < nr_pmus; i++) {
 		r = &results[i];
+		if (core_only && !r->is_core)
+			continue;
+
 		pmu = perf_pmus__find(r->name);
 		if (pmu == NULL) {
 			pr_err("Cannot find PMU %s\n", r->name);
@@ -130,7 +135,6 @@ static int run_pmu_scan(void)
 	struct timeval start, end, diff;
 	double time_average, time_stddev;
 	u64 runtime_us;
-	unsigned int i;
 	int ret;
 
 	init_stats(&stats);
@@ -142,26 +146,30 @@ static int run_pmu_scan(void)
 		return -1;
 	}
 
-	for (i = 0; i < iterations; i++) {
-		gettimeofday(&start, NULL);
-		perf_pmus__scan(NULL);
-		gettimeofday(&end, NULL);
-
-		timersub(&end, &start, &diff);
-		runtime_us = diff.tv_sec * USEC_PER_SEC + diff.tv_usec;
-		update_stats(&stats, runtime_us);
-
-		ret = check_result();
-		perf_pmus__destroy();
-		if (ret < 0)
-			break;
+	for (int j = 0; j < 2; j++) {
+		bool core_only = (j == 0);
+
+		for (unsigned int i = 0; i < iterations; i++) {
+			gettimeofday(&start, NULL);
+			if (core_only)
+				perf_pmus__scan_core(NULL);
+			else
+				perf_pmus__scan(NULL);
+			gettimeofday(&end, NULL);
+			timersub(&end, &start, &diff);
+			runtime_us = diff.tv_sec * USEC_PER_SEC + diff.tv_usec;
+			update_stats(&stats, runtime_us);
+
+			ret = check_result(core_only);
+			perf_pmus__destroy();
+			if (ret < 0)
+				break;
+		}
+		time_average = avg_stats(&stats);
+		time_stddev = stddev_stats(&stats);
+		pr_info("  Average%s PMU scanning took: %.3f usec (+- %.3f usec)\n",
+			core_only ? " core" : "", time_average, time_stddev);
 	}
-
-	time_average = avg_stats(&stats);
-	time_stddev = stddev_stats(&stats);
-	pr_info("  Average PMU scanning took: %.3f usec (+- %.3f usec)\n",
-		time_average, time_stddev);
-
 	delete_result();
 	return 0;
 }
diff --git a/tools/perf/tests/pmu-events.c b/tools/perf/tests/pmu-events.c
index 64ecb7845af4..64383fc34ef1 100644
--- a/tools/perf/tests/pmu-events.c
+++ b/tools/perf/tests/pmu-events.c
@@ -709,12 +709,9 @@ static int test__aliases(struct test_suite *test __maybe_unused,
 	struct perf_pmu *pmu = NULL;
 	unsigned long i;
 
-	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
+	while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
 		int count = 0;
 
-		if (!is_pmu_core(pmu->name))
-			continue;
-
 		if (list_empty(&pmu->format)) {
 			pr_debug2("skipping testing core PMU %s\n", pmu->name);
 			continue;
diff --git a/tools/perf/util/cputopo.c b/tools/perf/util/cputopo.c
index 4578c26747e1..3723a1cec768 100644
--- a/tools/perf/util/cputopo.c
+++ b/tools/perf/util/cputopo.c
@@ -477,9 +477,8 @@ struct hybrid_topology *hybrid_topology__new(void)
 	if (!perf_pmus__has_hybrid())
 		return NULL;
 
-	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
-		if (pmu->is_core)
-			nr++;
+	while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
+		nr++;
 	}
 	if (nr == 0)
 		return NULL;
@@ -489,10 +488,7 @@ struct hybrid_topology *hybrid_topology__new(void)
 		return NULL;
 
 	tp->nr = nr;
-	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
-		if (!pmu->is_core)
-			continue;
-
+	while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
 		if (load_hybrid_node(&tp->nodes[i], pmu)) {
 			hybrid_topology__delete(tp);
 			return NULL;
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index fa3f7dbbd90e..c701cc474d79 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1591,10 +1591,7 @@ static int write_pmu_caps(struct feat_fd *ff,
 	 */
 	if (perf_pmus__has_hybrid()) {
 		pmu = NULL;
-		while ((pmu = perf_pmus__scan(pmu))) {
-			if (!pmu->is_core)
-				continue;
-
+		while ((pmu = perf_pmus__scan_core(pmu))) {
 			ret = __write_pmu_caps(ff, pmu, true);
 			if (ret < 0)
 				return ret;
diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
index 08ac3ea2e366..c5596230a308 100644
--- a/tools/perf/util/mem-events.c
+++ b/tools/perf/util/mem-events.c
@@ -136,10 +136,7 @@ int perf_mem_events__init(void)
 		} else {
 			struct perf_pmu *pmu = NULL;
 
-			while ((pmu = perf_pmus__scan(pmu)) != NULL) {
-				if (!pmu->is_core)
-					continue;
-
+			while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
 				scnprintf(sysfs_name, sizeof(sysfs_name),
 					  e->sysfs_name, pmu->name);
 				e->supported |= perf_mem_event__supported(mnt, sysfs_name);
@@ -176,10 +173,7 @@ static void perf_mem_events__print_unsupport_hybrid(struct perf_mem_event *e,
 	char sysfs_name[100];
 	struct perf_pmu *pmu = NULL;
 
-	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
-		if (!pmu->is_core)
-			continue;
-
+	while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
 		scnprintf(sysfs_name, sizeof(sysfs_name), e->sysfs_name,
 			  pmu->name);
 		if (!perf_mem_event__supported(mnt, sysfs_name)) {
@@ -217,9 +211,7 @@ int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
 				return -1;
 			}
 
-			while ((pmu = perf_pmus__scan(pmu)) != NULL) {
-				if (!pmu->is_core)
-					continue;
+			while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
 				rec_argv[i++] = "-e";
 				s = perf_mem_events__name(j, pmu->name);
 				if (s) {
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 984b230e14d4..47ee628a65bb 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -452,15 +452,12 @@ int parse_events_add_cache(struct list_head *list, int *idx, const char *name,
 	const char *config_name = get_config_name(head_config);
 	const char *metric_id = get_config_metric_id(head_config);
 
-	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
+	/* Legacy cache events are only supported by core PMUs. */
+	while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
 		LIST_HEAD(config_terms);
 		struct perf_event_attr attr;
 		int ret;
 
-		/* Skip unsupported PMUs. */
-		if (!perf_pmu__supports_legacy_cache(pmu))
-			continue;
-
 		if (parse_events__filter_pmu(parse_state, pmu))
 			continue;
 
@@ -1480,12 +1477,10 @@ int parse_events_add_numeric(struct parse_events_state *parse_state,
 		return __parse_events_add_numeric(parse_state, list, /*pmu=*/NULL,
 						  type, config, head_config);
 
-	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
+	/* Wildcards on numeric values are only supported by core PMUs. */
+	while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
 		int ret;
 
-		if (!perf_pmu__supports_wildcard_numeric(pmu))
-			continue;
-
 		if (parse_events__filter_pmu(parse_state, pmu))
 			continue;
 
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 3217a859c65b..4844ed8049f1 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -1440,21 +1440,11 @@ bool perf_pmu__supports_legacy_cache(const struct perf_pmu *pmu)
 	return pmu->is_core;
 }
 
-bool perf_pmu__supports_wildcard_numeric(const struct perf_pmu *pmu)
-{
-	return pmu->is_core;
-}
-
 bool perf_pmu__auto_merge_stats(const struct perf_pmu *pmu)
 {
 	return !is_pmu_hybrid(pmu->name);
 }
 
-bool perf_pmu__is_mem_pmu(const struct perf_pmu *pmu)
-{
-	return pmu->is_core;
-}
-
 bool perf_pmu__have_event(const struct perf_pmu *pmu, const char *name)
 {
 	struct perf_pmu_alias *alias;
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index f1f3e8a2e00e..02fec0a7d4c8 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -223,9 +223,7 @@ void perf_pmu__del_formats(struct list_head *formats);
 bool is_pmu_core(const char *name);
 bool is_pmu_hybrid(const char *name);
 bool perf_pmu__supports_legacy_cache(const struct perf_pmu *pmu);
-bool perf_pmu__supports_wildcard_numeric(const struct perf_pmu *pmu);
 bool perf_pmu__auto_merge_stats(const struct perf_pmu *pmu);
-bool perf_pmu__is_mem_pmu(const struct perf_pmu *pmu);
 bool perf_pmu__have_event(const struct perf_pmu *pmu, const char *name);
 
 FILE *perf_pmu__open_file(struct perf_pmu *pmu, const char *name);
diff --git a/tools/perf/util/pmus.c b/tools/perf/util/pmus.c
index dd029a810147..fcd61bddffc3 100644
--- a/tools/perf/util/pmus.c
+++ b/tools/perf/util/pmus.c
@@ -87,7 +87,7 @@ static struct perf_pmu *perf_pmu__find2(int dirfd, const char *name)
 }
 
 /* Add all pmus in sysfs to pmu list: */
-static void pmu_read_sysfs(void)
+static void pmu_read_sysfs(bool core_only)
 {
 	int fd;
 	DIR *dir;
@@ -104,6 +104,8 @@ static void pmu_read_sysfs(void)
 	while ((dent = readdir(dir))) {
 		if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, ".."))
 			continue;
+		if (core_only && !is_pmu_core(dent->d_name))
+			continue;
 		/* add to static LIST_HEAD(core_pmus) or LIST_HEAD(uncore_pmus): */
 		perf_pmu__find2(fd, dent->d_name);
 	}
@@ -135,7 +137,7 @@ struct perf_pmu *perf_pmus__scan(struct perf_pmu *pmu)
 	bool use_core_pmus = !pmu || pmu->is_core;
 
 	if (!pmu) {
-		pmu_read_sysfs();
+		pmu_read_sysfs(/*core_only=*/false);
 		pmu = list_prepare_entry(pmu, &core_pmus, list);
 	}
 	if (use_core_pmus) {
@@ -150,6 +152,18 @@ struct perf_pmu *perf_pmus__scan(struct perf_pmu *pmu)
 	return NULL;
 }
 
+struct perf_pmu *perf_pmus__scan_core(struct perf_pmu *pmu)
+{
+	if (!pmu) {
+		pmu_read_sysfs(/*core_only=*/true);
+		pmu = list_prepare_entry(pmu, &core_pmus, list);
+	}
+	list_for_each_entry_continue(pmu, &core_pmus, list)
+		return pmu;
+
+	return NULL;
+}
+
 const struct perf_pmu *perf_pmus__pmu_for_pmu_filter(const char *str)
 {
 	struct perf_pmu *pmu = NULL;
@@ -176,9 +190,9 @@ int perf_pmus__num_mem_pmus(void)
 	struct perf_pmu *pmu = NULL;
 	int count = 0;
 
-	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
-		if (perf_pmu__is_mem_pmu(pmu))
-			count++;
+	/* All core PMUs are for mem events. */
+	while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
+		count++;
 	}
 	return count;
 }
@@ -422,8 +436,8 @@ bool perf_pmus__has_hybrid(void)
 	if (!hybrid_scanned) {
 		struct perf_pmu *pmu = NULL;
 
-		while ((pmu = perf_pmus__scan(pmu)) != NULL) {
-			if (pmu->is_core && is_pmu_hybrid(pmu->name)) {
+		while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
+			if (is_pmu_hybrid(pmu->name)) {
 				has_hybrid = true;
 				break;
 			}
diff --git a/tools/perf/util/pmus.h b/tools/perf/util/pmus.h
index 2a771d9f8da7..9de0222ed52b 100644
--- a/tools/perf/util/pmus.h
+++ b/tools/perf/util/pmus.h
@@ -11,6 +11,7 @@ struct perf_pmu *perf_pmus__find(const char *name);
 struct perf_pmu *perf_pmus__find_by_type(unsigned int type);
 
 struct perf_pmu *perf_pmus__scan(struct perf_pmu *pmu);
+struct perf_pmu *perf_pmus__scan_core(struct perf_pmu *pmu);
 
 const struct perf_pmu *perf_pmus__pmu_for_pmu_filter(const char *str);
 
diff --git a/tools/perf/util/print-events.c b/tools/perf/util/print-events.c
index 9cee7bb7a561..7a5f87392720 100644
--- a/tools/perf/util/print-events.c
+++ b/tools/perf/util/print-events.c
@@ -272,12 +272,11 @@ int print_hwcache_events(const struct print_callbacks *print_cb, void *print_sta
 	struct perf_pmu *pmu = NULL;
 	const char *event_type_descriptor = event_type_descriptors[PERF_TYPE_HW_CACHE];
 
-	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
-		/*
-		 * Skip uncore PMUs for performance. PERF_TYPE_HW_CACHE type
-		 * attributes can accept software PMUs in the extended type, so
-		 * also skip.
-		 */
+	/*
+	 * Only print core PMUs, skipping uncore PMUs for performance and
+	 * PERF_TYPE_SOFTWARE, which can also open legacy cache events.
+	 */
+	while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
 		if (pmu->is_uncore || pmu->type == PERF_TYPE_SOFTWARE)
 			continue;
 
-- 
2.40.1.606.ga4b1b128d6-goog



Thread overview: 39+ messages
2023-05-17 14:57 [PATCH v1 00/23] PMU refactoring and improvements Ian Rogers
2023-05-17 14:57 ` [PATCH v1 01/23] perf tools: Warn if no user requested CPUs match PMU's CPUs Ian Rogers
2023-05-21 19:04   ` Liang, Kan
2023-05-22  4:52     ` Ian Rogers
2023-05-22 11:33       ` Liang, Kan
2023-05-17 14:57 ` [PATCH v1 02/23] perf evlist: Remove evlist__warn_hybrid_group Ian Rogers
2023-05-17 14:57 ` [PATCH v1 03/23] perf evlist: Remove __evlist__add_default Ian Rogers
2023-05-21 19:12   ` Liang, Kan
2023-05-22  5:12     ` Ian Rogers
2023-05-17 14:57 ` [PATCH v1 04/23] perf evlist: Reduce scope of evlist__has_hybrid Ian Rogers
2023-05-17 14:57 ` [PATCH v1 05/23] perf pmu: Remove perf_pmu__hybrid_mounted Ian Rogers
2023-05-21 19:23   ` Liang, Kan
2023-05-22  5:21     ` Ian Rogers
2023-05-22 11:55       ` Liang, Kan
2023-05-22 14:06         ` Ian Rogers
2023-05-23 17:23           ` Liang, Kan
2023-05-23 17:45             ` Ian Rogers
2023-05-17 14:57 ` [PATCH v1 06/23] perf pmu: Detect ARM and hybrid PMUs with sysfs Ian Rogers
2023-05-17 14:57 ` [PATCH v1 07/23] perf pmu: Add is_core to pmu Ian Rogers
2023-05-17 14:57 ` [PATCH v1 08/23] perf pmu: Rewrite perf_pmu__has_hybrid to avoid list Ian Rogers
2023-05-17 14:57 ` [PATCH v1 09/23] perf x86: Iterate hybrid PMUs as core PMUs Ian Rogers
2023-05-17 14:57 ` [PATCH v1 10/23] perf topology: Avoid hybrid list for hybrid topology Ian Rogers
2023-05-17 14:57 ` [PATCH v1 11/23] perf evsel: Compute is_hybrid from PMU being core Ian Rogers
2023-05-17 14:57 ` [PATCH v1 12/23] perf header: Avoid hybrid PMU list in write_pmu_caps Ian Rogers
2023-05-17 14:57 ` [PATCH v1 13/23] perf metrics: Remove perf_pmu__is_hybrid use Ian Rogers
2023-05-17 14:57 ` [PATCH v1 14/23] perf stat: Avoid hybrid PMU list Ian Rogers
2023-05-17 14:57 ` [PATCH v1 15/23] perf mem: " Ian Rogers
2023-05-17 14:57 ` [PATCH v1 16/23] perf pmu: Remove perf_pmu__hybrid_pmus list Ian Rogers
2023-05-17 14:57 ` [PATCH v1 17/23] perf pmus: Prefer perf_pmu__scan over perf_pmus__for_each_pmu Ian Rogers
2023-05-21 19:43   ` Liang, Kan
2023-05-17 14:57 ` [PATCH v1 18/23] perf x86 mem: minor refactor to is_mem_loads_aux_event Ian Rogers
2023-05-21 19:47   ` Liang, Kan
2023-05-17 14:57 ` [PATCH v1 19/23] perf pmu: Separate pmu and pmus Ian Rogers
2023-05-17 14:58 ` [PATCH v1 20/23] perf pmus: Split pmus list into core and uncore Ian Rogers
2023-05-21 20:02   ` Liang, Kan
2023-05-22  5:30     ` Ian Rogers
2023-05-17 14:58 ` Ian Rogers [this message]
2023-05-17 14:58 ` [PATCH v1 22/23] perf pmus: Avoid repeated sysfs scanning Ian Rogers
2023-05-17 14:58 ` [PATCH v1 23/23] perf pmus: Ensure all PMUs are read for find_by_type Ian Rogers
