From: Ian Rogers <irogers@google.com>
To: Suzuki K Poulose <suzuki.poulose@arm.com>,
Mike Leach <mike.leach@linaro.org>, Leo Yan <leo.yan@linaro.org>,
John Garry <john.g.garry@oracle.com>,
Will Deacon <will@kernel.org>, James Clark <james.clark@arm.com>,
Peter Zijlstra <peterz@infradead.org>,
Ingo Molnar <mingo@redhat.com>,
Arnaldo Carvalho de Melo <acme@kernel.org>,
Mark Rutland <mark.rutland@arm.com>,
Alexander Shishkin <alexander.shishkin@linux.intel.com>,
Jiri Olsa <jolsa@kernel.org>, Namhyung Kim <namhyung@kernel.org>,
Ian Rogers <irogers@google.com>,
Adrian Hunter <adrian.hunter@intel.com>,
Kajol Jain <kjain@linux.ibm.com>,
Jing Zhang <renyu.zj@linux.alibaba.com>,
Kan Liang <kan.liang@linux.intel.com>,
Zhengjun Xing <zhengjun.xing@linux.intel.com>,
Ravi Bangoria <ravi.bangoria@amd.com>,
Madhavan Srinivasan <maddy@linux.ibm.com>,
Athira Rajeev <atrajeev@linux.vnet.ibm.com>,
Ming Wang <wangming01@loongson.cn>,
Huacai Chen <chenhuacai@kernel.org>,
Sandipan Das <sandipan.das@amd.com>,
Dmitrii Dolgov <9erthalion6@gmail.com>,
Sean Christopherson <seanjc@google.com>,
Ali Saidi <alisaidi@amazon.com>, Rob Herring <robh@kernel.org>,
Thomas Richter <tmricht@linux.ibm.com>,
Kang Minchul <tegongkang@gmail.com>,
linux-kernel@vger.kernel.org, coresight@lists.linaro.org,
linux-arm-kernel@lists.infradead.org,
linux-perf-users@vger.kernel.org
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Subject: [PATCH v5 01/34] perf cpumap: Add internal nr and cpu accessors
Date: Sat, 27 May 2023 00:21:37 -0700
Message-ID: <20230527072210.2900565-2-irogers@google.com>
In-Reply-To: <20230527072210.2900565-1-irogers@google.com>

These accessors assume the map is non-null. Rewrite functions to use
the accessors rather than direct accesses to the map. This also fixes
a build regression with REFCNT_CHECKING enabled in the intersect
function.

Suggested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ian Rogers <irogers@google.com>
---
tools/lib/perf/cpumap.c | 74 +++++++++++++++++++++++++----------------
1 file changed, 45 insertions(+), 29 deletions(-)
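
A note for reviewers unfamiliar with the pattern: below is a minimal
standalone sketch of what the internal accessors buy. All member reads
funnel through __perf_cpu_map__nr()/__perf_cpu_map__cpu(), so a build
where the public pointer becomes a checked wrapper only needs the
accessors to know about RC_CHK_ACCESS(), not every caller. The struct
layout, the no-op RC_CHK_ACCESS() stand-in and main() are assumptions
for illustration only, not the real libperf definitions.

#include <assert.h>
#include <stdio.h>

struct perf_cpu { int cpu; };

/* Fixed-size map only for this sketch; libperf sizes it dynamically. */
struct perf_cpu_map {
	int nr;
	struct perf_cpu map[16];
};

/*
 * Stand-in: with REFCNT_CHECKING the real macro unwraps a checked
 * reference-count wrapper; in a plain build it is effectively this.
 */
#define RC_CHK_ACCESS(x) (x)

/* Internal accessors: callers guarantee 'cpus' is non-NULL. */
static int __perf_cpu_map__nr(const struct perf_cpu_map *cpus)
{
	return RC_CHK_ACCESS(cpus)->nr;
}

static struct perf_cpu __perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
{
	return RC_CHK_ACCESS(cpus)->map[idx];
}

/* Public accessor: additionally tolerates a NULL map. */
static struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
{
	struct perf_cpu result = { .cpu = -1 };

	if (cpus && idx < __perf_cpu_map__nr(cpus))
		return __perf_cpu_map__cpu(cpus, idx);
	return result;
}

int main(void)
{
	struct perf_cpu_map cpus = { .nr = 3, .map = { {0}, {2}, {4} } };

	for (int i = 0; i < __perf_cpu_map__nr(&cpus); i++)
		printf("cpu %d\n", __perf_cpu_map__cpu(&cpus, i).cpu);

	/* NULL and out-of-range lookups report cpu == -1. */
	assert(perf_cpu_map__cpu(NULL, 0).cpu == -1);
	assert(perf_cpu_map__cpu(&cpus, 9).cpu == -1);
	return 0;
}

The same shape appears in the diff below: the public functions keep
their NULL checks while the internal helpers centralize the
RC_CHK_ACCESS() dereference that the intersect function previously
bypassed with direct orig->nr/orig->map accesses.
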
diff --git a/tools/lib/perf/cpumap.c b/tools/lib/perf/cpumap.c
index d4f3a1a12522..ec3f4ac8b1e2 100644
--- a/tools/lib/perf/cpumap.c
+++ b/tools/lib/perf/cpumap.c
@@ -99,6 +99,11 @@ static int cmp_cpu(const void *a, const void *b)
return cpu_a->cpu - cpu_b->cpu;
}
+static struct perf_cpu __perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
+{
+ return RC_CHK_ACCESS(cpus)->map[idx];
+}
+
static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, const struct perf_cpu *tmp_cpus)
{
size_t payload_size = nr_cpus * sizeof(struct perf_cpu);
@@ -111,8 +116,12 @@ static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, const struct perf_cpu
/* Remove dups */
j = 0;
for (i = 0; i < nr_cpus; i++) {
- if (i == 0 || RC_CHK_ACCESS(cpus)->map[i].cpu != RC_CHK_ACCESS(cpus)->map[i - 1].cpu)
- RC_CHK_ACCESS(cpus)->map[j++].cpu = RC_CHK_ACCESS(cpus)->map[i].cpu;
+ if (i == 0 ||
+ __perf_cpu_map__cpu(cpus, i).cpu !=
+ __perf_cpu_map__cpu(cpus, i - 1).cpu) {
+ RC_CHK_ACCESS(cpus)->map[j++].cpu =
+ __perf_cpu_map__cpu(cpus, i).cpu;
+ }
}
perf_cpu_map__set_nr(cpus, j);
assert(j <= nr_cpus);
@@ -269,26 +278,31 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
return cpus;
}
+static int __perf_cpu_map__nr(const struct perf_cpu_map *cpus)
+{
+ return RC_CHK_ACCESS(cpus)->nr;
+}
+
struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
{
struct perf_cpu result = {
.cpu = -1
};
- if (cpus && idx < RC_CHK_ACCESS(cpus)->nr)
- return RC_CHK_ACCESS(cpus)->map[idx];
+ if (cpus && idx < __perf_cpu_map__nr(cpus))
+ return __perf_cpu_map__cpu(cpus, idx);
return result;
}
int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
{
- return cpus ? RC_CHK_ACCESS(cpus)->nr : 1;
+ return cpus ? __perf_cpu_map__nr(cpus) : 1;
}
bool perf_cpu_map__empty(const struct perf_cpu_map *map)
{
- return map ? RC_CHK_ACCESS(map)->map[0].cpu == -1 : true;
+ return map ? __perf_cpu_map__cpu(map, 0).cpu == -1 : true;
}
int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
@@ -299,10 +313,10 @@ int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
return -1;
low = 0;
- high = RC_CHK_ACCESS(cpus)->nr;
+ high = __perf_cpu_map__nr(cpus);
while (low < high) {
int idx = (low + high) / 2;
- struct perf_cpu cpu_at_idx = RC_CHK_ACCESS(cpus)->map[idx];
+ struct perf_cpu cpu_at_idx = __perf_cpu_map__cpu(cpus, idx);
if (cpu_at_idx.cpu == cpu.cpu)
return idx;
@@ -328,7 +342,9 @@ struct perf_cpu perf_cpu_map__max(const struct perf_cpu_map *map)
};
// cpu_map__trim_new() qsort()s it, cpu_map__default_new() sorts it as well.
- return RC_CHK_ACCESS(map)->nr > 0 ? RC_CHK_ACCESS(map)->map[RC_CHK_ACCESS(map)->nr - 1] : result;
+ return __perf_cpu_map__nr(map) > 0
+ ? __perf_cpu_map__cpu(map, __perf_cpu_map__nr(map) - 1)
+ : result;
}
/** Is 'b' a subset of 'a'. */
@@ -336,15 +352,15 @@ bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu
{
if (a == b || !b)
return true;
- if (!a || RC_CHK_ACCESS(b)->nr > RC_CHK_ACCESS(a)->nr)
+ if (!a || __perf_cpu_map__nr(b) > __perf_cpu_map__nr(a))
return false;
- for (int i = 0, j = 0; i < RC_CHK_ACCESS(a)->nr; i++) {
- if (RC_CHK_ACCESS(a)->map[i].cpu > RC_CHK_ACCESS(b)->map[j].cpu)
+ for (int i = 0, j = 0; i < __perf_cpu_map__nr(a); i++) {
+ if (__perf_cpu_map__cpu(a, i).cpu > __perf_cpu_map__cpu(b, j).cpu)
return false;
- if (RC_CHK_ACCESS(a)->map[i].cpu == RC_CHK_ACCESS(b)->map[j].cpu) {
+ if (__perf_cpu_map__cpu(a, i).cpu == __perf_cpu_map__cpu(b, j).cpu) {
j++;
- if (j == RC_CHK_ACCESS(b)->nr)
+ if (j == __perf_cpu_map__nr(b))
return true;
}
}
@@ -374,27 +390,27 @@ struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
return perf_cpu_map__get(other);
}
- tmp_len = RC_CHK_ACCESS(orig)->nr + RC_CHK_ACCESS(other)->nr;
+ tmp_len = __perf_cpu_map__nr(orig) + __perf_cpu_map__nr(other);
tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu));
if (!tmp_cpus)
return NULL;
/* Standard merge algorithm from wikipedia */
i = j = k = 0;
- while (i < RC_CHK_ACCESS(orig)->nr && j < RC_CHK_ACCESS(other)->nr) {
- if (RC_CHK_ACCESS(orig)->map[i].cpu <= RC_CHK_ACCESS(other)->map[j].cpu) {
- if (RC_CHK_ACCESS(orig)->map[i].cpu == RC_CHK_ACCESS(other)->map[j].cpu)
+ while (i < __perf_cpu_map__nr(orig) && j < __perf_cpu_map__nr(other)) {
+ if (__perf_cpu_map__cpu(orig, i).cpu <= __perf_cpu_map__cpu(other, j).cpu) {
+ if (__perf_cpu_map__cpu(orig, i).cpu == __perf_cpu_map__cpu(other, j).cpu)
j++;
- tmp_cpus[k++] = RC_CHK_ACCESS(orig)->map[i++];
+ tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++);
} else
- tmp_cpus[k++] = RC_CHK_ACCESS(other)->map[j++];
+ tmp_cpus[k++] = __perf_cpu_map__cpu(other, j++);
}
- while (i < RC_CHK_ACCESS(orig)->nr)
- tmp_cpus[k++] = RC_CHK_ACCESS(orig)->map[i++];
+ while (i < __perf_cpu_map__nr(orig))
+ tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++);
- while (j < RC_CHK_ACCESS(other)->nr)
- tmp_cpus[k++] = RC_CHK_ACCESS(other)->map[j++];
+ while (j < __perf_cpu_map__nr(other))
+ tmp_cpus[k++] = __perf_cpu_map__cpu(other, j++);
assert(k <= tmp_len);
merged = cpu_map__trim_new(k, tmp_cpus);
@@ -416,20 +432,20 @@ struct perf_cpu_map *perf_cpu_map__intersect(struct perf_cpu_map *orig,
if (perf_cpu_map__is_subset(orig, other))
return perf_cpu_map__get(other);
- tmp_len = max(orig->nr, other->nr);
+ tmp_len = max(__perf_cpu_map__nr(orig), __perf_cpu_map__nr(other));
tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu));
if (!tmp_cpus)
return NULL;
i = j = k = 0;
- while (i < orig->nr && j < other->nr) {
- if (orig->map[i].cpu < other->map[j].cpu)
+ while (i < __perf_cpu_map__nr(orig) && j < __perf_cpu_map__nr(other)) {
+ if (__perf_cpu_map__cpu(orig, i).cpu < __perf_cpu_map__cpu(other, j).cpu)
i++;
- else if (orig->map[i].cpu > other->map[j].cpu)
+ else if (__perf_cpu_map__cpu(orig, i).cpu > __perf_cpu_map__cpu(other, j).cpu)
j++;
else {
j++;
- tmp_cpus[k++] = orig->map[i++];
+ tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++);
}
}
if (k)
--
2.41.0.rc0.172.g3f132b7071-goog
Thread overview: 52+ messages
2023-05-27 7:21 [PATCH v5 00/34] PMU refactoring and improvements Ian Rogers
2023-05-27 7:21 ` Ian Rogers [this message]
2023-05-27 7:21 ` [PATCH v5 02/34] perf cpumap: Add equal function Ian Rogers
2023-05-27 7:21 ` [PATCH v5 03/34] libperf cpumap: Add "any CPU"/dummy test function Ian Rogers
2023-05-27 7:21 ` [PATCH v5 04/34] perf pmu: Detect ARM and hybrid PMUs with sysfs Ian Rogers
2023-05-27 7:21 ` [PATCH v5 05/34] perf pmu: Add is_core to pmu Ian Rogers
2023-05-27 7:21 ` [PATCH v5 06/34] perf evsel: Add is_pmu_core inorder to interpret own_cpus Ian Rogers
2023-05-27 7:21 ` [PATCH v5 07/34] perf pmu: Add CPU map for "cpu" PMUs Ian Rogers
2023-05-27 7:21 ` [PATCH v5 08/34] perf evlist: Propagate user CPU maps intersecting core PMU maps Ian Rogers
2023-05-27 7:21 ` [PATCH v5 09/34] perf evlist: Allow has_user_cpus to be set on hybrid Ian Rogers
2023-05-27 7:21 ` [PATCH v5 10/34] perf target: Remove unused hybrid value Ian Rogers
2023-05-27 7:21 ` [PATCH v5 11/34] perf tools: Warn if no user requested CPUs match PMU's CPUs Ian Rogers
2023-05-27 7:21 ` [PATCH v5 12/34] perf evlist: Remove evlist__warn_hybrid_group Ian Rogers
2023-05-27 7:21 ` [PATCH v5 13/34] perf evlist: Remove __evlist__add_default Ian Rogers
2023-05-27 7:21 ` [PATCH v5 14/34] perf evlist: Reduce scope of evlist__has_hybrid Ian Rogers
2023-05-27 7:21 ` [PATCH v5 15/34] perf pmu: Remove perf_pmu__hybrid_mounted Ian Rogers
2023-05-27 7:21 ` [PATCH v5 16/34] perf pmu: Rewrite perf_pmu__has_hybrid to avoid list Ian Rogers
2023-05-27 7:21 ` [PATCH v5 17/34] perf x86: Iterate hybrid PMUs as core PMUs Ian Rogers
2023-05-27 7:21 ` [PATCH v5 18/34] perf topology: Avoid hybrid list for hybrid topology Ian Rogers
2023-05-27 7:21 ` [PATCH v5 19/34] perf evsel: Compute is_hybrid from PMU being core Ian Rogers
2023-05-27 7:21 ` [PATCH v5 20/34] perf header: Avoid hybrid PMU list in write_pmu_caps Ian Rogers
2023-05-27 7:21 ` [PATCH v5 21/34] perf metrics: Remove perf_pmu__is_hybrid use Ian Rogers
2023-05-27 7:21 ` [PATCH v5 22/34] perf stat: Avoid hybrid PMU list Ian Rogers
2023-05-27 7:21 ` [PATCH v5 23/34] perf mem: " Ian Rogers
2023-05-27 7:22 ` [PATCH v5 24/34] perf pmu: Remove perf_pmu__hybrid_pmus list Ian Rogers
2023-05-27 7:22 ` [PATCH v5 25/34] perf pmus: Prefer perf_pmu__scan over perf_pmus__for_each_pmu Ian Rogers
2023-05-27 7:22 ` [PATCH v5 26/34] perf x86 mem: minor refactor to is_mem_loads_aux_event Ian Rogers
2023-05-27 7:22 ` [PATCH v5 27/34] perf pmu: Separate pmu and pmus Ian Rogers
2023-06-02 5:29 ` [PATCH] perf test amd: Fix build failure with amd-ibs-via-core-pmu.c -- Was: " Ravi Bangoria
2023-06-02 6:42 ` Ian Rogers
2023-06-03 4:46 ` [PATCH v2] perf test amd: Fix build failure with amd-ibs-via-core-pmu.c Ravi Bangoria
2023-06-05 14:27 ` Arnaldo Carvalho de Melo
2023-06-06 3:12 ` Ravi Bangoria
2023-06-06 4:24 ` Stephen Rothwell
2023-06-07 0:56 ` Stephen Rothwell
2023-05-27 7:22 ` [PATCH v5 28/34] perf pmus: Split pmus list into core and other Ian Rogers
2023-06-09 3:59 ` Ravi Bangoria
2023-06-09 4:40 ` Ian Rogers
2023-06-09 5:30 ` Ravi Bangoria
2023-06-09 5:35 ` Ian Rogers
2023-06-09 5:55 ` Ravi Bangoria
2023-06-09 6:00 ` Ian Rogers
2023-06-09 6:02 ` Ravi Bangoria
2023-06-09 7:58 ` Mark Rutland
2023-06-11 3:55 ` Ian Rogers
2023-05-27 7:22 ` [PATCH v5 29/34] perf pmus: Allow just core PMU scanning Ian Rogers
2023-06-09 6:12 ` Ravi Bangoria
2023-05-27 7:22 ` [PATCH v5 30/34] perf pmus: Avoid repeated sysfs scanning Ian Rogers
2023-05-27 7:22 ` [PATCH v5 31/34] perf pmus: Ensure all PMUs are read for find_by_type Ian Rogers
2023-05-27 7:22 ` [PATCH v5 32/34] perf pmus: Add function to return count of core PMUs Ian Rogers
2023-05-27 7:22 ` [PATCH v5 33/34] perf pmus: Remove perf_pmus__has_hybrid Ian Rogers
2023-05-27 7:22 ` [PATCH v5 34/34] perf pmu: Remove is_pmu_hybrid Ian Rogers