From: Namhyung Kim <namhyung@kernel.org>
To: Arnaldo Carvalho de Melo <acme@kernel.org>, Jiri Olsa <jolsa@kernel.org>
Cc: Ian Rogers <irogers@google.com>,
Adrian Hunter <adrian.hunter@intel.com>,
Peter Zijlstra <peterz@infradead.org>,
Ingo Molnar <mingo@kernel.org>,
LKML <linux-kernel@vger.kernel.org>,
linux-perf-users@vger.kernel.org, Song Liu <song@kernel.org>,
Hao Luo <haoluo@google.com>,
bpf@vger.kernel.org
Subject: [PATCH 2/5] perf lock contention: Prepare to handle cgroups
Date: Wed, 6 Sep 2023 10:49:00 -0700
Message-ID: <20230906174903.346486-3-namhyung@kernel.org>
In-Reply-To: <20230906174903.346486-1-namhyung@kernel.org>

Save cgroup info and display cgroup names if requested. This is in
preparation for the next patch.

Reviewed-by: Ian Rogers <irogers@google.com>
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
---
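A minimal usage sketch (not part of the commit, hence below the "---"):
it illustrates how the cgroup tree filled by read_all_cgroups() is meant
to be consumed via __cgroup__find() and released with cgroup__put(),
mirroring the hunks below. The helper names sketch_cgroup_name() and
sketch_release_cgroups() are made up for illustration only; just the
cgroup helpers and struct cgroup fields come from patch 1/5 and this
patch.

    /* Illustrative sketch only -- not the actual perf code paths. */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <linux/types.h>
    #include <linux/rbtree.h>
    #include "util/cgroup.h"  /* read_all_cgroups(), __cgroup__find(), cgroup__put() */

    /* Hypothetical helper: map a cgroup id to a printable name, falling
     * back to the raw id when the cgroup is not in the saved tree. */
    static const char *sketch_cgroup_name(struct rb_root *cgroups, u64 cgrp_id,
                                          char *buf, size_t sz)
    {
            struct cgroup *cgrp = __cgroup__find(cgroups, cgrp_id);

            if (cgrp)
                    return cgrp->name;

            snprintf(buf, sz, "cgroup:%" PRIu64, (uint64_t)cgrp_id);
            return buf;
    }

    /* Hypothetical teardown: every cgroup referenced by read_all_cgroups()
     * must be dropped with cgroup__put(), as lock_contention_finish()
     * does below. */
    static void sketch_release_cgroups(struct rb_root *cgroups)
    {
            while (!RB_EMPTY_ROOT(cgroups)) {
                    struct rb_node *node = rb_first(cgroups);
                    struct cgroup *cgrp = rb_entry(node, struct cgroup, node);

                    rb_erase(node, cgroups);
                    cgroup__put(cgrp);
            }
    }

Since the cgroup tree lives in struct lock_contention rather than in the
BPF skeleton, lock_contention_finish() gains the con argument so the
cleanup path can reach it.
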
tools/perf/builtin-lock.c | 3 ++-
tools/perf/util/bpf_lock_contention.c | 26 +++++++++++++++++++++++++-
tools/perf/util/lock-contention.h | 9 +++++++--
3 files changed, 34 insertions(+), 4 deletions(-)
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index b141f2134274..06430980dfd7 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -2040,6 +2040,7 @@ static int __cmd_contention(int argc, const char **argv)
.filters = &filters,
.save_callstack = needs_callstack(),
.owner = show_lock_owner,
+ .cgroups = RB_ROOT,
};
lockhash_table = calloc(LOCKHASH_SIZE, sizeof(*lockhash_table));
@@ -2158,7 +2159,7 @@ static int __cmd_contention(int argc, const char **argv)
out_delete:
lock_filter_finish();
evlist__delete(con.evlist);
- lock_contention_finish();
+ lock_contention_finish(&con);
perf_session__delete(session);
zfree(&lockhash_table);
return err;
diff --git a/tools/perf/util/bpf_lock_contention.c b/tools/perf/util/bpf_lock_contention.c
index e7dddf0127bc..c6bd7c9b2d57 100644
--- a/tools/perf/util/bpf_lock_contention.c
+++ b/tools/perf/util/bpf_lock_contention.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include "util/cgroup.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/machine.h"
@@ -151,6 +152,10 @@ int lock_contention_prepare(struct lock_contention *con)
skel->bss->needs_callstack = con->save_callstack;
skel->bss->lock_owner = con->owner;
+ if (con->use_cgroup) {
+ read_all_cgroups(&con->cgroups);
+ }
+
bpf_program__set_autoload(skel->progs.collect_lock_syms, false);
lock_contention_bpf__attach(skel);
@@ -222,6 +227,17 @@ static const char *lock_contention_get_name(struct lock_contention *con,
return "";
}
+ if (con->use_cgroup) {
+ u64 cgrp_id = key->lock_addr;
+ struct cgroup *cgrp = __cgroup__find(&con->cgroups, cgrp_id);
+
+ if (cgrp)
+ return cgrp->name;
+
+ snprintf(name_buf, sizeof(name_buf), "cgroup:%" PRIu64, cgrp_id);
+ return name_buf;
+ }
+
/* LOCK_AGGR_CALLER: skip lock internal functions */
while (machine__is_lock_function(machine, stack_trace[idx]) &&
idx < con->max_stack - 1)
@@ -364,12 +380,20 @@ int lock_contention_read(struct lock_contention *con)
return err;
}
-int lock_contention_finish(void)
+int lock_contention_finish(struct lock_contention *con)
{
if (skel) {
skel->bss->enabled = 0;
lock_contention_bpf__destroy(skel);
}
+ while (!RB_EMPTY_ROOT(&con->cgroups)) {
+ struct rb_node *node = rb_first(&con->cgroups);
+ struct cgroup *cgrp = rb_entry(node, struct cgroup, node);
+
+ rb_erase(node, &con->cgroups);
+ cgroup__put(cgrp);
+ }
+
return 0;
}
diff --git a/tools/perf/util/lock-contention.h b/tools/perf/util/lock-contention.h
index fa16532c971c..70423966d778 100644
--- a/tools/perf/util/lock-contention.h
+++ b/tools/perf/util/lock-contention.h
@@ -136,6 +136,7 @@ struct lock_contention {
struct hlist_head *result;
struct lock_filter *filters;
struct lock_contention_fails fails;
+ struct rb_root cgroups;
unsigned long map_nr_entries;
int max_stack;
int stack_skip;
@@ -143,6 +144,7 @@ struct lock_contention {
int owner;
int nr_filtered;
bool save_callstack;
+ bool use_cgroup;
};
#ifdef HAVE_BPF_SKEL
@@ -151,7 +153,7 @@ int lock_contention_prepare(struct lock_contention *con);
int lock_contention_start(void);
int lock_contention_stop(void);
int lock_contention_read(struct lock_contention *con);
-int lock_contention_finish(void);
+int lock_contention_finish(struct lock_contention *con);
#else /* !HAVE_BPF_SKEL */
@@ -162,7 +164,10 @@ static inline int lock_contention_prepare(struct lock_contention *con __maybe_un
static inline int lock_contention_start(void) { return 0; }
static inline int lock_contention_stop(void) { return 0; }
-static inline int lock_contention_finish(void) { return 0; }
+static inline int lock_contention_finish(struct lock_contention *con __maybe_unused)
+{
+ return 0;
+}
static inline int lock_contention_read(struct lock_contention *con __maybe_unused)
{
--
2.42.0.283.g2d96d420d3-goog