From: Yang Jihong <yangjihong1@huawei.com>
To: <peterz@infradead.org>, <mingo@redhat.com>, <acme@kernel.org>,
	<mark.rutland@arm.com>, <alexander.shishkin@linux.intel.com>,
	<jolsa@kernel.org>, <namhyung@kernel.org>, <irogers@google.com>,
	<adrian.hunter@intel.com>, <kan.liang@linux.intel.com>,
	<sandipan.das@amd.com>, <ravi.bangoria@amd.com>,
	<linux-kernel@vger.kernel.org>,
	<linux-perf-users@vger.kernel.org>
Cc: <yangjihong1@huawei.com>
Subject: [RFC v1 15/16] perf kwork top: Add BPF-based statistics on hardirq event support
Date: Sat, 12 Aug 2023 08:49:16 +0000
Message-ID: <20230812084917.169338-16-yangjihong1@huawei.com>
In-Reply-To: <20230812084917.169338-1-yangjihong1@huawei.com>

Use BPF to collect statistics on hardirq events, based on perf BPF
skeletons. On irq_handler_entry, a timestamp is recorded in the per-CPU
hash map kwork_top_irq_time, keyed by work type, pid and task pointer;
on irq_handler_exit, the elapsed time is computed from that timestamp
(or from the profiling start time if no entry was seen) and accumulated
via update_work().
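
The BPF-based path is selected with the -b option and requires perf
built with BPF skeleton support; assuming a typical build setup, that
is something like:

  # make -C tools/perf BUILD_BPF_SKEL=1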

Example usage:

  # perf kwork top -k sched,irq -b
  Starting trace, Hit <Ctrl+C> to stop and report
  ^C
  Total  : 136717.945 ms, 8 cpus
  %Cpu(s):  17.10% id,   0.01% hi,   0.00% si
  %Cpu0   [|||||||||||||||||||||||||       84.26%]
  %Cpu1   [|||||||||||||||||||||||||       84.77%]
  %Cpu2   [||||||||||||||||||||||||        83.22%]
  %Cpu3   [||||||||||||||||||||||||        80.37%]
  %Cpu4   [||||||||||||||||||||||||        81.49%]
  %Cpu5   [|||||||||||||||||||||||||       84.68%]
  %Cpu6   [|||||||||||||||||||||||||       84.48%]
  %Cpu7   [||||||||||||||||||||||||        80.21%]

        PID     SPID    %CPU           RUNTIME  COMMAND
    -------------------------------------------------------------
          0        0   19.78       3482.833 ms  [swapper/7]
          0        0   19.62       3454.219 ms  [swapper/3]
          0        0   18.50       3258.339 ms  [swapper/4]
          0        0   16.76       2842.749 ms  [swapper/2]
          0        0   15.71       2627.905 ms  [swapper/0]
          0        0   15.51       2598.206 ms  [swapper/6]
          0        0   15.31       2561.820 ms  [swapper/5]
          0        0   15.22       2548.708 ms  [swapper/1]
      13253    13018    2.95        513.108 ms  sched-messaging
      13092    13018    2.67        454.167 ms  sched-messaging
      13401    13018    2.66        454.790 ms  sched-messaging
      13240    13018    2.64        454.587 ms  sched-messaging
      13251    13018    2.61        442.273 ms  sched-messaging
      13075    13018    2.61        438.932 ms  sched-messaging
      13220    13018    2.60        443.245 ms  sched-messaging
      13235    13018    2.59        443.268 ms  sched-messaging
      13222    13018    2.50        426.344 ms  sched-messaging
      13410    13018    2.49        426.191 ms  sched-messaging
      13228    13018    2.46        425.121 ms  sched-messaging
      13379    13018    2.38        409.950 ms  sched-messaging
      13236    13018    2.37        413.159 ms  sched-messaging
      13095    13018    2.36        396.572 ms  sched-messaging
      13325    13018    2.35        408.089 ms  sched-messaging
      13242    13018    2.32        394.750 ms  sched-messaging
      13386    13018    2.31        396.997 ms  sched-messaging
      13046    13018    2.29        383.833 ms  sched-messaging
      13109    13018    2.28        388.482 ms  sched-messaging
      13388    13018    2.28        393.576 ms  sched-messaging
      13238    13018    2.26        388.487 ms  sched-messaging
  <SNIP>
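
For reference, a minimal user-space sketch (not part of this patch, and
not necessarily how perf itself reads the data) of draining a
BPF_MAP_TYPE_PERCPU_HASH such as kwork_top_irq_time with libbpf; the
struct layouts are illustrative stand-ins for the real kwork
definitions:

  #include <stdio.h>
  #include <bpf/bpf.h>
  #include <bpf/libbpf.h>
  #include <linux/types.h>

  /* Illustrative layouts only. */
  struct work_key { __u32 type; __u32 pid; __u64 task_p; };
  struct time_data { __u64 timestamp; };

  /* map_fd would come from bpf_map__fd(skel->maps.kwork_top_irq_time). */
  static void dump_irq_time(int map_fd)
  {
  	int i, ncpu = libbpf_num_possible_cpus();
  	struct time_data vals[ncpu];	/* one value per possible CPU */
  	struct work_key key, next, *prev = NULL;

  	/*
  	 * Walk all keys; a lookup on a per-CPU map fills one value
  	 * slot per possible CPU.
  	 */
  	while (!bpf_map_get_next_key(map_fd, prev, &next)) {
  		if (!bpf_map_lookup_elem(map_fd, &next, vals))
  			for (i = 0; i < ncpu; i++)
  				printf("cpu%d: ts=%llu\n", i,
  				       (unsigned long long)vals[i].timestamp);
  		key = next;
  		prev = &key;
  	}
  }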

Signed-off-by: Yang Jihong <yangjihong1@huawei.com>
---
 tools/perf/util/bpf_kwork_top.c          | 11 ++++
 tools/perf/util/bpf_skel/kwork_top.bpf.c | 79 ++++++++++++++++++++++++
 2 files changed, 90 insertions(+)

diff --git a/tools/perf/util/bpf_kwork_top.c b/tools/perf/util/bpf_kwork_top.c
index 42897ea22c61..3998bd2a938f 100644
--- a/tools/perf/util/bpf_kwork_top.c
+++ b/tools/perf/util/bpf_kwork_top.c
@@ -79,6 +79,16 @@ void perf_kwork__top_finish(void)
 	pr_debug("perf kwork top finish at: %lld\n", skel->bss->to_timestamp);
 }
 
+static void irq_load_prepare(void)
+{
+	bpf_program__set_autoload(skel->progs.on_irq_handler_entry, true);
+	bpf_program__set_autoload(skel->progs.on_irq_handler_exit, true);
+}
+
+static struct kwork_class_bpf kwork_irq_bpf = {
+	.load_prepare = irq_load_prepare,
+};
+
 static void sched_load_prepare(void)
 {
 	bpf_program__set_autoload(skel->progs.on_switch, true);
@@ -90,6 +100,7 @@ static struct kwork_class_bpf kwork_sched_bpf = {
 
 static struct kwork_class_bpf *
 kwork_class_bpf_supported_list[KWORK_CLASS_MAX] = {
+	[KWORK_CLASS_IRQ]	= &kwork_irq_bpf,
 	[KWORK_CLASS_SCHED]	= &kwork_sched_bpf,
 };
 
diff --git a/tools/perf/util/bpf_skel/kwork_top.bpf.c b/tools/perf/util/bpf_skel/kwork_top.bpf.c
index 47ad61608ec7..9c7dc62386c7 100644
--- a/tools/perf/util/bpf_skel/kwork_top.bpf.c
+++ b/tools/perf/util/bpf_skel/kwork_top.bpf.c
@@ -54,6 +54,13 @@ struct {
 	__type(value, struct time_data);
 } kwork_top_task_time SEC(".maps");
 
+struct {
+	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+	__uint(key_size, sizeof(struct work_key));
+	__uint(value_size, sizeof(struct time_data));
+	__uint(max_entries, MAX_ENTRIES);
+} kwork_top_irq_time SEC(".maps");
+
 struct {
 	__uint(type, BPF_MAP_TYPE_HASH);
 	__uint(key_size, sizeof(struct task_key));
@@ -184,4 +191,76 @@ int on_switch(u64 *ctx)
 	return 0;
 }
 
+SEC("tp_btf/irq_handler_entry")
+int on_irq_handler_entry(u64 *ctx)
+{
+	struct task_struct *task;
+
+	if (!enabled)
+		return 0;
+
+	__u32 cpu = bpf_get_smp_processor_id();
+
+	if (cpu_is_filtered(cpu))
+		return 0;
+
+	__u64 ts = bpf_ktime_get_ns();
+
+	task = (struct task_struct *)bpf_get_current_task();
+	if (!task)
+		return 0;
+
+	struct work_key key = {
+		.type = KWORK_CLASS_IRQ,
+		.pid = BPF_CORE_READ(task, pid),
+		.task_p = (__u64)task,
+	};
+
+	struct time_data data = {
+		.timestamp = ts,
+	};
+
+	bpf_map_update_elem(&kwork_top_irq_time, &key, &data, BPF_ANY);
+
+	return 0;
+}
+
+SEC("tp_btf/irq_handler_exit")
+int on_irq_handler_exit(u64 *ctx)
+{
+	__u64 delta;
+	struct task_struct *task;
+	struct time_data *pelem;
+
+	if (!enabled)
+		return 0;
+
+	__u32 cpu = bpf_get_smp_processor_id();
+
+	if (cpu_is_filtered(cpu))
+		return 0;
+
+	__u64 ts = bpf_ktime_get_ns();
+
+	task = (struct task_struct *)bpf_get_current_task();
+	if (!task)
+		return 0;
+
+	struct work_key key = {
+		.type = KWORK_CLASS_IRQ,
+		.pid = BPF_CORE_READ(task, pid),
+		.task_p = (__u64)task,
+	};
+
+	pelem = bpf_map_lookup_elem(&kwork_top_irq_time, &key);
+	if (pelem && pelem->timestamp != 0)
+		delta = ts - pelem->timestamp;
+	else
+		delta = ts - from_timestamp;
+
+	update_work(&key, delta);
+
+	return 0;
+}
+
 char LICENSE[] SEC("license") = "Dual BSD/GPL";
-- 
2.30.GIT

