From: Yafang Shao <laoar.shao@gmail.com>
To: ast@kernel.org, daniel@iogearbox.net, john.fastabend@gmail.com,
andrii@kernel.org, martin.lau@linux.dev, song@kernel.org,
yonghong.song@linux.dev, kpsingh@kernel.org, sdf@google.com,
haoluo@google.com, jolsa@kernel.org, tj@kernel.org,
lizefan.x@bytedance.com, hannes@cmpxchg.org
Cc: bpf@vger.kernel.org, cgroups@vger.kernel.org,
Yafang Shao <laoar.shao@gmail.com>
Subject: [PATCH bpf-next 4/4] selftests/bpf: Add selftests for cpumask iter
Date: Fri, 22 Dec 2023 11:31:02 +0000
Message-ID: <20231222113102.4148-5-laoar.shao@gmail.com>
In-Reply-To: <20231222113102.4148-1-laoar.shao@gmail.com>

Within the BPF program, we attach to a cgroup iterator and use the new
cpumask iterator to walk per-CPU runqueue data, specifically the
'nr_running' metric. The result is then exposed to userspace through a
seq file.
The cpumask to iterate over is derived from the CPU affinity of a task,
selected by PID:
- PID of the init task (PID 1)
  CPU affinity is typically not set for the init task, so we can iterate
  across all possible CPUs. However, if CPU affinity has been set for the
  init task, you should instead set the cpumask of your current task to
  all-f (all CPUs set) and iterate through all possible CPUs via the
  current task.
- PID of a task with a defined CPU affinity
  The aim here is to iterate through a specific cpumask. This scenario
  matches tasks confined to a cpuset cgroup.
- Invalid PID (e.g., PID -1)
  No cpumask is available in this case.
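
For reference, reading the iterator's output from user space looks
roughly like the sketch below. This is a minimal sketch mirroring the
selftest added by this patch; 'skel' and 'cgrp_fd' are assumed to come
from the loaded skeleton and the cgroup setup, and error handling is
omitted for brevity:

  DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
  union bpf_iter_link_info linfo = {};
  struct bpf_link *link;
  char buf[128] = {};
  int iter_fd;

  /* Pin the iterator to the root cgroup only. */
  linfo.cgroup.cgroup_fd = cgrp_fd;
  linfo.cgroup.order = BPF_CGROUP_ITER_SELF_ONLY;
  opts.link_info = &linfo;
  opts.link_info_len = sizeof(linfo);

  /* Attach the iter/cgroup program and read its seq-file output. */
  link = bpf_program__attach_iter(skel->progs.cpu_cgroup, &opts);
  iter_fd = bpf_iter_create(bpf_link__fd(link));
  read(iter_fd, buf, sizeof(buf));  /* e.g. "nr_running 3 nr_cpus 8" */

The full version, including verification of the output, is in
cpumask_iter.c below.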
The test results are as follows:
#62/1 cpumask_iter/init_pid:OK
#62/2 cpumask_iter/invalid_pid:OK
#62/3 cpumask_iter/self_pid_one_cpu:OK
#62/4 cpumask_iter/self_pid_multi_cpus:OK
#62 cpumask_iter:OK
Summary: 1/4 PASSED, 0 SKIPPED, 0 FAILED
Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
---
.../selftests/bpf/prog_tests/cpumask_iter.c | 132 +++++++++++++++++++++
tools/testing/selftests/bpf/progs/cpumask_common.h | 4 +
.../selftests/bpf/progs/test_cpumask_iter.c | 50 ++++++++
3 files changed, 186 insertions(+)
create mode 100644 tools/testing/selftests/bpf/prog_tests/cpumask_iter.c
create mode 100644 tools/testing/selftests/bpf/progs/test_cpumask_iter.c
diff --git a/tools/testing/selftests/bpf/prog_tests/cpumask_iter.c b/tools/testing/selftests/bpf/prog_tests/cpumask_iter.c
new file mode 100644
index 0000000..40556cf
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cpumask_iter.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Yafang Shao <laoar.shao@gmail.com> */
+
+#define _GNU_SOURCE
+#include <sched.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#include <test_progs.h>
+#include "cgroup_helpers.h"
+#include "test_cpumask_iter.skel.h"
+
+static void verify_percpu_data(struct bpf_link *link, int nr_cpu_exp, int nr_running_exp)
+{
+ int iter_fd, len, item, nr_running, nr_cpus;
+ static char buf[128];
+ size_t left;
+ char *p;
+
+ iter_fd = bpf_iter_create(bpf_link__fd(link));
+ if (!ASSERT_GE(iter_fd, 0, "iter_fd"))
+ return;
+
+ memset(buf, 0, sizeof(buf));
+ left = ARRAY_SIZE(buf);
+ p = buf;
+ while ((len = read(iter_fd, p, left)) > 0) {
+ p += len;
+ left -= len;
+ }
+
+ item = sscanf(buf, "nr_running %u nr_cpus %u\n", &nr_running, &nr_cpus);
+ if (nr_cpu_exp == -1) {
+ ASSERT_EQ(item, -1, "seq_format");
+ goto out;
+ }
+
+ ASSERT_EQ(item, 2, "seq_format");
+ ASSERT_GE(nr_running, nr_running_exp, "nr_running");
+ ASSERT_EQ(nr_cpus, nr_cpu_exp, "nr_cpus");
+
+ /* read() after iter finishes should be ok. */
+ if (len == 0)
+ ASSERT_OK(read(iter_fd, buf, sizeof(buf)), "second_read");
+
+out:
+ close(iter_fd);
+}
+
+void test_cpumask_iter(void)
+{
+ DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+ int nr_possible, cgrp_fd, pid, err, cnt, i;
+ struct test_cpumask_iter *skel = NULL;
+ union bpf_iter_link_info linfo;
+ int cpu_ids[] = {1, 3, 4, 5};
+ struct bpf_link *link;
+ cpu_set_t set;
+
+ skel = test_cpumask_iter__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_cpumask_iter__open_and_load"))
+ return;
+
+ if (setup_cgroup_environment())
+ goto destroy;
+
+ /* Utilize the cgroup iter */
+ cgrp_fd = get_root_cgroup();
+ if (!ASSERT_GE(cgrp_fd, 0, "create cgrp"))
+ goto cleanup;
+
+ memset(&linfo, 0, sizeof(linfo));
+ linfo.cgroup.cgroup_fd = cgrp_fd;
+ linfo.cgroup.order = BPF_CGROUP_ITER_SELF_ONLY;
+ opts.link_info = &linfo;
+ opts.link_info_len = sizeof(linfo);
+
+ link = bpf_program__attach_iter(skel->progs.cpu_cgroup, &opts);
+ if (!ASSERT_OK_PTR(link, "attach_iter"))
+ goto close_fd;
+
+ skel->bss->target_pid = 1;
+ /* In case the init task has its CPU affinity set */
+ err = sched_getaffinity(1, sizeof(set), &set);
+ if (!ASSERT_OK(err, "getaffinity"))
+ goto close_fd;
+
+ cnt = CPU_COUNT(&set);
+ nr_possible = bpf_num_possible_cpus();
+ if (test__start_subtest("init_pid"))
+ /* the current task is running. */
+ verify_percpu_data(link, cnt, cnt == nr_possible ? 1 : 0);
+
+ skel->bss->target_pid = -1;
+ if (test__start_subtest("invalid_pid"))
+ verify_percpu_data(link, -1, -1);
+
+ pid = getpid();
+ skel->bss->target_pid = pid;
+ CPU_ZERO(&set);
+ CPU_SET(0, &set);
+ err = sched_setaffinity(pid, sizeof(set), &set);
+ if (!ASSERT_OK(err, "setaffinity"))
+ goto free_link;
+
+ if (test__start_subtest("self_pid_one_cpu"))
+ verify_percpu_data(link, 1, 1);
+
+ /* Assume there are at least 8 CPUs on the testbed */
+ if (nr_possible < 8)
+ goto free_link;
+
+ CPU_ZERO(&set);
+ /* Set the CPU affinity to CPUs 1,3-5 */
+ for (i = 0; i < ARRAY_SIZE(cpu_ids); i++)
+ CPU_SET(cpu_ids[i], &set);
+ err = sched_setaffinity(pid, sizeof(set), &set);
+ if (!ASSERT_OK(err, "setaffinity"))
+ goto free_link;
+
+ if (test__start_subtest("self_pid_multi_cpus"))
+ verify_percpu_data(link, ARRAY_SIZE(cpu_ids), 1);
+
+free_link:
+ bpf_link__destroy(link);
+close_fd:
+ close(cgrp_fd);
+cleanup:
+ cleanup_cgroup_environment();
+destroy:
+ test_cpumask_iter__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/progs/cpumask_common.h b/tools/testing/selftests/bpf/progs/cpumask_common.h
index 0cd4aeb..5ebb136 100644
--- a/tools/testing/selftests/bpf/progs/cpumask_common.h
+++ b/tools/testing/selftests/bpf/progs/cpumask_common.h
@@ -55,6 +55,10 @@ void bpf_cpumask_xor(struct bpf_cpumask *cpumask,
u32 bpf_cpumask_any_distribute(const struct cpumask *src) __ksym;
u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1, const struct cpumask *src2) __ksym;
u32 bpf_cpumask_weight(const struct cpumask *cpumask) __ksym;
+u32 bpf_iter_cpumask_new(struct bpf_iter_cpumask *it, struct cpumask *mask) __ksym;
+u32 *bpf_iter_cpumask_next(struct bpf_iter_cpumask *it) __ksym;
+void bpf_iter_cpumask_destroy(struct bpf_iter_cpumask *it) __ksym;
+bool bpf_cpumask_set_from_pid(struct cpumask *cpumask, u32 pid) __ksym;
void bpf_rcu_read_lock(void) __ksym;
void bpf_rcu_read_unlock(void) __ksym;
diff --git a/tools/testing/selftests/bpf/progs/test_cpumask_iter.c b/tools/testing/selftests/bpf/progs/test_cpumask_iter.c
new file mode 100644
index 0000000..d0cdb92
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_cpumask_iter.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2023 Yafang Shao <laoar.shao@gmail.com> */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#include "cpumask_common.h"
+
+extern const struct rq runqueues __ksym __weak;
+
+int target_pid;
+
+SEC("iter/cgroup")
+int BPF_PROG(cpu_cgroup, struct bpf_iter_meta *meta, struct cgroup *cgrp)
+{
+ u32 *cpu, nr_running = 0, nr_cpus = 0;
+ struct bpf_cpumask *mask;
+ struct rq *rq;
+ int ret;
+
+ /* epilogue */
+ if (cgrp == NULL)
+ return 0;
+
+ mask = bpf_cpumask_create();
+ if (!mask)
+ return 1;
+
+ ret = bpf_cpumask_set_from_pid(&mask->cpumask, target_pid);
+ if (ret == false) {
+ bpf_cpumask_release(mask);
+ return 1;
+ }
+
+ bpf_for_each(cpumask, cpu, &mask->cpumask) {
+ rq = (struct rq *)bpf_per_cpu_ptr(&runqueues, *cpu);
+ if (!rq)
+ continue;
+
+ nr_running += rq->nr_running;
+ nr_cpus += 1;
+ }
+ BPF_SEQ_PRINTF(meta->seq, "nr_running %u nr_cpus %u\n", nr_running, nr_cpus);
+
+ bpf_cpumask_release(mask);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
--
1.8.3.1