public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
From: Martin KaFai Lau <martin.lau@linux.dev>
To: "Alexis Lothoré (eBPF Foundation)" <alexis.lothore@bootlin.com>
Cc: Alexei Starovoitov <ast@kernel.org>,
	Daniel Borkmann <daniel@iogearbox.net>,
	Andrii Nakryiko <andrii@kernel.org>,
	Eduard Zingerman <eddyz87@gmail.com>, Song Liu <song@kernel.org>,
	Yonghong Song <yonghong.song@linux.dev>,
	John Fastabend <john.fastabend@gmail.com>,
	KP Singh <kpsingh@kernel.org>,
	Stanislav Fomichev <sdf@fomichev.me>, Hao Luo <haoluo@google.com>,
	Jiri Olsa <jolsa@kernel.org>, Mykola Lysenko <mykolal@fb.com>,
	Shuah Khan <shuah@kernel.org>,
	ebpf@linuxfoundation.org,
	Thomas Petazzoni <thomas.petazzoni@bootlin.com>,
	linux-kernel@vger.kernel.org, bpf@vger.kernel.org,
	linux-kselftest@vger.kernel.org
Subject: Re: [PATCH bpf-next v2 4/4] selftests/bpf: convert test_skb_cgroup_id_user to test_progs
Date: Fri, 9 Aug 2024 16:53:36 -0700	[thread overview]
Message-ID: <5f91072d-fd63-4d81-8442-ef7aa62e192f@linux.dev> (raw)
In-Reply-To: <20240806-convert_cgroup_tests-v2-4-180c57e5b710@bootlin.com>

On 8/6/24 12:55 AM, Alexis Lothoré (eBPF Foundation) wrote:
> diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_ancestor.c b/tools/testing/selftests/bpf/prog_tests/cgroup_ancestor.c
> new file mode 100644
> index 000000000000..4e41463533c0
> --- /dev/null
> +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_ancestor.c
> @@ -0,0 +1,154 @@
> +// SPDX-License-Identifier: GPL-2.0
> +
> +#include "test_progs.h"
> +#include "network_helpers.h"
> +#include "cgroup_helpers.h"
> +#include "cgroup_ancestor.skel.h"
> +
> +#define VETH_PREFIX "test_cgid_"
> +#define VETH_1 VETH_PREFIX "1"
> +#define VETH_2 VETH_PREFIX "2"
> +#define CGROUP_PATH "/skb_cgroup_test"
> +#define NUM_CGROUP_LEVELS 4
> +#define WAIT_AUTO_IP_MAX_ATTEMPT 10
> +#define DST_ADDR "ff02::1"
> +#define DST_PORT 1234
> +#define MAX_ASSERT_NAME 32
> +
/* Shared state threaded through setup/cleanup and the checks. */
struct test_data {
	struct cgroup_ancestor *skel; /* loaded BPF skeleton under test */
	struct bpf_tc_hook qdisc;     /* clsact hook on VETH_1 egress */
	struct bpf_tc_opts tc_attach; /* attach opts for progs.log_cgroup_id */
};
> +
> +static int send_datagram(void)
> +{
> +	unsigned char buf[] = "some random test data";
> +	struct sockaddr_in6 addr = { .sin6_family = AF_INET6,
> +				     .sin6_port = htons(DST_PORT),
> +				     .sin6_scope_id = if_nametoindex(VETH_1) };
> +	int sock, n;
> +
> +	if (!ASSERT_EQ(inet_pton(AF_INET6, DST_ADDR, &addr.sin6_addr), 1,
> +		       "inet_pton"))
> +		return -1;
> +
> +	sock = socket(AF_INET6, SOCK_DGRAM, 0);

sock is leaked.

> +	if (!ASSERT_OK_FD(sock, "create socket"))
> +		return sock;
> +
> +	n = sendto(sock, buf, sizeof(buf), 0, (const struct sockaddr *)&addr,
> +		   sizeof(addr));
> +	if (!ASSERT_EQ(n, sizeof(buf), "send data"))
> +		return n;
> +
> +	return 0;
> +}
> +
> +static int wait_local_ip(void)
> +{
> +	char *ping_cmd = ping_command(AF_INET6);
> +	int i, err;
> +
> +	for (i = 0; i < WAIT_AUTO_IP_MAX_ATTEMPT; i++) {
> +		err = SYS_NOFAIL("%s -c 1 -W 1 %s%%%s", ping_cmd, DST_ADDR,
> +				 VETH_1);
> +		if (!err)
> +			break;
> +	}
> +
> +	return err;
> +}
> +
> +static int setup_network(struct test_data *t)
> +{
> +	int ret;
> +
> +	SYS(fail, "ip link add dev %s type veth peer name %s", VETH_1, VETH_2);
> +	SYS(fail, "ip link set %s up", VETH_1);
> +	SYS(fail, "ip link set %s up", VETH_2);

Same. Do it under a new netns.

> +
> +	ret = wait_local_ip();
> +	if (!ASSERT_EQ(ret, 0, "wait local ip"))
> +		goto fail;
> +
> +	memset(&t->qdisc, 0, sizeof(t->qdisc));
> +	t->qdisc.sz = sizeof(t->qdisc);
> +	t->qdisc.attach_point = BPF_TC_EGRESS;
> +	t->qdisc.ifindex = if_nametoindex(VETH_1);
> +	if (!ASSERT_NEQ(t->qdisc.ifindex, 0, "if_nametoindex"))
> +		goto cleanup_interfaces;
> +	if (!ASSERT_OK(bpf_tc_hook_create(&t->qdisc), "qdisc add"))
> +		goto cleanup_interfaces;
> +
> +	memset(&t->tc_attach, 0, sizeof(t->tc_attach));
> +	t->tc_attach.sz = sizeof(t->tc_attach);
> +	t->tc_attach.prog_fd = bpf_program__fd(t->skel->progs.log_cgroup_id);
> +	if (!ASSERT_OK(bpf_tc_attach(&t->qdisc, &t->tc_attach), "filter add"))
> +		goto cleanup_qdisc;
> +
> +	return 0;
> +
> +cleanup_qdisc:
> +	bpf_tc_hook_destroy(&t->qdisc);
> +cleanup_interfaces:
> +	SYS_NOFAIL("ip link del %s", VETH_1);
> +fail:
> +	return 1;
> +}
> +
/* Undo setup_network(): detach the TC program, remove the clsact qdisc,
 * then delete the veth pair. Order matters: the filter must go before the
 * hook, and the hook before the links it is attached to.
 */
static void cleanup_network(struct test_data *t)
{
	bpf_tc_detach(&t->qdisc, &t->tc_attach);
	bpf_tc_hook_destroy(&t->qdisc);
	/* Deleting first interface will also delete peer interface */
	SYS_NOFAIL("ip link del %s", VETH_1);
}
> +
> +static void check_ancestors_ids(struct test_data *t)
> +{
> +	__u64 expected_ids[NUM_CGROUP_LEVELS];
> +	char assert_name[MAX_ASSERT_NAME];
> +	__u32 level;
> +
> +	expected_ids[0] = get_cgroup_id("/.."); /* root cgroup */
> +	expected_ids[1] = get_cgroup_id("");
> +	expected_ids[2] = get_cgroup_id(CGROUP_PATH);
> +	expected_ids[3] = 0; /* non-existent cgroup */
> +
> +	for (level = 0; level < NUM_CGROUP_LEVELS; level++) {
> +		snprintf(assert_name, MAX_ASSERT_NAME,
> +			 "ancestor id at level %d", level);
> +		ASSERT_EQ(t->skel->bss->cgroup_ids[level], expected_ids[level],
> +			  assert_name);
> +	}
> +}
> +
> +void test_cgroup_ancestor(void)
> +{
> +	struct test_data t;
> +	int cgroup_fd;
> +
> +	t.skel = cgroup_ancestor__open_and_load();
> +	if (!ASSERT_OK_PTR(t.skel, "open and load"))
> +		return;
> +
> +	if (setup_network(&t))
> +		goto cleanup_progs;
> +
> +	cgroup_fd = cgroup_setup_and_join(CGROUP_PATH);

cgroup_fd is leaked.

Thanks for working on this.

> +	if (cgroup_fd < 0)
> +		goto cleanup_network;
> +
> +	if (send_datagram())
> +		goto cleanup_cgroups;
> +
> +	check_ancestors_ids(&t);
> +
> +cleanup_cgroups:
> +	cleanup_cgroup_environment();
> +cleanup_network:
> +	cleanup_network(&t);
> +cleanup_progs:
> +	cgroup_ancestor__destroy(t.skel);
> +}

  reply	other threads:[~2024-08-09 23:53 UTC|newest]

Thread overview: 9+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-08-06  7:55 [PATCH bpf-next v2 0/4] selftests/bpf: convert three other cgroup tests to test_progs Alexis Lothoré (eBPF Foundation)
2024-08-06  7:55 ` [PATCH bpf-next v2 1/4] selftests/bpf: convert get_current_cgroup_id_user " Alexis Lothoré (eBPF Foundation)
2024-08-09 23:23   ` Martin KaFai Lau
2024-08-06  7:55 ` [PATCH bpf-next v2 2/4] selftests/bpf: convert test_cgroup_storage " Alexis Lothoré (eBPF Foundation)
2024-08-09 23:49   ` Martin KaFai Lau
2024-08-06  7:55 ` [PATCH bpf-next v2 3/4] selftests/bpf: add proper section name to bpf prog and rename it Alexis Lothoré (eBPF Foundation)
2024-08-06  7:55 ` [PATCH bpf-next v2 4/4] selftests/bpf: convert test_skb_cgroup_id_user to test_progs Alexis Lothoré (eBPF Foundation)
2024-08-09 23:53   ` Martin KaFai Lau [this message]
2024-08-12  7:22     ` Alexis Lothoré

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=5f91072d-fd63-4d81-8442-ef7aa62e192f@linux.dev \
    --to=martin.lau@linux.dev \
    --cc=alexis.lothore@bootlin.com \
    --cc=andrii@kernel.org \
    --cc=ast@kernel.org \
    --cc=bpf@vger.kernel.org \
    --cc=daniel@iogearbox.net \
    --cc=ebpf@linuxfoundation.org \
    --cc=eddyz87@gmail.com \
    --cc=haoluo@google.com \
    --cc=john.fastabend@gmail.com \
    --cc=jolsa@kernel.org \
    --cc=kpsingh@kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-kselftest@vger.kernel.org \
    --cc=mykolal@fb.com \
    --cc=sdf@fomichev.me \
    --cc=shuah@kernel.org \
    --cc=song@kernel.org \
    --cc=thomas.petazzoni@bootlin.com \
    --cc=yonghong.song@linux.dev \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox