* [PATCH bpf-next] selftests/bpf: Add additional mprog query test coverage
@ 2023-10-17 8:17 Daniel Borkmann
2023-10-17 9:09 ` Alan Maguire
2023-10-17 20:00 ` patchwork-bot+netdevbpf
0 siblings, 2 replies; 3+ messages in thread
From: Daniel Borkmann @ 2023-10-17 8:17 UTC (permalink / raw)
To: bpf; +Cc: martin.lau, Daniel Borkmann
Add several new test cases which assert corner cases of the mprog query
mechanism, for example, passing in a prog_ids array that is smaller or
larger than the current count.
./test_progs -t tc_opts
#252 tc_opts_after:OK
#253 tc_opts_append:OK
#254 tc_opts_basic:OK
#255 tc_opts_before:OK
#256 tc_opts_chain_classic:OK
#257 tc_opts_chain_mixed:OK
#258 tc_opts_delete_empty:OK
#259 tc_opts_demixed:OK
#260 tc_opts_detach:OK
#261 tc_opts_detach_after:OK
#262 tc_opts_detach_before:OK
#263 tc_opts_dev_cleanup:OK
#264 tc_opts_invalid:OK
#265 tc_opts_max:OK
#266 tc_opts_mixed:OK
#267 tc_opts_prepend:OK
#268 tc_opts_query:OK
#269 tc_opts_query_attach:OK
#270 tc_opts_replace:OK
#271 tc_opts_revision:OK
Summary: 20/0 PASSED, 0 SKIPPED, 0 FAILED
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
---
.../selftests/bpf/prog_tests/tc_opts.c | 131 +++++++++++++++++-
1 file changed, 130 insertions(+), 1 deletion(-)
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_opts.c b/tools/testing/selftests/bpf/prog_tests/tc_opts.c
index ca506d2fcf58..51883ccb8020 100644
--- a/tools/testing/selftests/bpf/prog_tests/tc_opts.c
+++ b/tools/testing/selftests/bpf/prog_tests/tc_opts.c
@@ -2471,7 +2471,7 @@ static void test_tc_opts_query_target(int target)
__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
struct test_tc_link *skel;
union bpf_attr attr;
- __u32 prog_ids[5];
+ __u32 prog_ids[10];
int err;
skel = test_tc_link__open_and_load();
@@ -2599,6 +2599,135 @@ static void test_tc_opts_query_target(int target)
ASSERT_EQ(attr.query.link_ids, 0, "link_ids");
ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags");
+ /* Test 3: Query with smaller prog_ids array */
+ memset(&attr, 0, attr_size);
+ attr.query.target_ifindex = loopback;
+ attr.query.attach_type = target;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ attr.query.prog_ids = ptr_to_u64(prog_ids);
+ attr.query.count = 2;
+
+ err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
+ ASSERT_EQ(err, -1, "prog_query_should_fail");
+ ASSERT_EQ(errno, ENOSPC, "prog_query_should_fail");
+
+ ASSERT_EQ(attr.query.count, 4, "count");
+ ASSERT_EQ(attr.query.revision, 5, "revision");
+ ASSERT_EQ(attr.query.query_flags, 0, "query_flags");
+ ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags");
+ ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex");
+ ASSERT_EQ(attr.query.attach_type, target, "attach_type");
+ ASSERT_EQ(attr.query.prog_ids, ptr_to_u64(prog_ids), "prog_ids");
+ ASSERT_EQ(prog_ids[0], id1, "prog_ids[0]");
+ ASSERT_EQ(prog_ids[1], id2, "prog_ids[1]");
+ ASSERT_EQ(prog_ids[2], 0, "prog_ids[2]");
+ ASSERT_EQ(prog_ids[3], 0, "prog_ids[3]");
+ ASSERT_EQ(prog_ids[4], 0, "prog_ids[4]");
+ ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags");
+ ASSERT_EQ(attr.query.link_ids, 0, "link_ids");
+ ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags");
+
+ /* Test 4: Query with larger prog_ids array */
+ memset(&attr, 0, attr_size);
+ attr.query.target_ifindex = loopback;
+ attr.query.attach_type = target;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ attr.query.prog_ids = ptr_to_u64(prog_ids);
+ attr.query.count = 10;
+
+ err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+ ASSERT_EQ(attr.query.count, 4, "count");
+ ASSERT_EQ(attr.query.revision, 5, "revision");
+ ASSERT_EQ(attr.query.query_flags, 0, "query_flags");
+ ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags");
+ ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex");
+ ASSERT_EQ(attr.query.attach_type, target, "attach_type");
+ ASSERT_EQ(attr.query.prog_ids, ptr_to_u64(prog_ids), "prog_ids");
+ ASSERT_EQ(prog_ids[0], id1, "prog_ids[0]");
+ ASSERT_EQ(prog_ids[1], id2, "prog_ids[1]");
+ ASSERT_EQ(prog_ids[2], id3, "prog_ids[2]");
+ ASSERT_EQ(prog_ids[3], id4, "prog_ids[3]");
+ ASSERT_EQ(prog_ids[4], 0, "prog_ids[4]");
+ ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags");
+ ASSERT_EQ(attr.query.link_ids, 0, "link_ids");
+ ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags");
+
+ /* Test 5: Query with NULL prog_ids array but with count > 0 */
+ memset(&attr, 0, attr_size);
+ attr.query.target_ifindex = loopback;
+ attr.query.attach_type = target;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ attr.query.count = sizeof(prog_ids);
+
+ err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+ ASSERT_EQ(attr.query.count, 4, "count");
+ ASSERT_EQ(attr.query.revision, 5, "revision");
+ ASSERT_EQ(attr.query.query_flags, 0, "query_flags");
+ ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags");
+ ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex");
+ ASSERT_EQ(attr.query.attach_type, target, "attach_type");
+ ASSERT_EQ(prog_ids[0], 0, "prog_ids[0]");
+ ASSERT_EQ(prog_ids[1], 0, "prog_ids[1]");
+ ASSERT_EQ(prog_ids[2], 0, "prog_ids[2]");
+ ASSERT_EQ(prog_ids[3], 0, "prog_ids[3]");
+ ASSERT_EQ(prog_ids[4], 0, "prog_ids[4]");
+ ASSERT_EQ(attr.query.prog_ids, 0, "prog_ids");
+ ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags");
+ ASSERT_EQ(attr.query.link_ids, 0, "link_ids");
+ ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags");
+
+ /* Test 6: Query with non-NULL prog_ids array but with count == 0 */
+ memset(&attr, 0, attr_size);
+ attr.query.target_ifindex = loopback;
+ attr.query.attach_type = target;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ attr.query.prog_ids = ptr_to_u64(prog_ids);
+
+ err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+ ASSERT_EQ(attr.query.count, 4, "count");
+ ASSERT_EQ(attr.query.revision, 5, "revision");
+ ASSERT_EQ(attr.query.query_flags, 0, "query_flags");
+ ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags");
+ ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex");
+ ASSERT_EQ(attr.query.attach_type, target, "attach_type");
+ ASSERT_EQ(prog_ids[0], 0, "prog_ids[0]");
+ ASSERT_EQ(prog_ids[1], 0, "prog_ids[1]");
+ ASSERT_EQ(prog_ids[2], 0, "prog_ids[2]");
+ ASSERT_EQ(prog_ids[3], 0, "prog_ids[3]");
+ ASSERT_EQ(prog_ids[4], 0, "prog_ids[4]");
+ ASSERT_EQ(attr.query.prog_ids, ptr_to_u64(prog_ids), "prog_ids");
+ ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags");
+ ASSERT_EQ(attr.query.link_ids, 0, "link_ids");
+ ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags");
+
+ /* Test 7: Query with invalid flags */
+ attr.query.attach_flags = 0;
+ attr.query.query_flags = 1;
+
+ err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
+ ASSERT_EQ(err, -1, "prog_query_should_fail");
+ ASSERT_EQ(errno, EINVAL, "prog_query_should_fail");
+
+ attr.query.attach_flags = 1;
+ attr.query.query_flags = 0;
+
+ err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
+ ASSERT_EQ(err, -1, "prog_query_should_fail");
+ ASSERT_EQ(errno, EINVAL, "prog_query_should_fail");
+
cleanup4:
err = bpf_prog_detach_opts(fd4, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
--
2.34.1
^ permalink raw reply related [flat|nested] 3+ messages in thread
* Re: [PATCH bpf-next] selftests/bpf: Add additional mprog query test coverage
2023-10-17 8:17 [PATCH bpf-next] selftests/bpf: Add additional mprog query test coverage Daniel Borkmann
@ 2023-10-17 9:09 ` Alan Maguire
2023-10-17 20:00 ` patchwork-bot+netdevbpf
1 sibling, 0 replies; 3+ messages in thread
From: Alan Maguire @ 2023-10-17 9:09 UTC (permalink / raw)
To: Daniel Borkmann, bpf; +Cc: martin.lau
On 17/10/2023 09:17, Daniel Borkmann wrote:
> Add several new test cases which assert corner cases on the mprog query
> mechanism, for example, around passing in a too small or a larger array
> than the current count.
>
> ./test_progs -t tc_opts
> #252 tc_opts_after:OK
> #253 tc_opts_append:OK
> #254 tc_opts_basic:OK
> #255 tc_opts_before:OK
> #256 tc_opts_chain_classic:OK
> #257 tc_opts_chain_mixed:OK
> #258 tc_opts_delete_empty:OK
> #259 tc_opts_demixed:OK
> #260 tc_opts_detach:OK
> #261 tc_opts_detach_after:OK
> #262 tc_opts_detach_before:OK
> #263 tc_opts_dev_cleanup:OK
> #264 tc_opts_invalid:OK
> #265 tc_opts_max:OK
> #266 tc_opts_mixed:OK
> #267 tc_opts_prepend:OK
> #268 tc_opts_query:OK
> #269 tc_opts_query_attach:OK
> #270 tc_opts_replace:OK
> #271 tc_opts_revision:OK
> Summary: 20/0 PASSED, 0 SKIPPED, 0 FAILED
>
> Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Looks like it does a great job of exercising the codepaths in
bpf_mprog_query()!
Reviewed-by: Alan Maguire <alan.maguire@oracle.com>
> ---
> .../selftests/bpf/prog_tests/tc_opts.c | 131 +++++++++++++++++-
> 1 file changed, 130 insertions(+), 1 deletion(-)
>
> diff --git a/tools/testing/selftests/bpf/prog_tests/tc_opts.c b/tools/testing/selftests/bpf/prog_tests/tc_opts.c
> index ca506d2fcf58..51883ccb8020 100644
> --- a/tools/testing/selftests/bpf/prog_tests/tc_opts.c
> +++ b/tools/testing/selftests/bpf/prog_tests/tc_opts.c
> @@ -2471,7 +2471,7 @@ static void test_tc_opts_query_target(int target)
> __u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
> struct test_tc_link *skel;
> union bpf_attr attr;
> - __u32 prog_ids[5];
> + __u32 prog_ids[10];
> int err;
>
> skel = test_tc_link__open_and_load();
> @@ -2599,6 +2599,135 @@ static void test_tc_opts_query_target(int target)
> ASSERT_EQ(attr.query.link_ids, 0, "link_ids");
> ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags");
>
> + /* Test 3: Query with smaller prog_ids array */
> + memset(&attr, 0, attr_size);
> + attr.query.target_ifindex = loopback;
> + attr.query.attach_type = target;
> +
> + memset(prog_ids, 0, sizeof(prog_ids));
> + attr.query.prog_ids = ptr_to_u64(prog_ids);
> + attr.query.count = 2;
> +
> + err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
> + ASSERT_EQ(err, -1, "prog_query_should_fail");
> + ASSERT_EQ(errno, ENOSPC, "prog_query_should_fail");
> +
> + ASSERT_EQ(attr.query.count, 4, "count");
> + ASSERT_EQ(attr.query.revision, 5, "revision");
> + ASSERT_EQ(attr.query.query_flags, 0, "query_flags");
> + ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags");
> + ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex");
> + ASSERT_EQ(attr.query.attach_type, target, "attach_type");
> + ASSERT_EQ(attr.query.prog_ids, ptr_to_u64(prog_ids), "prog_ids");
> + ASSERT_EQ(prog_ids[0], id1, "prog_ids[0]");
> + ASSERT_EQ(prog_ids[1], id2, "prog_ids[1]");
> + ASSERT_EQ(prog_ids[2], 0, "prog_ids[2]");
> + ASSERT_EQ(prog_ids[3], 0, "prog_ids[3]");
> + ASSERT_EQ(prog_ids[4], 0, "prog_ids[4]");
> + ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags");
> + ASSERT_EQ(attr.query.link_ids, 0, "link_ids");
> + ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags");
> +
> + /* Test 4: Query with larger prog_ids array */
> + memset(&attr, 0, attr_size);
> + attr.query.target_ifindex = loopback;
> + attr.query.attach_type = target;
> +
> + memset(prog_ids, 0, sizeof(prog_ids));
> + attr.query.prog_ids = ptr_to_u64(prog_ids);
> + attr.query.count = 10;
> +
> + err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
> + if (!ASSERT_OK(err, "prog_query"))
> + goto cleanup4;
> +
> + ASSERT_EQ(attr.query.count, 4, "count");
> + ASSERT_EQ(attr.query.revision, 5, "revision");
> + ASSERT_EQ(attr.query.query_flags, 0, "query_flags");
> + ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags");
> + ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex");
> + ASSERT_EQ(attr.query.attach_type, target, "attach_type");
> + ASSERT_EQ(attr.query.prog_ids, ptr_to_u64(prog_ids), "prog_ids");
> + ASSERT_EQ(prog_ids[0], id1, "prog_ids[0]");
> + ASSERT_EQ(prog_ids[1], id2, "prog_ids[1]");
> + ASSERT_EQ(prog_ids[2], id3, "prog_ids[2]");
> + ASSERT_EQ(prog_ids[3], id4, "prog_ids[3]");
> + ASSERT_EQ(prog_ids[4], 0, "prog_ids[4]");
> + ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags");
> + ASSERT_EQ(attr.query.link_ids, 0, "link_ids");
> + ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags");
> +
> + /* Test 5: Query with NULL prog_ids array but with count > 0 */
> + memset(&attr, 0, attr_size);
> + attr.query.target_ifindex = loopback;
> + attr.query.attach_type = target;
> +
> + memset(prog_ids, 0, sizeof(prog_ids));
> + attr.query.count = sizeof(prog_ids);
> +
> + err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
> + if (!ASSERT_OK(err, "prog_query"))
> + goto cleanup4;
> +
> + ASSERT_EQ(attr.query.count, 4, "count");
> + ASSERT_EQ(attr.query.revision, 5, "revision");
> + ASSERT_EQ(attr.query.query_flags, 0, "query_flags");
> + ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags");
> + ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex");
> + ASSERT_EQ(attr.query.attach_type, target, "attach_type");
> + ASSERT_EQ(prog_ids[0], 0, "prog_ids[0]");
> + ASSERT_EQ(prog_ids[1], 0, "prog_ids[1]");
> + ASSERT_EQ(prog_ids[2], 0, "prog_ids[2]");
> + ASSERT_EQ(prog_ids[3], 0, "prog_ids[3]");
> + ASSERT_EQ(prog_ids[4], 0, "prog_ids[4]");
> + ASSERT_EQ(attr.query.prog_ids, 0, "prog_ids");
> + ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags");
> + ASSERT_EQ(attr.query.link_ids, 0, "link_ids");
> + ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags");
> +
> + /* Test 6: Query with non-NULL prog_ids array but with count == 0 */
> + memset(&attr, 0, attr_size);
> + attr.query.target_ifindex = loopback;
> + attr.query.attach_type = target;
> +
> + memset(prog_ids, 0, sizeof(prog_ids));
> + attr.query.prog_ids = ptr_to_u64(prog_ids);
> +
> + err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
> + if (!ASSERT_OK(err, "prog_query"))
> + goto cleanup4;
> +
> + ASSERT_EQ(attr.query.count, 4, "count");
> + ASSERT_EQ(attr.query.revision, 5, "revision");
> + ASSERT_EQ(attr.query.query_flags, 0, "query_flags");
> + ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags");
> + ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex");
> + ASSERT_EQ(attr.query.attach_type, target, "attach_type");
> + ASSERT_EQ(prog_ids[0], 0, "prog_ids[0]");
> + ASSERT_EQ(prog_ids[1], 0, "prog_ids[1]");
> + ASSERT_EQ(prog_ids[2], 0, "prog_ids[2]");
> + ASSERT_EQ(prog_ids[3], 0, "prog_ids[3]");
> + ASSERT_EQ(prog_ids[4], 0, "prog_ids[4]");
> + ASSERT_EQ(attr.query.prog_ids, ptr_to_u64(prog_ids), "prog_ids");
> + ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags");
> + ASSERT_EQ(attr.query.link_ids, 0, "link_ids");
> + ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags");
> +
> + /* Test 7: Query with invalid flags */
> + attr.query.attach_flags = 0;
> + attr.query.query_flags = 1;
> +
> + err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
> + ASSERT_EQ(err, -1, "prog_query_should_fail");
> + ASSERT_EQ(errno, EINVAL, "prog_query_should_fail");
> +
> + attr.query.attach_flags = 1;
> + attr.query.query_flags = 0;
> +
> + err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
> + ASSERT_EQ(err, -1, "prog_query_should_fail");
> + ASSERT_EQ(errno, EINVAL, "prog_query_should_fail");
> +
> cleanup4:
> err = bpf_prog_detach_opts(fd4, loopback, target, &optd);
> ASSERT_OK(err, "prog_detach");
^ permalink raw reply [flat|nested] 3+ messages in thread
* Re: [PATCH bpf-next] selftests/bpf: Add additional mprog query test coverage
2023-10-17 8:17 [PATCH bpf-next] selftests/bpf: Add additional mprog query test coverage Daniel Borkmann
2023-10-17 9:09 ` Alan Maguire
@ 2023-10-17 20:00 ` patchwork-bot+netdevbpf
1 sibling, 0 replies; 3+ messages in thread
From: patchwork-bot+netdevbpf @ 2023-10-17 20:00 UTC (permalink / raw)
To: Daniel Borkmann; +Cc: bpf, martin.lau
Hello:
This patch was applied to bpf/bpf-next.git (master)
by Andrii Nakryiko <andrii@kernel.org>:
On Tue, 17 Oct 2023 10:17:28 +0200 you wrote:
> Add several new test cases which assert corner cases on the mprog query
> mechanism, for example, around passing in a too small or a larger array
> than the current count.
>
> ./test_progs -t tc_opts
> #252 tc_opts_after:OK
> #253 tc_opts_append:OK
> #254 tc_opts_basic:OK
> #255 tc_opts_before:OK
> #256 tc_opts_chain_classic:OK
> #257 tc_opts_chain_mixed:OK
> #258 tc_opts_delete_empty:OK
> #259 tc_opts_demixed:OK
> #260 tc_opts_detach:OK
> #261 tc_opts_detach_after:OK
> #262 tc_opts_detach_before:OK
> #263 tc_opts_dev_cleanup:OK
> #264 tc_opts_invalid:OK
> #265 tc_opts_max:OK
> #266 tc_opts_mixed:OK
> #267 tc_opts_prepend:OK
> #268 tc_opts_query:OK
> #269 tc_opts_query_attach:OK
> #270 tc_opts_replace:OK
> #271 tc_opts_revision:OK
> Summary: 20/0 PASSED, 0 SKIPPED, 0 FAILED
>
> [...]
Here is the summary with links:
- [bpf-next] selftests/bpf: Add additional mprog query test coverage
https://git.kernel.org/bpf/bpf-next/c/24516309e330
You are awesome, thank you!
--
Deet-doot-dot, I am a bot.
https://korg.docs.kernel.org/patchwork/pwbot.html
^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads:[~2023-10-17 20:00 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-10-17 8:17 [PATCH bpf-next] selftests/bpf: Add additional mprog query test coverage Daniel Borkmann
2023-10-17 9:09 ` Alan Maguire
2023-10-17 20:00 ` patchwork-bot+netdevbpf
This is a public inbox; see the mirroring instructions
for how to clone and mirror all data and code used for this inbox.