* [PATCH bpf-next v4 1/3] selftests/bpf: Remove kmem subtest from cgroup_iter_memcg
2026-02-28 9:25 [PATCH bpf-next v4 0/3] Fix test_cgroup_iter_memcg issues found during back-porting Hui Zhu
@ 2026-02-28 9:25 ` Hui Zhu
2026-03-02 18:08 ` JP Kobryn (Meta)
2026-02-28 9:25 ` [PATCH bpf-next v4 2/3] bpf: Use bpf_core_enum_value for stats in cgroup_iter_memcg Hui Zhu
2026-02-28 9:25 ` [PATCH bpf-next v4 3/3] selftests/bpf: Check bpf_mem_cgroup_page_state return value Hui Zhu
2 siblings, 1 reply; 7+ messages in thread
From: Hui Zhu @ 2026-02-28 9:25 UTC (permalink / raw)
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
Martin KaFai Lau, Eduard Zingerman, Song Liu, Yonghong Song,
John Fastabend, KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa,
Shuah Khan, JP Kobryn, Roman Gushchin, bpf, linux-kselftest,
linux-kernel
Cc: Hui Zhu
From: Hui Zhu <zhuhui@kylinos.cn>
When cgroup.memory=nokmem is set in the kernel command line, kmem
accounting is disabled. This causes the test_kmem subtest in
cgroup_iter_memcg to fail because it expects non-zero kmem values.
Remove the kmem subtest altogether since the remaining subtests
(shmem, file, pgfault) already provide sufficient coverage for
the cgroup iter memcg functionality.
Signed-off-by: Hui Zhu <zhuhui@kylinos.cn>
---
.../testing/selftests/bpf/cgroup_iter_memcg.h | 2 --
.../bpf/prog_tests/cgroup_iter_memcg.c | 28 -------------------
.../selftests/bpf/progs/cgroup_iter_memcg.c | 1 -
3 files changed, 31 deletions(-)
diff --git a/tools/testing/selftests/bpf/cgroup_iter_memcg.h b/tools/testing/selftests/bpf/cgroup_iter_memcg.h
index 3f59b127943b..ff20ec537164 100644
--- a/tools/testing/selftests/bpf/cgroup_iter_memcg.h
+++ b/tools/testing/selftests/bpf/cgroup_iter_memcg.h
@@ -9,8 +9,6 @@ struct memcg_query {
unsigned long nr_shmem;
unsigned long nr_file_pages;
unsigned long nr_file_mapped;
- /* some memcg_stat_item */
- unsigned long memcg_kmem;
/* some vm_event_item */
unsigned long pgfault;
};
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c b/tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c
index a5afd16705f0..88fc3e83d2b7 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c
@@ -126,32 +126,6 @@ static void test_shmem(struct bpf_link *link, struct memcg_query *memcg_query)
shm_unlink("/tmp_shmem");
}
-#define NR_PIPES 64
-static void test_kmem(struct bpf_link *link, struct memcg_query *memcg_query)
-{
- int fds[NR_PIPES][2], i;
-
- /*
- * Increase kmem value by creating pipes which will allocate some
- * kernel buffers.
- */
- for (i = 0; i < NR_PIPES; i++) {
- if (!ASSERT_OK(pipe(fds[i]), "pipe"))
- goto cleanup;
- }
-
- if (!ASSERT_OK(read_stats(link), "read stats"))
- goto cleanup;
-
- ASSERT_GT(memcg_query->memcg_kmem, 0, "kmem value");
-
-cleanup:
- for (i = i - 1; i >= 0; i--) {
- close(fds[i][0]);
- close(fds[i][1]);
- }
-}
-
static void test_pgfault(struct bpf_link *link, struct memcg_query *memcg_query)
{
void *map;
@@ -209,8 +183,6 @@ void test_cgroup_iter_memcg(void)
test_shmem(link, &skel->data_query->memcg_query);
if (test__start_subtest("cgroup_iter_memcg__file"))
test_file(link, &skel->data_query->memcg_query);
- if (test__start_subtest("cgroup_iter_memcg__kmem"))
- test_kmem(link, &skel->data_query->memcg_query);
if (test__start_subtest("cgroup_iter_memcg__pgfault"))
test_pgfault(link, &skel->data_query->memcg_query);
diff --git a/tools/testing/selftests/bpf/progs/cgroup_iter_memcg.c b/tools/testing/selftests/bpf/progs/cgroup_iter_memcg.c
index 59fb70a3cc50..12f79a44133e 100644
--- a/tools/testing/selftests/bpf/progs/cgroup_iter_memcg.c
+++ b/tools/testing/selftests/bpf/progs/cgroup_iter_memcg.c
@@ -30,7 +30,6 @@ int cgroup_memcg_query(struct bpf_iter__cgroup *ctx)
memcg_query.nr_shmem = bpf_mem_cgroup_page_state(memcg, NR_SHMEM);
memcg_query.nr_file_pages = bpf_mem_cgroup_page_state(memcg, NR_FILE_PAGES);
memcg_query.nr_file_mapped = bpf_mem_cgroup_page_state(memcg, NR_FILE_MAPPED);
- memcg_query.memcg_kmem = bpf_mem_cgroup_page_state(memcg, MEMCG_KMEM);
memcg_query.pgfault = bpf_mem_cgroup_vm_events(memcg, PGFAULT);
bpf_put_mem_cgroup(memcg);
--
2.43.0
^ permalink raw reply related [flat|nested] 7+ messages in thread* Re: [PATCH bpf-next v4 1/3] selftests/bpf: Remove kmem subtest from cgroup_iter_memcg
2026-02-28 9:25 ` [PATCH bpf-next v4 1/3] selftests/bpf: Remove kmem subtest from cgroup_iter_memcg Hui Zhu
@ 2026-03-02 18:08 ` JP Kobryn (Meta)
0 siblings, 0 replies; 7+ messages in thread
From: JP Kobryn (Meta) @ 2026-03-02 18:08 UTC (permalink / raw)
To: Hui Zhu, Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
Martin KaFai Lau, Eduard Zingerman, Song Liu, Yonghong Song,
John Fastabend, KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa,
Shuah Khan, Roman Gushchin, bpf, linux-kselftest, linux-kernel
Cc: Hui Zhu
On 2/28/26 1:25 AM, Hui Zhu wrote:
> From: Hui Zhu <zhuhui@kylinos.cn>
>
> When cgroup.memory=nokmem is set in the kernel command line, kmem
> accounting is disabled. This causes the test_kmem subtest in
> cgroup_iter_memcg to fail because it expects non-zero kmem values.
>
> Remove the kmem subtest altogether since the remaining subtests
> (shmem, file, pgfault) already provide sufficient coverage for
> the cgroup iter memcg functionality.
>
> Signed-off-by: Hui Zhu <zhuhui@kylinos.cn>
> ---
> .../testing/selftests/bpf/cgroup_iter_memcg.h | 2 --
> .../bpf/prog_tests/cgroup_iter_memcg.c | 28 -------------------
> .../selftests/bpf/progs/cgroup_iter_memcg.c | 1 -
> 3 files changed, 31 deletions(-)
>
> diff --git a/tools/testing/selftests/bpf/cgroup_iter_memcg.h b/tools/testing/selftests/bpf/cgroup_iter_memcg.h
> index 3f59b127943b..ff20ec537164 100644
> --- a/tools/testing/selftests/bpf/cgroup_iter_memcg.h
> +++ b/tools/testing/selftests/bpf/cgroup_iter_memcg.h
> @@ -9,8 +9,6 @@ struct memcg_query {
> unsigned long nr_shmem;
> unsigned long nr_file_pages;
> unsigned long nr_file_mapped;
> - /* some memcg_stat_item */
> - unsigned long memcg_kmem;
> /* some vm_event_item */
> unsigned long pgfault;
> };
> diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c b/tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c
> index a5afd16705f0..88fc3e83d2b7 100644
> --- a/tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c
> +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c
> @@ -126,32 +126,6 @@ static void test_shmem(struct bpf_link *link, struct memcg_query *memcg_query)
> shm_unlink("/tmp_shmem");
> }
>
> -#define NR_PIPES 64
> -static void test_kmem(struct bpf_link *link, struct memcg_query *memcg_query)
> -{
> - int fds[NR_PIPES][2], i;
> -
> - /*
> - * Increase kmem value by creating pipes which will allocate some
> - * kernel buffers.
> - */
> - for (i = 0; i < NR_PIPES; i++) {
> - if (!ASSERT_OK(pipe(fds[i]), "pipe"))
> - goto cleanup;
> - }
> -
> - if (!ASSERT_OK(read_stats(link), "read stats"))
> - goto cleanup;
> -
> - ASSERT_GT(memcg_query->memcg_kmem, 0, "kmem value");
> -
> -cleanup:
> - for (i = i - 1; i >= 0; i--) {
> - close(fds[i][0]);
> - close(fds[i][1]);
> - }
> -}
> -
> static void test_pgfault(struct bpf_link *link, struct memcg_query *memcg_query)
> {
> void *map;
> @@ -209,8 +183,6 @@ void test_cgroup_iter_memcg(void)
> test_shmem(link, &skel->data_query->memcg_query);
> if (test__start_subtest("cgroup_iter_memcg__file"))
> test_file(link, &skel->data_query->memcg_query);
> - if (test__start_subtest("cgroup_iter_memcg__kmem"))
> - test_kmem(link, &skel->data_query->memcg_query);
> if (test__start_subtest("cgroup_iter_memcg__pgfault"))
> test_pgfault(link, &skel->data_query->memcg_query);
>
> diff --git a/tools/testing/selftests/bpf/progs/cgroup_iter_memcg.c b/tools/testing/selftests/bpf/progs/cgroup_iter_memcg.c
> index 59fb70a3cc50..12f79a44133e 100644
> --- a/tools/testing/selftests/bpf/progs/cgroup_iter_memcg.c
> +++ b/tools/testing/selftests/bpf/progs/cgroup_iter_memcg.c
> @@ -30,7 +30,6 @@ int cgroup_memcg_query(struct bpf_iter__cgroup *ctx)
> memcg_query.nr_shmem = bpf_mem_cgroup_page_state(memcg, NR_SHMEM);
> memcg_query.nr_file_pages = bpf_mem_cgroup_page_state(memcg, NR_FILE_PAGES);
> memcg_query.nr_file_mapped = bpf_mem_cgroup_page_state(memcg, NR_FILE_MAPPED);
> - memcg_query.memcg_kmem = bpf_mem_cgroup_page_state(memcg, MEMCG_KMEM);
> memcg_query.pgfault = bpf_mem_cgroup_vm_events(memcg, PGFAULT);
>
> bpf_put_mem_cgroup(memcg);
Reviewed-by: JP Kobryn <jp.kobryn@linux.dev>
^ permalink raw reply [flat|nested] 7+ messages in thread
* [PATCH bpf-next v4 2/3] bpf: Use bpf_core_enum_value for stats in cgroup_iter_memcg
2026-02-28 9:25 [PATCH bpf-next v4 0/3] Fix test_cgroup_iter_memcg issues found during back-porting Hui Zhu
2026-02-28 9:25 ` [PATCH bpf-next v4 1/3] selftests/bpf: Remove kmem subtest from cgroup_iter_memcg Hui Zhu
@ 2026-02-28 9:25 ` Hui Zhu
2026-03-02 18:10 ` JP Kobryn (Meta)
2026-02-28 9:25 ` [PATCH bpf-next v4 3/3] selftests/bpf: Check bpf_mem_cgroup_page_state return value Hui Zhu
2 siblings, 1 reply; 7+ messages in thread
From: Hui Zhu @ 2026-02-28 9:25 UTC (permalink / raw)
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
Martin KaFai Lau, Eduard Zingerman, Song Liu, Yonghong Song,
John Fastabend, KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa,
Shuah Khan, JP Kobryn, Roman Gushchin, bpf, linux-kselftest,
linux-kernel
Cc: Hui Zhu
From: Hui Zhu <zhuhui@kylinos.cn>
Replace hardcoded enum values with bpf_core_enum_value() calls in
cgroup_iter_memcg test to improve portability across different
kernel versions.
The change adds runtime enum value resolution for:
- node_stat_item: NR_ANON_MAPPED, NR_SHMEM, NR_FILE_PAGES,
NR_FILE_MAPPED
- vm_event_item: PGFAULT
This ensures the BPF program can adapt to enum value changes
between kernel versions.
Signed-off-by: Hui Zhu <zhuhui@kylinos.cn>
---
.../selftests/bpf/progs/cgroup_iter_memcg.c | 17 ++++++++++++-----
1 file changed, 12 insertions(+), 5 deletions(-)
diff --git a/tools/testing/selftests/bpf/progs/cgroup_iter_memcg.c b/tools/testing/selftests/bpf/progs/cgroup_iter_memcg.c
index 12f79a44133e..06a385c9d85b 100644
--- a/tools/testing/selftests/bpf/progs/cgroup_iter_memcg.c
+++ b/tools/testing/selftests/bpf/progs/cgroup_iter_memcg.c
@@ -26,11 +26,18 @@ int cgroup_memcg_query(struct bpf_iter__cgroup *ctx)
bpf_mem_cgroup_flush_stats(memcg);
- memcg_query.nr_anon_mapped = bpf_mem_cgroup_page_state(memcg, NR_ANON_MAPPED);
- memcg_query.nr_shmem = bpf_mem_cgroup_page_state(memcg, NR_SHMEM);
- memcg_query.nr_file_pages = bpf_mem_cgroup_page_state(memcg, NR_FILE_PAGES);
- memcg_query.nr_file_mapped = bpf_mem_cgroup_page_state(memcg, NR_FILE_MAPPED);
- memcg_query.pgfault = bpf_mem_cgroup_vm_events(memcg, PGFAULT);
+ memcg_query.nr_anon_mapped = bpf_mem_cgroup_page_state(
+ memcg,
+ bpf_core_enum_value(enum node_stat_item, NR_ANON_MAPPED));
+ memcg_query.nr_shmem = bpf_mem_cgroup_page_state(
+ memcg, bpf_core_enum_value(enum node_stat_item, NR_SHMEM));
+ memcg_query.nr_file_pages = bpf_mem_cgroup_page_state(
+ memcg, bpf_core_enum_value(enum node_stat_item, NR_FILE_PAGES));
+ memcg_query.nr_file_mapped = bpf_mem_cgroup_page_state(
+ memcg,
+ bpf_core_enum_value(enum node_stat_item, NR_FILE_MAPPED));
+ memcg_query.pgfault = bpf_mem_cgroup_vm_events(
+ memcg, bpf_core_enum_value(enum vm_event_item, PGFAULT));
bpf_put_mem_cgroup(memcg);
--
2.43.0
^ permalink raw reply related [flat|nested] 7+ messages in thread
* Re: [PATCH bpf-next v4 2/3] bpf: Use bpf_core_enum_value for stats in cgroup_iter_memcg
2026-02-28 9:25 ` [PATCH bpf-next v4 2/3] bpf: Use bpf_core_enum_value for stats in cgroup_iter_memcg Hui Zhu
@ 2026-03-02 18:10 ` JP Kobryn (Meta)
0 siblings, 0 replies; 7+ messages in thread
From: JP Kobryn (Meta) @ 2026-03-02 18:10 UTC (permalink / raw)
To: Hui Zhu, Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
Martin KaFai Lau, Eduard Zingerman, Song Liu, Yonghong Song,
John Fastabend, KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa,
Shuah Khan, Roman Gushchin, bpf, linux-kselftest, linux-kernel
Cc: Hui Zhu
On 2/28/26 1:25 AM, Hui Zhu wrote:
> From: Hui Zhu <zhuhui@kylinos.cn>
>
> Replace hardcoded enum values with bpf_core_enum_value() calls in
> cgroup_iter_memcg test to improve portability across different
> kernel versions.
>
> The change adds runtime enum value resolution for:
> - node_stat_item: NR_ANON_MAPPED, NR_SHMEM, NR_FILE_PAGES,
> NR_FILE_MAPPED
> - vm_event_item: PGFAULT
>
> This ensures the BPF program can adapt to enum value changes
> between kernel versions.
>
> Signed-off-by: Hui Zhu <zhuhui@kylinos.cn>
> ---
> .../selftests/bpf/progs/cgroup_iter_memcg.c | 17 ++++++++++++-----
> 1 file changed, 12 insertions(+), 5 deletions(-)
>
> diff --git a/tools/testing/selftests/bpf/progs/cgroup_iter_memcg.c b/tools/testing/selftests/bpf/progs/cgroup_iter_memcg.c
> index 12f79a44133e..06a385c9d85b 100644
> --- a/tools/testing/selftests/bpf/progs/cgroup_iter_memcg.c
> +++ b/tools/testing/selftests/bpf/progs/cgroup_iter_memcg.c
> @@ -26,11 +26,18 @@ int cgroup_memcg_query(struct bpf_iter__cgroup *ctx)
>
> bpf_mem_cgroup_flush_stats(memcg);
>
> - memcg_query.nr_anon_mapped = bpf_mem_cgroup_page_state(memcg, NR_ANON_MAPPED);
> - memcg_query.nr_shmem = bpf_mem_cgroup_page_state(memcg, NR_SHMEM);
> - memcg_query.nr_file_pages = bpf_mem_cgroup_page_state(memcg, NR_FILE_PAGES);
> - memcg_query.nr_file_mapped = bpf_mem_cgroup_page_state(memcg, NR_FILE_MAPPED);
> - memcg_query.pgfault = bpf_mem_cgroup_vm_events(memcg, PGFAULT);
> + memcg_query.nr_anon_mapped = bpf_mem_cgroup_page_state(
> + memcg,
> + bpf_core_enum_value(enum node_stat_item, NR_ANON_MAPPED));
> + memcg_query.nr_shmem = bpf_mem_cgroup_page_state(
> + memcg, bpf_core_enum_value(enum node_stat_item, NR_SHMEM));
> + memcg_query.nr_file_pages = bpf_mem_cgroup_page_state(
> + memcg, bpf_core_enum_value(enum node_stat_item, NR_FILE_PAGES));
> + memcg_query.nr_file_mapped = bpf_mem_cgroup_page_state(
> + memcg,
> + bpf_core_enum_value(enum node_stat_item, NR_FILE_MAPPED));
> + memcg_query.pgfault = bpf_mem_cgroup_vm_events(
> + memcg, bpf_core_enum_value(enum vm_event_item, PGFAULT));
>
> bpf_put_mem_cgroup(memcg);
>
Reviewed-by: JP Kobryn <jp.kobryn@linux.dev>
^ permalink raw reply [flat|nested] 7+ messages in thread
* [PATCH bpf-next v4 3/3] selftests/bpf: Check bpf_mem_cgroup_page_state return value
2026-02-28 9:25 [PATCH bpf-next v4 0/3] Fix test_cgroup_iter_memcg issues found during back-porting Hui Zhu
2026-02-28 9:25 ` [PATCH bpf-next v4 1/3] selftests/bpf: Remove kmem subtest from cgroup_iter_memcg Hui Zhu
2026-02-28 9:25 ` [PATCH bpf-next v4 2/3] bpf: Use bpf_core_enum_value for stats in cgroup_iter_memcg Hui Zhu
@ 2026-02-28 9:25 ` Hui Zhu
2026-03-02 18:27 ` JP Kobryn (Meta)
2 siblings, 1 reply; 7+ messages in thread
From: Hui Zhu @ 2026-02-28 9:25 UTC (permalink / raw)
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
Martin KaFai Lau, Eduard Zingerman, Song Liu, Yonghong Song,
John Fastabend, KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa,
Shuah Khan, JP Kobryn, Roman Gushchin, bpf, linux-kselftest,
linux-kernel
Cc: Hui Zhu
From: Hui Zhu <zhuhui@kylinos.cn>
When back-porting test_progs to different kernel versions, I encountered
an issue where the test_cgroup_iter_memcg test would falsely pass even
when bpf_mem_cgroup_page_state() failed.
This patch adds explicit checks to ensure bpf_mem_cgroup_page_state()
doesn't return -1 before validating the actual statistics values.
Signed-off-by: Hui Zhu <zhuhui@kylinos.cn>
---
.../selftests/bpf/prog_tests/cgroup_iter_memcg.c | 10 ++++++++++
1 file changed, 10 insertions(+)
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c b/tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c
index 88fc3e83d2b7..9eadfbd3fdb9 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c
@@ -53,6 +53,8 @@ static void test_anon(struct bpf_link *link, struct memcg_query *memcg_query)
if (!ASSERT_OK(read_stats(link), "read stats"))
goto cleanup;
+ ASSERT_NEQ(memcg_query->nr_anon_mapped, (unsigned long)-1,
+ "bpf_mem_cgroup_page_state NR_ANON_MAPPED");
ASSERT_GT(memcg_query->nr_anon_mapped, 0, "final anon mapped val");
cleanup:
@@ -88,6 +90,10 @@ static void test_file(struct bpf_link *link, struct memcg_query *memcg_query)
if (!ASSERT_OK(read_stats(link), "read stats"))
goto cleanup_map;
+ ASSERT_NEQ(memcg_query->nr_file_pages, (unsigned long)-1,
+ "bpf_mem_cgroup_page_state NR_FILE_PAGES");
+ ASSERT_NEQ(memcg_query->nr_file_mapped, (unsigned long)-1,
+ "bpf_mem_cgroup_page_state NR_FILE_MAPPED");
ASSERT_GT(memcg_query->nr_file_pages, 0, "final file value");
ASSERT_GT(memcg_query->nr_file_mapped, 0, "final file mapped value");
@@ -119,6 +125,8 @@ static void test_shmem(struct bpf_link *link, struct memcg_query *memcg_query)
if (!ASSERT_OK(read_stats(link), "read stats"))
goto cleanup;
+ ASSERT_NEQ(memcg_query->nr_shmem, (unsigned long)-1,
+ "bpf_mem_cgroup_page_state NR_SHMEM");
ASSERT_GT(memcg_query->nr_shmem, 0, "final shmem value");
cleanup:
@@ -144,6 +152,8 @@ static void test_pgfault(struct bpf_link *link, struct memcg_query *memcg_query)
if (!ASSERT_OK(read_stats(link), "read stats"))
goto cleanup;
+ ASSERT_NEQ(memcg_query->pgfault, (unsigned long)-1,
+ "bpf_mem_cgroup_vm_events PGFAULT");
ASSERT_GT(memcg_query->pgfault, 0, "final pgfault val");
cleanup:
--
2.43.0
^ permalink raw reply related [flat|nested] 7+ messages in thread
* Re: [PATCH bpf-next v4 3/3] selftests/bpf: Check bpf_mem_cgroup_page_state return value
2026-02-28 9:25 ` [PATCH bpf-next v4 3/3] selftests/bpf: Check bpf_mem_cgroup_page_state return value Hui Zhu
@ 2026-03-02 18:27 ` JP Kobryn (Meta)
0 siblings, 0 replies; 7+ messages in thread
From: JP Kobryn (Meta) @ 2026-03-02 18:27 UTC (permalink / raw)
To: Hui Zhu, Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
Martin KaFai Lau, Eduard Zingerman, Song Liu, Yonghong Song,
John Fastabend, KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa,
Shuah Khan, Roman Gushchin, bpf, linux-kselftest, linux-kernel
Cc: Hui Zhu
On 2/28/26 1:25 AM, Hui Zhu wrote:
> From: Hui Zhu <zhuhui@kylinos.cn>
>
> When back-porting test_progs to different kernel versions, I encountered
> an issue where the test_cgroup_iter_memcg test would falsely pass even
> when bpf_mem_cgroup_page_state() failed.
>
> This patch adds explicit checks to ensure bpf_mem_cgroup_page_state()
> doesn't return -1 before validating the actual statistics values.
>
> Signed-off-by: Hui Zhu <zhuhui@kylinos.cn>
> ---
> .../selftests/bpf/prog_tests/cgroup_iter_memcg.c | 10 ++++++++++
> 1 file changed, 10 insertions(+)
>
> diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c b/tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c
> index 88fc3e83d2b7..9eadfbd3fdb9 100644
> --- a/tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c
> +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c
> @@ -53,6 +53,8 @@ static void test_anon(struct bpf_link *link, struct memcg_query *memcg_query)
> if (!ASSERT_OK(read_stats(link), "read stats"))
> goto cleanup;
>
> + ASSERT_NEQ(memcg_query->nr_anon_mapped, (unsigned long)-1,
> + "bpf_mem_cgroup_page_state NR_ANON_MAPPED");
> ASSERT_GT(memcg_query->nr_anon_mapped, 0, "final anon mapped val");
>
> cleanup:
> @@ -88,6 +90,10 @@ static void test_file(struct bpf_link *link, struct memcg_query *memcg_query)
> if (!ASSERT_OK(read_stats(link), "read stats"))
> goto cleanup_map;
>
> + ASSERT_NEQ(memcg_query->nr_file_pages, (unsigned long)-1,
> + "bpf_mem_cgroup_page_state NR_FILE_PAGES");
> + ASSERT_NEQ(memcg_query->nr_file_mapped, (unsigned long)-1,
> + "bpf_mem_cgroup_page_state NR_FILE_MAPPED");
> ASSERT_GT(memcg_query->nr_file_pages, 0, "final file value");
> ASSERT_GT(memcg_query->nr_file_mapped, 0, "final file mapped value");
>
> @@ -119,6 +125,8 @@ static void test_shmem(struct bpf_link *link, struct memcg_query *memcg_query)
> if (!ASSERT_OK(read_stats(link), "read stats"))
> goto cleanup;
>
> + ASSERT_NEQ(memcg_query->nr_shmem, (unsigned long)-1,
> + "bpf_mem_cgroup_page_state NR_SHMEM");
> ASSERT_GT(memcg_query->nr_shmem, 0, "final shmem value");
>
> cleanup:
> @@ -144,6 +152,8 @@ static void test_pgfault(struct bpf_link *link, struct memcg_query *memcg_query)
> if (!ASSERT_OK(read_stats(link), "read stats"))
> goto cleanup;
>
> + ASSERT_NEQ(memcg_query->pgfault, (unsigned long)-1,
> + "bpf_mem_cgroup_vm_events PGFAULT");
> ASSERT_GT(memcg_query->pgfault, 0, "final pgfault val");
>
> cleanup:
Emil mentioned in the v3 thread that we don't need these checks [0]. I
agree. Now that you're using the bpf co-re enum helpers, the stat
indexes won't become an issue in a cross-kernel scenario. This patch
can be dropped.
[0]
https://lore.kernel.org/bpf/DGQXZVBSVEUP.35P5G9QOL9EAT@etsalapatis.com/T/#m7cb6edf90ed4daaeda38b746363317405f198f20
^ permalink raw reply [flat|nested] 7+ messages in thread