* [PATCH 1/4] selftests/bpf: explicitly account for globals in verifier_arena_large
2025-11-17 23:56 [PATCH 0/4] libbpf: move arena variables out of the zero page Emil Tsalapatis
@ 2025-11-17 23:56 ` Emil Tsalapatis
2025-11-17 23:56 ` [PATCH 2/4] libbpf: add stub for offset-related skeleton padding Emil Tsalapatis
` (2 subsequent siblings)
3 siblings, 0 replies; 9+ messages in thread
From: Emil Tsalapatis @ 2025-11-17 23:56 UTC (permalink / raw)
To: bpf
Cc: ast, daniel, john.fastabend, memxor, andrii, eddyz87,
yonghong.song, Emil Tsalapatis
The big_alloc1 test in verifier_arena_large assumes that the arena base
and the first page allocated by bpf_arena_alloc_pages are identical.
This is not the case, because the first page in the arena is populated
by global arena data. The test still passes because the code makes the
tacit assumption that the first page is at offset PAGE_SIZE instead of
0.
Make this distinction explicit in the code, and adjust the page offsets
requested during the test to count from the beginning of the arena
instead of using the address of the first allocated page.
Signed-off-by: Emil Tsalapatis <emil@etsalapatis.com>
---
.../selftests/bpf/progs/verifier_arena_large.c | 15 +++++++++++----
1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/tools/testing/selftests/bpf/progs/verifier_arena_large.c b/tools/testing/selftests/bpf/progs/verifier_arena_large.c
index f19e15400b3e..bd430a34c3ab 100644
--- a/tools/testing/selftests/bpf/progs/verifier_arena_large.c
+++ b/tools/testing/selftests/bpf/progs/verifier_arena_large.c
@@ -23,18 +23,25 @@ int big_alloc1(void *ctx)
{
#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
volatile char __arena *page1, *page2, *no_page, *page3;
- void __arena *base;
+ u64 base;
- page1 = base = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+ base = (u64)arena_base(&arena);
+
+ page1 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
if (!page1)
return 1;
+
+ /* Account for global arena data. */
+ if ((u64)page1 != base + PAGE_SIZE)
+ return 15;
+
*page1 = 1;
- page2 = bpf_arena_alloc_pages(&arena, base + ARENA_SIZE - PAGE_SIZE * 2,
+ page2 = bpf_arena_alloc_pages(&arena, (void __arena *)(ARENA_SIZE - PAGE_SIZE),
1, NUMA_NO_NODE, 0);
if (!page2)
return 2;
*page2 = 2;
- no_page = bpf_arena_alloc_pages(&arena, base + ARENA_SIZE - PAGE_SIZE,
+ no_page = bpf_arena_alloc_pages(&arena, (void __arena *)ARENA_SIZE,
1, NUMA_NO_NODE, 0);
if (no_page)
return 3;
--
2.49.0
^ permalink raw reply related [flat|nested] 9+ messages in thread* [PATCH 2/4] libbpf: add stub for offset-related skeleton padding
2025-11-17 23:56 [PATCH 0/4] libbpf: move arena variables out of the zero page Emil Tsalapatis
2025-11-17 23:56 ` [PATCH 1/4] selftests/bpf: explicitly account for globals in verifier_arena_large Emil Tsalapatis
@ 2025-11-17 23:56 ` Emil Tsalapatis
2025-11-18 0:18 ` bot+bpf-ci
2025-11-17 23:56 ` [PATCH 3/4] libbpf: offset global arena data into the arena if possible Emil Tsalapatis
2025-11-17 23:56 ` [PATCH 4/4] selftests/bpf: add tests for the arena offset of globals Emil Tsalapatis
3 siblings, 1 reply; 9+ messages in thread
From: Emil Tsalapatis @ 2025-11-17 23:56 UTC (permalink / raw)
To: bpf
Cc: ast, daniel, john.fastabend, memxor, andrii, eddyz87,
yonghong.song, Emil Tsalapatis
Add a stub function for reporting the offset within a mapping at
which libbpf places the map's data. This will be used in a subsequent
patch to support offsetting arena variables within the mapped region.
Adjust skeleton generation to account for the new arena memory layout
by adding padding corresponding to the offset into the arena map. Add
a libbpf API function to get the data offset within the map's mapping
during skeleton generation.
Signed-off-by: Emil Tsalapatis <emil@etsalapatis.com>
---
tools/bpf/bpftool/gen.c | 23 +++++++++++++++++++++--
tools/lib/bpf/libbpf.c | 10 ++++++++++
tools/lib/bpf/libbpf.h | 9 +++++++++
tools/lib/bpf/libbpf.map | 1 +
4 files changed, 41 insertions(+), 2 deletions(-)
diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
index 993c7d9484a4..6ed125b1b465 100644
--- a/tools/bpf/bpftool/gen.c
+++ b/tools/bpf/bpftool/gen.c
@@ -148,7 +148,8 @@ static int codegen_datasec_def(struct bpf_object *obj,
struct btf *btf,
struct btf_dump *d,
const struct btf_type *sec,
- const char *obj_name)
+ const char *obj_name,
+ int var_off)
{
const char *sec_name = btf__name_by_offset(btf, sec->name_off);
const struct btf_var_secinfo *sec_var = btf_var_secinfos(sec);
@@ -163,6 +164,17 @@ static int codegen_datasec_def(struct bpf_object *obj,
strip_mods = true;
printf(" struct %s__%s {\n", obj_name, sec_ident);
+
+ /*
+ * Arena variables may be placed in an offset within the section.
+ * Represent this in the skeleton using a padding struct.
+ */
+ if (var_off > 0) {
+ printf("\t\tchar __pad%d[%d];\n",
+ pad_cnt, var_off);
+ pad_cnt++;
+ }
+
for (i = 0; i < vlen; i++, sec_var++) {
const struct btf_type *var = btf__type_by_id(btf, sec_var->type);
const char *var_name = btf__name_by_offset(btf, var->name_off);
@@ -279,6 +291,7 @@ static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
struct bpf_map *map;
const struct btf_type *sec;
char map_ident[256];
+ int var_off;
int err = 0;
d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
@@ -303,7 +316,13 @@ static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
printf(" struct %s__%s {\n", obj_name, map_ident);
printf(" } *%s;\n", map_ident);
} else {
- err = codegen_datasec_def(obj, btf, d, sec, obj_name);
+ var_off = bpf_map__data_offset(map);
+ if (var_off < 0) {
+ p_err("bpf_map__data_offset called on unmapped map\n");
+ err = var_off;
+ goto out;
+ }
+ err = codegen_datasec_def(obj, btf, d, sec, obj_name, var_off);
if (err)
goto out;
}
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 706e7481bdf6..32dac36ba8db 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -10552,6 +10552,16 @@ const char *bpf_map__name(const struct bpf_map *map)
return map->name;
}
+int bpf_map__data_offset(const struct bpf_map *map)
+{
+ if (!map->mmaped)
+ return -EINVAL;
+
+ /* No offsetting for now. */
+ return 0;
+}
+
+
enum bpf_map_type bpf_map__type(const struct bpf_map *map)
{
return map->def.type;
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 5118d0a90e24..549289dd9891 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -1314,6 +1314,15 @@ LIBBPF_API int bpf_map__set_exclusive_program(struct bpf_map *map, struct bpf_pr
*/
LIBBPF_API struct bpf_program *bpf_map__exclusive_program(struct bpf_map *map);
+/*
+ * @brief **bpf_map__data_offset** returns the offset of the map's data
+ * within the address mapping.
+ * @param BPF map whose variable offset we are looking into.
+ * @return the offset >= 0 of the map's contents within its mapping; negative
+ * error code, otherwise.
+ */
+LIBBPF_API int bpf_map__data_offset(const struct bpf_map *map);
+
struct bpf_xdp_set_link_opts {
size_t sz;
int old_fd;
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 8ed8749907d4..ac932ee3a932 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -451,4 +451,5 @@ LIBBPF_1.7.0 {
global:
bpf_map__set_exclusive_program;
bpf_map__exclusive_program;
+ bpf_map__data_offset;
} LIBBPF_1.6.0;
--
2.49.0
^ permalink raw reply related [flat|nested] 9+ messages in thread* Re: [PATCH 2/4] libbpf: add stub for offset-related skeleton padding
2025-11-17 23:56 ` [PATCH 2/4] libbpf: add stub for offset-related skeleton padding Emil Tsalapatis
@ 2025-11-18 0:18 ` bot+bpf-ci
2025-11-18 2:48 ` Emil Tsalapatis
0 siblings, 1 reply; 9+ messages in thread
From: bot+bpf-ci @ 2025-11-18 0:18 UTC (permalink / raw)
To: emil, bpf
Cc: ast, daniel, john.fastabend, memxor, andrii, eddyz87,
yonghong.song, emil, ast, andrii, daniel, martin.lau, eddyz87,
yonghong.song, clm, ihor.solodrai
[-- Attachment #1: Type: text/plain, Size: 3161 bytes --]
> diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
> index 993c7d948..6ed125b1b 100644
> --- a/tools/bpf/bpftool/gen.c
> +++ b/tools/bpf/bpftool/gen.c
[ ... ]
> @@ -269,7 +269,8 @@ static int codegen_datasec_def(struct bpf_object *obj,
> static bool is_mmapable_map(const struct bpf_map *map, char *buf, size_t sz)
> {
> size_t tmp_sz;
>
> if (bpf_map__type(map) == BPF_MAP_TYPE_ARENA && bpf_map__initial_value(map, &tmp_sz)) {
> snprintf(buf, sz, "arena");
> return true;
> }
>
> if (!bpf_map__is_internal(map) || !(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
> return false;
>
> if (!get_map_ident(map, buf, sz))
> return false;
>
> return true;
> }
[ ... ]
> @@ -287,6 +291,7 @@ static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
> struct bpf_map *map;
> const struct btf_type *sec;
> char map_ident[256];
> + int var_off;
> int err = 0;
>
> d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
[ ... ]
> @@ -303,7 +316,13 @@ static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
> printf(" struct %s__%s {\n", obj_name, map_ident);
> printf(" } *%s;\n", map_ident);
> } else {
> - err = codegen_datasec_def(obj, btf, d, sec, obj_name);
> + var_off = bpf_map__data_offset(map);
> + if (var_off < 0) {
> + p_err("bpf_map__data_offset called on unmapped map\n");
> + err = var_off;
> + goto out;
> + }
> + err = codegen_datasec_def(obj, btf, d, sec, obj_name, var_off);
> if (err)
> goto out;
> }
Can bpf_map__data_offset() be called here on an arena map that doesn't
have its mmaped field set yet?
During skeleton generation in do_skeleton(), the code calls
bpf_object__open_mem() but never calls bpf_object__load(). Looking at
is_mmapable_map() above, arena maps return true and will reach this code
path. However, arena maps only get their mmaped field set during
bpf_object__load() in libbpf.c around line 5615, not during
bpf_object__open().
This means bpf_map__data_offset() will be called on arena maps with
map->mmaped == NULL, causing it to return -EINVAL, which would fail
skeleton generation for any program with arena maps.
> diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
> index 706e7481b..32dac36ba 100644
> --- a/tools/lib/bpf/libbpf.c
> +++ b/tools/lib/bpf/libbpf.c
> @@ -10552,6 +10552,16 @@ const char *bpf_map__name(const struct bpf_map *map)
> return map->name;
> }
>
> +int bpf_map__data_offset(const struct bpf_map *map)
> +{
> + if (!map->mmaped)
> + return -EINVAL;
> +
> + /* No offsetting for now. */
> + return 0;
> +}
^^^^
The check for map->mmaped here causes the problem described above. Since
this function is called during skeleton generation (before load), the
mmaped field won't be set for arena maps yet. Should this check be
removed, or should the skeleton generation code path be changed?
[ ... ]
---
AI reviewed your patch. Please fix the bug or email reply why it's not a bug.
See: https://github.com/kernel-patches/vmtest/blob/master/ci/claude/README.md
CI run summary: https://github.com/kernel-patches/bpf/actions/runs/19448868610
^ permalink raw reply [flat|nested] 9+ messages in thread* Re: [PATCH 2/4] libbpf: add stub for offset-related skeleton padding
2025-11-18 0:18 ` bot+bpf-ci
@ 2025-11-18 2:48 ` Emil Tsalapatis
0 siblings, 0 replies; 9+ messages in thread
From: Emil Tsalapatis @ 2025-11-18 2:48 UTC (permalink / raw)
To: bot+bpf-ci
Cc: bpf, ast, daniel, john.fastabend, memxor, andrii, eddyz87,
yonghong.song, martin.lau, clm, ihor.solodrai
On Mon, Nov 17, 2025 at 7:18 PM <bot+bpf-ci@kernel.org> wrote:
>
> > diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
> > index 993c7d948..6ed125b1b 100644
> > --- a/tools/bpf/bpftool/gen.c
> > +++ b/tools/bpf/bpftool/gen.c
>
> [ ... ]
>
> > @@ -269,7 +269,8 @@ static int codegen_datasec_def(struct bpf_object *obj,
> > static bool is_mmapable_map(const struct bpf_map *map, char *buf, size_t sz)
> > {
> > size_t tmp_sz;
> >
> > if (bpf_map__type(map) == BPF_MAP_TYPE_ARENA && bpf_map__initial_value(map, &tmp_sz)) {
> > snprintf(buf, sz, "arena");
> > return true;
> > }
> >
> > if (!bpf_map__is_internal(map) || !(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
> > return false;
> >
> > if (!get_map_ident(map, buf, sz))
> > return false;
> >
> > return true;
> > }
>
> [ ... ]
>
> > @@ -287,6 +291,7 @@ static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
> > struct bpf_map *map;
> > const struct btf_type *sec;
> > char map_ident[256];
> > + int var_off;
> > int err = 0;
> >
> > d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
>
> [ ... ]
>
> > @@ -303,7 +316,13 @@ static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
> > printf(" struct %s__%s {\n", obj_name, map_ident);
> > printf(" } *%s;\n", map_ident);
> > } else {
> > - err = codegen_datasec_def(obj, btf, d, sec, obj_name);
> > + var_off = bpf_map__data_offset(map);
> > + if (var_off < 0) {
> > + p_err("bpf_map__data_offset called on unmapped map\n");
> > + err = var_off;
> > + goto out;
> > + }
> > + err = codegen_datasec_def(obj, btf, d, sec, obj_name, var_off);
> > if (err)
> > goto out;
> > }
>
> Can bpf_map__data_offset() be called here on an arena map that doesn't
> have its mmaped field set yet?
>
> During skeleton generation in do_skeleton(), the code calls
> bpf_object__open_mem() but never calls bpf_object__load(). Looking at
> is_mmapable_map() above, arena maps return true and will reach this code
> path. However, arena maps only get their mmaped field set during
> bpf_object__load() in libbpf.c around line 5615, not during
> bpf_object__open().
>
> This means bpf_map__data_offset() will be called on arena maps with
> map->mmaped == NULL, causing it to return -EINVAL, which would fail
> skeleton generation for any program with arena maps.
>
This analysis seems incorrect. bpf_object__open_mem -> bpf_object_open ->
bpf_object__init_maps -> bpf_object__init_user_btf_maps -> init_arena_map_data
happens at opening time, and this call chain properly populates the ->mmaped
field of the arena map.
> > diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
> > index 706e7481b..32dac36ba 100644
> > --- a/tools/lib/bpf/libbpf.c
> > +++ b/tools/lib/bpf/libbpf.c
> > @@ -10552,6 +10552,16 @@ const char *bpf_map__name(const struct bpf_map *map)
> > return map->name;
> > }
> >
> > +int bpf_map__data_offset(const struct bpf_map *map)
> > +{
> > + if (!map->mmaped)
> > + return -EINVAL;
> > +
> > + /* No offsetting for now. */
> > + return 0;
> > +}
> ^^^^
>
> The check for map->mmaped here causes the problem described above. Since
> this function is called during skeleton generation (before load), the
> mmaped field won't be set for arena maps yet. Should this check be
> removed, or should the skeleton generation code path be changed?
>
See above.
> [ ... ]
>
>
> ---
> AI reviewed your patch. Please fix the bug or email reply why it's not a bug.
> See: https://github.com/kernel-patches/vmtest/blob/master/ci/claude/README.md
>
> CI run summary: https://github.com/kernel-patches/bpf/actions/runs/19448868610
^ permalink raw reply [flat|nested] 9+ messages in thread
* [PATCH 3/4] libbpf: offset global arena data into the arena if possible
2025-11-17 23:56 [PATCH 0/4] libbpf: move arena variables out of the zero page Emil Tsalapatis
2025-11-17 23:56 ` [PATCH 1/4] selftests/bpf: explicitly account for globals in verifier_arena_large Emil Tsalapatis
2025-11-17 23:56 ` [PATCH 2/4] libbpf: add stub for offset-related skeleton padding Emil Tsalapatis
@ 2025-11-17 23:56 ` Emil Tsalapatis
2025-11-18 0:26 ` bot+bpf-ci
2025-11-17 23:56 ` [PATCH 4/4] selftests/bpf: add tests for the arena offset of globals Emil Tsalapatis
3 siblings, 1 reply; 9+ messages in thread
From: Emil Tsalapatis @ 2025-11-17 23:56 UTC (permalink / raw)
To: bpf
Cc: ast, daniel, john.fastabend, memxor, andrii, eddyz87,
yonghong.song, Emil Tsalapatis
Currently, libbpf places global arena data at the very beginning of
the arena mapping. Stray NULL dereferences into the arena then find
valid data and lead to silent corruption instead of causing an arena
page fault. The data is placed in the mapping at load time, preventing
us from reserving the region using bpf_arena_reserve_pages().
Adjust the arena logic to attempt placing the data from an offset within
the arena (currently 16 pages in) instead of the very beginning. If
placing the data at an offset would lead to an allocation failure due
to global data being as large as the entire arena, progressively reduce
the offset down to 0 until placement succeeds.
Adjust existing arena tests in the same commit to account for the new
global data offset. New tests that explicitly consider the new feature
are introduced in the next patch.
Signed-off-by: Emil Tsalapatis <emil@etsalapatis.com>
---
tools/lib/bpf/libbpf.c | 30 +++++++++++++++----
.../bpf/progs/verifier_arena_large.c | 14 +++++++--
2 files changed, 37 insertions(+), 7 deletions(-)
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 32dac36ba8db..6f40c6321935 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -757,6 +757,7 @@ struct bpf_object {
int arena_map_idx;
void *arena_data;
size_t arena_data_sz;
+ __u32 arena_data_off;
void *jumptables_data;
size_t jumptables_data_sz;
@@ -2991,10 +2992,14 @@ static int init_arena_map_data(struct bpf_object *obj, struct bpf_map *map,
void *data, size_t data_sz)
{
const long page_sz = sysconf(_SC_PAGE_SIZE);
+ const size_t data_alloc_sz = roundup(data_sz, page_sz);
+ /* default offset into the arena, may be resized */
+ const long max_off_pages = 16;
size_t mmap_sz;
+ long off_pages;
mmap_sz = bpf_map_mmap_sz(map);
- if (roundup(data_sz, page_sz) > mmap_sz) {
+ if (data_alloc_sz > mmap_sz) {
pr_warn("elf: sec '%s': declared ARENA map size (%zu) is too small to hold global __arena variables of size %zu\n",
sec_name, mmap_sz, data_sz);
return -E2BIG;
@@ -3006,6 +3011,17 @@ static int init_arena_map_data(struct bpf_object *obj, struct bpf_map *map,
memcpy(obj->arena_data, data, data_sz);
obj->arena_data_sz = data_sz;
+ /*
+ * find the largest offset for global arena variables
+ * where they still fit in the arena
+ */
+ for (off_pages = max_off_pages; off_pages > 0; off_pages >>= 1) {
+ if (off_pages * page_sz + data_alloc_sz <= mmap_sz)
+ break;
+ }
+
+ obj->arena_data_off = off_pages * page_sz;
+
/* make bpf_map__init_value() work for ARENA maps */
map->mmaped = obj->arena_data;
@@ -4663,7 +4679,7 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
reloc_desc->type = RELO_DATA;
reloc_desc->insn_idx = insn_idx;
reloc_desc->map_idx = obj->arena_map_idx;
- reloc_desc->sym_off = sym->st_value;
+ reloc_desc->sym_off = sym->st_value + obj->arena_data_off;
map = &obj->maps[obj->arena_map_idx];
pr_debug("prog '%s': found arena map %d (%s, sec %d, off %zu) for insn %u\n",
@@ -5624,7 +5640,8 @@ bpf_object__create_maps(struct bpf_object *obj)
return err;
}
if (obj->arena_data) {
- memcpy(map->mmaped, obj->arena_data, obj->arena_data_sz);
+ memcpy(map->mmaped + obj->arena_data_off, obj->arena_data,
+ obj->arena_data_sz);
zfree(&obj->arena_data);
}
}
@@ -10557,8 +10574,11 @@ int bpf_map__data_offset(const struct bpf_map *map)
if (!map->mmaped)
return -EINVAL;
- /* No offsetting for now. */
- return 0;
+ /* Only arenas have offsetting. */
+ if (map->def.type != BPF_MAP_TYPE_ARENA)
+ return 0;
+
+ return map->obj->arena_data_off;
}
diff --git a/tools/testing/selftests/bpf/progs/verifier_arena_large.c b/tools/testing/selftests/bpf/progs/verifier_arena_large.c
index bd430a34c3ab..f72198596889 100644
--- a/tools/testing/selftests/bpf/progs/verifier_arena_large.c
+++ b/tools/testing/selftests/bpf/progs/verifier_arena_large.c
@@ -10,6 +10,7 @@
#include "bpf_arena_common.h"
#define ARENA_SIZE (1ull << 32)
+#define GLOBAL_PGOFF (16)
struct {
__uint(type, BPF_MAP_TYPE_ARENA);
@@ -31,8 +32,7 @@ int big_alloc1(void *ctx)
if (!page1)
return 1;
- /* Account for global arena data. */
- if ((u64)page1 != base + PAGE_SIZE)
+ if ((u64)page1 != base)
return 15;
*page1 = 1;
@@ -216,6 +216,16 @@ int big_alloc2(void *ctx)
__u8 __arena *pg;
int i, err;
+ /*
+ * The global data is placed in a page with global offset 16.
+ * This test is about page allocation contiguity, so avoid
+ * accounting for the stray allocation by also allocating
+ * all pages before it. We never use the page range, so leak it.
+ */
+ pg = bpf_arena_alloc_pages(&arena, NULL, GLOBAL_PGOFF, NUMA_NO_NODE, 0);
+ if (!pg)
+ return 10;
+
base = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
if (!base)
return 1;
--
2.49.0
^ permalink raw reply related [flat|nested] 9+ messages in thread* Re: [PATCH 3/4] libbpf: offset global arena data into the arena if possible
2025-11-17 23:56 ` [PATCH 3/4] libbpf: offset global arena data into the arena if possible Emil Tsalapatis
@ 2025-11-18 0:26 ` bot+bpf-ci
2025-11-18 2:26 ` Emil Tsalapatis
0 siblings, 1 reply; 9+ messages in thread
From: bot+bpf-ci @ 2025-11-18 0:26 UTC (permalink / raw)
To: emil, bpf
Cc: ast, daniel, john.fastabend, memxor, andrii, eddyz87,
yonghong.song, emil, ast, andrii, daniel, martin.lau, eddyz87,
yonghong.song, clm, ihor.solodrai
[-- Attachment #1: Type: text/plain, Size: 2716 bytes --]
> diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
> index 32dac36ba..6f40c6321 100644
> --- a/tools/lib/bpf/libbpf.c
> +++ b/tools/lib/bpf/libbpf.c
> @@ -757,6 +757,7 @@ struct bpf_object {
> int arena_map_idx;
> void *arena_data;
> size_t arena_data_sz;
> + __u32 arena_data_off;
>
> void *jumptables_data;
> size_t jumptables_data_sz;
[ ... ]
> @@ -2991,10 +2992,14 @@ static int init_arena_map_data(struct bpf_object *obj, struct bpf_map *map,
> void *data, size_t data_sz)
> {
> const long page_sz = sysconf(_SC_PAGE_SIZE);
> + const size_t data_alloc_sz = roundup(data_sz, page_sz);
> + /* default offset into the arena, may be resized */
> + const long max_off_pages = 16;
> size_t mmap_sz;
> + long off_pages;
>
> mmap_sz = bpf_map_mmap_sz(map);
> - if (roundup(data_sz, page_sz) > mmap_sz) {
> + if (data_alloc_sz > mmap_sz) {
> pr_warn("elf: sec '%s': declared ARENA map size (%zu) is too small to hold global __arena variables of size %zu\n",
> sec_name, mmap_sz, data_sz);
> return -E2BIG;
> @@ -3006,6 +3011,17 @@ static int init_arena_map_data(struct bpf_object *obj, struct bpf_map *map,
> memcpy(obj->arena_data, data, data_sz);
> obj->arena_data_sz = data_sz;
>
> + /*
> + * find the largest offset for global arena variables
> + * where they still fit in the arena
> + */
> + for (off_pages = max_off_pages; off_pages > 0; off_pages >>= 1) {
> + if (off_pages * page_sz + data_alloc_sz <= mmap_sz)
> + break;
> + }
> +
> + obj->arena_data_off = off_pages * page_sz;
> ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Can the multiplication overflow __u32 on systems with large page sizes?
On a system configured with 1GB huge pages as the default,
16 pages * 1GB = 16GB, which exceeds 2^32. The result would be
truncated when stored in arena_data_off (declared as __u32), causing
the offset calculation to wrap around.
While this configuration is rare, the truncation would cause arena data
to be placed at an incorrect offset, breaking the intended behavior. The
three uses of arena_data_off (in init_arena_map_data,
bpf_program__record_reloc, and bpf_object__create_maps) would all use
the truncated value consistently, so if truncation occurs to 0, the code
would fall back to the original behavior of placing data at offset 0.
Should arena_data_off be changed to size_t or __u64 to handle larger
page sizes, or is __u32 considered sufficient for expected arena usage?
[ ... ]
---
AI reviewed your patch. Please fix the bug or email reply why it's not a bug.
See: https://github.com/kernel-patches/vmtest/blob/master/ci/claude/README.md
CI run summary: https://github.com/kernel-patches/bpf/actions/runs/19448868610
^ permalink raw reply [flat|nested] 9+ messages in thread* Re: [PATCH 3/4] libbpf: offset global arena data into the arena if possible
2025-11-18 0:26 ` bot+bpf-ci
@ 2025-11-18 2:26 ` Emil Tsalapatis
0 siblings, 0 replies; 9+ messages in thread
From: Emil Tsalapatis @ 2025-11-18 2:26 UTC (permalink / raw)
To: bot+bpf-ci
Cc: bpf, ast, daniel, john.fastabend, memxor, andrii, eddyz87,
yonghong.song, martin.lau, clm, ihor.solodrai
On Mon, Nov 17, 2025 at 7:26 PM <bot+bpf-ci@kernel.org> wrote:
>
> > diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
> > index 32dac36ba..6f40c6321 100644
> > --- a/tools/lib/bpf/libbpf.c
> > +++ b/tools/lib/bpf/libbpf.c
> > @@ -757,6 +757,7 @@ struct bpf_object {
> > int arena_map_idx;
> > void *arena_data;
> > size_t arena_data_sz;
> > + __u32 arena_data_off;
> >
> > void *jumptables_data;
> > size_t jumptables_data_sz;
>
> [ ... ]
>
> > @@ -2991,10 +2992,14 @@ static int init_arena_map_data(struct bpf_object *obj, struct bpf_map *map,
> > void *data, size_t data_sz)
> > {
> > const long page_sz = sysconf(_SC_PAGE_SIZE);
> > + const size_t data_alloc_sz = roundup(data_sz, page_sz);
> > + /* default offset into the arena, may be resized */
> > + const long max_off_pages = 16;
> > size_t mmap_sz;
> > + long off_pages;
> >
> > mmap_sz = bpf_map_mmap_sz(map);
> > - if (roundup(data_sz, page_sz) > mmap_sz) {
> > + if (data_alloc_sz > mmap_sz) {
> > pr_warn("elf: sec '%s': declared ARENA map size (%zu) is too small to hold global __arena variables of size %zu\n",
> > sec_name, mmap_sz, data_sz);
> > return -E2BIG;
> > @@ -3006,6 +3011,17 @@ static int init_arena_map_data(struct bpf_object *obj, struct bpf_map *map,
> > memcpy(obj->arena_data, data, data_sz);
> > obj->arena_data_sz = data_sz;
> >
> > + /*
> > + * find the largest offset for global arena variables
> > + * where they still fit in the arena
> > + */
> > + for (off_pages = max_off_pages; off_pages > 0; off_pages >>= 1) {
> > + if (off_pages * page_sz + data_alloc_sz <= mmap_sz)
> > + break;
> > + }
> > +
> > + obj->arena_data_off = off_pages * page_sz;
> > ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
>
> Can the multiplication overflow __u32 on systems with large page sizes?
> On a system configured with 1GB huge pages as the default,
> 16 pages * 1GB = 16GB, which exceeds 2^32. The result would be
> truncated when stored in arena_data_off (declared as __u32), causing
> the offset calculation to wrap around.
>
> While this configuration is rare, the truncation would cause arena data
> to be placed at an incorrect offset, breaking the intended behavior. The
> three uses of arena_data_off (in init_arena_map_data,
> bpf_program__record_reloc, and bpf_object__create_maps) would all use
> the truncated value consistently, so if truncation occurs to 0, the code
> would fall back to the original behavior of placing data at offset 0.
>
> Should arena_data_off be changed to size_t or __u64 to handle larger
> page sizes, or is __u32 considered sufficient for expected arena usage?
>
The off_pages * page_sz product is bounded by the loop right above it. If it
were > 4 GiB, then so would mmap_sz be, and arena map creation would fail.
> [ ... ]
>
>
> ---
> AI reviewed your patch. Please fix the bug or email reply why it's not a bug.
> See: https://github.com/kernel-patches/vmtest/blob/master/ci/claude/README.md
>
> CI run summary: https://github.com/kernel-patches/bpf/actions/runs/19448868610
^ permalink raw reply [flat|nested] 9+ messages in thread
* [PATCH 4/4] selftests/bpf: add tests for the arena offset of globals
2025-11-17 23:56 [PATCH 0/4] libbpf: move arena variables out of the zero page Emil Tsalapatis
` (2 preceding siblings ...)
2025-11-17 23:56 ` [PATCH 3/4] libbpf: offset global arena data into the arena if possible Emil Tsalapatis
@ 2025-11-17 23:56 ` Emil Tsalapatis
3 siblings, 0 replies; 9+ messages in thread
From: Emil Tsalapatis @ 2025-11-17 23:56 UTC (permalink / raw)
To: bpf
Cc: ast, daniel, john.fastabend, memxor, andrii, eddyz87,
yonghong.song, Emil Tsalapatis
Add tests for the new libbpf globals arena offset logic. The
tests cover all three cases: The globals being small enough
to be placed at the maximum possible offset, being as large as
the arena itself and being placed at the very beginning, and
requiring an intermediate offset into the arena.
Signed-off-by: Emil Tsalapatis <emil@etsalapatis.com>
---
.../selftests/bpf/prog_tests/verifier.c | 6 ++
.../bpf/progs/verifier_arena_globals1.c | 60 ++++++++++++++++++
.../bpf/progs/verifier_arena_globals2.c | 49 +++++++++++++++
.../bpf/progs/verifier_arena_globals3.c | 61 +++++++++++++++++++
4 files changed, 176 insertions(+)
create mode 100644 tools/testing/selftests/bpf/progs/verifier_arena_globals1.c
create mode 100644 tools/testing/selftests/bpf/progs/verifier_arena_globals2.c
create mode 100644 tools/testing/selftests/bpf/progs/verifier_arena_globals3.c
diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c
index 4b4b081b46cc..0c64fbc9a194 100644
--- a/tools/testing/selftests/bpf/prog_tests/verifier.c
+++ b/tools/testing/selftests/bpf/prog_tests/verifier.c
@@ -6,6 +6,9 @@
#include "verifier_and.skel.h"
#include "verifier_arena.skel.h"
#include "verifier_arena_large.skel.h"
+#include "verifier_arena_globals1.skel.h"
+#include "verifier_arena_globals2.skel.h"
+#include "verifier_arena_globals3.skel.h"
#include "verifier_array_access.skel.h"
#include "verifier_async_cb_context.skel.h"
#include "verifier_basic_stack.skel.h"
@@ -147,6 +150,9 @@ static void run_tests_aux(const char *skel_name,
void test_verifier_and(void) { RUN(verifier_and); }
void test_verifier_arena(void) { RUN(verifier_arena); }
void test_verifier_arena_large(void) { RUN(verifier_arena_large); }
+void test_verifier_arena_globals1(void) { RUN(verifier_arena_globals1); }
+void test_verifier_arena_globals2(void) { RUN(verifier_arena_globals2); }
+void test_verifier_arena_globals3(void) { RUN(verifier_arena_globals3); }
void test_verifier_basic_stack(void) { RUN(verifier_basic_stack); }
void test_verifier_bitfield_write(void) { RUN(verifier_bitfield_write); }
void test_verifier_bounds(void) { RUN(verifier_bounds); }
diff --git a/tools/testing/selftests/bpf/progs/verifier_arena_globals1.c b/tools/testing/selftests/bpf/progs/verifier_arena_globals1.c
new file mode 100644
index 000000000000..c9bfdc33e1f3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_arena_globals1.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#define BPF_NO_KFUNC_PROTOTYPES
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_experimental.h"
+#include "bpf_arena_common.h"
+#include "bpf_misc.h"
+
+#define ARENA_PAGES (64)
+
+/* Set in libbpf. */
+#define GLOBALS_PGOFF (16)
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARENA);
+ __uint(map_flags, BPF_F_MMAPABLE);
+ __uint(max_entries, ARENA_PAGES); /* Arena of 64 pages (standard offset is 16 pages) */
+#ifdef __TARGET_ARCH_arm64
+ __ulong(map_extra, (1ull << 32) | (~0u - __PAGE_SIZE * ARENA_PAGES + 1));
+#else
+ __ulong(map_extra, (1ull << 44) | (~0u - __PAGE_SIZE * ARENA_PAGES + 1));
+#endif
+} arena SEC(".maps");
+
+/*
+ * Global data small enough that we can apply the maximum
+ * offset into the arena. Userspace will also use this to
+ * ensure the offset doesn't unexpectedly change from
+ * under us.
+ */
+char __arena global_data[PAGE_SIZE][ARENA_PAGES - GLOBALS_PGOFF];
+
+SEC("syscall")
+__success __retval(0)
+int check_reserve1(void *ctx)
+{
+ __u8 __arena *guard, *globals;
+ int ret;
+
+#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+ guard = (void __arena *)arena_base(&arena);
+ globals = (void __arena *)(arena_base(&arena) + GLOBALS_PGOFF * PAGE_SIZE);
+
+ /* Reserve the region we've offset the globals by. */
+ ret = bpf_arena_reserve_pages(&arena, guard, GLOBALS_PGOFF);
+ if (ret)
+ return 1;
+
+ /* Make sure the globals are placed GLOBALS_PGOFF pages in. */
+ ret = bpf_arena_reserve_pages(&arena, globals, 1);
+ if (!ret)
+ return 2;
+#endif
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_arena_globals2.c b/tools/testing/selftests/bpf/progs/verifier_arena_globals2.c
new file mode 100644
index 000000000000..79fd37e5783f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_arena_globals2.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#define BPF_NO_KFUNC_PROTOTYPES
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+#include "bpf_arena_common.h"
+
+#define ARENA_PAGES (32)
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARENA);
+ __uint(map_flags, BPF_F_MMAPABLE);
+ __uint(max_entries, ARENA_PAGES); /* Arena of 32 pages (standard offset is 16 pages) */
+#ifdef __TARGET_ARCH_arm64
+ __ulong(map_extra, (1ull << 32) | (~0u - __PAGE_SIZE * ARENA_PAGES + 1));
+#else
+ __ulong(map_extra, (1ull << 44) | (~0u - __PAGE_SIZE * ARENA_PAGES + 1));
+#endif
+} arena SEC(".maps");
+
+/*
+ * Fill the entire arena with global data.
+ * The offset into the arena should be 0.
+ */
+char __arena global_data[PAGE_SIZE][ARENA_PAGES];
+
+SEC("syscall")
+__success __retval(0)
+int check_reserve2(void *ctx)
+{
+ void __arena *guard;
+ int ret;
+
+#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+ guard = (void __arena *)arena_base(&arena);
+
+ /* Make sure the data at offset 0 case is properly handled. */
+ ret = bpf_arena_reserve_pages(&arena, guard, 1);
+ if (!ret)
+ return 1;
+#endif
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_arena_globals3.c b/tools/testing/selftests/bpf/progs/verifier_arena_globals3.c
new file mode 100644
index 000000000000..cad29610e9be
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_arena_globals3.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#define BPF_NO_KFUNC_PROTOTYPES
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+#include "bpf_arena_common.h"
+
+#define ARENA_PAGES (32)
+
+#define ARENA_AVAIL_PAGES (6)
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARENA);
+ __uint(map_flags, BPF_F_MMAPABLE);
+ __uint(max_entries, ARENA_PAGES); /* Arena of 32 pages (standard offset is 16 pages) */
+#ifdef __TARGET_ARCH_arm64
+ __ulong(map_extra, (1ull << 32) | (~0u - __PAGE_SIZE * ARENA_PAGES + 1));
+#else
+ __ulong(map_extra, (1ull << 44) | (~0u - __PAGE_SIZE * ARENA_PAGES + 1));
+#endif
+} arena SEC(".maps");
+
+/*
+ * Enough global data to fill most of the arena. Force libbpf to
+ * adjust the offset into the arena enough for the data to fit.
+ */
+
+char __arena global_data[PAGE_SIZE][ARENA_PAGES - ARENA_AVAIL_PAGES];
+
+SEC("syscall")
+__success __retval(0)
+int check_reserve3(void *ctx)
+{
+ void __arena *guard, *globals;
+ int ret;
+
+#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+ guard = (void __arena *)arena_base(&arena);
+ globals = (void __arena *)(arena_base(&arena) + 4 * PAGE_SIZE);
+
+ /*
+ * The data should be offset 4 pages in (the largest
+ * possible power of 2 that still leaves enough room
+ * to the global data).
+ */
+ ret = bpf_arena_reserve_pages(&arena, guard, 4);
+ if (ret)
+ return 1;
+
+ ret = bpf_arena_reserve_pages(&arena, globals, 1);
+ if (!ret)
+ return 2;
+#endif
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
--
2.49.0
^ permalink raw reply related [flat|nested] 9+ messages in thread