* [PATCH bpf-next 1/3] libbpf: clean up and refactor BTF fixup step
2022-10-18 3:56 [PATCH bpf-next 0/3] libbpf: support non-mmap()'able data sections Andrii Nakryiko
@ 2022-10-18 3:56 ` Andrii Nakryiko
2022-10-18 18:47 ` sdf
2022-10-18 3:56 ` [PATCH bpf-next 2/3] libbpf: only add BPF_F_MMAPABLE flag for data maps with global vars Andrii Nakryiko
2022-10-18 3:56 ` [PATCH bpf-next 3/3] libbpf: add non-mmapable data section selftest Andrii Nakryiko
2 siblings, 1 reply; 12+ messages in thread
From: Andrii Nakryiko @ 2022-10-18 3:56 UTC (permalink / raw)
To: bpf, ast, daniel; +Cc: andrii, kernel-team
Refactor libbpf's BTF fixup step during BPF object open phase. The only
functional change is that we now ignore BTF_VAR_GLOBAL_EXTERN variables
during fix up, not just BTF_VAR_STATIC ones, which shouldn't cause any
change in behavior as there shouldn't be any extern variable in data
sections for valid BPF object anyways.
Otherwise it's just collapsing two functions that have no reason to be
separate, and switching find_elf_var_offset() helper to return entire
symbol pointer, not just its offset. This will be used by next patch to
get ELF symbol visibility.
While refactoring, also "normalize" debug messages inside
btf_fixup_datasec() to follow general libbpf style and print out data
section name consistently, where it's available.
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
---
tools/lib/bpf/libbpf.c | 95 ++++++++++++++++++------------------------
1 file changed, 41 insertions(+), 54 deletions(-)
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 8c3f236c86e4..a25eb8fe7bf2 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -1461,15 +1461,12 @@ static int find_elf_sec_sz(const struct bpf_object *obj, const char *name, __u32
return -ENOENT;
}
-static int find_elf_var_offset(const struct bpf_object *obj, const char *name, __u32 *off)
+static Elf64_Sym *find_elf_var_sym(const struct bpf_object *obj, const char *name)
{
Elf_Data *symbols = obj->efile.symbols;
const char *sname;
size_t si;
- if (!name || !off)
- return -EINVAL;
-
for (si = 0; si < symbols->d_size / sizeof(Elf64_Sym); si++) {
Elf64_Sym *sym = elf_sym_by_idx(obj, si);
@@ -1483,15 +1480,13 @@ static int find_elf_var_offset(const struct bpf_object *obj, const char *name, _
sname = elf_sym_str(obj, sym->st_name);
if (!sname) {
pr_warn("failed to get sym name string for var %s\n", name);
- return -EIO;
- }
- if (strcmp(name, sname) == 0) {
- *off = sym->st_value;
- return 0;
+ return ERR_PTR(-EIO);
}
+ if (strcmp(name, sname) == 0)
+ return sym;
}
- return -ENOENT;
+ return ERR_PTR(-ENOENT);
}
static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
@@ -2850,57 +2845,62 @@ static int compare_vsi_off(const void *_a, const void *_b)
static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
struct btf_type *t)
{
- __u32 size = 0, off = 0, i, vars = btf_vlen(t);
- const char *name = btf__name_by_offset(btf, t->name_off);
- const struct btf_type *t_var;
+ __u32 size = 0, i, vars = btf_vlen(t);
+ const char *sec_name = btf__name_by_offset(btf, t->name_off);
struct btf_var_secinfo *vsi;
- const struct btf_var *var;
- int ret;
+ int err;
- if (!name) {
+ if (!sec_name) {
pr_debug("No name found in string section for DATASEC kind.\n");
return -ENOENT;
}
- /* .extern datasec size and var offsets were set correctly during
- * extern collection step, so just skip straight to sorting variables
+ /* extern-backing datasecs (.ksyms, .kconfig) have their size and
+ * variable offsets set at the previous step, so we skip any fixups
+ * for such sections
*/
if (t->size)
goto sort_vars;
- ret = find_elf_sec_sz(obj, name, &size);
- if (ret || !size) {
- pr_debug("Invalid size for section %s: %u bytes\n", name, size);
+ err = find_elf_sec_sz(obj, sec_name, &size);
+ if (err || !size) {
+ pr_debug("sec '%s': invalid size %u bytes\n", sec_name, size);
return -ENOENT;
}
t->size = size;
for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
+ const struct btf_type *t_var;
+ struct btf_var *var;
+ const char *var_name;
+ Elf64_Sym *sym;
+
t_var = btf__type_by_id(btf, vsi->type);
if (!t_var || !btf_is_var(t_var)) {
- pr_debug("Non-VAR type seen in section %s\n", name);
+ pr_debug("sec '%s': unexpected non-VAR type found\n", sec_name);
return -EINVAL;
}
var = btf_var(t_var);
- if (var->linkage == BTF_VAR_STATIC)
+ if (var->linkage == BTF_VAR_STATIC || var->linkage == BTF_VAR_GLOBAL_EXTERN)
continue;
- name = btf__name_by_offset(btf, t_var->name_off);
- if (!name) {
- pr_debug("No name found in string section for VAR kind\n");
+ var_name = btf__name_by_offset(btf, t_var->name_off);
+ if (!var_name) {
+ pr_debug("sec '%s': failed to find name of DATASEC's member #%d\n",
+ sec_name, i);
return -ENOENT;
}
- ret = find_elf_var_offset(obj, name, &off);
- if (ret) {
- pr_debug("No offset found in symbol table for VAR %s\n",
- name);
+ sym = find_elf_var_sym(obj, var_name);
+ if (IS_ERR(sym)) {
+ pr_debug("sec '%s': failed to find ELF symbol for VAR '%s'\n",
+ sec_name, var_name);
return -ENOENT;
}
- vsi->offset = off;
+ vsi->offset = sym->st_value;
}
sort_vars:
@@ -2908,13 +2908,16 @@ static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
return 0;
}
-static int btf_finalize_data(struct bpf_object *obj, struct btf *btf)
+static int bpf_object_fixup_btf(struct bpf_object *obj)
{
- int err = 0;
- __u32 i, n = btf__type_cnt(btf);
+ int i, n, err = 0;
+ if (!obj->btf)
+ return 0;
+
+ n = btf__type_cnt(obj->btf);
for (i = 1; i < n; i++) {
- struct btf_type *t = btf_type_by_id(btf, i);
+ struct btf_type *t = btf_type_by_id(obj->btf, i);
/* Loader needs to fix up some of the things compiler
* couldn't get its hands on while emitting BTF. This
@@ -2922,28 +2925,12 @@ static int btf_finalize_data(struct bpf_object *obj, struct btf *btf)
* the info from the ELF itself for this purpose.
*/
if (btf_is_datasec(t)) {
- err = btf_fixup_datasec(obj, btf, t);
+ err = btf_fixup_datasec(obj, obj->btf, t);
if (err)
- break;
+ return err;
}
}
- return libbpf_err(err);
-}
-
-static int bpf_object__finalize_btf(struct bpf_object *obj)
-{
- int err;
-
- if (!obj->btf)
- return 0;
-
- err = btf_finalize_data(obj, obj->btf);
- if (err) {
- pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err);
- return err;
- }
-
return 0;
}
@@ -7233,7 +7220,7 @@ static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf,
err = err ? : bpf_object__check_endianness(obj);
err = err ? : bpf_object__elf_collect(obj);
err = err ? : bpf_object__collect_externs(obj);
- err = err ? : bpf_object__finalize_btf(obj);
+ err = err ? : bpf_object_fixup_btf(obj);
err = err ? : bpf_object__init_maps(obj, opts);
err = err ? : bpf_object_init_progs(obj, opts);
err = err ? : bpf_object__collect_relos(obj);
--
2.30.2
^ permalink raw reply related [flat|nested] 12+ messages in thread
* Re: [PATCH bpf-next 1/3] libbpf: clean up and refactor BTF fixup step
2022-10-18 3:56 ` [PATCH bpf-next 1/3] libbpf: clean up and refactor BTF fixup step Andrii Nakryiko
@ 2022-10-18 18:47 ` sdf
2022-10-18 23:18 ` Andrii Nakryiko
0 siblings, 1 reply; 12+ messages in thread
From: sdf @ 2022-10-18 18:47 UTC (permalink / raw)
To: Andrii Nakryiko; +Cc: bpf, ast, daniel, kernel-team
On 10/17, Andrii Nakryiko wrote:
> Refactor libbpf's BTF fixup step during BPF object open phase. The only
> functional change is that we now ignore BTF_VAR_GLOBAL_EXTERN variables
> during fix up, not just BTF_VAR_STATIC ones, which shouldn't cause any
> change in behavior as there shouldn't be any extern variable in data
> sections for valid BPF object anyways.
> Otherwise it's just collapsing two functions that have no reason to be
> separate, and switching find_elf_var_offset() helper to return entire
> symbol pointer, not just its offset. This will be used by next patch to
> get ELF symbol visibility.
> While refactoring, also "normalize" debug messages inside
> btf_fixup_datasec() to follow general libbpf style and print out data
> section name consistently, where it's available.
> Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Stanislav Fomichev <sdf@google.com>
Left a couple of questions below.
> ---
> tools/lib/bpf/libbpf.c | 95 ++++++++++++++++++------------------------
> 1 file changed, 41 insertions(+), 54 deletions(-)
> diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
> index 8c3f236c86e4..a25eb8fe7bf2 100644
> --- a/tools/lib/bpf/libbpf.c
> +++ b/tools/lib/bpf/libbpf.c
> @@ -1461,15 +1461,12 @@ static int find_elf_sec_sz(const struct
> bpf_object *obj, const char *name, __u32
> return -ENOENT;
> }
> -static int find_elf_var_offset(const struct bpf_object *obj, const char
> *name, __u32 *off)
> +static Elf64_Sym *find_elf_var_sym(const struct bpf_object *obj, const
> char *name)
> {
> Elf_Data *symbols = obj->efile.symbols;
> const char *sname;
> size_t si;
> - if (!name || !off)
> - return -EINVAL;
> -
> for (si = 0; si < symbols->d_size / sizeof(Elf64_Sym); si++) {
> Elf64_Sym *sym = elf_sym_by_idx(obj, si);
> @@ -1483,15 +1480,13 @@ static int find_elf_var_offset(const struct
> bpf_object *obj, const char *name, _
> sname = elf_sym_str(obj, sym->st_name);
> if (!sname) {
> pr_warn("failed to get sym name string for var %s\n", name);
> - return -EIO;
> - }
> - if (strcmp(name, sname) == 0) {
> - *off = sym->st_value;
> - return 0;
> + return ERR_PTR(-EIO);
> }
> + if (strcmp(name, sname) == 0)
> + return sym;
> }
> - return -ENOENT;
> + return ERR_PTR(-ENOENT);
> }
> static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
> @@ -2850,57 +2845,62 @@ static int compare_vsi_off(const void *_a, const
> void *_b)
> static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
> struct btf_type *t)
> {
> - __u32 size = 0, off = 0, i, vars = btf_vlen(t);
> - const char *name = btf__name_by_offset(btf, t->name_off);
> - const struct btf_type *t_var;
> + __u32 size = 0, i, vars = btf_vlen(t);
> + const char *sec_name = btf__name_by_offset(btf, t->name_off);
> struct btf_var_secinfo *vsi;
> - const struct btf_var *var;
> - int ret;
> + int err;
> - if (!name) {
> + if (!sec_name) {
> pr_debug("No name found in string section for DATASEC kind.\n");
> return -ENOENT;
> }
> - /* .extern datasec size and var offsets were set correctly during
> - * extern collection step, so just skip straight to sorting variables
> + /* extern-backing datasecs (.ksyms, .kconfig) have their size and
> + * variable offsets set at the previous step, so we skip any fixups
> + * for such sections
> */
> if (t->size)
> goto sort_vars;
> - ret = find_elf_sec_sz(obj, name, &size);
> - if (ret || !size) {
> - pr_debug("Invalid size for section %s: %u bytes\n", name, size);
> + err = find_elf_sec_sz(obj, sec_name, &size);
> + if (err || !size) {
> + pr_debug("sec '%s': invalid size %u bytes\n", sec_name, size);
nit: do we want to log err instead here? it seems like the size will be
zero on error anyway, so probably not worth logging it?
> return -ENOENT;
> }
> t->size = size;
> for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
> + const struct btf_type *t_var;
> + struct btf_var *var;
> + const char *var_name;
> + Elf64_Sym *sym;
> +
> t_var = btf__type_by_id(btf, vsi->type);
> if (!t_var || !btf_is_var(t_var)) {
> - pr_debug("Non-VAR type seen in section %s\n", name);
> + pr_debug("sec '%s': unexpected non-VAR type found\n", sec_name);
> return -EINVAL;
> }
> var = btf_var(t_var);
> - if (var->linkage == BTF_VAR_STATIC)
> + if (var->linkage == BTF_VAR_STATIC || var->linkage ==
> BTF_VAR_GLOBAL_EXTERN)
> continue;
> - name = btf__name_by_offset(btf, t_var->name_off);
> - if (!name) {
> - pr_debug("No name found in string section for VAR kind\n");
> + var_name = btf__name_by_offset(btf, t_var->name_off);
> + if (!var_name) {
> + pr_debug("sec '%s': failed to find name of DATASEC's member #%d\n",
> + sec_name, i);
> return -ENOENT;
> }
> - ret = find_elf_var_offset(obj, name, &off);
> - if (ret) {
> - pr_debug("No offset found in symbol table for VAR %s\n",
> - name);
> + sym = find_elf_var_sym(obj, var_name);
> + if (IS_ERR(sym)) {
> + pr_debug("sec '%s': failed to find ELF symbol for VAR '%s'\n",
> + sec_name, var_name);
> return -ENOENT;
> }
> - vsi->offset = off;
> + vsi->offset = sym->st_value;
> }
> sort_vars:
> @@ -2908,13 +2908,16 @@ static int btf_fixup_datasec(struct bpf_object
> *obj, struct btf *btf,
> return 0;
> }
> -static int btf_finalize_data(struct bpf_object *obj, struct btf *btf)
> +static int bpf_object_fixup_btf(struct bpf_object *obj)
> {
> - int err = 0;
> - __u32 i, n = btf__type_cnt(btf);
> + int i, n, err = 0;
> + if (!obj->btf)
> + return 0;
> +
> + n = btf__type_cnt(obj->btf);
qq: why do s/__u32/int/ here? btf__type_cnt seems to be returning u32?
> for (i = 1; i < n; i++) {
> - struct btf_type *t = btf_type_by_id(btf, i);
> + struct btf_type *t = btf_type_by_id(obj->btf, i);
> /* Loader needs to fix up some of the things compiler
> * couldn't get its hands on while emitting BTF. This
> @@ -2922,28 +2925,12 @@ static int btf_finalize_data(struct bpf_object
> *obj, struct btf *btf)
> * the info from the ELF itself for this purpose.
> */
> if (btf_is_datasec(t)) {
> - err = btf_fixup_datasec(obj, btf, t);
> + err = btf_fixup_datasec(obj, obj->btf, t);
> if (err)
> - break;
> + return err;
> }
> }
> - return libbpf_err(err);
> -}
> -
> -static int bpf_object__finalize_btf(struct bpf_object *obj)
> -{
> - int err;
> -
> - if (!obj->btf)
> - return 0;
> -
> - err = btf_finalize_data(obj, obj->btf);
> - if (err) {
> - pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err);
> - return err;
> - }
> -
> return 0;
> }
> @@ -7233,7 +7220,7 @@ static struct bpf_object *bpf_object_open(const
> char *path, const void *obj_buf,
> err = err ? : bpf_object__check_endianness(obj);
> err = err ? : bpf_object__elf_collect(obj);
> err = err ? : bpf_object__collect_externs(obj);
> - err = err ? : bpf_object__finalize_btf(obj);
> + err = err ? : bpf_object_fixup_btf(obj);
> err = err ? : bpf_object__init_maps(obj, opts);
> err = err ? : bpf_object_init_progs(obj, opts);
> err = err ? : bpf_object__collect_relos(obj);
> --
> 2.30.2
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH bpf-next 1/3] libbpf: clean up and refactor BTF fixup step
2022-10-18 18:47 ` sdf
@ 2022-10-18 23:18 ` Andrii Nakryiko
2022-10-19 0:15 ` Stanislav Fomichev
0 siblings, 1 reply; 12+ messages in thread
From: Andrii Nakryiko @ 2022-10-18 23:18 UTC (permalink / raw)
To: sdf; +Cc: Andrii Nakryiko, bpf, ast, daniel, kernel-team
On Tue, Oct 18, 2022 at 11:47 AM <sdf@google.com> wrote:
>
> On 10/17, Andrii Nakryiko wrote:
> > Refactor libbpf's BTF fixup step during BPF object open phase. The only
> > functional change is that we now ignore BTF_VAR_GLOBAL_EXTERN variables
> > during fix up, not just BTF_VAR_STATIC ones, which shouldn't cause any
> > change in behavior as there shouldn't be any extern variable in data
> > sections for valid BPF object anyways.
>
> > Otherwise it's just collapsing two functions that have no reason to be
> > separate, and switching find_elf_var_offset() helper to return entire
> > symbol pointer, not just its offset. This will be used by next patch to
> > get ELF symbol visibility.
>
> > While refactoring, also "normalize" debug messages inside
> > btf_fixup_datasec() to follow general libbpf style and print out data
> > section name consistently, where it's available.
>
> > Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
>
> Acked-by: Stanislav Fomichev <sdf@google.com>
>
> Left a couple of questions below.
>
> > ---
> > tools/lib/bpf/libbpf.c | 95 ++++++++++++++++++------------------------
> > 1 file changed, 41 insertions(+), 54 deletions(-)
>
[...]
> > - /* .extern datasec size and var offsets were set correctly during
> > - * extern collection step, so just skip straight to sorting variables
> > + /* extern-backing datasecs (.ksyms, .kconfig) have their size and
> > + * variable offsets set at the previous step, so we skip any fixups
> > + * for such sections
> > */
> > if (t->size)
> > goto sort_vars;
>
> > - ret = find_elf_sec_sz(obj, name, &size);
> > - if (ret || !size) {
> > - pr_debug("Invalid size for section %s: %u bytes\n", name, size);
> > + err = find_elf_sec_sz(obj, sec_name, &size);
> > + if (err || !size) {
> > + pr_debug("sec '%s': invalid size %u bytes\n", sec_name, size);
>
> nit: do we want to log err instead here? it seems like the size will be
> zero on error anyway, so probably not worth logging it?
hmm.. I mostly just preserved the original message content. Error can
be zero, and size can be zero, so don't know, we can log both or none?
Section name will probably be more important in practice.
>
> > return -ENOENT;
> > }
>
> > t->size = size;
>
> > for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
> > + const struct btf_type *t_var;
> > + struct btf_var *var;
> > + const char *var_name;
> > + Elf64_Sym *sym;
> > +
[...]
> > -static int btf_finalize_data(struct bpf_object *obj, struct btf *btf)
> > +static int bpf_object_fixup_btf(struct bpf_object *obj)
> > {
> > - int err = 0;
> > - __u32 i, n = btf__type_cnt(btf);
> > + int i, n, err = 0;
>
> > + if (!obj->btf)
> > + return 0;
> > +
> > + n = btf__type_cnt(obj->btf);
>
> qq: why do s/__u32/int/ here? btf__type_cnt seems to be returning u32?
mostly to consolidate all the variables above into single short line.
BTF IDs can't be so big to not fit into int, so it's safe to use
signed int everywhere.
>
> > for (i = 1; i < n; i++) {
> > - struct btf_type *t = btf_type_by_id(btf, i);
> > + struct btf_type *t = btf_type_by_id(obj->btf, i);
>
> > /* Loader needs to fix up some of the things compiler
> > * couldn't get its hands on while emitting BTF. This
[...]
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH bpf-next 1/3] libbpf: clean up and refactor BTF fixup step
2022-10-18 23:18 ` Andrii Nakryiko
@ 2022-10-19 0:15 ` Stanislav Fomichev
0 siblings, 0 replies; 12+ messages in thread
From: Stanislav Fomichev @ 2022-10-19 0:15 UTC (permalink / raw)
To: Andrii Nakryiko; +Cc: Andrii Nakryiko, bpf, ast, daniel, kernel-team
On Tue, Oct 18, 2022 at 4:19 PM Andrii Nakryiko
<andrii.nakryiko@gmail.com> wrote:
>
> On Tue, Oct 18, 2022 at 11:47 AM <sdf@google.com> wrote:
> >
> > On 10/17, Andrii Nakryiko wrote:
> > > Refactor libbpf's BTF fixup step during BPF object open phase. The only
> > > functional change is that we now ignore BTF_VAR_GLOBAL_EXTERN variables
> > > during fix up, not just BTF_VAR_STATIC ones, which shouldn't cause any
> > > change in behavior as there shouldn't be any extern variable in data
> > > sections for valid BPF object anyways.
> >
> > > Otherwise it's just collapsing two functions that have no reason to be
> > > separate, and switching find_elf_var_offset() helper to return entire
> > > symbol pointer, not just its offset. This will be used by next patch to
> > > get ELF symbol visibility.
> >
> > > While refactoring, also "normalize" debug messages inside
> > > btf_fixup_datasec() to follow general libbpf style and print out data
> > > section name consistently, where it's available.
> >
> > > Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
> >
> > Acked-by: Stanislav Fomichev <sdf@google.com>
> >
> > Left a couple of questions below.
> >
> > > ---
> > > tools/lib/bpf/libbpf.c | 95 ++++++++++++++++++------------------------
> > > 1 file changed, 41 insertions(+), 54 deletions(-)
> >
>
> [...]
>
> > > - /* .extern datasec size and var offsets were set correctly during
> > > - * extern collection step, so just skip straight to sorting variables
> > > + /* extern-backing datasecs (.ksyms, .kconfig) have their size and
> > > + * variable offsets set at the previous step, so we skip any fixups
> > > + * for such sections
> > > */
> > > if (t->size)
> > > goto sort_vars;
> >
> > > - ret = find_elf_sec_sz(obj, name, &size);
> > > - if (ret || !size) {
> > > - pr_debug("Invalid size for section %s: %u bytes\n", name, size);
> > > + err = find_elf_sec_sz(obj, sec_name, &size);
> > > + if (err || !size) {
> > > + pr_debug("sec '%s': invalid size %u bytes\n", sec_name, size);
> >
> > nit: do we want to log err instead here? it seems like the size will be
> > zero on error anyway, so probably not worth logging it?
>
> hmm.. I mostly just preserved the original message content. Error can
> be zero, and size can be zero, so don't know, we can log both or none?
> Section name will probably be more important in practice.
Logging both is probably the easiest? Let's have them just in case,
shouldn't hurt; I'm not sure how relevant that really is..
> >
> > > return -ENOENT;
> > > }
> >
> > > t->size = size;
> >
> > > for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
> > > + const struct btf_type *t_var;
> > > + struct btf_var *var;
> > > + const char *var_name;
> > > + Elf64_Sym *sym;
> > > +
>
> [...]
>
> > > -static int btf_finalize_data(struct bpf_object *obj, struct btf *btf)
> > > +static int bpf_object_fixup_btf(struct bpf_object *obj)
> > > {
> > > - int err = 0;
> > > - __u32 i, n = btf__type_cnt(btf);
> > > + int i, n, err = 0;
> >
> > > + if (!obj->btf)
> > > + return 0;
> > > +
> > > + n = btf__type_cnt(obj->btf);
> >
> > qq: why do s/__u32/int/ here? btf__type_cnt seems to be returning u32?
>
> mostly to consolidate all the variables above into single short line.
> BTF IDs can't be so big to not fit into int, so it's safe to use
> signed int everywhere.
SG, thanks!
> >
> > > for (i = 1; i < n; i++) {
> > > - struct btf_type *t = btf_type_by_id(btf, i);
> > > + struct btf_type *t = btf_type_by_id(obj->btf, i);
> >
> > > /* Loader needs to fix up some of the things compiler
> > > * couldn't get its hands on while emitting BTF. This
>
> [...]
^ permalink raw reply [flat|nested] 12+ messages in thread
* [PATCH bpf-next 2/3] libbpf: only add BPF_F_MMAPABLE flag for data maps with global vars
2022-10-18 3:56 [PATCH bpf-next 0/3] libbpf: support non-mmap()'able data sections Andrii Nakryiko
2022-10-18 3:56 ` [PATCH bpf-next 1/3] libbpf: clean up and refactor BTF fixup step Andrii Nakryiko
@ 2022-10-18 3:56 ` Andrii Nakryiko
2022-10-18 18:52 ` sdf
2022-10-18 3:56 ` [PATCH bpf-next 3/3] libbpf: add non-mmapable data section selftest Andrii Nakryiko
2 siblings, 1 reply; 12+ messages in thread
From: Andrii Nakryiko @ 2022-10-18 3:56 UTC (permalink / raw)
To: bpf, ast, daniel; +Cc: andrii, kernel-team
Teach libbpf to not add BPF_F_MMAPABLE flag unnecessarily for ARRAY maps
that are backing data sections, if such data sections don't expose any
variables to user-space. Exposed variables are those that have
STB_GLOBAL or STB_WEAK ELF binding and correspond to BTF VAR's
BTF_VAR_GLOBAL_ALLOCATED linkage.
The overall idea is that if some data section doesn't have any variable that
is exposed through BPF skeleton, then there is no reason to make such
BPF array mmapable. Making BPF array mmapable is not a free no-op
action, because BPF verifier doesn't allow users to put special objects
(such as BPF spin locks, RB tree nodes, linked list nodes, kptrs, etc;
anything that has a sensitive internal state that should not be modified
arbitrarily from user space) into mmapable arrays, as there is no way to
prevent user space from corrupting such sensitive state through direct
memory access through memory-mapped region.
By making sure that libbpf doesn't add BPF_F_MMAPABLE flag to BPF array
maps corresponding to data sections that only have static variables
(which are not supposed to be visible to user space according to libbpf
and BPF skeleton rules), users now can have spinlocks, kptrs, etc in
either default .bss/.data sections or custom .data.* sections (assuming
there are no global variables in such sections).
The only possible hiccup with this approach is the need to use global
variables during BPF static linking, even if it's not intended to be
shared with user space through BPF skeleton. To allow such scenarios,
extend libbpf's STV_HIDDEN ELF visibility attribute handling to
variables. Libbpf is already treating global hidden BPF subprograms as
static subprograms and adjusts BTF accordingly to make BPF verifier
verify such subprograms as static subprograms with preserving entire BPF
verifier state between subprog calls. This patch teaches libbpf to treat
global hidden variables as static ones and adjust BTF information
accordingly as well. This allows to share variables between multiple
object files during static linking, but still keep them internal to BPF
program and not get them exposed through BPF skeleton.
Note, that if the user has some advanced scenario where they absolutely
need BPF_F_MMAPABLE flag on .data/.bss/.rodata BPF array map despite
only having static variables, they still can achieve this by forcing it
through explicit bpf_map__set_map_flags() API.
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
---
tools/lib/bpf/libbpf.c | 95 ++++++++++++++++++++++++++++++++++--------
1 file changed, 77 insertions(+), 18 deletions(-)
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index a25eb8fe7bf2..c25d7a4f5704 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -1577,7 +1577,38 @@ static char *internal_map_name(struct bpf_object *obj, const char *real_name)
}
static int
-bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map);
+map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map);
+
+/* Internal BPF map is mmap()'able only if at least one of corresponding
+ * DATASEC's VARs are to be exposed through BPF skeleton. I.e., it's a GLOBAL
+ * variable and it's not marked as __hidden (which turns it into, effectively,
+ * a STATIC variable).
+ */
+static bool map_is_mmapable(struct bpf_object *obj, struct bpf_map *map)
+{
+ const struct btf_type *t, *vt;
+ struct btf_var_secinfo *vsi;
+ int i, n;
+
+ if (!map->btf_value_type_id)
+ return false;
+
+ t = btf__type_by_id(obj->btf, map->btf_value_type_id);
+ if (!btf_is_datasec(t))
+ return false;
+
+ vsi = btf_var_secinfos(t);
+ for (i = 0, n = btf_vlen(t); i < n; i++, vsi++) {
+ vt = btf__type_by_id(obj->btf, vsi->type);
+ if (!btf_is_var(vt))
+ continue;
+
+ if (btf_var(vt)->linkage != BTF_VAR_STATIC)
+ return true;
+ }
+
+ return false;
+}
static int
bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
@@ -1609,7 +1640,12 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
def->max_entries = 1;
def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG
? BPF_F_RDONLY_PROG : 0;
- def->map_flags |= BPF_F_MMAPABLE;
+
+ /* failures are fine because of maps like .rodata.str1.1 */
+ (void) map_fill_btf_type_info(obj, map);
+
+ if (map_is_mmapable(obj, map))
+ def->map_flags |= BPF_F_MMAPABLE;
pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
map->name, map->sec_idx, map->sec_offset, def->map_flags);
@@ -1626,9 +1662,6 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
return err;
}
- /* failures are fine because of maps like .rodata.str1.1 */
- (void) bpf_map_find_btf_info(obj, map);
-
if (data)
memcpy(map->mmaped, data, data_sz);
@@ -2540,7 +2573,7 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
fill_map_from_def(map->inner_map, &inner_def);
}
- err = bpf_map_find_btf_info(obj, map);
+ err = map_fill_btf_type_info(obj, map);
if (err)
return err;
@@ -2848,6 +2881,7 @@ static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
__u32 size = 0, i, vars = btf_vlen(t);
const char *sec_name = btf__name_by_offset(btf, t->name_off);
struct btf_var_secinfo *vsi;
+ bool fixup_offsets = false;
int err;
if (!sec_name) {
@@ -2855,20 +2889,33 @@ static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
return -ENOENT;
}
- /* extern-backing datasecs (.ksyms, .kconfig) have their size and
- * variable offsets set at the previous step, so we skip any fixups
- * for such sections
+ /* Extern-backing datasecs (.ksyms, .kconfig) have their size and
+ * variable offsets set at the previous step. Further, not every
+ * extern BTF VAR has corresponding ELF symbol preserved, so we skip
+ * all fixups altogether for such sections and go straight to sorting
+ * VARs within their DATASEC.
*/
- if (t->size)
+ if (strcmp(sec_name, KCONFIG_SEC) == 0 || strcmp(sec_name, KSYMS_SEC) == 0)
goto sort_vars;
- err = find_elf_sec_sz(obj, sec_name, &size);
- if (err || !size) {
- pr_debug("sec '%s': invalid size %u bytes\n", sec_name, size);
- return -ENOENT;
- }
+ /* Clang leaves DATASEC size and VAR offsets as zeroes, so we need to
+ * fix this up. But BPF static linker already fixes this up and fills
+ * all the sizes and offsets during static linking. So this step has
+ * to be optional. But the STV_HIDDEN handling is non-optional for any
+ * non-extern DATASEC, so the variable fixup loop below handles both
+ * functions at the same time, paying the cost of BTF VAR <-> ELF
+ * symbol matching just once.
+ */
+ if (t->size == 0) {
+ err = find_elf_sec_sz(obj, sec_name, &size);
+ if (err || !size) {
+ pr_debug("sec '%s': invalid size %u bytes\n", sec_name, size);
+ return -ENOENT;
+ }
- t->size = size;
+ t->size = size;
+ fixup_offsets = true;
+ }
for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
const struct btf_type *t_var;
@@ -2900,7 +2947,19 @@ static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
return -ENOENT;
}
- vsi->offset = sym->st_value;
+ if (fixup_offsets)
+ vsi->offset = sym->st_value;
+
+ /* if variable is a global/weak symbol, but has restricted
+ * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF VAR
+ * as static. This follows similar logic for functions (BPF
+ * subprogs) and influences libbpf's further decisions about
+ * whether to make global data BPF array maps as
+ * BPF_F_MMAPABLE.
+ */
+ if (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
+ || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL)
+ var->linkage = BTF_VAR_STATIC;
}
sort_vars:
@@ -4222,7 +4281,7 @@ bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Dat
return 0;
}
-static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
+static int map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map)
{
int id;
--
2.30.2
^ permalink raw reply related [flat|nested] 12+ messages in thread
* Re: [PATCH bpf-next 2/3] libbpf: only add BPF_F_MMAPABLE flag for data maps with global vars
2022-10-18 3:56 ` [PATCH bpf-next 2/3] libbpf: only add BPF_F_MMAPABLE flag for data maps with global vars Andrii Nakryiko
@ 2022-10-18 18:52 ` sdf
2022-10-18 23:20 ` Andrii Nakryiko
0 siblings, 1 reply; 12+ messages in thread
From: sdf @ 2022-10-18 18:52 UTC (permalink / raw)
To: Andrii Nakryiko; +Cc: bpf, ast, daniel, kernel-team
On 10/17, Andrii Nakryiko wrote:
> Teach libbpf to not add BPF_F_MMAPABLE flag unnecessarily for ARRAY maps
> that are backing data sections, if such data sections don't expose any
> variables to user-space. Exposed variables are those that have
> STB_GLOBAL or STB_WEAK ELF binding and correspond to BTF VAR's
> BTF_VAR_GLOBAL_ALLOCATED linkage.
> The overall idea is that if some data section doesn't have any variable
> that
> is exposed through BPF skeleton, then there is no reason to make such
> BPF array mmapable. Making BPF array mmapable is not a free no-op
> action, because BPF verifier doesn't allow users to put special objects
> (such as BPF spin locks, RB tree nodes, linked list nodes, kptrs, etc;
> anything that has a sensitive internal state that should not be modified
> arbitrarily from user space) into mmapable arrays, as there is no way to
> prevent user space from corrupting such sensitive state through direct
> memory access through memory-mapped region.
> By making sure that libbpf doesn't add BPF_F_MMAPABLE flag to BPF array
> maps corresponding to data sections that only have static variables
> (which are not supposed to be visible to user space according to libbpf
> and BPF skeleton rules), users now can have spinlocks, kptrs, etc in
> either default .bss/.data sections or custom .data.* sections (assuming
> there are no global variables in such sections).
> The only possible hiccup with this approach is the need to use global
> variables during BPF static linking, even if it's not intended to be
> shared with user space through BPF skeleton. To allow such scenarios,
> extend libbpf's STV_HIDDEN ELF visibility attribute handling to
> variables. Libbpf is already treating global hidden BPF subprograms as
> static subprograms and adjusts BTF accordingly to make BPF verifier
> verify such subprograms as static subprograms with preserving entire BPF
> verifier state between subprog calls. This patch teaches libbpf to treat
> global hidden variables as static ones and adjust BTF information
> accordingly as well. This allows to share variables between multiple
> object files during static linking, but still keep them internal to BPF
> program and not get them exposed through BPF skeleton.
> Note, that if the user has some advanced scenario where they absolutely
> need BPF_F_MMAPABLE flag on .data/.bss/.rodata BPF array map despite
> only having static variables, they still can achieve this by forcing it
> through explicit bpf_map__set_map_flags() API.
> Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Stanislav Fomichev <sdf@google.com>
Left a nit for spelling and the same 'log err vs size' question.
> ---
> tools/lib/bpf/libbpf.c | 95 ++++++++++++++++++++++++++++++++++--------
> 1 file changed, 77 insertions(+), 18 deletions(-)
> diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
> index a25eb8fe7bf2..c25d7a4f5704 100644
> --- a/tools/lib/bpf/libbpf.c
> +++ b/tools/lib/bpf/libbpf.c
> @@ -1577,7 +1577,38 @@ static char *internal_map_name(struct bpf_object
> *obj, const char *real_name)
> }
> static int
> -bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map);
> +map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map);
> +
> +/* Internal BPF map is mmap()'able only if at least one of corresponding
> + * DATASEC's VARs are to be exposed through BPF skeleton. I.e., it's a
> GLOBAL
> + * variable and it's not marked as __hidden (which turns it into,
> effectively,
> + * a STATIC variable).
> + */
> +static bool map_is_mmapable(struct bpf_object *obj, struct bpf_map *map)
> +{
> + const struct btf_type *t, *vt;
> + struct btf_var_secinfo *vsi;
> + int i, n;
> +
> + if (!map->btf_value_type_id)
> + return false;
> +
> + t = btf__type_by_id(obj->btf, map->btf_value_type_id);
> + if (!btf_is_datasec(t))
> + return false;
> +
> + vsi = btf_var_secinfos(t);
> + for (i = 0, n = btf_vlen(t); i < n; i++, vsi++) {
> + vt = btf__type_by_id(obj->btf, vsi->type);
> + if (!btf_is_var(vt))
> + continue;
> +
> + if (btf_var(vt)->linkage != BTF_VAR_STATIC)
> + return true;
> + }
> +
> + return false;
> +}
> static int
> bpf_object__init_internal_map(struct bpf_object *obj, enum
> libbpf_map_type type,
> @@ -1609,7 +1640,12 @@ bpf_object__init_internal_map(struct bpf_object
> *obj, enum libbpf_map_type type,
> def->max_entries = 1;
> def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG
> ? BPF_F_RDONLY_PROG : 0;
> - def->map_flags |= BPF_F_MMAPABLE;
> +
> + /* failures are fine because of maps like .rodata.str1.1 */
> + (void) map_fill_btf_type_info(obj, map);
> +
> + if (map_is_mmapable(obj, map))
> + def->map_flags |= BPF_F_MMAPABLE;
> pr_debug("map '%s' (global data): at sec_idx %d, offset %zu,
> flags %x.\n",
> map->name, map->sec_idx, map->sec_offset, def->map_flags);
> @@ -1626,9 +1662,6 @@ bpf_object__init_internal_map(struct bpf_object
> *obj, enum libbpf_map_type type,
> return err;
> }
> - /* failures are fine because of maps like .rodata.str1.1 */
> - (void) bpf_map_find_btf_info(obj, map);
> -
> if (data)
> memcpy(map->mmaped, data, data_sz);
> @@ -2540,7 +2573,7 @@ static int bpf_object__init_user_btf_map(struct
> bpf_object *obj,
> fill_map_from_def(map->inner_map, &inner_def);
> }
> - err = bpf_map_find_btf_info(obj, map);
> + err = map_fill_btf_type_info(obj, map);
> if (err)
> return err;
> @@ -2848,6 +2881,7 @@ static int btf_fixup_datasec(struct bpf_object
> *obj, struct btf *btf,
> __u32 size = 0, i, vars = btf_vlen(t);
> const char *sec_name = btf__name_by_offset(btf, t->name_off);
> struct btf_var_secinfo *vsi;
> + bool fixup_offsets = false;
> int err;
> if (!sec_name) {
> @@ -2855,20 +2889,33 @@ static int btf_fixup_datasec(struct bpf_object
> *obj, struct btf *btf,
> return -ENOENT;
> }
> - /* extern-backing datasecs (.ksyms, .kconfig) have their size and
> - * variable offsets set at the previous step, so we skip any fixups
> - * for such sections
> + /* Extern-backing datasecs (.ksyms, .kconfig) have their size and
> + * variable offsets set at the previous step. Further, not every
> + * extern BTF VAR has corresponding ELF symbol preserved, so we skip
[..]
> + * all fixups altogether for such sections and go straight to storting
> + * VARs within their DATASEC.
nit: s/storting/sorting/
> */
> - if (t->size)
> + if (strcmp(sec_name, KCONFIG_SEC) == 0 || strcmp(sec_name, KSYMS_SEC)
> == 0)
> goto sort_vars;
> - err = find_elf_sec_sz(obj, sec_name, &size);
> - if (err || !size) {
> - pr_debug("sec '%s': invalid size %u bytes\n", sec_name, size);
> - return -ENOENT;
> - }
> + /* Clang leaves DATASEC size and VAR offsets as zeroes, so we need to
> + * fix this up. But BPF static linker already fixes this up and fills
> + * all the sizes and offsets during static linking. So this step has
> + * to be optional. But the STV_HIDDEN handling is non-optional for any
> + * non-extern DATASEC, so the variable fixup loop below handles both
> + * functions at the same time, paying the cost of BTF VAR <-> ELF
> + * symbol matching just once.
> + */
> + if (t->size == 0) {
> + err = find_elf_sec_sz(obj, sec_name, &size);
> + if (err || !size) {
> + pr_debug("sec '%s': invalid size %u bytes\n", sec_name, size);
nit: same suggestion here - let's log err instead?
> + return -ENOENT;
> + }
> - t->size = size;
> + t->size = size;
> + fixup_offsets = true;
> + }
> for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
> const struct btf_type *t_var;
> @@ -2900,7 +2947,19 @@ static int btf_fixup_datasec(struct bpf_object
> *obj, struct btf *btf,
> return -ENOENT;
> }
> - vsi->offset = sym->st_value;
> + if (fixup_offsets)
> + vsi->offset = sym->st_value;
> +
> + /* if variable is a global/weak symbol, but has restricted
> + * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF VAR
> + * as static. This follows similar logic for functions (BPF
> + * subprogs) and influences libbpf's further decisions about
> + * whether to make global data BPF array maps as
> + * BPF_F_MMAPABLE.
> + */
> + if (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
> + || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL)
> + var->linkage = BTF_VAR_STATIC;
> }
> sort_vars:
> @@ -4222,7 +4281,7 @@ bpf_object__collect_prog_relos(struct bpf_object
> *obj, Elf64_Shdr *shdr, Elf_Dat
> return 0;
> }
> -static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map
> *map)
> +static int map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map
> *map)
> {
> int id;
> --
> 2.30.2
^ permalink raw reply [flat|nested] 12+ messages in thread* Re: [PATCH bpf-next 2/3] libbpf: only add BPF_F_MMAPABLE flag for data maps with global vars
2022-10-18 18:52 ` sdf
@ 2022-10-18 23:20 ` Andrii Nakryiko
0 siblings, 0 replies; 12+ messages in thread
From: Andrii Nakryiko @ 2022-10-18 23:20 UTC (permalink / raw)
To: sdf; +Cc: Andrii Nakryiko, bpf, ast, daniel, kernel-team
On Tue, Oct 18, 2022 at 11:52 AM <sdf@google.com> wrote:
>
> On 10/17, Andrii Nakryiko wrote:
> > Teach libbpf to not add BPF_F_MMAPABLE flag unnecessarily for ARRAY maps
> > that are backing data sections, if such data sections don't expose any
> > variables to user-space. Exposed variables are those that have
> > STB_GLOBAL or STB_WEAK ELF binding and correspond to BTF VAR's
> > BTF_VAR_GLOBAL_ALLOCATED linkage.
>
> > The overall idea is that if some data section doesn't have any variable
> > that
> > is exposed through BPF skeleton, then there is no reason to make such
> > BPF array mmapable. Making BPF array mmapable is not a free no-op
> > action, because BPF verifier doesn't allow users to put special objects
> > (such as BPF spin locks, RB tree nodes, linked list nodes, kptrs, etc;
> > anything that has a sensitive internal state that should not be modified
> > arbitrarily from user space) into mmapable arrays, as there is no way to
> > prevent user space from corrupting such sensitive state through direct
> > memory access through memory-mapped region.
>
> > By making sure that libbpf doesn't add BPF_F_MMAPABLE flag to BPF array
> > maps corresponding to data sections that only have static variables
> > (which are not supposed to be visible to user space according to libbpf
> > and BPF skeleton rules), users now can have spinlocks, kptrs, etc in
> > either default .bss/.data sections or custom .data.* sections (assuming
> > there are no global variables in such sections).
>
> > The only possible hiccup with this approach is the need to use global
> > variables during BPF static linking, even if it's not intended to be
> > shared with user space through BPF skeleton. To allow such scenarios,
> > extend libbpf's STV_HIDDEN ELF visibility attribute handling to
> > variables. Libbpf is already treating global hidden BPF subprograms as
> > static subprograms and adjusts BTF accordingly to make BPF verifier
> > verify such subprograms as static subprograms with preserving entire BPF
> > verifier state between subprog calls. This patch teaches libbpf to treat
> > global hidden variables as static ones and adjust BTF information
> > accordingly as well. This allows to share variables between multiple
> > object files during static linking, but still keep them internal to BPF
> > program and not get them exposed through BPF skeleton.
>
> > Note, that if the user has some advanced scenario where they absolutely
> > need BPF_F_MMAPABLE flag on .data/.bss/.rodata BPF array map despite
> > only having static variables, they still can achieve this by forcing it
> > through explicit bpf_map__set_map_flags() API.
>
> > Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
>
> Acked-by: Stanislav Fomichev <sdf@google.com>
>
> Left a nit for spelling and the same 'log err vs size' question.
>
>
> > ---
> > tools/lib/bpf/libbpf.c | 95 ++++++++++++++++++++++++++++++++++--------
> > 1 file changed, 77 insertions(+), 18 deletions(-)
>
[...]
> > - /* extern-backing datasecs (.ksyms, .kconfig) have their size and
> > - * variable offsets set at the previous step, so we skip any fixups
> > - * for such sections
> > + /* Extern-backing datasecs (.ksyms, .kconfig) have their size and
> > + * variable offsets set at the previous step. Further, not every
> > + * extern BTF VAR has corresponding ELF symbol preserved, so we skip
>
> [..]
>
> > + * all fixups altogether for such sections and go straight to storting
> > + * VARs within their DATASEC.
>
> nit: s/storting/sorting/
eagle eye :) will fix!
>
> > */
> > - if (t->size)
> > + if (strcmp(sec_name, KCONFIG_SEC) == 0 || strcmp(sec_name, KSYMS_SEC)
> > == 0)
> > goto sort_vars;
>
> > - err = find_elf_sec_sz(obj, sec_name, &size);
> > - if (err || !size) {
> > - pr_debug("sec '%s': invalid size %u bytes\n", sec_name, size);
> > - return -ENOENT;
> > - }
> > + /* Clang leaves DATASEC size and VAR offsets as zeroes, so we need to
> > + * fix this up. But BPF static linker already fixes this up and fills
> > + * all the sizes and offsets during static linking. So this step has
> > + * to be optional. But the STV_HIDDEN handling is non-optional for any
> > + * non-extern DATASEC, so the variable fixup loop below handles both
> > + * functions at the same time, paying the cost of BTF VAR <-> ELF
> > + * symbol matching just once.
> > + */
> > + if (t->size == 0) {
> > + err = find_elf_sec_sz(obj, sec_name, &size);
> > + if (err || !size) {
> > + pr_debug("sec '%s': invalid size %u bytes\n", sec_name, size);
>
> nit: same suggestion here - let's log err instead?
sure
>
> > + return -ENOENT;
> > + }
>
> > - t->size = size;
> > + t->size = size;
> > + fixup_offsets = true;
> > + }
>
> > for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
> > const struct btf_type *t_var;
[...]
^ permalink raw reply [flat|nested] 12+ messages in thread
* [PATCH bpf-next 3/3] libbpf: add non-mmapable data section selftest
2022-10-18 3:56 [PATCH bpf-next 0/3] libbpf: support non-mmap()'able data sections Andrii Nakryiko
2022-10-18 3:56 ` [PATCH bpf-next 1/3] libbpf: clean up and refactor BTF fixup step Andrii Nakryiko
2022-10-18 3:56 ` [PATCH bpf-next 2/3] libbpf: only add BPF_F_MMAPABLE flag for data maps with global vars Andrii Nakryiko
@ 2022-10-18 3:56 ` Andrii Nakryiko
2022-10-18 18:55 ` sdf
2022-10-18 20:56 ` Dave Marchevsky
2 siblings, 2 replies; 12+ messages in thread
From: Andrii Nakryiko @ 2022-10-18 3:56 UTC (permalink / raw)
To: bpf, ast, daniel; +Cc: andrii, kernel-team
Add non-mmapable data section to test_skeleton selftest and make sure it
really isn't mmapable by trying to mmap() it anyways.
Also make sure that libbpf doesn't report BPF_F_MMAPABLE flag to users.
Additionally, some more manual testing was performed to confirm that this
feature works as intended.
Looking at created map through bpftool shows that flags passed to kernel are
indeed zero:
$ bpftool map show
...
1782: array name .data.non_mmapa flags 0x0
key 4B value 16B max_entries 1 memlock 4096B
btf_id 1169
pids test_progs(8311)
...
Checking BTF uploaded to kernel for this map shows that zero_key and
zero_value are indeed marked as static, even though zero_key is actually
original global (but STV_HIDDEN) variable:
$ bpftool btf dump id 1169
...
[51] VAR 'zero_key' type_id=2, linkage=static
[52] VAR 'zero_value' type_id=7, linkage=static
...
[62] DATASEC '.data.non_mmapable' size=16 vlen=2
type_id=51 offset=0 size=4 (VAR 'zero_key')
type_id=52 offset=4 size=12 (VAR 'zero_value')
...
And original BTF does have zero_key marked as linkage=global:
$ bpftool btf dump file test_skeleton.bpf.linked3.o
...
[51] VAR 'zero_key' type_id=2, linkage=global
[52] VAR 'zero_value' type_id=7, linkage=static
...
[62] DATASEC '.data.non_mmapable' size=16 vlen=2
type_id=51 offset=0 size=4 (VAR 'zero_key')
type_id=52 offset=4 size=12 (VAR 'zero_value')
Bpftool didn't require any changes at all because it checks whether internal
map is mmapable already, but just to double-check generated skeleton, we
see that .data.non_mmapable neither sets mmaped pointer nor has
a corresponding field in the skeleton:
$ grep non_mmapable test_skeleton.skel.h
struct bpf_map *data_non_mmapable;
s->maps[7].name = ".data.non_mmapable";
s->maps[7].map = &obj->maps.data_non_mmapable;
But .data.read_mostly has all of those things:
$ grep read_mostly test_skeleton.skel.h
struct bpf_map *data_read_mostly;
struct test_skeleton__data_read_mostly {
int read_mostly_var;
} *data_read_mostly;
s->maps[6].name = ".data.read_mostly";
s->maps[6].map = &obj->maps.data_read_mostly;
s->maps[6].mmaped = (void **)&obj->data_read_mostly;
_Static_assert(sizeof(s->data_read_mostly->read_mostly_var) == 4, "unexpected size of 'read_mostly_var'");
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
---
.../testing/selftests/bpf/prog_tests/skeleton.c | 11 ++++++++++-
.../testing/selftests/bpf/progs/test_skeleton.c | 17 +++++++++++++++++
2 files changed, 27 insertions(+), 1 deletion(-)
diff --git a/tools/testing/selftests/bpf/prog_tests/skeleton.c b/tools/testing/selftests/bpf/prog_tests/skeleton.c
index 99dac5292b41..bc6817aee9aa 100644
--- a/tools/testing/selftests/bpf/prog_tests/skeleton.c
+++ b/tools/testing/selftests/bpf/prog_tests/skeleton.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
+#include <sys/mman.h>
struct s {
int a;
@@ -22,7 +23,8 @@ void test_skeleton(void)
struct test_skeleton__kconfig *kcfg;
const void *elf_bytes;
size_t elf_bytes_sz = 0;
- int i;
+ void *m;
+ int i, fd;
skel = test_skeleton__open();
if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
@@ -124,6 +126,13 @@ void test_skeleton(void)
ASSERT_EQ(bss->huge_arr[ARRAY_SIZE(bss->huge_arr) - 1], 123, "huge_arr");
+ fd = bpf_map__fd(skel->maps.data_non_mmapable);
+ m = mmap(NULL, getpagesize(), PROT_READ, MAP_SHARED, fd, 0);
+ if (!ASSERT_EQ(m, MAP_FAILED, "unexpected_mmap_success"))
+ munmap(m, getpagesize());
+
+ ASSERT_EQ(bpf_map__map_flags(skel->maps.data_non_mmapable), 0, "non_mmap_flags");
+
elf_bytes = test_skeleton__elf_bytes(&elf_bytes_sz);
ASSERT_OK_PTR(elf_bytes, "elf_bytes");
ASSERT_GE(elf_bytes_sz, 0, "elf_bytes_sz");
diff --git a/tools/testing/selftests/bpf/progs/test_skeleton.c b/tools/testing/selftests/bpf/progs/test_skeleton.c
index 1a4e93f6d9df..adece9f91f58 100644
--- a/tools/testing/selftests/bpf/progs/test_skeleton.c
+++ b/tools/testing/selftests/bpf/progs/test_skeleton.c
@@ -53,6 +53,20 @@ int out_mostly_var;
char huge_arr[16 * 1024 * 1024];
+/* non-mmapable custom .data section */
+
+struct my_value { int x, y, z; };
+
+__hidden int zero_key SEC(".data.non_mmapable");
+static struct my_value zero_value SEC(".data.non_mmapable");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct my_value);
+ __uint(max_entries, 1);
+} my_map SEC(".maps");
+
SEC("raw_tp/sys_enter")
int handler(const void *ctx)
{
@@ -75,6 +89,9 @@ int handler(const void *ctx)
huge_arr[sizeof(huge_arr) - 1] = 123;
+ /* make sure zero_key and zero_value are not optimized out */
+ bpf_map_update_elem(&my_map, &zero_key, &zero_value, BPF_ANY);
+
return 0;
}
--
2.30.2
^ permalink raw reply related [flat|nested] 12+ messages in thread* Re: [PATCH bpf-next 3/3] libbpf: add non-mmapable data section selftest
2022-10-18 3:56 ` [PATCH bpf-next 3/3] libbpf: add non-mmapable data section selftest Andrii Nakryiko
@ 2022-10-18 18:55 ` sdf
2022-10-18 20:56 ` Dave Marchevsky
1 sibling, 0 replies; 12+ messages in thread
From: sdf @ 2022-10-18 18:55 UTC (permalink / raw)
To: Andrii Nakryiko; +Cc: bpf, ast, daniel, kernel-team
On 10/17, Andrii Nakryiko wrote:
> Add non-mmapable data section to test_skeleton selftest and make sure it
> really isn't mmapable by trying to mmap() it anyways.
> Also make sure that libbpf doesn't report BPF_F_MMAPABLE flag to users.
> Additionally, some more manual testing was performed to confirm that this
> feature works as intended.
> Looking at created map through bpftool shows that flags passed to kernel
> are
> indeed zero:
> $ bpftool map show
> ...
> 1782: array name .data.non_mmapa flags 0x0
> key 4B value 16B max_entries 1 memlock 4096B
> btf_id 1169
> pids test_progs(8311)
> ...
> Checking BTF uploaded to kernel for this map shows that zero_key and
> zero_value are indeed marked as static, even though zero_key is actually
> original global (but STV_HIDDEN) variable:
> $ bpftool btf dump id 1169
> ...
> [51] VAR 'zero_key' type_id=2, linkage=static
> [52] VAR 'zero_value' type_id=7, linkage=static
> ...
> [62] DATASEC '.data.non_mmapable' size=16 vlen=2
> type_id=51 offset=0 size=4 (VAR 'zero_key')
> type_id=52 offset=4 size=12 (VAR 'zero_value')
> ...
> And original BTF does have zero_key marked as linkage=global:
> $ bpftool btf dump file test_skeleton.bpf.linked3.o
> ...
> [51] VAR 'zero_key' type_id=2, linkage=global
> [52] VAR 'zero_value' type_id=7, linkage=static
> ...
> [62] DATASEC '.data.non_mmapable' size=16 vlen=2
> type_id=51 offset=0 size=4 (VAR 'zero_key')
> type_id=52 offset=4 size=12 (VAR 'zero_value')
> Bpftool didn't require any changes at all because it checks whether
> internal
> map is mmapable already, but just to double-check generated skeleton, we
> see that .data.non_mmapable neither sets mmaped pointer nor has
> a corresponding field in the skeleton:
> $ grep non_mmapable test_skeleton.skel.h
> struct bpf_map *data_non_mmapable;
> s->maps[7].name = ".data.non_mmapable";
> s->maps[7].map = &obj->maps.data_non_mmapable;
> But .data.read_mostly has all of those things:
> $ grep read_mostly test_skeleton.skel.h
> struct bpf_map *data_read_mostly;
> struct test_skeleton__data_read_mostly {
> int read_mostly_var;
> } *data_read_mostly;
> s->maps[6].name = ".data.read_mostly";
> s->maps[6].map = &obj->maps.data_read_mostly;
> s->maps[6].mmaped = (void **)&obj->data_read_mostly;
> _Static_assert(sizeof(s->data_read_mostly->read_mostly_var) ==
> 4, "unexpected size of 'read_mostly_var'");
> Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Stanislav Fomichev <sdf@google.com>
> ---
> .../testing/selftests/bpf/prog_tests/skeleton.c | 11 ++++++++++-
> .../testing/selftests/bpf/progs/test_skeleton.c | 17 +++++++++++++++++
> 2 files changed, 27 insertions(+), 1 deletion(-)
> diff --git a/tools/testing/selftests/bpf/prog_tests/skeleton.c
> b/tools/testing/selftests/bpf/prog_tests/skeleton.c
> index 99dac5292b41..bc6817aee9aa 100644
> --- a/tools/testing/selftests/bpf/prog_tests/skeleton.c
> +++ b/tools/testing/selftests/bpf/prog_tests/skeleton.c
> @@ -2,6 +2,7 @@
> /* Copyright (c) 2019 Facebook */
> #include <test_progs.h>
> +#include <sys/mman.h>
> struct s {
> int a;
> @@ -22,7 +23,8 @@ void test_skeleton(void)
> struct test_skeleton__kconfig *kcfg;
> const void *elf_bytes;
> size_t elf_bytes_sz = 0;
> - int i;
> + void *m;
> + int i, fd;
> skel = test_skeleton__open();
> if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
> @@ -124,6 +126,13 @@ void test_skeleton(void)
> ASSERT_EQ(bss->huge_arr[ARRAY_SIZE(bss->huge_arr) - 1],
> 123, "huge_arr");
> + fd = bpf_map__fd(skel->maps.data_non_mmapable);
> + m = mmap(NULL, getpagesize(), PROT_READ, MAP_SHARED, fd, 0);
> + if (!ASSERT_EQ(m, MAP_FAILED, "unexpected_mmap_success"))
> + munmap(m, getpagesize());
> +
> + ASSERT_EQ(bpf_map__map_flags(skel->maps.data_non_mmapable),
> 0, "non_mmap_flags");
> +
> elf_bytes = test_skeleton__elf_bytes(&elf_bytes_sz);
> ASSERT_OK_PTR(elf_bytes, "elf_bytes");
> ASSERT_GE(elf_bytes_sz, 0, "elf_bytes_sz");
> diff --git a/tools/testing/selftests/bpf/progs/test_skeleton.c
> b/tools/testing/selftests/bpf/progs/test_skeleton.c
> index 1a4e93f6d9df..adece9f91f58 100644
> --- a/tools/testing/selftests/bpf/progs/test_skeleton.c
> +++ b/tools/testing/selftests/bpf/progs/test_skeleton.c
> @@ -53,6 +53,20 @@ int out_mostly_var;
> char huge_arr[16 * 1024 * 1024];
> +/* non-mmapable custom .data section */
> +
> +struct my_value { int x, y, z; };
> +
> +__hidden int zero_key SEC(".data.non_mmapable");
> +static struct my_value zero_value SEC(".data.non_mmapable");
> +
> +struct {
> + __uint(type, BPF_MAP_TYPE_ARRAY);
> + __type(key, int);
> + __type(value, struct my_value);
> + __uint(max_entries, 1);
> +} my_map SEC(".maps");
> +
> SEC("raw_tp/sys_enter")
> int handler(const void *ctx)
> {
> @@ -75,6 +89,9 @@ int handler(const void *ctx)
> huge_arr[sizeof(huge_arr) - 1] = 123;
> + /* make sure zero_key and zero_value are not optimized out */
> + bpf_map_update_elem(&my_map, &zero_key, &zero_value, BPF_ANY);
> +
> return 0;
> }
> --
> 2.30.2
^ permalink raw reply [flat|nested] 12+ messages in thread* Re: [PATCH bpf-next 3/3] libbpf: add non-mmapable data section selftest
2022-10-18 3:56 ` [PATCH bpf-next 3/3] libbpf: add non-mmapable data section selftest Andrii Nakryiko
2022-10-18 18:55 ` sdf
@ 2022-10-18 20:56 ` Dave Marchevsky
2022-10-18 21:02 ` Dave Marchevsky
1 sibling, 1 reply; 12+ messages in thread
From: Dave Marchevsky @ 2022-10-18 20:56 UTC (permalink / raw)
To: Andrii Nakryiko, bpf, ast, daniel; +Cc: kernel-team
On 10/17/22 11:56 PM, Andrii Nakryiko wrote:
> Add non-mmapable data section to test_skeleton selftest and make sure it
> really isn't mmapable by trying to mmap() it anyways.
>
> Also make sure that libbpf doesn't report BPF_F_MMAPABLE flag to users.
>
> Additionally, some more manual testing was performed to confirm that this
> feature works as intended.
>
> Looking at created map through bpftool shows that flags passed to kernel are
> indeed zero:
>
> $ bpftool map show
> ...
> 1782: array name .data.non_mmapa flags 0x0
> key 4B value 16B max_entries 1 memlock 4096B
> btf_id 1169
> pids test_progs(8311)
> ...
>
> Checking BTF uploaded to kernel for this map shows that zero_key and
> zero_value are indeed marked as static, even though zero_key is actually
> original global (but STV_HIDDEN) variable:
>
> $ bpftool btf dump id 1169
> ...
> [51] VAR 'zero_key' type_id=2, linkage=static
> [52] VAR 'zero_value' type_id=7, linkage=static
> ...
> [62] DATASEC '.data.non_mmapable' size=16 vlen=2
> type_id=51 offset=0 size=4 (VAR 'zero_key')
> type_id=52 offset=4 size=12 (VAR 'zero_value')
> ...
>
> And original BTF does have zero_key marked as linkage=global:
>
> $ bpftool btf dump file test_skeleton.bpf.linked3.o
> ...
> [51] VAR 'zero_key' type_id=2, linkage=global
> [52] VAR 'zero_value' type_id=7, linkage=static
> ...
> [62] DATASEC '.data.non_mmapable' size=16 vlen=2
> type_id=51 offset=0 size=4 (VAR 'zero_key')
> type_id=52 offset=4 size=12 (VAR 'zero_value')
>
> Bpftool didn't require any changes at all because it checks whether internal
> map is mmapable already, but just to double-check generated skeleton, we
> see that .data.non_mmapable neither sets mmaped pointer nor has
> a corresponding field in the skeleton:
>
> $ grep non_mmapable test_skeleton.skel.h
> struct bpf_map *data_non_mmapable;
> s->maps[7].name = ".data.non_mmapable";
> s->maps[7].map = &obj->maps.data_non_mmapable;
>
> But .data.read_mostly has all of those things:
>
> $ grep read_mostly test_skeleton.skel.h
> struct bpf_map *data_read_mostly;
> struct test_skeleton__data_read_mostly {
> int read_mostly_var;
> } *data_read_mostly;
> s->maps[6].name = ".data.read_mostly";
> s->maps[6].map = &obj->maps.data_read_mostly;
> s->maps[6].mmaped = (void **)&obj->data_read_mostly;
> _Static_assert(sizeof(s->data_read_mostly->read_mostly_var) == 4, "unexpected size of 'read_mostly_var'");
>
> Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
> ---
Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
^ permalink raw reply [flat|nested] 12+ messages in thread* Re: [PATCH bpf-next 3/3] libbpf: add non-mmapable data section selftest
2022-10-18 20:56 ` Dave Marchevsky
@ 2022-10-18 21:02 ` Dave Marchevsky
0 siblings, 0 replies; 12+ messages in thread
From: Dave Marchevsky @ 2022-10-18 21:02 UTC (permalink / raw)
To: Andrii Nakryiko, bpf, ast, daniel; +Cc: kernel-team
On 10/18/22 4:56 PM, Dave Marchevsky wrote:
> On 10/17/22 11:56 PM, Andrii Nakryiko wrote:
>> Add non-mmapable data section to test_skeleton selftest and make sure it
>> really isn't mmapable by trying to mmap() it anyways.
>>
>> Also make sure that libbpf doesn't report BPF_F_MMAPABLE flag to users.
>>
>> Additionally, some more manual testing was performed to confirm that this
>> feature works as intended.
>>
>> Looking at created map through bpftool shows that flags passed to kernel are
>> indeed zero:
>>
>> $ bpftool map show
>> ...
>> 1782: array name .data.non_mmapa flags 0x0
>> key 4B value 16B max_entries 1 memlock 4096B
>> btf_id 1169
>> pids test_progs(8311)
>> ...
>>
>> Checking BTF uploaded to kernel for this map shows that zero_key and
>> zero_value are indeed marked as static, even though zero_key is actually
>> original global (but STV_HIDDEN) variable:
>>
>> $ bpftool btf dump id 1169
>> ...
>> [51] VAR 'zero_key' type_id=2, linkage=static
>> [52] VAR 'zero_value' type_id=7, linkage=static
>> ...
>> [62] DATASEC '.data.non_mmapable' size=16 vlen=2
>> type_id=51 offset=0 size=4 (VAR 'zero_key')
>> type_id=52 offset=4 size=12 (VAR 'zero_value')
>> ...
>>
>> And original BTF does have zero_key marked as linkage=global:
>>
>> $ bpftool btf dump file test_skeleton.bpf.linked3.o
>> ...
>> [51] VAR 'zero_key' type_id=2, linkage=global
>> [52] VAR 'zero_value' type_id=7, linkage=static
>> ...
>> [62] DATASEC '.data.non_mmapable' size=16 vlen=2
>> type_id=51 offset=0 size=4 (VAR 'zero_key')
>> type_id=52 offset=4 size=12 (VAR 'zero_value')
>>
>> Bpftool didn't require any changes at all because it checks whether internal
>> map is mmapable already, but just to double-check generated skeleton, we
>> see that .data.non_mmapable neither sets mmaped pointer nor has
>> a corresponding field in the skeleton:
>>
>> $ grep non_mmapable test_skeleton.skel.h
>> struct bpf_map *data_non_mmapable;
>> s->maps[7].name = ".data.non_mmapable";
>> s->maps[7].map = &obj->maps.data_non_mmapable;
>>
>> But .data.read_mostly has all of those things:
>>
>> $ grep read_mostly test_skeleton.skel.h
>> struct bpf_map *data_read_mostly;
>> struct test_skeleton__data_read_mostly {
>> int read_mostly_var;
>> } *data_read_mostly;
>> s->maps[6].name = ".data.read_mostly";
>> s->maps[6].map = &obj->maps.data_read_mostly;
>> s->maps[6].mmaped = (void **)&obj->data_read_mostly;
>> _Static_assert(sizeof(s->data_read_mostly->read_mostly_var) == 4, "unexpected size of 'read_mostly_var'");
>>
>> Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
>> ---
>
> Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
Bah, I meant
Acked-by: Dave Marchevsky <davemarchevsky@fb.com>
^ permalink raw reply [flat|nested] 12+ messages in thread