[PATCH bpf-next 1/2] bpf: align syscall writeback behavior with caller-declared size
From: Yuyang Huang @ 2026-05-15 7:15 UTC
To: Yuyang Huang
Cc: David S. Miller, Alexei Starovoitov, Andrew Lunn, Andrii Nakryiko,
Daniel Borkmann, Eduard Zingerman, Eric Dumazet, Jakub Kicinski,
Jiri Olsa, John Fastabend, Kumar Kartikeya Dwivedi,
Martin KaFai Lau, Nikolay Aleksandrov, Paolo Abeni, Shuah Khan,
Simon Horman, Song Liu, Stanislav Fomichev, Yonghong Song, bpf,
linux-kernel, linux-kselftest, netdev, Maciej Żenczykowski,
Lorenzo Colitti
The bpf(cmd, attr, size) syscall copies at most 'size' bytes of
union bpf_attr from userspace on input, but several commands write
output fields back to userspace unconditionally, at their fixed
offsets in the union. Because copy_to_user() succeeds as long as the
destination is mapped and writable, a caller buffer that ends before
an output field is written out of bounds, potentially corrupting
adjacent userspace memory.
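
For example, the pre-patch writeback of 'query.revision' was
unconditional; the copy landed at the field's fixed offset no matter
what size the caller declared (sketch of the old code path removed by
this patch):

  /* Pre-patch: runs even when the caller's buffer, e.g. the 40-byte
   * layout used by older userspace, ends before query.revision.
   */
  if (copy_to_user(&uattr->query.revision, &revision, sizeof(revision)))
          return -EFAULT;  /* succeeds, writing past the short buffer */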
Address this by introducing two policies, depending on whether an
output field belongs to a command's original ABI or was added in a
later revision (see the sketch after this list):
1) Mandatory fields (original ABI): Return -EINVAL in __sys_bpf() if the
buffer size does not cover them. This hardens the syscall front-gate
for the following commands:
- BPF_PROG_QUERY (min size: query.prog_cnt)
- BPF_PROG_TEST_RUN (min size: test.duration)
- BPF_*_GET_NEXT_ID (min size: next_id)
- BPF_OBJ_GET_INFO_BY_FD (min size: info.info_len)
- BPF_TASK_FD_QUERY (min size: task_fd_query.probe_addr)
- BPF_MAP_*_BATCH (min size: batch.flags)
2) Optional fields (later revisions): Skip the writeback if the buffer
   size does not cover the field. This is applied to BPF_PROG_QUERY's
   'query.revision'. Older userspace passing a smaller size (e.g., 40
   bytes) simply has the write skipped. This size-gating pattern
   follows the existing precedent for 'log_true_size' (verifier.c)
   and 'btf_log_true_size' (btf.c).
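
Concretely, the two policies as applied by this patch (front-gate in
__sys_bpf(), size-gated writeback in __cgroup_bpf_query() and
bpf_mprog_query()):

  /* 1) Mandatory field: reject short buffers at the front-gate */
  case BPF_PROG_QUERY:
          if (size < offsetofend(union bpf_attr, query.prog_cnt))
                  return -EINVAL;
          err = bpf_prog_query(&attr, uattr.user, size);
          break;

  /* 2) Optional field: skip the writeback when it lies beyond 'size' */
  if (uattr_size >= offsetofend(union bpf_attr, query.revision) &&
      copy_to_user(&uattr->query.revision, &revision, sizeof(revision)))
          return -EFAULT;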
To support this, the user-declared 'size' is plumbed from __sys_bpf()
through the query dispatchers (cgroup, tcx, netkit) to the underlying
writeback helpers in cgroup.c and mprog.c.
Cc: Maciej Żenczykowski <maze@google.com>
Cc: Lorenzo Colitti <lorenzo@google.com>
Signed-off-by: Yuyang Huang <yuyanghuang@google.com>
Link: https://lore.kernel.org/r/CANP3RGfZTXM_u=E_atoomPZXutoQJ02nOMkCCR-YBZbOm2suWA@mail.gmail.com
---
drivers/net/netkit.c | 5 +++--
include/linux/bpf-cgroup.h | 5 +++--
include/linux/bpf_mprog.h | 4 ++--
include/net/netkit.h | 6 ++++--
include/net/tcx.h | 5 +++--
kernel/bpf/cgroup.c | 13 +++++++------
kernel/bpf/mprog.c | 5 +++--
kernel/bpf/syscall.c | 34 +++++++++++++++++++++++++++++-----
kernel/bpf/tcx.c | 5 +++--
9 files changed, 57 insertions(+), 25 deletions(-)
diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c
index 5e2eecc3165d..680607d6e039 100644
--- a/drivers/net/netkit.c
+++ b/drivers/net/netkit.c
@@ -813,7 +813,8 @@ int netkit_prog_detach(const union bpf_attr *attr, struct bpf_prog *prog)
return ret;
}
-int netkit_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
+int netkit_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr,
+ u32 uattr_size)
{
struct net_device *dev;
int ret;
@@ -826,7 +827,7 @@ int netkit_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
ret = PTR_ERR(dev);
goto out;
}
- ret = bpf_mprog_query(attr, uattr, netkit_entry_fetch(dev, false));
+ ret = bpf_mprog_query(attr, uattr, uattr_size, netkit_entry_fetch(dev, false));
out:
rtnl_unlock();
return ret;
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index b2e79c2b41d5..4d0cc65976a1 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -421,7 +421,7 @@ int cgroup_bpf_prog_detach(const union bpf_attr *attr,
enum bpf_prog_type ptype);
int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
- union bpf_attr __user *uattr);
+ union bpf_attr __user *uattr, u32 uattr_size);
const struct bpf_func_proto *
cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
@@ -452,7 +452,8 @@ static inline int cgroup_bpf_link_attach(const union bpf_attr *attr,
}
static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
- union bpf_attr __user *uattr)
+ union bpf_attr __user *uattr,
+ u32 uattr_size)
{
return -EINVAL;
}
diff --git a/include/linux/bpf_mprog.h b/include/linux/bpf_mprog.h
index 0b9f4caeeb0a..fa479ace854a 100644
--- a/include/linux/bpf_mprog.h
+++ b/include/linux/bpf_mprog.h
@@ -72,7 +72,7 @@
* // bpf_mprog user-side lock
* // fetch active @entry from attach location
* [...]
- * ret = bpf_mprog_query(attr, uattr, entry);
+ * ret = bpf_mprog_query(attr, uattr, uattr_size, entry);
* // bpf_mprog user-side unlock
*
* Data/fast path:
@@ -329,7 +329,7 @@ int bpf_mprog_detach(struct bpf_mprog_entry *entry,
u32 flags, u32 id_or_fd, u64 revision);
int bpf_mprog_query(const union bpf_attr *attr, union bpf_attr __user *uattr,
- struct bpf_mprog_entry *entry);
+ u32 uattr_size, struct bpf_mprog_entry *entry);
static inline bool bpf_mprog_supported(enum bpf_prog_type type)
{
diff --git a/include/net/netkit.h b/include/net/netkit.h
index 9ec0163739f4..fe209d1f9a64 100644
--- a/include/net/netkit.h
+++ b/include/net/netkit.h
@@ -9,7 +9,8 @@
int netkit_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int netkit_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int netkit_prog_detach(const union bpf_attr *attr, struct bpf_prog *prog);
-int netkit_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr);
+int netkit_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr,
+ u32 uattr_size);
INDIRECT_CALLABLE_DECLARE(struct net_device *netkit_peer_dev(struct net_device *dev));
#else
static inline int netkit_prog_attach(const union bpf_attr *attr,
@@ -31,7 +32,8 @@ static inline int netkit_prog_detach(const union bpf_attr *attr,
}
static inline int netkit_prog_query(const union bpf_attr *attr,
- union bpf_attr __user *uattr)
+ union bpf_attr __user *uattr,
+ u32 uattr_size)
{
return -EINVAL;
}
diff --git a/include/net/tcx.h b/include/net/tcx.h
index 23a61af13547..610626b39676 100644
--- a/include/net/tcx.h
+++ b/include/net/tcx.h
@@ -166,7 +166,7 @@ int tcx_prog_detach(const union bpf_attr *attr, struct bpf_prog *prog);
void tcx_uninstall(struct net_device *dev, bool ingress);
int tcx_prog_query(const union bpf_attr *attr,
- union bpf_attr __user *uattr);
+ union bpf_attr __user *uattr, u32 uattr_size);
static inline void dev_tcx_uninstall(struct net_device *dev)
{
@@ -194,7 +194,8 @@ static inline int tcx_prog_detach(const union bpf_attr *attr,
}
static inline int tcx_prog_query(const union bpf_attr *attr,
- union bpf_attr __user *uattr)
+ union bpf_attr __user *uattr,
+ u32 uattr_size)
{
return -EINVAL;
}
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index 876f6a81a9b6..2c2bdaa86aa7 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -1208,7 +1208,7 @@ static int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
/* Must be called with cgroup_mutex held to avoid races. */
static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
- union bpf_attr __user *uattr)
+ union bpf_attr __user *uattr, u32 uattr_size)
{
__u32 __user *prog_attach_flags = u64_to_user_ptr(attr->query.prog_attach_flags);
bool effective_query = attr->query.query_flags & BPF_F_QUERY_EFFECTIVE;
@@ -1259,7 +1259,8 @@ static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
return -EFAULT;
if (!effective_query && from_atype == to_atype)
revision = cgrp->bpf.revisions[from_atype];
- if (copy_to_user(&uattr->query.revision, &revision, sizeof(revision)))
+ if (uattr_size >= offsetofend(union bpf_attr, query.revision) &&
+ copy_to_user(&uattr->query.revision, &revision, sizeof(revision)))
return -EFAULT;
if (attr->query.prog_cnt == 0 || !prog_ids || !total_cnt)
/* return early if user requested only program count + flags */
@@ -1312,12 +1313,12 @@ static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
}
static int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
- union bpf_attr __user *uattr)
+ union bpf_attr __user *uattr, u32 uattr_size)
{
int ret;
cgroup_lock();
- ret = __cgroup_bpf_query(cgrp, attr, uattr);
+ ret = __cgroup_bpf_query(cgrp, attr, uattr, uattr_size);
cgroup_unlock();
return ret;
}
@@ -1520,7 +1521,7 @@ int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
}
int cgroup_bpf_prog_query(const union bpf_attr *attr,
- union bpf_attr __user *uattr)
+ union bpf_attr __user *uattr, u32 uattr_size)
{
struct cgroup *cgrp;
int ret;
@@ -1529,7 +1530,7 @@ int cgroup_bpf_prog_query(const union bpf_attr *attr,
if (IS_ERR(cgrp))
return PTR_ERR(cgrp);
- ret = cgroup_bpf_query(cgrp, attr, uattr);
+ ret = cgroup_bpf_query(cgrp, attr, uattr, uattr_size);
cgroup_put(cgrp);
return ret;
diff --git a/kernel/bpf/mprog.c b/kernel/bpf/mprog.c
index 1394168062e8..822d9c4c0db4 100644
--- a/kernel/bpf/mprog.c
+++ b/kernel/bpf/mprog.c
@@ -393,7 +393,7 @@ int bpf_mprog_detach(struct bpf_mprog_entry *entry,
}
int bpf_mprog_query(const union bpf_attr *attr, union bpf_attr __user *uattr,
- struct bpf_mprog_entry *entry)
+ u32 uattr_size, struct bpf_mprog_entry *entry)
{
u32 __user *uprog_flags, *ulink_flags;
u32 __user *uprog_id, *ulink_id;
@@ -413,7 +413,8 @@ int bpf_mprog_query(const union bpf_attr *attr, union bpf_attr __user *uattr,
}
if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
return -EFAULT;
- if (copy_to_user(&uattr->query.revision, &revision, sizeof(revision)))
+ if (uattr_size >= offsetofend(union bpf_attr, query.revision) &&
+ copy_to_user(&uattr->query.revision, &revision, sizeof(revision)))
return -EFAULT;
if (copy_to_user(&uattr->query.count, &count, sizeof(count)))
return -EFAULT;
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index a3c0214ca934..a46b0510d9e2 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -4654,7 +4654,7 @@ static int bpf_prog_detach(const union bpf_attr *attr)
#define BPF_PROG_QUERY_LAST_FIELD query.revision
static int bpf_prog_query(const union bpf_attr *attr,
- union bpf_attr __user *uattr)
+ union bpf_attr __user *uattr, u32 uattr_size)
{
if (!bpf_net_capable())
return -EPERM;
@@ -4693,7 +4693,7 @@ static int bpf_prog_query(const union bpf_attr *attr,
case BPF_CGROUP_GETSOCKOPT:
case BPF_CGROUP_SETSOCKOPT:
case BPF_LSM_CGROUP:
- return cgroup_bpf_prog_query(attr, uattr);
+ return cgroup_bpf_prog_query(attr, uattr, uattr_size);
case BPF_LIRC_MODE2:
return lirc_prog_query(attr, uattr);
case BPF_FLOW_DISSECTOR:
@@ -4706,10 +4706,10 @@ static int bpf_prog_query(const union bpf_attr *attr,
return sock_map_bpf_prog_query(attr, uattr);
case BPF_TCX_INGRESS:
case BPF_TCX_EGRESS:
- return tcx_prog_query(attr, uattr);
+ return tcx_prog_query(attr, uattr, uattr_size);
case BPF_NETKIT_PRIMARY:
case BPF_NETKIT_PEER:
- return netkit_prog_query(attr, uattr);
+ return netkit_prog_query(attr, uattr, uattr_size);
default:
return -EINVAL;
}
@@ -6260,20 +6260,30 @@ static int __sys_bpf(enum bpf_cmd cmd, bpfptr_t uattr, unsigned int size)
err = bpf_prog_detach(&attr);
break;
case BPF_PROG_QUERY:
- err = bpf_prog_query(&attr, uattr.user);
+ if (size < offsetofend(union bpf_attr, query.prog_cnt))
+ return -EINVAL;
+ err = bpf_prog_query(&attr, uattr.user, size);
break;
case BPF_PROG_TEST_RUN:
+ if (size < offsetofend(union bpf_attr, test.duration))
+ return -EINVAL;
err = bpf_prog_test_run(&attr, uattr.user);
break;
case BPF_PROG_GET_NEXT_ID:
+ if (size < offsetofend(union bpf_attr, next_id))
+ return -EINVAL;
err = bpf_obj_get_next_id(&attr, uattr.user,
&prog_idr, &prog_idr_lock);
break;
case BPF_MAP_GET_NEXT_ID:
+ if (size < offsetofend(union bpf_attr, next_id))
+ return -EINVAL;
err = bpf_obj_get_next_id(&attr, uattr.user,
&map_idr, &map_idr_lock);
break;
case BPF_BTF_GET_NEXT_ID:
+ if (size < offsetofend(union bpf_attr, next_id))
+ return -EINVAL;
err = bpf_obj_get_next_id(&attr, uattr.user,
&btf_idr, &btf_idr_lock);
break;
@@ -6284,6 +6294,8 @@ static int __sys_bpf(enum bpf_cmd cmd, bpfptr_t uattr, unsigned int size)
err = bpf_map_get_fd_by_id(&attr);
break;
case BPF_OBJ_GET_INFO_BY_FD:
+ if (size < offsetofend(union bpf_attr, info.info_len))
+ return -EINVAL;
err = bpf_obj_get_info_by_fd(&attr, uattr.user);
break;
case BPF_RAW_TRACEPOINT_OPEN:
@@ -6296,22 +6308,32 @@ static int __sys_bpf(enum bpf_cmd cmd, bpfptr_t uattr, unsigned int size)
err = bpf_btf_get_fd_by_id(&attr);
break;
case BPF_TASK_FD_QUERY:
+ if (size < offsetofend(union bpf_attr, task_fd_query.probe_addr))
+ return -EINVAL;
err = bpf_task_fd_query(&attr, uattr.user);
break;
case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
err = map_lookup_and_delete_elem(&attr);
break;
case BPF_MAP_LOOKUP_BATCH:
+ if (size < offsetofend(union bpf_attr, batch.flags))
+ return -EINVAL;
err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_LOOKUP_BATCH);
break;
case BPF_MAP_LOOKUP_AND_DELETE_BATCH:
+ if (size < offsetofend(union bpf_attr, batch.flags))
+ return -EINVAL;
err = bpf_map_do_batch(&attr, uattr.user,
BPF_MAP_LOOKUP_AND_DELETE_BATCH);
break;
case BPF_MAP_UPDATE_BATCH:
+ if (size < offsetofend(union bpf_attr, batch.flags))
+ return -EINVAL;
err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_UPDATE_BATCH);
break;
case BPF_MAP_DELETE_BATCH:
+ if (size < offsetofend(union bpf_attr, batch.flags))
+ return -EINVAL;
err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_DELETE_BATCH);
break;
case BPF_LINK_CREATE:
@@ -6324,6 +6346,8 @@ static int __sys_bpf(enum bpf_cmd cmd, bpfptr_t uattr, unsigned int size)
err = bpf_link_get_fd_by_id(&attr);
break;
case BPF_LINK_GET_NEXT_ID:
+ if (size < offsetofend(union bpf_attr, next_id))
+ return -EINVAL;
err = bpf_obj_get_next_id(&attr, uattr.user,
&link_idr, &link_idr_lock);
break;
diff --git a/kernel/bpf/tcx.c b/kernel/bpf/tcx.c
index 02db0113b8e7..2a91f6075511 100644
--- a/kernel/bpf/tcx.c
+++ b/kernel/bpf/tcx.c
@@ -119,7 +119,8 @@ void tcx_uninstall(struct net_device *dev, bool ingress)
tcx_entry_free(entry);
}
-int tcx_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
+int tcx_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr,
+ u32 uattr_size)
{
bool ingress = attr->query.attach_type == BPF_TCX_INGRESS;
struct net *net = current->nsproxy->net_ns;
@@ -132,7 +133,7 @@ int tcx_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
ret = -ENODEV;
goto out;
}
- ret = bpf_mprog_query(attr, uattr, tcx_entry_fetch(dev, ingress));
+ ret = bpf_mprog_query(attr, uattr, uattr_size, tcx_entry_fetch(dev, ingress));
out:
rtnl_unlock();
return ret;
--
2.54.0.563.g4f69b47b94-goog
[PATCH bpf-next 2/2] selftests/bpf: Add verification for BPF_PROG_QUERY attr size boundaries
From: Yuyang Huang @ 2026-05-15 7:15 UTC
To: Yuyang Huang
Cc: David S. Miller, Alexei Starovoitov, Andrew Lunn, Andrii Nakryiko,
Daniel Borkmann, Eduard Zingerman, Eric Dumazet, Jakub Kicinski,
Jiri Olsa, John Fastabend, Kumar Kartikeya Dwivedi,
Martin KaFai Lau, Nikolay Aleksandrov, Paolo Abeni, Shuah Khan,
Simon Horman, Song Liu, Stanislav Fomichev, Yonghong Song, bpf,
linux-kernel, linux-kselftest, netdev, Maciej Żenczykowski,
Lorenzo Colitti
Add a new selftest to verify that the BPF syscall (specifically
BPF_PROG_QUERY) correctly respects the caller-declared attribute
size boundaries:
- Optional output fields (like query.revision) are not written if the
caller-declared size ends before them.
- Calls with a size below the mandatory minimum return -EINVAL.
- Full-size calls still receive the optional fields normally.
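
The boundary calls exercised by the test reduce to the following
(sketch; 'loopback' is provided by tc_helpers.h, and the first call
assumes a tcx program is already attached):

  union bpf_attr attr = {};

  attr.query.target_ifindex = loopback;
  attr.query.attach_type = BPF_TCX_INGRESS;

  /* Old ABI size: succeeds, query.revision is left untouched */
  syscall(__NR_bpf, BPF_PROG_QUERY, &attr,
          offsetofend(union bpf_attr, query.prog_cnt));

  /* Below the mandatory minimum: fails with errno == EINVAL */
  syscall(__NR_bpf, BPF_PROG_QUERY, &attr,
          offsetofend(union bpf_attr, query.attach_type));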
Cc: Maciej Żenczykowski <maze@google.com>
Cc: Lorenzo Colitti <lorenzo@google.com>
Signed-off-by: Yuyang Huang <yuyanghuang@google.com>
Link: https://lore.kernel.org/r/CANP3RGfZTXM_u=E_atoomPZXutoQJ02nOMkCCR-YBZbOm2suWA@mail.gmail.com
Tested with virtme-ng:
# ./test_progs -t bpf_attr_size
#17/1 bpf_attr_size/query_size_boundaries:OK
#17/2 bpf_attr_size/query_mandatory_too_short_einval:OK
#17 bpf_attr_size:OK
Summary: 1/2 PASSED, 0 SKIPPED, 0 FAILED
---
.../selftests/bpf/prog_tests/bpf_attr_size.c | 84 +++++++++++++++++++
1 file changed, 84 insertions(+)
create mode 100644 tools/testing/selftests/bpf/prog_tests/bpf_attr_size.c
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_attr_size.c b/tools/testing/selftests/bpf/prog_tests/bpf_attr_size.c
new file mode 100644
index 000000000000..65fd717782de
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_attr_size.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2026 Google LLC */
+#include <linux/bpf.h>
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <test_progs.h>
+#include "test_tc_link.skel.h"
+#include "tc_helpers.h"
+
+#define OLD_QUERY_SIZE offsetofend(union bpf_attr, query.prog_cnt)
+#define FULL_QUERY_SIZE offsetofend(union bpf_attr, query.revision)
+#define SHORT_QUERY_SIZE offsetofend(union bpf_attr, query.attach_type)
+
+static void test_query_size_boundaries(void)
+{
+ LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+ LIBBPF_OPTS(bpf_prog_detach_opts, optd);
+ struct test_tc_link *skel;
+ union bpf_attr attr;
+ int fd, err;
+
+ skel = test_tc_link__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ return;
+
+ fd = bpf_program__fd(skel->progs.tc1);
+
+ err = bpf_prog_attach_opts(fd, loopback, BPF_TCX_INGRESS, &opta);
+ if (!ASSERT_OK(err, "prog_attach"))
+ goto cleanup;
+
+ /* 1. Old size: revision must not be written */
+ memset(&attr, 0, sizeof(attr));
+ attr.query.target_ifindex = loopback;
+ attr.query.attach_type = BPF_TCX_INGRESS;
+
+ err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, OLD_QUERY_SIZE);
+ if (!ASSERT_OK(err, "query_old_size"))
+ goto detach;
+
+ ASSERT_EQ(attr.query.prog_cnt, 1, "prog_cnt_written");
+ ASSERT_EQ(attr.query.revision, 0, "revision_not_written");
+
+ /* 2. Full size: revision must be written normally */
+ memset(&attr, 0, sizeof(attr));
+ attr.query.target_ifindex = loopback;
+ attr.query.attach_type = BPF_TCX_INGRESS;
+
+ err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, FULL_QUERY_SIZE);
+ if (!ASSERT_OK(err, "query_full_size"))
+ goto detach;
+
+ ASSERT_EQ(attr.query.prog_cnt, 1, "prog_cnt_written");
+ ASSERT_GT(attr.query.revision, 0, "revision_written");
+
+detach:
+ err = bpf_prog_detach_opts(fd, loopback, BPF_TCX_INGRESS, &optd);
+ ASSERT_OK(err, "prog_detach");
+cleanup:
+ test_tc_link__destroy(skel);
+}
+
+static void test_query_mandatory_too_short_einval(void)
+{
+ union bpf_attr attr;
+ int err;
+
+ /* Below minimum size: must return -EINVAL */
+ memset(&attr, 0, sizeof(attr));
+ attr.query.target_ifindex = loopback;
+ attr.query.attach_type = BPF_TCX_INGRESS;
+
+ err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, SHORT_QUERY_SIZE);
+ ASSERT_EQ(err, -1, "query_too_short_fails");
+ ASSERT_EQ(errno, EINVAL, "query_too_short_einval");
+}
+
+void test_bpf_attr_size(void)
+{
+ if (test__start_subtest("query_size_boundaries"))
+ test_query_size_boundaries();
+ if (test__start_subtest("query_mandatory_too_short_einval"))
+ test_query_mandatory_too_short_einval();
+}
--
2.54.0.563.g4f69b47b94-goog