* [PATCH bpf-next v2] bpf: Pass map file to .map_update_batch directly
@ 2022-11-11 8:07 Hou Tao
2022-11-11 17:43 ` sdf
2022-11-11 23:02 ` Yonghong Song
0 siblings, 2 replies; 5+ messages in thread
From: Hou Tao @ 2022-11-11 8:07 UTC (permalink / raw)
To: bpf, Yonghong Song
Cc: Martin KaFai Lau, Andrii Nakryiko, Song Liu, Hao Luo,
Alexei Starovoitov, Daniel Borkmann, KP Singh, Stanislav Fomichev,
Jiri Olsa, John Fastabend, Xu Kuohai, houtao1
From: Hou Tao <houtao1@huawei.com>
Currently bpf_map_do_batch() first invokes fdget(batch.map_fd) to get
the target map file, then it invokes generic_map_update_batch() to do
batch update. generic_map_update_batch() will get the target map file
by using fdget(batch.map_fd) again and pass it to
bpf_map_update_value().
The problem is that the map file returned by the second fdget() may be
NULL or a totally different file compared with the map file obtained in
bpf_map_do_batch(). The reason is that the first fdget() only guarantees
the liveness of the struct file rather than of the file descriptor, and
the file descriptor may be released by a concurrent close() through
pick_file().
It doesn't cause any problem for now, because maps with batch update
support don't use the map file in their .map_fd_get_ptr() ops. But it is
better to fix the access of a potentially invalid map file.
Using __bpf_map_get() again in generic_map_update_batch() cannot fix
the problem, because batch.map_fd may be closed and reopened, and the
returned map file may be different from the map file obtained in
bpf_map_do_batch(), so just pass the map file directly to
.map_update_batch() in bpf_map_do_batch().
Signed-off-by: Hou Tao <houtao1@huawei.com>
---
v2:
* rewrite the commit message to explain the problem and the reasoning.
v1: https://lore.kernel.org/bpf/20221107075537.1445644-1-houtao@huaweicloud.com
include/linux/bpf.h | 5 +++--
kernel/bpf/syscall.c | 31 ++++++++++++++++++-------------
2 files changed, 21 insertions(+), 15 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 798aec816970..20cfe88ee6df 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -85,7 +85,8 @@ struct bpf_map_ops {
int (*map_lookup_and_delete_batch)(struct bpf_map *map,
const union bpf_attr *attr,
union bpf_attr __user *uattr);
- int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
+ int (*map_update_batch)(struct bpf_map *map, struct file *map_file,
+ const union bpf_attr *attr,
union bpf_attr __user *uattr);
int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
union bpf_attr __user *uattr);
@@ -1776,7 +1777,7 @@ void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
int generic_map_lookup_batch(struct bpf_map *map,
const union bpf_attr *attr,
union bpf_attr __user *uattr);
-int generic_map_update_batch(struct bpf_map *map,
+int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
const union bpf_attr *attr,
union bpf_attr __user *uattr);
int generic_map_delete_batch(struct bpf_map *map,
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 85532d301124..cb8a87277bf8 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -175,8 +175,8 @@ static void maybe_wait_bpf_programs(struct bpf_map *map)
synchronize_rcu();
}
-static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
- void *value, __u64 flags)
+static int bpf_map_update_value(struct bpf_map *map, struct file *map_file,
+ void *key, void *value, __u64 flags)
{
int err;
@@ -190,7 +190,7 @@ static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
map->map_type == BPF_MAP_TYPE_SOCKMAP) {
return sock_map_update_elem_sys(map, key, value, flags);
} else if (IS_FD_PROG_ARRAY(map)) {
- return bpf_fd_array_map_update_elem(map, f.file, key, value,
+ return bpf_fd_array_map_update_elem(map, map_file, key, value,
flags);
}
@@ -205,12 +205,12 @@ static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
flags);
} else if (IS_FD_ARRAY(map)) {
rcu_read_lock();
- err = bpf_fd_array_map_update_elem(map, f.file, key, value,
+ err = bpf_fd_array_map_update_elem(map, map_file, key, value,
flags);
rcu_read_unlock();
} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
rcu_read_lock();
- err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
+ err = bpf_fd_htab_map_update_elem(map, map_file, key, value,
flags);
rcu_read_unlock();
} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
@@ -1390,7 +1390,7 @@ static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
goto free_key;
}
- err = bpf_map_update_value(map, f, key, value, attr->flags);
+ err = bpf_map_update_value(map, f.file, key, value, attr->flags);
kvfree(value);
free_key:
@@ -1576,16 +1576,14 @@ int generic_map_delete_batch(struct bpf_map *map,
return err;
}
-int generic_map_update_batch(struct bpf_map *map,
+int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
const union bpf_attr *attr,
union bpf_attr __user *uattr)
{
void __user *values = u64_to_user_ptr(attr->batch.values);
void __user *keys = u64_to_user_ptr(attr->batch.keys);
u32 value_size, cp, max_count;
- int ufd = attr->batch.map_fd;
void *key, *value;
- struct fd f;
int err = 0;
if (attr->batch.elem_flags & ~BPF_F_LOCK)
@@ -1612,7 +1610,6 @@ int generic_map_update_batch(struct bpf_map *map,
return -ENOMEM;
}
- f = fdget(ufd); /* bpf_map_do_batch() guarantees ufd is valid */
for (cp = 0; cp < max_count; cp++) {
err = -EFAULT;
if (copy_from_user(key, keys + cp * map->key_size,
@@ -1620,7 +1617,7 @@ int generic_map_update_batch(struct bpf_map *map,
copy_from_user(value, values + cp * value_size, value_size))
break;
- err = bpf_map_update_value(map, f, key, value,
+ err = bpf_map_update_value(map, map_file, key, value,
attr->batch.elem_flags);
if (err)
@@ -1633,7 +1630,6 @@ int generic_map_update_batch(struct bpf_map *map,
kvfree(value);
kvfree(key);
- fdput(f);
return err;
}
@@ -4435,6 +4431,15 @@ static int bpf_task_fd_query(const union bpf_attr *attr,
err = fn(map, attr, uattr); \
} while (0)
+#define BPF_DO_BATCH_WITH_FILE(fn) \
+ do { \
+ if (!fn) { \
+ err = -ENOTSUPP; \
+ goto err_put; \
+ } \
+ err = fn(map, f.file, attr, uattr); \
+ } while (0)
+
static int bpf_map_do_batch(const union bpf_attr *attr,
union bpf_attr __user *uattr,
int cmd)
@@ -4470,7 +4475,7 @@ static int bpf_map_do_batch(const union bpf_attr *attr,
else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH)
BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch);
else if (cmd == BPF_MAP_UPDATE_BATCH)
- BPF_DO_BATCH(map->ops->map_update_batch);
+ BPF_DO_BATCH_WITH_FILE(map->ops->map_update_batch);
else
BPF_DO_BATCH(map->ops->map_delete_batch);
err_put:
--
2.29.2
^ permalink raw reply related [flat|nested] 5+ messages in thread* Re: [PATCH bpf-next v2] bpf: Pass map file to .map_update_batch directly
2022-11-11 8:07 [PATCH bpf-next v2] bpf: Pass map file to .map_update_batch directly Hou Tao
@ 2022-11-11 17:43 ` sdf
2022-11-14 17:49 ` Daniel Borkmann
2022-11-11 23:02 ` Yonghong Song
1 sibling, 1 reply; 5+ messages in thread
From: sdf @ 2022-11-11 17:43 UTC (permalink / raw)
To: Hou Tao
Cc: bpf, Yonghong Song, Martin KaFai Lau, Andrii Nakryiko, Song Liu,
Hao Luo, Alexei Starovoitov, Daniel Borkmann, KP Singh, Jiri Olsa,
John Fastabend, Xu Kuohai, houtao1
On 11/11, Hou Tao wrote:
> From: Hou Tao <houtao1@huawei.com>
> Currently bpf_map_do_batch() first invokes fdget(batch.map_fd) to get
> the target map file, then it invokes generic_map_update_batch() to do
> batch update. generic_map_update_batch() will get the target map file
> by using fdget(batch.map_fd) again and pass it to
> bpf_map_update_value().
> The problem is map file returned by the second fdget() may be NULL or a
> totally different file compared by map file in bpf_map_do_batch(). The
> reason is that the first fdget() only guarantees the liveness of struct
> file instead of file descriptor and the file description may be released
> by concurrent close() through pick_file().
> It doesn't incur any problem as for now, because maps with batch update
> support don't use map file in .map_fd_get_ptr() ops. But it is better to
> fix the access of a potentially invalid map file.
> using __bpf_map_get() again in generic_map_update_batch() can not fix
> the problem, because batch.map_fd may be closed and reopened, and the
> returned map file may be different with map file got in
> bpf_map_do_batch(), so just passing the map file directly to
> .map_update_batch() in bpf_map_do_batch().
> Signed-off-by: Hou Tao <houtao1@huawei.com>
Acked-by: Stanislav Fomichev <sdf@google.com>
> ---
> v2:
> * rewrite the commit message to explain the problem and the reasoning.
> v1:
> https://lore.kernel.org/bpf/20221107075537.1445644-1-houtao@huaweicloud.com
> include/linux/bpf.h | 5 +++--
> kernel/bpf/syscall.c | 31 ++++++++++++++++++-------------
> 2 files changed, 21 insertions(+), 15 deletions(-)
> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index 798aec816970..20cfe88ee6df 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -85,7 +85,8 @@ struct bpf_map_ops {
> int (*map_lookup_and_delete_batch)(struct bpf_map *map,
> const union bpf_attr *attr,
> union bpf_attr __user *uattr);
> - int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
> + int (*map_update_batch)(struct bpf_map *map, struct file *map_file,
> + const union bpf_attr *attr,
> union bpf_attr __user *uattr);
> int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
> union bpf_attr __user *uattr);
> @@ -1776,7 +1777,7 @@ void bpf_map_init_from_attr(struct bpf_map *map,
> union bpf_attr *attr);
> int generic_map_lookup_batch(struct bpf_map *map,
> const union bpf_attr *attr,
> union bpf_attr __user *uattr);
> -int generic_map_update_batch(struct bpf_map *map,
> +int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
> const union bpf_attr *attr,
> union bpf_attr __user *uattr);
> int generic_map_delete_batch(struct bpf_map *map,
> diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
> index 85532d301124..cb8a87277bf8 100644
> --- a/kernel/bpf/syscall.c
> +++ b/kernel/bpf/syscall.c
> @@ -175,8 +175,8 @@ static void maybe_wait_bpf_programs(struct bpf_map
> *map)
> synchronize_rcu();
> }
> -static int bpf_map_update_value(struct bpf_map *map, struct fd f, void
> *key,
> - void *value, __u64 flags)
> +static int bpf_map_update_value(struct bpf_map *map, struct file
> *map_file,
> + void *key, void *value, __u64 flags)
> {
> int err;
> @@ -190,7 +190,7 @@ static int bpf_map_update_value(struct bpf_map *map,
> struct fd f, void *key,
> map->map_type == BPF_MAP_TYPE_SOCKMAP) {
> return sock_map_update_elem_sys(map, key, value, flags);
> } else if (IS_FD_PROG_ARRAY(map)) {
> - return bpf_fd_array_map_update_elem(map, f.file, key, value,
> + return bpf_fd_array_map_update_elem(map, map_file, key, value,
> flags);
> }
> @@ -205,12 +205,12 @@ static int bpf_map_update_value(struct bpf_map
> *map, struct fd f, void *key,
> flags);
> } else if (IS_FD_ARRAY(map)) {
> rcu_read_lock();
> - err = bpf_fd_array_map_update_elem(map, f.file, key, value,
> + err = bpf_fd_array_map_update_elem(map, map_file, key, value,
> flags);
> rcu_read_unlock();
> } else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
> rcu_read_lock();
> - err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
> + err = bpf_fd_htab_map_update_elem(map, map_file, key, value,
> flags);
> rcu_read_unlock();
> } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
> @@ -1390,7 +1390,7 @@ static int map_update_elem(union bpf_attr *attr,
> bpfptr_t uattr)
> goto free_key;
> }
> - err = bpf_map_update_value(map, f, key, value, attr->flags);
> + err = bpf_map_update_value(map, f.file, key, value, attr->flags);
> kvfree(value);
> free_key:
> @@ -1576,16 +1576,14 @@ int generic_map_delete_batch(struct bpf_map *map,
> return err;
> }
> -int generic_map_update_batch(struct bpf_map *map,
> +int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
> const union bpf_attr *attr,
> union bpf_attr __user *uattr)
> {
> void __user *values = u64_to_user_ptr(attr->batch.values);
> void __user *keys = u64_to_user_ptr(attr->batch.keys);
> u32 value_size, cp, max_count;
> - int ufd = attr->batch.map_fd;
> void *key, *value;
> - struct fd f;
> int err = 0;
> if (attr->batch.elem_flags & ~BPF_F_LOCK)
> @@ -1612,7 +1610,6 @@ int generic_map_update_batch(struct bpf_map *map,
> return -ENOMEM;
> }
> - f = fdget(ufd); /* bpf_map_do_batch() guarantees ufd is valid */
> for (cp = 0; cp < max_count; cp++) {
> err = -EFAULT;
> if (copy_from_user(key, keys + cp * map->key_size,
> @@ -1620,7 +1617,7 @@ int generic_map_update_batch(struct bpf_map *map,
> copy_from_user(value, values + cp * value_size, value_size))
> break;
> - err = bpf_map_update_value(map, f, key, value,
> + err = bpf_map_update_value(map, map_file, key, value,
> attr->batch.elem_flags);
> if (err)
> @@ -1633,7 +1630,6 @@ int generic_map_update_batch(struct bpf_map *map,
> kvfree(value);
> kvfree(key);
> - fdput(f);
> return err;
> }
> @@ -4435,6 +4431,15 @@ static int bpf_task_fd_query(const union bpf_attr
> *attr,
> err = fn(map, attr, uattr); \
> } while (0)
[..]
> +#define BPF_DO_BATCH_WITH_FILE(fn) \
> + do { \
> + if (!fn) { \
> + err = -ENOTSUPP; \
> + goto err_put; \
> + } \
> + err = fn(map, f.file, attr, uattr); \
> + } while (0)
> +
nit: probably not worth defining this for a single user? but not sure
it matters..
> static int bpf_map_do_batch(const union bpf_attr *attr,
> union bpf_attr __user *uattr,
> int cmd)
> @@ -4470,7 +4475,7 @@ static int bpf_map_do_batch(const union bpf_attr
> *attr,
> else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH)
> BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch);
> else if (cmd == BPF_MAP_UPDATE_BATCH)
> - BPF_DO_BATCH(map->ops->map_update_batch);
> + BPF_DO_BATCH_WITH_FILE(map->ops->map_update_batch);
> else
> BPF_DO_BATCH(map->ops->map_delete_batch);
> err_put:
> --
> 2.29.2
^ permalink raw reply [flat|nested] 5+ messages in thread* Re: [PATCH bpf-next v2] bpf: Pass map file to .map_update_batch directly
2022-11-11 17:43 ` sdf
2022-11-14 17:49 ` Daniel Borkmann
2022-11-15 11:18 ` Hou Tao
0 siblings, 1 reply; 5+ messages in thread
From: Daniel Borkmann @ 2022-11-14 17:49 UTC (permalink / raw)
To: sdf
Cc: Hou Tao, bpf, Yonghong Song, Martin KaFai Lau, Andrii Nakryiko,
Song Liu, Hao Luo, Alexei Starovoitov, Daniel Borkmann, KP Singh,
Jiri Olsa, John Fastabend, Xu Kuohai, houtao1
On Fri, Nov 11, 2022 at 09:43:18AM -0800, sdf@google.com wrote:
> On 11/11, Hou Tao wrote:
> > From: Hou Tao <houtao1@huawei.com>
>
> > Currently bpf_map_do_batch() first invokes fdget(batch.map_fd) to get
> > the target map file, then it invokes generic_map_update_batch() to do
> > batch update. generic_map_update_batch() will get the target map file
> > by using fdget(batch.map_fd) again and pass it to
> > bpf_map_update_value().
>
> > The problem is map file returned by the second fdget() may be NULL or a
> > totally different file compared by map file in bpf_map_do_batch(). The
> > reason is that the first fdget() only guarantees the liveness of struct
> > file instead of file descriptor and the file description may be released
> > by concurrent close() through pick_file().
>
> > It doesn't incur any problem as for now, because maps with batch update
> > support don't use map file in .map_fd_get_ptr() ops. But it is better to
> > fix the access of a potentially invalid map file.
Right, that's mainly for the perf RB map ...
> > using __bpf_map_get() again in generic_map_update_batch() can not fix
> > the problem, because batch.map_fd may be closed and reopened, and the
> > returned map file may be different with map file got in
> > bpf_map_do_batch(), so just passing the map file directly to
> > .map_update_batch() in bpf_map_do_batch().
>
> > Signed-off-by: Hou Tao <houtao1@huawei.com>
>
> Acked-by: Stanislav Fomichev <sdf@google.com>
> [..]
>
> > +#define BPF_DO_BATCH_WITH_FILE(fn) \
> > + do { \
> > + if (!fn) { \
> > + err = -ENOTSUPP; \
> > + goto err_put; \
> > + } \
> > + err = fn(map, f.file, attr, uattr); \
> > + } while (0)
> > +
>
> nit: probably not worth defining this for a single user? but not sure
> it matters..
Yeah, just the BPF_DO_BATCH could be used but extended via __VA_ARGS__.
Thanks,
Daniel
^ permalink raw reply [flat|nested] 5+ messages in thread* Re: [PATCH bpf-next v2] bpf: Pass map file to .map_update_batch directly
2022-11-14 17:49 ` Daniel Borkmann
@ 2022-11-15 11:18 ` Hou Tao
0 siblings, 0 replies; 5+ messages in thread
From: Hou Tao @ 2022-11-15 11:18 UTC (permalink / raw)
To: Daniel Borkmann, sdf
Cc: bpf, Yonghong Song, Martin KaFai Lau, Andrii Nakryiko, Song Liu,
Hao Luo, Alexei Starovoitov, Daniel Borkmann, KP Singh, Jiri Olsa,
John Fastabend, Xu Kuohai, houtao1
Hi,
On 11/15/2022 1:49 AM, Daniel Borkmann wrote:
> On Fri, Nov 11, 2022 at 09:43:18AM -0800, sdf@google.com wrote:
>> On 11/11, Hou Tao wrote:
>>> From: Hou Tao <houtao1@huawei.com>
>>> Currently bpf_map_do_batch() first invokes fdget(batch.map_fd) to get
>>> the target map file, then it invokes generic_map_update_batch() to do
>>> batch update. generic_map_update_batch() will get the target map file
>>> by using fdget(batch.map_fd) again and pass it to
>>> bpf_map_update_value().
>>> The problem is map file returned by the second fdget() may be NULL or a
>>> totally different file compared by map file in bpf_map_do_batch(). The
>>> reason is that the first fdget() only guarantees the liveness of struct
>>> file instead of file descriptor and the file description may be released
>>> by concurrent close() through pick_file().
>>> It doesn't incur any problem as for now, because maps with batch update
>>> support don't use map file in .map_fd_get_ptr() ops. But it is better to
>>> fix the access of a potentially invalid map file.
> Right, that's mainly for the perf RB map ...
Yes. BPF_MAP_TYPE_PERF_EVENT_ARRAY will use the passed map file, but it doesn't
support batch update.
>
>>> using __bpf_map_get() again in generic_map_update_batch() can not fix
>>> the problem, because batch.map_fd may be closed and reopened, and the
>>> returned map file may be different with map file got in
>>> bpf_map_do_batch(), so just passing the map file directly to
>>> .map_update_batch() in bpf_map_do_batch().
>>> Signed-off-by: Hou Tao <houtao1@huawei.com>
>> Acked-by: Stanislav Fomichev <sdf@google.com>
>> [..]
>>
>>> +#define BPF_DO_BATCH_WITH_FILE(fn) \
>>> + do { \
>>> + if (!fn) { \
>>> + err = -ENOTSUPP; \
>>> + goto err_put; \
>>> + } \
>>> + err = fn(map, f.file, attr, uattr); \
>>> + } while (0)
>>> +
>> nit: probably not worth defining this for a single user? but not sure
>> it matters..
> Yeah, just the BPF_DO_BATCH could be used but extended via __VA_ARGS__.
Good idea. Will do in v3.
>
> Thanks,
> Daniel
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [PATCH bpf-next v2] bpf: Pass map file to .map_update_batch directly
2022-11-11 8:07 [PATCH bpf-next v2] bpf: Pass map file to .map_update_batch directly Hou Tao
2022-11-11 17:43 ` sdf
@ 2022-11-11 23:02 ` Yonghong Song
1 sibling, 0 replies; 5+ messages in thread
From: Yonghong Song @ 2022-11-11 23:02 UTC (permalink / raw)
To: Hou Tao, bpf, Yonghong Song
Cc: Martin KaFai Lau, Andrii Nakryiko, Song Liu, Hao Luo,
Alexei Starovoitov, Daniel Borkmann, KP Singh, Stanislav Fomichev,
Jiri Olsa, John Fastabend, Xu Kuohai, houtao1
On 11/11/22 12:07 AM, Hou Tao wrote:
> From: Hou Tao <houtao1@huawei.com>
>
> Currently bpf_map_do_batch() first invokes fdget(batch.map_fd) to get
> the target map file, then it invokes generic_map_update_batch() to do
> batch update. generic_map_update_batch() will get the target map file
> by using fdget(batch.map_fd) again and pass it to
> bpf_map_update_value().
>
> The problem is map file returned by the second fdget() may be NULL or a
> totally different file compared by map file in bpf_map_do_batch(). The
> reason is that the first fdget() only guarantees the liveness of struct
> file instead of file descriptor and the file description may be released
> by concurrent close() through pick_file().
>
> It doesn't incur any problem as for now, because maps with batch update
> support don't use map file in .map_fd_get_ptr() ops. But it is better to
> fix the access of a potentially invalid map file.
>
> using __bpf_map_get() again in generic_map_update_batch() can not fix
> the problem, because batch.map_fd may be closed and reopened, and the
> returned map file may be different with map file got in
> bpf_map_do_batch(), so just passing the map file directly to
> .map_update_batch() in bpf_map_do_batch().
>
> Signed-off-by: Hou Tao <houtao1@huawei.com>
Acked-by: Yonghong Song <yhs@fb.com>
^ permalink raw reply [flat|nested] 5+ messages in thread
end of thread, other threads:[~2022-11-15 11:20 UTC | newest]
Thread overview: 5+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2022-11-11 8:07 [PATCH bpf-next v2] bpf: Pass map file to .map_update_batch directly Hou Tao
2022-11-11 17:43 ` sdf
2022-11-14 17:49 ` Daniel Borkmann
2022-11-15 11:18 ` Hou Tao
2022-11-11 23:02 ` Yonghong Song
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox