* [bpf-next v2 1/2] bpf: Add bpf_copy_from_user_str kfunc
@ 2024-08-11 23:54 Jordan Rome
2024-08-11 23:54 ` [bpf-next v2 2/2] bpf: Add tests for " Jordan Rome
2024-08-12 15:55 ` [bpf-next v2 1/2] bpf: Add " Kui-Feng Lee
0 siblings, 2 replies; 4+ messages in thread
From: Jordan Rome @ 2024-08-11 23:54 UTC (permalink / raw)
To: bpf
Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
Martin KaFai Lau, Kernel Team, sinquersw
This adds a kfunc wrapper around strncpy_from_user,
which can be called from sleepable BPF programs.
This matches the non-sleepable 'bpf_probe_read_user_str'
helper.
Signed-off-by: Jordan Rome <linux@jordanrome.com>
---
kernel/bpf/helpers.c | 32 ++++++++++++++++++++++++++++++++
1 file changed, 32 insertions(+)
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index d02ae323996b..5eeb7c2ca622 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -2939,6 +2939,37 @@ __bpf_kfunc void bpf_iter_bits_destroy(struct bpf_iter_bits *it)
bpf_mem_free(&bpf_global_ma, kit->bits);
}
+/**
+ * bpf_copy_from_user_str() - Copy a string from an unsafe user address
+ * @dst: Destination address, in kernel space. This buffer must be at
+ * least @dst__szk bytes long.
+ * @dst__szk: Maximum number of bytes to copy, including the trailing NUL.
+ * @unsafe_ptr__ign: Source address, in user space.
+ *
+ * Copies a NUL-terminated string from userspace to BPF space. If user string is
+ * too long this will still ensure zero termination in the dst buffer unless
+ * buffer size is 0.
+ */
+__bpf_kfunc int bpf_copy_from_user_str(void *dst, u32 dst__szk, const void __user *unsafe_ptr__ign)
+{
+ int ret;
+
+ if (unlikely(!dst__szk))
+ return 0;
+
+ ret = strncpy_from_user(dst, unsafe_ptr__ign, dst__szk);
+ if (unlikely(ret < 0)) {
+ memset(dst, 0, dst__szk);
+ } else if (ret >= dst__szk) {
+ ret = dst__szk;
+ ((char *)dst)[ret - 1] = '\0';
+ } else if (ret > 0) {
+ ret++;
+ }
+
+ return ret;
+}
+
__bpf_kfunc_end_defs();
BTF_KFUNCS_START(generic_btf_ids)
@@ -3024,6 +3055,7 @@ BTF_ID_FLAGS(func, bpf_preempt_enable)
BTF_ID_FLAGS(func, bpf_iter_bits_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_bits_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_bits_destroy, KF_ITER_DESTROY)
+BTF_ID_FLAGS(func, bpf_copy_from_user_str, KF_SLEEPABLE)
BTF_KFUNCS_END(common_btf_ids)
static const struct btf_kfunc_id_set common_kfunc_set = {
--
2.44.1
^ permalink raw reply related [flat|nested] 4+ messages in thread
* [bpf-next v2 2/2] bpf: Add tests for bpf_copy_from_user_str kfunc
2024-08-11 23:54 [bpf-next v2 1/2] bpf: Add bpf_copy_from_user_str kfunc Jordan Rome
@ 2024-08-11 23:54 ` Jordan Rome
2024-08-12 15:55 ` [bpf-next v2 1/2] bpf: Add " Kui-Feng Lee
1 sibling, 0 replies; 4+ messages in thread
From: Jordan Rome @ 2024-08-11 23:54 UTC (permalink / raw)
To: bpf
Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
Martin KaFai Lau, Kernel Team, sinquersw
This adds tests for both the happy path and
the error path.
Signed-off-by: Jordan Rome <linux@jordanrome.com>
---
.../selftests/bpf/prog_tests/attach_probe.c | 8 ++++---
.../selftests/bpf/prog_tests/read_vsyscall.c | 1 +
.../selftests/bpf/progs/read_vsyscall.c | 9 ++++++-
.../selftests/bpf/progs/test_attach_probe.c | 24 ++++++++++++++++---
4 files changed, 35 insertions(+), 7 deletions(-)
diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
index 7175af39134f..329c7862b52d 100644
--- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c
+++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
@@ -283,9 +283,11 @@ static void test_uprobe_sleepable(struct test_attach_probe *skel)
trigger_func3();
ASSERT_EQ(skel->bss->uprobe_byname3_sleepable_res, 9, "check_uprobe_byname3_sleepable_res");
- ASSERT_EQ(skel->bss->uprobe_byname3_res, 10, "check_uprobe_byname3_res");
- ASSERT_EQ(skel->bss->uretprobe_byname3_sleepable_res, 11, "check_uretprobe_byname3_sleepable_res");
- ASSERT_EQ(skel->bss->uretprobe_byname3_res, 12, "check_uretprobe_byname3_res");
+ ASSERT_EQ(skel->bss->uprobe_byname3_str_sleepable_res, 10, "check_uprobe_byname3_str_sleepable_res");
+ ASSERT_EQ(skel->bss->uprobe_byname3_res, 11, "check_uprobe_byname3_res");
+ ASSERT_EQ(skel->bss->uretprobe_byname3_sleepable_res, 12, "check_uretprobe_byname3_sleepable_res");
+ ASSERT_EQ(skel->bss->uretprobe_byname3_str_sleepable_res, 13, "check_uretprobe_byname3_str_sleepable_res");
+ ASSERT_EQ(skel->bss->uretprobe_byname3_res, 14, "check_uretprobe_byname3_res");
}
void test_attach_probe(void)
diff --git a/tools/testing/selftests/bpf/prog_tests/read_vsyscall.c b/tools/testing/selftests/bpf/prog_tests/read_vsyscall.c
index 3405923fe4e6..c7b9ba8b1d06 100644
--- a/tools/testing/selftests/bpf/prog_tests/read_vsyscall.c
+++ b/tools/testing/selftests/bpf/prog_tests/read_vsyscall.c
@@ -23,6 +23,7 @@ struct read_ret_desc {
{ .name = "probe_read_user_str", .ret = -EFAULT },
{ .name = "copy_from_user", .ret = -EFAULT },
{ .name = "copy_from_user_task", .ret = -EFAULT },
+ { .name = "copy_from_user_str", .ret = -EFAULT },
};
void test_read_vsyscall(void)
diff --git a/tools/testing/selftests/bpf/progs/read_vsyscall.c b/tools/testing/selftests/bpf/progs/read_vsyscall.c
index 986f96687ae1..27de1e907754 100644
--- a/tools/testing/selftests/bpf/progs/read_vsyscall.c
+++ b/tools/testing/selftests/bpf/progs/read_vsyscall.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2024. Huawei Technologies Co., Ltd */
+#include "vmlinux.h"
#include <linux/types.h>
#include <bpf/bpf_helpers.h>
@@ -7,10 +8,15 @@
int target_pid = 0;
void *user_ptr = 0;
-int read_ret[8];
+int read_ret[9];
char _license[] SEC("license") = "GPL";
+/*
+ * This is the only kfunc, the others are helpers
+ */
+int bpf_copy_from_user_str(void *dst, u32, const void *) __weak __ksym;
+
SEC("fentry/" SYS_PREFIX "sys_nanosleep")
int do_probe_read(void *ctx)
{
@@ -40,6 +46,7 @@ int do_copy_from_user(void *ctx)
read_ret[6] = bpf_copy_from_user(buf, sizeof(buf), user_ptr);
read_ret[7] = bpf_copy_from_user_task(buf, sizeof(buf), user_ptr,
bpf_get_current_task_btf(), 0);
+ read_ret[8] = bpf_copy_from_user_str((char *)buf, sizeof(buf), user_ptr);
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_attach_probe.c b/tools/testing/selftests/bpf/progs/test_attach_probe.c
index 68466a6ad18c..8b63d9507625 100644
--- a/tools/testing/selftests/bpf/progs/test_attach_probe.c
+++ b/tools/testing/selftests/bpf/progs/test_attach_probe.c
@@ -14,11 +14,15 @@ int uretprobe_byname_res = 0;
int uprobe_byname2_res = 0;
int uretprobe_byname2_res = 0;
int uprobe_byname3_sleepable_res = 0;
+int uprobe_byname3_str_sleepable_res = 0;
int uprobe_byname3_res = 0;
int uretprobe_byname3_sleepable_res = 0;
+int uretprobe_byname3_str_sleepable_res = 0;
int uretprobe_byname3_res = 0;
void *user_ptr = 0;
+int bpf_copy_from_user_str(void *dst, u32, const void *) __weak __ksym;
+
SEC("ksyscall/nanosleep")
int BPF_KSYSCALL(handle_kprobe_auto, struct __kernel_timespec *req, struct __kernel_timespec *rem)
{
@@ -87,11 +91,23 @@ static __always_inline bool verify_sleepable_user_copy(void)
return bpf_strncmp(data, sizeof(data), "test_data") == 0;
}
+static __always_inline bool verify_sleepable_user_copy_str(void)
+{
+ int ret;
+ char data[4];
+
+ ret = bpf_copy_from_user_str(data, sizeof(data), user_ptr);
+
+ return bpf_strncmp(data, 3, "tes") == 0 && ret == 4 && data[3] == '\0';
+}
+
SEC("uprobe.s//proc/self/exe:trigger_func3")
int handle_uprobe_byname3_sleepable(struct pt_regs *ctx)
{
if (verify_sleepable_user_copy())
uprobe_byname3_sleepable_res = 9;
+ if (verify_sleepable_user_copy_str())
+ uprobe_byname3_str_sleepable_res = 10;
return 0;
}
@@ -102,7 +118,7 @@ int handle_uprobe_byname3_sleepable(struct pt_regs *ctx)
SEC("uprobe//proc/self/exe:trigger_func3")
int handle_uprobe_byname3(struct pt_regs *ctx)
{
- uprobe_byname3_res = 10;
+ uprobe_byname3_res = 11;
return 0;
}
@@ -110,14 +126,16 @@ SEC("uretprobe.s//proc/self/exe:trigger_func3")
int handle_uretprobe_byname3_sleepable(struct pt_regs *ctx)
{
if (verify_sleepable_user_copy())
- uretprobe_byname3_sleepable_res = 11;
+ uretprobe_byname3_sleepable_res = 12;
+ if (verify_sleepable_user_copy_str())
+ uretprobe_byname3_str_sleepable_res = 13;
return 0;
}
SEC("uretprobe//proc/self/exe:trigger_func3")
int handle_uretprobe_byname3(struct pt_regs *ctx)
{
- uretprobe_byname3_res = 12;
+ uretprobe_byname3_res = 14;
return 0;
}
--
2.44.1
^ permalink raw reply related [flat|nested] 4+ messages in thread
* Re: [bpf-next v2 1/2] bpf: Add bpf_copy_from_user_str kfunc
2024-08-11 23:54 [bpf-next v2 1/2] bpf: Add bpf_copy_from_user_str kfunc Jordan Rome
2024-08-11 23:54 ` [bpf-next v2 2/2] bpf: Add tests for " Jordan Rome
@ 2024-08-12 15:55 ` Kui-Feng Lee
2024-08-12 21:03 ` Jordan Rome
1 sibling, 1 reply; 4+ messages in thread
From: Kui-Feng Lee @ 2024-08-12 15:55 UTC (permalink / raw)
To: Jordan Rome, bpf
Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
Martin KaFai Lau, Kernel Team
On 8/11/24 16:54, Jordan Rome wrote:
> This adds a kfunc wrapper around strncpy_from_user,
> which can be called from sleepable BPF programs.
>
> This matches the non-sleepable 'bpf_probe_read_user_str'
> helper.
>
> Signed-off-by: Jordan Rome <linux@jordanrome.com>
> ---
> kernel/bpf/helpers.c | 32 ++++++++++++++++++++++++++++++++
> 1 file changed, 32 insertions(+)
>
> diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
> index d02ae323996b..5eeb7c2ca622 100644
> --- a/kernel/bpf/helpers.c
> +++ b/kernel/bpf/helpers.c
> @@ -2939,6 +2939,37 @@ __bpf_kfunc void bpf_iter_bits_destroy(struct bpf_iter_bits *it)
> bpf_mem_free(&bpf_global_ma, kit->bits);
> }
>
> +/**
> + * bpf_copy_from_user_str() - Copy a string from an unsafe user address
> + * @dst: Destination address, in kernel space. This buffer must be at
> + * least @dst__szk bytes long.
> + * @dst__szk: Maximum number of bytes to copy, including the trailing NUL.
> + * @unsafe_ptr__ign: Source address, in user space.
> + *
> + * Copies a NUL-terminated string from userspace to BPF space. If user string is
> + * too long this will still ensure zero termination in the dst buffer unless
> + * buffer size is 0.
> + */
> +__bpf_kfunc int bpf_copy_from_user_str(void *dst, u32 dst__szk, const void __user *unsafe_ptr__ign)
> +{
> + int ret;
> +
> + if (unlikely(!dst__szk))
> + return 0;
> +
> + ret = strncpy_from_user(dst, unsafe_ptr__ign, dst__szk);
> + if (unlikely(ret < 0)) {
> + memset(dst, 0, dst__szk);
> + } else if (ret >= dst__szk) {
> + ret = dst__szk;
> + ((char *)dst)[ret - 1] = '\0';
> + } else if (ret > 0) {
> + ret++;
I prefer to keep consistent with strncpy_from_user().
Considering ret >= dst__szk, it is not actually copying dst__szk bytes.
The last byte is generated by this function, not copying from
the source buffer.
Copying at most dst__szk - 1 bytes is more concise.
The code could be simpler with this concept.
ret = strncpy_from_user(dst, unsafe_ptr__ign, dst__szk - 1);
((char *)dst)[max(ret, 0)] = 0;
WDYT?
> + }
> +
> + return ret;
> +}
> +
> __bpf_kfunc_end_defs();
>
> BTF_KFUNCS_START(generic_btf_ids)
> @@ -3024,6 +3055,7 @@ BTF_ID_FLAGS(func, bpf_preempt_enable)
> BTF_ID_FLAGS(func, bpf_iter_bits_new, KF_ITER_NEW)
> BTF_ID_FLAGS(func, bpf_iter_bits_next, KF_ITER_NEXT | KF_RET_NULL)
> BTF_ID_FLAGS(func, bpf_iter_bits_destroy, KF_ITER_DESTROY)
> +BTF_ID_FLAGS(func, bpf_copy_from_user_str, KF_SLEEPABLE)
> BTF_KFUNCS_END(common_btf_ids)
>
> static const struct btf_kfunc_id_set common_kfunc_set = {
> --
> 2.44.1
>
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [bpf-next v2 1/2] bpf: Add bpf_copy_from_user_str kfunc
2024-08-12 15:55 ` [bpf-next v2 1/2] bpf: Add " Kui-Feng Lee
@ 2024-08-12 21:03 ` Jordan Rome
0 siblings, 0 replies; 4+ messages in thread
From: Jordan Rome @ 2024-08-12 21:03 UTC (permalink / raw)
To: Kui-Feng Lee
Cc: bpf, Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
Martin KaFai Lau, Kernel Team
On Mon, Aug 12, 2024 at 11:55 AM Kui-Feng Lee <sinquersw@gmail.com> wrote:
>
>
>
> On 8/11/24 16:54, Jordan Rome wrote:
> > This adds a kfunc wrapper around strncpy_from_user,
> > which can be called from sleepable BPF programs.
> >
> > This matches the non-sleepable 'bpf_probe_read_user_str'
> > helper.
> >
> > Signed-off-by: Jordan Rome <linux@jordanrome.com>
> > ---
> > kernel/bpf/helpers.c | 32 ++++++++++++++++++++++++++++++++
> > 1 file changed, 32 insertions(+)
> >
> > diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
> > index d02ae323996b..5eeb7c2ca622 100644
> > --- a/kernel/bpf/helpers.c
> > +++ b/kernel/bpf/helpers.c
> > @@ -2939,6 +2939,37 @@ __bpf_kfunc void bpf_iter_bits_destroy(struct bpf_iter_bits *it)
> > bpf_mem_free(&bpf_global_ma, kit->bits);
> > }
> >
> > +/**
> > + * bpf_copy_from_user_str() - Copy a string from an unsafe user address
> > + * @dst: Destination address, in kernel space. This buffer must be at
> > + * least @dst__szk bytes long.
> > + * @dst__szk: Maximum number of bytes to copy, including the trailing NUL.
> > + * @unsafe_ptr__ign: Source address, in user space.
> > + *
> > + * Copies a NUL-terminated string from userspace to BPF space. If user string is
> > + * too long this will still ensure zero termination in the dst buffer unless
> > + * buffer size is 0.
> > + */
> > +__bpf_kfunc int bpf_copy_from_user_str(void *dst, u32 dst__szk, const void __user *unsafe_ptr__ign)
> > +{
> > + int ret;
> > +
> > + if (unlikely(!dst__szk))
> > + return 0;
> > +
> > + ret = strncpy_from_user(dst, unsafe_ptr__ign, dst__szk);
> > + if (unlikely(ret < 0)) {
> > + memset(dst, 0, dst__szk);
> > + } else if (ret >= dst__szk) {
> > + ret = dst__szk;
> > + ((char *)dst)[ret - 1] = '\0';
> > + } else if (ret > 0) {
> > + ret++;
>
> I prefer to keep consistent with strncpy_from_user().
> Considering ret >= dst__szk, it is not actually copying dst__szk bytes.
> The last byte is generated by this function, not copying from
> the source buffer.
>
> Copying at most dst__szk - 1 bytes is more concise.
> The code could be simpler with this concept.
>
> ret = strncpy_from_user(dst, unsafe_ptr__ign, dst_szk - 1);
> ((char *)dst)[max(ret, 0)] = 0;
>
> WDYT?
>
Makes sense. No need to copy extra data if we're just going to overwrite it.
> > + }
> > +
> > + return ret;
> > +}
> > +
> > __bpf_kfunc_end_defs();
> >
> > BTF_KFUNCS_START(generic_btf_ids)
> > @@ -3024,6 +3055,7 @@ BTF_ID_FLAGS(func, bpf_preempt_enable)
> > BTF_ID_FLAGS(func, bpf_iter_bits_new, KF_ITER_NEW)
> > BTF_ID_FLAGS(func, bpf_iter_bits_next, KF_ITER_NEXT | KF_RET_NULL)
> > BTF_ID_FLAGS(func, bpf_iter_bits_destroy, KF_ITER_DESTROY)
> > +BTF_ID_FLAGS(func, bpf_copy_from_user_str, KF_SLEEPABLE)
> > BTF_KFUNCS_END(common_btf_ids)
> >
> > static const struct btf_kfunc_id_set common_kfunc_set = {
> > --
> > 2.44.1
> >
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2024-08-12 21:03 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2024-08-11 23:54 [bpf-next v2 1/2] bpf: Add bpf_copy_from_user_str kfunc Jordan Rome
2024-08-11 23:54 ` [bpf-next v2 2/2] bpf: Add tests for " Jordan Rome
2024-08-12 15:55 ` [bpf-next v2 1/2] bpf: Add " Kui-Feng Lee
2024-08-12 21:03 ` Jordan Rome
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox