From: sdf@google.com
To: Kees Cook <keescook@chromium.org>
Cc: Alexei Starovoitov <ast@kernel.org>,
Daniel Borkmann <daniel@iogearbox.net>,
John Fastabend <john.fastabend@gmail.com>,
Andrii Nakryiko <andrii@kernel.org>,
Martin KaFai Lau <martin.lau@linux.dev>,
Song Liu <song@kernel.org>, Yonghong Song <yhs@fb.com>,
KP Singh <kpsingh@kernel.org>, Hao Luo <haoluo@google.com>,
Jiri Olsa <jolsa@kernel.org>,
bpf@vger.kernel.org, linux-kernel@vger.kernel.org,
linux-hardening@vger.kernel.org
Subject: Re: [PATCH] bpf: Use kmalloc_size_roundup() to match ksize() usage
Date: Tue, 18 Oct 2022 11:07:38 -0700 [thread overview]
Message-ID: <Y07raim32wOBRGPi@google.com> (raw)
In-Reply-To: <20221018090550.never.834-kees@kernel.org>
On 10/18, Kees Cook wrote:
> Round up allocations with kmalloc_size_roundup() so that the verifier's
> use of ksize() is always accurate and no special handling of the memory
> is needed by KASAN, UBSAN_BOUNDS, nor FORTIFY_SOURCE. Pass the new size
> information back up to callers so they can use the space immediately,
> so array resizing to happen less frequently as well. Explicitly zero
> any trailing bytes in new allocations.
> Additionally fix a memory allocation leak: if krealloc() fails, "arr"
> wasn't freed, but NULL was returned to the caller; the caller of
> realloc_array() would then be writing NULL to the lvalue, losing the
> reference to the original memory.
> Cc: Alexei Starovoitov <ast@kernel.org>
> Cc: Daniel Borkmann <daniel@iogearbox.net>
> Cc: John Fastabend <john.fastabend@gmail.com>
> Cc: Andrii Nakryiko <andrii@kernel.org>
> Cc: Martin KaFai Lau <martin.lau@linux.dev>
> Cc: Song Liu <song@kernel.org>
> Cc: Yonghong Song <yhs@fb.com>
> Cc: KP Singh <kpsingh@kernel.org>
> Cc: Stanislav Fomichev <sdf@google.com>
> Cc: Hao Luo <haoluo@google.com>
> Cc: Jiri Olsa <jolsa@kernel.org>
> Cc: bpf@vger.kernel.org
> Signed-off-by: Kees Cook <keescook@chromium.org>
> ---
> kernel/bpf/verifier.c | 49 +++++++++++++++++++++++++++----------------
> 1 file changed, 31 insertions(+), 18 deletions(-)
> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> index 014ee0953dbd..8a0b60207d0e 100644
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
> @@ -1000,42 +1000,53 @@ static void print_insn_state(struct
> bpf_verifier_env *env,
> */
> static void *copy_array(void *dst, const void *src, size_t n, size_t
> size, gfp_t flags)
> {
> - size_t bytes;
> + size_t src_bytes, dst_bytes;
> if (ZERO_OR_NULL_PTR(src))
> goto out;
> - if (unlikely(check_mul_overflow(n, size, &bytes)))
> + if (unlikely(check_mul_overflow(n, size, &src_bytes)))
> return NULL;
> - if (ksize(dst) < bytes) {
> + dst_bytes = kmalloc_size_roundup(src_bytes);
> + if (ksize(dst) < dst_bytes) {
Why not simply do the following here?
if (ksize(dst) < ksize(src)) {
?
It seems like we care about src_bytes/bytes only in this case, so maybe
move that check_mul_overflow under this branch as well?
> kfree(dst);
> - dst = kmalloc_track_caller(bytes, flags);
> + dst = kmalloc_track_caller(dst_bytes, flags);
> if (!dst)
> return NULL;
> }
> - memcpy(dst, src, bytes);
> + memcpy(dst, src, src_bytes);
> + memset(dst + src_bytes, 0, dst_bytes - src_bytes);
> out:
> return dst ? dst : ZERO_SIZE_PTR;
> }
> -/* resize an array from old_n items to new_n items. the array is
> reallocated if it's too
> - * small to hold new_n items. new items are zeroed out if the array
> grows.
> +/* Resize an array from old_n items to *new_n items. The array is
> reallocated if it's too
> + * small to hold *new_n items. New items are zeroed out if the array
> grows. Allocation
> + * is rounded up to next kmalloc bucket size to reduce frequency of
> resizing. *new_n
> + * contains the new total number of items that will fit.
> *
> - * Contrary to krealloc_array, does not free arr if new_n is zero.
> + * Contrary to krealloc, does not free arr if new_n is zero.
> */
> -static void *realloc_array(void *arr, size_t old_n, size_t new_n, size_t
> size)
> +static void *realloc_array(void *arr, size_t old_n, size_t *new_n,
> size_t size)
> {
> - if (!new_n || old_n == new_n)
> + void *old_arr = arr;
> + size_t alloc_size;
> +
> + if (!new_n || !*new_n || old_n == *new_n)
> goto out;
[..]
> - arr = krealloc_array(arr, new_n, size, GFP_KERNEL);
> - if (!arr)
> + alloc_size = kmalloc_size_roundup(size_mul(*new_n, size));
> + arr = krealloc(old_arr, alloc_size, GFP_KERNEL);
> + if (!arr) {
> + kfree(old_arr);
> return NULL;
> + }
Any reason not to hide this complexity behind krealloc_array? Why can't
it take care of those roundup details?
> - if (new_n > old_n)
> - memset(arr + old_n * size, 0, (new_n - old_n) * size);
> + *new_n = alloc_size / size;
> + if (*new_n > old_n)
> + memset(arr + old_n * size, 0, (*new_n - old_n) * size);
> out:
> return arr ? arr : ZERO_SIZE_PTR;
> @@ -1067,7 +1078,7 @@ static int copy_stack_state(struct bpf_func_state
> *dst, const struct bpf_func_st
> static int resize_reference_state(struct bpf_func_state *state, size_t n)
> {
> - state->refs = realloc_array(state->refs, state->acquired_refs, n,
> + state->refs = realloc_array(state->refs, state->acquired_refs, &n,
> sizeof(struct bpf_reference_state));
> if (!state->refs)
> return -ENOMEM;
> @@ -1083,11 +1094,11 @@ static int grow_stack_state(struct bpf_func_state
> *state, int size)
> if (old_n >= n)
> return 0;
> - state->stack = realloc_array(state->stack, old_n, n, sizeof(struct
> bpf_stack_state));
> + state->stack = realloc_array(state->stack, old_n, &n, sizeof(struct
> bpf_stack_state));
> if (!state->stack)
> return -ENOMEM;
> - state->allocated_stack = size;
> + state->allocated_stack = n * BPF_REG_SIZE;
> return 0;
> }
> @@ -2499,9 +2510,11 @@ static int push_jmp_history(struct
> bpf_verifier_env *env,
> {
> u32 cnt = cur->jmp_history_cnt;
> struct bpf_idx_pair *p;
> + size_t size;
> cnt++;
> - p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER);
> + size = kmalloc_size_roundup(size_mul(cnt, sizeof(*p)));
> + p = krealloc(cur->jmp_history, size, GFP_USER);
> if (!p)
> return -ENOMEM;
> p[cnt - 1].idx = env->insn_idx;
> --
> 2.34.1
next prev parent reply other threads:[~2022-10-18 18:07 UTC|newest]
Thread overview: 5+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-10-18 9:06 [PATCH] bpf: Use kmalloc_size_roundup() to match ksize() usage Kees Cook
2022-10-18 18:07 ` sdf [this message]
2022-10-18 18:19 ` Kees Cook
2022-10-18 20:07 ` Stanislav Fomichev
2022-10-28 23:19 ` Kees Cook
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=Y07raim32wOBRGPi@google.com \
--to=sdf@google.com \
--cc=andrii@kernel.org \
--cc=ast@kernel.org \
--cc=bpf@vger.kernel.org \
--cc=daniel@iogearbox.net \
--cc=haoluo@google.com \
--cc=john.fastabend@gmail.com \
--cc=jolsa@kernel.org \
--cc=keescook@chromium.org \
--cc=kpsingh@kernel.org \
--cc=linux-hardening@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=martin.lau@linux.dev \
--cc=song@kernel.org \
--cc=yhs@fb.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox