From: Maxim Mikityanskiy <maxtram95@gmail.com>
To: Eduard Zingerman <eddyz87@gmail.com>,
Alexei Starovoitov <ast@kernel.org>,
Daniel Borkmann <daniel@iogearbox.net>,
Andrii Nakryiko <andrii@kernel.org>,
Shung-Hsi Yu <shung-hsi.yu@suse.com>
Cc: John Fastabend <john.fastabend@gmail.com>,
Martin KaFai Lau <martin.lau@linux.dev>,
Song Liu <song@kernel.org>,
Yonghong Song <yonghong.song@linux.dev>,
KP Singh <kpsingh@kernel.org>,
Stanislav Fomichev <sdf@google.com>, Hao Luo <haoluo@google.com>,
Jiri Olsa <jolsa@kernel.org>, Mykola Lysenko <mykolal@fb.com>,
Shuah Khan <shuah@kernel.org>,
"David S. Miller" <davem@davemloft.net>,
Jakub Kicinski <kuba@kernel.org>,
Jesper Dangaard Brouer <hawk@kernel.org>,
bpf@vger.kernel.org, linux-kselftest@vger.kernel.org,
netdev@vger.kernel.org
Subject: [PATCH bpf-next v2 14/15] bpf: Optimize state pruning for spilled scalars
Date: Mon, 8 Jan 2024 22:52:08 +0200 [thread overview]
Message-ID: <20240108205209.838365-15-maxtram95@gmail.com> (raw)
In-Reply-To: <20240108205209.838365-1-maxtram95@gmail.com>
From: Eduard Zingerman <eddyz87@gmail.com>
Changes for scalar ID tracking of spilled unbound scalars lead to a
certain verification performance regression. This commit mitigates the
regression by exploiting the following properties maintained by
check_stack_read_fixed_off():
- a mix of STACK_MISC, STACK_ZERO and STACK_INVALID marks is read as
an unbounded scalar register;
- a spi with all slots marked STACK_ZERO is read as a scalar register
with value zero.
This commit modifies stacksafe() to consider situations above
equivalent.
Veristat results after this patch show significant gains:
$ ./veristat -e file,prog,states -f '!states_pct<10' -f '!states_b<10' -C not-opt after
File Program States (A) States (B) States (DIFF)
---------------- -------- ---------- ---------- ----------------
pyperf180.bpf.o on_event 10456 8422 -2034 (-19.45%)
pyperf600.bpf.o on_event 37319 22519 -14800 (-39.66%)
strobemeta.bpf.o on_event 13435 4703 -8732 (-64.99%)
Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
---
kernel/bpf/verifier.c | 83 +++++++++++++++++++++++++++++++++++++++++++
1 file changed, 83 insertions(+)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index aeb3e198a5ea..cb82f8d4226f 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1170,6 +1170,12 @@ static void mark_stack_slot_misc(struct bpf_verifier_env *env, u8 *stype)
*stype = STACK_MISC;
}
+static bool is_spilled_scalar_reg64(const struct bpf_stack_state *stack)
+{
+ return stack->slot_type[0] == STACK_SPILL &&
+ stack->spilled_ptr.type == SCALAR_VALUE;
+}
+
static void scrub_spilled_slot(u8 *stype)
{
if (*stype != STACK_INVALID)
@@ -16459,11 +16465,45 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
}
}
+static bool is_stack_zero64(struct bpf_stack_state *stack)
+{
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(stack->slot_type); ++i)
+ if (stack->slot_type[i] != STACK_ZERO)
+ return false;
+ return true;
+}
+
+static bool is_stack_unbound_slot64(struct bpf_verifier_env *env,
+ struct bpf_stack_state *stack)
+{
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(stack->slot_type); ++i)
+ if (stack->slot_type[i] != STACK_ZERO &&
+ stack->slot_type[i] != STACK_MISC &&
+ (!env->allow_uninit_stack || stack->slot_type[i] != STACK_INVALID))
+ return false;
+ return true;
+}
+
+static bool is_spilled_unbound_scalar_reg64(struct bpf_stack_state *stack)
+{
+ return is_spilled_scalar_reg64(stack) && __is_scalar_unbounded(&stack->spilled_ptr);
+}
+
static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
struct bpf_func_state *cur, struct bpf_idmap *idmap, bool exact)
{
+ struct bpf_reg_state unbound_reg = {};
+ struct bpf_reg_state zero_reg = {};
int i, spi;
+ __mark_reg_unknown(env, &unbound_reg);
+ __mark_reg_const_zero(env, &zero_reg);
+ zero_reg.precise = true;
+
/* walk slots of the explored stack and ignore any additional
* slots in the current stack, since explored(safe) state
* didn't use them
@@ -16484,6 +16524,49 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
continue;
}
+ /* load of stack value with all MISC and ZERO slots produces unbounded
+ * scalar value, call regsafe to ensure scalar ids are compared.
+ */
+ if (is_spilled_unbound_scalar_reg64(&old->stack[spi]) &&
+ is_stack_unbound_slot64(env, &cur->stack[spi])) {
+ i += BPF_REG_SIZE - 1;
+ if (!regsafe(env, &old->stack[spi].spilled_ptr, &unbound_reg,
+ idmap, exact))
+ return false;
+ continue;
+ }
+
+ if (is_stack_unbound_slot64(env, &old->stack[spi]) &&
+ is_spilled_unbound_scalar_reg64(&cur->stack[spi])) {
+ i += BPF_REG_SIZE - 1;
+ if (!regsafe(env, &unbound_reg, &cur->stack[spi].spilled_ptr,
+ idmap, exact))
+ return false;
+ continue;
+ }
+
+ /* load of stack value with all ZERO slots produces scalar value 0,
+ * call regsafe to ensure scalar ids are compared and precision
+ * flags are taken into account.
+ */
+ if (is_spilled_scalar_reg64(&old->stack[spi]) &&
+ is_stack_zero64(&cur->stack[spi])) {
+ if (!regsafe(env, &old->stack[spi].spilled_ptr, &zero_reg,
+ idmap, exact))
+ return false;
+ i += BPF_REG_SIZE - 1;
+ continue;
+ }
+
+ if (is_stack_zero64(&old->stack[spi]) &&
+ is_spilled_scalar_reg64(&cur->stack[spi])) {
+ if (!regsafe(env, &zero_reg, &cur->stack[spi].spilled_ptr,
+ idmap, exact))
+ return false;
+ i += BPF_REG_SIZE - 1;
+ continue;
+ }
+
if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
continue;
--
2.43.0
next prev parent reply other threads:[~2024-01-08 20:53 UTC|newest]
Thread overview: 28+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-01-08 20:51 [PATCH bpf-next v2 00/15] Improvements for tracking scalars in the BPF verifier Maxim Mikityanskiy
2024-01-08 20:51 ` [PATCH bpf-next v2 01/15] selftests/bpf: Fix the u64_offset_to_skb_data test Maxim Mikityanskiy
2024-01-08 20:51 ` [PATCH bpf-next v2 02/15] bpf: make infinite loop detection in is_state_visited() exact Maxim Mikityanskiy
2024-01-08 20:51 ` [PATCH bpf-next v2 03/15] selftests/bpf: check if imprecise stack spills confuse infinite loop detection Maxim Mikityanskiy
2024-01-08 20:51 ` [PATCH bpf-next v2 04/15] bpf: Make bpf_for_each_spilled_reg consider narrow spills Maxim Mikityanskiy
2024-01-08 20:51 ` [PATCH bpf-next v2 05/15] selftests/bpf: Add a test case for 32-bit spill tracking Maxim Mikityanskiy
2024-01-08 20:52 ` [PATCH bpf-next v2 06/15] bpf: Add the assign_scalar_id_before_mov function Maxim Mikityanskiy
2024-01-08 20:52 ` [PATCH bpf-next v2 07/15] bpf: Add the get_reg_width function Maxim Mikityanskiy
2024-01-08 20:52 ` [PATCH bpf-next v2 08/15] bpf: Assign ID to scalars on spill Maxim Mikityanskiy
2024-01-08 20:52 ` [PATCH bpf-next v2 09/15] selftests/bpf: Test assigning " Maxim Mikityanskiy
2024-01-09 23:34 ` Andrii Nakryiko
2024-01-08 20:52 ` [PATCH bpf-next v2 10/15] bpf: Track spilled unbounded scalars Maxim Mikityanskiy
2024-01-12 19:10 ` Alexei Starovoitov
2024-01-12 20:44 ` Maxim Mikityanskiy
2024-01-12 20:50 ` Alexei Starovoitov
2024-01-08 20:52 ` [PATCH bpf-next v2 11/15] selftests/bpf: Test tracking " Maxim Mikityanskiy
2024-01-08 20:52 ` [PATCH bpf-next v2 12/15] bpf: Preserve boundaries and track scalars on narrowing fill Maxim Mikityanskiy
2024-01-09 23:51 ` Andrii Nakryiko
2024-01-08 20:52 ` [PATCH bpf-next v2 13/15] selftests/bpf: Add test cases for " Maxim Mikityanskiy
2024-01-09 23:55 ` Andrii Nakryiko
2024-01-08 20:52 ` Maxim Mikityanskiy [this message]
2024-01-10 0:22 ` [PATCH bpf-next v2 14/15] bpf: Optimize state pruning for spilled scalars Andrii Nakryiko
2024-01-10 21:04 ` Eduard Zingerman
2024-01-10 21:52 ` Andrii Nakryiko
2024-01-08 20:52 ` [PATCH bpf-next v2 15/15] selftests/bpf: states pruning checks for scalar vs STACK_{MISC,ZERO} Maxim Mikityanskiy
2024-01-10 0:27 ` Andrii Nakryiko
2024-01-10 20:27 ` Eduard Zingerman
2024-01-12 3:00 ` [PATCH bpf-next v2 00/15] Improvements for tracking scalars in the BPF verifier patchwork-bot+netdevbpf
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240108205209.838365-15-maxtram95@gmail.com \
--to=maxtram95@gmail.com \
--cc=andrii@kernel.org \
--cc=ast@kernel.org \
--cc=bpf@vger.kernel.org \
--cc=daniel@iogearbox.net \
--cc=davem@davemloft.net \
--cc=eddyz87@gmail.com \
--cc=haoluo@google.com \
--cc=hawk@kernel.org \
--cc=john.fastabend@gmail.com \
--cc=jolsa@kernel.org \
--cc=kpsingh@kernel.org \
--cc=kuba@kernel.org \
--cc=linux-kselftest@vger.kernel.org \
--cc=martin.lau@linux.dev \
--cc=mykolal@fb.com \
--cc=netdev@vger.kernel.org \
--cc=sdf@google.com \
--cc=shuah@kernel.org \
--cc=shung-hsi.yu@suse.com \
--cc=song@kernel.org \
--cc=yonghong.song@linux.dev \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox