From: Jiong Wang <jiong.wang@netronome.com>
To: alexei.starovoitov@gmail.com, daniel@iogearbox.net
Cc: john.fastabend@gmail.com, netdev@vger.kernel.org,
oss-drivers@netronome.com, Jiong Wang <jiong.wang@netronome.com>
Subject: [RFC bpf-next 08/10] bpf: cfg: remove push_insn and check_cfg
Date: Mon, 7 May 2018 06:22:44 -0400 [thread overview]
Message-ID: <1525688567-19618-9-git-send-email-jiong.wang@netronome.com> (raw)
In-Reply-To: <1525688567-19618-1-git-send-email-jiong.wang@netronome.com>
Now that loops and unreachable insns are detected based on domination
information and the call graph, there is no need for check_cfg.

This patch removes check_cfg and its associated push_insn. The state
prune heuristic marking is moved to check_subprogs.
Signed-off-by: Jiong Wang <jiong.wang@netronome.com>
---
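For reference, below is a minimal standalone sketch of the marking rule
that this patch relocates into check_subprogs(). The struct, opcode
constants and helper name are simplified stand-ins rather than the
kernel's definitions; the sketch only shows which instructions end up
carrying the STATE_LIST_MARK prune-point marker that the state pruning
logic (is_state_visited()) later looks for.

/* Simplified stand-ins for the eBPF encoding, just enough to show
 * where prune-point marks are placed.  Not kernel code.
 */
#include <stddef.h>

#define STATE_LIST_MARK	((void *)-1L)

#define CLASS_JMP	0x05
#define OP_JA		0x00
#define OP_CALL		0x80
#define OP_EXIT		0x90
#define PSEUDO_CALL	1

struct insn {
	unsigned char cls;	/* instruction class */
	unsigned char op;	/* opcode within the JMP class */
	unsigned char src;	/* PSEUDO_CALL for bpf-to-bpf calls */
	short off;		/* jump offset */
	int imm;		/* callee offset for pseudo calls */
};

/* Mark prune points the way the new check_subprogs() hunks do:
 * pseudo calls mark the callee entry, the call insn and its
 * fall-through; conditional jumps mark the jump insn and its target;
 * unconditional jumps (and plain helper calls) mark the fall-through.
 */
static void mark_prune_points(const struct insn *insns, int insn_cnt,
			      void **explored_states)
{
	int i;

	for (i = 0; i < insn_cnt; i++) {
		if (insns[i].cls != CLASS_JMP)
			continue;

		if (insns[i].op == OP_EXIT) {
			continue;
		} else if (insns[i].op == OP_CALL) {
			if (insns[i].src == PSEUDO_CALL) {
				int target = i + insns[i].imm + 1;

				explored_states[target] = STATE_LIST_MARK;
				explored_states[i] = STATE_LIST_MARK;
			}
			if (i + 1 < insn_cnt)
				explored_states[i + 1] = STATE_LIST_MARK;
		} else if (insns[i].op != OP_JA) {
			/* conditional jump: two edges */
			explored_states[i] = STATE_LIST_MARK;
			explored_states[i + insns[i].off + 1] = STATE_LIST_MARK;
		} else if (i + 1 < insn_cnt) {
			/* unconditional jump: single fall-through edge */
			explored_states[i + 1] = STATE_LIST_MARK;
		}
	}
}

The caller is assumed to hand in explored_states[] as an insn_cnt-sized
array of NULL pointers, and out-of-range jump/call targets are assumed
to have been rejected already by the range checks in check_subprogs().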
kernel/bpf/verifier.c | 228 ++++----------------------------------------------
1 file changed, 16 insertions(+), 212 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 12f5399..54f2fe3 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -735,6 +735,8 @@ enum reg_arg_type {
DST_OP_NO_MARK /* same as above, check only, don't mark */
};
+#define STATE_LIST_MARK ((struct bpf_verifier_state_list *)-1L)
+
static int check_subprogs(struct bpf_verifier_env *env)
{
int i, ret, subprog_start, subprog_end, off, cur_subprog = 0;
@@ -815,8 +817,14 @@ static int check_subprogs(struct bpf_verifier_env *env)
target);
if (ret < 0)
return ret;
+
+ env->explored_states[target] = STATE_LIST_MARK;
+ env->explored_states[i] = STATE_LIST_MARK;
}
+ if (i + 1 < insn_cnt)
+ env->explored_states[i + 1] = STATE_LIST_MARK;
+
goto next;
}
@@ -836,6 +844,13 @@ static int check_subprogs(struct bpf_verifier_env *env)
if (ret < 0)
goto free_nodes;
}
+
+ if (BPF_OP(code) != BPF_JA) {
+ env->explored_states[i] = STATE_LIST_MARK;
+ env->explored_states[off] = STATE_LIST_MARK;
+ } else if (i + 1 < insn_cnt) {
+ env->explored_states[i + 1] = STATE_LIST_MARK;
+ }
next:
if (i == subprog_end - 1) {
/* to avoid fall-through from one subprog into another
@@ -3951,217 +3966,6 @@ static int check_return_code(struct bpf_verifier_env *env)
return 0;
}
-/* non-recursive DFS pseudo code
- * 1 procedure DFS-iterative(G,v):
- * 2 label v as discovered
- * 3 let S be a stack
- * 4 S.push(v)
- * 5 while S is not empty
- * 6 t <- S.pop()
- * 7 if t is what we're looking for:
- * 8 return t
- * 9 for all edges e in G.adjacentEdges(t) do
- * 10 if edge e is already labelled
- * 11 continue with the next edge
- * 12 w <- G.adjacentVertex(t,e)
- * 13 if vertex w is not discovered and not explored
- * 14 label e as tree-edge
- * 15 label w as discovered
- * 16 S.push(w)
- * 17 continue at 5
- * 18 else if vertex w is discovered
- * 19 label e as back-edge
- * 20 else
- * 21 // vertex w is explored
- * 22 label e as forward- or cross-edge
- * 23 label t as explored
- * 24 S.pop()
- *
- * convention:
- * 0x10 - discovered
- * 0x11 - discovered and fall-through edge labelled
- * 0x12 - discovered and fall-through and branch edges labelled
- * 0x20 - explored
- */
-
-enum {
- DISCOVERED = 0x10,
- EXPLORED = 0x20,
- FALLTHROUGH = 1,
- BRANCH = 2,
-};
-
-#define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L)
-
-static int *insn_stack; /* stack of insns to process */
-static int cur_stack; /* current stack index */
-static int *insn_state;
-
-/* t, w, e - match pseudo-code above:
- * t - index of current instruction
- * w - next instruction
- * e - edge
- */
-static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
-{
- if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
- return 0;
-
- if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
- return 0;
-
- if (w < 0 || w >= env->prog->len) {
- verbose(env, "jump out of range from insn %d to %d\n", t, w);
- return -EINVAL;
- }
-
- if (e == BRANCH)
- /* mark branch target for state pruning */
- env->explored_states[w] = STATE_LIST_MARK;
-
- if (insn_state[w] == 0) {
- /* tree-edge */
- insn_state[t] = DISCOVERED | e;
- insn_state[w] = DISCOVERED;
- if (cur_stack >= env->prog->len)
- return -E2BIG;
- insn_stack[cur_stack++] = w;
- return 1;
- } else if ((insn_state[w] & 0xF0) == DISCOVERED) {
- verbose(env, "back-edge from insn %d to %d\n", t, w);
- return -EINVAL;
- } else if (insn_state[w] == EXPLORED) {
- /* forward- or cross-edge */
- insn_state[t] = DISCOVERED | e;
- } else {
- verbose(env, "insn state internal bug\n");
- return -EFAULT;
- }
- return 0;
-}
-
-/* non-recursive depth-first-search to detect loops in BPF program
- * loop == back-edge in directed graph
- */
-static int check_cfg(struct bpf_verifier_env *env)
-{
- struct bpf_insn *insns = env->prog->insnsi;
- int insn_cnt = env->prog->len;
- int ret = 0;
- int i, t;
-
- ret = check_subprogs(env);
- if (ret < 0)
- return ret;
-
- insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
- if (!insn_state)
- return -ENOMEM;
-
- insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
- if (!insn_stack) {
- kfree(insn_state);
- return -ENOMEM;
- }
-
- insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
- insn_stack[0] = 0; /* 0 is the first instruction */
- cur_stack = 1;
-
-peek_stack:
- if (cur_stack == 0)
- goto check_state;
- t = insn_stack[cur_stack - 1];
-
- if (BPF_CLASS(insns[t].code) == BPF_JMP) {
- u8 opcode = BPF_OP(insns[t].code);
-
- if (opcode == BPF_EXIT) {
- goto mark_explored;
- } else if (opcode == BPF_CALL) {
- ret = push_insn(t, t + 1, FALLTHROUGH, env);
- if (ret == 1)
- goto peek_stack;
- else if (ret < 0)
- goto err_free;
- if (t + 1 < insn_cnt)
- env->explored_states[t + 1] = STATE_LIST_MARK;
- if (insns[t].src_reg == BPF_PSEUDO_CALL) {
- env->explored_states[t] = STATE_LIST_MARK;
- ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env);
- if (ret == 1)
- goto peek_stack;
- else if (ret < 0)
- goto err_free;
- }
- } else if (opcode == BPF_JA) {
- if (BPF_SRC(insns[t].code) != BPF_K) {
- ret = -EINVAL;
- goto err_free;
- }
- /* unconditional jump with single edge */
- ret = push_insn(t, t + insns[t].off + 1,
- FALLTHROUGH, env);
- if (ret == 1)
- goto peek_stack;
- else if (ret < 0)
- goto err_free;
- /* tell verifier to check for equivalent states
- * after every call and jump
- */
- if (t + 1 < insn_cnt)
- env->explored_states[t + 1] = STATE_LIST_MARK;
- } else {
- /* conditional jump with two edges */
- env->explored_states[t] = STATE_LIST_MARK;
- ret = push_insn(t, t + 1, FALLTHROUGH, env);
- if (ret == 1)
- goto peek_stack;
- else if (ret < 0)
- goto err_free;
-
- ret = push_insn(t, t + insns[t].off + 1, BRANCH, env);
- if (ret == 1)
- goto peek_stack;
- else if (ret < 0)
- goto err_free;
- }
- } else {
- /* all other non-branch instructions with single
- * fall-through edge
- */
- ret = push_insn(t, t + 1, FALLTHROUGH, env);
- if (ret == 1)
- goto peek_stack;
- else if (ret < 0)
- goto err_free;
- }
-
-mark_explored:
- insn_state[t] = EXPLORED;
- if (cur_stack-- <= 0) {
- verbose(env, "pop stack internal bug\n");
- ret = -EFAULT;
- goto err_free;
- }
- goto peek_stack;
-
-check_state:
- for (i = 0; i < insn_cnt; i++) {
- if (insn_state[i] != EXPLORED) {
- verbose(env, "unreachable insn %d\n", i);
- ret = -EINVAL;
- goto err_free;
- }
- }
- ret = 0; /* cfg looks good */
-
-err_free:
- kfree(insn_state);
- kfree(insn_stack);
- return ret;
-}
-
/* check %cur's range satisfies %old's */
static bool range_within(struct bpf_reg_state *old,
struct bpf_reg_state *cur)
@@ -5710,7 +5514,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
- ret = check_cfg(env);
+ ret = check_subprogs(env);
if (ret < 0)
goto skip_full_check;
--
2.7.4