From: Jiong Wang <jiong.wang@netronome.com>
To: alexei.starovoitov@gmail.com, daniel@iogearbox.net
Cc: ecree@solarflare.com, naveen.n.rao@linux.vnet.ibm.com,
	andriin@fb.com, jakub.kicinski@netronome.com,
	bpf@vger.kernel.org, netdev@vger.kernel.org,
	oss-drivers@netronome.com, Jiong Wang <jiong.wang@netronome.com>
Subject: [RFC bpf-next 2/8] bpf: extend list based insn patching infra to verification layer
Date: Thu,  4 Jul 2019 22:26:45 +0100	[thread overview]
Message-ID: <1562275611-31790-3-git-send-email-jiong.wang@netronome.com> (raw)
In-Reply-To: <1562275611-31790-1-git-send-email-jiong.wang@netronome.com>

The verification layer also needs to handle auxiliary info as well as
adjust subprog starts.

At this layer, insns inside the patch buffer may be jumps, but they should
already be resolved, meaning they must not jump to an insn outside of the
patch buffer. The linearization function for this layer therefore won't
touch insns inside the patch buffer.

Subprog starts are adjusted together with jump targets: since the input
covers bpf-to-bpf call insns, re-registering subprog starts is cheap.
Adjustment in the presence of insn deletion is not handled yet.
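
For reference, the jump relocation arithmetic (described in the comment
above the relocation loop in the patch) can be sketched as below. This is
an illustrative fragment only: the helper name is made up, and in the
patch the actual adjustment is done by bpf_jit_adj_imm_off() using idx_map:

  /* idx_map[old_idx] holds the new index of each original insn. */
  static int relocate_rel_off(int old_off, u32 old_pc, u32 new_pc,
                              const u32 *idx_map)
  {
          u32 old_dst = old_pc + old_off + 1;  /* absolute target in old prog */
          u32 new_dst = idx_map[old_dst];      /* absolute target in new prog */

          return new_dst - new_pc - 1;         /* back to relative encoding */
  }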

Signed-off-by: Jiong Wang <jiong.wang@netronome.com>
---
 kernel/bpf/verifier.c | 150 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 150 insertions(+)

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index a2e7637..2026d64 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -8350,6 +8350,156 @@ static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env)
 	}
 }
 
+/* Linearize bpf list insn to array (verifier layer). */
+static struct bpf_verifier_env *
+verifier_linearize_list_insn(struct bpf_verifier_env *env,
+			     struct bpf_list_insn *list)
+{
+	u32 *idx_map, idx, orig_cnt, fini_cnt = 0;
+	struct bpf_subprog_info *new_subinfo;
+	struct bpf_insn_aux_data *new_data;
+	struct bpf_prog *prog = env->prog;
+	struct bpf_verifier_env *ret_env;
+	struct bpf_insn *insns, *insn;
+	struct bpf_list_insn *elem;
+	int ret;
+
+	/* Calculate final size. */
+	for (elem = list; elem; elem = elem->next)
+		if (!(elem->flag & LIST_INSN_FLAG_REMOVED))
+			fini_cnt++;
+
+	orig_cnt = prog->len;
+	insns = prog->insnsi;
+	/* If prog length remains same, nothing else to do. */
+	if (fini_cnt == orig_cnt) {
+		for (insn = insns, elem = list; elem; elem = elem->next, insn++)
+			*insn = elem->insn;
+		return env;
+	}
+	/* Realloc insn buffer when necessary. */
+	if (fini_cnt > orig_cnt)
+		prog = bpf_prog_realloc(prog, bpf_prog_size(fini_cnt),
+					GFP_USER);
+	if (!prog)
+		return ERR_PTR(-ENOMEM);
+	insns = prog->insnsi;
+	prog->len = fini_cnt;
+	ret_env = env;
+
+	/* idx_map[OLD_IDX] = NEW_IDX */
+	idx_map = kvmalloc(orig_cnt * sizeof(u32), GFP_KERNEL);
+	if (!idx_map)
+		return ERR_PTR(-ENOMEM);
+	memset(idx_map, 0xff, orig_cnt * sizeof(u32));
+
+	/* Use the same alloc method used when allocating env->insn_aux_data. */
+	new_data = vzalloc(array_size(sizeof(*new_data), fini_cnt));
+	if (!new_data) {
+		kvfree(idx_map);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* Copy over insn + calculate idx_map. */
+	for (idx = 0, elem = list; elem; elem = elem->next) {
+		int orig_idx = elem->orig_idx - 1;
+
+		if (orig_idx >= 0) {
+			idx_map[orig_idx] = idx;
+
+			if (elem->flag & LIST_INSN_FLAG_REMOVED)
+				continue;
+
+			new_data[idx] = env->insn_aux_data[orig_idx];
+
+			if (elem->flag & LIST_INSN_FLAG_PATCHED)
+				new_data[idx].zext_dst =
+					insn_has_def32(env, &elem->insn);
+		} else {
+			new_data[idx].seen = true;
+			new_data[idx].zext_dst = insn_has_def32(env,
+								&elem->insn);
+		}
+		insns[idx++] = elem->insn;
+	}
+
+	new_subinfo = kvzalloc(sizeof(env->subprog_info), GFP_KERNEL);
+	if (!new_subinfo) {
+		kvfree(idx_map);
+		vfree(new_data);
+		return ERR_PTR(-ENOMEM);
+	}
+	memcpy(new_subinfo, env->subprog_info, sizeof(env->subprog_info));
+	memset(env->subprog_info, 0, sizeof(env->subprog_info));
+	env->subprog_cnt = 0;
+	env->prog = prog;
+	ret = add_subprog(env, 0);
+	if (ret < 0) {
+		ret_env = ERR_PTR(ret);
+		goto free_all_ret;
+	}
+	/* Relocate jumps using idx_map.
+	 *   old_dst = jmp_insn.old_target + old_pc + 1;
+	 *   new_dst = idx_map[old_dst] = jmp_insn.new_target + new_pc + 1;
+	 *   jmp_insn.new_target = new_dst - new_pc - 1;
+	 */
+	for (idx = 0, elem = list; elem; elem = elem->next) {
+		int orig_idx = elem->orig_idx;
+
+		if (elem->flag & LIST_INSN_FLAG_REMOVED)
+			continue;
+		if ((elem->flag & LIST_INSN_FLAG_PATCHED) || !orig_idx) {
+			idx++;
+			continue;
+		}
+
+		ret = bpf_jit_adj_imm_off(&insns[idx], orig_idx - 1, idx,
+					  idx_map);
+		if (ret < 0) {
+			ret_env = ERR_PTR(ret);
+			goto free_all_ret;
+		}
+		/* Recalculate subprog start as we are at bpf2bpf call insn. */
+		if (ret > 0) {
+			ret = add_subprog(env, idx + insns[idx].imm + 1);
+			if (ret < 0) {
+				ret_env = ERR_PTR(ret);
+				goto free_all_ret;
+			}
+		}
+		idx++;
+	}
+	if (ret < 0) {
+		ret_env = ERR_PTR(ret);
+		goto free_all_ret;
+	}
+
+	env->subprog_info[env->subprog_cnt].start = fini_cnt;
+	for (idx = 0; idx <= env->subprog_cnt; idx++)
+		new_subinfo[idx].start = env->subprog_info[idx].start;
+	memcpy(env->subprog_info, new_subinfo, sizeof(env->subprog_info));
+
+	/* Adjust linfo.
+	 * FIXME: no support for insn removal at the moment.
+	 */
+	if (prog->aux->nr_linfo) {
+		struct bpf_line_info *linfo = prog->aux->linfo;
+		u32 nr_linfo = prog->aux->nr_linfo;
+
+		for (idx = 0; idx < nr_linfo; idx++)
+			linfo[idx].insn_off = idx_map[linfo[idx].insn_off];
+	}
+	vfree(env->insn_aux_data);
+	env->insn_aux_data = new_data;
+	goto free_mem_list_ret;
+free_all_ret:
+	vfree(new_data);
+free_mem_list_ret:
+	kvfree(new_subinfo);
+	kvfree(idx_map);
+	return ret_env;
+}
+
 static int opt_remove_dead_code(struct bpf_verifier_env *env)
 {
 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
-- 
2.7.4


Thread overview: 32+ messages
2019-07-04 21:26 [RFC bpf-next 0/8] bpf: accelerate insn patching speed Jiong Wang
2019-07-04 21:26 ` [RFC bpf-next 1/8] bpf: introducing list based insn patching infra to core layer Jiong Wang
2019-07-10 17:49   ` Andrii Nakryiko
2019-07-11 11:53     ` Jiong Wang
2019-07-12 19:48       ` Andrii Nakryiko
2019-07-15  9:58         ` Jiong Wang
2019-07-04 21:26 ` Jiong Wang [this message]
2019-07-10 17:50   ` [RFC bpf-next 2/8] bpf: extend list based insn patching infra to verification layer Andrii Nakryiko
2019-07-11 11:59     ` [oss-drivers] " Jiong Wang
2019-07-11 12:20       ` Jiong Wang
2019-07-12 19:51         ` Andrii Nakryiko
2019-07-15 10:02           ` Jiong Wang
2019-07-15 22:29             ` Andrii Nakryiko
2019-07-16  8:12               ` Jiong Wang
2019-07-04 21:26 ` [RFC bpf-next 3/8] bpf: migrate jit blinding to list patching infra Jiong Wang
2019-07-04 21:26 ` [RFC bpf-next 4/8] bpf: migrate convert_ctx_accesses " Jiong Wang
2019-07-04 21:26 ` [RFC bpf-next 5/8] bpf: migrate fixup_bpf_calls " Jiong Wang
2019-07-04 21:26 ` [RFC bpf-next 6/8] bpf: migrate zero extension opt " Jiong Wang
2019-07-04 21:26 ` [RFC bpf-next 7/8] bpf: migrate insn remove " Jiong Wang
2019-07-04 21:26 ` [RFC bpf-next 8/8] bpf: delete all those code around old insn patching infrastructure Jiong Wang
2019-07-10 17:39 ` [RFC bpf-next 0/8] bpf: accelerate insn patching speed Andrii Nakryiko
2019-07-11 11:22   ` Jiong Wang
2019-07-12 19:43     ` Andrii Nakryiko
2019-07-15  9:21       ` Jiong Wang
2019-07-15 22:55         ` Andrii Nakryiko
2019-07-15 23:00           ` Andrii Nakryiko
2019-07-16  8:50           ` Jiong Wang
2019-07-16 16:17             ` Alexei Starovoitov
2019-07-16 19:39               ` Jiong Wang
2019-07-16 22:12               ` Jakub Kicinski
2019-07-17  1:17                 ` Alexei Starovoitov
2019-07-16 17:49             ` Andrii Nakryiko
