From: Stanislav Fomichev <sdf@google.com>
To: netdev@vger.kernel.org
Cc: davem@davemloft.net, ast@kernel.org, daniel@iogearbox.net,
Stanislav Fomichev <sdf@google.com>
Subject: [PATCH bpf-next 2/3] bpf: add BPF_PROG_TEST_RUN support for flow dissector
Date: Tue, 22 Jan 2019 13:23:14 -0800 [thread overview]
Message-ID: <20190122212315.137291-3-sdf@google.com> (raw)
In-Reply-To: <20190122212315.137291-1-sdf@google.com>
The input is packet data, the output is struct bpf_flow_keys. This should
make it easy to test flow dissector programs without elaborate
setup.
Signed-off-by: Stanislav Fomichev <sdf@google.com>
---
include/linux/bpf.h | 3 ++
net/bpf/test_run.c | 75 ++++++++++++++++++++++++++++++++++++++++++++-
net/core/filter.c | 1 +
3 files changed, 78 insertions(+), 1 deletion(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index e734f163bd0b..701ef954a258 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -397,6 +397,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr);
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr);
+int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
/* an array of programs to be executed under rcu_lock.
*
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index fa2644d276ef..ecad72885f23 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -16,12 +16,26 @@
static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
{
+ struct bpf_skb_data_end *cb;
+ struct sk_buff *skb;
u32 ret;
preempt_disable();
rcu_read_lock();
bpf_cgroup_storage_set(storage);
- ret = BPF_PROG_RUN(prog, ctx);
+
+ switch (prog->type) {
+ case BPF_PROG_TYPE_FLOW_DISSECTOR:
+ skb = (struct sk_buff *)ctx;
+ cb = (struct bpf_skb_data_end *)skb->cb;
+ ret = __skb_flow_bpf_dissect(prog, ctx, &flow_keys_dissector,
+ cb->qdisc_cb.flow_keys);
+ break;
+ default:
+ ret = BPF_PROG_RUN(prog, ctx);
+ break;
+ }
+
rcu_read_unlock();
preempt_enable();
@@ -240,3 +254,62 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
kfree(data);
return ret;
}
+
+int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr)
+{
+ u32 size = kattr->test.data_size_in;
+ u32 repeat = kattr->test.repeat;
+ struct bpf_flow_keys flow_keys;
+ struct bpf_skb_data_end *cb;
+ u32 retval, duration;
+ struct sk_buff *skb;
+ struct sock *sk;
+ void *data;
+ int ret;
+
+ if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
+ return -EINVAL;
+
+ data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ sk = kzalloc(sizeof(*sk), GFP_USER);
+ if (!sk) {
+ kfree(data);
+ return -ENOMEM;
+ }
+ sock_net_set(sk, current->nsproxy->net_ns);
+ sock_init_data(NULL, sk);
+
+ skb = build_skb(data, 0);
+ if (!skb) {
+ kfree(data);
+ kfree(sk);
+ return -ENOMEM;
+ }
+ skb->sk = sk;
+
+ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+ __skb_put(skb, size);
+ skb->protocol = eth_type_trans(skb,
+ current->nsproxy->net_ns->loopback_dev);
+ skb_reset_network_header(skb);
+
+ cb = (struct bpf_skb_data_end *)skb->cb;
+ cb->qdisc_cb.flow_keys = &flow_keys;
+ ret = bpf_test_run(prog, skb, repeat, &retval, &duration);
+ if (ret)
+ goto out;
+
+ ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
+ retval, duration);
+
+out:
+ kfree_skb(skb);
+ kfree(sk);
+ return ret;
+}
diff --git a/net/core/filter.c b/net/core/filter.c
index 2b3b436ef545..ff4641dae2be 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -7690,6 +7690,7 @@ const struct bpf_verifier_ops flow_dissector_verifier_ops = {
};
const struct bpf_prog_ops flow_dissector_prog_ops = {
+ .test_run = bpf_prog_test_run_flow_dissector,
};
int sk_detach_filter(struct sock *sk)
--
2.20.1.321.g9e740568ce-goog
next prev parent reply other threads:[~2019-01-22 21:23 UTC|newest]
Thread overview: 12+ messages / expand[flat|nested] mbox.gz Atom feed top
2019-01-22 21:23 [PATCH bpf-next 0/3] support flow dissector in BPF_PROG_TEST_RUN Stanislav Fomichev
2019-01-22 21:23 ` [PATCH bpf-next 1/3] net/flow_dissector: move bpf case into __skb_flow_bpf_dissect Stanislav Fomichev
2019-01-22 21:23 ` Stanislav Fomichev [this message]
2019-01-24 3:57 ` [PATCH bpf-next 2/3] bpf: add BPF_PROG_TEST_RUN support for flow dissector Alexei Starovoitov
2019-01-24 16:34 ` Stanislav Fomichev
2019-01-22 21:23 ` [PATCH bpf-next 3/3] selftests/bpf: add simple BPF_PROG_TEST_RUN examples " Stanislav Fomichev
-- strict thread matches above, loose matches on Subject: below --
2018-12-03 18:59 [PATCH bpf-next 0/3] support flow dissector in BPF_PROG_TEST_RUN Stanislav Fomichev
2018-12-03 18:59 ` [PATCH bpf-next 2/3] bpf: add BPF_PROG_TEST_RUN support for flow dissector Stanislav Fomichev
2018-12-03 22:28 ` Song Liu
2018-12-03 23:08 ` Stanislav Fomichev
2018-12-04 3:54 ` Stanislav Fomichev
2018-12-04 23:25 ` Song Liu
2018-12-04 23:36 ` Stanislav Fomichev
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20190122212315.137291-3-sdf@google.com \
--to=sdf@google.com \
--cc=ast@kernel.org \
--cc=daniel@iogearbox.net \
--cc=davem@davemloft.net \
--cc=netdev@vger.kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).