From: Jakub Kicinski <jakub.kicinski@netronome.com>
To: netdev@vger.kernel.org, alexei.starovoitov@gmail.com,
daniel@iogearbox.net, davem@davemloft.net
Cc: oss-drivers@netronome.com, tehnerd@fb.com,
Jakub Kicinski <jakub.kicinski@netronome.com>
Subject: [PATCH bpf-next 04/16] bpf: hashtab: move checks out of alloc function
Date: Thu, 4 Jan 2018 22:09:19 -0800
Message-ID: <20180105060931.30815-5-jakub.kicinski@netronome.com>
In-Reply-To: <20180105060931.30815-1-jakub.kicinski@netronome.com>
Use the new map_alloc_check callback to perform allocation checks for
hash maps: move the attribute validation out of htab_map_alloc() into
htab_map_alloc_check() and wire the new check up for all hash map types.
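
For context (an illustrative sketch, not code from this patch): patch
01/16 introduces the map_alloc_check callback, and the generic map
creation path in kernel/bpf/syscall.c is expected to invoke it before
->map_alloc(), roughly as below. Names and error handling here are an
approximation of that patch, not a verbatim copy:

/* Run the type-specific attribute checks, if the map type provides any,
 * before doing the actual allocation.
 */
static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	const struct bpf_map_ops *ops;
	int err;

	if (attr->map_type >= ARRAY_SIZE(bpf_map_types))
		return ERR_PTR(-EINVAL);
	ops = bpf_map_types[attr->map_type];
	if (!ops)
		return ERR_PTR(-EINVAL);

	if (ops->map_alloc_check) {
		err = ops->map_alloc_check(attr);
		if (err)
			return ERR_PTR(err);
	}

	return ops->map_alloc(attr);
}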
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Quentin Monnet <quentin.monnet@netronome.com>
---
kernel/bpf/hashtab.c | 55 +++++++++++++++++++++++++++++++++++++---------------
1 file changed, 39 insertions(+), 16 deletions(-)
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index b80f42adf068..7fd6519444d3 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -227,7 +227,7 @@ static int alloc_extra_elems(struct bpf_htab *htab)
}
/* Called from syscall */
-static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
+static int htab_map_alloc_check(union bpf_attr *attr)
{
bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
@@ -241,9 +241,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
int numa_node = bpf_map_attr_numa_node(attr);
- struct bpf_htab *htab;
- int err, i;
- u64 cost;
BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
offsetof(struct htab_elem, hash_node.pprev));
@@ -254,33 +251,33 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
/* LRU implementation is much complicated than other
* maps. Hence, limit to CAP_SYS_ADMIN for now.
*/
- return ERR_PTR(-EPERM);
+ return -EPERM;
if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK)
/* reserved bits should not be used */
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
if (!lru && percpu_lru)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
if (lru && !prealloc)
- return ERR_PTR(-ENOTSUPP);
+ return -ENOTSUPP;
if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
/* check sanity of attributes.
* value_size == 0 may be allowed in the future to use map as a set
*/
if (attr->max_entries == 0 || attr->key_size == 0 ||
attr->value_size == 0)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
if (attr->key_size > MAX_BPF_STACK)
/* eBPF programs initialize keys on stack, so they cannot be
* larger than max stack size
*/
- return ERR_PTR(-E2BIG);
+ return -E2BIG;
if (attr->value_size >= KMALLOC_MAX_SIZE -
MAX_BPF_STACK - sizeof(struct htab_elem))
@@ -289,7 +286,28 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
* sure that the elem_size doesn't overflow and it's
* kmalloc-able later in htab_map_update_elem()
*/
- return ERR_PTR(-E2BIG);
+ return -E2BIG;
+
+ return 0;
+}
+
+static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
+{
+ bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+ attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
+ bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
+ attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
+ /* percpu_lru means each cpu has its own LRU list.
+ * it is different from BPF_MAP_TYPE_PERCPU_HASH where
+ * the map's value itself is percpu. percpu_lru has
+ * nothing to do with the map's value.
+ */
+ bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
+ bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
+ int numa_node = bpf_map_attr_numa_node(attr);
+ struct bpf_htab *htab;
+ int err, i;
+ u64 cost;
htab = kzalloc(sizeof(*htab), GFP_USER);
if (!htab)
@@ -1142,6 +1160,7 @@ static void htab_map_free(struct bpf_map *map)
}
const struct bpf_map_ops htab_map_ops = {
+ .map_alloc_check = htab_map_alloc_check,
.map_alloc = htab_map_alloc,
.map_free = htab_map_free,
.map_get_next_key = htab_map_get_next_key,
@@ -1152,6 +1171,7 @@ const struct bpf_map_ops htab_map_ops = {
};
const struct bpf_map_ops htab_lru_map_ops = {
+ .map_alloc_check = htab_map_alloc_check,
.map_alloc = htab_map_alloc,
.map_free = htab_map_free,
.map_get_next_key = htab_map_get_next_key,
@@ -1235,6 +1255,7 @@ int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
}
const struct bpf_map_ops htab_percpu_map_ops = {
+ .map_alloc_check = htab_map_alloc_check,
.map_alloc = htab_map_alloc,
.map_free = htab_map_free,
.map_get_next_key = htab_map_get_next_key,
@@ -1244,6 +1265,7 @@ const struct bpf_map_ops htab_percpu_map_ops = {
};
const struct bpf_map_ops htab_lru_percpu_map_ops = {
+ .map_alloc_check = htab_map_alloc_check,
.map_alloc = htab_map_alloc,
.map_free = htab_map_free,
.map_get_next_key = htab_map_get_next_key,
@@ -1252,11 +1274,11 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = {
.map_delete_elem = htab_lru_map_delete_elem,
};
-static struct bpf_map *fd_htab_map_alloc(union bpf_attr *attr)
+static int fd_htab_map_alloc_check(union bpf_attr *attr)
{
if (attr->value_size != sizeof(u32))
- return ERR_PTR(-EINVAL);
- return htab_map_alloc(attr);
+ return -EINVAL;
+ return htab_map_alloc_check(attr);
}
static void fd_htab_map_free(struct bpf_map *map)
@@ -1327,7 +1349,7 @@ static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
if (IS_ERR(inner_map_meta))
return inner_map_meta;
- map = fd_htab_map_alloc(attr);
+ map = htab_map_alloc(attr);
if (IS_ERR(map)) {
bpf_map_meta_free(inner_map_meta);
return map;
@@ -1371,6 +1393,7 @@ static void htab_of_map_free(struct bpf_map *map)
}
const struct bpf_map_ops htab_of_maps_map_ops = {
+ .map_alloc_check = fd_htab_map_alloc_check,
.map_alloc = htab_of_map_alloc,
.map_free = htab_of_map_free,
.map_get_next_key = htab_map_get_next_key,
--
2.15.1
Thread overview: 19+ messages
2018-01-05 6:09 [PATCH bpf-next 00/16] bpf: support creating maps on networking devices Jakub Kicinski
2018-01-05 6:09 ` [PATCH bpf-next 01/16] bpf: add map_alloc_check callback Jakub Kicinski
2018-01-05 6:09 ` [PATCH bpf-next 02/16] bpf: array: move checks out of alloc function Jakub Kicinski
2018-01-05 17:21 ` Alexei Starovoitov
2018-01-05 17:40 ` Jakub Kicinski
2018-01-05 6:09 ` [PATCH bpf-next 03/16] bpf: hashtab: move attribute validation before allocation Jakub Kicinski
2018-01-05 6:09 ` [PATCH bpf-next 04/16] bpf: hashtab: move checks out of alloc function Jakub Kicinski [this message]
2018-01-05 6:09 ` [PATCH bpf-next 05/16] bpf: add helper for copying attrs to struct bpf_map Jakub Kicinski
2018-01-05 6:09 ` [PATCH bpf-next 06/16] bpf: rename bpf_dev_offload -> bpf_prog_offload Jakub Kicinski
2018-01-05 6:09 ` [PATCH bpf-next 07/16] bpf: offload: factor out netdev checking at allocation time Jakub Kicinski
2018-01-05 6:09 ` [PATCH bpf-next 08/16] bpf: offload: add map offload infrastructure Jakub Kicinski
2018-01-05 6:09 ` [PATCH bpf-next 09/16] nfp: hand over to BPF offload app at coarser granularity Jakub Kicinski
2018-01-05 6:09 ` [PATCH bpf-next 10/16] nfp: bpf: add map data structure Jakub Kicinski
2018-01-05 6:09 ` [PATCH bpf-next 11/16] nfp: bpf: add basic control channel communication Jakub Kicinski
2018-01-05 6:09 ` [PATCH bpf-next 12/16] nfp: bpf: implement helpers for FW map ops Jakub Kicinski
2018-01-05 6:09 ` [PATCH bpf-next 13/16] nfp: bpf: parse function call and map capabilities Jakub Kicinski
2018-01-05 6:09 ` [PATCH bpf-next 14/16] nfp: bpf: add verification and codegen for map lookups Jakub Kicinski
2018-01-05 6:09 ` [PATCH bpf-next 15/16] nfp: bpf: add support for reading map memory Jakub Kicinski
2018-01-05 6:09 ` [PATCH bpf-next 16/16] nfp: bpf: implement bpf map offload Jakub Kicinski