From: Jakub Kicinski <jakub.kicinski@netronome.com>
To: alexei.starovoitov@gmail.com, daniel@iogearbox.net, davem@davemloft.net
Cc: netdev@vger.kernel.org, oss-drivers@netronome.com,
tehnerd@fb.com, Jakub Kicinski <jakub.kicinski@netronome.com>
Subject: [PATCH bpf-next v2 04/15] bpf: add helper for copying attrs to struct bpf_map
Date: Thu, 11 Jan 2018 20:29:06 -0800
Message-ID: <20180112042917.10348-5-jakub.kicinski@netronome.com>
In-Reply-To: <20180112042917.10348-1-jakub.kicinski@netronome.com>
All map types reimplement the field-by-field copy of union bpf_attr
members into struct bpf_map. Add a helper to perform this operation.
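
For illustration, a map type's ->map_alloc() callback can now replace the
per-field copy with a single call. A minimal sketch, assuming a hypothetical
map type (foo_map and foo_map_alloc are illustrative names, not part of this
patch), where struct foo_map embeds struct bpf_map as its 'map' member:

	static struct bpf_map *foo_map_alloc(union bpf_attr *attr)
	{
		struct foo_map *fmap;

		fmap = kzalloc(sizeof(*fmap), GFP_USER);
		if (!fmap)
			return ERR_PTR(-ENOMEM);

		/* copies map_type, key_size, value_size, max_entries,
		 * map_flags and numa_node in one place
		 */
		bpf_map_init_from_attr(&fmap->map, attr);

		return &fmap->map;
	}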
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Quentin Monnet <quentin.monnet@netronome.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
---
include/linux/bpf.h | 1 +
kernel/bpf/cpumap.c | 8 +-------
kernel/bpf/devmap.c | 8 +-------
kernel/bpf/hashtab.c | 9 +--------
kernel/bpf/lpm_trie.c | 7 +------
kernel/bpf/sockmap.c | 8 +-------
kernel/bpf/stackmap.c | 6 +-----
kernel/bpf/syscall.c | 10 ++++++++++
8 files changed, 17 insertions(+), 40 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 2041ac5db2a3..cfbee9f83fbe 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -378,6 +378,7 @@ void bpf_map_put(struct bpf_map *map);
int bpf_map_precharge_memlock(u32 pages);
void *bpf_map_area_alloc(size_t size, int numa_node);
void bpf_map_area_free(void *base);
+void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
extern int sysctl_unprivileged_bpf_disabled;
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index ce5b669003b2..192151ec9d12 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -94,13 +94,7 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
if (!cmap)
return ERR_PTR(-ENOMEM);
- /* mandatory map attributes */
- cmap->map.map_type = attr->map_type;
- cmap->map.key_size = attr->key_size;
- cmap->map.value_size = attr->value_size;
- cmap->map.max_entries = attr->max_entries;
- cmap->map.map_flags = attr->map_flags;
- cmap->map.numa_node = bpf_map_attr_numa_node(attr);
+ bpf_map_init_from_attr(&cmap->map, attr);
/* Pre-limit array size based on NR_CPUS, not final CPU check */
if (cmap->map.max_entries > NR_CPUS) {
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index ebdef54bf7df..565f9ece9115 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -93,13 +93,7 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
if (!dtab)
return ERR_PTR(-ENOMEM);
- /* mandatory map attributes */
- dtab->map.map_type = attr->map_type;
- dtab->map.key_size = attr->key_size;
- dtab->map.value_size = attr->value_size;
- dtab->map.max_entries = attr->max_entries;
- dtab->map.map_flags = attr->map_flags;
- dtab->map.numa_node = bpf_map_attr_numa_node(attr);
+ bpf_map_init_from_attr(&dtab->map, attr);
/* make sure page count doesn't overflow */
cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 7fd6519444d3..b76828f23b49 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -304,7 +304,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
*/
bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
- int numa_node = bpf_map_attr_numa_node(attr);
struct bpf_htab *htab;
int err, i;
u64 cost;
@@ -313,13 +312,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
if (!htab)
return ERR_PTR(-ENOMEM);
- /* mandatory map attributes */
- htab->map.map_type = attr->map_type;
- htab->map.key_size = attr->key_size;
- htab->map.value_size = attr->value_size;
- htab->map.max_entries = attr->max_entries;
- htab->map.map_flags = attr->map_flags;
- htab->map.numa_node = numa_node;
+ bpf_map_init_from_attr(&htab->map, attr);
if (percpu_lru) {
/* ensure each CPU's lru list has >=1 elements.
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index 885e45479680..584e02227671 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -522,12 +522,7 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
return ERR_PTR(-ENOMEM);
/* copy mandatory map attributes */
- trie->map.map_type = attr->map_type;
- trie->map.key_size = attr->key_size;
- trie->map.value_size = attr->value_size;
- trie->map.max_entries = attr->max_entries;
- trie->map.map_flags = attr->map_flags;
- trie->map.numa_node = bpf_map_attr_numa_node(attr);
+ bpf_map_init_from_attr(&trie->map, attr);
trie->data_size = attr->key_size -
offsetof(struct bpf_lpm_trie_key, data);
trie->max_prefixlen = trie->data_size * 8;
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 079968680bc3..0314d1783d77 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -513,13 +513,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
if (!stab)
return ERR_PTR(-ENOMEM);
- /* mandatory map attributes */
- stab->map.map_type = attr->map_type;
- stab->map.key_size = attr->key_size;
- stab->map.value_size = attr->value_size;
- stab->map.max_entries = attr->max_entries;
- stab->map.map_flags = attr->map_flags;
- stab->map.numa_node = bpf_map_attr_numa_node(attr);
+ bpf_map_init_from_attr(&stab->map, attr);
/* make sure page count doesn't overflow */
cost = (u64) stab->map.max_entries * sizeof(struct sock *);
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 6c63c2222ea8..b0ecf43f5894 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -88,14 +88,10 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
if (cost >= U32_MAX - PAGE_SIZE)
goto free_smap;
- smap->map.map_type = attr->map_type;
- smap->map.key_size = attr->key_size;
+ bpf_map_init_from_attr(&smap->map, attr);
smap->map.value_size = value_size;
- smap->map.max_entries = attr->max_entries;
- smap->map.map_flags = attr->map_flags;
smap->n_buckets = n_buckets;
smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
- smap->map.numa_node = bpf_map_attr_numa_node(attr);
err = bpf_map_precharge_memlock(smap->map.pages);
if (err)
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index c0ac03a04880..a3f726bb42ea 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -143,6 +143,16 @@ void bpf_map_area_free(void *area)
kvfree(area);
}
+void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
+{
+ map->map_type = attr->map_type;
+ map->key_size = attr->key_size;
+ map->value_size = attr->value_size;
+ map->max_entries = attr->max_entries;
+ map->map_flags = attr->map_flags;
+ map->numa_node = bpf_map_attr_numa_node(attr);
+}
+
int bpf_map_precharge_memlock(u32 pages)
{
struct user_struct *user = get_current_user();
--
2.15.1
Thread overview: 20+ messages
2018-01-12 4:29 [PATCH bpf-next v2 00/15] bpf: support creating maps on networking devices Jakub Kicinski
2018-01-12 4:29 ` [PATCH bpf-next v2 01/15] bpf: add map_alloc_check callback Jakub Kicinski
2018-01-12 4:29 ` [PATCH bpf-next v2 02/15] bpf: hashtab: move attribute validation before allocation Jakub Kicinski
2018-01-12 4:29 ` [PATCH bpf-next v2 03/15] bpf: hashtab: move checks out of alloc function Jakub Kicinski
2018-01-12 4:29 ` Jakub Kicinski [this message]
2018-01-12 4:29 ` [PATCH bpf-next v2 05/15] bpf: rename bpf_dev_offload -> bpf_prog_offload Jakub Kicinski
2018-01-12 4:29 ` [PATCH bpf-next v2 06/15] bpf: offload: factor out netdev checking at allocation time Jakub Kicinski
2018-01-12 4:29 ` [PATCH bpf-next v2 07/15] bpf: offload: add map offload infrastructure Jakub Kicinski
2018-01-14 23:37 ` Daniel Borkmann
2018-01-14 23:52 ` Jakub Kicinski
2018-01-14 23:58 ` Daniel Borkmann
2018-01-12 4:29 ` [PATCH bpf-next v2 08/15] nfp: bpf: add map data structure Jakub Kicinski
2018-01-12 4:29 ` [PATCH bpf-next v2 09/15] nfp: bpf: add basic control channel communication Jakub Kicinski
2018-01-12 4:29 ` [PATCH bpf-next v2 10/15] nfp: bpf: implement helpers for FW map ops Jakub Kicinski
2018-01-12 4:29 ` [PATCH bpf-next v2 11/15] nfp: bpf: parse function call and map capabilities Jakub Kicinski
2018-01-12 4:29 ` [PATCH bpf-next v2 12/15] nfp: bpf: add helpers for updating immediate instructions Jakub Kicinski
2018-01-12 4:29 ` [PATCH bpf-next v2 13/15] nfp: bpf: add verification and codegen for map lookups Jakub Kicinski
2018-01-12 4:29 ` [PATCH bpf-next v2 14/15] nfp: bpf: add support for reading map memory Jakub Kicinski
2018-01-12 4:29 ` [PATCH bpf-next v2 15/15] nfp: bpf: implement bpf map offload Jakub Kicinski
2018-01-15 0:00 ` [PATCH bpf-next v2 00/15] bpf: support creating maps on networking devices Daniel Borkmann