From: Charalampos Stylianopoulos <charalampos.stylianopoulos@gmail.com>
To: bpf@vger.kernel.org
Cc: Alexei Starovoitov <ast@kernel.org>,
Daniel Borkmann <daniel@iogearbox.net>,
Nick Zavaritsky <mejedi@gmail.com>,
Charalampos Stylianopoulos <charalampos.stylianopoulos@gmail.com>
Subject: [PATCH bpf-next 1/4] bpf: Add map_num_entries map op
Date: Mon, 6 Jan 2025 15:53:25 +0100 [thread overview]
Message-ID: <20250106145328.399610-2-charalampos.stylianopoulos@gmail.com> (raw)
In-Reply-To: <20250106145328.399610-1-charalampos.stylianopoulos@gmail.com>
This patch extends the map operations with map_num_entries, which returns the
number of entries currently present in the map. Implementations of the op are
provided for the map types that already track the number of elements stored in them.
Co-developed-by: Nick Zavaritsky <mejedi@gmail.com>
Signed-off-by: Nick Zavaritsky <mejedi@gmail.com>
Signed-off-by: Charalampos Stylianopoulos <charalampos.stylianopoulos@gmail.com>
---
include/linux/bpf.h | 3 +++
include/linux/bpf_local_storage.h | 1 +
kernel/bpf/devmap.c | 14 ++++++++++++++
kernel/bpf/hashtab.c | 10 ++++++++++
kernel/bpf/lpm_trie.c | 8 ++++++++
kernel/bpf/queue_stack_maps.c | 11 ++++++++++-
6 files changed, 46 insertions(+), 1 deletion(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index feda0ce90f5a..217260a8f5f4 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -175,6 +175,7 @@ struct bpf_map_ops {
void *callback_ctx, u64 flags);
u64 (*map_mem_usage)(const struct bpf_map *map);
+ s64 (*map_num_entries)(const struct bpf_map *map);
/* BTF id of struct allocated by map_alloc */
int *map_btf_id;
@@ -2402,6 +2403,8 @@ static inline void bpf_map_dec_elem_count(struct bpf_map *map)
this_cpu_dec(*map->elem_count);
}
+s64 bpf_map_sum_elem_count(const struct bpf_map *map);
+
extern int sysctl_unprivileged_bpf_disabled;
bool bpf_token_capable(const struct bpf_token *token, int cap);
diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h
index ab7244d8108f..3a9e69e44c1d 100644
--- a/include/linux/bpf_local_storage.h
+++ b/include/linux/bpf_local_storage.h
@@ -204,5 +204,6 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
void *value, u64 map_flags, bool swap_uptrs, gfp_t gfp_flags);
u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map);
+s64 bpf_local_storage_map_num_entries(const struct bpf_map *map);
#endif /* _BPF_LOCAL_STORAGE_H */
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 3aa002a47a96..f43a58389f8f 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -1041,6 +1041,18 @@ static u64 dev_map_mem_usage(const struct bpf_map *map)
return usage;
}
+static s64 dev_map_num_entries(const struct bpf_map *map)
+{
+ struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
+ s64 entries = 0;
+
+ if (map->map_type == BPF_MAP_TYPE_DEVMAP_HASH)
+ entries = atomic_read((atomic_t *)&dtab->items);
+ else
+ entries = -EOPNOTSUPP;
+ return entries;
+}
+
BTF_ID_LIST_SINGLE(dev_map_btf_ids, struct, bpf_dtab)
const struct bpf_map_ops dev_map_ops = {
.map_meta_equal = bpf_map_meta_equal,
@@ -1053,6 +1065,7 @@ const struct bpf_map_ops dev_map_ops = {
.map_delete_elem = dev_map_delete_elem,
.map_check_btf = map_check_no_btf,
.map_mem_usage = dev_map_mem_usage,
+ .map_num_entries = dev_map_num_entries,
.map_btf_id = &dev_map_btf_ids[0],
.map_redirect = dev_map_redirect,
};
@@ -1068,6 +1081,7 @@ const struct bpf_map_ops dev_map_hash_ops = {
.map_delete_elem = dev_map_hash_delete_elem,
.map_check_btf = map_check_no_btf,
.map_mem_usage = dev_map_mem_usage,
+ .map_num_entries = dev_map_num_entries,
.map_btf_id = &dev_map_btf_ids[0],
.map_redirect = dev_hash_map_redirect,
};
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 3ec941a0ea41..769a4c33c81f 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -2287,6 +2287,11 @@ static u64 htab_map_mem_usage(const struct bpf_map *map)
return usage;
}
+static s64 htab_map_num_entries(const struct bpf_map *map)
+{
+ return bpf_map_sum_elem_count(map);
+}
+
BTF_ID_LIST_SINGLE(htab_map_btf_ids, struct, bpf_htab)
const struct bpf_map_ops htab_map_ops = {
.map_meta_equal = bpf_map_meta_equal,
@@ -2304,6 +2309,7 @@ const struct bpf_map_ops htab_map_ops = {
.map_set_for_each_callback_args = map_set_for_each_callback_args,
.map_for_each_callback = bpf_for_each_hash_elem,
.map_mem_usage = htab_map_mem_usage,
+ .map_num_entries = htab_map_num_entries,
BATCH_OPS(htab),
.map_btf_id = &htab_map_btf_ids[0],
.iter_seq_info = &iter_seq_info,
@@ -2326,6 +2332,7 @@ const struct bpf_map_ops htab_lru_map_ops = {
.map_set_for_each_callback_args = map_set_for_each_callback_args,
.map_for_each_callback = bpf_for_each_hash_elem,
.map_mem_usage = htab_map_mem_usage,
+ .map_num_entries = htab_map_num_entries,
BATCH_OPS(htab_lru),
.map_btf_id = &htab_map_btf_ids[0],
.iter_seq_info = &iter_seq_info,
@@ -2499,6 +2506,7 @@ const struct bpf_map_ops htab_percpu_map_ops = {
.map_set_for_each_callback_args = map_set_for_each_callback_args,
.map_for_each_callback = bpf_for_each_hash_elem,
.map_mem_usage = htab_map_mem_usage,
+ .map_num_entries = htab_map_num_entries,
BATCH_OPS(htab_percpu),
.map_btf_id = &htab_map_btf_ids[0],
.iter_seq_info = &iter_seq_info,
@@ -2519,6 +2527,7 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = {
.map_set_for_each_callback_args = map_set_for_each_callback_args,
.map_for_each_callback = bpf_for_each_hash_elem,
.map_mem_usage = htab_map_mem_usage,
+ .map_num_entries = htab_map_num_entries,
BATCH_OPS(htab_lru_percpu),
.map_btf_id = &htab_map_btf_ids[0],
.iter_seq_info = &iter_seq_info,
@@ -2663,6 +2672,7 @@ const struct bpf_map_ops htab_of_maps_map_ops = {
.map_gen_lookup = htab_of_map_gen_lookup,
.map_check_btf = map_check_no_btf,
.map_mem_usage = htab_map_mem_usage,
+ .map_num_entries = htab_map_num_entries,
BATCH_OPS(htab),
.map_btf_id = &htab_map_btf_ids[0],
};
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index f8bc1e096182..5297eb2e8e97 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -780,6 +780,13 @@ static u64 trie_mem_usage(const struct bpf_map *map)
return elem_size * READ_ONCE(trie->n_entries);
}
+static s64 trie_num_entries(const struct bpf_map *map)
+{
+ struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
+
+ return READ_ONCE(trie->n_entries);
+}
+
BTF_ID_LIST_SINGLE(trie_map_btf_ids, struct, lpm_trie)
const struct bpf_map_ops trie_map_ops = {
.map_meta_equal = bpf_map_meta_equal,
@@ -794,5 +801,6 @@ const struct bpf_map_ops trie_map_ops = {
.map_delete_batch = generic_map_delete_batch,
.map_check_btf = trie_check_btf,
.map_mem_usage = trie_mem_usage,
+ .map_num_entries = trie_num_entries,
.map_btf_id = &trie_map_btf_ids[0],
};
diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
index d869f51ea93a..f66aa31248e7 100644
--- a/kernel/bpf/queue_stack_maps.c
+++ b/kernel/bpf/queue_stack_maps.c
@@ -22,7 +22,7 @@ struct bpf_queue_stack {
char elements[] __aligned(8);
};
-static struct bpf_queue_stack *bpf_queue_stack(struct bpf_map *map)
+static struct bpf_queue_stack *bpf_queue_stack(const struct bpf_map *map)
{
return container_of(map, struct bpf_queue_stack, map);
}
@@ -265,6 +265,13 @@ static u64 queue_stack_map_mem_usage(const struct bpf_map *map)
return usage;
}
+static s64 queue_stack_map_num_entries(const struct bpf_map *map)
+{
+ struct bpf_queue_stack *qs = bpf_queue_stack(map);
+ s64 entries = qs->head - qs->tail;
+ return entries;
+}
+
BTF_ID_LIST_SINGLE(queue_map_btf_ids, struct, bpf_queue_stack)
const struct bpf_map_ops queue_map_ops = {
.map_meta_equal = bpf_map_meta_equal,
@@ -279,6 +286,7 @@ const struct bpf_map_ops queue_map_ops = {
.map_peek_elem = queue_map_peek_elem,
.map_get_next_key = queue_stack_map_get_next_key,
.map_mem_usage = queue_stack_map_mem_usage,
+ .map_num_entries = queue_stack_map_num_entries,
.map_btf_id = &queue_map_btf_ids[0],
};
@@ -295,5 +303,6 @@ const struct bpf_map_ops stack_map_ops = {
.map_peek_elem = stack_map_peek_elem,
.map_get_next_key = queue_stack_map_get_next_key,
.map_mem_usage = queue_stack_map_mem_usage,
+ .map_num_entries = queue_stack_map_num_entries,
.map_btf_id = &queue_map_btf_ids[0],
};
--
2.43.0
next prev parent reply other threads:[~2025-01-06 14:53 UTC|newest]
Thread overview: 15+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-01-06 14:53 [PATCH bpf-next 0/4] expose number of map entries to userspace Charalampos Stylianopoulos
2025-01-06 14:53 ` Charalampos Stylianopoulos [this message]
2025-01-06 14:53 ` [PATCH bpf-next 2/4] bpf: Add bpf command to get number of map entries Charalampos Stylianopoulos
2025-01-07 17:52 ` kernel test robot
2025-01-06 14:53 ` [PATCH bpf-next 3/4] libbpf: Add support for MAP_GET_NUM_ENTRIES command Charalampos Stylianopoulos
2025-01-06 14:53 ` [PATCH bpf-next 4/4] selftests/bpf: Add tests for bpf_map_get_num_entries Charalampos Stylianopoulos
2025-01-06 16:19 ` [PATCH bpf-next 0/4] expose number of map entries to userspace Daniel Borkmann
2025-01-07 7:43 ` Anton Protopopov
2025-01-07 7:48 ` Anton Protopopov
[not found] ` <CAAvdH+yNG=GefEd5CcP_52gPzzZexWMMxFAxnM3isX04iErMfQ@mail.gmail.com>
2025-01-07 11:10 ` Charalampos Stylianopoulos
2025-01-09 17:37 ` Anton Protopopov
2025-01-14 11:38 ` Nick Zavaritsky
2025-01-16 14:59 ` Anton Protopopov
2025-01-16 17:52 ` Nick Zavaritsky
2025-01-17 10:35 ` Anton Protopopov
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250106145328.399610-2-charalampos.stylianopoulos@gmail.com \
--to=charalampos.stylianopoulos@gmail.com \
--cc=ast@kernel.org \
--cc=bpf@vger.kernel.org \
--cc=daniel@iogearbox.net \
--cc=mejedi@gmail.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox