From: Leon Hwang <leon.hwang@linux.dev>
To: bpf@vger.kernel.org
Cc: ast@kernel.org, andrii@kernel.org, daniel@iogearbox.net,
jolsa@kernel.org, yonghong.song@linux.dev, song@kernel.org,
eddyz87@gmail.com, dxu@dxuuu.xyz, deso@posteo.net,
leon.hwang@linux.dev, kernel-patches-bot@fb.com
Subject: [PATCH bpf-next v5 1/9] bpf: Generalize data copying for percpu maps
Date: Mon, 8 Sep 2025 22:36:36 +0800
Message-ID: <20250908143644.30993-2-leon.hwang@linux.dev>
In-Reply-To: <20250908143644.30993-1-leon.hwang@linux.dev>

Refactor the data copying logic of the following percpu map types:
* percpu_array
* percpu_hash
* lru_percpu_hash
* percpu_cgroup_storage
by introducing two helpers:
* 'bpf_percpu_copy_data()'
* 'bpf_percpu_update_data()'
This prepares for introducing the BPF_F_CPU and BPF_F_ALL_CPUS flags
for these percpu maps with less code churn.
Signed-off-by: Leon Hwang <leon.hwang@linux.dev>
---
include/linux/bpf.h | 28 +++++++++++++++++++++++++++-
kernel/bpf/arraymap.c | 14 ++------------
kernel/bpf/hashtab.c | 27 ++++-----------------------
kernel/bpf/local_storage.c | 18 ++++++------------
4 files changed, 39 insertions(+), 48 deletions(-)
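A quick user-space sketch of the layout both helpers walk with
'off += size' (illustrative only, not part of this patch:
read_percpu_elem() and its parameters are invented here; the libbpf
calls are real). A lookup on a percpu map fills one
round_up(value_size, 8) slot per possible CPU, laid out back to back:

#include <errno.h>
#include <stdlib.h>
#include <bpf/bpf.h>	/* bpf_map_lookup_elem() */
#include <bpf/libbpf.h>	/* libbpf_num_possible_cpus() */

/* Read one percpu element; buf holds one 8-byte-aligned slot per possible CPU. */
static int read_percpu_elem(int map_fd, __u32 key, size_t value_size)
{
	int err, cpu, ncpus = libbpf_num_possible_cpus();
	size_t slot = (value_size + 7) & ~(size_t)7;	/* round_up(value_size, 8) */
	char *buf;

	if (ncpus < 0)
		return ncpus;
	buf = calloc(ncpus, slot);
	if (!buf)
		return -ENOMEM;
	err = bpf_map_lookup_elem(map_fd, &key, buf);
	for (cpu = 0; !err && cpu < ncpus; cpu++) {
		/* buf + cpu * slot is this CPU's value */
	}
	free(buf);
	return err;
}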
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 8f6e87f0f3a89..ce523a49dc20c 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -547,6 +547,33 @@ static inline void copy_map_value_long(struct bpf_map *map, void *dst, void *src
bpf_obj_memcpy(map->record, dst, src, map->value_size, true);
}
+#ifdef CONFIG_BPF_SYSCALL
+static inline void bpf_percpu_copy_data(struct bpf_map *map, void __percpu *pptr, void *value,
+					u32 size)
+{
+	int cpu, off = 0;
+
+	for_each_possible_cpu(cpu) {
+		copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
+		check_and_init_map_value(map, value + off);
+		off += size;
+	}
+}
+
+void bpf_obj_free_fields(const struct btf_record *rec, void *obj);
+
+static inline void bpf_percpu_update_data(struct bpf_map *map, void __percpu *pptr, void *value,
+					  u32 size)
+{
+	int cpu, off = 0;
+
+	for_each_possible_cpu(cpu) {
+		copy_map_value_long(map, per_cpu_ptr(pptr, cpu), value + off);
+		bpf_obj_free_fields(map->record, per_cpu_ptr(pptr, cpu));
+		off += size;
+	}
+}
+#endif
+
static inline void bpf_obj_swap_uptrs(const struct btf_record *rec, void *dst, void *src)
{
unsigned long *src_uptr, *dst_uptr;
@@ -2417,7 +2444,6 @@ struct btf_record *btf_record_dup(const struct btf_record *rec);
bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b);
void bpf_obj_free_timer(const struct btf_record *rec, void *obj);
void bpf_obj_free_workqueue(const struct btf_record *rec, void *obj);
-void bpf_obj_free_fields(const struct btf_record *rec, void *obj);
void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu);
struct bpf_map *bpf_map_get(u32 ufd);
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 3d080916faf97..ed9e47dc4137b 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -300,7 +300,6 @@ int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
struct bpf_array *array = container_of(map, struct bpf_array, map);
u32 index = *(u32 *)key;
void __percpu *pptr;
- int cpu, off = 0;
u32 size;
if (unlikely(index >= array->map.max_entries))
@@ -313,11 +312,7 @@ int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
size = array->elem_size;
rcu_read_lock();
pptr = array->pptrs[index & array->index_mask];
- for_each_possible_cpu(cpu) {
- copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
- check_and_init_map_value(map, value + off);
- off += size;
- }
+ bpf_percpu_copy_data(map, pptr, value, size);
rcu_read_unlock();
return 0;
}
@@ -387,7 +382,6 @@ int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
struct bpf_array *array = container_of(map, struct bpf_array, map);
u32 index = *(u32 *)key;
void __percpu *pptr;
- int cpu, off = 0;
u32 size;
if (unlikely(map_flags > BPF_EXIST))
@@ -411,11 +405,7 @@ int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
size = array->elem_size;
rcu_read_lock();
pptr = array->pptrs[index & array->index_mask];
- for_each_possible_cpu(cpu) {
- copy_map_value_long(map, per_cpu_ptr(pptr, cpu), value + off);
- bpf_obj_free_fields(array->map.record, per_cpu_ptr(pptr, cpu));
- off += size;
- }
+ bpf_percpu_update_data(map, pptr, value, size);
rcu_read_unlock();
return 0;
}
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 71f9931ac64cd..0a2c1042d5fdb 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -944,12 +944,8 @@ static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
copy_map_value(&htab->map, this_cpu_ptr(pptr), value);
} else {
u32 size = round_up(htab->map.value_size, 8);
- int off = 0, cpu;
- for_each_possible_cpu(cpu) {
- copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value + off);
- off += size;
- }
+ bpf_percpu_update_data(&htab->map, pptr, value, size);
}
}
@@ -1610,14 +1606,9 @@ static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
if (is_percpu) {
u32 roundup_value_size = round_up(map->value_size, 8);
void __percpu *pptr;
- int off = 0, cpu;
pptr = htab_elem_get_ptr(l, key_size);
- for_each_possible_cpu(cpu) {
- copy_map_value_long(&htab->map, value + off, per_cpu_ptr(pptr, cpu));
- check_and_init_map_value(&htab->map, value + off);
- off += roundup_value_size;
- }
+ bpf_percpu_copy_data(&htab->map, pptr, value, roundup_value_size);
} else {
void *src = htab_elem_value(l, map->key_size);
@@ -1802,15 +1793,10 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
memcpy(dst_key, l->key, key_size);
if (is_percpu) {
- int off = 0, cpu;
void __percpu *pptr;
pptr = htab_elem_get_ptr(l, map->key_size);
- for_each_possible_cpu(cpu) {
- copy_map_value_long(&htab->map, dst_val + off, per_cpu_ptr(pptr, cpu));
- check_and_init_map_value(&htab->map, dst_val + off);
- off += size;
- }
+ bpf_percpu_copy_data(&htab->map, pptr, dst_val, size);
} else {
value = htab_elem_value(l, key_size);
if (is_fd_htab(htab)) {
@@ -2370,7 +2356,6 @@ int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
struct htab_elem *l;
void __percpu *pptr;
int ret = -ENOENT;
- int cpu, off = 0;
u32 size;
/* per_cpu areas are zero-filled and bpf programs can only
@@ -2386,11 +2371,7 @@ int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
* eviction heuristics when user space does a map walk.
*/
pptr = htab_elem_get_ptr(l, map->key_size);
- for_each_possible_cpu(cpu) {
- copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
- check_and_init_map_value(map, value + off);
- off += size;
- }
+ bpf_percpu_copy_data(map, pptr, value, size);
ret = 0;
out:
rcu_read_unlock();
diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
index c93a756e035c0..a1debbd26a415 100644
--- a/kernel/bpf/local_storage.c
+++ b/kernel/bpf/local_storage.c
@@ -184,7 +184,7 @@ int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *key,
{
struct bpf_cgroup_storage_map *map = map_to_storage(_map);
struct bpf_cgroup_storage *storage;
- int cpu, off = 0;
+ void __percpu *pptr;
u32 size;
rcu_read_lock();
@@ -199,11 +199,8 @@ int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *key,
* will not leak any kernel data
*/
size = round_up(_map->value_size, 8);
- for_each_possible_cpu(cpu) {
- bpf_long_memcpy(value + off,
- per_cpu_ptr(storage->percpu_buf, cpu), size);
- off += size;
- }
+ pptr = storage->percpu_buf;
+ bpf_percpu_copy_data(_map, pptr, value, size);
rcu_read_unlock();
return 0;
}
@@ -213,7 +210,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *key,
{
struct bpf_cgroup_storage_map *map = map_to_storage(_map);
struct bpf_cgroup_storage *storage;
- int cpu, off = 0;
+ void __percpu *pptr;
u32 size;
if (map_flags != BPF_ANY && map_flags != BPF_EXIST)
@@ -233,11 +230,8 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *key,
* so no kernel data leaks possible
*/
size = round_up(_map->value_size, 8);
- for_each_possible_cpu(cpu) {
- bpf_long_memcpy(per_cpu_ptr(storage->percpu_buf, cpu),
- value + off, size);
- off += size;
- }
+ pptr = storage->percpu_buf;
+ bpf_percpu_update_data(_map, pptr, value, size);
rcu_read_unlock();
return 0;
}
--
2.50.1