From: Amery Hung <ameryhung@gmail.com>
To: bpf@vger.kernel.org
Cc: netdev@vger.kernel.org, alexei.starovoitov@gmail.com,
andrii@kernel.org, daniel@iogearbox.net, memxor@gmail.com,
martin.lau@kernel.org, kpsingh@kernel.org,
yonghong.song@linux.dev, song@kernel.org, haoluo@google.com,
ameryhung@gmail.com, kernel-team@meta.com
Subject: [PATCH bpf-next v5 01/16] bpf: Select bpf_local_storage_map_bucket based on bpf_local_storage
Date: Sun, 1 Feb 2026 09:50:34 -0800 [thread overview]
Message-ID: <20260201175050.468601-2-ameryhung@gmail.com> (raw)
In-Reply-To: <20260201175050.468601-1-ameryhung@gmail.com>
A later bpf_local_storage refactor will acquire all locks before
performing any update. To reduce the number of locks that need to be
taken in bpf_local_storage_map_update(), determine the bucket based on
the local_storage an selem belongs to instead of the selem pointer.
Currently, when a new selem needs to be created to replace the old selem
in bpf_local_storage_map_update(), locks of both buckets need to be
acquired to prevent racing. This can be simplified if the two selems
belong to the same bucket so that only one bucket needs to be locked.
Therefore, instead of hashing the selem pointer, hash the local_storage
pointer the selem belongs to.
Performance-wise, this is slightly better as update now requires locking
only one bucket. It should not change the level of contention on one
bucket, as the pointers to local storages of selems in a map are just as
unique as pointers to selems.
Signed-off-by: Amery Hung <ameryhung@gmail.com>
---
include/linux/bpf_local_storage.h | 1 +
kernel/bpf/bpf_local_storage.c | 17 +++++++++++------
net/core/bpf_sk_storage.c | 2 +-
3 files changed, 13 insertions(+), 7 deletions(-)
diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h
index 66432248cd81..2638487425b8 100644
--- a/include/linux/bpf_local_storage.h
+++ b/include/linux/bpf_local_storage.h
@@ -179,6 +179,7 @@ void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool reuse_now);
void bpf_selem_link_map(struct bpf_local_storage_map *smap,
+ struct bpf_local_storage *local_storage,
struct bpf_local_storage_elem *selem);
struct bpf_local_storage_elem *
diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c
index e2fe6c32822b..91b28f4e3130 100644
--- a/kernel/bpf/bpf_local_storage.c
+++ b/kernel/bpf/bpf_local_storage.c
@@ -19,9 +19,9 @@
static struct bpf_local_storage_map_bucket *
select_bucket(struct bpf_local_storage_map *smap,
- struct bpf_local_storage_elem *selem)
+ struct bpf_local_storage *local_storage)
{
- return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
+ return &smap->buckets[hash_ptr(local_storage, smap->bucket_log)];
}
static int mem_charge(struct bpf_local_storage_map *smap, void *owner, u32 size)
@@ -349,6 +349,7 @@ void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
static void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem)
{
+ struct bpf_local_storage *local_storage;
struct bpf_local_storage_map *smap;
struct bpf_local_storage_map_bucket *b;
unsigned long flags;
@@ -357,8 +358,10 @@ static void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem)
/* selem has already be unlinked from smap */
return;
+ local_storage = rcu_dereference_check(selem->local_storage,
+ bpf_rcu_lock_held());
smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
- b = select_bucket(smap, selem);
+ b = select_bucket(smap, local_storage);
raw_spin_lock_irqsave(&b->lock, flags);
if (likely(selem_linked_to_map(selem)))
hlist_del_init_rcu(&selem->map_node);
@@ -366,11 +369,13 @@ static void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem)
}
void bpf_selem_link_map(struct bpf_local_storage_map *smap,
+ struct bpf_local_storage *local_storage,
struct bpf_local_storage_elem *selem)
{
- struct bpf_local_storage_map_bucket *b = select_bucket(smap, selem);
+ struct bpf_local_storage_map_bucket *b;
unsigned long flags;
+ b = select_bucket(smap, local_storage);
raw_spin_lock_irqsave(&b->lock, flags);
hlist_add_head_rcu(&selem->map_node, &b->list);
raw_spin_unlock_irqrestore(&b->lock, flags);
@@ -448,7 +453,7 @@ int bpf_local_storage_alloc(void *owner,
storage->use_kmalloc_nolock = smap->use_kmalloc_nolock;
bpf_selem_link_storage_nolock(storage, first_selem);
- bpf_selem_link_map(smap, first_selem);
+ bpf_selem_link_map(smap, storage, first_selem);
owner_storage_ptr =
(struct bpf_local_storage **)owner_storage(smap, owner);
@@ -576,7 +581,7 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
alloc_selem = NULL;
/* First, link the new selem to the map */
- bpf_selem_link_map(smap, selem);
+ bpf_selem_link_map(smap, local_storage, selem);
/* Second, link (and publish) the new selem to local_storage */
bpf_selem_link_storage_nolock(local_storage, selem);
diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
index de111818f3a0..e36273e4fcbd 100644
--- a/net/core/bpf_sk_storage.c
+++ b/net/core/bpf_sk_storage.c
@@ -191,7 +191,7 @@ int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk)
}
if (new_sk_storage) {
- bpf_selem_link_map(smap, copy_selem);
+ bpf_selem_link_map(smap, new_sk_storage, copy_selem);
bpf_selem_link_storage_nolock(new_sk_storage, copy_selem);
} else {
ret = bpf_local_storage_alloc(newsk, smap, copy_selem, GFP_ATOMIC);
--
2.47.3
next prev parent reply other threads:[~2026-02-01 17:50 UTC|newest]
Thread overview: 24+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-02-01 17:50 [PATCH bpf-next v5 00/16] Remove task and cgroup local storage percpu counters Amery Hung
2026-02-01 17:50 ` Amery Hung [this message]
2026-02-01 17:50 ` [PATCH bpf-next v5 02/16] bpf: Convert bpf_selem_unlink_map to failable Amery Hung
2026-02-01 17:50 ` [PATCH bpf-next v5 03/16] bpf: Convert bpf_selem_link_map " Amery Hung
2026-02-01 17:50 ` [PATCH bpf-next v5 04/16] bpf: Convert bpf_selem_unlink " Amery Hung
2026-02-01 17:50 ` [PATCH bpf-next v5 05/16] bpf: Change local_storage->lock and b->lock to rqspinlock Amery Hung
2026-02-01 17:50 ` [PATCH bpf-next v5 06/16] bpf: Remove task local storage percpu counter Amery Hung
2026-02-01 17:50 ` [PATCH bpf-next v5 07/16] bpf: Remove cgroup " Amery Hung
2026-02-01 17:50 ` [PATCH bpf-next v5 08/16] bpf: Remove unused percpu counter from bpf_local_storage_map_free Amery Hung
2026-02-01 17:50 ` [PATCH bpf-next v5 09/16] bpf: Prepare for bpf_selem_unlink_nofail() Amery Hung
2026-02-01 17:50 ` [PATCH bpf-next v5 10/16] bpf: Support lockless unlink when freeing map or local storage Amery Hung
2026-02-01 18:22 ` bot+bpf-ci
2026-02-04 5:39 ` Martin KaFai Lau
2026-02-04 23:14 ` Amery Hung
2026-02-05 1:08 ` Martin KaFai Lau
2026-02-01 17:50 ` [PATCH bpf-next v5 11/16] bpf: Switch to bpf_selem_unlink_nofail in bpf_local_storage_{map_free, destroy} Amery Hung
2026-02-04 1:52 ` Martin KaFai Lau
2026-02-04 23:20 ` Amery Hung
2026-02-01 17:50 ` [PATCH bpf-next v5 12/16] selftests/bpf: Update sk_storage_omem_uncharge test Amery Hung
2026-02-01 17:50 ` [PATCH bpf-next v5 13/16] selftests/bpf: Update task_local_storage/recursion test Amery Hung
2026-02-01 17:50 ` [PATCH bpf-next v5 14/16] selftests/bpf: Update task_local_storage/task_storage_nodeadlock test Amery Hung
2026-02-01 17:50 ` [PATCH bpf-next v5 15/16] selftests/bpf: Remove test_task_storage_map_stress_lookup Amery Hung
2026-02-01 17:50 ` [PATCH bpf-next v5 16/16] selftests/bpf: Choose another percpu variable in bpf for btf_dump test Amery Hung
2026-02-01 23:29 ` [PATCH bpf-next v5 00/16] Remove task and cgroup local storage percpu counters Alexei Starovoitov
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260201175050.468601-2-ameryhung@gmail.com \
--to=ameryhung@gmail.com \
--cc=alexei.starovoitov@gmail.com \
--cc=andrii@kernel.org \
--cc=bpf@vger.kernel.org \
--cc=daniel@iogearbox.net \
--cc=haoluo@google.com \
--cc=kernel-team@meta.com \
--cc=kpsingh@kernel.org \
--cc=martin.lau@kernel.org \
--cc=memxor@gmail.com \
--cc=netdev@vger.kernel.org \
--cc=song@kernel.org \
--cc=yonghong.song@linux.dev \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox