From: Stanislav Fomichev <stfomichev@gmail.com>
To: netdev@vger.kernel.org
Cc: davem@davemloft.net, edumazet@google.com, kuba@kernel.org,
pabeni@redhat.com, skalluru@marvell.com, manishc@marvell.com,
andrew+netdev@lunn.ch, michael.chan@broadcom.com,
pavan.chebbi@broadcom.com, ajit.khaparde@broadcom.com,
sriharsha.basavapatna@broadcom.com, somnath.kotur@broadcom.com,
anthony.l.nguyen@intel.com, przemyslaw.kitszel@intel.com,
tariqt@nvidia.com, saeedm@nvidia.com, louis.peens@corigine.com,
shshaikh@marvell.com, GR-Linux-NIC-Dev@marvell.com,
ecree.xilinx@gmail.com, horms@kernel.org, dsahern@kernel.org,
shuah@kernel.org, tglx@linutronix.de, mingo@kernel.org,
ruanjinjie@huawei.com, idosch@nvidia.com, razor@blackwall.org,
petrm@nvidia.com, kuniyu@google.com, sdf@fomichev.me,
linux-kernel@vger.kernel.org, intel-wired-lan@lists.osuosl.org,
linux-rdma@vger.kernel.org, oss-drivers@corigine.com,
linux-net-drivers@amd.com, linux-kselftest@vger.kernel.org,
leon@kernel.org
Subject: [PATCH net-next v4 2/6] vxlan: drop sock_lock
Date: Fri, 13 Jun 2025 13:33:21 -0700
Message-ID: <20250613203325.1127217-3-stfomichev@gmail.com>
In-Reply-To: <20250613203325.1127217-1-stfomichev@gmail.com>

Soon we won't be able to sleep in vxlan_offload_rx_ports, and we won't
be able to grab sock_lock there. Instead of having a separate spinlock
to manage the sockets, rely on the rtnl lock. This is similar to how
geneve manages its sockets.

Signed-off-by: Stanislav Fomichev <stfomichev@gmail.com>
---
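[Note for reviewers, not part of the commit message: below is a minimal
sketch of the locking scheme this patch moves to, in isolation. The
demo_* names are invented for illustration; writers mutate the hash
list under rtnl, and control-path lookups use RCU plus
refcount_inc_not_zero(), mirroring what __vxlan_sock_add() does.]

#include <linux/rculist.h>
#include <linux/refcount.h>
#include <linux/rtnetlink.h>
#include <linux/types.h>

struct demo_sock {
	struct hlist_node hlist;
	refcount_t refcnt;
	__be16 port;
};

/* Writes are serialized by rtnl; readers traverse under RCU. */
static HLIST_HEAD(demo_socks);

static void demo_add(struct demo_sock *ds)
{
	/* Writer side: rtnl replaces the old per-netns spinlock. */
	ASSERT_RTNL();
	hlist_add_head_rcu(&ds->hlist, &demo_socks);
}

static struct demo_sock *demo_lookup(__be16 port)
{
	struct demo_sock *ds;

	rcu_read_lock();
	hlist_for_each_entry_rcu(ds, &demo_socks, hlist) {
		if (ds->port != port)
			continue;
		/* A socket mid-teardown has refcnt == 0; the real
		 * __vxlan_sock_add() returns -EBUSY in that case.
		 */
		if (refcount_inc_not_zero(&ds->refcnt)) {
			rcu_read_unlock();
			return ds;
		}
		break;
	}
	rcu_read_unlock();
	return NULL;
}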
 drivers/net/vxlan/vxlan_core.c      | 34 +++++++++++++----------------
 drivers/net/vxlan/vxlan_private.h   |  2 +-
 drivers/net/vxlan/vxlan_vnifilter.c | 18 ++++++---------
 3 files changed, 23 insertions(+), 31 deletions(-)

diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 97792de896b7..01362e98325c 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -1487,19 +1487,19 @@ static bool __vxlan_sock_release_prep(struct vxlan_sock *vs)
 {
 	struct vxlan_net *vn;
 
+	ASSERT_RTNL();
+
 	if (!vs)
 		return false;
 	if (!refcount_dec_and_test(&vs->refcnt))
 		return false;
 
 	vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id);
-	spin_lock(&vn->sock_lock);
 	hlist_del_rcu(&vs->hlist);
 	udp_tunnel_notify_del_rx_port(vs->sock,
 				      (vs->flags & VXLAN_F_GPE) ?
 				      UDP_TUNNEL_TYPE_VXLAN_GPE :
 				      UDP_TUNNEL_TYPE_VXLAN);
-	spin_unlock(&vn->sock_lock);
 
 	return true;
 }
@@ -2847,26 +2847,23 @@ static void vxlan_cleanup(struct timer_list *t)
 
 static void vxlan_vs_del_dev(struct vxlan_dev *vxlan)
 {
-	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+	ASSERT_RTNL();
 
-	spin_lock(&vn->sock_lock);
 	hlist_del_init_rcu(&vxlan->hlist4.hlist);
 #if IS_ENABLED(CONFIG_IPV6)
 	hlist_del_init_rcu(&vxlan->hlist6.hlist);
 #endif
-	spin_unlock(&vn->sock_lock);
 }
 
 static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan,
 			     struct vxlan_dev_node *node)
 {
-	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
 	__be32 vni = vxlan->default_dst.remote_vni;
 
+	ASSERT_RTNL();
+
 	node->vxlan = vxlan;
-	spin_lock(&vn->sock_lock);
 	hlist_add_head_rcu(&node->hlist, vni_head(vs, vni));
-	spin_unlock(&vn->sock_lock);
 }
 
 /* Setup stats when device is created */
@@ -3291,9 +3288,10 @@ static void vxlan_offload_rx_ports(struct net_device *dev, bool push)
 	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
 	unsigned int i;
 
-	spin_lock(&vn->sock_lock);
+	ASSERT_RTNL();
+
 	for (i = 0; i < PORT_HASH_SIZE; ++i) {
-		hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
+		hlist_for_each_entry(vs, &vn->sock_list[i], hlist) {
 			unsigned short type;
 
 			if (vs->flags & VXLAN_F_GPE)
@@ -3307,7 +3305,6 @@ static void vxlan_offload_rx_ports(struct net_device *dev, bool push)
 				udp_tunnel_drop_rx_port(dev, vs->sock, type);
 		}
 	}
-	spin_unlock(&vn->sock_lock);
 }
 
 /* Initialize the device structure. */
@@ -3537,12 +3534,13 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
 					      __be16 port, u32 flags,
 					      int ifindex)
 {
-	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
 	struct vxlan_sock *vs;
 	struct socket *sock;
 	unsigned int h;
 	struct udp_tunnel_sock_cfg tunnel_cfg;
 
+	ASSERT_RTNL();
+
 	vs = kzalloc(sizeof(*vs), GFP_KERNEL);
 	if (!vs)
 		return ERR_PTR(-ENOMEM);
@@ -3560,13 +3558,11 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
 	refcount_set(&vs->refcnt, 1);
 	vs->flags = (flags & VXLAN_F_RCV_FLAGS);
 
-	spin_lock(&vn->sock_lock);
 	hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
 	udp_tunnel_notify_add_rx_port(sock,
 				      (vs->flags & VXLAN_F_GPE) ?
 				      UDP_TUNNEL_TYPE_VXLAN_GPE :
 				      UDP_TUNNEL_TYPE_VXLAN);
-	spin_unlock(&vn->sock_lock);
 
 	/* Mark socket as an encapsulation socket. */
 	memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
@@ -3590,26 +3586,27 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
 
 static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
 {
-	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
 	bool metadata = vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA;
 	struct vxlan_sock *vs = NULL;
 	struct vxlan_dev_node *node;
 	int l3mdev_index = 0;
 
+	ASSERT_RTNL();
+
 	if (vxlan->cfg.remote_ifindex)
 		l3mdev_index = l3mdev_master_upper_ifindex_by_index(
 			vxlan->net, vxlan->cfg.remote_ifindex);
 
 	if (!vxlan->cfg.no_share) {
-		spin_lock(&vn->sock_lock);
+		rcu_read_lock();
 		vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
 				     vxlan->cfg.dst_port, vxlan->cfg.flags,
 				     l3mdev_index);
 		if (vs && !refcount_inc_not_zero(&vs->refcnt)) {
-			spin_unlock(&vn->sock_lock);
+			rcu_read_unlock();
 			return -EBUSY;
 		}
-		spin_unlock(&vn->sock_lock);
+		rcu_read_unlock();
 	}
 	if (!vs)
 		vs = vxlan_socket_create(vxlan->net, ipv6,
@@ -4875,7 +4872,6 @@ static __net_init int vxlan_init_net(struct net *net)
 	unsigned int h;
 
 	INIT_LIST_HEAD(&vn->vxlan_list);
-	spin_lock_init(&vn->sock_lock);
 	vn->nexthop_notifier_block.notifier_call = vxlan_nexthop_event;
 
 	for (h = 0; h < PORT_HASH_SIZE; ++h)
diff --git a/drivers/net/vxlan/vxlan_private.h b/drivers/net/vxlan/vxlan_private.h
index d328aed9feef..6c625fb29c6c 100644
--- a/drivers/net/vxlan/vxlan_private.h
+++ b/drivers/net/vxlan/vxlan_private.h
@@ -19,8 +19,8 @@ extern const struct rhashtable_params vxlan_vni_rht_params;
 
 /* per-network namespace private data for this module */
 struct vxlan_net {
 	struct list_head vxlan_list;
+	/* sock_list is protected by rtnl lock */
 	struct hlist_head sock_list[PORT_HASH_SIZE];
-	spinlock_t sock_lock;
 	struct notifier_block nexthop_notifier_block;
 };
diff --git a/drivers/net/vxlan/vxlan_vnifilter.c b/drivers/net/vxlan/vxlan_vnifilter.c
index 186d0660669a..4ff56d9f8f28 100644
--- a/drivers/net/vxlan/vxlan_vnifilter.c
+++ b/drivers/net/vxlan/vxlan_vnifilter.c
@@ -40,11 +40,11 @@ static void vxlan_vs_add_del_vninode(struct vxlan_dev *vxlan,
 				     struct vxlan_vni_node *v,
 				     bool del)
 {
-	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
 	struct vxlan_dev_node *node;
 	struct vxlan_sock *vs;
 
-	spin_lock(&vn->sock_lock);
+	ASSERT_RTNL();
+
 	if (del) {
 		if (!hlist_unhashed(&v->hlist4.hlist))
 			hlist_del_init_rcu(&v->hlist4.hlist);
@@ -52,7 +52,7 @@ static void vxlan_vs_add_del_vninode(struct vxlan_dev *vxlan,
 		if (!hlist_unhashed(&v->hlist6.hlist))
 			hlist_del_init_rcu(&v->hlist6.hlist);
 #endif
-		goto out;
+		return;
 	}
 
 #if IS_ENABLED(CONFIG_IPV6)
@@ -67,23 +67,21 @@ static void vxlan_vs_add_del_vninode(struct vxlan_dev *vxlan,
 		node = &v->hlist4;
 		hlist_add_head_rcu(&node->hlist, vni_head(vs, v->vni));
 	}
-out:
-	spin_unlock(&vn->sock_lock);
 }
 
 void vxlan_vs_add_vnigrp(struct vxlan_dev *vxlan,
 			 struct vxlan_sock *vs,
 			 bool ipv6)
 {
-	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
 	struct vxlan_vni_group *vg = rtnl_dereference(vxlan->vnigrp);
 	struct vxlan_vni_node *v, *tmp;
 	struct vxlan_dev_node *node;
 
+	ASSERT_RTNL();
+
 	if (!vg)
 		return;
 
-	spin_lock(&vn->sock_lock);
 	list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
 #if IS_ENABLED(CONFIG_IPV6)
 		if (ipv6)
@@ -94,26 +92,24 @@ void vxlan_vs_add_vnigrp(struct vxlan_dev *vxlan,
 		node->vxlan = vxlan;
 		hlist_add_head_rcu(&node->hlist, vni_head(vs, v->vni));
 	}
-	spin_unlock(&vn->sock_lock);
 }
 
 void vxlan_vs_del_vnigrp(struct vxlan_dev *vxlan)
 {
 	struct vxlan_vni_group *vg = rtnl_dereference(vxlan->vnigrp);
-	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
 	struct vxlan_vni_node *v, *tmp;
 
+	ASSERT_RTNL();
+
 	if (!vg)
 		return;
 
-	spin_lock(&vn->sock_lock);
 	list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
 		hlist_del_init_rcu(&v->hlist4.hlist);
 #if IS_ENABLED(CONFIG_IPV6)
 		hlist_del_init_rcu(&v->hlist6.hlist);
 #endif
 	}
-	spin_unlock(&vn->sock_lock);
 }
 
 static void vxlan_vnifilter_stats_get(const struct vxlan_vni_node *vninode,
--
2.49.0