From: Jakub Kicinski <kuba@kernel.org>
To: davem@davemloft.net
Cc: netdev@vger.kernel.org, edumazet@google.com, pabeni@redhat.com,
andrew+netdev@lunn.ch, horms@kernel.org, jdamato@fastly.com,
Jakub Kicinski <kuba@kernel.org>
Subject: [PATCH net-next v2 05/11] net: protect netdev->napi_list with netdev_lock()
Date: Tue, 14 Jan 2025 19:53:13 -0800 [thread overview]
Message-ID: <20250115035319.559603-6-kuba@kernel.org> (raw)
In-Reply-To: <20250115035319.559603-1-kuba@kernel.org>
Hold netdev->lock when NAPIs are getting added or removed.
This will allow safe access to NAPI instances of a net_device
without rtnl_lock.
Create a family of helpers which assume the lock is already taken.
Switch iavf to them, as it already makes extensive use of
netdev->lock.
Reviewed-by: Joe Damato <jdamato@fastly.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
include/linux/netdevice.h | 54 ++++++++++++++++++---
drivers/net/ethernet/intel/iavf/iavf_main.c | 6 +--
net/core/dev.c | 15 ++++--
3 files changed, 60 insertions(+), 15 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index fdf3a8d93185..e8c8a5ea7326 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2456,7 +2456,7 @@ struct net_device {
* Drivers are free to use it for other protection.
*
* Protects:
- * @net_shaper_hierarchy, @reg_state
+ * @napi_list, @net_shaper_hierarchy, @reg_state
*
* Partially protects (writers must hold both @lock and rtnl_lock):
* @up
@@ -2712,8 +2712,19 @@ static inline void netif_napi_set_irq(struct napi_struct *napi, int irq)
*/
#define NAPI_POLL_WEIGHT 64
-void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
- int (*poll)(struct napi_struct *, int), int weight);
+void netif_napi_add_weight_locked(struct net_device *dev,
+ struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int),
+ int weight);
+
+static inline void
+netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int), int weight)
+{
+ netdev_lock(dev);
+ netif_napi_add_weight_locked(dev, napi, poll, weight);
+ netdev_unlock(dev);
+}
/**
* netif_napi_add() - initialize a NAPI context
@@ -2731,6 +2742,13 @@ netif_napi_add(struct net_device *dev, struct napi_struct *napi,
netif_napi_add_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
}
+static inline void
+netif_napi_add_locked(struct net_device *dev, struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int))
+{
+ netif_napi_add_weight_locked(dev, napi, poll, NAPI_POLL_WEIGHT);
+}
+
static inline void
netif_napi_add_tx_weight(struct net_device *dev,
struct napi_struct *napi,
@@ -2741,6 +2759,15 @@ netif_napi_add_tx_weight(struct net_device *dev,
netif_napi_add_weight(dev, napi, poll, weight);
}
+static inline void
+netif_napi_add_config_locked(struct net_device *dev, struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int), int index)
+{
+ napi->index = index;
+ napi->config = &dev->napi_config[index];
+ netif_napi_add_weight_locked(dev, napi, poll, NAPI_POLL_WEIGHT);
+}
+
/**
* netif_napi_add_config - initialize a NAPI context with persistent config
* @dev: network device
@@ -2752,9 +2779,9 @@ static inline void
netif_napi_add_config(struct net_device *dev, struct napi_struct *napi,
int (*poll)(struct napi_struct *, int), int index)
{
- napi->index = index;
- napi->config = &dev->napi_config[index];
- netif_napi_add_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
+ netdev_lock(dev);
+ netif_napi_add_config_locked(dev, napi, poll, index);
+ netdev_unlock(dev);
}
/**
@@ -2774,6 +2801,8 @@ static inline void netif_napi_add_tx(struct net_device *dev,
netif_napi_add_tx_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
}
+void __netif_napi_del_locked(struct napi_struct *napi);
+
/**
* __netif_napi_del - remove a NAPI context
* @napi: NAPI context
@@ -2782,7 +2811,18 @@ static inline void netif_napi_add_tx(struct net_device *dev,
* containing @napi. Drivers might want to call this helper to combine
* all the needed RCU grace periods into a single one.
*/
-void __netif_napi_del(struct napi_struct *napi);
+static inline void __netif_napi_del(struct napi_struct *napi)
+{
+ netdev_lock(napi->dev);
+ __netif_napi_del_locked(napi);
+ netdev_unlock(napi->dev);
+}
+
+static inline void netif_napi_del_locked(struct napi_struct *napi)
+{
+ __netif_napi_del_locked(napi);
+ synchronize_net();
+}
/**
* netif_napi_del - remove a NAPI context
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index ab908d620285..2db97c5d9f9e 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1800,8 +1800,8 @@ static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
q_vector->v_idx = q_idx;
q_vector->reg_idx = q_idx;
cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
- netif_napi_add(adapter->netdev, &q_vector->napi,
- iavf_napi_poll);
+ netif_napi_add_locked(adapter->netdev, &q_vector->napi,
+ iavf_napi_poll);
}
return 0;
@@ -1827,7 +1827,7 @@ static void iavf_free_q_vectors(struct iavf_adapter *adapter)
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];
- netif_napi_del(&q_vector->napi);
+ netif_napi_del_locked(&q_vector->napi);
}
kfree(adapter->q_vectors);
adapter->q_vectors = NULL;
diff --git a/net/core/dev.c b/net/core/dev.c
index 4cba553a4742..7511207057e5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6909,9 +6909,12 @@ netif_napi_dev_list_add(struct net_device *dev, struct napi_struct *napi)
list_add_rcu(&napi->dev_list, higher); /* adds after higher */
}
-void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
- int (*poll)(struct napi_struct *, int), int weight)
+void netif_napi_add_weight_locked(struct net_device *dev,
+ struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int),
+ int weight)
{
+ netdev_assert_locked(dev);
if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state)))
return;
@@ -6952,7 +6955,7 @@ void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
dev->threaded = false;
netif_napi_set_irq(napi, -1);
}
-EXPORT_SYMBOL(netif_napi_add_weight);
+EXPORT_SYMBOL(netif_napi_add_weight_locked);
void napi_disable(struct napi_struct *n)
{
@@ -7023,8 +7026,10 @@ static void flush_gro_hash(struct napi_struct *napi)
}
/* Must be called in process context */
-void __netif_napi_del(struct napi_struct *napi)
+void __netif_napi_del_locked(struct napi_struct *napi)
{
+ netdev_assert_locked(napi->dev);
+
if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state))
return;
@@ -7044,7 +7049,7 @@ void __netif_napi_del(struct napi_struct *napi)
napi->thread = NULL;
}
}
-EXPORT_SYMBOL(__netif_napi_del);
+EXPORT_SYMBOL(__netif_napi_del_locked);
static int __napi_poll(struct napi_struct *n, bool *repoll)
{
--
2.48.0
next prev parent reply other threads:[~2025-01-15 3:53 UTC|newest]
Thread overview: 34+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-01-15 3:53 [PATCH net-next v2 00/11] net: use netdev->lock to protect NAPI Jakub Kicinski
2025-01-15 3:53 ` [PATCH net-next v2 01/11] net: add netdev_lock() / netdev_unlock() helpers Jakub Kicinski
2025-01-15 8:21 ` Kuniyuki Iwashima
2025-01-15 8:36 ` Przemek Kitszel
2025-01-15 9:24 ` Ido Schimmel
2025-01-15 14:08 ` Jakub Kicinski
2025-01-15 14:24 ` Przemek Kitszel
2025-01-15 3:53 ` [PATCH net-next v2 02/11] net: make netdev_lock() protect netdev->reg_state Jakub Kicinski
2025-01-15 8:30 ` Kuniyuki Iwashima
2025-01-15 14:18 ` Jakub Kicinski
2025-01-15 3:53 ` [PATCH net-next v2 03/11] net: add helpers for lookup and walking netdevs under netdev_lock() Jakub Kicinski
2025-01-15 8:41 ` Kuniyuki Iwashima
2025-01-15 3:53 ` [PATCH net-next v2 04/11] net: add netdev->up protected by netdev_lock() Jakub Kicinski
2025-01-15 8:45 ` Kuniyuki Iwashima
2025-01-15 3:53 ` Jakub Kicinski [this message]
2025-01-15 8:57 ` [PATCH net-next v2 05/11] net: protect netdev->napi_list with netdev_lock() Kuniyuki Iwashima
2025-01-17 22:21 ` Eric Dumazet
2025-01-17 22:52 ` Jakub Kicinski
2025-01-15 3:53 ` [PATCH net-next v2 06/11] net: protect NAPI enablement " Jakub Kicinski
2025-01-15 9:02 ` Kuniyuki Iwashima
2025-01-21 8:32 ` Dan Carpenter
2025-01-21 8:50 ` David Laight
2025-01-21 15:27 ` Dan Carpenter
2025-01-15 3:53 ` [PATCH net-next v2 07/11] net: make netdev netlink ops hold netdev_lock() Jakub Kicinski
2025-01-15 9:07 ` Kuniyuki Iwashima
2025-01-15 3:53 ` [PATCH net-next v2 08/11] net: protect threaded status of NAPI with netdev_lock() Jakub Kicinski
2025-01-15 9:09 ` Kuniyuki Iwashima
2025-01-15 3:53 ` [PATCH net-next v2 09/11] net: protect napi->irq " Jakub Kicinski
2025-01-15 9:12 ` Kuniyuki Iwashima
2025-01-15 3:53 ` [PATCH net-next v2 10/11] net: protect NAPI config fields " Jakub Kicinski
2025-01-15 9:15 ` Kuniyuki Iwashima
2025-01-15 3:53 ` [PATCH net-next v2 11/11] netdev-genl: remove rtnl_lock protection from NAPI ops Jakub Kicinski
2025-01-15 9:18 ` Kuniyuki Iwashima
2025-01-16 3:30 ` [PATCH net-next v2 00/11] net: use netdev->lock to protect NAPI patchwork-bot+netdevbpf
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250115035319.559603-6-kuba@kernel.org \
--to=kuba@kernel.org \
--cc=andrew+netdev@lunn.ch \
--cc=davem@davemloft.net \
--cc=edumazet@google.com \
--cc=horms@kernel.org \
--cc=jdamato@fastly.com \
--cc=netdev@vger.kernel.org \
--cc=pabeni@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).