From: Jakub Kicinski <kuba@kernel.org>
To: davem@davemloft.net
Cc: netdev@vger.kernel.org, edumazet@google.com, pabeni@redhat.com,
almasrymina@google.com, hawk@kernel.org,
ilias.apalodimas@linaro.org, dsahern@gmail.com,
dtatulea@nvidia.com, Jakub Kicinski <kuba@kernel.org>
Subject: [PATCH net-next v2 05/15] net: page_pool: record pools per netdev
Date: Mon, 20 Nov 2023 16:00:38 -0800 [thread overview]
Message-ID: <20231121000048.789613-6-kuba@kernel.org> (raw)
In-Reply-To: <20231121000048.789613-1-kuba@kernel.org>
Link the page pools with netdevs. This needs to be netns compatible
so we have two options. Either we record the pools per netns and
have to worry about moving them as the netdev gets moved.
Or we record them directly on the netdev so they move with the netdev
without any extra work.
Implement the latter option. Since pools may outlast the netdev we need
a place to store orphans. In time-honored tradition use loopback
for this purpose.
Reviewed-by: Mina Almasry <almasrymina@google.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
v1: fix race between page pool and netdev disappearing (Simon)
---
include/linux/list.h | 20 ++++++++
include/linux/netdevice.h | 4 ++
include/linux/poison.h | 2 +
include/net/page_pool/types.h | 4 ++
net/core/page_pool_user.c | 90 +++++++++++++++++++++++++++++++++++
5 files changed, 120 insertions(+)
diff --git a/include/linux/list.h b/include/linux/list.h
index 1837caedf723..059aa1fff41e 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -1119,6 +1119,26 @@ static inline void hlist_move_list(struct hlist_head *old,
old->first = NULL;
}
+/**
+ * hlist_splice_init() - move all entries from one list to another
+ * @from: hlist_head from which entries will be moved
+ * @last: last entry on the @from list
+ * @to: hlist_head to which entries will be moved
+ *
+ * @to can be empty, @from must contain at least @last.
+ */
+static inline void hlist_splice_init(struct hlist_head *from,
+ struct hlist_node *last,
+ struct hlist_head *to)
+{
+ if (to->first)
+ to->first->pprev = &last->next;
+ last->next = to->first;
+ to->first = from->first;
+ from->first->pprev = &to->first;
+ from->first = NULL;
+}
+
#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
#define hlist_for_each(pos, head) \
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 2d840d7056f2..d6554f308ff1 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2435,6 +2435,10 @@ struct net_device {
#if IS_ENABLED(CONFIG_DPLL)
struct dpll_pin *dpll_pin;
#endif
+#if IS_ENABLED(CONFIG_PAGE_POOL)
+ /** @page_pools: page pools created for this netdevice */
+ struct hlist_head page_pools;
+#endif
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 851a855d3868..27a7dad17eef 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -83,6 +83,8 @@
/********** net/core/skbuff.c **********/
#define SKB_LIST_POISON_NEXT ((void *)(0x800 + POISON_POINTER_DELTA))
+/********** net/ **********/
+#define NET_PTR_POISON ((void *)(0x801 + POISON_POINTER_DELTA))
/********** kernel/bpf/ **********/
#define BPF_PTR_POISON ((void *)(0xeB9FUL + POISON_POINTER_DELTA))
diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
index c19f0df3bf0b..b258a571201e 100644
--- a/include/net/page_pool/types.h
+++ b/include/net/page_pool/types.h
@@ -5,6 +5,7 @@
#include <linux/dma-direction.h>
#include <linux/ptr_ring.h>
+#include <linux/types.h>
#define PP_FLAG_DMA_MAP BIT(0) /* Should page_pool do the DMA
* map/unmap
@@ -48,6 +49,7 @@ struct pp_alloc_cache {
* @pool_size: size of the ptr_ring
* @nid: NUMA node id to allocate from pages from
* @dev: device, for DMA pre-mapping purposes
+ * @netdev: netdev this pool will serve (leave as NULL if none or multiple)
* @napi: NAPI which is the sole consumer of pages, otherwise NULL
* @dma_dir: DMA mapping direction
* @max_len: max DMA sync memory size for PP_FLAG_DMA_SYNC_DEV
@@ -66,6 +68,7 @@ struct page_pool_params {
unsigned int offset;
);
struct_group_tagged(page_pool_params_slow, slow,
+ struct net_device *netdev;
/* private: used by test code only */
void (*init_callback)(struct page *page, void *arg);
void *init_arg;
@@ -189,6 +192,7 @@ struct page_pool {
struct page_pool_params_slow slow;
/* User-facing fields, protected by page_pools_lock */
struct {
+ struct hlist_node list;
u32 id;
} user;
};
diff --git a/net/core/page_pool_user.c b/net/core/page_pool_user.c
index 630d1eeecf2a..1591dbd66d51 100644
--- a/net/core/page_pool_user.c
+++ b/net/core/page_pool_user.c
@@ -1,14 +1,31 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/mutex.h>
+#include <linux/netdevice.h>
#include <linux/xarray.h>
+#include <net/net_debug.h>
#include <net/page_pool/types.h>
#include "page_pool_priv.h"
static DEFINE_XARRAY_FLAGS(page_pools, XA_FLAGS_ALLOC1);
+/* Protects: page_pools, netdevice->page_pools, pool->slow.netdev, pool->user.
+ * Ordering: inside rtnl_lock
+ */
static DEFINE_MUTEX(page_pools_lock);
+/* Page pools are only reachable from user space (via netlink) if they are
+ * linked to a netdev at creation time. Following page pool "visibility"
+ * states are possible:
+ * - normal
+ * - user.list: linked to real netdev, netdev: real netdev
+ * - orphaned - real netdev has disappeared
+ * - user.list: linked to lo, netdev: lo
+ * - invisible - either (a) created without netdev linking, (b) unlisted due
+ * to error, or (c) the entire namespace which owned this pool disappeared
+ * - user.list: unhashed, netdev: unknown
+ */
+
int page_pool_list(struct page_pool *pool)
{
static u32 id_alloc_next;
@@ -20,6 +37,10 @@ int page_pool_list(struct page_pool *pool)
if (err < 0)
goto err_unlock;
+ if (pool->slow.netdev)
+ hlist_add_head(&pool->user.list,
+ &pool->slow.netdev->page_pools);
+
mutex_unlock(&page_pools_lock);
return 0;
@@ -32,5 +53,74 @@ void page_pool_unlist(struct page_pool *pool)
{
mutex_lock(&page_pools_lock);
xa_erase(&page_pools, pool->user.id);
+ hlist_del(&pool->user.list);
mutex_unlock(&page_pools_lock);
}
+
+static void page_pool_unreg_netdev_wipe(struct net_device *netdev)
+{
+ struct page_pool *pool;
+ struct hlist_node *n;
+
+ mutex_lock(&page_pools_lock);
+ hlist_for_each_entry_safe(pool, n, &netdev->page_pools, user.list) {
+ hlist_del_init(&pool->user.list);
+ pool->slow.netdev = NET_PTR_POISON;
+ }
+ mutex_unlock(&page_pools_lock);
+}
+
+static void page_pool_unreg_netdev(struct net_device *netdev)
+{
+ struct page_pool *pool, *last;
+ struct net_device *lo;
+
+ lo = __dev_get_by_index(dev_net(netdev), 1);
+ if (!lo) {
+ netdev_err_once(netdev,
+ "can't get lo to store orphan page pools\n");
+ page_pool_unreg_netdev_wipe(netdev);
+ return;
+ }
+
+ mutex_lock(&page_pools_lock);
+ last = NULL;
+ hlist_for_each_entry(pool, &netdev->page_pools, user.list) {
+ pool->slow.netdev = lo;
+ last = pool;
+ }
+ if (last)
+ hlist_splice_init(&netdev->page_pools, &last->user.list,
+ &lo->page_pools);
+ mutex_unlock(&page_pools_lock);
+}
+
+static int
+page_pool_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+
+ if (event != NETDEV_UNREGISTER)
+ return NOTIFY_DONE;
+
+ if (hlist_empty(&netdev->page_pools))
+ return NOTIFY_OK;
+
+ if (netdev->ifindex != LOOPBACK_IFINDEX)
+ page_pool_unreg_netdev(netdev);
+ else
+ page_pool_unreg_netdev_wipe(netdev);
+ return NOTIFY_OK;
+}
+
+static struct notifier_block page_pool_netdevice_nb = {
+ .notifier_call = page_pool_netdevice_event,
+};
+
+static int __init page_pool_user_init(void)
+{
+ return register_netdevice_notifier(&page_pool_netdevice_nb);
+}
+
+subsys_initcall(page_pool_user_init);
--
2.42.0
next prev parent reply other threads:[~2023-11-21 0:00 UTC|newest]
Thread overview: 30+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-11-21 0:00 [PATCH net-next v2 00/15] net: page_pool: add netlink-based introspection Jakub Kicinski
2023-11-21 0:00 ` [PATCH net-next v2 01/15] net: page_pool: split the page_pool_params into fast and slow Jakub Kicinski
2023-11-21 0:00 ` [PATCH net-next v2 02/15] net: page_pool: avoid touching slow on the fastpath Jakub Kicinski
2023-11-21 0:00 ` [PATCH net-next v2 03/15] net: page_pool: factor out uninit Jakub Kicinski
2023-11-21 0:00 ` [PATCH net-next v2 04/15] net: page_pool: id the page pools Jakub Kicinski
2023-11-21 0:00 ` Jakub Kicinski [this message]
2023-11-21 0:00 ` [PATCH net-next v2 06/15] net: page_pool: stash the NAPI ID for easier access Jakub Kicinski
2023-11-21 0:00 ` [PATCH net-next v2 07/15] eth: link netdev to page_pools in drivers Jakub Kicinski
2023-11-21 13:13 ` kernel test robot
2023-11-21 21:25 ` kernel test robot
2023-11-21 0:00 ` [PATCH net-next v2 08/15] net: page_pool: add nlspec for basic access to page pools Jakub Kicinski
2023-11-21 18:24 ` Willem de Bruijn
2023-11-21 20:37 ` Jakub Kicinski
2023-11-21 21:33 ` Willem de Bruijn
2023-11-21 22:00 ` Jakub Kicinski
2023-11-21 22:49 ` David Ahern
2023-11-21 23:42 ` Jakub Kicinski
2023-11-21 0:00 ` [PATCH net-next v2 09/15] net: page_pool: implement GET in the netlink API Jakub Kicinski
2023-11-21 0:00 ` [PATCH net-next v2 10/15] net: page_pool: add netlink notifications for state changes Jakub Kicinski
2023-11-21 0:00 ` [PATCH net-next v2 11/15] net: page_pool: report amount of memory held by page pools Jakub Kicinski
2023-11-21 0:00 ` [PATCH net-next v2 12/15] net: page_pool: report when page pool was destroyed Jakub Kicinski
2023-11-21 20:45 ` Jesper Dangaard Brouer
2023-11-21 21:49 ` Jakub Kicinski
2023-11-22 8:53 ` Jesper Dangaard Brouer
2023-11-21 0:00 ` [PATCH net-next v2 13/15] net: page_pool: expose page pool stats via netlink Jakub Kicinski
2023-11-21 0:00 ` [PATCH net-next v2 14/15] net: page_pool: mute the periodic warning for visible page pools Jakub Kicinski
2023-11-21 0:00 ` [PATCH net-next v2 15/15] tools: ynl: add sample for getting page-pool information Jakub Kicinski
2023-11-22 1:31 ` [PATCH net-next v2 00/15] net: page_pool: add netlink-based introspection Jakub Kicinski
2023-11-22 8:45 ` Jesper Dangaard Brouer
2023-11-22 1:40 ` patchwork-bot+netdevbpf
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20231121000048.789613-6-kuba@kernel.org \
--to=kuba@kernel.org \
--cc=almasrymina@google.com \
--cc=davem@davemloft.net \
--cc=dsahern@gmail.com \
--cc=dtatulea@nvidia.com \
--cc=edumazet@google.com \
--cc=hawk@kernel.org \
--cc=ilias.apalodimas@linaro.org \
--cc=netdev@vger.kernel.org \
--cc=pabeni@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).