From: Daniel Borkmann <daniel@iogearbox.net>
To: netdev@vger.kernel.org
Cc: bpf@vger.kernel.org, kuba@kernel.org, davem@davemloft.net,
razor@blackwall.org, pabeni@redhat.com, willemb@google.com,
sdf@fomichev.me, john.fastabend@gmail.com, martin.lau@kernel.org,
jordan@jrife.io, maciej.fijalkowski@intel.com,
magnus.karlsson@intel.com, dw@davidwei.uk, toke@redhat.com,
yangzhenze@bytedance.com, wangdongdong.6@bytedance.com
Subject: [PATCH net-next v4 05/14] net: Proxy net_mp_{open,close}_rxq for mapped queues
Date: Fri, 31 Oct 2025 22:20:54 +0100
Message-ID: <20251031212103.310683-6-daniel@iogearbox.net>
In-Reply-To: <20251031212103.310683-1-daniel@iogearbox.net>
From: David Wei <dw@davidwei.uk>
When a process in a container wants to set up a memory provider, it will
use the virtual netdev and a mapped rxq, and call net_mp_{open,close}_rxq
to try to restart the queue. At this point, proxy the queue restart onto
the real rxq in the physical netdev.
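As a simplified sketch, the proxied open path added below boils down to
the following (error handling and the individual checks are elided; the
peer helpers come from the earlier patches in this series):

  rxq = netif_get_rx_queue_peer_locked(&dev, &rxq_idx);
  if (!rxq)
          return -EBUSY;
  /* dev and rxq_idx now refer to the physical netdev and its real rxq,
   * so the hds/XDP/AF_XDP checks and the restart run against it.
   */
  rxq->mp_params = *p;
  ret = netdev_rx_queue_restart(dev, rxq_idx);
  netif_put_rx_queue_peer_locked(orig_dev, dev);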
For memory providers (io_uring zero-copy rx and devmem), this causes the
real rxq in the physical netdev to be filled from a memory provider that
has DMA-mapped memory from a process within a container.
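For illustration only, a provider attached to the virtual netdev would
open and later close the mapped queue roughly as follows; my_mp_ops and
my_priv are placeholders for the provider's own ops and state and are not
defined in this patch:

  struct pp_memory_provider_params p = {
          .mp_ops  = &my_mp_ops,  /* hypothetical provider ops */
          .mp_priv = my_priv,     /* hypothetical provider state */
  };

  err = net_mp_open_rxq(virt_dev, mapped_rxq_idx, &p);
  if (err)
          return err;
  /* ... rx payload now lands in the provider's DMA-mapped memory ... */
  net_mp_close_rxq(virt_dev, mapped_rxq_idx, &p);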
Signed-off-by: David Wei <dw@davidwei.uk>
Co-developed-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Nikolay Aleksandrov <razor@blackwall.org>
---
include/net/page_pool/memory_provider.h | 4 +-
net/core/netdev_rx_queue.c | 55 +++++++++++++++++--------
2 files changed, 39 insertions(+), 20 deletions(-)
diff --git a/include/net/page_pool/memory_provider.h b/include/net/page_pool/memory_provider.h
index ada4f968960a..b6f811c3416b 100644
--- a/include/net/page_pool/memory_provider.h
+++ b/include/net/page_pool/memory_provider.h
@@ -23,12 +23,12 @@ bool net_mp_niov_set_dma_addr(struct net_iov *niov, dma_addr_t addr);
void net_mp_niov_set_page_pool(struct page_pool *pool, struct net_iov *niov);
void net_mp_niov_clear_page_pool(struct net_iov *niov);
-int net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
+int net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
struct pp_memory_provider_params *p);
int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
const struct pp_memory_provider_params *p,
struct netlink_ext_ack *extack);
-void net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
+void net_mp_close_rxq(struct net_device *dev, unsigned int rxq_idx,
struct pp_memory_provider_params *old_p);
void __net_mp_close_rxq(struct net_device *dev, unsigned int rxq_idx,
const struct pp_memory_provider_params *old_p);
diff --git a/net/core/netdev_rx_queue.c b/net/core/netdev_rx_queue.c
index 889b7382cdb6..0e43254b64cd 100644
--- a/net/core/netdev_rx_queue.c
+++ b/net/core/netdev_rx_queue.c
@@ -174,49 +174,63 @@ int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
const struct pp_memory_provider_params *p,
struct netlink_ext_ack *extack)
{
+ struct net_device *orig_dev = dev;
struct netdev_rx_queue *rxq;
int ret;
if (!netdev_need_ops_lock(dev))
return -EOPNOTSUPP;
-
if (rxq_idx >= dev->real_num_rx_queues) {
NL_SET_ERR_MSG(extack, "rx queue index out of range");
return -ERANGE;
}
- rxq_idx = array_index_nospec(rxq_idx, dev->real_num_rx_queues);
+ rxq_idx = array_index_nospec(rxq_idx, dev->real_num_rx_queues);
+ rxq = netif_get_rx_queue_peer_locked(&dev, &rxq_idx);
+ if (!rxq) {
+ NL_SET_ERR_MSG(extack, "rx queue peered to a virtual netdev");
+ return -EBUSY;
+ }
+ if (!dev->dev.parent) {
+ NL_SET_ERR_MSG(extack, "rx queue is mapped to a virtual netdev");
+ ret = -EBUSY;
+ goto out;
+ }
if (dev->cfg->hds_config != ETHTOOL_TCP_DATA_SPLIT_ENABLED) {
NL_SET_ERR_MSG(extack, "tcp-data-split is disabled");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
if (dev->cfg->hds_thresh) {
NL_SET_ERR_MSG(extack, "hds-thresh is not zero");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
if (dev_xdp_prog_count(dev)) {
NL_SET_ERR_MSG(extack, "unable to custom memory provider to device with XDP program attached");
- return -EEXIST;
+ ret = -EEXIST;
+ goto out;
}
-
- rxq = __netif_get_rx_queue(dev, rxq_idx);
if (rxq->mp_params.mp_ops) {
NL_SET_ERR_MSG(extack, "designated queue already memory provider bound");
- return -EEXIST;
+ ret = -EEXIST;
+ goto out;
}
#ifdef CONFIG_XDP_SOCKETS
if (rxq->pool) {
NL_SET_ERR_MSG(extack, "designated queue already in use by AF_XDP");
- return -EBUSY;
+ ret = -EBUSY;
+ goto out;
}
#endif
-
rxq->mp_params = *p;
ret = netdev_rx_queue_restart(dev, rxq_idx);
if (ret) {
rxq->mp_params.mp_ops = NULL;
rxq->mp_params.mp_priv = NULL;
}
+out:
+ netif_put_rx_queue_peer_locked(orig_dev, dev);
return ret;
}
@@ -231,38 +245,43 @@ int net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
return ret;
}
-void __net_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
+void __net_mp_close_rxq(struct net_device *dev, unsigned int rxq_idx,
const struct pp_memory_provider_params *old_p)
{
+ struct net_device *orig_dev = dev;
struct netdev_rx_queue *rxq;
int err;
- if (WARN_ON_ONCE(ifq_idx >= dev->real_num_rx_queues))
+ if (WARN_ON_ONCE(rxq_idx >= dev->real_num_rx_queues))
return;
- rxq = __netif_get_rx_queue(dev, ifq_idx);
+ rxq = netif_get_rx_queue_peer_locked(&dev, &rxq_idx);
+ if (WARN_ON_ONCE(!rxq))
+ return;
/* Callers holding a netdev ref may get here after we already
* went thru shutdown via dev_memory_provider_uninstall().
*/
if (dev->reg_state > NETREG_REGISTERED &&
!rxq->mp_params.mp_ops)
- return;
+ goto out;
if (WARN_ON_ONCE(rxq->mp_params.mp_ops != old_p->mp_ops ||
rxq->mp_params.mp_priv != old_p->mp_priv))
- return;
+ goto out;
rxq->mp_params.mp_ops = NULL;
rxq->mp_params.mp_priv = NULL;
- err = netdev_rx_queue_restart(dev, ifq_idx);
+ err = netdev_rx_queue_restart(dev, rxq_idx);
WARN_ON(err && err != -ENETDOWN);
+out:
+ netif_put_rx_queue_peer_locked(orig_dev, dev);
}
-void net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
+void net_mp_close_rxq(struct net_device *dev, unsigned int rxq_idx,
struct pp_memory_provider_params *old_p)
{
netdev_lock(dev);
- __net_mp_close_rxq(dev, ifq_idx, old_p);
+ __net_mp_close_rxq(dev, rxq_idx, old_p);
netdev_unlock(dev);
}
--
2.43.0