* [PATCH v6 1/5] io_uring/zcrx: count zcrx users
2025-11-08 18:14 [PATCH v6 0/5] io_uring zcrx ifq sharing David Wei
@ 2025-11-08 18:14 ` David Wei
2025-11-08 18:14 ` [PATCH v6 2/5] io_uring/zcrx: move io_zcrx_scrub() and dependencies up David Wei
` (4 subsequent siblings)
5 siblings, 0 replies; 8+ messages in thread
From: David Wei @ 2025-11-08 18:14 UTC
To: io-uring, netdev; +Cc: Jens Axboe, Pavel Begunkov
From: Pavel Begunkov <asml.silence@gmail.com>
zcrx tries to detach ifq / terminate page pools when the io_uring ctx
owning it is being destroyed. There will be multiple io_uring instances
attached to it in the future, so add a separate counter to track the
users. Note, refs can't be reused for this purpose, as it is only used
to prevent zcrx and ring destruction and is also taken by page pools to
keep the ifq alive.
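In effect, the per-user teardown path becomes the following (this is
factored out into a zcrx_unregister() helper later in this series):

	if (refcount_dec_and_test(&ifq->user_refs)) {
		/* last userspace user: stop the queue, reclaim buffers */
		io_close_queue(ifq);
		io_zcrx_scrub(ifq);
	}
	/* the ifq itself may still be kept alive by page pool refs */
	io_put_zcrx_ifq(ifq);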
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: David Wei <dw@davidwei.uk>
---
io_uring/zcrx.c | 7 +++++--
io_uring/zcrx.h | 2 ++
2 files changed, 7 insertions(+), 2 deletions(-)
diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c
index 5f7a1b29842e..de4ba6e61130 100644
--- a/io_uring/zcrx.c
+++ b/io_uring/zcrx.c
@@ -482,6 +482,7 @@ static struct io_zcrx_ifq *io_zcrx_ifq_alloc(struct io_ring_ctx *ctx)
spin_lock_init(&ifq->rq_lock);
mutex_init(&ifq->pp_lock);
refcount_set(&ifq->refs, 1);
+ refcount_set(&ifq->user_refs, 1);
return ifq;
}
@@ -742,8 +743,10 @@ void io_unregister_zcrx_ifqs(struct io_ring_ctx *ctx)
if (!ifq)
break;
- io_close_queue(ifq);
- io_zcrx_scrub(ifq);
+ if (refcount_dec_and_test(&ifq->user_refs)) {
+ io_close_queue(ifq);
+ io_zcrx_scrub(ifq);
+ }
io_put_zcrx_ifq(ifq);
}
diff --git a/io_uring/zcrx.h b/io_uring/zcrx.h
index f29edc22c91f..32ab95b2cb81 100644
--- a/io_uring/zcrx.h
+++ b/io_uring/zcrx.h
@@ -55,6 +55,8 @@ struct io_zcrx_ifq {
struct net_device *netdev;
netdevice_tracker netdev_tracker;
refcount_t refs;
+ /* counts userspace facing users like io_uring */
+ refcount_t user_refs;
/*
* Page pool and net configuration lock, can be taken deeper in the
--
2.47.3
* [PATCH v6 2/5] io_uring/zcrx: move io_zcrx_scrub() and dependencies up
2025-11-08 18:14 [PATCH v6 0/5] io_uring zcrx ifq sharing David Wei
2025-11-08 18:14 ` [PATCH v6 1/5] io_uring/zcrx: count zcrx users David Wei
@ 2025-11-08 18:14 ` David Wei
2025-11-08 18:14 ` [PATCH v6 3/5] io_uring/zcrx: export zcrx via a file David Wei
` (3 subsequent siblings)
5 siblings, 0 replies; 8+ messages in thread
From: David Wei @ 2025-11-08 18:14 UTC
To: io-uring, netdev; +Cc: Jens Axboe, Pavel Begunkov
In preparation for adding zcrx ifq exporting and importing, move
io_zcrx_scrub() and its dependencies up the file to be closer to
io_close_queue().
Signed-off-by: David Wei <dw@davidwei.uk>
---
io_uring/zcrx.c | 84 ++++++++++++++++++++++++-------------------------
1 file changed, 42 insertions(+), 42 deletions(-)
diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c
index de4ba6e61130..48eabcc05873 100644
--- a/io_uring/zcrx.c
+++ b/io_uring/zcrx.c
@@ -544,6 +544,48 @@ static void io_put_zcrx_ifq(struct io_zcrx_ifq *ifq)
io_zcrx_ifq_free(ifq);
}
+static void io_zcrx_return_niov_freelist(struct net_iov *niov)
+{
+ struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);
+
+ spin_lock_bh(&area->freelist_lock);
+ area->freelist[area->free_count++] = net_iov_idx(niov);
+ spin_unlock_bh(&area->freelist_lock);
+}
+
+static void io_zcrx_return_niov(struct net_iov *niov)
+{
+ netmem_ref netmem = net_iov_to_netmem(niov);
+
+ if (!niov->desc.pp) {
+ /* copy fallback allocated niovs */
+ io_zcrx_return_niov_freelist(niov);
+ return;
+ }
+ page_pool_put_unrefed_netmem(niov->desc.pp, netmem, -1, false);
+}
+
+static void io_zcrx_scrub(struct io_zcrx_ifq *ifq)
+{
+ struct io_zcrx_area *area = ifq->area;
+ int i;
+
+ if (!area)
+ return;
+
+ /* Reclaim back all buffers given to the user space. */
+ for (i = 0; i < area->nia.num_niovs; i++) {
+ struct net_iov *niov = &area->nia.niovs[i];
+ int nr;
+
+ if (!atomic_read(io_get_user_counter(niov)))
+ continue;
+ nr = atomic_xchg(io_get_user_counter(niov), 0);
+ if (nr && !page_pool_unref_netmem(net_iov_to_netmem(niov), nr))
+ io_zcrx_return_niov(niov);
+ }
+}
+
struct io_mapped_region *io_zcrx_get_region(struct io_ring_ctx *ctx,
unsigned int id)
{
@@ -684,48 +726,6 @@ static struct net_iov *__io_zcrx_get_free_niov(struct io_zcrx_area *area)
return &area->nia.niovs[niov_idx];
}
-static void io_zcrx_return_niov_freelist(struct net_iov *niov)
-{
- struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);
-
- spin_lock_bh(&area->freelist_lock);
- area->freelist[area->free_count++] = net_iov_idx(niov);
- spin_unlock_bh(&area->freelist_lock);
-}
-
-static void io_zcrx_return_niov(struct net_iov *niov)
-{
- netmem_ref netmem = net_iov_to_netmem(niov);
-
- if (!niov->desc.pp) {
- /* copy fallback allocated niovs */
- io_zcrx_return_niov_freelist(niov);
- return;
- }
- page_pool_put_unrefed_netmem(niov->desc.pp, netmem, -1, false);
-}
-
-static void io_zcrx_scrub(struct io_zcrx_ifq *ifq)
-{
- struct io_zcrx_area *area = ifq->area;
- int i;
-
- if (!area)
- return;
-
- /* Reclaim back all buffers given to the user space. */
- for (i = 0; i < area->nia.num_niovs; i++) {
- struct net_iov *niov = &area->nia.niovs[i];
- int nr;
-
- if (!atomic_read(io_get_user_counter(niov)))
- continue;
- nr = atomic_xchg(io_get_user_counter(niov), 0);
- if (nr && !page_pool_unref_netmem(net_iov_to_netmem(niov), nr))
- io_zcrx_return_niov(niov);
- }
-}
-
void io_unregister_zcrx_ifqs(struct io_ring_ctx *ctx)
{
struct io_zcrx_ifq *ifq;
--
2.47.3
* [PATCH v6 3/5] io_uring/zcrx: export zcrx via a file
2025-11-08 18:14 [PATCH v6 0/5] io_uring zcrx ifq sharing David Wei
2025-11-08 18:14 ` [PATCH v6 1/5] io_uring/zcrx: count zcrx users David Wei
2025-11-08 18:14 ` [PATCH v6 2/5] io_uring/zcrx: move io_zcrx_scrub() and dependencies up David Wei
@ 2025-11-08 18:14 ` David Wei
2025-11-08 18:14 ` [PATCH v6 4/5] io_uring/zcrx: add io_fill_zcrx_offsets() David Wei
` (2 subsequent siblings)
5 siblings, 0 replies; 8+ messages in thread
From: David Wei @ 2025-11-08 18:14 UTC
To: io-uring, netdev; +Cc: Jens Axboe, Pavel Begunkov
From: Pavel Begunkov <asml.silence@gmail.com>
Add an option to wrap a zcrx instance into a file and expose it to
userspace. Currently, users can't do anything meaningful with the file,
but a later patch will use it to import the instance into another
io_uring ring. It's implemented as a new op, ZCRX_CTRL_EXPORT, for the
IORING_REGISTER_ZCRX_CTRL registration opcode.
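For illustration, a minimal userspace sketch of the export call (the
zcrx_ctrl field carrying the instance id is an assumption, as the
struct layout isn't shown in this diff, and nr_args is assumed to
be 1):

	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <linux/io_uring.h>

	/* Hypothetical helper: export zcrx instance `zcrx_id` registered on
	 * `ring_fd`; on success, returns the new anon-inode zcrx fd. */
	static int zcrx_export(int ring_fd, __u32 zcrx_id)
	{
		struct zcrx_ctrl ctrl;

		memset(&ctrl, 0, sizeof(ctrl));	/* resv must be zero */
		ctrl.op = ZCRX_CTRL_EXPORT;
		ctrl.zcrx_id = zcrx_id;		/* assumed field name */

		return syscall(__NR_io_uring_register, ring_fd,
			       IORING_REGISTER_ZCRX_CTRL, &ctrl, 1);
	}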
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: David Wei <dw@davidwei.uk>
---
include/uapi/linux/io_uring.h | 1 +
io_uring/zcrx.c | 59 +++++++++++++++++++++++++++++++----
2 files changed, 54 insertions(+), 6 deletions(-)
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 190657d8307d..f5dae95bc0a8 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -1087,6 +1087,7 @@ struct io_uring_zcrx_ifq_reg {
enum zcrx_ctrl_op {
ZCRX_CTRL_FLUSH_RQ,
+ ZCRX_CTRL_EXPORT,
__ZCRX_CTRL_LAST,
};
diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c
index 48eabcc05873..3fba3bbff570 100644
--- a/io_uring/zcrx.c
+++ b/io_uring/zcrx.c
@@ -8,6 +8,7 @@
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff_ref.h>
+#include <linux/anon_inodes.h>
#include <net/page_pool/helpers.h>
#include <net/page_pool/memory_provider.h>
@@ -586,6 +587,15 @@ static void io_zcrx_scrub(struct io_zcrx_ifq *ifq)
}
}
+static void zcrx_unregister(struct io_zcrx_ifq *ifq)
+{
+ if (refcount_dec_and_test(&ifq->user_refs)) {
+ io_close_queue(ifq);
+ io_zcrx_scrub(ifq);
+ }
+ io_put_zcrx_ifq(ifq);
+}
+
struct io_mapped_region *io_zcrx_get_region(struct io_ring_ctx *ctx,
unsigned int id)
{
@@ -596,6 +606,46 @@ struct io_mapped_region *io_zcrx_get_region(struct io_ring_ctx *ctx,
return ifq ? &ifq->region : NULL;
}
+static int zcrx_box_release(struct inode *inode, struct file *file)
+{
+ struct io_zcrx_ifq *ifq = file->private_data;
+
+ zcrx_unregister(ifq);
+ return 0;
+}
+
+static const struct file_operations zcrx_box_fops = {
+ .owner = THIS_MODULE,
+ .release = zcrx_box_release,
+};
+
+static int export_zcrx(struct io_ring_ctx *ctx, struct io_zcrx_ifq *ifq,
+ struct zcrx_ctrl *ctrl)
+{
+ struct file *file;
+ int fd = -1;
+
+ if (!mem_is_zero(&ctrl->resv, sizeof(ctrl->resv)))
+ return -EINVAL;
+ fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fd < 0)
+ return fd;
+
+ refcount_inc(&ifq->refs);
+ refcount_inc(&ifq->user_refs);
+
+ file = anon_inode_create_getfile("[zcrx]", &zcrx_box_fops,
+ ifq, O_CLOEXEC, NULL);
+ if (IS_ERR(file)) {
+ put_unused_fd(fd);
+ zcrx_unregister(ifq);
+ return PTR_ERR(file);
+ }
+
+ fd_install(fd, file);
+ return fd;
+}
+
int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
struct io_uring_zcrx_ifq_reg __user *arg)
{
@@ -742,12 +792,7 @@ void io_unregister_zcrx_ifqs(struct io_ring_ctx *ctx)
}
if (!ifq)
break;
-
- if (refcount_dec_and_test(&ifq->user_refs)) {
- io_close_queue(ifq);
- io_zcrx_scrub(ifq);
- }
- io_put_zcrx_ifq(ifq);
+ zcrx_unregister(ifq);
}
xa_destroy(&ctx->zcrx_ctxs);
@@ -1025,6 +1070,8 @@ int io_zcrx_ctrl(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
switch (ctrl.op) {
case ZCRX_CTRL_FLUSH_RQ:
return zcrx_flush_rq(ctx, zcrx, &ctrl);
+ case ZCRX_CTRL_EXPORT:
+ return export_zcrx(ctx, zcrx, &ctrl);
}
return -EOPNOTSUPP;
--
2.47.3
* [PATCH v6 4/5] io_uring/zcrx: add io_fill_zcrx_offsets()
2025-11-08 18:14 [PATCH v6 0/5] io_uring zcrx ifq sharing David Wei
` (2 preceding siblings ...)
2025-11-08 18:14 ` [PATCH v6 3/5] io_uring/zcrx: export zcrx via a file David Wei
@ 2025-11-08 18:14 ` David Wei
2025-11-08 18:14 ` [PATCH v6 5/5] io_uring/zcrx: share an ifq between rings David Wei
2025-11-11 14:40 ` [PATCH v6 0/5] io_uring zcrx ifq sharing Pavel Begunkov
5 siblings, 0 replies; 8+ messages in thread
From: David Wei @ 2025-11-08 18:14 UTC
To: io-uring, netdev; +Cc: Jens Axboe, Pavel Begunkov
Add a helper io_fill_zcrx_offsets() that sets the constant offsets in
struct io_uring_zcrx_offsets returned to userspace.
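These are the offsets userspace uses to locate the ring fields inside
the mapped refill region, roughly as follows (a sketch, not a liburing
API):

	#include <linux/io_uring.h>

	/* Sketch: resolve refill ring pointers from the offsets the kernel
	 * wrote back into reg->offsets; `base` is the start of the region. */
	static void map_refill_ring(void *base,
				    const struct io_uring_zcrx_ifq_reg *reg,
				    __u32 **head, __u32 **tail,
				    struct io_uring_zcrx_rqe **rqes)
	{
		*head = (__u32 *)((char *)base + reg->offsets.head);
		*tail = (__u32 *)((char *)base + reg->offsets.tail);
		*rqes = (struct io_uring_zcrx_rqe *)
				((char *)base + reg->offsets.rqes);
	}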
Signed-off-by: David Wei <dw@davidwei.uk>
---
io_uring/zcrx.c | 13 +++++++++----
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c
index 3fba3bbff570..49990c89ce95 100644
--- a/io_uring/zcrx.c
+++ b/io_uring/zcrx.c
@@ -345,6 +345,13 @@ static void io_zcrx_get_niov_uref(struct net_iov *niov)
atomic_inc(io_get_user_counter(niov));
}
+static void io_fill_zcrx_offsets(struct io_uring_zcrx_offsets *offsets)
+{
+ offsets->head = offsetof(struct io_uring, head);
+ offsets->tail = offsetof(struct io_uring, tail);
+ offsets->rqes = ALIGN(sizeof(struct io_uring), L1_CACHE_BYTES);
+}
+
static int io_allocate_rbuf_ring(struct io_ring_ctx *ctx,
struct io_zcrx_ifq *ifq,
struct io_uring_zcrx_ifq_reg *reg,
@@ -356,7 +363,8 @@ static int io_allocate_rbuf_ring(struct io_ring_ctx *ctx,
void *ptr;
int ret;
- off = ALIGN(sizeof(struct io_uring), L1_CACHE_BYTES);
+ io_fill_zcrx_offsets(&reg->offsets);
+ off = reg->offsets.rqes;
size = off + sizeof(struct io_uring_zcrx_rqe) * reg->rq_entries;
if (size > rd->size)
return -EINVAL;
@@ -372,9 +380,6 @@ static int io_allocate_rbuf_ring(struct io_ring_ctx *ctx,
ifq->rq_ring = (struct io_uring *)ptr;
ifq->rqes = (struct io_uring_zcrx_rqe *)(ptr + off);
- reg->offsets.head = offsetof(struct io_uring, head);
- reg->offsets.tail = offsetof(struct io_uring, tail);
- reg->offsets.rqes = off;
return 0;
}
--
2.47.3
* [PATCH v6 5/5] io_uring/zcrx: share an ifq between rings
2025-11-08 18:14 [PATCH v6 0/5] io_uring zcrx ifq sharing David Wei
` (3 preceding siblings ...)
2025-11-08 18:14 ` [PATCH v6 4/5] io_uring/zcrx: add io_fill_zcrx_offsets() David Wei
@ 2025-11-08 18:14 ` David Wei
2025-11-11 14:40 ` [PATCH v6 0/5] io_uring zcrx ifq sharing Pavel Begunkov
5 siblings, 0 replies; 8+ messages in thread
From: David Wei @ 2025-11-08 18:14 UTC
To: io-uring, netdev; +Cc: Jens Axboe, Pavel Begunkov
Add a way to share an ifq from a src ring that is real (i.e. bound to a
HW RX queue) with other rings. This is done by passing a new flag
ZCRX_REG_IMPORT in the registration struct io_uring_zcrx_ifq_reg, with
the fd of an exported zcrx ifq in the if_idx field.
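For illustration, a rough sketch of the import side in userspace (raw
syscall in place of any liburing helper; nr_args assumed to be 1; per
the kernel checks below, the importing ring must be created with
IORING_SETUP_DEFER_TASKRUN and CQE32 or CQE_MIXED):

	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <linux/io_uring.h>

	/* Sketch: import an exported zcrx fd into `ring_fd`; on success the
	 * kernel writes back reg.zcrx_id and reg.offsets. */
	static int zcrx_import(int ring_fd, int zcrx_fd, __u32 *zcrx_id)
	{
		struct io_uring_zcrx_ifq_reg reg;
		int ret;

		/* if_rxq, rq_entries, area_ptr and region_ptr must be zero */
		memset(&reg, 0, sizeof(reg));
		reg.flags = ZCRX_REG_IMPORT;
		reg.if_idx = zcrx_fd;	/* fd of the exported zcrx */

		ret = syscall(__NR_io_uring_register, ring_fd,
			      IORING_REGISTER_ZCRX_IFQ, &reg, 1);
		if (!ret)
			*zcrx_id = reg.zcrx_id;
		return ret;
	}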
Signed-off-by: David Wei <dw@davidwei.uk>
---
include/uapi/linux/io_uring.h | 4 +++
io_uring/zcrx.c | 63 +++++++++++++++++++++++++++++++++--
2 files changed, 65 insertions(+), 2 deletions(-)
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index f5dae95bc0a8..49c3ce7f183b 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -1067,6 +1067,10 @@ struct io_uring_zcrx_area_reg {
__u64 __resv2[2];
};
+enum zcrx_reg_flags {
+ ZCRX_REG_IMPORT = 1,
+};
+
/*
* Argument for IORING_REGISTER_ZCRX_IFQ
*/
diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c
index 49990c89ce95..ef6819fc51db 100644
--- a/io_uring/zcrx.c
+++ b/io_uring/zcrx.c
@@ -651,6 +651,63 @@ static int export_zcrx(struct io_ring_ctx *ctx, struct io_zcrx_ifq *ifq,
return fd;
}
+static int import_zcrx(struct io_ring_ctx *ctx,
+ struct io_uring_zcrx_ifq_reg __user *arg,
+ struct io_uring_zcrx_ifq_reg *reg)
+{
+ struct io_zcrx_ifq *ifq;
+ struct file *file;
+ int fd, ret;
+ u32 id;
+
+ if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
+ return -EINVAL;
+ if (!(ctx->flags & (IORING_SETUP_CQE32|IORING_SETUP_CQE_MIXED)))
+ return -EINVAL;
+ if (reg->if_rxq || reg->rq_entries || reg->area_ptr || reg->region_ptr)
+ return -EINVAL;
+
+ fd = reg->if_idx;
+ CLASS(fd, f)(fd);
+ if (fd_empty(f))
+ return -EBADF;
+
+ file = fd_file(f);
+ if (file->f_op != &zcrx_box_fops || !file->private_data)
+ return -EBADF;
+
+ ifq = file->private_data;
+ refcount_inc(&ifq->refs);
+ refcount_inc(&ifq->user_refs);
+
+ scoped_guard(mutex, &ctx->mmap_lock) {
+ ret = xa_alloc(&ctx->zcrx_ctxs, &id, NULL, xa_limit_31b, GFP_KERNEL);
+ if (ret)
+ goto err;
+ }
+
+ reg->zcrx_id = id;
+ io_fill_zcrx_offsets(&reg->offsets);
+ if (copy_to_user(arg, reg, sizeof(*reg))) {
+ ret = -EFAULT;
+ goto err_xa_erase;
+ }
+
+ scoped_guard(mutex, &ctx->mmap_lock) {
+ ret = -ENOMEM;
+ if (xa_store(&ctx->zcrx_ctxs, id, ifq, GFP_KERNEL))
+ goto err_xa_erase;
+ }
+
+ return 0;
+err_xa_erase:
+ scoped_guard(mutex, &ctx->mmap_lock)
+ xa_erase(&ctx->zcrx_ctxs, id);
+err:
+ zcrx_unregister(ifq);
+ return ret;
+}
+
int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
struct io_uring_zcrx_ifq_reg __user *arg)
{
@@ -676,11 +733,13 @@ int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
return -EINVAL;
if (copy_from_user(&reg, arg, sizeof(reg)))
return -EFAULT;
- if (copy_from_user(&rd, u64_to_user_ptr(reg.region_ptr), sizeof(rd)))
- return -EFAULT;
if (!mem_is_zero(&reg.__resv, sizeof(reg.__resv)) ||
reg.__resv2 || reg.zcrx_id)
return -EINVAL;
+ if (reg.flags & ZCRX_REG_IMPORT)
+ return import_zcrx(ctx, arg, &reg);
+ if (copy_from_user(&rd, u64_to_user_ptr(reg.region_ptr), sizeof(rd)))
+ return -EFAULT;
if (reg.if_rxq == -1 || !reg.rq_entries || reg.flags)
return -EINVAL;
if (reg.rq_entries > IO_RQ_MAX_ENTRIES) {
--
2.47.3
* Re: [PATCH v6 0/5] io_uring zcrx ifq sharing
2025-11-08 18:14 [PATCH v6 0/5] io_uring zcrx ifq sharing David Wei
` (4 preceding siblings ...)
2025-11-08 18:14 ` [PATCH v6 5/5] io_uring/zcrx: share an ifq between rings David Wei
@ 2025-11-11 14:40 ` Pavel Begunkov
2025-11-11 17:35 ` David Wei
5 siblings, 1 reply; 8+ messages in thread
From: Pavel Begunkov @ 2025-11-11 14:40 UTC
To: David Wei, io-uring, netdev; +Cc: Jens Axboe
On 11/8/25 18:14, David Wei wrote:
> Each ifq is bound to a HW RX queue with no way to share this across
> multiple rings. It is possible that one ring will not be able to fully
> saturate an entire HW RX queue due to userspace work. There are two ways
> to handle more work:
>
> 1. Move work to other threads, but have to pay context switch overhead
> and run with cold caches.
> 2. Add more rings with ifqs, but HW RX queues are a limited resource.
>
> This patchset adds a way for multiple rings to share the same underlying
> src ifq that is bound to a HW RX queue. Rings with shared ifqs can issue
> io_recvzc on zero copy sockets, just like the src ring.
>
> Userspace is expected to create rings in separate threads and not
> processes, such that all rings share the same address space. This is
> because the sharing and synchronisation of refill rings is purely done
> in userspace with no kernel involvement e.g. dst rings do not mmap the
> refill ring. Also, userspace must distribute zero copy sockets steered
> into the same HW RX queue across rings sharing the ifq.
I agree it's the simplest way to use it, but cross-process sharing
is a valid use case. I'm sure you can mmap it by guessing the offset,
and otherwise you can place it into some shared memory.
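Purely illustrative, assuming the region is backed by user memory that
can live in a memfd:

	#define _GNU_SOURCE
	#include <sys/mman.h>
	#include <unistd.h>

	/* Back the zcrx region with a memfd so a second process can map the
	 * same pages and compute the same offsets. */
	static void *create_shared_region(size_t size, int *mfd_out)
	{
		int mfd = memfd_create("zcrx-region", MFD_CLOEXEC);

		if (mfd < 0 || ftruncate(mfd, size) < 0)
			return MAP_FAILED;
		*mfd_out = mfd;	/* pass to the other process via SCM_RIGHTS */
		return mmap(NULL, size, PROT_READ | PROT_WRITE,
			    MAP_SHARED, mfd, 0);
	}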
The implementation lgtm. I need to give it a run, but let me
queue it up with other dependencies.
--
Pavel Begunkov
* Re: [PATCH v6 0/5] io_uring zcrx ifq sharing
2025-11-11 14:40 ` [PATCH v6 0/5] io_uring zcrx ifq sharing Pavel Begunkov
@ 2025-11-11 17:35 ` David Wei
0 siblings, 0 replies; 8+ messages in thread
From: David Wei @ 2025-11-11 17:35 UTC
To: Pavel Begunkov, io-uring, netdev; +Cc: Jens Axboe
On 2025-11-11 06:40, Pavel Begunkov wrote:
> On 11/8/25 18:14, David Wei wrote:
>> Each ifq is bound to a HW RX queue with no way to share this across
>> multiple rings. It is possible that one ring will not be able to fully
>> saturate an entire HW RX queue due to userspace work. There are two ways
>> to handle more work:
>>
>> 1. Move work to other threads, but have to pay context switch overhead
>> and run with cold caches.
>> 2. Add more rings with ifqs, but HW RX queues are a limited resource.
>>
>> This patchset adds a way for multiple rings to share the same underlying
>> src ifq that is bound to a HW RX queue. Rings with shared ifqs can issue
>> io_recvzc on zero copy sockets, just like the src ring.
>>
>> Userspace is expected to create rings in separate threads and not
>> processes, such that all rings share the same address space. This is
>> because the sharing and synchronisation of refill rings is purely done
>> in userspace with no kernel involvement e.g. dst rings do not mmap the
>> refill ring. Also, userspace must distribute zero copy sockets steered
>> into the same HW RX queue across rings sharing the ifq.
>
> I agree it's the simplest way to use it, but cross process sharing
> is a valid use case. I'm sure you can mmap it by guessing offset
> and you can place it into some shared memory otherwise.
>
> The implementation lgtm. I need to give it a run, but let me
> queue it up with other dependencies.
>
Yeah, there's no reason why shm + mmap wouldn't work cross-process with
the right offsets, but I do suspect it'll be niche, with most users
running iou across threads in the same process.
We can add cross-process support in the future.