* [PATCH rdma-next v3 1/1] RDMA/mana: Provide a modern CQ creation interface
@ 2026-03-18 17:54 Konstantin Taranov
2026-03-22 14:01 ` Leon Romanovsky
0 siblings, 1 reply; 3+ messages in thread
From: Konstantin Taranov @ 2026-03-18 17:54 UTC (permalink / raw)
To: kotaranov, shirazsaleem, longli, jgg, leon; +Cc: linux-rdma, linux-kernel
From: Konstantin Taranov <kotaranov@microsoft.com>
The uverbs CQ creation UAPI allows users to supply their own umem for a CQ.
Create cq->umem if it was not created and use it to create a mana queue.
The created umem is owned by IB/core and will be deallocated by IB/core.
To support RDMA objects that own umem, introduce mana_ib_create_queue_from_umem()
to use the umem provided by the caller and do not de-allocate umem if it was allocated
by the caller.
Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
---
v3: Make umem allocation explicit for cq->umem and use a new helper to create mana queue from it.
Remove the universal helper that was added in v2
v2: Rework of Leon's commit. Introduce universal helper that returned ownership of umem to caller.
Added back the removed u32 overflow check for kernel cq.
drivers/infiniband/hw/mana/cq.c | 131 ++++++++++++++++++---------
drivers/infiniband/hw/mana/device.c | 1 +
drivers/infiniband/hw/mana/main.c | 27 +++---
drivers/infiniband/hw/mana/mana_ib.h | 5 +-
4 files changed, 106 insertions(+), 58 deletions(-)
diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c
index b2749f971..08330f5cf 100644
--- a/drivers/infiniband/hw/mana/cq.c
+++ b/drivers/infiniband/hw/mana/cq.c
@@ -8,12 +8,8 @@
int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct uverbs_attr_bundle *attrs)
{
- struct ib_udata *udata = &attrs->driver_udata;
struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
- struct mana_ib_create_cq_resp resp = {};
- struct mana_ib_ucontext *mana_ucontext;
struct ib_device *ibdev = ibcq->device;
- struct mana_ib_create_cq ucmd = {};
struct mana_ib_dev *mdev;
bool is_rnic_cq;
u32 doorbell;
@@ -26,48 +22,97 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
cq->cq_handle = INVALID_MANA_HANDLE;
is_rnic_cq = mana_ib_is_rnic(mdev);
- if (udata) {
- if (udata->inlen < offsetof(struct mana_ib_create_cq, flags))
- return -EINVAL;
-
- err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
- if (err) {
- ibdev_dbg(ibdev, "Failed to copy from udata for create cq, %d\n", err);
- return err;
- }
+ if (attr->cqe > U32_MAX / COMP_ENTRY_SIZE / 2 + 1)
+ return -EINVAL;
- if ((!is_rnic_cq && attr->cqe > mdev->adapter_caps.max_qp_wr) ||
- attr->cqe > U32_MAX / COMP_ENTRY_SIZE) {
- ibdev_dbg(ibdev, "CQE %d exceeding limit\n", attr->cqe);
- return -EINVAL;
- }
+ buf_size = MANA_PAGE_ALIGN(roundup_pow_of_two(attr->cqe * COMP_ENTRY_SIZE));
+ cq->cqe = buf_size / COMP_ENTRY_SIZE;
+ err = mana_ib_create_kernel_queue(mdev, buf_size, GDMA_CQ, &cq->queue);
+ if (err) {
+ ibdev_dbg(ibdev, "Failed to create kernel queue for create cq, %d\n", err);
+ return err;
+ }
+ doorbell = mdev->gdma_dev->doorbell;
- cq->cqe = attr->cqe;
- err = mana_ib_create_queue(mdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE,
- &cq->queue);
+ if (is_rnic_cq) {
+ err = mana_ib_gd_create_cq(mdev, cq, doorbell);
if (err) {
- ibdev_dbg(ibdev, "Failed to create queue for create cq, %d\n", err);
- return err;
+ ibdev_dbg(ibdev, "Failed to create RNIC cq, %d\n", err);
+ goto err_destroy_queue;
}
- mana_ucontext = rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
- ibucontext);
- doorbell = mana_ucontext->doorbell;
- } else {
- if (attr->cqe > U32_MAX / COMP_ENTRY_SIZE / 2 + 1) {
- ibdev_dbg(ibdev, "CQE %d exceeding limit\n", attr->cqe);
- return -EINVAL;
- }
- buf_size = MANA_PAGE_ALIGN(roundup_pow_of_two(attr->cqe * COMP_ENTRY_SIZE));
- cq->cqe = buf_size / COMP_ENTRY_SIZE;
- err = mana_ib_create_kernel_queue(mdev, buf_size, GDMA_CQ, &cq->queue);
+ err = mana_ib_install_cq_cb(mdev, cq);
if (err) {
- ibdev_dbg(ibdev, "Failed to create kernel queue for create cq, %d\n", err);
- return err;
+ ibdev_dbg(ibdev, "Failed to install cq callback, %d\n", err);
+ goto err_destroy_rnic_cq;
}
- doorbell = mdev->gdma_dev->doorbell;
}
+ spin_lock_init(&cq->cq_lock);
+ INIT_LIST_HEAD(&cq->list_send_qp);
+ INIT_LIST_HEAD(&cq->list_recv_qp);
+
+ return 0;
+
+err_destroy_rnic_cq:
+ mana_ib_gd_destroy_cq(mdev, cq);
+err_destroy_queue:
+ mana_ib_destroy_queue(mdev, &cq->queue);
+
+ return err;
+}
+
+int mana_ib_create_user_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
+ struct ib_udata *udata = &attrs->driver_udata;
+ struct mana_ib_create_cq_resp resp = {};
+ struct mana_ib_ucontext *mana_ucontext;
+ struct ib_device *ibdev = ibcq->device;
+ struct mana_ib_create_cq ucmd = {};
+ struct mana_ib_dev *mdev;
+ bool is_rnic_cq;
+ u32 doorbell;
+ int err;
+
+ mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+
+ cq->comp_vector = attr->comp_vector % ibdev->num_comp_vectors;
+ cq->cq_handle = INVALID_MANA_HANDLE;
+ is_rnic_cq = mana_ib_is_rnic(mdev);
+
+ if (udata->inlen < offsetof(struct mana_ib_create_cq, flags))
+ return -EINVAL;
+
+ err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
+ if (err) {
+ ibdev_dbg(ibdev, "Failed to copy from udata for create cq, %d\n", err);
+ return err;
+ }
+
+ if ((!is_rnic_cq && attr->cqe > mdev->adapter_caps.max_qp_wr) ||
+ attr->cqe > U32_MAX / COMP_ENTRY_SIZE)
+ return -EINVAL;
+
+ cq->cqe = attr->cqe;
+ if (!ibcq->umem)
+ ibcq->umem = ib_umem_get(&mdev->ib_dev, ucmd.buf_addr,
+ cq->cqe * COMP_ENTRY_SIZE,
+ IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(ibcq->umem))
+ return PTR_ERR(ibcq->umem);
+
+ err = mana_ib_create_queue_from_umem(mdev, ibcq->umem, &cq->queue);
+ if (err) {
+ ibdev_dbg(ibdev, "Failed to create queue for create cq, %d\n", err);
+ return err;
+ }
+
+ mana_ucontext = rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
+ ibucontext);
+ doorbell = mana_ucontext->doorbell;
+
if (is_rnic_cq) {
err = mana_ib_gd_create_cq(mdev, cq, doorbell);
if (err) {
@@ -82,13 +127,11 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
}
}
- if (udata) {
- resp.cqid = cq->queue.id;
- err = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
- if (err) {
- ibdev_dbg(&mdev->ib_dev, "Failed to copy to udata, %d\n", err);
- goto err_remove_cq_cb;
- }
+ resp.cqid = cq->queue.id;
+ err = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
+ if (err) {
+ ibdev_dbg(&mdev->ib_dev, "Failed to copy to udata, %d\n", err);
+ goto err_remove_cq_cb;
}
spin_lock_init(&cq->cq_lock);
diff --git a/drivers/infiniband/hw/mana/device.c b/drivers/infiniband/hw/mana/device.c
index ccc2279ca..c5c5fe051 100644
--- a/drivers/infiniband/hw/mana/device.c
+++ b/drivers/infiniband/hw/mana/device.c
@@ -21,6 +21,7 @@ static const struct ib_device_ops mana_ib_dev_ops = {
.alloc_ucontext = mana_ib_alloc_ucontext,
.create_ah = mana_ib_create_ah,
.create_cq = mana_ib_create_cq,
+ .create_user_cq = mana_ib_create_user_cq,
.create_qp = mana_ib_create_qp,
.create_rwq_ind_table = mana_ib_create_rwq_ind_table,
.create_wq = mana_ib_create_wq,
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index 8d99cd00f..b928e79f6 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -261,30 +261,31 @@ int mana_ib_create_kernel_queue(struct mana_ib_dev *mdev, u32 size, enum gdma_qu
return 0;
}
+int mana_ib_create_queue_from_umem(struct mana_ib_dev *mdev, struct ib_umem *umem,
+ struct mana_ib_queue *queue)
+{
+ queue->umem = NULL;
+ queue->id = INVALID_QUEUE_ID;
+ queue->gdma_region = GDMA_INVALID_DMA_REGION;
+
+ return mana_ib_create_zero_offset_dma_region(mdev, umem, &queue->gdma_region);
+}
+
int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
struct mana_ib_queue *queue)
{
struct ib_umem *umem;
int err;
- queue->umem = NULL;
- queue->id = INVALID_QUEUE_ID;
- queue->gdma_region = GDMA_INVALID_DMA_REGION;
-
umem = ib_umem_get(&mdev->ib_dev, addr, size, IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(umem)) {
- ibdev_dbg(&mdev->ib_dev, "Failed to get umem, %pe\n", umem);
+ if (IS_ERR(umem))
return PTR_ERR(umem);
- }
- err = mana_ib_create_zero_offset_dma_region(mdev, umem, &queue->gdma_region);
- if (err) {
- ibdev_dbg(&mdev->ib_dev, "Failed to create dma region, %d\n", err);
+ err = mana_ib_create_queue_from_umem(mdev, umem, queue);
+ if (err)
goto free_umem;
- }
- queue->umem = umem;
- ibdev_dbg(&mdev->ib_dev, "created dma region 0x%llx\n", queue->gdma_region);
+ queue->umem = umem;
return 0;
free_umem:
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index a7c8c0fd7..42167f12b 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -626,6 +626,8 @@ int mana_ib_create_kernel_queue(struct mana_ib_dev *mdev, u32 size, enum gdma_qu
struct mana_ib_queue *queue);
int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
struct mana_ib_queue *queue);
+int mana_ib_create_queue_from_umem(struct mana_ib_dev *mdev, struct ib_umem *umem,
+ struct mana_ib_queue *queue);
void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue);
struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
@@ -667,7 +669,8 @@ void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd,
int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct uverbs_attr_bundle *attrs);
-
+int mana_ib_create_user_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs);
int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
int mana_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
--
2.43.0
^ permalink raw reply related [flat|nested] 3+ messages in thread* Re: [PATCH rdma-next v3 1/1] RDMA/mana: Provide a modern CQ creation interface
2026-03-18 17:54 [PATCH rdma-next v3 1/1] RDMA/mana: Provide a modern CQ creation interface Konstantin Taranov
@ 2026-03-22 14:01 ` Leon Romanovsky
2026-03-25 10:29 ` [EXTERNAL] " Konstantin Taranov
0 siblings, 1 reply; 3+ messages in thread
From: Leon Romanovsky @ 2026-03-22 14:01 UTC (permalink / raw)
To: Konstantin Taranov
Cc: kotaranov, shirazsaleem, longli, jgg, linux-rdma, linux-kernel
On Wed, Mar 18, 2026 at 10:54:55AM -0700, Konstantin Taranov wrote:
> From: Konstantin Taranov <kotaranov@microsoft.com>
>
> The uverbs CQ creation UAPI allows users to supply their own umem for a CQ.
> Create cq->umem if it was not created and use it to create a mana queue.
> The created umem is owned by IB/core and will be deallocated by IB/core.
>
> To support RDMA objects that own umem, introduce mana_ib_create_queue_with_umem()
> to use the umem provided by the caller and do not de-allocate umem if it was allocted
> by the caller.
>
> Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
> ---
> v3: Make umem allocation explicit for cq->umem and use a new helper to create mana queue from it.
> Remove the universal helper that was added in v2
> v2: Rework of Leon's commit. Introduce univesal helper that returned ownership of umem to caller.
> Added removed u32 overlow check for kernel cq.
> drivers/infiniband/hw/mana/cq.c | 131 ++++++++++++++++++---------
> drivers/infiniband/hw/mana/device.c | 1 +
> drivers/infiniband/hw/mana/main.c | 27 +++---
> drivers/infiniband/hw/mana/mana_ib.h | 5 +-
> 4 files changed, 106 insertions(+), 58 deletions(-)
<...>
> +int mana_ib_create_queue_from_umem(struct mana_ib_dev *mdev, struct ib_umem *umem,
> + struct mana_ib_queue *queue)
> +{
> + queue->umem = NULL;
Two things. First, I'm waiting for Jason to converge on this
ib_copy_*() work. Second, I still believe drivers should not cache
umem.
Thanks
^ permalink raw reply [flat|nested] 3+ messages in thread* RE: [EXTERNAL] Re: [PATCH rdma-next v3 1/1] RDMA/mana: Provide a modern CQ creation interface
2026-03-22 14:01 ` Leon Romanovsky
@ 2026-03-25 10:29 ` Konstantin Taranov
0 siblings, 0 replies; 3+ messages in thread
From: Konstantin Taranov @ 2026-03-25 10:29 UTC (permalink / raw)
To: Leon Romanovsky, Konstantin Taranov
Cc: Shiraz Saleem, Long Li, jgg@ziepe.ca, linux-rdma@vger.kernel.org,
linux-kernel@vger.kernel.org
> -----Original Message-----
> From: Leon Romanovsky <leon@kernel.org>
> Sent: Sunday, 22 March 2026 15:01
> To: Konstantin Taranov <kotaranov@linux.microsoft.com>
> Cc: Konstantin Taranov <kotaranov@microsoft.com>; Shiraz Saleem
> <shirazsaleem@microsoft.com>; Long Li <longli@microsoft.com>;
> jgg@ziepe.ca; linux-rdma@vger.kernel.org; linux-kernel@vger.kernel.org
> Subject: [EXTERNAL] Re: [PATCH rdma-next v3 1/1] RDMA/mana: Provide a
> modern CQ creation interface
>
> On Wed, Mar 18, 2026 at 10:54:55AM -0700, Konstantin Taranov wrote:
> > From: Konstantin Taranov <kotaranov@microsoft.com>
> >
> > The uverbs CQ creation UAPI allows users to supply their own umem for a
> CQ.
> > Create cq->umem if it was not created and use it to create a mana queue.
> > The created umem is owned by IB/core and will be deallocated by IB/core.
> >
> > To support RDMA objects that own umem, introduce
> > mana_ib_create_queue_with_umem() to use the umem provided by the
> > caller and do not de-allocate umem if it was allocted by the caller.
> >
> > Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
> > ---
> > v3: Make umem allocation explicit for cq->umem and use a new helper to
> create mana queue from it.
> > Remove the universal helper that was added in v2
> > v2: Rework of Leon's commit. Introduce univesal helper that returned
> ownership of umem to caller.
> > Added removed u32 overlow check for kernel cq.
> > drivers/infiniband/hw/mana/cq.c | 131 ++++++++++++++++++---------
> > drivers/infiniband/hw/mana/device.c | 1 +
> > drivers/infiniband/hw/mana/main.c | 27 +++---
> > drivers/infiniband/hw/mana/mana_ib.h | 5 +-
> > 4 files changed, 106 insertions(+), 58 deletions(-)
>
> <...>
>
> > +int mana_ib_create_queue_from_umem(struct mana_ib_dev *mdev,
> struct ib_umem *umem,
> > + struct mana_ib_queue *queue)
> > +{
> > + queue->umem = NULL;
>
> Two things. First, I'm waiting for Jason to converge on this
> ib_copy_*() work. Second, I still believe drivers should not cache umem.
Thanks for reviewing!
The helper mana_ib_create_queue_from_umem() does not cache umem.
In general, for other objects, mana_ib needs to cache umem (e.g., for QPs).
In our HW design QPs have several queues and we need to pin them, so
queue->umem just indicates that some queues still have their own umem.
I am happy to employ mana_ib_create_queue_from_umem() in other places
and remove queue->umem completely, once it is possible.
Thanks
>
> Thanks
^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads:[~2026-03-25 10:29 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2026-03-18 17:54 [PATCH rdma-next v3 1/1] RDMA/mana: Provide a modern CQ creation interface Konstantin Taranov
2026-03-22 14:01 ` Leon Romanovsky
2026-03-25 10:29 ` [EXTERNAL] " Konstantin Taranov
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox