From: Jiri Pirko <jiri@resnulli.us>
To: linux-rdma@vger.kernel.org
Cc: jgg@ziepe.ca, leon@kernel.org, mrgolin@amazon.com,
gal.pressman@linux.dev, sleybo@amazon.com, parav@nvidia.com,
mbloch@nvidia.com, yanjun.zhu@linux.dev,
marco.crivellari@suse.com, roman.gushchin@linux.dev,
phaddad@nvidia.com, lirongqing@baidu.com, ynachum@amazon.com,
huangjunxian6@hisilicon.com, kalesh-anakkur.purayil@broadcom.com,
ohartoov@nvidia.com, michaelgur@nvidia.com, shayd@nvidia.com,
edwards@nvidia.com, sriharsha.basavapatna@broadcom.com,
andrew.gospodarek@broadcom.com, selvin.xavier@broadcom.com
Subject: [PATCH rdma-next v2 02/15] RDMA/uverbs: Push out CQ buffer umem processing into a helper
Date: Sat, 11 Apr 2026 16:49:02 +0200 [thread overview]
Message-ID: <20260411144915.114571-3-jiri@resnulli.us> (raw)
In-Reply-To: <20260411144915.114571-1-jiri@resnulli.us>
From: Jiri Pirko <jiri@nvidia.com>
Extract the UVERBS_ATTR_CREATE_CQ_BUFFER_* attribute processing from
the CQ create handler into uverbs_create_cq_get_umem(), separating the
buffer acquisition logic from the rest of CQ creation.
Signed-off-by: Jiri Pirko <jiri@nvidia.com>
---
drivers/infiniband/core/uverbs_std_types_cq.c | 127 ++++++++++--------
1 file changed, 69 insertions(+), 58 deletions(-)
diff --git a/drivers/infiniband/core/uverbs_std_types_cq.c b/drivers/infiniband/core/uverbs_std_types_cq.c
index d2c8f71f934c..4afe27fef6c9 100644
--- a/drivers/infiniband/core/uverbs_std_types_cq.c
+++ b/drivers/infiniband/core/uverbs_std_types_cq.c
@@ -58,6 +58,72 @@ static int uverbs_free_cq(struct ib_uobject *uobject,
return 0;
}
+static struct ib_umem *uverbs_create_cq_get_umem(struct ib_device *ib_dev,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_umem_dmabuf *umem_dmabuf;
+ u64 buffer_length;
+ u64 buffer_offset;
+ u64 buffer_va;
+ int buffer_fd;
+ int ret;
+
+ if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_VA)) {
+ ret = uverbs_copy_from(&buffer_va, attrs,
+ UVERBS_ATTR_CREATE_CQ_BUFFER_VA);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = uverbs_copy_from(&buffer_length, attrs,
+ UVERBS_ATTR_CREATE_CQ_BUFFER_LENGTH);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_FD) ||
+ uverbs_attr_is_valid(attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_OFFSET) ||
+ !ib_dev->ops.create_user_cq)
+ return ERR_PTR(-EINVAL);
+
+ return ib_umem_get(ib_dev, buffer_va, buffer_length,
+ IB_ACCESS_LOCAL_WRITE);
+ }
+
+ if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_FD)) {
+ ret = uverbs_get_raw_fd(&buffer_fd, attrs,
+ UVERBS_ATTR_CREATE_CQ_BUFFER_FD);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = uverbs_copy_from(&buffer_offset, attrs,
+ UVERBS_ATTR_CREATE_CQ_BUFFER_OFFSET);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = uverbs_copy_from(&buffer_length, attrs,
+ UVERBS_ATTR_CREATE_CQ_BUFFER_LENGTH);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_VA) ||
+ !ib_dev->ops.create_user_cq)
+ return ERR_PTR(-EINVAL);
+
+ umem_dmabuf = ib_umem_dmabuf_get_pinned(ib_dev, buffer_offset,
+ buffer_length, buffer_fd,
+ IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(umem_dmabuf))
+ return ERR_CAST(umem_dmabuf);
+ return &umem_dmabuf->umem;
+ }
+
+ if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_OFFSET) ||
+ uverbs_attr_is_valid(attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_LENGTH) ||
+ !ib_dev->ops.create_cq)
+ return ERR_PTR(-EINVAL);
+
+ return NULL;
+}
+
static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
struct uverbs_attr_bundle *attrs)
{
@@ -66,16 +132,11 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
typeof(*obj), uevent.uobject);
struct ib_uverbs_completion_event_file *ev_file = NULL;
struct ib_device *ib_dev = attrs->context->device;
- struct ib_umem_dmabuf *umem_dmabuf;
struct ib_cq_init_attr attr = {};
struct ib_uobject *ev_file_uobj;
struct ib_umem *umem = NULL;
- u64 buffer_length;
- u64 buffer_offset;
struct ib_cq *cq;
u64 user_handle;
- u64 buffer_va;
- int buffer_fd;
int ret;
if ((!ib_dev->ops.create_cq && !ib_dev->ops.create_user_cq) ||
@@ -122,59 +183,9 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
INIT_LIST_HEAD(&obj->comp_list);
INIT_LIST_HEAD(&obj->uevent.event_list);
- if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_VA)) {
-
- ret = uverbs_copy_from(&buffer_va, attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_VA);
- if (ret)
- goto err_event_file;
-
- ret = uverbs_copy_from(&buffer_length, attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_LENGTH);
- if (ret)
- goto err_event_file;
-
- if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_FD) ||
- uverbs_attr_is_valid(attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_OFFSET) ||
- !ib_dev->ops.create_user_cq) {
- ret = -EINVAL;
- goto err_event_file;
- }
-
- umem = ib_umem_get(ib_dev, buffer_va, buffer_length, IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(umem)) {
- ret = PTR_ERR(umem);
- goto err_event_file;
- }
- } else if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_FD)) {
-
- ret = uverbs_get_raw_fd(&buffer_fd, attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_FD);
- if (ret)
- goto err_event_file;
-
- ret = uverbs_copy_from(&buffer_offset, attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_OFFSET);
- if (ret)
- goto err_event_file;
-
- ret = uverbs_copy_from(&buffer_length, attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_LENGTH);
- if (ret)
- goto err_event_file;
-
- if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_VA) ||
- !ib_dev->ops.create_user_cq) {
- ret = -EINVAL;
- goto err_event_file;
- }
-
- umem_dmabuf = ib_umem_dmabuf_get_pinned(ib_dev, buffer_offset, buffer_length,
- buffer_fd, IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(umem_dmabuf)) {
- ret = PTR_ERR(umem_dmabuf);
- goto err_event_file;
- }
- umem = &umem_dmabuf->umem;
- } else if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_OFFSET) ||
- uverbs_attr_is_valid(attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_LENGTH) ||
- !ib_dev->ops.create_cq) {
- ret = -EINVAL;
+ umem = uverbs_create_cq_get_umem(ib_dev, attrs);
+ if (IS_ERR(umem)) {
+ ret = PTR_ERR(umem);
goto err_event_file;
}
--
2.53.0
next prev parent reply other threads:[~2026-04-11 14:49 UTC|newest]
Thread overview: 17+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-11 14:49 [PATCH rdma-next v2 00/15] RDMA: Introduce generic buffer descriptor infrastructure for umem Jiri Pirko
2026-04-11 14:49 ` [PATCH rdma-next v2 01/15] RDMA/core: " Jiri Pirko
2026-04-12 12:33 ` Michael Margolin
2026-04-11 14:49 ` Jiri Pirko [this message]
2026-04-11 14:49 ` [PATCH rdma-next v2 03/15] RDMA/uverbs: Integrate umem_list into CQ creation Jiri Pirko
2026-04-11 14:49 ` [PATCH rdma-next v2 04/15] RDMA/efa: Use umem_list for user CQ buffer Jiri Pirko
2026-04-11 14:49 ` [PATCH rdma-next v2 05/15] RDMA/mlx5: " Jiri Pirko
2026-04-11 14:49 ` [PATCH rdma-next v2 06/15] RDMA/bnxt_re: " Jiri Pirko
2026-04-11 14:49 ` [PATCH rdma-next v2 07/15] RDMA/mlx4: " Jiri Pirko
2026-04-11 14:49 ` [PATCH rdma-next v2 08/15] RDMA/uverbs: Remove legacy umem field from struct ib_cq Jiri Pirko
2026-04-11 14:49 ` [PATCH rdma-next v2 09/15] RDMA/uverbs: Verify all umem_list buffers are consumed after CQ creation Jiri Pirko
2026-04-11 14:49 ` [PATCH rdma-next v2 10/15] RDMA/uverbs: Integrate umem_list into QP creation Jiri Pirko
2026-04-11 14:49 ` [PATCH rdma-next v2 11/15] RDMA/mlx5: Use umem_list for QP buffers in create_qp Jiri Pirko
2026-04-11 14:49 ` [PATCH rdma-next v2 12/15] RDMA/uverbs: Add doorbell record buffer slot to CQ umem_list Jiri Pirko
2026-04-11 14:49 ` [PATCH rdma-next v2 13/15] RDMA/mlx5: Use umem_list for CQ doorbell record Jiri Pirko
2026-04-11 14:49 ` [PATCH rdma-next v2 14/15] RDMA/uverbs: Add doorbell record buffer slot to QP umem_list Jiri Pirko
2026-04-11 14:49 ` [PATCH rdma-next v2 15/15] RDMA/mlx5: Use umem_list for QP doorbell record Jiri Pirko
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260411144915.114571-3-jiri@resnulli.us \
--to=jiri@resnulli.us \
--cc=andrew.gospodarek@broadcom.com \
--cc=edwards@nvidia.com \
--cc=gal.pressman@linux.dev \
--cc=huangjunxian6@hisilicon.com \
--cc=jgg@ziepe.ca \
--cc=kalesh-anakkur.purayil@broadcom.com \
--cc=leon@kernel.org \
--cc=linux-rdma@vger.kernel.org \
--cc=lirongqing@baidu.com \
--cc=marco.crivellari@suse.com \
--cc=mbloch@nvidia.com \
--cc=michaelgur@nvidia.com \
--cc=mrgolin@amazon.com \
--cc=ohartoov@nvidia.com \
--cc=parav@nvidia.com \
--cc=phaddad@nvidia.com \
--cc=roman.gushchin@linux.dev \
--cc=selvin.xavier@broadcom.com \
--cc=shayd@nvidia.com \
--cc=sleybo@amazon.com \
--cc=sriharsha.basavapatna@broadcom.com \
--cc=yanjun.zhu@linux.dev \
--cc=ynachum@amazon.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox