* [PATCH for-next v2 0/1] RDMA/bnxt_re: Add dmabuf support
@ 2023-07-31 8:01 Selvin Xavier
2023-07-31 8:01 ` [PATCH for-next v2 1/1] RDMA/bnxt_re: Add support for dmabuf pinned memory regions Selvin Xavier
0 siblings, 1 reply; 3+ messages in thread
From: Selvin Xavier @ 2023-07-31 8:01 UTC (permalink / raw)
To: jgg, leon; +Cc: linux-rdma, andrew.gospodarek, Selvin Xavier
[-- Attachment #1: Type: text/plain, Size: 644 bytes --]
Adding support for pinned dmabuf memory regions.
The corresponding user library patch is available in the
pull request. https://github.com/linux-rdma/rdma-core/pull/1373
Please review and apply.
Thanks,
Selvin
v1 -> v2:
Reworked the patch based on Jason's comment, by passing the umem
to the helper routine.
Saravanan Vajravel (1):
RDMA/bnxt_re: Add support for dmabuf pinned memory regions
drivers/infiniband/hw/bnxt_re/ib_verbs.c | 83 ++++++++++++++++++++++----------
drivers/infiniband/hw/bnxt_re/ib_verbs.h | 4 ++
drivers/infiniband/hw/bnxt_re/main.c | 1 +
3 files changed, 62 insertions(+), 26 deletions(-)
--
2.5.5
[-- Attachment #2: S/MIME Cryptographic Signature --]
[-- Type: application/pkcs7-signature, Size: 4224 bytes --]
^ permalink raw reply [flat|nested] 3+ messages in thread
* [PATCH for-next v2 1/1] RDMA/bnxt_re: Add support for dmabuf pinned memory regions
2023-07-31 8:01 [PATCH for-next v2 0/1] RDMA/bnxt_re: Add dmabuf support Selvin Xavier
@ 2023-07-31 8:01 ` Selvin Xavier
2023-08-15 18:58 ` Jason Gunthorpe
0 siblings, 1 reply; 3+ messages in thread
From: Selvin Xavier @ 2023-07-31 8:01 UTC (permalink / raw)
To: jgg, leon
Cc: linux-rdma, andrew.gospodarek, Saravanan Vajravel, Selvin Xavier
[-- Attachment #1: Type: text/plain, Size: 6415 bytes --]
From: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
Support the new verb which indicates dmabuf support.
bnxt doesn't support ODP, so use the pinned version of the
dmabuf APIs to enable bnxt_re devices to work as a dmabuf importer.
Signed-off-by: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
---
drivers/infiniband/hw/bnxt_re/ib_verbs.c | 83 ++++++++++++++++++++++----------
drivers/infiniband/hw/bnxt_re/ib_verbs.h | 4 ++
drivers/infiniband/hw/bnxt_re/main.c | 1 +
3 files changed, 62 insertions(+), 26 deletions(-)
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 2b2505a..718572d 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -3981,16 +3981,13 @@ int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
return rc;
}
-/* uverbs */
-struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
- u64 virt_addr, int mr_access_flags,
- struct ib_udata *udata)
+static struct ib_mr *__bnxt_re_user_reg_mr(struct ib_pd *ib_pd, u64 length, u64 virt_addr,
+ int mr_access_flags, struct ib_umem *umem)
{
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
struct bnxt_re_dev *rdev = pd->rdev;
- struct bnxt_re_mr *mr;
- struct ib_umem *umem;
unsigned long page_size;
+ struct bnxt_re_mr *mr;
int umem_pgs, rc;
u32 active_mrs;
@@ -4000,6 +3997,12 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
return ERR_PTR(-ENOMEM);
}
+ page_size = ib_umem_find_best_pgsz(umem, BNXT_RE_PAGE_SIZE_SUPPORTED, virt_addr);
+ if (!page_size) {
+ ibdev_err(&rdev->ibdev, "umem page size unsupported!");
+ return ERR_PTR(-EINVAL);
+ }
+
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(-ENOMEM);
@@ -4011,36 +4014,23 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
if (rc) {
- ibdev_err(&rdev->ibdev, "Failed to allocate MR");
+ ibdev_err(&rdev->ibdev, "Failed to allocate MR rc = %d", rc);
+ rc = -EIO;
goto free_mr;
}
/* The fixed portion of the rkey is the same as the lkey */
mr->ib_mr.rkey = mr->qplib_mr.rkey;
-
- umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
- if (IS_ERR(umem)) {
- ibdev_err(&rdev->ibdev, "Failed to get umem");
- rc = -EFAULT;
- goto free_mrw;
- }
mr->ib_umem = umem;
-
mr->qplib_mr.va = virt_addr;
- page_size = ib_umem_find_best_pgsz(
- umem, BNXT_RE_PAGE_SIZE_SUPPORTED, virt_addr);
- if (!page_size) {
- ibdev_err(&rdev->ibdev, "umem page size unsupported!");
- rc = -EFAULT;
- goto free_umem;
- }
mr->qplib_mr.total_size = length;
umem_pgs = ib_umem_num_dma_blocks(umem, page_size);
rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, umem,
umem_pgs, page_size);
if (rc) {
- ibdev_err(&rdev->ibdev, "Failed to register user MR");
- goto free_umem;
+ ibdev_err(&rdev->ibdev, "Failed to register user MR - rc = %d\n", rc);
+ rc = -EIO;
+ goto free_mrw;
}
mr->ib_mr.lkey = mr->qplib_mr.lkey;
@@ -4050,8 +4040,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
rdev->stats.res.mr_watermark = active_mrs;
return &mr->ib_mr;
-free_umem:
- ib_umem_release(umem);
+
free_mrw:
bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
free_mr:
@@ -4059,6 +4048,48 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
return ERR_PTR(rc);
}
+struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
+ u64 virt_addr, int mr_access_flags,
+ struct ib_udata *udata)
+{
+ struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ struct bnxt_re_dev *rdev = pd->rdev;
+ struct ib_umem *umem;
+ struct ib_mr *ib_mr;
+
+ umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
+ if (IS_ERR(umem))
+ return ERR_CAST(umem);
+
+ ib_mr = __bnxt_re_user_reg_mr(ib_pd, length, virt_addr, mr_access_flags, umem);
+ if (IS_ERR(ib_mr))
+ ib_umem_release(umem);
+ return ib_mr;
+}
+
+struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
+ u64 length, u64 virt_addr, int fd,
+ int mr_access_flags, struct ib_udata *udata)
+{
+ struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ struct bnxt_re_dev *rdev = pd->rdev;
+ struct ib_umem_dmabuf *umem_dmabuf;
+ struct ib_umem *umem;
+ struct ib_mr *ib_mr;
+
+ umem_dmabuf = ib_umem_dmabuf_get_pinned(&rdev->ibdev, start, length,
+ fd, mr_access_flags);
+ if (IS_ERR(umem_dmabuf))
+ return ERR_CAST(umem_dmabuf);
+
+ umem = &umem_dmabuf->umem;
+
+ ib_mr = __bnxt_re_user_reg_mr(ib_pd, length, virt_addr, mr_access_flags, umem);
+ if (IS_ERR(ib_mr))
+ ib_umem_release(umem);
+ return ib_mr;
+}
+
int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
{
struct ib_device *ibdev = ctx->device;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index f392a09..84715b7 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -229,6 +229,10 @@ int bnxt_re_dealloc_mw(struct ib_mw *mw);
struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int mr_access_flags,
struct ib_udata *udata);
+struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
+ u64 length, u64 virt_addr,
+ int fd, int mr_access_flags,
+ struct ib_udata *udata);
int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata);
void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 87960ac..c467415 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -862,6 +862,7 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
.query_qp = bnxt_re_query_qp,
.query_srq = bnxt_re_query_srq,
.reg_user_mr = bnxt_re_reg_user_mr,
+ .reg_user_mr_dmabuf = bnxt_re_reg_user_mr_dmabuf,
.req_notify_cq = bnxt_re_req_notify_cq,
.resize_cq = bnxt_re_resize_cq,
INIT_RDMA_OBJ_SIZE(ib_ah, bnxt_re_ah, ib_ah),
--
2.5.5
[-- Attachment #2: S/MIME Cryptographic Signature --]
[-- Type: application/pkcs7-signature, Size: 4224 bytes --]
^ permalink raw reply related [flat|nested] 3+ messages in thread
* Re: [PATCH for-next v2 1/1] RDMA/bnxt_re: Add support for dmabuf pinned memory regions
2023-07-31 8:01 ` [PATCH for-next v2 1/1] RDMA/bnxt_re: Add support for dmabuf pinned memory regions Selvin Xavier
@ 2023-08-15 18:58 ` Jason Gunthorpe
0 siblings, 0 replies; 3+ messages in thread
From: Jason Gunthorpe @ 2023-08-15 18:58 UTC (permalink / raw)
To: Selvin Xavier; +Cc: leon, linux-rdma, andrew.gospodarek, Saravanan Vajravel
On Mon, Jul 31, 2023 at 01:01:13AM -0700, Selvin Xavier wrote:
> From: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
>
> Support the new verb which indicates dmabuf support.
> bnxt doesn't support ODP. So use the pinned version of the
> dmabuf APIs to enable bnxt_re devices to work as dmabuf importer.
>
> Signed-off-by: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
> Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
> ---
> drivers/infiniband/hw/bnxt_re/ib_verbs.c | 83 ++++++++++++++++++++++----------
> drivers/infiniband/hw/bnxt_re/ib_verbs.h | 4 ++
> drivers/infiniband/hw/bnxt_re/main.c | 1 +
> 3 files changed, 62 insertions(+), 26 deletions(-)
Applied to for-next, thanks
Jason
^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads:[~2023-08-15 19:00 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-07-31 8:01 [PATCH for-next v2 0/1] RDMA/bnxt_re: Add dmabuf support Selvin Xavier
2023-07-31 8:01 ` [PATCH for-next v2 1/1] RDMA/bnxt_re: Add support for dmabuf pinned memory regions Selvin Xavier
2023-08-15 18:58 ` Jason Gunthorpe
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox