* [PATCH] RDMA/bnxt_re: Use rdma_umem_for_each_dma_block()
@ 2020-09-30 0:24 Jason Gunthorpe
2020-10-06 8:13 ` Selvin Xavier
2020-10-06 19:46 ` Jason Gunthorpe
0 siblings, 2 replies; 3+ messages in thread
From: Jason Gunthorpe @ 2020-09-30 0:24 UTC (permalink / raw)
To: Devesh Sharma, Doug Ledford, linux-rdma, Naresh Kumar PBS,
Selvin Xavier, Somnath Kotur, Sriharsha Basavapatna
This driver is taking the SGL out of the umem and passing it through a
struct bnxt_qplib_sg_info. Instead of passing the SGL, pass the umem and
then use rdma_umem_for_each_dma_block() directly.
Move the calls of ib_umem_num_dma_blocks() closer to their actual point of
use; npages is only set for non-umem pbl flows.
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
drivers/infiniband/hw/bnxt_re/ib_verbs.c | 18 +++-----------
drivers/infiniband/hw/bnxt_re/qplib_res.c | 30 +++++++++++++----------
drivers/infiniband/hw/bnxt_re/qplib_res.h | 3 +--
3 files changed, 22 insertions(+), 29 deletions(-)
This is part of the umem cleanup. It is a bit complicated, so it would be
good for someone to check it. Thanks
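For reviewers who have not used the new helpers yet, here is a minimal,
self-contained sketch (not part of the patch) of the pattern the driver is
converted to: size the page table from ib_umem_num_dma_blocks() and walk the
mapping with rdma_umem_for_each_dma_block(). Apart from those helpers and
rdma_block_iter_dma_address(), the struct and function names below are
illustrative only, not the driver's.

#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <rdma/ib_umem.h>

/* Illustrative stand-in for the driver's page-table bookkeeping. */
struct toy_pbl {
	dma_addr_t *pg_map_arr;
	u32 pg_count;
};

static int toy_fill_pbl(struct toy_pbl *pbl, struct ib_umem *umem,
			unsigned long pgsize)
{
	struct ib_block_iter biter;
	u32 pages = ib_umem_num_dma_blocks(umem, pgsize);
	int i = 0;

	pbl->pg_map_arr = vmalloc(pages * sizeof(dma_addr_t));
	if (!pbl->pg_map_arr)
		return -ENOMEM;

	/* Each iteration yields one pgsize-aligned DMA block of the umem. */
	rdma_umem_for_each_dma_block(umem, &biter, pgsize)
		pbl->pg_map_arr[i++] = rdma_block_iter_dma_address(&biter);

	pbl->pg_count = pages;
	return 0;
}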
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index a0e8d93595d8e8..e2707b27c9500c 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -940,9 +940,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
return PTR_ERR(umem);
qp->sumem = umem;
- qplib_qp->sq.sg_info.sghead = umem->sg_head.sgl;
- qplib_qp->sq.sg_info.npages = ib_umem_num_dma_blocks(umem, PAGE_SIZE);
- qplib_qp->sq.sg_info.nmap = umem->nmap;
+ qplib_qp->sq.sg_info.umem = umem;
qplib_qp->sq.sg_info.pgsize = PAGE_SIZE;
qplib_qp->sq.sg_info.pgshft = PAGE_SHIFT;
qplib_qp->qp_handle = ureq.qp_handle;
@@ -955,10 +953,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
if (IS_ERR(umem))
goto rqfail;
qp->rumem = umem;
- qplib_qp->rq.sg_info.sghead = umem->sg_head.sgl;
- qplib_qp->rq.sg_info.npages =
- ib_umem_num_dma_blocks(umem, PAGE_SIZE);
- qplib_qp->rq.sg_info.nmap = umem->nmap;
+ qplib_qp->rq.sg_info.umem = umem;
qplib_qp->rq.sg_info.pgsize = PAGE_SIZE;
qplib_qp->rq.sg_info.pgshft = PAGE_SHIFT;
}
@@ -1612,9 +1607,7 @@ static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
return PTR_ERR(umem);
srq->umem = umem;
- qplib_srq->sg_info.sghead = umem->sg_head.sgl;
- qplib_srq->sg_info.npages = ib_umem_num_dma_blocks(umem, PAGE_SIZE);
- qplib_srq->sg_info.nmap = umem->nmap;
+ qplib_srq->sg_info.umem = umem;
qplib_srq->sg_info.pgsize = PAGE_SIZE;
qplib_srq->sg_info.pgshft = PAGE_SHIFT;
qplib_srq->srq_handle = ureq.srq_handle;
@@ -2865,10 +2858,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
rc = PTR_ERR(cq->umem);
goto fail;
}
- cq->qplib_cq.sg_info.sghead = cq->umem->sg_head.sgl;
- cq->qplib_cq.sg_info.npages =
- ib_umem_num_dma_blocks(cq->umem, PAGE_SIZE);
- cq->qplib_cq.sg_info.nmap = cq->umem->nmap;
+ cq->qplib_cq.sg_info.umem = cq->umem;
cq->qplib_cq.dpi = &uctx->dpi;
} else {
cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 7efa6e5dce6282..fa7878336100ac 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -45,6 +45,9 @@
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_umem.h>
+
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
@@ -87,12 +90,11 @@ static void __free_pbl(struct bnxt_qplib_res *res, struct bnxt_qplib_pbl *pbl,
static void bnxt_qplib_fill_user_dma_pages(struct bnxt_qplib_pbl *pbl,
struct bnxt_qplib_sg_info *sginfo)
{
- struct scatterlist *sghead = sginfo->sghead;
- struct sg_dma_page_iter sg_iter;
+ struct ib_block_iter biter;
int i = 0;
- for_each_sg_dma_page(sghead, &sg_iter, sginfo->nmap, 0) {
- pbl->pg_map_arr[i] = sg_page_iter_dma_address(&sg_iter);
+ rdma_umem_for_each_dma_block(sginfo->umem, &biter, sginfo->pgsize) {
+ pbl->pg_map_arr[i] = rdma_block_iter_dma_address(&biter);
pbl->pg_arr[i] = NULL;
pbl->pg_count++;
i++;
@@ -104,15 +106,16 @@ static int __alloc_pbl(struct bnxt_qplib_res *res,
struct bnxt_qplib_sg_info *sginfo)
{
struct pci_dev *pdev = res->pdev;
- struct scatterlist *sghead;
bool is_umem = false;
u32 pages;
int i;
if (sginfo->nopte)
return 0;
- pages = sginfo->npages;
- sghead = sginfo->sghead;
+ if (sginfo->umem)
+ pages = ib_umem_num_dma_blocks(sginfo->umem, sginfo->pgsize);
+ else
+ pages = sginfo->npages;
/* page ptr arrays */
pbl->pg_arr = vmalloc(pages * sizeof(void *));
if (!pbl->pg_arr)
@@ -127,7 +130,7 @@ static int __alloc_pbl(struct bnxt_qplib_res *res,
pbl->pg_count = 0;
pbl->pg_size = sginfo->pgsize;
- if (!sghead) {
+ if (!sginfo->umem) {
for (i = 0; i < pages; i++) {
pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
pbl->pg_size,
@@ -183,14 +186,12 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
struct bnxt_qplib_sg_info sginfo = {};
u32 depth, stride, npbl, npde;
dma_addr_t *src_phys_ptr, **dst_virt_ptr;
- struct scatterlist *sghead = NULL;
struct bnxt_qplib_res *res;
struct pci_dev *pdev;
int i, rc, lvl;
res = hwq_attr->res;
pdev = res->pdev;
- sghead = hwq_attr->sginfo->sghead;
pg_size = hwq_attr->sginfo->pgsize;
hwq->level = PBL_LVL_MAX;
@@ -204,7 +205,7 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
aux_pages++;
}
- if (!sghead) {
+ if (!hwq_attr->sginfo->umem) {
hwq->is_user = false;
npages = (depth * stride) / pg_size + aux_pages;
if ((depth * stride) % pg_size)
@@ -213,11 +214,14 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
return -EINVAL;
hwq_attr->sginfo->npages = npages;
} else {
+ unsigned long sginfo_num_pages = ib_umem_num_dma_blocks(
+ hwq_attr->sginfo->umem, hwq_attr->sginfo->pgsize);
+
hwq->is_user = true;
- npages = hwq_attr->sginfo->npages;
+ npages = sginfo_num_pages;
npages = (npages * PAGE_SIZE) /
BIT_ULL(hwq_attr->sginfo->pgshft);
- if ((hwq_attr->sginfo->npages * PAGE_SIZE) %
+ if ((sginfo_num_pages * PAGE_SIZE) %
BIT_ULL(hwq_attr->sginfo->pgshft))
if (!npages)
npages++;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index 9da470d1e4a3c2..ceb94db20a786a 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -126,8 +126,7 @@ struct bnxt_qplib_pbl {
};
struct bnxt_qplib_sg_info {
- struct scatterlist *sghead;
- u32 nmap;
+ struct ib_umem *umem;
u32 npages;
u32 pgshft;
u32 pgsize;
--
2.28.0
* Re: [PATCH] RDMA/bnxt_re: Use rdma_umem_for_each_dma_block()
2020-09-30 0:24 [PATCH] RDMA/bnxt_re: Use rdma_umem_for_each_dma_block() Jason Gunthorpe
@ 2020-10-06 8:13 ` Selvin Xavier
2020-10-06 19:46 ` Jason Gunthorpe
1 sibling, 0 replies; 3+ messages in thread
From: Selvin Xavier @ 2020-10-06 8:13 UTC (permalink / raw)
To: Jason Gunthorpe
Cc: Devesh Sharma, Doug Ledford, linux-rdma, Naresh Kumar PBS,
Somnath Kotur, Sriharsha Basavapatna
On Wed, Sep 30, 2020 at 5:54 AM Jason Gunthorpe <jgg@nvidia.com> wrote:
>
> This driver is taking the SGL out of the umem and passing it through a
> struct bnxt_qplib_sg_info. Instead of passing the SGL, pass the umem and
> then use rdma_umem_for_each_dma_block() directly.
>
> Move the calls of ib_umem_num_dma_blocks() closer to their actual point of
> use; npages is only set for non-umem pbl flows.
>
> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Acked-by: Selvin Xavier <selvin.xavier@broadcom.com>
Tested-by: Selvin Xavier <selvin.xavier@broadcom.com>
Thanks
* Re: [PATCH] RDMA/bnxt_re: Use rdma_umem_for_each_dma_block()
2020-09-30 0:24 [PATCH] RDMA/bnxt_re: Use rdma_umem_for_each_dma_block() Jason Gunthorpe
2020-10-06 8:13 ` Selvin Xavier
@ 2020-10-06 19:46 ` Jason Gunthorpe
1 sibling, 0 replies; 3+ messages in thread
From: Jason Gunthorpe @ 2020-10-06 19:46 UTC (permalink / raw)
To: Devesh Sharma, Doug Ledford, linux-rdma, Naresh Kumar PBS,
Selvin Xavier, Somnath Kotur, Sriharsha Basavapatna
On Tue, Sep 29, 2020 at 09:24:35PM -0300, Jason Gunthorpe wrote:
> This driver is taking the SGL out of the umem and passing it through a
> struct bnxt_qplib_sg_info. Instead of passing the SGL, pass the umem and
> then use rdma_umem_for_each_dma_block() directly.
>
> Move the calls of ib_umem_num_dma_blocks() closer to their actual point of
> use; npages is only set for non-umem pbl flows.
>
> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
> Acked-by: Selvin Xavier <selvin.xavier@broadcom.com>
> Tested-by: Selvin Xavier <selvin.xavier@broadcom.com>
Applied to for-next, thanks for checking
Jason