From: Boshi Yu <boshiyu@linux.alibaba.com>
To: jgg@ziepe.ca, leon@kernel.org
Cc: linux-rdma@vger.kernel.org, chengyou@linux.alibaba.com,
kaishen@linux.alibaba.com
Subject: [PATCH for-next 1/3] RDMA/erdma: Rename get/put_mtt_entries to erdma_mem_init/uninit
Date: Thu, 7 May 2026 13:34:19 +0800 [thread overview]
Message-ID: <20260507053437.46211-2-boshiyu@linux.alibaba.com> (raw)
In-Reply-To: <20260507053437.46211-1-boshiyu@linux.alibaba.com>
The get_mtt_entries() interface actually initializes a struct
erdma_mem. Rename get_mtt_entries()/put_mtt_entries() to
erdma_mem_init()/erdma_mem_uninit(), respectively.
Reviewed-by: Cheng Xu <chengyou@linux.alibaba.com>
Signed-off-by: Boshi Yu <boshiyu@linux.alibaba.com>
---
drivers/infiniband/hw/erdma/erdma_verbs.c | 60 +++++++++++------------
1 file changed, 30 insertions(+), 30 deletions(-)
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
index 5523b4e151e1..a418a3c92754 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
@@ -823,9 +823,9 @@ static void erdma_destroy_mtt(struct erdma_dev *dev, struct erdma_mtt *mtt)
}
}
-static int get_mtt_entries(struct erdma_dev *dev, struct erdma_mem *mem,
- u64 start, u64 len, int access, u64 virt,
- unsigned long req_page_size, bool force_continuous)
+static int erdma_mem_init(struct erdma_dev *dev, struct erdma_mem *mem,
+ u64 start, u64 len, int access, u64 virt,
+ unsigned long req_page_size, bool force_continuous)
{
int ret = 0;
@@ -862,7 +862,7 @@ static int get_mtt_entries(struct erdma_dev *dev, struct erdma_mem *mem,
return ret;
}
-static void put_mtt_entries(struct erdma_dev *dev, struct erdma_mem *mem)
+static void erdma_mem_uninit(struct erdma_dev *dev, struct erdma_mem *mem)
{
if (mem->mtt)
erdma_destroy_mtt(dev, mem->mtt);
@@ -946,45 +946,45 @@ static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx,
qp->attrs.rq_size * RQE_SIZE))
return -EINVAL;
- ret = get_mtt_entries(qp->dev, &qp->user_qp.sq_mem, va,
- qp->attrs.sq_size << SQEBB_SHIFT, 0, va,
- (SZ_1M - SZ_4K), true);
+ ret = erdma_mem_init(qp->dev, &qp->user_qp.sq_mem, va,
+ qp->attrs.sq_size << SQEBB_SHIFT, 0, va,
+ (SZ_1M - SZ_4K), true);
if (ret)
return ret;
rq_offset = ALIGN(qp->attrs.sq_size << SQEBB_SHIFT, ERDMA_HW_PAGE_SIZE);
qp->user_qp.rq_offset = rq_offset;
- ret = get_mtt_entries(qp->dev, &qp->user_qp.rq_mem, va + rq_offset,
+ ret = erdma_mem_init(qp->dev, &qp->user_qp.rq_mem, va + rq_offset,
qp->attrs.rq_size << RQE_SHIFT, 0, va + rq_offset,
(SZ_1M - SZ_4K), true);
if (ret)
- goto put_sq_mtt;
+ goto uninit_sq_mem;
ret = erdma_map_user_dbrecords(uctx, dbrec_va,
&qp->user_qp.user_dbr_page,
&dbrec_dma);
if (ret)
- goto put_rq_mtt;
+ goto uninit_rq_mem;
qp->user_qp.sq_dbrec_dma = dbrec_dma;
qp->user_qp.rq_dbrec_dma = dbrec_dma + ERDMA_DB_SIZE;
return 0;
-put_rq_mtt:
- put_mtt_entries(qp->dev, &qp->user_qp.rq_mem);
+uninit_rq_mem:
+ erdma_mem_uninit(qp->dev, &qp->user_qp.rq_mem);
-put_sq_mtt:
- put_mtt_entries(qp->dev, &qp->user_qp.sq_mem);
+uninit_sq_mem:
+ erdma_mem_uninit(qp->dev, &qp->user_qp.sq_mem);
return ret;
}
static void free_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx)
{
- put_mtt_entries(qp->dev, &qp->user_qp.sq_mem);
- put_mtt_entries(qp->dev, &qp->user_qp.rq_mem);
+ erdma_mem_uninit(qp->dev, &qp->user_qp.sq_mem);
+ erdma_mem_uninit(qp->dev, &qp->user_qp.rq_mem);
erdma_unmap_user_dbrecords(uctx, &qp->user_qp.user_dbr_page);
}
@@ -1248,14 +1248,14 @@ struct ib_mr *erdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
if (!mr)
return ERR_PTR(-ENOMEM);
- ret = get_mtt_entries(dev, &mr->mem, start, len, access, virt,
- SZ_2G - SZ_4K, false);
+ ret = erdma_mem_init(dev, &mr->mem, start, len, access, virt,
+ SZ_2G - SZ_4K, false);
if (ret)
goto err_out_free;
ret = erdma_create_stag(dev, &stag);
if (ret)
- goto err_out_put_mtt;
+ goto err_uninit_mem;
mr->ibmr.lkey = mr->ibmr.rkey = stag;
mr->ibmr.pd = ibpd;
@@ -1275,8 +1275,8 @@ struct ib_mr *erdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX],
mr->ibmr.lkey >> 8);
-err_out_put_mtt:
- put_mtt_entries(dev, &mr->mem);
+err_uninit_mem:
+ erdma_mem_uninit(dev, &mr->mem);
err_out_free:
kfree(mr);
@@ -1306,7 +1306,7 @@ int erdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX], ibmr->lkey >> 8);
- put_mtt_entries(dev, &mr->mem);
+ erdma_mem_uninit(dev, &mr->mem);
kfree(mr);
return 0;
@@ -1337,7 +1337,7 @@ int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
cq->kern_cq.dbrec_dma);
} else {
erdma_unmap_user_dbrecords(ctx, &cq->user_cq.user_dbr_page);
- put_mtt_entries(dev, &cq->user_cq.qbuf_mem);
+ erdma_mem_uninit(dev, &cq->user_cq.qbuf_mem);
}
xa_erase(&dev->cq_xa, cq->cqn);
@@ -1384,8 +1384,8 @@ int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
if (rdma_is_kernel_res(&qp->ibqp.res)) {
free_kernel_qp(qp);
} else {
- put_mtt_entries(dev, &qp->user_qp.sq_mem);
- put_mtt_entries(dev, &qp->user_qp.rq_mem);
+ erdma_mem_uninit(dev, &qp->user_qp.sq_mem);
+ erdma_mem_uninit(dev, &qp->user_qp.rq_mem);
erdma_unmap_user_dbrecords(ctx, &qp->user_qp.user_dbr_page);
}
@@ -1907,9 +1907,9 @@ static int erdma_init_user_cq(struct erdma_ucontext *ctx, struct erdma_cq *cq,
int ret;
struct erdma_dev *dev = to_edev(cq->ibcq.device);
- ret = get_mtt_entries(dev, &cq->user_cq.qbuf_mem, ureq->qbuf_va,
- ureq->qbuf_len, 0, ureq->qbuf_va, SZ_64M - SZ_4K,
- true);
+ ret = erdma_mem_init(dev, &cq->user_cq.qbuf_mem, ureq->qbuf_va,
+ ureq->qbuf_len, 0, ureq->qbuf_va, SZ_64M - SZ_4K,
+ true);
if (ret)
return ret;
@@ -1917,7 +1917,7 @@ static int erdma_init_user_cq(struct erdma_ucontext *ctx, struct erdma_cq *cq,
&cq->user_cq.user_dbr_page,
&cq->user_cq.dbrec_dma);
if (ret)
- put_mtt_entries(dev, &cq->user_cq.qbuf_mem);
+ erdma_mem_uninit(dev, &cq->user_cq.qbuf_mem);
return ret;
}
@@ -2009,7 +2009,7 @@ int erdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
err_free_res:
if (!rdma_is_kernel_res(&ibcq->res)) {
erdma_unmap_user_dbrecords(ctx, &cq->user_cq.user_dbr_page);
- put_mtt_entries(dev, &cq->user_cq.qbuf_mem);
+ erdma_mem_uninit(dev, &cq->user_cq.qbuf_mem);
} else {
dma_free_coherent(&dev->pdev->dev, depth << CQE_SHIFT,
cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr);
--
2.46.0
next prev parent reply other threads:[~2026-05-07 5:34 UTC|newest]
Thread overview: 4+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-05-07 5:34 [PATCH for-next 0/3] RDMA/erdma: Add DMA-BUF memory registration Boshi Yu
2026-05-07 5:34 ` Boshi Yu [this message]
2026-05-07 5:34 ` [PATCH for-next 2/3] RDMA/erdma: Introduce struct erdma_mem_init_attr Boshi Yu
2026-05-07 5:34 ` [PATCH for-next 3/3] RDMA/erdma: Implement erdma_reg_user_mr_dmabuf Boshi Yu
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260507053437.46211-2-boshiyu@linux.alibaba.com \
--to=boshiyu@linux.alibaba.com \
--cc=chengyou@linux.alibaba.com \
--cc=jgg@ziepe.ca \
--cc=kaishen@linux.alibaba.com \
--cc=leon@kernel.org \
--cc=linux-rdma@vger.kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox