From: Xiao Yang <yangx.jy@fujitsu.com>
To: <jgg@nvidia.com>, <ira.weiny@intel.com>, <linyunsheng@huawei.com>
Cc: <lizhijian@fujitsu.com>, <linux-rdma@vger.kernel.org>,
Xiao Yang <yangx.jy@fujitsu.com>
Subject: [PATCH v2 2/2] RDMA/rxe: Replace page_address() with kmap_local_page()
Date: Fri, 11 Nov 2022 07:51:25 +0000 [thread overview]
Message-ID: <1668153085-15-2-git-send-email-yangx.jy@fujitsu.com> (raw)
In-Reply-To: <1668153085-15-1-git-send-email-yangx.jy@fujitsu.com>
page_address() will be broken when the new in-kernel memory protection
scheme[1] is applied in the future, so use kmap_local_page() instead.
[1]:
https://lore.kernel.org/lkml/20220419170649.1022246-1-ira.weiny@intel.com/
Signed-off-by: Xiao Yang <yangx.jy@fujitsu.com>
---
drivers/infiniband/sw/rxe/rxe_loc.h | 2 ++
drivers/infiniband/sw/rxe/rxe_mr.c | 44 +++++++++++++++++-----------
drivers/infiniband/sw/rxe/rxe_resp.c | 1 +
3 files changed, 30 insertions(+), 17 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index c2a5c8814a48..a63d29156a66 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -68,6 +68,8 @@ void rxe_mr_init_dma(int access, struct rxe_mr *mr);
int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
int access, struct rxe_mr *mr);
int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr);
+void *rxe_map_to_vaddr(struct rxe_mr *mr, int map_index, int addr_index, size_t offset);
+void rxe_unmap_vaddr(struct rxe_mr *mr, void *vaddr);
int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
enum rxe_mr_copy_dir dir);
int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma,
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 4438eb8a3727..2e6408188083 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -118,9 +118,7 @@ int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
struct ib_umem *umem;
struct sg_page_iter sg_iter;
int num_buf;
- void *vaddr;
int err;
- int i;
umem = ib_umem_get(&rxe->ib_dev, start, length, access);
if (IS_ERR(umem)) {
@@ -154,15 +152,7 @@ int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
num_buf = 0;
}
- vaddr = page_address(sg_page_iter_page(&sg_iter));
- if (!vaddr) {
- pr_warn("%s: Unable to get virtual address\n",
- __func__);
- err = -ENOMEM;
- goto err_cleanup_map;
- }
-
- map[0]->addrs[num_buf] = (uintptr_t)vaddr;
+ map[0]->addrs[num_buf] = (uintptr_t)sg_page_iter_page(&sg_iter);
num_buf++;
}
@@ -176,10 +166,6 @@ int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
return 0;
-err_cleanup_map:
- for (i = 0; i < mr->num_map; i++)
- kfree(mr->map[i]);
- kfree(mr->map);
err_release_umem:
ib_umem_release(umem);
err_out:
@@ -240,6 +226,28 @@ static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
}
}
+void *rxe_map_to_vaddr(struct rxe_mr *mr, int map_index, int addr_index, size_t offset)
+{
+ void *vaddr = NULL;
+
+ if (mr->ibmr.type == IB_MR_TYPE_USER) {
+ vaddr = kmap_local_page((struct page *)mr->map[map_index]->addrs[addr_index]);
+ if (vaddr == NULL) {
+ pr_warn("Failed to map page");
+ return NULL;
+ }
+ } else
+ vaddr = (void *)(uintptr_t)mr->map[map_index]->addrs[addr_index];
+
+ return vaddr + offset;
+}
+
+void rxe_unmap_vaddr(struct rxe_mr *mr, void *vaddr)
+{
+ if (mr->ibmr.type == IB_MR_TYPE_USER)
+ kunmap_local(vaddr);
+}
+
void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
{
size_t offset;
@@ -271,7 +279,7 @@ void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
goto out;
}
- addr = (void *)(uintptr_t)mr->map[m]->addrs[n] + offset;
+ addr = rxe_map_to_vaddr(mr, m, n, offset);
out:
return addr;
@@ -318,7 +326,7 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
while (length > 0) {
u8 *src, *dest;
- va = (u8 *)(uintptr_t)mr->map[m]->addrs[i] + offset;
+ va = (u8 *)rxe_map_to_vaddr(mr, m, i, offset);
src = (dir == RXE_TO_MR_OBJ) ? addr : va;
dest = (dir == RXE_TO_MR_OBJ) ? va : addr;
@@ -339,6 +347,8 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
i = 0;
m++;
}
+
+ rxe_unmap_vaddr(mr, va);
}
return 0;
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index c32bc12cc82f..31f9ba11a921 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -652,6 +652,7 @@ static enum resp_states atomic_reply(struct rxe_qp *qp,
ret = RESPST_ACKNOWLEDGE;
out:
+ rxe_unmap_vaddr(mr, vaddr);
return ret;
}
--
2.25.1
next prev parent reply other threads:[~2022-11-11 7:51 UTC|newest]
Thread overview: 5+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-11-11 7:51 [PATCH v2 1/2] RDMA/rxe: Remove struct rxe_phys_buf Xiao Yang
2022-11-11 7:51 ` Xiao Yang [this message]
2022-11-18 5:22 ` Yang, Xiao/杨 晓
2022-11-19 1:20 ` Jason Gunthorpe
2022-11-20 13:36 ` Yang, Xiao/杨 晓
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1668153085-15-2-git-send-email-yangx.jy@fujitsu.com \
--to=yangx.jy@fujitsu.com \
--cc=ira.weiny@intel.com \
--cc=jgg@nvidia.com \
--cc=linux-rdma@vger.kernel.org \
--cc=linyunsheng@huawei.com \
--cc=lizhijian@fujitsu.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox