From: Jason Gunthorpe <jgg@nvidia.com>
To: Xiao Yang <yangx.jy@fujitsu.com>
Cc: ira.weiny@intel.com, linyunsheng@huawei.com,
lizhijian@fujitsu.com, linux-rdma@vger.kernel.org
Subject: Re: [PATCH v2 1/2] RDMA/rxe: Remove struct rxe_phys_buf
Date: Fri, 18 Nov 2022 21:20:38 -0400
Message-ID: <Y3gvZr6/NCii9Avy@nvidia.com>
In-Reply-To: <1668153085-15-1-git-send-email-yangx.jy@fujitsu.com>
On Fri, Nov 11, 2022 at 07:51:24AM +0000, Xiao Yang wrote:
> 1) Remove rxe_phys_buf[n].size by using ibmr.page_size.
> 2) Replace rxe_phys_buf[n].buf with addrs[n].
This almost certainly doesn't work, but here is a general sketch of how
all of this really should look:
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index a22476d27b3843..7539cf3e00db55 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -68,7 +68,6 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
enum rxe_mr_copy_dir dir);
int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma,
void *addr, int length, enum rxe_mr_copy_dir dir);
-void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length);
struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
enum rxe_mr_lookup_type type);
int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length);
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index b1423000e4bcda..7cd76f0213c265 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -65,41 +65,23 @@ static void rxe_mr_init(int access, struct rxe_mr *mr)
static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf)
{
- int i;
- int num_map;
- struct rxe_map **map = mr->map;
+ XA_STATE(xas, &mr->pages, 0);
+ int i = 0;
- num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;
+ xa_init(&mr->pages);
- mr->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
- if (!mr->map)
- goto err1;
-
- for (i = 0; i < num_map; i++) {
- mr->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
- if (!mr->map[i])
- goto err2;
- }
-
- BUILD_BUG_ON(!is_power_of_2(RXE_BUF_PER_MAP));
-
- mr->map_shift = ilog2(RXE_BUF_PER_MAP);
- mr->map_mask = RXE_BUF_PER_MAP - 1;
-
- mr->num_buf = num_buf;
- mr->num_map = num_map;
- mr->max_buf = num_map * RXE_BUF_PER_MAP;
-
- return 0;
-
-err2:
- for (i--; i >= 0; i--)
- kfree(mr->map[i]);
-
- kfree(mr->map);
- mr->map = NULL;
-err1:
- return -ENOMEM;
+ do {
+ xas_lock(&xas);
+ while (i != num_buf) {
+ xas_store(&xas, XA_ZERO_ENTRY);
+ if (xas_error(&xas))
+ break;
+ xas_next(&xas);
+ i++;
+ }
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, GFP_KERNEL));
+ return xas_error(&xas);
}
void rxe_mr_init_dma(int access, struct rxe_mr *mr)
@@ -111,75 +93,66 @@ void rxe_mr_init_dma(int access, struct rxe_mr *mr)
mr->ibmr.type = IB_MR_TYPE_DMA;
}
+static int rxe_mr_fill_pages_from_sgt(struct rxe_mr *mr, struct sg_table *sgt)
+{
+ XA_STATE(xas, &mr->pages, 0);
+ struct sg_page_iter sg_iter;
+
+ __sg_page_iter_start(&sg_iter, sgt->sgl, sgt->orig_nents, 0);
+ if (!__sg_page_iter_next(&sg_iter))
+ return 0;
+ do {
+ xas_lock(&xas);
+ while (true) {
+ if (xas.xa_index &&
+ WARN_ON(sg_iter.sg_pgoffset % PAGE_SIZE)) {
+ xas_set_err(&xas, -EINVAL);
+ break;
+ }
+ xas_store(&xas, sg_page_iter_page(&sg_iter));
+ if (xas_error(&xas))
+ break;
+ xas_next(&xas);
+ if (!__sg_page_iter_next(&sg_iter))
+ break;
+ }
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, GFP_KERNEL));
+
+ return xas_error(&xas);
+}
+
int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
int access, struct rxe_mr *mr)
{
- struct rxe_map **map;
- struct rxe_phys_buf *buf = NULL;
- struct ib_umem *umem;
- struct sg_page_iter sg_iter;
- int num_buf;
- void *vaddr;
+ struct ib_umem *umem;
int err;
+ xa_init(&mr->pages);
+
umem = ib_umem_get(&rxe->ib_dev, start, length, access);
if (IS_ERR(umem)) {
rxe_dbg_mr(mr, "Unable to pin memory region err = %d\n",
(int)PTR_ERR(umem));
- err = PTR_ERR(umem);
- goto err_out;
- }
-
- num_buf = ib_umem_num_pages(umem);
-
- rxe_mr_init(access, mr);
-
- err = rxe_mr_alloc(mr, num_buf);
- if (err) {
- rxe_dbg_mr(mr, "Unable to allocate memory for map\n");
- goto err_release_umem;
+ return PTR_ERR(umem);
}
mr->page_shift = PAGE_SHIFT;
mr->page_mask = PAGE_SIZE - 1;
+ err = rxe_mr_fill_pages_from_sgt(mr, &umem->sgt_append.sgt);
+ if (err)
+ goto err_release_umem;
- num_buf = 0;
- map = mr->map;
- if (length > 0) {
- buf = map[0]->buf;
-
- for_each_sgtable_page (&umem->sgt_append.sgt, &sg_iter, 0) {
- if (num_buf >= RXE_BUF_PER_MAP) {
- map++;
- buf = map[0]->buf;
- num_buf = 0;
- }
-
- vaddr = page_address(sg_page_iter_page(&sg_iter));
- if (!vaddr) {
- rxe_dbg_mr(mr, "Unable to get virtual address\n");
- err = -ENOMEM;
- goto err_release_umem;
- }
- buf->addr = (uintptr_t)vaddr;
- buf->size = PAGE_SIZE;
- num_buf++;
- buf++;
-
- }
- }
-
+ rxe_mr_init(access, mr);
mr->umem = umem;
mr->access = access;
mr->offset = ib_umem_offset(umem);
mr->state = RXE_MR_STATE_VALID;
mr->ibmr.type = IB_MR_TYPE_USER;
-
return 0;
err_release_umem:
ib_umem_release(umem);
-err_out:
return err;
}
@@ -204,77 +177,44 @@ int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr)
return err;
}
-static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
- size_t *offset_out)
+static int rxe_mr_copy_xarray(struct rxe_mr *mr, void *mem,
+ unsigned long start_index,
+ unsigned int start_offset, unsigned int length,
+ enum rxe_mr_copy_dir dir)
{
- size_t offset = iova - mr->ibmr.iova + mr->offset;
- int map_index;
- int buf_index;
- u64 length;
-
- if (likely(mr->page_shift)) {
- *offset_out = offset & mr->page_mask;
- offset >>= mr->page_shift;
- *n_out = offset & mr->map_mask;
- *m_out = offset >> mr->map_shift;
- } else {
- map_index = 0;
- buf_index = 0;
-
- length = mr->map[map_index]->buf[buf_index].size;
-
- while (offset >= length) {
- offset -= length;
- buf_index++;
-
- if (buf_index == RXE_BUF_PER_MAP) {
- map_index++;
- buf_index = 0;
- }
- length = mr->map[map_index]->buf[buf_index].size;
- }
+ XA_STATE(xas, &mr->pages, start_index);
+ struct page *entry;
- *m_out = map_index;
- *n_out = buf_index;
- *offset_out = offset;
- }
-}
+ rcu_read_lock();
+ while (length) {
+ unsigned int nbytes;
+ void *vpage;
-void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
-{
- size_t offset;
- int m, n;
- void *addr;
-
- if (mr->state != RXE_MR_STATE_VALID) {
- rxe_dbg_mr(mr, "Not in valid state\n");
- addr = NULL;
- goto out;
- }
+ entry = xas_next(&xas);
+ if (xas_retry(&xas, entry))
+ continue;
- if (!mr->map) {
- addr = (void *)(uintptr_t)iova;
- goto out;
- }
+ /* Walked past the end of the array */
+ if (WARN_ON(!entry)) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
- if (mr_check_range(mr, iova, length)) {
- rxe_dbg_mr(mr, "Range violation\n");
- addr = NULL;
- goto out;
- }
+ nbytes = min_t(unsigned int, length, PAGE_SIZE - start_offset);
- lookup_iova(mr, iova, &m, &n, &offset);
+ vpage = kmap_local_page(entry);
+ if (dir == RXE_FROM_MR_OBJ)
+ memcpy(mem, vpage + start_offset, nbytes);
+ else
+ memcpy(vpage + start_offset, mem, nbytes);
+ kunmap_local(vpage);
- if (offset + length > mr->map[m]->buf[n].size) {
- rxe_dbg_mr(mr, "Crosses page boundary\n");
- addr = NULL;
- goto out;
+ mem += nbytes;
+ start_offset = 0;
+ length -= nbytes;
}
-
- addr = (void *)(uintptr_t)mr->map[m]->buf[n].addr + offset;
-
-out:
- return addr;
+ rcu_read_unlock();
+ return 0;
}
/* copy data from a range (vaddr, vaddr+length-1) to or from
@@ -283,75 +223,9 @@ void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
enum rxe_mr_copy_dir dir)
{
- int err;
- int bytes;
- u8 *va;
- struct rxe_map **map;
- struct rxe_phys_buf *buf;
- int m;
- int i;
- size_t offset;
-
- if (length == 0)
- return 0;
-
- if (mr->ibmr.type == IB_MR_TYPE_DMA) {
- u8 *src, *dest;
-
- src = (dir == RXE_TO_MR_OBJ) ? addr : ((void *)(uintptr_t)iova);
-
- dest = (dir == RXE_TO_MR_OBJ) ? ((void *)(uintptr_t)iova) : addr;
-
- memcpy(dest, src, length);
-
- return 0;
- }
-
- WARN_ON_ONCE(!mr->map);
-
- err = mr_check_range(mr, iova, length);
- if (err) {
- err = -EFAULT;
- goto err1;
- }
-
- lookup_iova(mr, iova, &m, &i, &offset);
-
- map = mr->map + m;
- buf = map[0]->buf + i;
-
- while (length > 0) {
- u8 *src, *dest;
-
- va = (u8 *)(uintptr_t)buf->addr + offset;
- src = (dir == RXE_TO_MR_OBJ) ? addr : va;
- dest = (dir == RXE_TO_MR_OBJ) ? va : addr;
-
- bytes = buf->size - offset;
-
- if (bytes > length)
- bytes = length;
-
- memcpy(dest, src, bytes);
-
- length -= bytes;
- addr += bytes;
-
- offset = 0;
- buf++;
- i++;
-
- if (i == RXE_BUF_PER_MAP) {
- i = 0;
- map++;
- buf = map[0]->buf;
- }
- }
-
- return 0;
-
-err1:
- return err;
+ /* FIXME: Check that IOVA & length are valid, permissions, etc */
+ return rxe_mr_copy_xarray(mr, addr, rxe_mr_iova_to_index(iova),
+ iova % PAGE_SIZE, length, dir);
}
/* copy data in or out of a wqe, i.e. sg list
@@ -609,15 +483,9 @@ int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
void rxe_mr_cleanup(struct rxe_pool_elem *elem)
{
struct rxe_mr *mr = container_of(elem, typeof(*mr), elem);
- int i;
rxe_put(mr_pd(mr));
ib_umem_release(mr->umem);
- if (mr->map) {
- for (i = 0; i < mr->num_map; i++)
- kfree(mr->map[i]);
-
- kfree(mr->map);
- }
+ xa_destroy(&mr->pages);
}
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 6761bcd1d4d8f7..c1ed200e797779 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -631,22 +631,30 @@ static enum resp_states atomic_reply(struct rxe_qp *qp,
}
if (!res->replay) {
+ u64 iova = qp->resp.va + qp->resp.offset;
+ unsigned int page_offset = iova % PAGE_SIZE;
+ struct page *page;
+
if (mr->state != RXE_MR_STATE_VALID) {
ret = RESPST_ERR_RKEY_VIOLATION;
goto out;
}
- vaddr = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset,
- sizeof(u64));
-
/* check vaddr is 8 bytes aligned. */
- if (!vaddr || (uintptr_t)vaddr & 7) {
+ if (iova & 7) {
ret = RESPST_ERR_MISALIGNED_ATOMIC;
goto out;
}
+ /*
+ * FIXME: Need to ensure the xarray isn't changing while
+ * this is happening
+ */
+ page = xa_load(&mr->pages, rxe_mr_iova_to_index(iova));
+
+ vaddr = kmap_local_page(page);
spin_lock_bh(&atomic_ops_lock);
- res->atomic.orig_val = value = *vaddr;
+ res->atomic.orig_val = value = *(u64 *)((u8 *)vaddr + page_offset);
if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP) {
if (value == atmeth_comp(pkt))
@@ -655,8 +663,9 @@ static enum resp_states atomic_reply(struct rxe_qp *qp,
value += atmeth_swap_add(pkt);
}
- *vaddr = value;
+ *(u64 *)((u8 *)vaddr + page_offset) = value;
spin_unlock_bh(&atomic_ops_lock);
+ kunmap_local(vaddr);
qp->resp.msn++;
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 025b35bf014e2a..092994a0ec947a 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -948,23 +948,44 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
return ERR_PTR(err);
}
-static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
+static int rxe_mr_fill_pages_from_sgl_prefix(struct rxe_mr *mr,
+ struct scatterlist *sgl,
+ unsigned int sg_nents,
+ unsigned int *sg_offset)
{
- struct rxe_mr *mr = to_rmr(ibmr);
- struct rxe_map *map;
- struct rxe_phys_buf *buf;
-
- if (unlikely(mr->nbuf == mr->num_buf))
- return -ENOMEM;
-
- map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
- buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];
+ XA_STATE(xas, &mr->pages, 0);
+ struct sg_page_iter sg_iter;
+ struct scatterlist *cur_sg;
+ unsigned int done_sg = 1;
- buf->addr = addr;
- buf->size = ibmr->page_size;
- mr->nbuf++;
+ __sg_page_iter_start(&sg_iter, sgl, sg_nents, *sg_offset);
+ if (!__sg_page_iter_next(&sg_iter))
+ return 0;
+ cur_sg = sg_iter.sg;
+ do {
+ xas_lock(&xas);
+ while (true) {
+ if (xas.xa_index && sg_iter.sg_pgoffset % PAGE_SIZE) {
+ *sg_offset = sg_iter.sg_pgoffset;
+ break;
+ }
+ xas_store(&xas, sg_page_iter_page(&sg_iter));
+ if (xas_error(&xas))
+ break;
+ xas_next(&xas);
+ if (!__sg_page_iter_next(&sg_iter))
+ break;
+ if (cur_sg != sg_iter.sg) {
+ done_sg++;
+ cur_sg = sg_iter.sg;
+ }
+ }
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, GFP_KERNEL));
- return 0;
+ if (xas_error(&xas))
+ return xas_error(&xas);
+ return done_sg;
}
static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
@@ -974,8 +995,7 @@ static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int n;
mr->nbuf = 0;
-
- n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);
+ n = rxe_mr_fill_pages_from_sgl_prefix(mr, sg, sg_nents, sg_offset);
mr->page_shift = ilog2(ibmr->page_size);
mr->page_mask = ibmr->page_size - 1;
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 22a299b0a9f0a8..6eebbd7b91a687 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -320,7 +320,7 @@ struct rxe_mr {
atomic_t num_mw;
- struct rxe_map **map;
+ struct xarray pages;
};
enum rxe_mw_state {
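
Two loose ends in the sketch worth spelling out:

rxe_mr_iova_to_index() isn't written out above (and the call sites as
written would also need the mr passed in). A minimal version, assuming
the xarray is indexed by page number the same way the deleted
lookup_iova() computed it, would be:

static unsigned long rxe_mr_iova_to_index(struct rxe_mr *mr, u64 iova)
{
	/* Byte offset into the MR, shifted down to a page index -
	 * the same math the deleted lookup_iova() used.
	 */
	return (iova - mr->ibmr.iova + mr->offset) >> mr->page_shift;
}

And the FIXME in rxe_mr_copy() wants roughly what the deleted code did
before walking the xarray - the IB_MR_TYPE_DMA fast path plus the
range check, something like:

	if (length == 0)
		return 0;

	if (mr->ibmr.type == IB_MR_TYPE_DMA) {
		/* DMA MRs use the iova directly as a kernel vaddr */
		u8 *src = (dir == RXE_TO_MR_OBJ) ?
				addr : (void *)(uintptr_t)iova;
		u8 *dest = (dir == RXE_TO_MR_OBJ) ?
				(void *)(uintptr_t)iova : addr;

		memcpy(dest, src, length);
		return 0;
	}

	if (mr_check_range(mr, iova, length))
		return -EFAULT;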