From: Bob Pearson <rpearsonhpe@gmail.com>
To: jgg@nvidia.com, leon@kernel.org, zyjzyj2000@gmail.com,
jhack@hpe.com, linux-rdma@vger.kernel.org
Cc: Bob Pearson <rpearsonhpe@gmail.com>
Subject: [PATCH for-next 07/17] RDMA/rxe: Extend rxe_mr_copy to support skb frags
Date: Thu, 27 Oct 2022 13:55:01 -0500 [thread overview]
Message-ID: <20221027185510.33808-8-rpearsonhpe@gmail.com> (raw)
In-Reply-To: <20221027185510.33808-1-rpearsonhpe@gmail.com>
rxe_mr_copy() currently supports copying between an mr and
a contiguous region of kernel memory.
Rename rxe_mr_copy() to rxe_copy_mr_data().
Extend the operations to support copying between an mr and an skb
fragment list. Fix up the callers of rxe_mr_copy() to match the new
API.
This is in preparation for supporting fragmented skbs.
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
drivers/infiniband/sw/rxe/rxe_loc.h | 3 +
drivers/infiniband/sw/rxe/rxe_mr.c | 142 +++++++++++++++++++--------
drivers/infiniband/sw/rxe/rxe_resp.c | 20 ++--
3 files changed, 116 insertions(+), 49 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 87fb052c1d0a..c62fc2613a01 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -71,6 +71,9 @@ int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr);
int rxe_add_frag(struct sk_buff *skb, struct rxe_phys_buf *buf,
int length, int offset);
int rxe_num_mr_frags(struct rxe_mr *mr, u64 iova, int length);
+int rxe_copy_mr_data(struct sk_buff *skb, struct rxe_mr *mr, u64 iova,
+ void *addr, int skb_offset, int length,
+ enum rxe_mr_copy_op op);
int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
enum rxe_mr_copy_op op);
int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma,
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index dd4dbe117c91..fd39b3e17f41 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -388,70 +388,130 @@ int rxe_num_mr_frags(struct rxe_mr *mr, u64 iova, int length)
return num_frags;
}
-/* copy data from a range (vaddr, vaddr+length-1) to or from
- * a mr object starting at iova.
+/**
+ * rxe_copy_mr_data() - transfer data between an MR and a packet
+ * @skb: the packet buffer
+ * @mr: the MR
+ * @iova: the address in the MR
+ * @addr: the address in the packet (TO/FROM MR only)
+ * @length: the length to transfer
+ * @op: copy operation (TO MR, FROM MR or FRAG MR)
+ *
+ * Copy data from a range (addr, addr+length-1) in a packet
+ * to or from a range in an MR object at (iova, iova+length-1).
+ * Or, build a frag list referencing the MR range.
+ *
+ * Caller must verify that the access permissions support the
+ * operation.
+ *
+ * Returns: 0 on success or an error
*/
-int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
- enum rxe_mr_copy_op op)
+int rxe_copy_mr_data(struct sk_buff *skb, struct rxe_mr *mr, u64 iova,
+ void *addr, int skb_offset, int length,
+ enum rxe_mr_copy_op op)
{
- int err;
- int bytes;
- u8 *va;
- struct rxe_map **map;
- struct rxe_phys_buf *buf;
- int m;
- int i;
- size_t offset;
+ struct rxe_phys_buf dmabuf;
+ struct rxe_phys_buf *buf;
+ struct rxe_map **map;
+ size_t buf_offset;
+ int bytes;
+ void *va;
+ int m;
+ int i;
+ int err = 0;
if (length == 0)
return 0;
- if (mr->type == IB_MR_TYPE_DMA) {
- u8 *src, *dest;
-
- src = (op == RXE_COPY_TO_MR) ? addr : ((void *)(uintptr_t)iova);
-
- dest = (op == RXE_COPY_TO_MR) ? ((void *)(uintptr_t)iova) : addr;
+ switch (mr->type) {
+ case IB_MR_TYPE_DMA:
+ va = (void *)(uintptr_t)iova;
+ switch (op) {
+ case RXE_COPY_TO_MR:
+ memcpy(va, addr, length);
+ break;
+ case RXE_COPY_FROM_MR:
+ memcpy(addr, va, length);
+ break;
+ case RXE_FRAG_TO_MR:
+ err = skb_copy_bits(skb, skb_offset, va, length);
+ if (err)
+ return err;
+ break;
+ case RXE_FRAG_FROM_MR:
+ /* limit frag length to PAGE_SIZE */
+ while (length) {
+ dmabuf.addr = iova & PAGE_MASK;
+ buf_offset = iova & ~PAGE_MASK;
+ bytes = PAGE_SIZE - buf_offset;
+ if (bytes > length)
+ bytes = length;
+ err = rxe_add_frag(skb, &dmabuf, bytes,
+ buf_offset);
+ if (err)
+ return err;
+ iova += bytes;
+ length -= bytes;
+ }
+ break;
+ }
+ return 0;
- memcpy(dest, src, length);
+ case IB_MR_TYPE_MEM_REG:
+ case IB_MR_TYPE_USER:
+ break;
- return 0;
+ default:
+ pr_warn("%s: mr type (%d) not supported\n",
+ __func__, mr->type);
+ return -EINVAL;
}
WARN_ON_ONCE(!mr->map);
err = mr_check_range(mr, iova, length);
- if (err) {
- err = -EFAULT;
- goto err1;
- }
+ if (err)
+ return -EFAULT;
- lookup_iova(mr, iova, &m, &i, &offset);
+ lookup_iova(mr, iova, &m, &i, &buf_offset);
map = mr->map + m;
- buf = map[0]->buf + i;
+ buf = map[0]->buf + i;
while (length > 0) {
- u8 *src, *dest;
-
- va = (u8 *)(uintptr_t)buf->addr + offset;
- src = (op == RXE_COPY_TO_MR) ? addr : va;
- dest = (op == RXE_COPY_TO_MR) ? va : addr;
-
- bytes = buf->size - offset;
-
+ va = (void *)(uintptr_t)buf->addr + buf_offset;
+ bytes = buf->size - buf_offset;
if (bytes > length)
bytes = length;
- memcpy(dest, src, bytes);
+ switch (op) {
+ case RXE_COPY_TO_MR:
+ memcpy(va, addr, bytes);
+ break;
+ case RXE_COPY_FROM_MR:
+ memcpy(addr, va, bytes);
+ break;
+ case RXE_FRAG_TO_MR:
+ err = skb_copy_bits(skb, skb_offset, va, bytes);
+ if (err)
+ return err;
+ break;
+ case RXE_FRAG_FROM_MR:
+ err = rxe_add_frag(skb, buf, bytes, buf_offset);
+ if (err)
+ return err;
+ break;
+ }
- length -= bytes;
- addr += bytes;
+ length -= bytes;
+ addr += bytes;
- offset = 0;
+ buf_offset = 0;
+ skb_offset += bytes;
buf++;
i++;
+ /* we won't overrun since we checked range above */
if (i == RXE_BUF_PER_MAP) {
i = 0;
map++;
@@ -460,9 +520,6 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
}
return 0;
-
-err1:
- return err;
}
/* copy data in or out of a wqe, i.e. sg list
@@ -535,7 +592,8 @@ int copy_data(
if (bytes > 0) {
iova = sge->addr + offset;
- err = rxe_mr_copy(mr, iova, addr, bytes, op);
+ err = rxe_copy_mr_data(NULL, mr, iova, addr,
+ 0, bytes, op);
if (err)
goto err2;
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 4b185ddac887..ba359242118a 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -535,12 +535,15 @@ static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
static enum resp_states write_data_in(struct rxe_qp *qp,
struct rxe_pkt_info *pkt)
{
+ struct sk_buff *skb = PKT_TO_SKB(pkt);
enum resp_states rc = RESPST_NONE;
- int err;
int data_len = payload_size(pkt);
+ int err;
+ int skb_offset = 0;
- err = rxe_mr_copy(qp->resp.mr, qp->resp.va + qp->resp.offset,
- payload_addr(pkt), data_len, RXE_COPY_TO_MR);
+ err = rxe_copy_mr_data(skb, qp->resp.mr, qp->resp.va + qp->resp.offset,
+ payload_addr(pkt), skb_offset, data_len,
+ RXE_COPY_TO_MR);
if (err) {
rc = RESPST_ERR_RKEY_VIOLATION;
goto out;
@@ -766,6 +769,7 @@ static enum resp_states read_reply(struct rxe_qp *qp,
int err;
struct resp_res *res = qp->resp.res;
struct rxe_mr *mr;
+ int skb_offset = 0;
if (!res) {
res = rxe_prepare_res(qp, req_pkt, RXE_READ_MASK);
@@ -806,15 +810,17 @@ static enum resp_states read_reply(struct rxe_qp *qp,
if (!skb)
return RESPST_ERR_RNR;
- err = rxe_mr_copy(mr, res->read.va, payload_addr(&ack_pkt),
- payload, RXE_COPY_FROM_MR);
- if (mr)
- rxe_put(mr);
+ err = rxe_copy_mr_data(skb, mr, res->read.va, payload_addr(&ack_pkt),
+ skb_offset, payload, RXE_COPY_FROM_MR);
if (err) {
kfree_skb(skb);
+ rxe_put(mr);
return RESPST_ERR_RKEY_VIOLATION;
}
+ if (mr)
+ rxe_put(mr);
+
if (bth_pad(&ack_pkt)) {
u8 *pad = payload_addr(&ack_pkt) + payload;
--
2.34.1
next prev parent reply other threads:[~2022-10-27 18:56 UTC|newest]
Thread overview: 22+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-10-27 18:54 [PATCH for-next 00/17] RDMA/rxe: Enable scatter/gather support for skbs Bob Pearson
2022-10-27 18:54 ` [PATCH for-next 01/17] RDMA/rxe: Isolate code to fill request roce headers Bob Pearson
2022-10-27 18:54 ` [PATCH for-next 02/17] RDMA/rxe: Isolate request payload code in a subroutine Bob Pearson
2022-10-27 18:54 ` [PATCH for-next 03/17] RDMA/rxe: Isolate code to build request packet Bob Pearson
2022-10-30 18:52 ` kernel test robot
2022-10-27 18:54 ` [PATCH for-next 04/17] RDMA/rxe: Add sg fragment ops Bob Pearson
2022-10-27 18:54 ` [PATCH for-next 05/17] RDMA/rxe: Add rxe_add_frag() to rxe_mr.c Bob Pearson
2022-10-27 18:55 ` [PATCH for-next 06/17] RDMA/rxe: Add routine to compute the number of frags Bob Pearson
2022-10-27 18:55 ` Bob Pearson [this message]
2022-10-27 18:55 ` [PATCH for-next 08/17] RDMA/rxe: Add routine to compute number of frags for dma Bob Pearson
2022-10-27 18:55 ` [PATCH for-next 09/17] RDMA/rxe: Extend copy_data to support skb frags Bob Pearson
2022-10-27 18:55 ` [PATCH for-next 10/17] RDMA/rxe: Replace rxe by qp as a parameter Bob Pearson
2022-10-27 18:55 ` [PATCH for-next 11/17] RDMA/rxe: Extend rxe_init_packet() to support frags Bob Pearson
2022-10-27 18:55 ` [PATCH for-next 12/17] RDMA/rxe: Extend rxe_icrc.c " Bob Pearson
2022-10-27 20:29 ` kernel test robot
2022-10-30 19:33 ` kernel test robot
2022-10-27 18:55 ` [PATCH for-next 13/17] RDMA/rxe: Extend rxe_init_req_packet() for frags Bob Pearson
2022-10-27 18:55 ` [PATCH for-next 14/17] RDMA/rxe: Extend response packets " Bob Pearson
2022-10-30 20:13 ` kernel test robot
2022-10-27 18:55 ` [PATCH for-next 15/17] RDMA/rxe: Extend send/write_data_in() " Bob Pearson
2022-10-27 18:55 ` [PATCH for-next 16/17] RDMA/rxe: Extend do_read() in rxe_comp,c " Bob Pearson
2022-10-27 18:55 ` [PATCH for-next 17/17] RDMA/rxe: Enable sg code in rxe Bob Pearson
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20221027185510.33808-8-rpearsonhpe@gmail.com \
--to=rpearsonhpe@gmail.com \
--cc=jgg@nvidia.com \
--cc=jhack@hpe.com \
--cc=leon@kernel.org \
--cc=linux-rdma@vger.kernel.org \
--cc=zyjzyj2000@gmail.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox