From: Max Gurtovoy <maxg@mellanox.com>
Subject: [PATCH 14/17] nvme-rdma: refactor cmd mapping/unmapping mechanism
Date: Sun, 27 May 2018 18:50:19 +0300
Message-ID: <1527436222-15494-15-git-send-email-maxg@mellanox.com>
In-Reply-To: <1527436222-15494-1-git-send-email-maxg@mellanox.com>

Split the I/O request mapping for the RDMA operation into a structured
two-step procedure:
- map the request data sgl to the RDMA data_sgl
- map the RDMA data_sgl to an NVMe SGL descriptor

This is a preparation patch for adding T10-PI support; a simplified
sketch of the resulting flow is shown below.
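
Roughly, the new flow looks like the following. This is a minimal,
user-space sketch under simplified assumptions: the struct and helper
names (data_sgl, map_data_sgl, map_rq, unmap_data_sgl, setup_cmd) are
illustrative stand-ins for nvme_rdma_sgl and the nvme_rdma_map_data_sgl(),
nvme_rdma_map_rq() and nvme_rdma_setup_cmd() helpers in the diff, not
the kernel API:

/* Illustrative sketch only: trivial stand-in types and bodies. */
#include <errno.h>

struct data_sgl {
	int nents;		/* stand-in for nvme_rdma_sgl */
};

/* Step 1 (cf. nvme_rdma_map_data_sgl()): DMA-map the request's
 * scatterlist; on success *count holds the number of mapped entries. */
static int map_data_sgl(struct data_sgl *sgl, int *count)
{
	sgl->nents = 1;		/* pretend the mapping merged to one entry */
	*count = sgl->nents;
	return 0;
}

/* Step 2 (cf. nvme_rdma_map_rq()): build the NVMe SGL descriptor from
 * the mapped entries: inline data, a single keyed SGL, or an MR. */
static int map_rq(struct data_sgl *sgl, int count)
{
	(void)sgl;
	return count > 0 ? 0 : -EIO;
}

/* cf. nvme_rdma_unmap_data_sgl() */
static void unmap_data_sgl(struct data_sgl *sgl)
{
	sgl->nents = 0;
}

/* cf. nvme_rdma_setup_cmd(): run step 1, then step 2, and unwind
 * step 1 if step 2 fails, keeping map and unmap symmetric. */
static int setup_cmd(struct data_sgl *sgl)
{
	int count, ret;

	ret = map_data_sgl(sgl, &count);
	if (ret)
		return ret;

	ret = map_rq(sgl, count);
	if (ret)
		unmap_data_sgl(sgl);
	return ret;
}

The error unwind mirrors the real nvme_rdma_setup_cmd(): if building
the NVMe descriptor fails, only the freshly mapped data_sgl is torn
down (the out_unmap_data_sgl label in the diff).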

Signed-off-by: Max Gurtovoy <maxg@mellanox.com>
---
 drivers/nvme/host/rdma.c | 114 +++++++++++++++++++++++++++++++----------------
 1 file changed, 75 insertions(+), 39 deletions(-)

diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 3b63811..a54de37 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1055,28 +1055,35 @@ static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue, u32 rkey,
 	return ib_post_send(queue->qp, &wr, &bad_wr);
 }
 
-static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
-		struct request *rq)
+static void nvme_rdma_unmap_data_sgl(struct nvme_rdma_queue *queue,
+		struct nvme_rdma_sgl *sgl, struct request *rq)
 {
-	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
-	struct nvme_rdma_sgl *sgl = &req->data_sgl;
 	struct nvme_rdma_device *dev = queue->device;
 	struct ib_device *ibdev = dev->dev;
 
+	ib_dma_unmap_sg(ibdev, sgl->sg_table.sgl,
+			sgl->nents, rq_data_dir(rq) ==
+				    WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+	sg_free_table_chained(&sgl->sg_table, true);
+}
+
+static void nvme_rdma_unmap_cmd(struct nvme_rdma_queue *queue,
+		struct request *rq)
+{
+	struct nvme_rdma_request *req;
+
 	if (!blk_rq_payload_bytes(rq))
 		return;
 
-	if (sgl->mr) {
-		ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, sgl->mr);
-		sgl->mr = NULL;
+	req = blk_mq_rq_to_pdu(rq);
+	if (req->data_sgl.mr) {
+		ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->data_sgl.mr);
+		req->data_sgl.mr = NULL;
 	}
 
-	ib_dma_unmap_sg(ibdev, sgl->sg_table.sgl,
-			sgl->nents, rq_data_dir(rq) ==
-				    WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-
+	nvme_rdma_unmap_data_sgl(queue, &req->data_sgl, rq);
 	nvme_cleanup_cmd(rq);
-	sg_free_table_chained(&sgl->sg_table, true);
 }
 
 static void nvme_rdma_set_keyed_sgl(u64 addr, u64 length, u32 key,
@@ -1171,7 +1178,49 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
 	return 0;
 }
 
-static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
+static int nvme_rdma_map_data_sgl(struct nvme_rdma_sgl *sgl,
+		struct request *rq, struct ib_device *ibdev, int *count)
+{
+	int ret;
+
+	sgl->sg_table.sgl = sgl->first_sgl;
+	ret = sg_alloc_table_chained(&sgl->sg_table,
+			blk_rq_nr_phys_segments(rq), sgl->sg_table.sgl);
+	if (unlikely(ret))
+		return -ENOMEM;
+
+	sgl->nents = blk_rq_map_sg(rq->q, rq, sgl->sg_table.sgl);
+
+	*count = ib_dma_map_sg(ibdev, sgl->sg_table.sgl, sgl->nents,
+		    rq_data_dir(rq) == WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+	if (unlikely(*count <= 0)) {
+		ret = -EIO;
+		goto out_free_table;
+	}
+
+	return 0;
+
+out_free_table:
+	sg_free_table_chained(&sgl->sg_table, true);
+	return ret;
+}
+
+static int nvme_rdma_map_rq(struct nvme_rdma_queue *queue,
+		struct nvme_rdma_request *req, struct request *rq,
+		struct nvme_command *c, int nents)
+{
+	if (nents == 1) {
+		if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) &&
+		    blk_rq_payload_bytes(rq) <=	nvme_rdma_inline_data_size(queue))
+			return nvme_rdma_map_sg_inline(queue, req, c);
+		if (queue->device->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)
+			return nvme_rdma_map_sg_single(queue, req, c);
+	}
+
+	return nvme_rdma_map_sg_fr(queue, req, c, nents);
+}
+
+static int nvme_rdma_setup_cmd(struct nvme_rdma_queue *queue,
 		struct request *rq, struct nvme_command *c)
 {
 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
@@ -1188,32 +1237,19 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 	if (!blk_rq_payload_bytes(rq))
 		return nvme_rdma_set_sg_null(c);
 
-	sgl->sg_table.sgl = sgl->first_sgl;
-	ret = sg_alloc_table_chained(&sgl->sg_table,
-			blk_rq_nr_phys_segments(rq), sgl->sg_table.sgl);
-	if (ret)
-		return -ENOMEM;
-
-	sgl->nents = blk_rq_map_sg(rq->q, rq, sgl->sg_table.sgl);
-
-	count = ib_dma_map_sg(ibdev, sgl->sg_table.sgl, sgl->nents,
-		    rq_data_dir(rq) == WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-	if (unlikely(count <= 0)) {
-		sg_free_table_chained(&sgl->sg_table, true);
-		return -EIO;
-	}
+	ret = nvme_rdma_map_data_sgl(sgl, rq, ibdev, &count);
+	if (unlikely(ret))
+		return ret;
 
-	if (count == 1) {
-		if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) &&
-		    blk_rq_payload_bytes(rq) <=
-				nvme_rdma_inline_data_size(queue))
-			return nvme_rdma_map_sg_inline(queue, req, c);
+	ret = nvme_rdma_map_rq(queue, req, rq, c, count);
+	if (unlikely(ret))
+		goto out_unmap_data_sgl;
 
-		if (dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)
-			return nvme_rdma_map_sg_single(queue, req, c);
-	}
+	return 0;
 
-	return nvme_rdma_map_sg_fr(queue, req, c, count);
+out_unmap_data_sgl:
+	nvme_rdma_unmap_data_sgl(queue, sgl, rq);
+	return ret;
 }
 
 static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
@@ -1645,7 +1681,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	blk_mq_start_request(rq);
 
-	err = nvme_rdma_map_data(queue, rq, c);
+	err = nvme_rdma_setup_cmd(queue, rq, c);
 	if (unlikely(err < 0)) {
 		dev_err(queue->ctrl->ctrl.device,
 			     "Failed to map data (%d)\n", err);
@@ -1661,7 +1697,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 	err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
 			req->data_sgl.mr ? &req->data_sgl.reg_wr.wr : NULL);
 	if (unlikely(err)) {
-		nvme_rdma_unmap_data(queue, rq);
+		nvme_rdma_unmap_cmd(queue, rq);
 		goto err;
 	}
 
@@ -1697,7 +1733,7 @@ static void nvme_rdma_complete_rq(struct request *rq)
 {
 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
 
-	nvme_rdma_unmap_data(req->queue, rq);
+	nvme_rdma_unmap_cmd(req->queue, rq);
 	nvme_complete_rq(rq);
 }
 
-- 
1.8.3.1
