public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
From: Logan Gunthorpe <logang@deltatee.com>
To: linux-kernel@vger.kernel.org, linux-nvme@lists.infradead.org
Cc: Christoph Hellwig <hch@lst.de>, Sagi Grimberg <sagi@grimberg.me>,
	James Smart <james.smart@broadcom.com>,
	Logan Gunthorpe <logang@deltatee.com>
Subject: [PATCH 4/4] nvmet-fc: Use new SGL alloc/free helper for requests
Date: Thu, 29 Mar 2018 10:07:21 -0600	[thread overview]
Message-ID: <20180329160721.4691-5-logang@deltatee.com> (raw)
In-Reply-To: <20180329160721.4691-1-logang@deltatee.com>

Use the new helpers introduced earlier to allocate the SGLs for
the request.

To do this, we drop the apparently redundant data_sg and data_sg_cnt
members as they are identical to the existing req.sg and req.sg_cnt.

Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Cc: James Smart <james.smart@broadcom.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Sagi Grimberg <sagi@grimberg.me>
---
 drivers/nvme/target/fc.c | 38 +++++++++++---------------------------
 1 file changed, 11 insertions(+), 27 deletions(-)

diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 9f2f8ab83158..00135ff7d1c2 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -74,8 +74,6 @@ struct nvmet_fc_fcp_iod {
 	struct nvme_fc_cmd_iu		cmdiubuf;
 	struct nvme_fc_ersp_iu		rspiubuf;
 	dma_addr_t			rspdma;
-	struct scatterlist		*data_sg;
-	int				data_sg_cnt;
 	u32				offset;
 	enum nvmet_fcp_datadir		io_dir;
 	bool				active;
@@ -1696,43 +1694,34 @@ EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
 static int
 nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
 {
-	struct scatterlist *sg;
-	unsigned int nent;
 	int ret;
 
-	sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent);
-	if (!sg)
-		goto out;
+	ret = nvmet_req_alloc_sgl(&fod->req, &fod->queue->nvme_sq);
+	if (ret < 0)
+		return NVME_SC_INTERNAL;
 
-	fod->data_sg = sg;
-	fod->data_sg_cnt = nent;
-	ret = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
+	ret = fc_dma_map_sg(fod->tgtport->dev, fod->req.sg, fod->req.sg_cnt,
 			    ((fod->io_dir == NVMET_FCP_WRITE) ?
 				    DMA_FROM_DEVICE : DMA_TO_DEVICE));
 			    /* note: write from initiator perspective */
 	if (!ret)
-		goto out;
+		return NVME_SC_INTERNAL;
 
 	return 0;
-
-out:
-	return NVME_SC_INTERNAL;
 }
 
 static void
 nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
 {
-	if (!fod->data_sg || !fod->data_sg_cnt)
+	if (!fod->req.sg || !fod->req.sg_cnt)
 		return;
 
-	fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
+	fc_dma_unmap_sg(fod->tgtport->dev, fod->req.sg, fod->req.sg_cnt,
 				((fod->io_dir == NVMET_FCP_WRITE) ?
 					DMA_FROM_DEVICE : DMA_TO_DEVICE));
-	sgl_free(fod->data_sg);
-	fod->data_sg = NULL;
-	fod->data_sg_cnt = 0;
-}
 
+	nvmet_req_free_sgl(&fod->req);
+}
 
 static bool
 queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
@@ -1871,7 +1860,7 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
 	fcpreq->fcp_error = 0;
 	fcpreq->rsplen = 0;
 
-	fcpreq->sg = &fod->data_sg[fod->offset / PAGE_SIZE];
+	fcpreq->sg = &fod->req.sg[fod->offset / PAGE_SIZE];
 	fcpreq->sg_cnt = DIV_ROUND_UP(tlen, PAGE_SIZE);
 
 	/*
@@ -2083,7 +2072,7 @@ __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
 		 * There may be a status where data still was intended to
 		 * be moved
 		 */
-		if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
+		if ((fod->io_dir == NVMET_FCP_READ) && (fod->req.sg_cnt)) {
 			/* push the data over before sending rsp */
 			nvmet_fc_transfer_fcp_data(tgtport, fod,
 						NVMET_FCOP_READDATA);
@@ -2153,9 +2142,6 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
 	/* clear any response payload */
 	memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
 
-	fod->data_sg = NULL;
-	fod->data_sg_cnt = 0;
-
 	ret = nvmet_req_init(&fod->req,
 				&fod->queue->nvme_cq,
 				&fod->queue->nvme_sq,
@@ -2178,8 +2164,6 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
 			return;
 		}
 	}
-	fod->req.sg = fod->data_sg;
-	fod->req.sg_cnt = fod->data_sg_cnt;
 	fod->offset = 0;
 
 	if (fod->io_dir == NVMET_FCP_WRITE) {
-- 
2.11.0

  parent reply	other threads:[~2018-03-29 16:07 UTC|newest]

Thread overview: 18+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2018-03-29 16:07 [PATCH 0/4] SGL alloc and free helper functions for requests Logan Gunthorpe
2018-03-29 16:07 ` [PATCH 1/4] nvmet: Introduce helper functions to allocate and free request SGLs Logan Gunthorpe
2018-03-29 16:07 ` [PATCH 2/4] nvmet-rdma: Use new SGL alloc/free helper for requests Logan Gunthorpe
2018-04-04 12:43   ` Sagi Grimberg
2018-04-04 16:47     ` Logan Gunthorpe
2018-03-29 16:07 ` [PATCH 3/4] nvmet-fc: Don't use the count returned by the dma_map_sg call Logan Gunthorpe
2018-03-29 16:14   ` Bart Van Assche
2018-03-29 16:15     ` Logan Gunthorpe
2018-03-29 16:38     ` Logan Gunthorpe
2018-03-29 16:24   ` James Smart
2018-03-29 16:30     ` Logan Gunthorpe
2018-03-29 16:34       ` James Smart
2018-04-04 12:45   ` Sagi Grimberg
2018-03-29 16:07 ` Logan Gunthorpe [this message]
2018-03-29 16:52   ` [PATCH 4/4] nvmet-fc: Use new SGL alloc/free helper for requests James Smart
2018-03-29 17:02     ` Logan Gunthorpe
2018-03-29 17:39       ` James Smart
2018-03-29 18:15       ` Christoph Hellwig

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20180329160721.4691-5-logang@deltatee.com \
    --to=logang@deltatee.com \
    --cc=hch@lst.de \
    --cc=james.smart@broadcom.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-nvme@lists.infradead.org \
    --cc=sagi@grimberg.me \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox