linux-nvme.lists.infradead.org archive mirror
 help / color / mirror / Atom feed
From: hch@lst.de (Christoph Hellwig)
Subject: [PATCH 6/7] nvme: properly free resources for cancelled command
Date: Mon, 21 Sep 2015 11:40:52 -0700	[thread overview]
Message-ID: <1442860853-26447-7-git-send-email-hch@lst.de> (raw)
In-Reply-To: <1442860853-26447-1-git-send-email-hch@lst.de>

We need to move freeing of resources to the ->complete handler to ensure
they are also freed when we cancel the command.

Clear the QUEUE_FLAG_SAME_COMP flag to ensure we don't try to bounce to
another CPU because we now have a ->complete handler.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/block/nvme-core.c | 57 +++++++++++++++++++++++++++++++----------------
 1 file changed, 38 insertions(+), 19 deletions(-)

diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 5649f8f..6cc8f58 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -471,8 +471,6 @@ static void nvme_finish_cmd(struct nvme_queue *nvmeq,
 		struct nvme_completion *cqe)
 {
 	struct request *req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id);
-	struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
-	struct nvme_iod *iod = cmd_rq->iod;
 	u16 status = le16_to_cpup(&cqe->status) >> 1;
 
 	if (unlikely(status)) {
@@ -497,23 +495,6 @@ static void nvme_finish_cmd(struct nvme_queue *nvmeq,
 		req->special = (void *)(uintptr_t)result;
 	}
 
-	if (cmd_rq->aborted)
-		dev_warn(nvmeq->dev->dev,
-			"completing aborted command with status:%04x\n",
-			status);
-
-	if (iod->nents) {
-		dma_unmap_sg(nvmeq->dev->dev, iod->sg, iod->nents,
-			rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		if (blk_integrity_rq(req)) {
-			if (!rq_data_dir(req))
-				nvme_dif_remap(req, nvme_dif_complete);
-			dma_unmap_sg(nvmeq->dev->dev, iod->meta_sg, 1,
-				rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		}
-	}
-	nvme_free_iod(nvmeq->dev, iod);
-
 	blk_mq_complete_request(req, status);
 }
 
@@ -808,6 +789,34 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	return BLK_MQ_RQ_QUEUE_BUSY;
 }
 
+static void nvme_complete_rq(struct request *req)
+{
+	struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
+	struct nvme_queue *nvmeq = cmd->nvmeq;
+	struct nvme_iod *iod = cmd->iod;
+
+	if (cmd->aborted) {
+		dev_warn(nvmeq->dev->dev,
+			"completing aborted command with status:%04x\n",
+			req->errors);
+	}
+
+	if (iod->nents) {
+		enum dma_data_direction dir = rq_data_dir(req) ?
+					DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+		dma_unmap_sg(nvmeq->dev->dev, iod->sg, iod->nents, dir);
+		if (blk_integrity_rq(req)) {
+			if (!rq_data_dir(req))
+				nvme_dif_remap(req, nvme_dif_complete);
+			dma_unmap_sg(nvmeq->dev->dev, iod->meta_sg, 1, dir);
+		}
+	}
+
+	nvme_free_iod(nvmeq->dev, iod);
+	blk_mq_end_request(req, req->errors);
+}
+
 static int nvme_process_cq(struct nvme_queue *nvmeq)
 {
 	u16 head, phase;
@@ -1543,6 +1552,7 @@ static int nvme_shutdown_ctrl(struct nvme_dev *dev)
 
 static struct blk_mq_ops nvme_mq_admin_ops = {
 	.queue_rq	= nvme_queue_rq,
+	.complete	= nvme_complete_rq,
 	.map_queue	= blk_mq_map_queue,
 	.init_hctx	= nvme_admin_init_hctx,
 	.exit_hctx      = nvme_admin_exit_hctx,
@@ -1552,6 +1562,7 @@ static struct blk_mq_ops nvme_mq_admin_ops = {
 
 static struct blk_mq_ops nvme_mq_ops = {
 	.queue_rq	= nvme_queue_rq,
+	.complete	= nvme_complete_rq,
 	.map_queue	= blk_mq_map_queue,
 	.init_hctx	= nvme_init_hctx,
 	.init_request	= nvme_init_request,
@@ -1591,6 +1602,10 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
 			dev->admin_q = NULL;
 			return -ENODEV;
 		}
+
+		/* we assume that we always have a local completion queue */
+		queue_flag_clear_unlocked(QUEUE_FLAG_SAME_COMP, dev->admin_q);
+
 		dev->admin_q->queuedata = dev;
 	} else
 		blk_mq_unfreeze_queue(dev->admin_q);
@@ -2017,6 +2032,10 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
 		goto out_free_ns;
 	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
+
+	/* we assume that we always have a local completion queue */
+	queue_flag_clear_unlocked(QUEUE_FLAG_SAME_COMP, ns->queue);
+
 	ns->dev = dev;
 	ns->queue->queuedata = ns;
 
-- 
1.9.1

  parent reply	other threads:[~2015-09-21 18:40 UTC|newest]

Thread overview: 13+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2015-09-21 18:40 nvme completion path optimizations and fixes Christoph Hellwig
2015-09-21 18:40 ` [PATCH 1/7] blk-mq: fix racy updates of rq->errors Christoph Hellwig
2015-09-27  7:35   ` Sagi Grimberg
2015-09-21 18:40 ` [PATCH 2/7] nvme: switch AEN processing to use blk_execute_rq_nowait Christoph Hellwig
2015-09-23 23:16   ` Keith Busch
2015-09-24 13:38     ` Christoph Hellwig
2015-09-24 13:51       ` Keith Busch
2015-09-25 22:11         ` Christoph Hellwig
2015-09-21 18:40 ` [PATCH 3/7] nvme: switch delete SQ/CQ to blk_execute_rq_nowait Christoph Hellwig
2015-09-21 18:40 ` [PATCH 4/7] nvme: switch abort " Christoph Hellwig
2015-09-21 18:40 ` [PATCH 5/7] nvme: simplify completion handling Christoph Hellwig
2015-09-21 18:40 ` Christoph Hellwig [this message]
2015-09-21 18:40 ` [PATCH 7/7] nvme: micro optimize nvme_submit_priv Christoph Hellwig

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1442860853-26447-7-git-send-email-hch@lst.de \
    --to=hch@lst.de \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).