From: hch@lst.de (Christoph Hellwig)
Subject: [PATCH 09/10] nvme: properly free resources for cancelled command
Date: Sun, 27 Sep 2015 21:01:57 +0200 [thread overview]
Message-ID: <1443380518-6829-10-git-send-email-hch@lst.de> (raw)
In-Reply-To: <1443380518-6829-1-git-send-email-hch@lst.de>
We need to move freeing of resources to the ->complete handler to ensure
they are also freed when we cancel the command.
Clear the QUEUE_FLAG_SAME_COMP flag to ensure we don't try to bounce to
another CPU because we now have a ->complete handler.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
drivers/block/nvme-core.c | 57 +++++++++++++++++++++++++++++++----------------
1 file changed, 38 insertions(+), 19 deletions(-)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 94c1ec2..e882915 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -471,8 +471,6 @@ static void nvme_finish_cmd(struct nvme_queue *nvmeq,
struct nvme_completion *cqe)
{
struct request *req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id);
- struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
- struct nvme_iod *iod = cmd_rq->iod;
u16 status = le16_to_cpup(&cqe->status) >> 1;
if (unlikely(status)) {
@@ -497,23 +495,6 @@ static void nvme_finish_cmd(struct nvme_queue *nvmeq,
req->special = (void *)(uintptr_t)result;
}
- if (cmd_rq->aborted)
- dev_warn(nvmeq->dev->dev,
- "completing aborted command with status:%04x\n",
- status);
-
- if (iod->nents) {
- dma_unmap_sg(nvmeq->dev->dev, iod->sg, iod->nents,
- rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
- if (blk_integrity_rq(req)) {
- if (!rq_data_dir(req))
- nvme_dif_remap(req, nvme_dif_complete);
- dma_unmap_sg(nvmeq->dev->dev, iod->meta_sg, 1,
- rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
- }
- }
- nvme_free_iod(nvmeq->dev, iod);
-
blk_mq_complete_request(req, status);
}
@@ -808,6 +789,34 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_MQ_RQ_QUEUE_BUSY;
}
+static void nvme_complete_rq(struct request *req)
+{
+ struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
+ struct nvme_queue *nvmeq = cmd->nvmeq;
+ struct nvme_iod *iod = cmd->iod;
+
+ if (cmd->aborted) {
+ dev_warn(nvmeq->dev->dev,
+ "completing aborted command with status:%04x\n",
+ req->errors);
+ }
+
+ if (iod->nents) {
+ enum dma_data_direction dir = rq_data_dir(req) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+ dma_unmap_sg(nvmeq->dev->dev, iod->sg, iod->nents, dir);
+ if (blk_integrity_rq(req)) {
+ if (!rq_data_dir(req))
+ nvme_dif_remap(req, nvme_dif_complete);
+ dma_unmap_sg(nvmeq->dev->dev, iod->meta_sg, 1, dir);
+ }
+ }
+
+ nvme_free_iod(nvmeq->dev, iod);
+ blk_mq_end_request(req, req->errors);
+}
+
static int nvme_process_cq(struct nvme_queue *nvmeq)
{
u16 head, phase;
@@ -1549,6 +1558,7 @@ static int nvme_shutdown_ctrl(struct nvme_dev *dev)
static struct blk_mq_ops nvme_mq_admin_ops = {
.queue_rq = nvme_queue_rq,
+ .complete = nvme_complete_rq,
.map_queue = blk_mq_map_queue,
.init_hctx = nvme_admin_init_hctx,
.exit_hctx = nvme_admin_exit_hctx,
@@ -1558,6 +1568,7 @@ static struct blk_mq_ops nvme_mq_admin_ops = {
static struct blk_mq_ops nvme_mq_ops = {
.queue_rq = nvme_queue_rq,
+ .complete = nvme_complete_rq,
.map_queue = blk_mq_map_queue,
.init_hctx = nvme_init_hctx,
.init_request = nvme_init_request,
@@ -1597,6 +1608,10 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
dev->admin_q = NULL;
return -ENODEV;
}
+
+ /* we assume that we always have a local completion queue */
+ queue_flag_clear_unlocked(QUEUE_FLAG_SAME_COMP, dev->admin_q);
+
dev->admin_q->queuedata = dev;
} else
blk_mq_unfreeze_queue(dev->admin_q);
@@ -2023,6 +2038,10 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
goto out_free_ns;
queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
+
+ /* we assume that we always have a local completion queue */
+ queue_flag_clear_unlocked(QUEUE_FLAG_SAME_COMP, ns->queue);
+
ns->dev = dev;
ns->queue->queuedata = ns;
--
1.9.1
next prev parent reply other threads:[~2015-09-27 19:01 UTC|newest]
Thread overview: 17+ messages / expand[flat|nested] mbox.gz Atom feed top
2015-09-27 19:01 nvme completion path optimizations and fixes V2 Christoph Hellwig
2015-09-27 19:01 ` [PATCH 01/10] blk-mq: avoid setting hctx->tags->cpumask before allocation Christoph Hellwig
2015-09-27 19:01 ` [PATCH 02/10] blk-mq: fix racy updates of rq->errors Christoph Hellwig
2015-10-01 7:59 ` Christoph Hellwig
2015-10-01 8:12 ` Jens Axboe
2015-09-27 19:01 ` [PATCH 03/10] blk-mq: factor out a helper to iterate all tags for a request_queue Christoph Hellwig
2015-09-27 19:01 ` [PATCH 04/10] blk-mq: kill undead requests during CPU hotplug notify Christoph Hellwig
2015-09-28 17:39 ` Keith Busch
2015-09-28 17:46 ` Christoph Hellwig
2015-09-28 18:15 ` Keith Busch
2015-10-01 7:39 ` Christoph Hellwig
2015-09-27 19:01 ` [PATCH 05/10] nvme: switch AEN processing to use blk_execute_rq_nowait Christoph Hellwig
2015-09-27 19:01 ` [PATCH 06/10] nvme: switch delete SQ/CQ to blk_execute_rq_nowait Christoph Hellwig
2015-09-27 19:01 ` [PATCH 07/10] nvme: switch abort " Christoph Hellwig
2015-09-27 19:01 ` [PATCH 08/10] nvme: simplify completion handling Christoph Hellwig
2015-09-27 19:01 ` Christoph Hellwig [this message]
2015-09-27 19:01 ` [PATCH 10/10] nvme: micro optimize nvme_submit_priv Christoph Hellwig
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1443380518-6829-10-git-send-email-hch@lst.de \
--to=hch@lst.de \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).