From: Jens Axboe <axboe@kernel.dk>
To: linux-block@vger.kernel.org
Cc: hch@infradead.org, Jens Axboe <axboe@kernel.dk>
Subject: [PATCH 4/4] nvme: add support for mq_ops->queue_rqs()
Date: Tue, 16 Nov 2021 20:38:07 -0700 [thread overview]
Message-ID: <20211117033807.185715-5-axboe@kernel.dk> (raw)
In-Reply-To: <20211117033807.185715-1-axboe@kernel.dk>
This enables the block layer to send us a full plug list of requests
that need submitting. The block layer guarantees that they all belong
to the same queue, but we do have to check the hardware queue mapping
for each request.
If errors are encountered, leave them in the passed in list. Then the
block layer will handle them individually.
This is good for about a 4% improvement in peak performance, taking us
from 9.6M to 10M IOPS/core.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
drivers/nvme/host/pci.c | 67 +++++++++++++++++++++++++++++++++++++++++
1 file changed, 67 insertions(+)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d2b654fc3603..2eedd04b1f90 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1004,6 +1004,72 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
return ret;
}
+static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct request **rqlist)
+{
+ spin_lock(&nvmeq->sq_lock);
+ while (!rq_list_empty(*rqlist)) {
+ struct request *req = rq_list_pop(rqlist);
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+ nvme_copy_cmd(nvmeq, absolute_pointer(&iod->cmd));
+ }
+ nvme_write_sq_db(nvmeq, true);
+ spin_unlock(&nvmeq->sq_lock);
+}
+
+static void nvme_queue_rqs(struct request **rqlist)
+{
+ struct request *requeue_list = NULL, *req, *prev = NULL;
+ struct blk_mq_hw_ctx *hctx;
+ struct nvme_queue *nvmeq;
+ struct nvme_ns *ns;
+
+restart:
+ req = rq_list_peek(rqlist);
+ hctx = req->mq_hctx;
+ nvmeq = hctx->driver_data;
+ ns = hctx->queue->queuedata;
+
+ /*
+ * We should not need to do this, but we're still using this to
+ * ensure we can drain requests on a dying queue.
+ */
+ if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
+ return;
+
+ rq_list_for_each(rqlist, req) {
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ blk_status_t ret;
+
+ if (unlikely(!nvme_check_ready(&nvmeq->dev->ctrl, req, true)))
+ goto requeue;
+
+ if (req->mq_hctx != hctx) {
+ /* detach rest of list, and submit */
+ prev->rq_next = NULL;
+ nvme_submit_cmds(nvmeq, rqlist);
+ /* req now start of new list for this hw queue */
+ *rqlist = req;
+ goto restart;
+ }
+
+ hctx->tags->rqs[req->tag] = req;
+ ret = nvme_prep_rq(nvmeq->dev, ns, req, &iod->cmd);
+ if (ret == BLK_STS_OK) {
+ prev = req;
+ continue;
+ }
+requeue:
+ /* detach 'req' and add to remainder list */
+ if (prev)
+ prev->rq_next = req->rq_next;
+ rq_list_add(&requeue_list, req);
+ }
+
+ nvme_submit_cmds(nvmeq, rqlist);
+ *rqlist = requeue_list;
+}
+
static __always_inline void nvme_pci_unmap_rq(struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
@@ -1741,6 +1807,7 @@ static const struct blk_mq_ops nvme_mq_admin_ops = {
static const struct blk_mq_ops nvme_mq_ops = {
.queue_rq = nvme_queue_rq,
+ .queue_rqs = nvme_queue_rqs,
.complete = nvme_pci_complete_rq,
.commit_rqs = nvme_commit_rqs,
.init_hctx = nvme_init_hctx,
--
2.33.1
next prev parent reply other threads:[~2021-11-17 3:38 UTC|newest]
Thread overview: 32+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-11-17 3:38 [PATCHSET 0/4] Add support for list issue Jens Axboe
2021-11-17 3:38 ` [PATCH 1/4] block: add mq_ops->queue_rqs hook Jens Axboe
2021-11-17 6:25 ` Christoph Hellwig
2021-11-17 15:41 ` Jens Axboe
2021-11-17 8:20 ` Ming Lei
2021-11-17 15:43 ` Jens Axboe
2021-11-17 20:48 ` Keith Busch
2021-11-17 23:59 ` Ming Lei
2021-11-17 20:41 ` Keith Busch
2021-11-18 0:18 ` Ming Lei
2021-11-18 2:02 ` Keith Busch
2021-11-18 2:14 ` Ming Lei
2021-11-17 3:38 ` [PATCH 2/4] nvme: split command copy into a helper Jens Axboe
2021-11-17 6:15 ` Christoph Hellwig
2021-11-17 15:44 ` Jens Axboe
2021-11-18 7:54 ` Chaitanya Kulkarni
2021-11-17 3:38 ` [PATCH 3/4] nvme: separate command prep and issue Jens Axboe
2021-11-17 6:17 ` Christoph Hellwig
2021-11-17 15:45 ` Jens Axboe
2021-11-18 7:59 ` Chaitanya Kulkarni
2021-11-17 3:38 ` Jens Axboe [this message]
2021-11-17 8:39 ` [PATCH 4/4] nvme: add support for mq_ops->queue_rqs() Christoph Hellwig
2021-11-17 15:55 ` Jens Axboe
2021-11-17 15:58 ` Jens Axboe
2021-11-17 19:41 ` Keith Busch
-- strict thread matches above, loose matches on Subject: below --
2021-12-03 21:45 [PATCHSET v2 0/4] Add support for list issue Jens Axboe
2021-12-03 21:45 ` [PATCH 4/4] nvme: add support for mq_ops->queue_rqs() Jens Axboe
2021-12-04 10:47 ` Hannes Reinecke
2021-12-06 7:40 ` Christoph Hellwig
2021-12-06 16:33 ` Jens Axboe
2021-12-16 16:05 [PATCHSET v4 0/4] Add support for list issue Jens Axboe
2021-12-16 16:05 ` [PATCH 4/4] nvme: add support for mq_ops->queue_rqs() Jens Axboe
2021-12-16 16:38 [PATCHSET v5 0/4] Add support for list issue Jens Axboe
2021-12-16 16:39 ` [PATCH 4/4] nvme: add support for mq_ops->queue_rqs() Jens Axboe
2021-12-16 17:53 ` Christoph Hellwig
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20211117033807.185715-5-axboe@kernel.dk \
--to=axboe@kernel.dk \
--cc=hch@infradead.org \
--cc=linux-block@vger.kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).