From: Jens Axboe <axboe@kernel.dk>
To: linux-block@vger.kernel.org
Cc: Jens Axboe <axboe@kernel.dk>
Subject: [PATCH 08/11] blk-mq: when polling for IO, look for any completion
Date: Tue, 13 Nov 2018 08:42:30 -0700
Message-ID: <20181113154233.15256-9-axboe@kernel.dk>
In-Reply-To: <20181113154233.15256-1-axboe@kernel.dk>

If we want to support async IO polling, then we have to allow
finding completions that aren't just for the one request we are
looking for. Always pass -1 to the mq_ops->poll() helper, and
have it return how many events were found in this poll loop.

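To make the new contract concrete, here is a minimal caller-side
sketch (illustrative only, not part of this patch; it mirrors the
__blk_mq_poll() loop below, with the hybrid sleep and task state
handling elided):

	/*
	 * Sketch: busy-poll for *any* completion. ->poll() reaps
	 * whatever is pending and returns how many events it found;
	 * the caller checks separately whether its own IO is done.
	 */
	static int poll_for_any_sketch(struct blk_mq_hw_ctx *hctx)
	{
		struct request_queue *q = hctx->queue;

		while (!need_resched()) {
			int ret = q->mq_ops.poll(hctx, -1U);

			if (ret > 0)
				return ret;	/* completions reaped */
			cpu_relax();
		}
		return 0;
	}

This is what lets one poller complete IO on behalf of any pending
request, which async polling depends on.
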
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 block/blk-mq.c          | 69 +++++++++++++++++++++++------------------
 drivers/nvme/host/pci.c | 32 +++++++++----------
 2 files changed, 54 insertions(+), 47 deletions(-)

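(Note for reviewers, not part of the diff: with the tag argument
always -1U, the driver-side contract for ->poll() becomes "process
everything pending and return the count". A hypothetical minimal
driver implementation under the new contract; the mydrv_* names
are made up for illustration:

	static int mydrv_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
	{
		struct mydrv_queue *mq = hctx->driver_data;
		int found = 0;

		/* reap every pending completion, not one specific tag */
		while (mydrv_cqe_pending(mq)) {		/* hypothetical */
			mydrv_complete_one(mq);		/* hypothetical */
			found++;
		}
		return found;	/* 'tag' is ignored; callers pass -1U */
	}

The nvme_process_cq()/nvme_complete_cqes() changes below implement
this same shape.)
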
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f8c2e6544903..03b1af0151ca 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3266,9 +3266,7 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
 	 *  0:	use half of prev avg
 	 * >0:	use this specific value
 	 */
-	if (q->poll_nsec == -1)
-		return false;
-	else if (q->poll_nsec > 0)
+	if (q->poll_nsec > 0)
 		nsecs = q->poll_nsec;
 	else
 		nsecs = blk_mq_poll_nsecs(q, hctx, rq);
@@ -3305,21 +3303,36 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
 	return true;
 }
 
-static int __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
+static bool blk_mq_poll_hybrid(struct request_queue *q,
+			       struct blk_mq_hw_ctx *hctx, blk_qc_t cookie)
+{
+	struct request *rq;
+
+	if (q->poll_nsec == -1)
+		return false;
+
+	if (!blk_qc_t_is_internal(cookie))
+		rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
+	else {
+		rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
+		/*
+		 * With scheduling, if the request has completed, we'll
+		 * get a NULL return here, as we clear the sched tag when
+		 * that happens. The request still remains valid, like always,
+		 * so we should be safe with just the NULL check.
+		 */
+		if (!rq)
+			return false;
+	}
+
+	return blk_mq_poll_hybrid_sleep(q, hctx, rq);
+}
+
+static int __blk_mq_poll(struct blk_mq_hw_ctx *hctx)
 {
 	struct request_queue *q = hctx->queue;
 	long state;
 
-	/*
-	 * If we sleep, have the caller restart the poll loop to reset
-	 * the state. Like for the other success return cases, the
-	 * caller is responsible for checking if the IO completed. If
-	 * the IO isn't complete, we'll get called again and will go
-	 * straight to the busy poll loop.
-	 */
-	if (blk_mq_poll_hybrid_sleep(q, hctx, rq))
-		return 1;
-
 	hctx->poll_considered++;
 
 	state = current->state;
@@ -3328,7 +3341,7 @@ static int __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
 
 		hctx->poll_invoked++;
 
-		ret = q->mq_ops.poll(hctx, rq->tag);
+		ret = q->mq_ops.poll(hctx, -1U);
 		if (ret > 0) {
 			hctx->poll_success++;
 			set_current_state(TASK_RUNNING);
@@ -3352,27 +3365,23 @@ static int __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
 static int blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
 {
 	struct blk_mq_hw_ctx *hctx;
-	struct request *rq;
 
 	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
 		return 0;
 
 	hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
-	if (!blk_qc_t_is_internal(cookie))
-		rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
-	else {
-		rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
-		/*
-		 * With scheduling, if the request has completed, we'll
-		 * get a NULL return here, as we clear the sched tag when
-		 * that happens. The request still remains valid, like always,
-		 * so we should be safe with just the NULL check.
-		 */
-		if (!rq)
-			return 0;
-	}
-
-	return __blk_mq_poll(hctx, rq);
+
+	/*
+	 * If we sleep, have the caller restart the poll loop to reset
+	 * the state. Like for the other success return cases, the
+	 * caller is responsible for checking if the IO completed. If
+	 * the IO isn't complete, we'll get called again and will go
+	 * straight to the busy poll loop.
+	 */
+	if (blk_mq_poll_hybrid(q, hctx, cookie))
+		return 1;
+
+	return __blk_mq_poll(hctx);
 }
 
 unsigned int blk_mq_rq_cpu(struct request *rq)

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index bb22ae567208..adeb8f516bf9 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -995,13 +995,18 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
 	nvme_end_request(req, cqe->status, cqe->result);
 }
 
-static void nvme_complete_cqes(struct nvme_queue *nvmeq, u16 start, u16 end)
+static int nvme_complete_cqes(struct nvme_queue *nvmeq, u16 start, u16 end)
 {
+	int nr = 0;
+
 	while (start != end) {
+		nr++;
 		nvme_handle_cqe(nvmeq, start);
 		if (++start == nvmeq->q_depth)
 			start = 0;
 	}
+
+	return nr;
 }
 
 static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
@@ -1012,22 +1017,17 @@ static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
 	}
 }
 
-static inline bool nvme_process_cq(struct nvme_queue *nvmeq, u16 *start,
-		u16 *end, int tag)
+static inline void nvme_process_cq(struct nvme_queue *nvmeq, u16 *start,
+		u16 *end)
 {
-	bool found = false;
-
 	*start = nvmeq->cq_head;
-	while (!found && nvme_cqe_pending(nvmeq)) {
-		if (nvmeq->cqes[nvmeq->cq_head].command_id == tag)
-			found = true;
+	while (nvme_cqe_pending(nvmeq))
 		nvme_update_cq_head(nvmeq);
-	}
+
 	*end = nvmeq->cq_head;
 
 	if (*start != *end)
 		nvme_ring_cq_doorbell(nvmeq);
-	return found;
 }
 
 static irqreturn_t nvme_irq(int irq, void *data)
@@ -1039,7 +1039,7 @@ static irqreturn_t nvme_irq(int irq, void *data)
 	spin_lock(&nvmeq->cq_lock);
 	if (nvmeq->cq_head != nvmeq->last_cq_head)
 		ret = IRQ_HANDLED;
-	nvme_process_cq(nvmeq, &start, &end, -1);
+	nvme_process_cq(nvmeq, &start, &end);
 	nvmeq->last_cq_head = nvmeq->cq_head;
 	spin_unlock(&nvmeq->cq_lock);
 
@@ -1062,7 +1062,6 @@ static irqreturn_t nvme_irq_check(int irq, void *data)
 static int __nvme_poll(struct nvme_queue *nvmeq, unsigned int tag)
 {
 	u16 start, end;
-	bool found;
 
 	if (!nvme_cqe_pending(nvmeq))
 		return 0;
@@ -1074,14 +1073,13 @@ static int __nvme_poll(struct nvme_queue *nvmeq, unsigned int tag)
 		local_irq_disable();
 
 	spin_lock(&nvmeq->cq_lock);
-	found = nvme_process_cq(nvmeq, &start, &end, tag);
+	nvme_process_cq(nvmeq, &start, &end);
 	spin_unlock(&nvmeq->cq_lock);
 
 	if (!nvmeq->polled)
 		local_irq_enable();
 
-	nvme_complete_cqes(nvmeq, start, end);
-	return found;
+	return nvme_complete_cqes(nvmeq, start, end);
 }
 
 static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
@@ -1414,7 +1412,7 @@ static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
 		nvme_disable_ctrl(&dev->ctrl, dev->ctrl.cap);
 
 	spin_lock_irq(&nvmeq->cq_lock);
-	nvme_process_cq(nvmeq, &start, &end, -1);
+	nvme_process_cq(nvmeq, &start, &end);
 	spin_unlock_irq(&nvmeq->cq_lock);
 
 	nvme_complete_cqes(nvmeq, start, end);
@@ -2209,7 +2207,7 @@ static void nvme_del_cq_end(struct request *req, blk_status_t error)
 		unsigned long flags;
 
 		spin_lock_irqsave(&nvmeq->cq_lock, flags);
-		nvme_process_cq(nvmeq, &start, &end, -1);
+		nvme_process_cq(nvmeq, &start, &end);
 		spin_unlock_irqrestore(&nvmeq->cq_lock, flags);
 
 		nvme_complete_cqes(nvmeq, start, end);
--
2.17.1