From: Christoph Hellwig <hch@lst.de>
To: Ming Lei <ming.lei@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>, Jens Axboe <axboe@kernel.dk>,
linux-block@vger.kernel.org, John Garry <john.garry@huawei.com>,
Bart Van Assche <bvanassche@acm.org>,
Hannes Reinecke <hare@suse.com>,
Thomas Gleixner <tglx@linutronix.de>,
will@kernel.org, peterz@infradead.org, paulmck@kernel.org
Subject: Re: [PATCH V8 07/11] blk-mq: stop to handle IO and drain IO before hctx becomes inactive
Date: Sat, 25 Apr 2020 17:48:32 +0200
Message-ID: <20200425154832.GA16004@lst.de>
In-Reply-To: <20200425095351.GC495669@T590>

FYI, here is what I think we should be doing (memory model experts,
please correct me):

 - just drop the direct_issue flag and check for the CPU, which is
   cheap enough
 - replace raw_smp_processor_id with get_cpu/put_cpu to make sure we
   don't hit the tiny migration window (see the ordering sketch below
   this list)
 - a bunch of random cleanups to make the code easier to read, mostly
   by making it more self-documenting and by improving the comments
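
To spell out the ordering this relies on (my sketch, not part of the
patch; again, memory model experts please double-check the pairing):

    CPU A: dispatch                      CPU B: hctx goes offline
    -------------------------------      -------------------------------
    write rq->tag, tags->rqs[tag]        set_bit(BLK_MQ_S_INACTIVE)
    smp_mb()   /* only if migrated */    smp_mb__after_atomic()
    test_bit(BLK_MQ_S_INACTIVE)          blk_mq_tags_inflight_rqs()

This is the classic store/load pairing: the two full barriers guarantee
that at least one side observes the other's store, so either dispatch
sees BLK_MQ_S_INACTIVE and puts the driver tag back, or the offline path
sees the in-flight tag and waits for it to drain.
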
diff --git a/block/blk-mq.c b/block/blk-mq.c
index bfa4020256ae9..da749865f6eed 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1049,28 +1049,16 @@ static bool __blk_mq_get_driver_tag(struct request *rq)
atomic_inc(&data.hctx->nr_active);
}
data.hctx->tags->rqs[rq->tag] = rq;
- return true;
-}
-
-static bool blk_mq_get_driver_tag(struct request *rq, bool direct_issue)
-{
- if (rq->tag != -1)
- return true;
- if (!__blk_mq_get_driver_tag(rq))
- return false;
/*
- * Add one memory barrier in case that direct issue IO process is
- * migrated to other CPU which may not belong to this hctx, so we can
- * order driver tag assignment and checking BLK_MQ_S_INACTIVE.
- * Otherwise, barrier() is enough given both setting BLK_MQ_S_INACTIVE
- * and driver tag assignment are run on the same CPU in case that
- * BLK_MQ_S_INACTIVE is set.
+ * Ensure updates to rq->tag and tags->rqs[] are seen by
+ * blk_mq_tags_inflight_rqs. This pairs with the smp_mb__after_atomic
+ * in blk_mq_hctx_notify_offline. This only matters in case a process
+ * gets migrated to another CPU that is not mapped to this hctx.
*/
- if (unlikely(direct_issue && rq->mq_ctx->cpu != raw_smp_processor_id()))
+ if (rq->mq_ctx->cpu != get_cpu())
smp_mb();
- else
- barrier();
+ put_cpu();
if (unlikely(test_bit(BLK_MQ_S_INACTIVE, &rq->mq_hctx->state))) {
blk_mq_put_driver_tag(rq);
@@ -1079,6 +1067,13 @@ static bool blk_mq_get_driver_tag(struct request *rq, bool direct_issue)
return true;
}
+static bool blk_mq_get_driver_tag(struct request *rq)
+{
+ if (rq->tag != -1)
+ return true;
+ return __blk_mq_get_driver_tag(rq);
+}
+
static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
int flags, void *key)
{
@@ -1125,7 +1120,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
* Don't clear RESTART here, someone else could have set it.
* At most this will cost an extra queue run.
*/
- return blk_mq_get_driver_tag(rq, false);
+ return blk_mq_get_driver_tag(rq);
}
wait = &hctx->dispatch_wait;
@@ -1151,7 +1146,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
* allocation failure and adding the hardware queue to the wait
* queue.
*/
- ret = blk_mq_get_driver_tag(rq, false);
+ ret = blk_mq_get_driver_tag(rq);
if (!ret) {
spin_unlock(&hctx->dispatch_wait_lock);
spin_unlock_irq(&wq->lock);
@@ -1252,7 +1247,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
break;
}
- if (!blk_mq_get_driver_tag(rq, false)) {
+ if (!blk_mq_get_driver_tag(rq)) {
/*
* The initial allocation attempt failed, so we need to
* rerun the hardware queue when a tag is freed. The
@@ -1284,7 +1279,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
bd.last = true;
else {
nxt = list_first_entry(list, struct request, queuelist);
- bd.last = !blk_mq_get_driver_tag(nxt, false);
+ bd.last = !blk_mq_get_driver_tag(nxt);
}
ret = q->mq_ops->queue_rq(hctx, &bd);
@@ -1886,7 +1881,7 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
if (!blk_mq_get_dispatch_budget(hctx))
goto insert;
- if (!blk_mq_get_driver_tag(rq, true)) {
+ if (!blk_mq_get_driver_tag(rq)) {
blk_mq_put_dispatch_budget(hctx);
goto insert;
}
@@ -2327,23 +2322,24 @@ static bool blk_mq_inflight_rq(struct request *rq, void *data,
static unsigned blk_mq_tags_inflight_rqs(struct blk_mq_hw_ctx *hctx)
{
struct count_inflight_data count_data = {
- .count = 0,
.hctx = hctx,
};
blk_mq_all_tag_busy_iter(hctx->tags, blk_mq_count_inflight_rq,
blk_mq_inflight_rq, &count_data);
-
return count_data.count;
}
-static void blk_mq_hctx_drain_inflight_rqs(struct blk_mq_hw_ctx *hctx)
+static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu,
+ struct blk_mq_hw_ctx *hctx)
{
- while (1) {
- if (!blk_mq_tags_inflight_rqs(hctx))
- break;
- msleep(5);
- }
+ if (!cpumask_test_cpu(cpu, hctx->cpumask))
+ return false;
+ if (cpumask_next_and(-1, hctx->cpumask, cpu_online_mask) != cpu)
+ return false;
+ if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids)
+ return false;
+ return true;
}
static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
@@ -2351,25 +2347,19 @@ static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
struct blk_mq_hw_ctx, cpuhp_online);
- if (!cpumask_test_cpu(cpu, hctx->cpumask))
- return 0;
-
- if ((cpumask_next_and(-1, hctx->cpumask, cpu_online_mask) != cpu) ||
- (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids))
+ if (!blk_mq_last_cpu_in_hctx(cpu, hctx))
return 0;
/*
- * The current CPU is the last one in this hctx, S_INACTIVE
- * can be observed in dispatch path without any barrier needed,
- * cause both are run on one same CPU.
+ * Order setting BLK_MQ_S_INACTIVE versus checking rq->tag and rqs[tag]
+ * in blk_mq_tags_inflight_rqs. This pairs with the smp_mb() in
+ * blk_mq_get_driver_tag.
*/
set_bit(BLK_MQ_S_INACTIVE, &hctx->state);
- /*
- * Order setting BLK_MQ_S_INACTIVE and checking rq->tag & rqs[tag],
- * and its pair is the smp_mb() in blk_mq_get_driver_tag
- */
smp_mb__after_atomic();
- blk_mq_hctx_drain_inflight_rqs(hctx);
+
+ while (blk_mq_tags_inflight_rqs(hctx))
+ msleep(5);
return 0;
}
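
To illustrate the new blk_mq_last_cpu_in_hctx helper with a made-up
example: assume hctx->cpumask = { 2, 3 } and CPU 3 is already offline.
The offline notification for CPU 2 then sees:

    cpumask_test_cpu(2, hctx->cpumask)                   -> true
    cpumask_next_and(-1, hctx->cpumask, cpu_online_mask) -> 2 (== cpu)
    cpumask_next_and(2, hctx->cpumask, cpu_online_mask)  -> >= nr_cpu_ids

i.e. CPU 2 is the first and last remaining online CPU in the hctx mask,
so we set BLK_MQ_S_INACTIVE and drain.  The earlier notification for
CPU 3 fails the cpumask_next_and(-1, ...) == cpu check, since CPU 2 is
still online and comes first, and returns 0 without doing any work.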