From: Mike Snitzer <snitzer@redhat.com>
To: axboe@kernel.dk, dm-devel@redhat.com
Cc: linux-block@vger.kernel.org
Subject: [PATCH v2 2/6] dm rq: add DM_MAPIO_DELAY_REQUEUE to delay requeue of blk-mq requests
Date: Wed, 14 Sep 2016 12:29:32 -0400
Message-ID: <1473870576-54331-3-git-send-email-snitzer@redhat.com>
In-Reply-To: <1473870576-54331-1-git-send-email-snitzer@redhat.com>
Otherwise blk-mq will immediately dispatch requests that are requeued
via a BLK_MQ_RQ_QUEUE_BUSY return from blk_mq_ops' .queue_rq.

Delayed requeue is implemented using blk_mq_delay_kick_requeue_list()
with a delay of 5 seconds.  In the context of DM multipath (all paths
down) it doesn't make any sense to requeue more quickly.
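
For illustration only (not part of this patch): a request-based target
could opt into the new return code roughly as sketched below.  Only
DM_MAPIO_DELAY_REQUEUE and the .clone_and_map_rq signature come from
this series; the example_* names are hypothetical.

	#include <linux/device-mapper.h>
	#include <linux/blkdev.h>

	/* Hypothetical stand-in for real path-state bookkeeping. */
	static bool example_path_available(struct dm_target *ti)
	{
		return false;
	}

	/*
	 * Sketch of a .clone_and_map_rq hook: when nothing can service
	 * the request (e.g. all paths down), return DM_MAPIO_DELAY_REQUEUE
	 * so core DM requeues it after a delay rather than immediately.
	 */
	static int example_clone_and_map_rq(struct dm_target *ti,
					    struct request *rq,
					    union map_info *map_context,
					    struct request **clone)
	{
		if (!example_path_available(ti))
			return DM_MAPIO_DELAY_REQUEUE; /* dm-rq delays ~5s on blk-mq */

		/* a real target would set up *clone here */
		return DM_MAPIO_REQUEUE;
	}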
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
---
drivers/md/dm-rq.c | 32 ++++++++++++++++++--------------
include/linux/device-mapper.h | 1 +
2 files changed, 19 insertions(+), 14 deletions(-)
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 0d301d5..9eebc8d 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -336,20 +336,21 @@ static void dm_old_requeue_request(struct request *rq)
spin_unlock_irqrestore(q->queue_lock, flags);
}
-static void dm_mq_requeue_request(struct request *rq)
+static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
{
struct request_queue *q = rq->q;
unsigned long flags;
blk_mq_requeue_request(rq);
+
spin_lock_irqsave(q->queue_lock, flags);
if (!blk_queue_stopped(q))
- blk_mq_kick_requeue_list(q);
+ blk_mq_delay_kick_requeue_list(q, msecs_to_jiffies(msecs));
spin_unlock_irqrestore(q->queue_lock, flags);
}
static void dm_requeue_original_request(struct mapped_device *md,
- struct request *rq)
+ struct request *rq, bool delay_requeue)
{
int rw = rq_data_dir(rq);
@@ -359,7 +360,7 @@ static void dm_requeue_original_request(struct mapped_device *md,
if (!rq->q->mq_ops)
dm_old_requeue_request(rq);
else
- dm_mq_requeue_request(rq);
+ dm_mq_delay_requeue_request(rq, delay_requeue ? 5000 : 0);
rq_completed(md, rw, false);
}
@@ -389,7 +390,7 @@ static void dm_done(struct request *clone, int error, bool mapped)
return;
else if (r == DM_ENDIO_REQUEUE)
/* The target wants to requeue the I/O */
- dm_requeue_original_request(tio->md, tio->orig);
+ dm_requeue_original_request(tio->md, tio->orig, false);
else {
DMWARN("unimplemented target endio return value: %d", r);
BUG();
@@ -629,8 +630,8 @@ static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
/*
* Returns:
- * 0 : the request has been processed
- * DM_MAPIO_REQUEUE : the original request needs to be requeued
+ * DM_MAPIO_* : the request has been processed as indicated
+ * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
* < 0 : the request was completed due to failure
*/
static int map_request(struct dm_rq_target_io *tio, struct request *rq,
@@ -643,6 +644,8 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
if (tio->clone) {
clone = tio->clone;
r = ti->type->map_rq(ti, clone, &tio->info);
+ if (r == DM_MAPIO_DELAY_REQUEUE)
+ return DM_MAPIO_REQUEUE; /* .request_fn requeue is always immediate */
} else {
r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
if (r < 0) {
@@ -650,9 +653,8 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
dm_kill_unmapped_request(rq, r);
return r;
}
- if (r != DM_MAPIO_REMAPPED)
- return r;
- if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
+ if (r == DM_MAPIO_REMAPPED &&
+ setup_clone(clone, rq, tio, GFP_ATOMIC)) {
/* -ENOMEM */
ti->type->release_clone_rq(clone);
return DM_MAPIO_REQUEUE;
@@ -671,7 +673,10 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
break;
case DM_MAPIO_REQUEUE:
/* The target wants to requeue the I/O */
- dm_requeue_original_request(md, tio->orig);
+ break;
+ case DM_MAPIO_DELAY_REQUEUE:
+ /* The target wants to requeue the I/O after a delay */
+ dm_requeue_original_request(md, tio->orig, true);
break;
default:
if (r > 0) {
@@ -681,10 +686,9 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
/* The target wants to complete the I/O */
dm_kill_unmapped_request(rq, r);
- return r;
}
- return 0;
+ return r;
}
static void dm_start_request(struct mapped_device *md, struct request *orig)
@@ -727,7 +731,7 @@ static void map_tio_request(struct kthread_work *work)
struct mapped_device *md = tio->md;
if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
- dm_requeue_original_request(md, rq);
+ dm_requeue_original_request(md, rq, false);
}
ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 91acfce..ef7962e 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -590,6 +590,7 @@ extern struct ratelimit_state dm_ratelimit_state;
#define DM_MAPIO_SUBMITTED 0
#define DM_MAPIO_REMAPPED 1
#define DM_MAPIO_REQUEUE DM_ENDIO_REQUEUE
+#define DM_MAPIO_DELAY_REQUEUE 3
#define dm_sector_div64(x, y)( \
{ \
--
2.7.4 (Apple Git-66)