From: Christoph Hellwig <hch@lst.de>
To: Jens Axboe <axboe@fb.com>
Cc: Mike Snitzer <snitzer@redhat.com>,
Junichi Nomura <j-nomura@ce.jp.nec.com>,
linux-block@vger.kernel.org, linux-scsi@vger.kernel.org,
linux-raid@vger.kernel.org, dm-devel@redhat.com
Subject: [PATCH 01/18] block: add a op_is_flush helper
Date: Wed, 25 Jan 2017 18:25:09 +0100 [thread overview]
Message-ID: <1485365126-23210-2-git-send-email-hch@lst.de> (raw)
In-Reply-To: <1485365126-23210-1-git-send-email-hch@lst.de>
This centralizes the checks for bios that need to go into the flush
state machine.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
block/blk-core.c | 8 ++++----
block/blk-mq-sched.c | 5 ++---
block/blk-mq.c | 4 ++--
drivers/md/bcache/request.c | 2 +-
drivers/md/dm-cache-target.c | 13 +++----------
drivers/md/dm-thin.c | 13 +++++--------
include/linux/blk_types.h | 9 +++++++++
7 files changed, 26 insertions(+), 28 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index a61f140..b830e14 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1035,7 +1035,7 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
* Flush requests do not use the elevator so skip initialization.
* This allows a request to share the flush and elevator data.
*/
- if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA))
+ if (op_is_flush(bio->bi_opf))
return false;
return true;
@@ -1641,7 +1641,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
return BLK_QC_T_NONE;
}
- if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) {
+ if (op_is_flush(bio->bi_opf)) {
spin_lock_irq(q->queue_lock);
where = ELEVATOR_INSERT_FLUSH;
goto get_rq;
@@ -2145,7 +2145,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
*/
BUG_ON(blk_queued_rq(rq));
- if (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA))
+ if (op_is_flush(rq->cmd_flags))
where = ELEVATOR_INSERT_FLUSH;
add_acct_request(q, rq, where);
@@ -3256,7 +3256,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
/*
* rq is already accounted, so use raw insert
*/
- if (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA))
+ if (op_is_flush(rq->cmd_flags))
__elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
else
__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index d05061f..3bd66e5 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -111,7 +111,6 @@ struct request *blk_mq_sched_get_request(struct request_queue *q,
struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx;
struct request *rq;
- const bool is_flush = op & (REQ_PREFLUSH | REQ_FUA);
blk_queue_enter_live(q);
ctx = blk_mq_get_ctx(q);
@@ -126,7 +125,7 @@ struct request *blk_mq_sched_get_request(struct request_queue *q,
* Flush requests are special and go directly to the
* dispatch list.
*/
- if (!is_flush && e->type->ops.mq.get_request) {
+ if (!op_is_flush(op) && e->type->ops.mq.get_request) {
rq = e->type->ops.mq.get_request(q, op, data);
if (rq)
rq->rq_flags |= RQF_QUEUED;
@@ -138,7 +137,7 @@ struct request *blk_mq_sched_get_request(struct request_queue *q,
}
if (rq) {
- if (!is_flush) {
+ if (!op_is_flush(op)) {
rq->elv.icq = NULL;
if (e && e->type->icq_cache)
blk_mq_sched_assign_ioc(q, rq, bio);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ee69e5e..e229f8a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1378,7 +1378,7 @@ static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
{
const int is_sync = op_is_sync(bio->bi_opf);
- const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
+ const int is_flush_fua = op_is_flush(bio->bi_opf);
struct blk_mq_alloc_data data;
struct request *rq;
unsigned int request_count = 0, srcu_idx;
@@ -1498,7 +1498,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
{
const int is_sync = op_is_sync(bio->bi_opf);
- const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
+ const int is_flush_fua = op_is_flush(bio->bi_opf);
struct blk_plug *plug;
unsigned int request_count = 0;
struct blk_mq_alloc_data data;
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 76d2087..01035e7 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -666,7 +666,7 @@ static inline struct search *search_alloc(struct bio *bio,
s->iop.write_prio = 0;
s->iop.error = 0;
s->iop.flags = 0;
- s->iop.flush_journal = (bio->bi_opf & (REQ_PREFLUSH|REQ_FUA)) != 0;
+ s->iop.flush_journal = op_is_flush(bio->bi_opf);
s->iop.wq = bcache_wq;
return s;
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index e04c61e..5b9cf56 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -787,8 +787,7 @@ static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
spin_lock_irqsave(&cache->lock, flags);
- if (cache->need_tick_bio &&
- !(bio->bi_opf & (REQ_FUA | REQ_PREFLUSH)) &&
+ if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) &&
bio_op(bio) != REQ_OP_DISCARD) {
pb->tick = true;
cache->need_tick_bio = false;
@@ -828,11 +827,6 @@ static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
return to_oblock(block_nr);
}
-static int bio_triggers_commit(struct cache *cache, struct bio *bio)
-{
- return bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
-}
-
/*
* You must increment the deferred set whilst the prison cell is held. To
* encourage this, we ask for 'cell' to be passed in.
@@ -884,7 +878,7 @@ static void issue(struct cache *cache, struct bio *bio)
{
unsigned long flags;
- if (!bio_triggers_commit(cache, bio)) {
+ if (!op_is_flush(bio->bi_opf)) {
accounted_request(cache, bio);
return;
}
@@ -1069,8 +1063,7 @@ static void dec_io_migrations(struct cache *cache)
static bool discard_or_flush(struct bio *bio)
{
- return bio_op(bio) == REQ_OP_DISCARD ||
- bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
+ return bio_op(bio) == REQ_OP_DISCARD || op_is_flush(bio->bi_opf);
}
static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index d1c05c1..110982d 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -699,7 +699,7 @@ static void remap_to_origin(struct thin_c *tc, struct bio *bio)
static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
{
- return (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) &&
+ return op_is_flush(bio->bi_opf) &&
dm_thin_changed_this_transaction(tc->td);
}
@@ -870,8 +870,7 @@ static void __inc_remap_and_issue_cell(void *context,
struct bio *bio;
while ((bio = bio_list_pop(&cell->bios))) {
- if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
- bio_op(bio) == REQ_OP_DISCARD)
+ if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD)
bio_list_add(&info->defer_bios, bio);
else {
inc_all_io_entry(info->tc->pool, bio);
@@ -1716,9 +1715,8 @@ static void __remap_and_issue_shared_cell(void *context,
struct bio *bio;
while ((bio = bio_list_pop(&cell->bios))) {
- if ((bio_data_dir(bio) == WRITE) ||
- (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
- bio_op(bio) == REQ_OP_DISCARD))
+ if (bio_data_dir(bio) == WRITE || op_is_flush(bio->bi_opf) ||
+ bio_op(bio) == REQ_OP_DISCARD)
bio_list_add(&info->defer_bios, bio);
else {
struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
@@ -2635,8 +2633,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_SUBMITTED;
}
- if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
- bio_op(bio) == REQ_OP_DISCARD) {
+ if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD) {
thin_defer_bio_with_throttle(tc, bio);
return DM_MAPIO_SUBMITTED;
}
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 0e5b1cd..37c9a43 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -221,6 +221,15 @@ static inline bool op_is_write(unsigned int op)
}
/*
+ * Check if the bio or request is one that needs special treatment in the
+ * flush state machine.
+ */
+static inline bool op_is_flush(unsigned int op)
+{
+ return op & (REQ_FUA | REQ_PREFLUSH);
+}
+
+/*
* Reads are always treated as synchronous, as are requests with the FUA or
* PREFLUSH flag. Other operations may be marked as synchronous using the
* REQ_SYNC flag.
--
2.1.4
next prev parent reply other threads:[~2017-01-25 17:25 UTC|newest]
Thread overview: 99+ messages / expand[flat|nested] mbox.gz Atom feed top
2017-01-25 17:25 split scsi passthrough fields out of struct request V2 Christoph Hellwig
2017-01-25 17:25 ` Christoph Hellwig [this message]
2017-01-26 2:58 ` [PATCH 01/18] block: add a op_is_flush helper Martin K. Petersen
2017-01-26 22:38 ` Bart Van Assche
2017-01-25 17:25 ` [PATCH 02/18] md: cleanup bio op / flags handling in raid1_write_request Christoph Hellwig
2017-01-26 2:59 ` Martin K. Petersen
2017-01-26 23:18 ` Bart Van Assche
2017-01-25 17:25 ` [PATCH 03/18] block: fix elevator init check Christoph Hellwig
2017-01-26 3:01 ` Martin K. Petersen
2017-01-26 23:21 ` Bart Van Assche
2017-01-25 17:25 ` [PATCH 04/18] block: simplify blk_init_allocated_queue Christoph Hellwig
2017-01-26 3:02 ` Martin K. Petersen
2017-01-26 23:27 ` Bart Van Assche
2017-01-25 17:25 ` [PATCH 05/18] block: allow specifying size for extra command data Christoph Hellwig
2017-01-26 3:15 ` Martin K. Petersen
2017-01-27 16:12 ` Christoph Hellwig
2017-01-27 17:21 ` Bart Van Assche
2017-01-27 17:26 ` Jens Axboe
2017-01-27 17:30 ` Bart Van Assche
2017-01-27 17:33 ` Jens Axboe
2017-01-25 17:25 ` [PATCH 06/18] dm: remove incomple BLOCK_PC support Christoph Hellwig
2017-01-27 17:32 ` Bart Van Assche
2017-01-25 17:25 ` [PATCH 07/18] dm: always defer request allocation to the owner of the request_queue Christoph Hellwig
2017-01-27 16:34 ` Mike Snitzer
2017-01-27 16:36 ` Christoph Hellwig
2017-01-27 16:44 ` Mike Snitzer
2017-01-25 17:25 ` [PATCH 08/18] scsi_dh_rdac: switch to scsi_execute_req_flags() Christoph Hellwig
2017-01-26 3:18 ` Martin K. Petersen
2017-01-25 17:25 ` [PATCH 09/18] scsi_dh_emc: " Christoph Hellwig
2017-01-26 3:19 ` Martin K. Petersen
2017-01-25 17:25 ` [PATCH 10/18] scsi_dh_hp_sw: " Christoph Hellwig
2017-01-26 3:20 ` Martin K. Petersen
2017-01-25 17:25 ` [PATCH 11/18] scsi: remove gfp_flags member in scsi_host_cmd_pool Christoph Hellwig
2017-01-26 3:21 ` Martin K. Petersen
2017-01-27 17:38 ` Bart Van Assche
2017-01-25 17:25 ` [PATCH 12/18] scsi: respect unchecked_isa_dma for blk-mq Christoph Hellwig
2017-01-26 3:23 ` Martin K. Petersen
2017-01-27 17:45 ` Bart Van Assche
2017-01-25 17:25 ` [PATCH 13/18] scsi: remove scsi_cmd_dma_pool Christoph Hellwig
2017-01-26 3:24 ` Martin K. Petersen
2017-01-27 17:51 ` Bart Van Assche
2017-01-25 17:25 ` [PATCH 14/18] scsi: remove __scsi_alloc_queue Christoph Hellwig
2017-01-26 3:25 ` Martin K. Petersen
2017-01-27 17:58 ` Bart Van Assche
2017-01-28 8:23 ` hch
2017-01-25 17:25 ` [PATCH 15/18] scsi: allocate scsi_cmnd structures as part of struct request Christoph Hellwig
2017-01-26 3:30 ` Martin K. Petersen
2017-01-27 18:39 ` Bart Van Assche
2017-01-28 8:25 ` hch
2017-01-25 17:25 ` [PATCH 16/18] block/bsg: move queue creation into bsg_setup_queue Christoph Hellwig
2017-01-27 18:48 ` Bart Van Assche
2017-01-25 17:25 ` [PATCH 17/18] block: split scsi_request out of struct request Christoph Hellwig
2017-01-25 17:25 ` [PATCH 18/18] block: don't assign cmd_flags in __blk_rq_prep_clone Christoph Hellwig
2017-01-26 3:31 ` Martin K. Petersen
2017-01-26 18:29 ` split scsi passthrough fields out of struct request V2 Bart Van Assche
2017-01-26 18:44 ` Jens Axboe
2017-01-26 18:52 ` Bart Van Assche
2017-01-26 18:57 ` Jens Axboe
2017-01-26 18:59 ` hch
2017-01-26 19:01 ` Jens Axboe
2017-01-26 20:47 ` [dm-devel] " Bart Van Assche
2017-01-26 20:54 ` Jens Axboe
2017-01-26 21:01 ` [dm-devel] " Bart Van Assche
2017-01-26 21:12 ` Jens Axboe
2017-01-26 21:47 ` Bart Van Assche
2017-01-26 21:51 ` Jens Axboe
2017-01-26 23:14 ` [dm-devel] " Bart Van Assche
2017-01-26 23:26 ` Jens Axboe
2017-01-26 23:47 ` Bart Van Assche
2017-01-26 23:50 ` [dm-devel] " Jens Axboe
2017-01-27 0:33 ` Jens Axboe
2017-01-27 0:38 ` [dm-devel] " Bart Van Assche
2017-01-27 0:41 ` Jens Axboe
2017-01-27 1:15 ` Bart Van Assche
2017-01-27 1:22 ` Jens Axboe
2017-01-27 6:40 ` [dm-devel] " Jens Axboe
2017-01-27 8:04 ` Jens Axboe
2017-01-27 16:52 ` Bart Van Assche
2017-01-27 16:56 ` Jens Axboe
2017-01-27 17:03 ` Bart Van Assche
2017-01-31 1:12 ` [dm-devel] " Bart Van Assche
2017-01-31 1:38 ` Jens Axboe
2017-01-31 4:13 ` Jens Axboe
2017-01-31 21:35 ` [dm-devel] " Bart Van Assche
2017-01-31 21:55 ` Bart Van Assche
2017-01-31 21:58 ` Jens Axboe
2017-01-27 17:02 ` Bart Van Assche
2017-01-27 16:11 ` Jens Axboe
2017-01-27 16:17 ` Christoph Hellwig
2017-01-27 16:21 ` Jens Axboe
2017-01-27 16:23 ` Christoph Hellwig
2017-01-27 16:27 ` Jens Axboe
2017-01-27 16:34 ` Christoph Hellwig
2017-01-27 16:38 ` Jens Axboe
2017-01-27 16:42 ` Christoph Hellwig
2017-01-27 16:58 ` Jens Axboe
2017-01-27 21:27 ` Bart Van Assche
2017-01-28 8:29 ` hch
2017-01-30 6:58 ` Hannes Reinecke
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1485365126-23210-2-git-send-email-hch@lst.de \
--to=hch@lst.de \
--cc=axboe@fb.com \
--cc=dm-devel@redhat.com \
--cc=j-nomura@ce.jp.nec.com \
--cc=linux-block@vger.kernel.org \
--cc=linux-raid@vger.kernel.org \
--cc=linux-scsi@vger.kernel.org \
--cc=snitzer@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).