From: Tejun Heo <tj@kernel.org>
To: jaxboe@fusionio.com, linux-kernel@vger.kernel.org,
linux-fsdevel@vger.kernel.org, linux-scsi@vger.kernel.org,
	linux-ide@vger.kernel.org, linux-raid@vger.kernel.org, hch@lst.de
Cc: Tejun Heo <tj@kernel.org>
Subject: [PATCH 06/30] block: misc cleanups in barrier code
Date: Wed, 25 Aug 2010 17:47:23 +0200 [thread overview]
Message-ID: <1282751267-3530-7-git-send-email-tj@kernel.org> (raw)
In-Reply-To: <1282751267-3530-1-git-send-email-tj@kernel.org>
Make the following cleanups in preparation of barrier/flush update.
* blk_do_ordered() declaration is moved from include/linux/blkdev.h to
block/blk.h.
* blk_do_ordered() now returns pointer to struct request, with %NULL
meaning "try the next request" and ERR_PTR(-EAGAIN) "try again
later". The third case will be dropped with further changes.
* In the initialization of proxy barrier request, data direction is
already set by init_request_from_bio(). Drop unnecessary explicit
REQ_WRITE setting and move init_request_from_bio() above REQ_FUA
flag setting.
* add_request() is collapsed into __make_request().
These changes don't make any functional difference.
Signed-off-by: Tejun Heo <tj@kernel.org>
---
block/blk-barrier.c | 32 ++++++++++++++------------------
block/blk-core.c | 21 ++++-----------------
block/blk.h | 7 +++++--
include/linux/blkdev.h | 1 -
4 files changed, 23 insertions(+), 38 deletions(-)
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index ed0aba5..f1be85b 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -110,9 +110,9 @@ static void queue_flush(struct request_queue *q, unsigned which)
elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
}
-static inline bool start_ordered(struct request_queue *q, struct request **rqp)
+static inline struct request *start_ordered(struct request_queue *q,
+ struct request *rq)
{
- struct request *rq = *rqp;
unsigned skip = 0;
q->orderr = 0;
@@ -149,11 +149,9 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
/* initialize proxy request and queue it */
blk_rq_init(q, rq);
- if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
- rq->cmd_flags |= REQ_WRITE;
+ init_request_from_bio(rq, q->orig_bar_rq->bio);
if (q->ordered & QUEUE_ORDERED_DO_FUA)
rq->cmd_flags |= REQ_FUA;
- init_request_from_bio(rq, q->orig_bar_rq->bio);
rq->end_io = bar_end_io;
elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
@@ -171,27 +169,26 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
else
skip |= QUEUE_ORDSEQ_DRAIN;
- *rqp = rq;
-
/*
* Complete skipped sequences. If whole sequence is complete,
- * return false to tell elevator that this request is gone.
+ * return %NULL to tell elevator that this request is gone.
*/
- return !blk_ordered_complete_seq(q, skip, 0);
+ if (blk_ordered_complete_seq(q, skip, 0))
+ rq = NULL;
+ return rq;
}
-bool blk_do_ordered(struct request_queue *q, struct request **rqp)
+struct request *blk_do_ordered(struct request_queue *q, struct request *rq)
{
- struct request *rq = *rqp;
const int is_barrier = rq->cmd_type == REQ_TYPE_FS &&
(rq->cmd_flags & REQ_HARDBARRIER);
if (!q->ordseq) {
if (!is_barrier)
- return true;
+ return rq;
if (q->next_ordered != QUEUE_ORDERED_NONE)
- return start_ordered(q, rqp);
+ return start_ordered(q, rq);
else {
/*
* Queue ordering not supported. Terminate
@@ -199,8 +196,7 @@ bool blk_do_ordered(struct request_queue *q, struct request **rqp)
*/
blk_dequeue_request(rq);
__blk_end_request_all(rq, -EOPNOTSUPP);
- *rqp = NULL;
- return false;
+ return NULL;
}
}
@@ -211,14 +207,14 @@ bool blk_do_ordered(struct request_queue *q, struct request **rqp)
/* Special requests are not subject to ordering rules. */
if (rq->cmd_type != REQ_TYPE_FS &&
rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
- return true;
+ return rq;
/* Ordered by draining. Wait for turn. */
WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
- *rqp = NULL;
+ rq = ERR_PTR(-EAGAIN);
- return true;
+ return rq;
}
static void bio_end_empty_barrier(struct bio *bio, int err)
diff --git a/block/blk-core.c b/block/blk-core.c
index f063541..f8d37a8 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1037,22 +1037,6 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
}
EXPORT_SYMBOL(blk_insert_request);
-/*
- * add-request adds a request to the linked list.
- * queue lock is held and interrupts disabled, as we muck with the
- * request queue list.
- */
-static inline void add_request(struct request_queue *q, struct request *req)
-{
- drive_stat_acct(req, 1);
-
- /*
- * elevator indicated where it wants this request to be
- * inserted at elevator_merge time
- */
- __elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
-}
-
static void part_round_stats_single(int cpu, struct hd_struct *part,
unsigned long now)
{
@@ -1316,7 +1300,10 @@ get_rq:
req->cpu = blk_cpu_to_group(smp_processor_id());
if (queue_should_plug(q) && elv_queue_empty(q))
blk_plug_device(q);
- add_request(q, req);
+
+ /* insert the request into the elevator */
+ drive_stat_acct(req, 1);
+ __elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
out:
if (unplug || !queue_should_plug(q))
__generic_unplug_device(q);
diff --git a/block/blk.h b/block/blk.h
index 6e7dc87..874eb4e 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -51,6 +51,8 @@ static inline void blk_clear_rq_complete(struct request *rq)
*/
#define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash))
+struct request *blk_do_ordered(struct request_queue *q, struct request *rq);
+
static inline struct request *__elv_next_request(struct request_queue *q)
{
struct request *rq;
@@ -58,8 +60,9 @@ static inline struct request *__elv_next_request(struct request_queue *q)
while (1) {
while (!list_empty(&q->queue_head)) {
rq = list_entry_rq(q->queue_head.next);
- if (blk_do_ordered(q, &rq))
- return rq;
+ rq = blk_do_ordered(q, rq);
+ if (rq)
+ return !IS_ERR(rq) ? rq : NULL;
}
if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e97911d..996549d 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -869,7 +869,6 @@ extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
-extern bool blk_do_ordered(struct request_queue *, struct request **);
extern unsigned blk_ordered_cur_seq(struct request_queue *);
extern unsigned blk_ordered_req_seq(struct request *);
extern bool blk_ordered_complete_seq(struct request_queue *, unsigned, int);
--
1.7.1
next prev parent reply other threads:[~2010-08-25 15:47 UTC|newest]
Thread overview: 60+ messages / expand[flat|nested] mbox.gz Atom feed top
2010-08-25 15:47 [PATCHSET 2.6.36-rc2] block, fs: replace HARDBARRIER with FLUSH/FUA Tejun Heo
2010-08-25 15:47 ` [PATCH 01/30] ide: remove unnecessary blk_queue_flushing() test in do_ide_request() Tejun Heo
2010-08-25 15:47 ` [PATCH 02/30] block/loop: queue ordered mode should be DRAIN_FLUSH Tejun Heo
2010-08-25 15:47 ` [PATCH 03/30] block: kill QUEUE_ORDERED_BY_TAG Tejun Heo
2010-08-25 15:47 ` [PATCH 04/30] block: deprecate barrier and replace blk_queue_ordered() with blk_queue_flush() Tejun Heo
2010-08-30 15:37 ` Boaz Harrosh
2010-08-25 15:47 ` [PATCH 05/30] block: remove spurious uses of REQ_HARDBARRIER Tejun Heo
2010-08-25 15:47 ` Tejun Heo [this message]
2010-08-25 15:47 ` [PATCH 07/30] block: drop barrier ordering by queue draining Tejun Heo
2010-08-25 15:47 ` [PATCH 08/30] block: rename blk-barrier.c to blk-flush.c Tejun Heo
2010-08-25 15:47 ` [PATCH 09/30] block: rename barrier/ordered to flush Tejun Heo
2010-08-25 15:47 ` [PATCH 10/30] block: implement REQ_FLUSH/FUA based interface for FLUSH/FUA requests Tejun Heo
2010-08-25 15:47 ` [PATCH 11/30] block: filter flush bio's in __generic_make_request() Tejun Heo
2010-08-25 15:47 ` [PATCH 12/30] block: use REQ_FLUSH in blkdev_issue_flush() Tejun Heo
2010-08-25 15:47 ` [PATCH 13/30] block: simplify queue_next_fseq Tejun Heo
2010-08-25 15:47 ` [PATCH 14/30] block/loop: implement REQ_FLUSH/FUA support Tejun Heo
2010-08-25 15:47 ` [PATCH 15/30] virtio_blk: drop REQ_HARDBARRIER support Tejun Heo
2010-08-25 15:47 ` [PATCH 16/30] lguest: replace VIRTIO_F_BARRIER support with VIRTIO_F_FLUSH support Tejun Heo
2010-08-25 15:47 ` [PATCH 17/30] md: implement REQ_FLUSH/FUA support Tejun Heo
2010-08-25 15:47 ` [PATCH 18/30] block: pass gfp_mask and flags to sb_issue_discard Tejun Heo
2010-08-25 15:47 ` [PATCH 19/30] xfs: replace barriers with explicit flush / FUA usage Tejun Heo
2010-08-25 15:47 ` [PATCH 20/30] btrfs: " Tejun Heo
2010-08-25 15:47 ` [PATCH 21/30] gfs2: " Tejun Heo
2010-08-25 15:47 ` [PATCH 22/30] reiserfs: " Tejun Heo
2010-08-25 15:47 ` [PATCH 23/30] nilfs2: " Tejun Heo
2010-08-25 15:47 ` [PATCH 24/30] jbd: " Tejun Heo
2010-08-25 15:47 ` [PATCH 25/30] jbd2: " Tejun Heo
2010-08-25 15:47 ` [PATCH 26/30] ext4: do not send discards as barriers Tejun Heo
2010-08-25 15:58 ` Christoph Hellwig
2010-08-25 16:00 ` Christoph Hellwig
2010-08-25 15:57 ` Tejun Heo
2010-08-25 20:02 ` Jan Kara
2010-08-26 8:25 ` Tejun Heo
2010-08-27 17:31 ` Jan Kara
2010-08-30 19:56 ` Jeff Moyer
2010-08-30 20:20 ` Jan Kara
2010-08-30 20:24 ` Ric Wheeler
2010-08-30 20:39 ` Vladislav Bolkhovitin
2010-08-30 21:02 ` Jan Kara
2010-08-31 9:55 ` Boaz Harrosh
2010-09-02 18:46 ` Vladislav Bolkhovitin
2010-08-30 21:01 ` Jeff Moyer
2010-08-31 8:11 ` Tejun Heo
2010-08-31 10:07 ` Boaz Harrosh
2010-08-31 10:13 ` Tejun Heo
2010-08-31 10:27 ` Boaz Harrosh
2010-09-09 22:53 ` Jan Kara
2010-08-25 15:47 ` [PATCH 27/30] fat: " Tejun Heo
2010-08-25 15:47 ` [PATCH 28/30] swap: " Tejun Heo
2010-08-25 15:47 ` [PATCH 29/30] block: remove the BLKDEV_IFL_BARRIER flag Tejun Heo
2010-08-25 15:59 ` Christoph Hellwig
2010-08-25 15:47 ` [PATCH 30/30] block: remove the BH_Eopnotsupp flag Tejun Heo
2010-08-25 16:03 ` [PATCHSET 2.6.36-rc2] block, fs: replace HARDBARRIER with FLUSH/FUA Mike Snitzer
2010-08-26 8:23 ` [PATCH 24.5/30] jbd2: Modify ASYNC_COMMIT code to not rely on queue draining on barrier Tejun Heo
2010-08-26 9:33 ` Sergei Shtylyov
2010-08-26 9:37 ` [PATCH UPDATED " Tejun Heo
2010-09-06 11:15 ` [PATCH " Andreas Dilger
2010-09-06 11:40 ` Jan Kara
2010-08-26 9:54 ` [PATCH] block: update documentation for REQ_FLUSH / REQ_FUA Christoph Hellwig
2010-08-27 9:18 ` Tejun Heo
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1282751267-3530-7-git-send-email-tj@kernel.org \
--to=tj@kernel.org \
--cc=dm-devel@redhat.com \
    --cc=hch@lst.de \
--cc=jaxboe@fusionio.com \
--cc=linux-fsdevel@vger.kernel.org \
--cc=linux-ide@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-raid@vger.kernel.org \
--cc=linux-scsi@vger.kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).