From: Tejun Heo <tj@kernel.org>
To: jaxboe@fusionio.com, linux-fsdevel@vger.kernel.org,
linux-scsi@vger.kernel.org, linux-ide@vger.kernel.org,
linux-kernel@vger.kernel.org, linux-raid@vger.kernel.org,
hch@lst.de, James.Bottomley@suse.de
Cc: Tejun Heo <tj@kernel.org>, Christoph Hellwig <hch@infradead.org>,
Nick Piggin <npiggin@kernel.dk>,
"Michael S. Tsirkin" <mst@redhat.com>,
Jeremy Fitzhardinge <jeremy@xensource.com>,
Chris Wright <chrisw@sous-sol.org>
Subject: [PATCH 02/11] block: kill QUEUE_ORDERED_BY_TAG
Date: Thu, 12 Aug 2010 14:41:22 +0200 [thread overview]
Message-ID: <1281616891-5691-3-git-send-email-tj@kernel.org> (raw)
In-Reply-To: <1281616891-5691-1-git-send-email-tj@kernel.org>
Nobody is making meaningful use of ORDERED_BY_TAG now and queue
draining for barrier requests will be removed soon which will render
the advantage of tag ordering moot. Kill ORDERED_BY_TAG. The
following users are affected.
* brd: converted to ORDERED_DRAIN.
* virtio_blk: ORDERED_TAG path was already marked deprecated. Removed.
* xen-blkfront: ORDERED_TAG case dropped.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Chris Wright <chrisw@sous-sol.org>
---
block/blk-barrier.c | 35 +++++++----------------------------
drivers/block/brd.c | 2 +-
drivers/block/virtio_blk.c | 9 ---------
drivers/block/xen-blkfront.c | 8 +++-----
drivers/scsi/sd.c | 4 +---
include/linux/blkdev.h | 17 +----------------
6 files changed, 13 insertions(+), 62 deletions(-)
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index f0faefc..c807e9c 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -26,10 +26,7 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered)
if (ordered != QUEUE_ORDERED_NONE &&
ordered != QUEUE_ORDERED_DRAIN &&
ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
- ordered != QUEUE_ORDERED_DRAIN_FUA &&
- ordered != QUEUE_ORDERED_TAG &&
- ordered != QUEUE_ORDERED_TAG_FLUSH &&
- ordered != QUEUE_ORDERED_TAG_FUA) {
+ ordered != QUEUE_ORDERED_DRAIN_FUA) {
printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered);
return -EINVAL;
}
@@ -155,21 +152,9 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
* For an empty barrier, there's no actual BAR request, which
* in turn makes POSTFLUSH unnecessary. Mask them off.
*/
- if (!blk_rq_sectors(rq)) {
+ if (!blk_rq_sectors(rq))
q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
QUEUE_ORDERED_DO_POSTFLUSH);
- /*
- * Empty barrier on a write-through device w/ ordered
- * tag has no command to issue and without any command
- * to issue, ordering by tag can't be used. Drain
- * instead.
- */
- if ((q->ordered & QUEUE_ORDERED_BY_TAG) &&
- !(q->ordered & QUEUE_ORDERED_DO_PREFLUSH)) {
- q->ordered &= ~QUEUE_ORDERED_BY_TAG;
- q->ordered |= QUEUE_ORDERED_BY_DRAIN;
- }
- }
/* stash away the original request */
blk_dequeue_request(rq);
@@ -210,7 +195,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
} else
skip |= QUEUE_ORDSEQ_PREFLUSH;
- if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && queue_in_flight(q))
+ if (queue_in_flight(q))
rq = NULL;
else
skip |= QUEUE_ORDSEQ_DRAIN;
@@ -257,16 +242,10 @@ bool blk_do_ordered(struct request_queue *q, struct request **rqp)
rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
return true;
- if (q->ordered & QUEUE_ORDERED_BY_TAG) {
- /* Ordered by tag. Blocking the next barrier is enough. */
- if (is_barrier && rq != &q->bar_rq)
- *rqp = NULL;
- } else {
- /* Ordered by draining. Wait for turn. */
- WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
- if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
- *rqp = NULL;
- }
+ /* Ordered by draining. Wait for turn. */
+ WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
+ if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
+ *rqp = NULL;
return true;
}
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 1c7f637..47a4127 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -482,7 +482,7 @@ static struct brd_device *brd_alloc(int i)
if (!brd->brd_queue)
goto out_free_dev;
blk_queue_make_request(brd->brd_queue, brd_make_request);
- blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_TAG);
+ blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_DRAIN);
blk_queue_max_hw_sectors(brd->brd_queue, 1024);
blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 2aafafc..7965280 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -395,15 +395,6 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
* to implement write barrier support.
*/
blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);
- } else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER)) {
- /*
- * If the BARRIER feature is supported the host expects us
- * to order request by tags. This implies there is not
- * volatile write cache on the host, and that the host
- * never re-orders outstanding I/O. This feature is not
- * useful for real life scenarious and deprecated.
- */
- blk_queue_ordered(q, QUEUE_ORDERED_TAG);
} else {
/*
* If the FLUSH feature is not supported we must assume that
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 510ab86..25ffbf9 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -424,8 +424,7 @@ static int xlvbd_barrier(struct blkfront_info *info)
const char *barrier;
switch (info->feature_barrier) {
- case QUEUE_ORDERED_DRAIN: barrier = "enabled (drain)"; break;
- case QUEUE_ORDERED_TAG: barrier = "enabled (tag)"; break;
+ case QUEUE_ORDERED_DRAIN: barrier = "enabled"; break;
case QUEUE_ORDERED_NONE: barrier = "disabled"; break;
default: return -EINVAL;
}
@@ -1078,8 +1077,7 @@ static void blkfront_connect(struct blkfront_info *info)
* we're dealing with a very old backend which writes
* synchronously; draining will do what needs to get done.
*
- * If there are barriers, then we can do full queued writes
- * with tagged barriers.
+ * If there are barriers, then we use flush.
*
* If barriers are not supported, then there's no much we can
* do, so just set ordering to NONE.
@@ -1087,7 +1085,7 @@ static void blkfront_connect(struct blkfront_info *info)
if (err)
info->feature_barrier = QUEUE_ORDERED_DRAIN;
else if (barrier)
- info->feature_barrier = QUEUE_ORDERED_TAG;
+ info->feature_barrier = QUEUE_ORDERED_DRAIN_FLUSH;
else
info->feature_barrier = QUEUE_ORDERED_NONE;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 8e2e893..05a15b0 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2151,9 +2151,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
/*
* We now have all cache related info, determine how we deal
- * with ordered requests. Note that as the current SCSI
- * dispatch function can alter request order, we cannot use
- * QUEUE_ORDERED_TAG_* even when ordered tag is supported.
+ * with ordered requests.
*/
if (sdkp->WCE)
ordered = sdkp->DPOFUA
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 89c855c..96ef5f1 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -469,12 +469,7 @@ enum {
* DRAIN : ordering by draining is enough
* DRAIN_FLUSH : ordering by draining w/ pre and post flushes
* DRAIN_FUA : ordering by draining w/ pre flush and FUA write
- * TAG : ordering by tag is enough
- * TAG_FLUSH : ordering by tag w/ pre and post flushes
- * TAG_FUA : ordering by tag w/ pre flush and FUA write
*/
- QUEUE_ORDERED_BY_DRAIN = 0x01,
- QUEUE_ORDERED_BY_TAG = 0x02,
QUEUE_ORDERED_DO_PREFLUSH = 0x10,
QUEUE_ORDERED_DO_BAR = 0x20,
QUEUE_ORDERED_DO_POSTFLUSH = 0x40,
@@ -482,8 +477,7 @@ enum {
QUEUE_ORDERED_NONE = 0x00,
- QUEUE_ORDERED_DRAIN = QUEUE_ORDERED_BY_DRAIN |
- QUEUE_ORDERED_DO_BAR,
+ QUEUE_ORDERED_DRAIN = QUEUE_ORDERED_DO_BAR,
QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
QUEUE_ORDERED_DO_PREFLUSH |
QUEUE_ORDERED_DO_POSTFLUSH,
@@ -491,15 +485,6 @@ enum {
QUEUE_ORDERED_DO_PREFLUSH |
QUEUE_ORDERED_DO_FUA,
- QUEUE_ORDERED_TAG = QUEUE_ORDERED_BY_TAG |
- QUEUE_ORDERED_DO_BAR,
- QUEUE_ORDERED_TAG_FLUSH = QUEUE_ORDERED_TAG |
- QUEUE_ORDERED_DO_PREFLUSH |
- QUEUE_ORDERED_DO_POSTFLUSH,
- QUEUE_ORDERED_TAG_FUA = QUEUE_ORDERED_TAG |
- QUEUE_ORDERED_DO_PREFLUSH |
- QUEUE_ORDERED_DO_FUA,
-
/*
* Ordered operation sequence
*/
--
1.7.1
next prev parent reply other threads:[~2010-08-12 12:41 UTC|newest]
Thread overview: 109+ messages / expand[flat|nested] mbox.gz Atom feed top
2010-08-12 12:41 [PATCHSET block#for-2.6.36-post] block: replace barrier with sequenced flush Tejun Heo
2010-08-12 12:41 ` [PATCH 01/11] block/loop: queue ordered mode should be DRAIN_FLUSH Tejun Heo
2010-08-12 12:41 ` Tejun Heo [this message]
2010-08-13 12:56 ` [PATCH 02/11] block: kill QUEUE_ORDERED_BY_TAG Vladislav Bolkhovitin
2010-08-13 13:06 ` Christoph Hellwig
2010-08-12 12:41 ` [PATCH 03/11] block: deprecate barrier and replace blk_queue_ordered() with blk_queue_flush() Tejun Heo
2010-08-14 1:07 ` Jeremy Fitzhardinge
2010-08-14 9:42 ` hch
2010-08-16 20:38 ` Jeremy Fitzhardinge
2010-08-12 12:41 ` [PATCH 04/11] block: remove spurious uses of REQ_HARDBARRIER Tejun Heo
2010-08-12 12:41 ` [PATCH 05/11] block: misc cleanups in barrier code Tejun Heo
2010-08-12 12:41 ` [PATCH 06/11] block: drop barrier ordering by queue draining Tejun Heo
2010-08-12 12:41 ` [PATCH 07/11] block: rename blk-barrier.c to blk-flush.c Tejun Heo
2010-08-12 12:41 ` [PATCH 08/11] block: rename barrier/ordered to flush Tejun Heo
2010-08-17 13:26 ` Christoph Hellwig
2010-08-17 16:23 ` Tejun Heo
2010-08-17 17:08 ` Christoph Hellwig
2010-08-18 6:23 ` Tejun Heo
2010-08-12 12:41 ` [PATCH 09/11] block: implement REQ_FLUSH/FUA based interface for FLUSH/FUA requests Tejun Heo
2010-08-12 12:41 ` [PATCH 10/11] fs, block: propagate REQ_FLUSH/FUA interface to upper layers Tejun Heo
2010-08-12 21:24 ` Jan Kara
2010-08-13 7:19 ` Tejun Heo
2010-08-13 7:47 ` Christoph Hellwig
2010-08-16 16:33 ` [PATCH UPDATED " Tejun Heo
2010-08-12 12:41 ` [PATCH 11/11] block: use REQ_FLUSH in blkdev_issue_flush() Tejun Heo
2010-08-13 11:48 ` [PATCHSET block#for-2.6.36-post] block: replace barrier with sequenced flush Christoph Hellwig
2010-08-13 13:48 ` Tejun Heo
2010-08-13 14:38 ` Christoph Hellwig
2010-08-13 14:51 ` Tejun Heo
2010-08-14 10:36 ` Christoph Hellwig
2010-08-17 9:59 ` Tejun Heo
2010-08-17 13:19 ` Christoph Hellwig
2010-08-17 16:41 ` Tejun Heo
2010-08-17 16:59 ` Christoph Hellwig
2010-08-18 6:35 ` Tejun Heo
2010-08-18 8:11 ` Tejun Heo
2010-08-20 8:26 ` Kiyoshi Ueda
2010-08-23 12:14 ` Tejun Heo
2010-08-23 14:17 ` Mike Snitzer
2010-08-24 10:24 ` Kiyoshi Ueda
2010-08-24 16:59 ` Tejun Heo
2010-08-24 17:52 ` Mike Snitzer
2010-08-24 18:14 ` Tejun Heo
2010-08-25 8:00 ` Kiyoshi Ueda
2010-08-25 15:28 ` Mike Snitzer
2010-08-27 9:47 ` Kiyoshi Ueda
2010-08-27 13:49 ` Mike Snitzer
2010-08-30 6:13 ` Kiyoshi Ueda
2010-09-01 0:55 ` safety of retrying SYNCHRONIZE CACHE [was: Re: [PATCHSET block#for-2.6.36-post] block: replace barrier with sequenced flush] Mike Snitzer
2010-09-01 7:32 ` Hannes Reinecke
2010-09-01 7:38 ` Hannes Reinecke
2010-08-25 15:59 ` [RFC] training mpath to discern between SCSI errors (was: Re: [PATCHSET block#for-2.6.36-post] block: replace barrier with sequenced flush) Mike Snitzer
2010-08-25 19:15 ` [RFC] training mpath to discern between SCSI errors Mike Christie
2010-08-30 11:38 ` Hannes Reinecke
2010-08-30 12:07 ` Sergei Shtylyov
2010-08-30 12:39 ` Hannes Reinecke
2010-08-30 14:52 ` [dm-devel] " Hannes Reinecke
2010-10-18 8:09 ` Jun'ichi Nomura
2010-10-18 11:55 ` Hannes Reinecke
2010-10-19 4:03 ` Jun'ichi Nomura
2010-11-19 3:11 ` [dm-devel] " Malahal Naineni
2010-11-30 22:59 ` Mike Snitzer
2010-12-07 23:16 ` [RFC PATCH 0/3] differentiate between I/O errors Mike Snitzer
2010-12-07 23:16 ` [RFC PATCH v2 1/3] scsi: Detailed " Mike Snitzer
2010-12-07 23:16 ` [RFC PATCH v2 2/3] dm mpath: propagate target errors immediately Mike Snitzer
2010-12-07 23:16 ` [RFC PATCH 3/3] block: improve detail in I/O error messages Mike Snitzer
2010-12-08 11:28 ` Sergei Shtylyov
2010-12-08 15:05 ` [PATCH v2 " Mike Snitzer
2010-12-10 23:40 ` [RFC PATCH 0/3] differentiate between I/O errors Malahal Naineni
2011-01-14 1:15 ` Mike Snitzer
2010-12-17 9:47 ` training mpath to discern between SCSI errors Hannes Reinecke
2010-12-17 14:06 ` Mike Snitzer
2011-01-14 1:09 ` Mike Snitzer
2011-01-14 7:45 ` Hannes Reinecke
2011-01-14 13:59 ` Mike Snitzer
2010-08-24 17:11 ` [PATCHSET block#for-2.6.36-post] block: replace barrier with sequenced flush Vladislav Bolkhovitin
2010-08-24 23:14 ` Alan Cox
2010-08-13 12:55 ` Vladislav Bolkhovitin
2010-08-13 13:17 ` Christoph Hellwig
2010-08-18 19:29 ` Vladislav Bolkhovitin
2010-08-13 13:21 ` Tejun Heo
2010-08-18 19:30 ` Vladislav Bolkhovitin
2010-08-19 9:51 ` Tejun Heo
2010-08-30 9:54 ` Hannes Reinecke
2010-08-30 20:34 ` Vladislav Bolkhovitin
2010-08-18 9:46 ` Christoph Hellwig
2010-08-19 9:57 ` Tejun Heo
2010-08-19 10:20 ` Christoph Hellwig
2010-08-19 10:22 ` Tejun Heo
2010-08-20 13:22 ` Christoph Hellwig
2010-08-20 15:18 ` Ric Wheeler
2010-08-20 16:00 ` Chris Mason
2010-08-20 16:02 ` Ric Wheeler
2010-08-23 12:30 ` Tejun Heo
2010-08-23 12:48 ` Christoph Hellwig
2010-08-23 13:58 ` Ric Wheeler
2010-08-23 14:01 ` Jens Axboe
2010-08-23 14:08 ` Christoph Hellwig
2010-08-23 14:13 ` Tejun Heo
2010-08-23 14:19 ` Christoph Hellwig
2010-08-25 11:31 ` Jens Axboe
2010-08-30 10:04 ` Hannes Reinecke
2010-08-23 15:19 ` Ric Wheeler
2010-08-23 16:45 ` Sergey Vlasov
2010-08-23 16:49 ` [dm-devel] " Ric Wheeler
2010-08-23 12:36 ` Tejun Heo
2010-08-23 14:05 ` Christoph Hellwig
2010-08-23 14:15 ` [PATCH] block: simplify queue_next_fseq Christoph Hellwig
2010-08-23 16:28 ` OT grammar nit " John Robinson
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1281616891-5691-3-git-send-email-tj@kernel.org \
--to=tj@kernel.org \
--cc=chrisw@sous-sol.org \
--cc=hch@infradead.org \
--cc=hch@lst.de \
--cc=jaxboe@fusionio.com \
--cc=jeremy@xensource.com \
--cc=linux-fsdevel@vger.kernel.org \
--cc=linux-ide@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-raid@vger.kernel.org \
--cc=linux-scsi@vger.kernel.org \
--cc=mst@redhat.com \
--cc=npiggin@kernel.dk \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).