* Re: [PATCH Linux 2.6.12-rc6-mm1 01/06] blk: implement generic dispatch queue
2005-06-16 4:56 [PATCH Linux 2.6.12-rc6-mm1 00/06] blk: generic dispatch queue (for review) Tejun Heo
@ 2005-06-16 4:56 ` Tejun Heo
2005-06-16 4:56 ` [PATCH Linux 2.6.12-rc6-mm1 02/06] blk: update noop iosched to use " Tejun Heo
` (5 subsequent siblings)
6 siblings, 0 replies; 8+ messages in thread
From: Tejun Heo @ 2005-06-16 4:56 UTC (permalink / raw)
To: axboe; +Cc: linux-kernel
01_blk_dispatch_queue.patch
Implements generic dispatch queue which can replace all
dispatch queues implemented by each iosched. This reduces
code duplication, eases enforcing semantics over dispatch
queue, and simplifies specific ioscheds.
Signed-off-by: Tejun Heo <htejun@gmail.com>
drivers/block/elevator.c | 245 ++++++++++++++++++++++++++++++----------------
drivers/block/ll_rw_blk.c | 26 ++--
include/linux/blkdev.h | 20 ++-
include/linux/elevator.h | 16 +--
4 files changed, 192 insertions(+), 115 deletions(-)
Index: blk-fixes/drivers/block/elevator.c
===================================================================
--- blk-fixes.orig/drivers/block/elevator.c 2005-06-16 13:55:37.000000000 +0900
+++ blk-fixes/drivers/block/elevator.c 2005-06-16 13:55:37.000000000 +0900
@@ -37,18 +37,14 @@
#include <asm/uaccess.h>
-/*
- * XXX HACK XXX Before entering elevator callbacks, we temporailiy
- * turn off REQ_CMD of proxy barrier request so that elevators don't
- * try to account it as a normal one. This ugliness can go away once
- * generic dispatch queue is implemented. - tj
- */
-#define bar_rq_hack_start(q) ((q)->bar_rq && ((q)->bar_rq->flags &= ~REQ_CMD))
-#define bar_rq_hack_end(q) ((q)->bar_rq && ((q)->bar_rq->flags |= REQ_CMD))
-
static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);
+static inline sector_t rq_last_sector(struct request *rq)
+{
+ return rq->sector + rq->nr_sectors;
+}
+
static inline void elv_inc_inflight(request_queue_t *q)
{
q->in_flight++;
@@ -171,6 +167,9 @@ static int elevator_attach(request_queue
INIT_LIST_HEAD(&q->queue_head);
q->last_merge = NULL;
q->elevator = eq;
+ q->last_sector = 0;
+ q->boundary_rq = 0;
+ q->max_back_kb = 0;
if (eq->ops->elevator_init_fn)
ret = eq->ops->elevator_init_fn(q, eq);
@@ -249,6 +248,45 @@ void elevator_exit(elevator_t *e)
kfree(e);
}
+/*
+ * Insert rq into dispatch queue of q. Queue lock must be held on
+ * entry. If sort != 0, rq is sort-inserted; otherwise, rq will be
+ * appended to the dispatch queue. To be used by specific elevators.
+ */
+void elv_dispatch_insert(request_queue_t *q, struct request *rq, int sort)
+{
+ sector_t boundary;
+ unsigned max_back;
+ struct list_head *entry;
+
+ if (!sort) {
+ /* Specific elevator is performing sort. Step away. */
+ q->last_sector = rq_last_sector(rq);
+ q->boundary_rq = rq;
+ list_add_tail(&rq->queuelist, &q->queue_head);
+ return;
+ }
+
+ boundary = q->last_sector;
+ max_back = q->max_back_kb * 2;
+ boundary = boundary > max_back ? boundary - max_back : 0;
+
+ list_for_each_prev(entry, &q->queue_head) {
+ struct request *pos = list_entry_rq(entry);
+
+ if (pos->flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
+ break;
+ if (rq->sector >= boundary && pos->sector < boundary)
+ continue;
+ if (rq->sector >= pos->sector)
+ break;
+ if (rq->sector < boundary && pos->sector >= boundary)
+ break;
+ }
+
+ list_add(&rq->queuelist, entry);
+}
+
int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
elevator_t *e = q->elevator;
@@ -279,13 +317,7 @@ void elv_merge_requests(request_queue_t
e->ops->elevator_merge_req_fn(q, rq, next);
}
-/*
- * For careful internal use by the block layer. Essentially the same as
- * a requeue in that it tells the io scheduler that this request is not
- * active in the driver or hardware anymore, but we don't want the request
- * added back to the scheduler. Function is not exported.
- */
-void elv_deactivate_request(request_queue_t *q, struct request *rq)
+void elv_requeue_request(request_queue_t *q, struct request *rq)
{
elevator_t *e = q->elevator;
@@ -293,75 +325,103 @@ void elv_deactivate_request(request_queu
* it already went through dequeue, we need to decrement the
* in_flight count again
*/
- if (blk_account_rq(rq))
+ if (blk_account_rq(rq)) {
elv_dec_inflight(q);
-
- rq->flags &= ~REQ_STARTED;
-
- if (e->ops->elevator_deactivate_req_fn) {
- bar_rq_hack_start(q);
- e->ops->elevator_deactivate_req_fn(q, rq);
- bar_rq_hack_end(q);
+ if (blk_sorted_rq(rq) && e->ops->elevator_deactivate_req_fn)
+ e->ops->elevator_deactivate_req_fn(q, rq);
}
-}
-
-void elv_requeue_request(request_queue_t *q, struct request *rq)
-{
- elv_deactivate_request(q, rq);
- /*
- * the request is prepped and may have some resources allocated.
- * allowing unprepped requests to pass this one may cause resource
- * deadlock. turn on softbarrier.
- */
- rq->flags |= REQ_SOFTBARRIER;
+ rq->flags &= ~REQ_STARTED;
- /*
- * if iosched has an explicit requeue hook, then use that. otherwise
- * just put the request at the front of the queue
- */
- if (q->elevator->ops->elevator_requeue_req_fn) {
- bar_rq_hack_start(q);
- q->elevator->ops->elevator_requeue_req_fn(q, rq);
- bar_rq_hack_end(q);
- } else
- __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+ __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
}
void __elv_add_request(request_queue_t *q, struct request *rq, int where,
int plug)
{
- /*
- * barriers implicitly indicate back insertion
- */
- if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER) &&
- where == ELEVATOR_INSERT_SORT)
- where = ELEVATOR_INSERT_BACK;
+ if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
+ /*
+ * barriers implicitly indicate back insertion
+ */
+ if (where == ELEVATOR_INSERT_SORT)
+ where = ELEVATOR_INSERT_BACK;
+
+ /*
+ * this request is scheduling boundary, update last_sector
+ */
+ if (blk_fs_request(rq)) {
+ q->last_sector = rq_last_sector(rq);
+ q->boundary_rq = rq;
+ }
+ }
if (plug)
blk_plug_device(q);
rq->q = q;
- if (!test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)) {
- bar_rq_hack_start(q);
- q->elevator->ops->elevator_add_req_fn(q, rq, where);
- bar_rq_hack_end(q);
-
- if (blk_queue_plugged(q)) {
- int nrq = q->rq.count[READ] + q->rq.count[WRITE]
- - q->in_flight;
-
- if (nrq >= q->unplug_thresh)
- __generic_unplug_device(q);
- }
- } else
+ if (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags))) {
/*
* if drain is set, store the request "locally". when the drain
* is finished, the requests will be handed ordered to the io
* scheduler
*/
list_add_tail(&rq->queuelist, &q->drain_list);
+ return;
+ }
+
+ switch (where) {
+ case ELEVATOR_INSERT_FRONT:
+ /*
+ * don't let dispatch sorting pass front-inserted requests.
+ */
+ rq->flags |= REQ_SOFTBARRIER;
+
+ list_add(&rq->queuelist, &q->queue_head);
+ break;
+
+ case ELEVATOR_INSERT_BACK:
+ /*
+ * don't let dispatch sorting pass back-inserted requests.
+ */
+ rq->flags |= REQ_SOFTBARRIER;
+
+ while (q->elevator->ops->elevator_dispatch_fn(q, 1))
+ ;
+ list_add_tail(&rq->queuelist, &q->queue_head);
+ /*
+ * We kick the queue here for the following reasons.
+ * - The elevator might have returned NULL previously
+ * to delay requests and returned them now. As the
+ * queue wasn't empty before this request, ll_rw_blk
+ * won't run the queue on return, resulting in hang.
+ * - Usually, back inserted requests won't be merged
+ * with anything. There's no point in delaying queue
+ * processing.
+ */
+ blk_remove_plug(q);
+ q->request_fn(q);
+ break;
+
+ case ELEVATOR_INSERT_SORT:
+ BUG_ON(!blk_fs_request(rq));
+ rq->flags |= REQ_SORTED;
+ q->elevator->ops->elevator_add_req_fn(q, rq);
+ break;
+
+ default:
+ printk(KERN_ERR "%s: bad insertion point %d\n",
+ __FUNCTION__, where);
+ BUG();
+ }
+
+ if (blk_queue_plugged(q)) {
+ int nrq = q->rq.count[READ] + q->rq.count[WRITE]
+ - q->in_flight;
+
+ if (nrq >= q->unplug_thresh)
+ __generic_unplug_device(q);
+ }
}
void elv_add_request(request_queue_t *q, struct request *rq, int where,
@@ -377,10 +437,17 @@ void elv_add_request(request_queue_t *q,
static inline struct request *__elv_next_request(request_queue_t *q)
{
struct request *rq;
- while ((rq = q->elevator->ops->elevator_next_req_fn(q)))
- if (blk_do_ordered(q, &rq))
- break;
- return rq;
+
+ while (1) {
+ while (!list_empty(&q->queue_head)) {
+ rq = list_entry_rq(q->queue_head.next);
+ if (blk_do_ordered(q, &rq))
+ return rq;
+ }
+
+ if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
+ return NULL;
+ }
}
struct request *elv_next_request(request_queue_t *q)
@@ -399,6 +466,11 @@ struct request *elv_next_request(request
if (rq == q->last_merge)
q->last_merge = NULL;
+ if (!q->boundary_rq || q->boundary_rq == rq) {
+ q->last_sector = rq_last_sector(rq);
+ q->boundary_rq = NULL;
+ }
+
if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
break;
@@ -409,9 +481,9 @@ struct request *elv_next_request(request
/*
* the request may have been (partially) prepped.
* we need to keep this request in the front to
- * avoid resource deadlock. turn on softbarrier.
+ * avoid resource deadlock. REQ_STARTED will
+ * prevent other fs requests from passing this one.
*/
- rq->flags |= REQ_SOFTBARRIER;
rq = NULL;
break;
} else if (ret == BLKPREP_KILL) {
@@ -434,10 +506,14 @@ struct request *elv_next_request(request
return rq;
}
-void elv_remove_request(request_queue_t *q, struct request *rq)
+void elv_dequeue_request(request_queue_t *q, struct request *rq)
{
elevator_t *e = q->elevator;
+ BUG_ON(list_empty(&rq->queuelist));
+
+ list_del_init(&rq->queuelist);
+
/*
* the time frame between a request being removed from the lists
* and to it is freed is accounted as io that is in progress at
@@ -445,8 +521,11 @@ void elv_remove_request(request_queue_t
* driver has seen (REQ_STARTED set), to avoid false accounting
* for request-request merges
*/
- if (blk_account_rq(rq))
+ if (blk_account_rq(rq)) {
elv_inc_inflight(q);
+ if (blk_sorted_rq(rq) && e->ops->elevator_activate_req_fn)
+ e->ops->elevator_activate_req_fn(q, rq);
+ }
/*
* the main clearing point for q->last_merge is on retrieval of
@@ -457,22 +536,19 @@ void elv_remove_request(request_queue_t
*/
if (rq == q->last_merge)
q->last_merge = NULL;
-
- if (e->ops->elevator_remove_req_fn) {
- bar_rq_hack_start(q);
- e->ops->elevator_remove_req_fn(q, rq);
- bar_rq_hack_end(q);
- }
}
int elv_queue_empty(request_queue_t *q)
{
elevator_t *e = q->elevator;
+ if (!list_empty(&q->queue_head))
+ return 0;
+
if (e->ops->elevator_queue_empty_fn)
return e->ops->elevator_queue_empty_fn(q);
- return list_empty(&q->queue_head);
+ return 1;
}
struct request *elv_latter_request(request_queue_t *q, struct request *rq)
@@ -544,11 +620,11 @@ void elv_completed_request(request_queue
/*
* request is released from the driver, io must be done
*/
- if (blk_account_rq(rq))
+ if (blk_account_rq(rq)) {
elv_dec_inflight(q);
-
- if (e->ops->elevator_completed_req_fn)
- e->ops->elevator_completed_req_fn(q, rq);
+ if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
+ e->ops->elevator_completed_req_fn(q, rq);
+ }
}
int elv_register_queue(struct request_queue *q)
@@ -722,11 +798,12 @@ ssize_t elv_iosched_show(request_queue_t
return len;
}
+EXPORT_SYMBOL(elv_dispatch_insert);
EXPORT_SYMBOL(elv_add_request);
EXPORT_SYMBOL(__elv_add_request);
EXPORT_SYMBOL(elv_requeue_request);
EXPORT_SYMBOL(elv_next_request);
-EXPORT_SYMBOL(elv_remove_request);
+EXPORT_SYMBOL(elv_dequeue_request);
EXPORT_SYMBOL(elv_queue_empty);
EXPORT_SYMBOL(elv_completed_request);
EXPORT_SYMBOL(elevator_exit);
Index: blk-fixes/drivers/block/ll_rw_blk.c
===================================================================
--- blk-fixes.orig/drivers/block/ll_rw_blk.c 2005-06-16 13:55:37.000000000 +0900
+++ blk-fixes/drivers/block/ll_rw_blk.c 2005-06-16 13:55:37.000000000 +0900
@@ -519,16 +519,19 @@ void blk_ordered_complete_seq(request_qu
static void pre_flush_end_io(struct request *rq, int error)
{
+ elv_completed_request(rq->q, rq);
blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
}
static void bar_end_io(struct request *rq, int error)
{
+ elv_completed_request(rq->q, rq);
blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
}
static void post_flush_end_io(struct request *rq, int error)
{
+ elv_completed_request(rq->q, rq);
blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
}
@@ -1239,6 +1242,7 @@ EXPORT_SYMBOL(blk_queue_invalidate_tags)
static char *rq_flags[] = {
"REQ_RW",
"REQ_FAILFAST",
+ "REQ_SORTED",
"REQ_SOFTBARRIER",
"REQ_HARDBARRIER",
"REQ_FUA",
@@ -2556,6 +2560,8 @@ static void __blk_put_request(request_qu
if (unlikely(--req->ref_count))
return;
+ elv_completed_request(q, req);
+
req->rq_status = RQ_INACTIVE;
req->rl = NULL;
@@ -2566,8 +2572,6 @@ static void __blk_put_request(request_qu
if (rl) {
int rw = rq_data_dir(req);
- elv_completed_request(q, req);
-
BUG_ON(!list_empty(&req->queuelist));
blk_free_request(q, req);
@@ -2577,18 +2581,12 @@ static void __blk_put_request(request_qu
void blk_put_request(struct request *req)
{
- /*
- * if req->rl isn't set, this request didnt originate from the
- * block layer, so it's safe to just disregard it
- */
- if (req->rl) {
- unsigned long flags;
- request_queue_t *q = req->q;
-
- spin_lock_irqsave(q->queue_lock, flags);
- __blk_put_request(q, req);
- spin_unlock_irqrestore(q->queue_lock, flags);
- }
+ unsigned long flags;
+ request_queue_t *q = req->q;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ __blk_put_request(q, req);
+ spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_put_request);
Index: blk-fixes/include/linux/blkdev.h
===================================================================
--- blk-fixes.orig/include/linux/blkdev.h 2005-06-16 13:55:37.000000000 +0900
+++ blk-fixes/include/linux/blkdev.h 2005-06-16 13:55:37.000000000 +0900
@@ -203,6 +203,7 @@ struct request {
enum rq_flag_bits {
__REQ_RW, /* not set, read. set, write */
__REQ_FAILFAST, /* no low level driver retries */
+ __REQ_SORTED, /* elevator knows about this request */
__REQ_SOFTBARRIER, /* may not be passed by ioscheduler */
__REQ_HARDBARRIER, /* may not be passed by drive either */
__REQ_FUA, /* forced unit access */
@@ -233,6 +234,7 @@ enum rq_flag_bits {
#define REQ_RW (1 << __REQ_RW)
#define REQ_FAILFAST (1 << __REQ_FAILFAST)
+#define REQ_SORTED (1 << __REQ_SORTED)
#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER)
#define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER)
#define REQ_FUA (1 << __REQ_FUA)
@@ -326,6 +328,13 @@ struct request_queue
prepare_flush_fn *prepare_flush_fn;
/*
+ * Dispatch queue sorting
+ */
+ sector_t last_sector;
+ struct request *boundary_rq;
+ unsigned int max_back_kb;
+
+ /*
* Auto-unplugging state
*/
struct timer_list unplug_timer;
@@ -486,14 +495,14 @@ enum {
#define blk_noretry_request(rq) ((rq)->flags & REQ_FAILFAST)
#define blk_rq_started(rq) ((rq)->flags & REQ_STARTED)
-#define blk_account_rq(rq) \
- (blk_rq_started(rq) && blk_fs_request(rq) && rq != rq->q->bar_rq)
+#define blk_account_rq(rq) (blk_rq_started(rq) && blk_fs_request(rq))
#define blk_pm_suspend_request(rq) ((rq)->flags & REQ_PM_SUSPEND)
#define blk_pm_resume_request(rq) ((rq)->flags & REQ_PM_RESUME)
#define blk_pm_request(rq) \
((rq)->flags & (REQ_PM_SUSPEND | REQ_PM_RESUME))
+#define blk_sorted_rq(rq) ((rq)->flags & REQ_SORTED)
#define blk_barrier_rq(rq) ((rq)->flags & REQ_HARDBARRIER)
#define blk_fua_rq(rq) ((rq)->flags & REQ_FUA)
@@ -648,12 +657,7 @@ extern void end_request(struct request *
static inline void blkdev_dequeue_request(struct request *req)
{
- BUG_ON(list_empty(&req->queuelist));
-
- list_del_init(&req->queuelist);
-
- if (req->rl)
- elv_remove_request(req->q, req);
+ elv_dequeue_request(req->q, req);
}
/*
Index: blk-fixes/include/linux/elevator.h
===================================================================
--- blk-fixes.orig/include/linux/elevator.h 2005-06-16 13:55:37.000000000 +0900
+++ blk-fixes/include/linux/elevator.h 2005-06-16 13:55:37.000000000 +0900
@@ -8,18 +8,17 @@ typedef void (elevator_merge_req_fn) (re
typedef void (elevator_merged_fn) (request_queue_t *, struct request *);
-typedef struct request *(elevator_next_req_fn) (request_queue_t *);
+typedef int (elevator_dispatch_fn) (request_queue_t *, int);
-typedef void (elevator_add_req_fn) (request_queue_t *, struct request *, int);
+typedef void (elevator_add_req_fn) (request_queue_t *, struct request *);
typedef int (elevator_queue_empty_fn) (request_queue_t *);
-typedef void (elevator_remove_req_fn) (request_queue_t *, struct request *);
-typedef void (elevator_requeue_req_fn) (request_queue_t *, struct request *);
typedef struct request *(elevator_request_list_fn) (request_queue_t *, struct request *);
typedef void (elevator_completed_req_fn) (request_queue_t *, struct request *);
typedef int (elevator_may_queue_fn) (request_queue_t *, int, struct bio *);
typedef int (elevator_set_req_fn) (request_queue_t *, struct request *, struct bio *, int);
typedef void (elevator_put_req_fn) (request_queue_t *, struct request *);
+typedef void (elevator_activate_req_fn) (request_queue_t *, struct request *);
typedef void (elevator_deactivate_req_fn) (request_queue_t *, struct request *);
typedef int (elevator_init_fn) (request_queue_t *, elevator_t *);
@@ -31,10 +30,9 @@ struct elevator_ops
elevator_merged_fn *elevator_merged_fn;
elevator_merge_req_fn *elevator_merge_req_fn;
- elevator_next_req_fn *elevator_next_req_fn;
+ elevator_dispatch_fn *elevator_dispatch_fn;
elevator_add_req_fn *elevator_add_req_fn;
- elevator_remove_req_fn *elevator_remove_req_fn;
- elevator_requeue_req_fn *elevator_requeue_req_fn;
+ elevator_activate_req_fn *elevator_activate_req_fn;
elevator_deactivate_req_fn *elevator_deactivate_req_fn;
elevator_queue_empty_fn *elevator_queue_empty_fn;
@@ -81,15 +79,15 @@ struct elevator_queue
/*
* block elevator interface
*/
+extern void elv_dispatch_insert(request_queue_t *, struct request *, int);
extern void elv_add_request(request_queue_t *, struct request *, int, int);
extern void __elv_add_request(request_queue_t *, struct request *, int, int);
extern int elv_merge(request_queue_t *, struct request **, struct bio *);
extern void elv_merge_requests(request_queue_t *, struct request *,
struct request *);
extern void elv_merged_request(request_queue_t *, struct request *);
-extern void elv_remove_request(request_queue_t *, struct request *);
+extern void elv_dequeue_request(request_queue_t *, struct request *);
extern void elv_requeue_request(request_queue_t *, struct request *);
-extern void elv_deactivate_request(request_queue_t *, struct request *);
extern int elv_queue_empty(request_queue_t *);
extern struct request *elv_next_request(struct request_queue *q);
extern struct request *elv_former_request(request_queue_t *, struct request *);
^ permalink raw reply [flat|nested] 8+ messages in thread* Re: [PATCH Linux 2.6.12-rc6-mm1 02/06] blk: update noop iosched to use generic dispatch queue
2005-06-16 4:56 [PATCH Linux 2.6.12-rc6-mm1 00/06] blk: generic dispatch queue (for review) Tejun Heo
2005-06-16 4:56 ` [PATCH Linux 2.6.12-rc6-mm1 01/06] blk: implement generic dispatch queue Tejun Heo
@ 2005-06-16 4:56 ` Tejun Heo
2005-06-16 4:56 ` [PATCH Linux 2.6.12-rc6-mm1 03/06] blk: update cfq " Tejun Heo
` (4 subsequent siblings)
6 siblings, 0 replies; 8+ messages in thread
From: Tejun Heo @ 2005-06-16 4:56 UTC (permalink / raw)
To: axboe; +Cc: linux-kernel
02_blk_dispatch_queue_noop.patch
Update noop iosched to use generic dispatch queue
Signed-off-by: Tejun Heo <htejun@gmail.com>
noop-iosched.c | 17 +++++------------
1 files changed, 5 insertions(+), 12 deletions(-)
Index: blk-fixes/drivers/block/noop-iosched.c
===================================================================
--- blk-fixes.orig/drivers/block/noop-iosched.c 2005-06-16 13:55:36.000000000 +0900
+++ blk-fixes/drivers/block/noop-iosched.c 2005-06-16 13:55:38.000000000 +0900
@@ -28,13 +28,9 @@ static void elevator_noop_merge_requests
list_del_init(&next->queuelist);
}
-static void elevator_noop_add_request(request_queue_t *q, struct request *rq,
- int where)
+static void elevator_noop_add_request(request_queue_t *q, struct request *rq)
{
- if (where == ELEVATOR_INSERT_FRONT)
- list_add(&rq->queuelist, &q->queue_head);
- else
- list_add_tail(&rq->queuelist, &q->queue_head);
+ elv_dispatch_insert(q, rq, 0);
/*
* new merges must not precede this barrier
@@ -45,19 +41,16 @@ static void elevator_noop_add_request(re
q->last_merge = rq;
}
-static struct request *elevator_noop_next_request(request_queue_t *q)
+static int elevator_noop_dispatch(request_queue_t *q, int force)
{
- if (!list_empty(&q->queue_head))
- return list_entry_rq(q->queue_head.next);
-
- return NULL;
+ return 0;
}
static struct elevator_type elevator_noop = {
.ops = {
.elevator_merge_fn = elevator_noop_merge,
.elevator_merge_req_fn = elevator_noop_merge_requests,
- .elevator_next_req_fn = elevator_noop_next_request,
+ .elevator_dispatch_fn = elevator_noop_dispatch,
.elevator_add_req_fn = elevator_noop_add_request,
},
.elevator_name = "noop",
^ permalink raw reply [flat|nested] 8+ messages in thread* Re: [PATCH Linux 2.6.12-rc6-mm1 03/06] blk: update cfq iosched to use generic dispatch queue
2005-06-16 4:56 [PATCH Linux 2.6.12-rc6-mm1 00/06] blk: generic dispatch queue (for review) Tejun Heo
2005-06-16 4:56 ` [PATCH Linux 2.6.12-rc6-mm1 01/06] blk: implement generic dispatch queue Tejun Heo
2005-06-16 4:56 ` [PATCH Linux 2.6.12-rc6-mm1 02/06] blk: update noop iosched to use " Tejun Heo
@ 2005-06-16 4:56 ` Tejun Heo
2005-06-16 4:56 ` [PATCH Linux 2.6.12-rc6-mm1 04/06] blk: move last_merge handling into generic elevator code Tejun Heo
` (3 subsequent siblings)
6 siblings, 0 replies; 8+ messages in thread
From: Tejun Heo @ 2005-06-16 4:56 UTC (permalink / raw)
To: axboe; +Cc: linux-kernel
03_blk_dispatch_queue_cfq.patch
Update cfq iosched to use generic dispatch queue
Signed-off-by: Tejun Heo <htejun@gmail.com>
cfq-iosched.c | 313 +++++++++++++---------------------------------------------
1 files changed, 72 insertions(+), 241 deletions(-)
Index: blk-fixes/drivers/block/cfq-iosched.c
===================================================================
--- blk-fixes.orig/drivers/block/cfq-iosched.c 2005-06-16 13:55:37.000000000 +0900
+++ blk-fixes/drivers/block/cfq-iosched.c 2005-06-16 13:55:38.000000000 +0900
@@ -83,7 +83,6 @@ static int cfq_max_depth = 1;
(node)->rb_left = NULL; \
} while (0)
#define RB_CLEAR_ROOT(root) ((root)->rb_node = NULL)
-#define ON_RB(node) ((node)->rb_color != RB_NONE)
#define rb_entry_crq(node) rb_entry((node), struct cfq_rq, rb_node)
#define rq_rb_key(rq) (rq)->sector
@@ -233,14 +232,11 @@ struct cfq_rq {
struct cfq_queue *cfq_queue;
struct cfq_io_context *io_context;
- unsigned in_flight : 1;
- unsigned accounted : 1;
unsigned is_sync : 1;
- unsigned requeued : 1;
};
static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int);
-static void cfq_dispatch_sort(request_queue_t *, struct cfq_rq *);
+static void cfq_dispatch_insert(request_queue_t *, struct cfq_rq *, int);
static void cfq_put_cfqd(struct cfq_data *cfqd);
#define process_sync(tsk) ((tsk)->flags & PF_SYNCWRITE)
@@ -253,14 +249,6 @@ static inline void cfq_del_crq_hash(stru
hlist_del_init(&crq->hash);
}
-static void cfq_remove_merge_hints(request_queue_t *q, struct cfq_rq *crq)
-{
- cfq_del_crq_hash(crq);
-
- if (q->last_merge == crq->request)
- q->last_merge = NULL;
-}
-
static inline void cfq_add_crq_hash(struct cfq_data *cfqd, struct cfq_rq *crq)
{
const int hash_idx = CFQ_MHASH_FN(rq_hash_key(crq->request));
@@ -305,10 +293,6 @@ cfq_choose_req(struct cfq_data *cfqd, st
return crq2;
if (crq2 == NULL)
return crq1;
- if (crq1->requeued)
- return crq1;
- if (crq2->requeued)
- return crq2;
s1 = crq1->request->sector;
s2 = crq2->request->sector;
@@ -375,10 +359,7 @@ cfq_find_next_crq(struct cfq_data *cfqd,
struct cfq_rq *crq_next = NULL, *crq_prev = NULL;
struct rb_node *rbnext, *rbprev;
- rbnext = NULL;
- if (ON_RB(&last->rb_node))
- rbnext = rb_next(&last->rb_node);
- if (!rbnext) {
+ if (!(rbnext = rb_next(&last->rb_node))) {
rbnext = rb_first(&cfqq->sort_list);
if (rbnext == &last->rb_node)
rbnext = NULL;
@@ -459,13 +440,13 @@ static void cfq_resort_rr_list(struct cf
* the pending list according to last request service
*/
static inline void
-cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq, int requeue)
+cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
BUG_ON(cfqq->on_rr);
cfqq->on_rr = 1;
cfqd->busy_queues++;
- cfq_resort_rr_list(cfqq, requeue);
+ cfq_resort_rr_list(cfqq, 0);
}
static inline void
@@ -485,22 +466,20 @@ cfq_del_cfqq_rr(struct cfq_data *cfqd, s
static inline void cfq_del_crq_rb(struct cfq_rq *crq)
{
struct cfq_queue *cfqq = crq->cfq_queue;
+ struct cfq_data *cfqd = cfqq->cfqd;
+ const int sync = crq->is_sync;
- if (ON_RB(&crq->rb_node)) {
- struct cfq_data *cfqd = cfqq->cfqd;
- const int sync = crq->is_sync;
+ BUG_ON(!cfqq->queued[sync]);
+ cfqq->queued[sync]--;
- BUG_ON(!cfqq->queued[sync]);
- cfqq->queued[sync]--;
+ cfq_update_next_crq(crq);
- cfq_update_next_crq(crq);
+ rb_erase(&crq->rb_node, &cfqq->sort_list);
+ RB_CLEAR_COLOR(&crq->rb_node);
- rb_erase(&crq->rb_node, &cfqq->sort_list);
- RB_CLEAR_COLOR(&crq->rb_node);
+ if (cfqq->on_rr && RB_EMPTY(&cfqq->sort_list))
+ cfq_del_cfqq_rr(cfqd, cfqq);
- if (cfqq->on_rr && RB_EMPTY(&cfqq->sort_list))
- cfq_del_cfqq_rr(cfqd, cfqq);
- }
}
static struct cfq_rq *
@@ -541,12 +520,12 @@ static void cfq_add_crq_rb(struct cfq_rq
* if that happens, put the alias on the dispatch list
*/
while ((__alias = __cfq_add_crq_rb(crq)) != NULL)
- cfq_dispatch_sort(cfqd->queue, __alias);
+ cfq_dispatch_insert(cfqd->queue, __alias, 1);
rb_insert_color(&crq->rb_node, &cfqq->sort_list);
if (!cfqq->on_rr)
- cfq_add_cfqq_rr(cfqd, cfqq, crq->requeued);
+ cfq_add_cfqq_rr(cfqd, cfqq);
/*
* check if this request is a better next-serve candidate
@@ -557,10 +536,8 @@ static void cfq_add_crq_rb(struct cfq_rq
static inline void
cfq_reposition_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
{
- if (ON_RB(&crq->rb_node)) {
- rb_erase(&crq->rb_node, &cfqq->sort_list);
- cfqq->queued[crq->is_sync]--;
- }
+ rb_erase(&crq->rb_node, &cfqq->sort_list);
+ cfqq->queued[crq->is_sync]--;
cfq_add_crq_rb(crq);
}
@@ -590,47 +567,28 @@ out:
return NULL;
}
-static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
+static void cfq_activate_request(request_queue_t *q, struct request *rq)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
- struct cfq_rq *crq = RQ_DATA(rq);
-
- if (crq) {
- struct cfq_queue *cfqq = crq->cfq_queue;
- if (crq->accounted) {
- crq->accounted = 0;
- WARN_ON(!cfqd->rq_in_driver);
- cfqd->rq_in_driver--;
- }
- if (crq->in_flight) {
- crq->in_flight = 0;
- WARN_ON(!cfqq->in_flight);
- cfqq->in_flight--;
- }
- crq->requeued = 1;
- }
+ cfqd->rq_in_driver++;
}
-/*
- * make sure the service time gets corrected on reissue of this request
- */
-static void cfq_requeue_request(request_queue_t *q, struct request *rq)
+static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
{
- cfq_deactivate_request(q, rq);
- list_add(&rq->queuelist, &q->queue_head);
+ struct cfq_data *cfqd = q->elevator->elevator_data;
+
+ WARN_ON(!cfqd->rq_in_driver);
+ cfqd->rq_in_driver--;
}
-static void cfq_remove_request(request_queue_t *q, struct request *rq)
+static void cfq_remove_request(struct request *rq)
{
struct cfq_rq *crq = RQ_DATA(rq);
- if (crq) {
- list_del_init(&rq->queuelist);
- cfq_del_crq_rb(crq);
- cfq_remove_merge_hints(q, crq);
-
- }
+ list_del_init(&rq->queuelist);
+ cfq_del_crq_rb(crq);
+ cfq_del_crq_hash(crq);
}
static int
@@ -674,7 +632,7 @@ static void cfq_merged_request(request_q
cfq_del_crq_hash(crq);
cfq_add_crq_hash(cfqd, crq);
- if (ON_RB(&crq->rb_node) && (rq_rb_key(req) != crq->rb_key)) {
+ if (rq_rb_key(req) != crq->rb_key) {
struct cfq_queue *cfqq = crq->cfq_queue;
cfq_update_next_crq(crq);
@@ -697,7 +655,7 @@ cfq_merged_requests(request_queue_t *q,
time_before(next->start_time, rq->start_time))
list_move(&rq->queuelist, &next->queuelist);
- cfq_remove_request(q, next);
+ cfq_remove_request(next);
}
static inline void
@@ -878,52 +836,16 @@ static int cfq_arm_slice_timer(struct cf
return 1;
}
-/*
- * we dispatch cfqd->cfq_quantum requests in total from the rr_list queues,
- * this function sector sorts the selected request to minimize seeks. we start
- * at cfqd->last_sector, not 0.
- */
-static void cfq_dispatch_sort(request_queue_t *q, struct cfq_rq *crq)
+static void cfq_dispatch_insert(request_queue_t *q, struct cfq_rq *crq,
+ int force)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_queue *cfqq = crq->cfq_queue;
- struct list_head *head = &q->queue_head, *entry = head;
- struct request *__rq;
- sector_t last;
-
- list_del(&crq->request->queuelist);
-
- last = cfqd->last_sector;
- list_for_each_entry_reverse(__rq, head, queuelist) {
- struct cfq_rq *__crq = RQ_DATA(__rq);
-
- if (blk_barrier_rq(__rq))
- break;
- if (!blk_fs_request(__rq))
- break;
- if (__crq->requeued)
- break;
-
- if (__rq->sector <= crq->request->sector)
- break;
- if (__rq->sector > last && crq->request->sector < last) {
- last = crq->request->sector + crq->request->nr_sectors;
- break;
- }
- entry = &__rq->queuelist;
- }
-
- cfqd->last_sector = last;
cfqq->next_crq = cfq_find_next_crq(cfqd, cfqq, crq);
-
- cfq_del_crq_rb(crq);
- cfq_remove_merge_hints(q, crq);
-
- crq->in_flight = 1;
- crq->requeued = 0;
+ cfq_remove_request(crq->request);
cfqq->in_flight++;
- list_add_tail(&crq->request->queuelist, entry);
+ elv_dispatch_insert(q, crq->request, force);
}
/*
@@ -1020,7 +942,7 @@ keep_queue:
static int
__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
- int max_dispatch)
+ int max_dispatch, int force)
{
int dispatched = 0;
@@ -1038,7 +960,7 @@ __cfq_dispatch_requests(struct cfq_data
/*
* finally, insert request into driver dispatch list
*/
- cfq_dispatch_sort(cfqd->queue, crq);
+ cfq_dispatch_insert(cfqd->queue, crq, force);
cfqd->dispatch_slice++;
dispatched++;
@@ -1073,7 +995,7 @@ __cfq_dispatch_requests(struct cfq_data
}
static int
-cfq_dispatch_requests(request_queue_t *q, int max_dispatch, int force)
+cfq_dispatch_requests(request_queue_t *q, int force)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_queue *cfqq;
@@ -1083,96 +1005,31 @@ cfq_dispatch_requests(request_queue_t *q
cfqq = cfq_select_queue(cfqd, force);
if (cfqq) {
+ int max_dispatch;
+
+ /*
+ * if idle window is disabled, allow queue buildup
+ */
+ if (!cfqq->idle_window &&
+ cfqd->rq_in_driver >= cfqd->cfq_max_depth)
+ return 0;
+
cfqq->wait_request = 0;
cfqq->must_dispatch = 0;
del_timer(&cfqd->idle_slice_timer);
- if (cfq_class_idle(cfqq))
- max_dispatch = 1;
+ if (force)
+ max_dispatch = INT_MAX;
+ else
+ max_dispatch =
+ cfq_class_idle(cfqq) ? 1 : cfqd->cfq_quantum;
- return __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
+ return __cfq_dispatch_requests(cfqd, cfqq, max_dispatch, force);
}
return 0;
}
-static inline void cfq_account_dispatch(struct cfq_rq *crq)
-{
- struct cfq_queue *cfqq = crq->cfq_queue;
- struct cfq_data *cfqd = cfqq->cfqd;
-
- if (unlikely(!blk_fs_request(crq->request)))
- return;
-
- /*
- * accounted bit is necessary since some drivers will call
- * elv_next_request() many times for the same request (eg ide)
- */
- if (crq->accounted)
- return;
-
- crq->accounted = 1;
- cfqd->rq_in_driver++;
-}
-
-static inline void
-cfq_account_completion(struct cfq_queue *cfqq, struct cfq_rq *crq)
-{
- struct cfq_data *cfqd = cfqq->cfqd;
- unsigned long now;
-
- if (!crq->accounted)
- return;
-
- now = jiffies;
-
- WARN_ON(!cfqd->rq_in_driver);
- cfqd->rq_in_driver--;
-
- if (!cfq_class_idle(cfqq))
- cfqd->last_end_request = now;
-
- if (!cfqq->in_flight && cfqq->on_rr) {
- cfqq->service_last = now;
- cfq_resort_rr_list(cfqq, 0);
- }
-
- if (crq->is_sync)
- crq->io_context->last_end_request = now;
-}
-
-static struct request *cfq_next_request(request_queue_t *q)
-{
- struct cfq_data *cfqd = q->elevator->elevator_data;
- struct request *rq;
-
- if (!list_empty(&q->queue_head)) {
- struct cfq_rq *crq;
-dispatch:
- rq = list_entry_rq(q->queue_head.next);
-
- crq = RQ_DATA(rq);
- if (crq) {
- /*
- * if idle window is disabled, allow queue buildup
- */
- if (!crq->in_flight && !crq->cfq_queue->idle_window &&
- cfqd->rq_in_driver >= cfqd->cfq_max_depth)
- return NULL;
-
- cfq_remove_merge_hints(q, crq);
- cfq_account_dispatch(crq);
- }
-
- return rq;
- }
-
- if (cfq_dispatch_requests(q, cfqd->cfq_quantum, 0))
- goto dispatch;
-
- return NULL;
-}
-
/*
* task holds one reference to the queue, dropped when task exits. each crq
* in-flight on this queue also holds a reference, dropped when crq is freed.
@@ -1675,8 +1532,9 @@ cfq_crq_enqueued(struct cfq_data *cfqd,
}
}
-static void cfq_enqueue(struct cfq_data *cfqd, struct request *rq)
+static void cfq_insert_request(request_queue_t *q, struct request *rq)
{
+ struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_rq *crq = RQ_DATA(rq);
struct cfq_queue *cfqq = crq->cfq_queue;
@@ -1696,38 +1554,6 @@ static void cfq_enqueue(struct cfq_data
cfq_crq_enqueued(cfqd, cfqq, crq);
}
-static void
-cfq_insert_request(request_queue_t *q, struct request *rq, int where)
-{
- struct cfq_data *cfqd = q->elevator->elevator_data;
-
- switch (where) {
- case ELEVATOR_INSERT_BACK:
- while (cfq_dispatch_requests(q, INT_MAX, 1))
- ;
- list_add_tail(&rq->queuelist, &q->queue_head);
- /*
- * If we were idling with pending requests on
- * inactive cfqqs, force dispatching will
- * remove the idle timer and the queue won't
- * be kicked by __make_request() afterward.
- * Kick it here.
- */
- kblockd_schedule_work(&cfqd->unplug_work);
- break;
- case ELEVATOR_INSERT_FRONT:
- list_add(&rq->queuelist, &q->queue_head);
- break;
- case ELEVATOR_INSERT_SORT:
- BUG_ON(!blk_fs_request(rq));
- cfq_enqueue(cfqd, rq);
- break;
- default:
- printk("%s: bad insert point %d\n", __FUNCTION__,where);
- return;
- }
-}
-
static inline int cfq_pending_requests(struct cfq_data *cfqd)
{
return !list_empty(&cfqd->queue->queue_head) || cfqd->busy_queues;
@@ -1743,19 +1569,27 @@ static int cfq_queue_empty(request_queue
static void cfq_completed_request(request_queue_t *q, struct request *rq)
{
struct cfq_rq *crq = RQ_DATA(rq);
- struct cfq_queue *cfqq;
+ struct cfq_queue *cfqq = crq->cfq_queue;
+ struct cfq_data *cfqd = cfqq->cfqd;
+ unsigned long now;
- if (unlikely(!blk_fs_request(rq)))
- return;
+ now = jiffies;
- cfqq = crq->cfq_queue;
+ WARN_ON(!cfqd->rq_in_driver);
+ WARN_ON(!cfqq->in_flight);
+ cfqd->rq_in_driver--;
+ cfqq->in_flight--;
- if (crq->in_flight) {
- WARN_ON(!cfqq->in_flight);
- cfqq->in_flight--;
+ if (!cfq_class_idle(cfqq))
+ cfqd->last_end_request = now;
+
+ if (!cfqq->in_flight && cfqq->on_rr) {
+ cfqq->service_last = now;
+ cfq_resort_rr_list(cfqq, 0);
}
- cfq_account_completion(cfqq, crq);
+ if (crq->is_sync)
+ crq->io_context->last_end_request = now;
}
static struct request *
@@ -1981,9 +1815,7 @@ cfq_set_request(request_queue_t *q, stru
INIT_HLIST_NODE(&crq->hash);
crq->cfq_queue = cfqq;
crq->io_context = cic;
- crq->in_flight = crq->accounted = 0;
crq->is_sync = (rw == READ || process_sync(current));
- crq->requeued = 0;
rq->elevator_private = crq;
return 0;
}
@@ -2426,10 +2258,9 @@ static struct elevator_type iosched_cfq
.elevator_merge_fn = cfq_merge,
.elevator_merged_fn = cfq_merged_request,
.elevator_merge_req_fn = cfq_merged_requests,
- .elevator_next_req_fn = cfq_next_request,
+ .elevator_dispatch_fn = cfq_dispatch_requests,
.elevator_add_req_fn = cfq_insert_request,
- .elevator_remove_req_fn = cfq_remove_request,
- .elevator_requeue_req_fn = cfq_requeue_request,
+ .elevator_activate_req_fn = cfq_activate_request,
.elevator_deactivate_req_fn = cfq_deactivate_request,
.elevator_queue_empty_fn = cfq_queue_empty,
.elevator_completed_req_fn = cfq_completed_request,
^ permalink raw reply [flat|nested] 8+ messages in thread* Re: [PATCH Linux 2.6.12-rc6-mm1 04/06] blk: move last_merge handling into generic elevator code
2005-06-16 4:56 [PATCH Linux 2.6.12-rc6-mm1 00/06] blk: generic dispatch queue (for review) Tejun Heo
` (2 preceding siblings ...)
2005-06-16 4:56 ` [PATCH Linux 2.6.12-rc6-mm1 03/06] blk: update cfq " Tejun Heo
@ 2005-06-16 4:56 ` Tejun Heo
2005-06-16 4:57 ` [PATCH Linux 2.6.12-rc6-mm1 05/06] blk: remove last_merge handling from noop iosched Tejun Heo
` (2 subsequent siblings)
6 siblings, 0 replies; 8+ messages in thread
From: Tejun Heo @ 2005-06-16 4:56 UTC (permalink / raw)
To: axboe; +Cc: linux-kernel
04_blk_last_merge_consolidation.patch
Currently, both generic elevator code and specific ioscheds
participate in the management and usage of last_merge. This
and the following patches move last_merge handling into
generic elevator code.
Signed-off-by: Tejun Heo <htejun@gmail.com>
drivers/block/elevator.c | 43 ++++++++++++++++++-------------------------
include/linux/elevator.h | 1 -
2 files changed, 18 insertions(+), 26 deletions(-)
Index: blk-fixes/drivers/block/elevator.c
===================================================================
--- blk-fixes.orig/drivers/block/elevator.c 2005-06-16 13:55:37.000000000 +0900
+++ blk-fixes/drivers/block/elevator.c 2005-06-16 13:55:38.000000000 +0900
@@ -108,15 +108,6 @@ inline int elv_try_merge(struct request
}
EXPORT_SYMBOL(elv_try_merge);
-inline int elv_try_last_merge(request_queue_t *q, struct bio *bio)
-{
- if (q->last_merge)
- return elv_try_merge(q->last_merge, bio);
-
- return ELEVATOR_NO_MERGE;
-}
-EXPORT_SYMBOL(elv_try_last_merge);
-
static struct elevator_type *elevator_find(const char *name)
{
struct elevator_type *e = NULL;
@@ -259,6 +250,9 @@ void elv_dispatch_insert(request_queue_t
unsigned max_back;
struct list_head *entry;
+ if (q->last_merge == rq)
+ q->last_merge = NULL;
+
if (!sort) {
/* Specific elevator is performing sort. Step away. */
q->last_sector = rq_last_sector(rq);
@@ -290,6 +284,15 @@ void elv_dispatch_insert(request_queue_t
int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
elevator_t *e = q->elevator;
+ int ret;
+
+ if (q->last_merge) {
+ ret = elv_try_merge(q->last_merge, bio);
+ if (ret != ELEVATOR_NO_MERGE) {
+ *req = q->last_merge;
+ return ret;
+ }
+ }
if (e->ops->elevator_merge_fn)
return e->ops->elevator_merge_fn(q, req, bio);
@@ -303,6 +306,8 @@ void elv_merged_request(request_queue_t
if (e->ops->elevator_merged_fn)
e->ops->elevator_merged_fn(q, rq);
+
+ q->last_merge = rq;
}
void elv_merge_requests(request_queue_t *q, struct request *rq,
@@ -310,11 +315,10 @@ void elv_merge_requests(request_queue_t
{
elevator_t *e = q->elevator;
- if (q->last_merge == next)
- q->last_merge = NULL;
-
if (e->ops->elevator_merge_req_fn)
e->ops->elevator_merge_req_fn(q, rq, next);
+
+ q->last_merge = rq;
}
void elv_requeue_request(request_queue_t *q, struct request *rq)
@@ -407,6 +411,8 @@ void __elv_add_request(request_queue_t *
BUG_ON(!blk_fs_request(rq));
rq->flags |= REQ_SORTED;
q->elevator->ops->elevator_add_req_fn(q, rq);
+ if (q->last_merge == NULL && rq_mergeable(rq))
+ q->last_merge = rq;
break;
default:
@@ -463,9 +469,6 @@ struct request *elv_next_request(request
*/
rq->flags |= REQ_STARTED;
- if (rq == q->last_merge)
- q->last_merge = NULL;
-
if (!q->boundary_rq || q->boundary_rq == rq) {
q->last_sector = rq_last_sector(rq);
q->boundary_rq = NULL;
@@ -526,16 +529,6 @@ void elv_dequeue_request(request_queue_t
if (blk_sorted_rq(rq) && e->ops->elevator_activate_req_fn)
e->ops->elevator_activate_req_fn(q, rq);
}
-
- /*
- * the main clearing point for q->last_merge is on retrieval of
- * request by driver (it calls elv_next_request()), but it _can_
- * also happen here if a request is added to the queue but later
- * deleted without ever being given to driver (merged with another
- * request).
- */
- if (rq == q->last_merge)
- q->last_merge = NULL;
}
int elv_queue_empty(request_queue_t *q)
Index: blk-fixes/include/linux/elevator.h
===================================================================
--- blk-fixes.orig/include/linux/elevator.h 2005-06-16 13:55:37.000000000 +0900
+++ blk-fixes/include/linux/elevator.h 2005-06-16 13:55:38.000000000 +0900
@@ -115,7 +115,6 @@ extern int elevator_init(request_queue_t
extern void elevator_exit(elevator_t *);
extern int elv_rq_merge_ok(struct request *, struct bio *);
extern int elv_try_merge(struct request *, struct bio *);
-extern int elv_try_last_merge(request_queue_t *, struct bio *);
/*
* Return values from elevator merger
^ permalink raw reply [flat|nested] 8+ messages in thread* Re: [PATCH Linux 2.6.12-rc6-mm1 05/06] blk: remove last_merge handling from noop iosched
2005-06-16 4:56 [PATCH Linux 2.6.12-rc6-mm1 00/06] blk: generic dispatch queue (for review) Tejun Heo
` (3 preceding siblings ...)
2005-06-16 4:56 ` [PATCH Linux 2.6.12-rc6-mm1 04/06] blk: move last_merge handling into generic elevator code Tejun Heo
@ 2005-06-16 4:57 ` Tejun Heo
2005-06-16 4:57 ` [PATCH Linux 2.6.12-rc6-mm1 06/06] blk: remove last_merge handling from cfq iosched Tejun Heo
2005-06-16 6:19 ` [PATCH Linux 2.6.12-rc6-mm1 00/06] blk: generic dispatch queue (for review) Tejun Heo
6 siblings, 0 replies; 8+ messages in thread
From: Tejun Heo @ 2005-06-16 4:57 UTC (permalink / raw)
To: axboe; +Cc: linux-kernel
05_blk_last_merge_consolidation_noop.patch
Remove last_merge handling from noop iosched. This change
removes merging capability of noop iosched.
Signed-off-by: Tejun Heo <htejun@gmail.com>
noop-iosched.c | 31 -------------------------------
1 files changed, 31 deletions(-)
Index: blk-fixes/drivers/block/noop-iosched.c
===================================================================
--- blk-fixes.orig/drivers/block/noop-iosched.c 2005-06-16 13:55:38.000000000 +0900
+++ blk-fixes/drivers/block/noop-iosched.c 2005-06-16 13:55:38.000000000 +0900
@@ -7,38 +7,9 @@
#include <linux/module.h>
#include <linux/init.h>
-/*
- * See if we can find a request that this buffer can be coalesced with.
- */
-static int elevator_noop_merge(request_queue_t *q, struct request **req,
- struct bio *bio)
-{
- int ret;
-
- ret = elv_try_last_merge(q, bio);
- if (ret != ELEVATOR_NO_MERGE)
- *req = q->last_merge;
-
- return ret;
-}
-
-static void elevator_noop_merge_requests(request_queue_t *q, struct request *req,
- struct request *next)
-{
- list_del_init(&next->queuelist);
-}
-
static void elevator_noop_add_request(request_queue_t *q, struct request *rq)
{
elv_dispatch_insert(q, rq, 0);
-
- /*
- * new merges must not precede this barrier
- */
- if (rq->flags & REQ_HARDBARRIER)
- q->last_merge = NULL;
- else if (!q->last_merge)
- q->last_merge = rq;
}
static int elevator_noop_dispatch(request_queue_t *q, int force)
@@ -48,8 +19,6 @@ static int elevator_noop_dispatch(reques
static struct elevator_type elevator_noop = {
.ops = {
- .elevator_merge_fn = elevator_noop_merge,
- .elevator_merge_req_fn = elevator_noop_merge_requests,
.elevator_dispatch_fn = elevator_noop_dispatch,
.elevator_add_req_fn = elevator_noop_add_request,
},
^ permalink raw reply [flat|nested] 8+ messages in thread* Re: [PATCH Linux 2.6.12-rc6-mm1 06/06] blk: remove last_merge handling from cfq iosched
2005-06-16 4:56 [PATCH Linux 2.6.12-rc6-mm1 00/06] blk: generic dispatch queue (for review) Tejun Heo
` (4 preceding siblings ...)
2005-06-16 4:57 ` [PATCH Linux 2.6.12-rc6-mm1 05/06] blk: remove last_merge handling from noop iosched Tejun Heo
@ 2005-06-16 4:57 ` Tejun Heo
2005-06-16 6:19 ` [PATCH Linux 2.6.12-rc6-mm1 00/06] blk: generic dispatch queue (for review) Tejun Heo
6 siblings, 0 replies; 8+ messages in thread
From: Tejun Heo @ 2005-06-16 4:57 UTC (permalink / raw)
To: axboe; +Cc: linux-kernel
06_blk_last_merge_consolidation_cfq.patch
Remove last_merge handling from cfq iosched
Signed-off-by: Tejun Heo <htejun@gmail.com>
cfq-iosched.c | 22 ++++------------------
1 files changed, 4 insertions(+), 18 deletions(-)
Index: blk-fixes/drivers/block/cfq-iosched.c
===================================================================
--- blk-fixes.orig/drivers/block/cfq-iosched.c 2005-06-16 13:55:38.000000000 +0900
+++ blk-fixes/drivers/block/cfq-iosched.c 2005-06-16 13:55:39.000000000 +0900
@@ -598,28 +598,20 @@ cfq_merge(request_queue_t *q, struct req
struct request *__rq;
int ret;
- ret = elv_try_last_merge(q, bio);
- if (ret != ELEVATOR_NO_MERGE) {
- __rq = q->last_merge;
- goto out_insert;
- }
-
__rq = cfq_find_rq_hash(cfqd, bio->bi_sector);
if (__rq && elv_rq_merge_ok(__rq, bio)) {
ret = ELEVATOR_BACK_MERGE;
- goto out;
+ goto found;
}
__rq = cfq_find_rq_rb(cfqd, bio->bi_sector + bio_sectors(bio));
if (__rq && elv_rq_merge_ok(__rq, bio)) {
ret = ELEVATOR_FRONT_MERGE;
- goto out;
+ goto found;
}
return ELEVATOR_NO_MERGE;
-out:
- q->last_merge = __rq;
-out_insert:
+ found:
*req = __rq;
return ret;
}
@@ -638,8 +630,6 @@ static void cfq_merged_request(request_q
cfq_update_next_crq(crq);
cfq_reposition_crq_rb(cfqq, crq);
}
-
- q->last_merge = req;
}
static void
@@ -1544,13 +1534,9 @@ static void cfq_insert_request(request_q
list_add_tail(&rq->queuelist, &cfqq->fifo);
- if (rq_mergeable(rq)) {
+ if (rq_mergeable(rq))
cfq_add_crq_hash(cfqd, crq);
- if (!cfqd->queue->last_merge)
- cfqd->queue->last_merge = rq;
- }
-
cfq_crq_enqueued(cfqd, cfqq, crq);
}
^ permalink raw reply [flat|nested] 8+ messages in thread* Re: [PATCH Linux 2.6.12-rc6-mm1 00/06] blk: generic dispatch queue (for review)
2005-06-16 4:56 [PATCH Linux 2.6.12-rc6-mm1 00/06] blk: generic dispatch queue (for review) Tejun Heo
` (5 preceding siblings ...)
2005-06-16 4:57 ` [PATCH Linux 2.6.12-rc6-mm1 06/06] blk: remove last_merge handling from cfq iosched Tejun Heo
@ 2005-06-16 6:19 ` Tejun Heo
6 siblings, 0 replies; 8+ messages in thread
From: Tejun Heo @ 2005-06-16 6:19 UTC (permalink / raw)
To: Tejun Heo; +Cc: axboe, linux-kernel
Tejun Heo wrote:
> Hello, Jens.
>
> This patchset implements generic dispatch queue I've talked about in
> the last ordered reimplementation patchset. The patches are against
> 2.6.12-rc6-mm1 + ordered patchset + 3 last blk fix patches. As I
> haven't posted ordered patchset against 2.6.12-rc6-mm1 (still waiting
> for your comments), to apply this patchset, you'll have to apply the
> ordered patchset against 2.6.12-rc5-mm2 to 2.6.12-rc6-mm1, and then
> apply these patches. libata changes will fail but it wouldn't matter
> for review purpose. (if you want ordered patchset against
> 2.6.12-rc6-mm1, I can send it to you, just tell me.)
>
> This patchset updates only noop and cfq io schedulers. as and
> deadline wouldn't compile w/ this patchset applied. I'll update as
> and deadline once some consensus regarding the general direction of
> this patchset is gained.
>
> This patchset is composed of two large parts.
>
> * Implementation of generic dispatch queue & updating individual
> elevators.
> * Move last_merge handling into generic elevator.
>
> Currently, each specific iosched maintains its own dispatch queue to
> handle ordering, requeueing, cluster dispatching, etc... This causes
> the following problems.
>
> * duplicated codes
> * difficult to enforce semantics over dispatch queue (request
> ordering, requeueing, ...)
> * specific ioscheds have to deal with non-fs or ordered requests
> directly.
>
> With generic dispatch queue, specific ioscheds are guaranteed to be
> handed only non-barrier fs requests, such that ioscheds only have to
> implement ordering logic of normal fs requests. Also, callback
> invocation is stricter now. Each fs request follows one of the
> following paths.
>
> * add_req_fn -> dispatch_fn -> activate_fn (-> deactivate_fn ->
> activate_fn)* -> completed_req_fn
> * add_req_fn -> merged_req_fn
Oops, sorry. I was being delusional. The following special case path
doesn't exist. It never reaches specific ioscheds, so it's just the above
two paths.
> * add_req_fn -> dispatch_fn (This path is special case for barrier
> request. This can be easily removed by activating at the start of
> ordered sequence, and completing at the end. Would removing this
> path be better?)
--
tejun
^ permalink raw reply [flat|nested] 8+ messages in thread