--- linux-2.5/drivers/block/deadline-iosched.c.orig	2002-11-15 23:22:19.000000000 +1100
+++ linux-2.5/drivers/block/deadline-iosched.c	2002-11-16 13:01:01.000000000 +1100
@@ -407,20 +407,49 @@
 	if (rbnext)
 		dd->last_drq[data_dir] = rb_entry_drq(rbnext);
 
 	/*
 	 * take it off the sort and fifo list, move
 	 * to dispatch queue
 	 */
 	deadline_move_to_dispatch(dd, drq);
 }
 
+static int expire_batch = 8;
+/*
+ * move a batch of entries to dispatch queue
+ */
+static inline void deadline_move_batch(struct deadline_data *dd, struct deadline_rq *drq)
+{
+	const int data_dir = rq_data_dir(drq->request);
+	struct rb_node *rbnext = NULL;
+	int i;
+
+	for (i = 0; i < expire_batch; i++) {
+		rbnext = rb_next(&drq->rb_node);
+
+		deadline_move_to_dispatch(dd, drq);
+
+		if (!rbnext)
+			break;
+		drq = rb_entry_drq(rbnext);
+	}
+
+	/*
+	 * if the tree was not exhausted, resume the next scan at the
+	 * first entry we did not dispatch
+	 */
+	dd->last_drq[data_dir] = NULL;
+	if (rbnext)
+		dd->last_drq[data_dir] = drq;
+}
+
 /*
  * returns 0 if there are no expired reads on the fifo, 1 otherwise
  */
 #define list_entry_fifo(ptr)	list_entry((ptr), struct deadline_rq, fifo)
 static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
 {
 	if (!list_empty(&dd->rw_fifo[ddir])) {
 		struct deadline_rq *drq = list_entry_fifo(dd->rw_fifo[ddir].next);
 
 		/*
@@ -439,56 +468,74 @@
 	struct deadline_rq *drq;
 
 	/*
 	 * if we have expired entries on the fifo list, move some to dispatch
 	 */
 	if (deadline_check_fifo(dd, READ)) {
 		if (writes && (dd->starved++ >= dd->writes_starved))
 			goto dispatch_writes;
 
 		drq = list_entry_fifo(dd->rw_fifo[READ].next);
-dispatch_requests:
-		deadline_move_request(dd, drq);
-		return 1;
+
+		goto dispatch_batch;
 	}
 
 	if (!list_empty(&dd->rw_fifo[READ])) {
 		if (writes && (dd->starved++ >= dd->writes_starved))
 			goto dispatch_writes;
 
 		BUG_ON(RB_EMPTY(&dd->rb_list[READ]));
 
 		drq = dd->last_drq[READ];
 		if (!drq)
 			drq = list_entry_fifo(dd->rw_fifo[READ].next);
 
-		goto dispatch_requests;
+		goto dispatch_request;
 	}
 
 	/*
 	 * either there are no reads expired or on sort list, or the reads
 	 * have starved writes for too long. dispatch some writes
 	 */
+
 	if (writes) {
 dispatch_writes:
 		BUG_ON(RB_EMPTY(&dd->rb_list[WRITE]));
 
 		dd->starved = 0;
+
+		if (deadline_check_fifo(dd, WRITE)) {
+			drq = list_entry_fifo(dd->rw_fifo[WRITE].next);
+
+			goto dispatch_batch;
+		}
 
 		drq = dd->last_drq[WRITE];
-		if (!drq || deadline_check_fifo(dd, WRITE))
+		if (!drq)
 			drq = list_entry_fifo(dd->rw_fifo[WRITE].next);
 
-		goto dispatch_requests;
+		goto dispatch_request;
 	}
 
 	return 0;
+
+dispatch_request:
+	deadline_move_request(dd, drq);
+	return 1;
+
+dispatch_batch:
+	/* dispatch in batches to prevent a seek storm if the disk
+	 * can't keep up with the queue size and all entries end up
+	 * being expired and submitted fifo.
+	 */
+	deadline_move_batch(dd, drq);
+	return 1;
 }
 
 static struct request *deadline_next_request(request_queue_t *q)
 {
 	struct deadline_data *dd = q->elevator.elevator_data;
 	struct request *rq;
 
 	/*
 	 * if there are still requests on the dispatch queue, grab the first one
 	 */