* [PATCH 1/7] elevator: move the backmerging logic into the elevator core
2006-07-12 8:03 [PATCHSET] 0/7 IO scheduler abstractions Jens Axboe
@ 2006-07-12 8:04 ` Jens Axboe
2006-07-12 8:04 ` [PATCH 2/7] rbtree: fixed reversed RB_EMPTY_NODE and rb_next/prev Jens Axboe
` (5 subsequent siblings)
6 siblings, 0 replies; 8+ messages in thread
From: Jens Axboe @ 2006-07-12 8:04 UTC
To: linux-kernel; +Cc: nickpiggin
[PATCH] elevator: move the backmerging logic into the elevator core
Right now, every IO scheduler implements its own backmerging (except for
noop, which does no merging). That results in duplicated code for
essentially the same operation, which is never a good thing. This patch
moves the backmerging out of the io schedulers and into the elevator
core. We save 1.6kb of text and as a bonus get backmerging for noop as
well. Win-win!
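
For reference, the mechanics are small: a request is hashed on its end sector
when it enters the scheduler, and elv_merge() probes the hash with the
incoming bio's start sector. Trimmed sketch of the lookup (the full version
in the block/elevator.c hunk below also prunes entries that have become
unmergeable):

	/* hash_long() comes from <linux/hash.h> */
	static const int elv_hash_shift = 6;
	#define ELV_HASH_BLOCK(sec)	((sec) >> 3)
	#define ELV_HASH_FN(sec)	(hash_long(ELV_HASH_BLOCK(sec), elv_hash_shift))
	#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)

	static struct request *elv_rqhash_find(request_queue_t *q, sector_t offset)
	{
		struct hlist_head *hash_list = &q->elevator->hash[ELV_HASH_FN(offset)];
		struct hlist_node *entry, *next;
		struct request *rq;

		hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
			if (rq_hash_key(rq) == offset)
				return rq;	/* rq ends where the bio starts */
		}

		return NULL;
	}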
Signed-off-by: Jens Axboe <axboe@suse.de>
---
block/as-iosched.c | 139 +------------------------------------------
block/cfq-iosched.c | 86 +--------------------------
block/deadline-iosched.c | 128 +---------------------------------------
block/elevator.c | 147 +++++++++++++++++++++++++++++++++++++++++-----
block/ll_rw_blk.c | 2 +
include/linux/blkdev.h | 17 +----
include/linux/elevator.h | 2 +
7 files changed, 146 insertions(+), 375 deletions(-)
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 5da56d4..1c44ce3 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -14,7 +14,6 @@ #include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
-#include <linux/hash.h>
#include <linux/rbtree.h>
#include <linux/interrupt.h>
@@ -95,7 +94,6 @@ struct as_data {
struct as_rq *next_arq[2]; /* next in sort order */
sector_t last_sector[2]; /* last REQ_SYNC & REQ_ASYNC sectors */
- struct hlist_head *hash; /* request hash */
unsigned long exit_prob; /* probability a task will exit while
being waited on */
@@ -162,11 +160,6 @@ struct as_rq {
struct io_context *io_context; /* The submitting task */
/*
- * request hash, key is the ending offset (for back merge lookup)
- */
- struct hlist_node hash;
-
- /*
* expire fifo
*/
struct list_head fifo;
@@ -273,77 +266,6 @@ static void as_put_io_context(struct as_
}
/*
- * the back merge hash support functions
- */
-static const int as_hash_shift = 6;
-#define AS_HASH_BLOCK(sec) ((sec) >> 3)
-#define AS_HASH_FN(sec) (hash_long(AS_HASH_BLOCK((sec)), as_hash_shift))
-#define AS_HASH_ENTRIES (1 << as_hash_shift)
-#define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors)
-
-static inline void __as_del_arq_hash(struct as_rq *arq)
-{
- hlist_del_init(&arq->hash);
-}
-
-static inline void as_del_arq_hash(struct as_rq *arq)
-{
- if (!hlist_unhashed(&arq->hash))
- __as_del_arq_hash(arq);
-}
-
-static void as_add_arq_hash(struct as_data *ad, struct as_rq *arq)
-{
- struct request *rq = arq->request;
-
- BUG_ON(!hlist_unhashed(&arq->hash));
-
- hlist_add_head(&arq->hash, &ad->hash[AS_HASH_FN(rq_hash_key(rq))]);
-}
-
-/*
- * move hot entry to front of chain
- */
-static inline void as_hot_arq_hash(struct as_data *ad, struct as_rq *arq)
-{
- struct request *rq = arq->request;
- struct hlist_head *head = &ad->hash[AS_HASH_FN(rq_hash_key(rq))];
-
- if (hlist_unhashed(&arq->hash)) {
- WARN_ON(1);
- return;
- }
-
- if (&arq->hash != head->first) {
- hlist_del(&arq->hash);
- hlist_add_head(&arq->hash, head);
- }
-}
-
-static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset)
-{
- struct hlist_head *hash_list = &ad->hash[AS_HASH_FN(offset)];
- struct hlist_node *entry, *next;
- struct as_rq *arq;
-
- hlist_for_each_entry_safe(arq, entry, next, hash_list, hash) {
- struct request *__rq = arq->request;
-
- BUG_ON(hlist_unhashed(&arq->hash));
-
- if (!rq_mergeable(__rq)) {
- as_del_arq_hash(arq);
- continue;
- }
-
- if (rq_hash_key(__rq) == offset)
- return __rq;
- }
-
- return NULL;
-}
-
-/*
* rb tree support functions
*/
#define rb_entry_arq(node) rb_entry((node), struct as_rq, rb_node)
@@ -1060,7 +982,6 @@ static void as_remove_queued_request(req
ad->next_arq[data_dir] = as_find_next_arq(ad, arq);
list_del_init(&arq->fifo);
- as_del_arq_hash(arq);
as_del_arq_rb(ad, arq);
}
@@ -1349,8 +1270,6 @@ static void as_add_request(request_queue
}
as_add_arq_rb(ad, arq);
- if (rq_mergeable(arq->request))
- as_add_arq_hash(ad, arq);
/*
* set expire time (only used for reads) and add to fifo list
@@ -1428,42 +1347,17 @@ as_merge(request_queue_t *q, struct requ
struct as_data *ad = q->elevator->elevator_data;
sector_t rb_key = bio->bi_sector + bio_sectors(bio);
struct request *__rq;
- int ret;
-
- /*
- * see if the merge hash can satisfy a back merge
- */
- __rq = as_find_arq_hash(ad, bio->bi_sector);
- if (__rq) {
- BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);
-
- if (elv_rq_merge_ok(__rq, bio)) {
- ret = ELEVATOR_BACK_MERGE;
- goto out;
- }
- }
/*
* check for front merge
*/
__rq = as_find_arq_rb(ad, rb_key, bio_data_dir(bio));
- if (__rq) {
- BUG_ON(rb_key != rq_rb_key(__rq));
-
- if (elv_rq_merge_ok(__rq, bio)) {
- ret = ELEVATOR_FRONT_MERGE;
- goto out;
- }
+ if (__rq && elv_rq_merge_ok(__rq, bio)) {
+ *req = __rq;
+ return ELEVATOR_FRONT_MERGE;
}
return ELEVATOR_NO_MERGE;
-out:
- if (ret) {
- if (rq_mergeable(__rq))
- as_hot_arq_hash(ad, RQ_DATA(__rq));
- }
- *req = __rq;
- return ret;
}
static void as_merged_request(request_queue_t *q, struct request *req)
@@ -1472,12 +1366,6 @@ static void as_merged_request(request_qu
struct as_rq *arq = RQ_DATA(req);
/*
- * hash always needs to be repositioned, key is end sector
- */
- as_del_arq_hash(arq);
- as_add_arq_hash(ad, arq);
-
- /*
* if the merge was a front merge, we need to reposition request
*/
if (rq_rb_key(req) != arq->rb_key) {
@@ -1501,13 +1389,6 @@ static void as_merged_requests(request_q
BUG_ON(!arq);
BUG_ON(!anext);
- /*
- * reposition arq (this is the merged request) in hash, and in rbtree
- * in case of a front merge
- */
- as_del_arq_hash(arq);
- as_add_arq_hash(ad, arq);
-
if (rq_rb_key(req) != arq->rb_key) {
as_del_arq_rb(ad, arq);
as_add_arq_rb(ad, arq);
@@ -1591,7 +1472,6 @@ static int as_set_request(request_queue_
arq->request = rq;
arq->state = AS_RQ_PRESCHED;
arq->io_context = NULL;
- INIT_HLIST_NODE(&arq->hash);
INIT_LIST_HEAD(&arq->fifo);
rq->elevator_private = arq;
return 0;
@@ -1628,7 +1508,6 @@ static void as_exit_queue(elevator_t *e)
mempool_destroy(ad->arq_pool);
put_io_context(ad->io_context);
- kfree(ad->hash);
kfree(ad);
}
@@ -1639,7 +1518,6 @@ static void as_exit_queue(elevator_t *e)
static void *as_init_queue(request_queue_t *q, elevator_t *e)
{
struct as_data *ad;
- int i;
if (!arq_pool)
return NULL;
@@ -1651,17 +1529,9 @@ static void *as_init_queue(request_queue
ad->q = q; /* Identify what queue the data belongs to */
- ad->hash = kmalloc_node(sizeof(struct hlist_head)*AS_HASH_ENTRIES,
- GFP_KERNEL, q->node);
- if (!ad->hash) {
- kfree(ad);
- return NULL;
- }
-
ad->arq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
mempool_free_slab, arq_pool, q->node);
if (!ad->arq_pool) {
- kfree(ad->hash);
kfree(ad);
return NULL;
}
@@ -1672,9 +1542,6 @@ static void *as_init_queue(request_queue
init_timer(&ad->antic_timer);
INIT_WORK(&ad->antic_work, as_work_handler, q);
- for (i = 0; i < AS_HASH_ENTRIES; i++)
- INIT_HLIST_HEAD(&ad->hash[i]);
-
INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]);
INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
ad->sort_list[REQ_SYNC] = RB_ROOT;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 102ebc2..6fd8af1 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -41,16 +41,6 @@ #define CFQ_QHASH_SHIFT 6
#define CFQ_QHASH_ENTRIES (1 << CFQ_QHASH_SHIFT)
#define list_entry_qhash(entry) hlist_entry((entry), struct cfq_queue, cfq_hash)
-/*
- * for the hash of crq inside the cfqq
- */
-#define CFQ_MHASH_SHIFT 6
-#define CFQ_MHASH_BLOCK(sec) ((sec) >> 3)
-#define CFQ_MHASH_ENTRIES (1 << CFQ_MHASH_SHIFT)
-#define CFQ_MHASH_FN(sec) hash_long(CFQ_MHASH_BLOCK(sec), CFQ_MHASH_SHIFT)
-#define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors)
-#define list_entry_hash(ptr) hlist_entry((ptr), struct cfq_rq, hash)
-
#define list_entry_cfqq(ptr) list_entry((ptr), struct cfq_queue, cfq_list)
#define list_entry_fifo(ptr) list_entry((ptr), struct request, queuelist)
@@ -112,11 +102,6 @@ struct cfq_data {
*/
struct hlist_head *cfq_hash;
- /*
- * global crq hash for all queues
- */
- struct hlist_head *crq_hash;
-
mempool_t *crq_pool;
int rq_in_driver;
@@ -203,7 +188,6 @@ struct cfq_rq {
struct rb_node rb_node;
sector_t rb_key;
struct request *request;
- struct hlist_node hash;
struct cfq_queue *cfq_queue;
struct cfq_io_context *io_context;
@@ -272,42 +256,6 @@ static void cfq_dispatch_insert(request_
static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk, gfp_t gfp_mask);
/*
- * lots of deadline iosched dupes, can be abstracted later...
- */
-static inline void cfq_del_crq_hash(struct cfq_rq *crq)
-{
- hlist_del_init(&crq->hash);
-}
-
-static inline void cfq_add_crq_hash(struct cfq_data *cfqd, struct cfq_rq *crq)
-{
- const int hash_idx = CFQ_MHASH_FN(rq_hash_key(crq->request));
-
- hlist_add_head(&crq->hash, &cfqd->crq_hash[hash_idx]);
-}
-
-static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
-{
- struct hlist_head *hash_list = &cfqd->crq_hash[CFQ_MHASH_FN(offset)];
- struct hlist_node *entry, *next;
-
- hlist_for_each_safe(entry, next, hash_list) {
- struct cfq_rq *crq = list_entry_hash(entry);
- struct request *__rq = crq->request;
-
- if (!rq_mergeable(__rq)) {
- cfq_del_crq_hash(crq);
- continue;
- }
-
- if (rq_hash_key(__rq) == offset)
- return __rq;
- }
-
- return NULL;
-}
-
-/*
* scheduler run of queue, if there are requests pending and no one in the
* driver that will restart queueing
*/
@@ -677,7 +625,6 @@ static void cfq_remove_request(struct re
list_del_init(&rq->queuelist);
cfq_del_crq_rb(crq);
- cfq_del_crq_hash(crq);
}
static int
@@ -685,34 +632,20 @@ cfq_merge(request_queue_t *q, struct req
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct request *__rq;
- int ret;
-
- __rq = cfq_find_rq_hash(cfqd, bio->bi_sector);
- if (__rq && elv_rq_merge_ok(__rq, bio)) {
- ret = ELEVATOR_BACK_MERGE;
- goto out;
- }
__rq = cfq_find_rq_fmerge(cfqd, bio);
if (__rq && elv_rq_merge_ok(__rq, bio)) {
- ret = ELEVATOR_FRONT_MERGE;
- goto out;
+ *req = __rq;
+ return ELEVATOR_FRONT_MERGE;
}
return ELEVATOR_NO_MERGE;
-out:
- *req = __rq;
- return ret;
}
static void cfq_merged_request(request_queue_t *q, struct request *req)
{
- struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_rq *crq = RQ_DATA(req);
- cfq_del_crq_hash(crq);
- cfq_add_crq_hash(cfqd, crq);
-
if (rq_rb_key(req) != crq->rb_key) {
struct cfq_queue *cfqq = crq->cfq_queue;
@@ -1825,9 +1758,6 @@ static void cfq_insert_request(request_q
list_add_tail(&rq->queuelist, &cfqq->fifo);
- if (rq_mergeable(rq))
- cfq_add_crq_hash(cfqd, crq);
-
cfq_crq_enqueued(cfqd, cfqq, crq);
}
@@ -2055,7 +1985,6 @@ cfq_set_request(request_queue_t *q, stru
RB_CLEAR_NODE(&crq->rb_node);
crq->rb_key = 0;
crq->request = rq;
- INIT_HLIST_NODE(&crq->hash);
crq->cfq_queue = cfqq;
crq->io_context = cic;
@@ -2221,7 +2150,6 @@ static void cfq_exit_queue(elevator_t *e
cfq_shutdown_timer_wq(cfqd);
mempool_destroy(cfqd->crq_pool);
- kfree(cfqd->crq_hash);
kfree(cfqd->cfq_hash);
kfree(cfqd);
}
@@ -2246,20 +2174,14 @@ static void *cfq_init_queue(request_queu
INIT_LIST_HEAD(&cfqd->empty_list);
INIT_LIST_HEAD(&cfqd->cic_list);
- cfqd->crq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL);
- if (!cfqd->crq_hash)
- goto out_crqhash;
-
cfqd->cfq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL);
if (!cfqd->cfq_hash)
- goto out_cfqhash;
+ goto out_crqhash;
cfqd->crq_pool = mempool_create_slab_pool(BLKDEV_MIN_RQ, crq_pool);
if (!cfqd->crq_pool)
goto out_crqpool;
- for (i = 0; i < CFQ_MHASH_ENTRIES; i++)
- INIT_HLIST_HEAD(&cfqd->crq_hash[i]);
for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);
@@ -2289,8 +2211,6 @@ static void *cfq_init_queue(request_queu
return cfqd;
out_crqpool:
kfree(cfqd->cfq_hash);
-out_cfqhash:
- kfree(cfqd->crq_hash);
out_crqhash:
kfree(cfqd);
return NULL;
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index c7ca9f0..b66e820 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -12,7 +12,6 @@ #include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
-#include <linux/hash.h>
#include <linux/rbtree.h>
/*
@@ -24,13 +23,6 @@ static const int writes_starved = 2;
static const int fifo_batch = 16; /* # of sequential requests treated as one
by the above parameters. For throughput. */
-static const int deadline_hash_shift = 5;
-#define DL_HASH_BLOCK(sec) ((sec) >> 3)
-#define DL_HASH_FN(sec) (hash_long(DL_HASH_BLOCK((sec)), deadline_hash_shift))
-#define DL_HASH_ENTRIES (1 << deadline_hash_shift)
-#define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors)
-#define ON_HASH(drq) (!hlist_unhashed(&(drq)->hash))
-
struct deadline_data {
/*
* run time data
@@ -46,7 +38,6 @@ struct deadline_data {
* next in sort order. read, write or both are NULL
*/
struct deadline_rq *next_drq[2];
- struct hlist_head *hash; /* request hash */
unsigned int batching; /* number of sequential requests made */
sector_t last_sector; /* head position */
unsigned int starved; /* times reads have starved writes */
@@ -75,11 +66,6 @@ struct deadline_rq {
struct request *request;
/*
- * request hash, key is the ending offset (for back merge lookup)
- */
- struct hlist_node hash;
-
- /*
* expire fifo
*/
struct list_head fifo;
@@ -93,69 +79,6 @@ static kmem_cache_t *drq_pool;
#define RQ_DATA(rq) ((struct deadline_rq *) (rq)->elevator_private)
/*
- * the back merge hash support functions
- */
-static inline void __deadline_del_drq_hash(struct deadline_rq *drq)
-{
- hlist_del_init(&drq->hash);
-}
-
-static inline void deadline_del_drq_hash(struct deadline_rq *drq)
-{
- if (ON_HASH(drq))
- __deadline_del_drq_hash(drq);
-}
-
-static inline void
-deadline_add_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
-{
- struct request *rq = drq->request;
-
- BUG_ON(ON_HASH(drq));
-
- hlist_add_head(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]);
-}
-
-/*
- * move hot entry to front of chain
- */
-static inline void
-deadline_hot_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
-{
- struct request *rq = drq->request;
- struct hlist_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))];
-
- if (ON_HASH(drq) && &drq->hash != head->first) {
- hlist_del(&drq->hash);
- hlist_add_head(&drq->hash, head);
- }
-}
-
-static struct request *
-deadline_find_drq_hash(struct deadline_data *dd, sector_t offset)
-{
- struct hlist_head *hash_list = &dd->hash[DL_HASH_FN(offset)];
- struct hlist_node *entry, *next;
- struct deadline_rq *drq;
-
- hlist_for_each_entry_safe(drq, entry, next, hash_list, hash) {
- struct request *__rq = drq->request;
-
- BUG_ON(!ON_HASH(drq));
-
- if (!rq_mergeable(__rq)) {
- __deadline_del_drq_hash(drq);
- continue;
- }
-
- if (rq_hash_key(__rq) == offset)
- return __rq;
- }
-
- return NULL;
-}
-
-/*
* rb tree support functions
*/
#define rb_entry_drq(node) rb_entry((node), struct deadline_rq, rb_node)
@@ -267,22 +190,19 @@ deadline_add_request(struct request_queu
{
struct deadline_data *dd = q->elevator->elevator_data;
struct deadline_rq *drq = RQ_DATA(rq);
-
const int data_dir = rq_data_dir(drq->request);
deadline_add_drq_rb(dd, drq);
+
/*
* set expire time (only used for reads) and add to fifo list
*/
drq->expires = jiffies + dd->fifo_expire[data_dir];
list_add_tail(&drq->fifo, &dd->fifo_list[data_dir]);
-
- if (rq_mergeable(rq))
- deadline_add_drq_hash(dd, drq);
}
/*
- * remove rq from rbtree, fifo, and hash
+ * remove rq from rbtree and fifo.
*/
static void deadline_remove_request(request_queue_t *q, struct request *rq)
{
@@ -291,7 +211,6 @@ static void deadline_remove_request(requ
list_del_init(&drq->fifo);
deadline_del_drq_rb(dd, drq);
- deadline_del_drq_hash(drq);
}
static int
@@ -302,19 +221,6 @@ deadline_merge(request_queue_t *q, struc
int ret;
/*
- * see if the merge hash can satisfy a back merge
- */
- __rq = deadline_find_drq_hash(dd, bio->bi_sector);
- if (__rq) {
- BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);
-
- if (elv_rq_merge_ok(__rq, bio)) {
- ret = ELEVATOR_BACK_MERGE;
- goto out;
- }
- }
-
- /*
* check for front merge
*/
if (dd->front_merges) {
@@ -333,8 +239,6 @@ deadline_merge(request_queue_t *q, struc
return ELEVATOR_NO_MERGE;
out:
- if (ret)
- deadline_hot_drq_hash(dd, RQ_DATA(__rq));
*req = __rq;
return ret;
}
@@ -345,12 +249,6 @@ static void deadline_merged_request(requ
struct deadline_rq *drq = RQ_DATA(req);
/*
- * hash always needs to be repositioned, key is end sector
- */
- deadline_del_drq_hash(drq);
- deadline_add_drq_hash(dd, drq);
-
- /*
* if the merge was a front merge, we need to reposition request
*/
if (rq_rb_key(req) != drq->rb_key) {
@@ -370,13 +268,6 @@ deadline_merged_requests(request_queue_t
BUG_ON(!drq);
BUG_ON(!dnext);
- /*
- * reposition drq (this is the merged request) in hash, and in rbtree
- * in case of a front merge
- */
- deadline_del_drq_hash(drq);
- deadline_add_drq_hash(dd, drq);
-
if (rq_rb_key(req) != drq->rb_key) {
deadline_del_drq_rb(dd, drq);
deadline_add_drq_rb(dd, drq);
@@ -594,7 +485,6 @@ static void deadline_exit_queue(elevator
BUG_ON(!list_empty(&dd->fifo_list[WRITE]));
mempool_destroy(dd->drq_pool);
- kfree(dd->hash);
kfree(dd);
}
@@ -605,7 +495,6 @@ static void deadline_exit_queue(elevator
static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
{
struct deadline_data *dd;
- int i;
if (!drq_pool)
return NULL;
@@ -615,24 +504,13 @@ static void *deadline_init_queue(request
return NULL;
memset(dd, 0, sizeof(*dd));
- dd->hash = kmalloc_node(sizeof(struct hlist_head)*DL_HASH_ENTRIES,
- GFP_KERNEL, q->node);
- if (!dd->hash) {
- kfree(dd);
- return NULL;
- }
-
dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
mempool_free_slab, drq_pool, q->node);
if (!dd->drq_pool) {
- kfree(dd->hash);
kfree(dd);
return NULL;
}
- for (i = 0; i < DL_HASH_ENTRIES; i++)
- INIT_HLIST_HEAD(&dd->hash[i]);
-
INIT_LIST_HEAD(&dd->fifo_list[READ]);
INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
dd->sort_list[READ] = RB_ROOT;
@@ -667,8 +545,6 @@ deadline_set_request(request_queue_t *q,
RB_CLEAR_NODE(&drq->rb_node);
drq->request = rq;
- INIT_HLIST_NODE(&drq->hash);
-
INIT_LIST_HEAD(&drq->fifo);
rq->elevator_private = drq;
diff --git a/block/elevator.c b/block/elevator.c
index bc7baee..3e40530 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -33,6 +33,7 @@ #include <linux/init.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>
+#include <linux/hash.h>
#include <asm/uaccess.h>
@@ -40,6 +41,16 @@ static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);
/*
+ * Merge hash stuff.
+ */
+static const int elv_hash_shift = 6;
+#define ELV_HASH_BLOCK(sec) ((sec) >> 3)
+#define ELV_HASH_FN(sec) (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
+#define ELV_HASH_ENTRIES (1 << elv_hash_shift)
+#define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors)
+#define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash))
+
+/*
* can we safely merge with this request?
*/
inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
@@ -153,25 +164,41 @@ static struct kobj_type elv_ktype;
static elevator_t *elevator_alloc(struct elevator_type *e)
{
- elevator_t *eq = kmalloc(sizeof(elevator_t), GFP_KERNEL);
- if (eq) {
- memset(eq, 0, sizeof(*eq));
- eq->ops = &e->ops;
- eq->elevator_type = e;
- kobject_init(&eq->kobj);
- snprintf(eq->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
- eq->kobj.ktype = &elv_ktype;
- mutex_init(&eq->sysfs_lock);
- } else {
- elevator_put(e);
- }
+ elevator_t *eq;
+ int i;
+
+ eq = kmalloc(sizeof(elevator_t), GFP_KERNEL);
+ if (unlikely(!eq))
+ goto err;
+
+ memset(eq, 0, sizeof(*eq));
+ eq->ops = &e->ops;
+ eq->elevator_type = e;
+ kobject_init(&eq->kobj);
+ snprintf(eq->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
+ eq->kobj.ktype = &elv_ktype;
+ mutex_init(&eq->sysfs_lock);
+
+ eq->hash = kmalloc(sizeof(struct hlist_head) * ELV_HASH_ENTRIES, GFP_KERNEL);
+ if (!eq->hash)
+ goto err;
+
+ for (i = 0; i < ELV_HASH_ENTRIES; i++)
+ INIT_HLIST_HEAD(&eq->hash[i]);
+
return eq;
+err:
+ kfree(eq);
+ elevator_put(e);
+ return NULL;
}
static void elevator_release(struct kobject *kobj)
{
elevator_t *e = container_of(kobj, elevator_t, kobj);
+
elevator_put(e->elevator_type);
+ kfree(e->hash);
kfree(e);
}
@@ -223,6 +250,53 @@ void elevator_exit(elevator_t *e)
kobject_put(&e->kobj);
}
+static inline void __elv_rqhash_del(struct request *rq)
+{
+ hlist_del_init(&rq->hash);
+}
+
+static void elv_rqhash_del(request_queue_t *q, struct request *rq)
+{
+ if (ELV_ON_HASH(rq))
+ __elv_rqhash_del(rq);
+}
+
+static void elv_rqhash_add(request_queue_t *q, struct request *rq)
+{
+ elevator_t *e = q->elevator;
+
+ BUG_ON(ELV_ON_HASH(rq));
+ hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
+}
+
+static void elv_rqhash_reposition(request_queue_t *q, struct request *rq)
+{
+ __elv_rqhash_del(rq);
+ elv_rqhash_add(q, rq);
+}
+
+static struct request *elv_rqhash_find(request_queue_t *q, sector_t offset)
+{
+ elevator_t *e = q->elevator;
+ struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
+ struct hlist_node *entry, *next;
+ struct request *rq;
+
+ hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
+ BUG_ON(!ELV_ON_HASH(rq));
+
+ if (unlikely(!rq_mergeable(rq))) {
+ __elv_rqhash_del(rq);
+ continue;
+ }
+
+ if (rq_hash_key(rq) == offset)
+ return rq;
+ }
+
+ return NULL;
+}
+
/*
* Insert rq into dispatch queue of q. Queue lock must be held on
* entry. If sort != 0, rq is sort-inserted; otherwise, rq will be
@@ -235,6 +309,9 @@ void elv_dispatch_sort(request_queue_t *
if (q->last_merge == rq)
q->last_merge = NULL;
+
+ elv_rqhash_del(q, rq);
+
q->nr_sorted--;
boundary = q->end_sector;
@@ -258,11 +335,32 @@ void elv_dispatch_sort(request_queue_t *
list_add(&rq->queuelist, entry);
}
+/*
+ * This should be in elevator.h, but that requires pulling in rq and q
+ */
+void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
+{
+ if (q->last_merge == rq)
+ q->last_merge = NULL;
+
+ elv_rqhash_del(q, rq);
+
+ q->nr_sorted--;
+
+ q->end_sector = rq_end_sector(rq);
+ q->boundary_rq = rq;
+ list_add_tail(&rq->queuelist, &q->queue_head);
+}
+
int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
elevator_t *e = q->elevator;
+ struct request *__rq;
int ret;
+ /*
+ * First try one-hit cache.
+ */
if (q->last_merge) {
ret = elv_try_merge(q->last_merge, bio);
if (ret != ELEVATOR_NO_MERGE) {
@@ -271,6 +369,15 @@ int elv_merge(request_queue_t *q, struct
}
}
+ /*
+ * See if our hash lookup can find a potential backmerge.
+ */
+ __rq = elv_rqhash_find(q, bio->bi_sector);
+ if (__rq && elv_rq_merge_ok(__rq, bio)) {
+ *req = __rq;
+ return ELEVATOR_BACK_MERGE;
+ }
+
if (e->ops->elevator_merge_fn)
return e->ops->elevator_merge_fn(q, req, bio);
@@ -284,6 +391,8 @@ void elv_merged_request(request_queue_t
if (e->ops->elevator_merged_fn)
e->ops->elevator_merged_fn(q, rq);
+ elv_rqhash_reposition(q, rq);
+
q->last_merge = rq;
}
@@ -294,8 +403,11 @@ void elv_merge_requests(request_queue_t
if (e->ops->elevator_merge_req_fn)
e->ops->elevator_merge_req_fn(q, rq, next);
- q->nr_sorted--;
+ elv_rqhash_reposition(q, rq);
+ elv_rqhash_del(q, next);
+
+ q->nr_sorted--;
q->last_merge = rq;
}
@@ -371,8 +483,12 @@ void elv_insert(request_queue_t *q, stru
BUG_ON(!blk_fs_request(rq));
rq->flags |= REQ_SORTED;
q->nr_sorted++;
- if (q->last_merge == NULL && rq_mergeable(rq))
- q->last_merge = rq;
+ if (rq_mergeable(rq)) {
+ elv_rqhash_add(q, rq);
+ if (!q->last_merge)
+ q->last_merge = rq;
+ }
+
/*
* Some ioscheds (cfq) run q->request_fn directly, so
* rq cannot be accessed after calling
@@ -557,6 +673,7 @@ struct request *elv_next_request(request
void elv_dequeue_request(request_queue_t *q, struct request *rq)
{
BUG_ON(list_empty(&rq->queuelist));
+ BUG_ON(ELV_ON_HASH(rq));
list_del_init(&rq->queuelist);
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 61d6b3c..84c7b1c 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -281,6 +281,7 @@ static inline void rq_init(request_queue
{
INIT_LIST_HEAD(&rq->queuelist);
INIT_LIST_HEAD(&rq->donelist);
+ INIT_HLIST_NODE(&rq->hash);
rq->errors = 0;
rq->rq_status = RQ_ACTIVE;
@@ -2665,6 +2666,7 @@ void __blk_put_request(request_queue_t *
int priv = req->flags & REQ_ELVPRIV;
BUG_ON(!list_empty(&req->queuelist));
+ BUG_ON(!hlist_unhashed(&req->hash));
blk_free_request(q, req);
freed_request(q, rw, priv);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index aafe827..9b23cbe 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -148,6 +148,8 @@ struct request {
struct bio *bio;
struct bio *biotail;
+ struct hlist_node hash; /* merge hash */
+
void *elevator_private;
void *completion_data;
@@ -679,21 +681,6 @@ static inline void blkdev_dequeue_reques
}
/*
- * This should be in elevator.h, but that requires pulling in rq and q
- */
-static inline void elv_dispatch_add_tail(struct request_queue *q,
- struct request *rq)
-{
- if (q->last_merge == rq)
- q->last_merge = NULL;
- q->nr_sorted--;
-
- q->end_sector = rq_end_sector(rq);
- q->boundary_rq = rq;
- list_add_tail(&rq->queuelist, &q->queue_head);
-}
-
-/*
* Access functions for manipulating queue properties
*/
extern request_queue_t *blk_init_queue_node(request_fn_proc *rfn,
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 1713ace..2c270e9 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -82,12 +82,14 @@ struct elevator_queue
struct kobject kobj;
struct elevator_type *elevator_type;
struct mutex sysfs_lock;
+ struct hlist_head *hash;
};
/*
* block elevator interface
*/
extern void elv_dispatch_sort(request_queue_t *, struct request *);
+extern void elv_dispatch_add_tail(request_queue_t *, struct request *);
extern void elv_add_request(request_queue_t *, struct request *, int, int);
extern void __elv_add_request(request_queue_t *, struct request *, int, int);
extern void elv_insert(request_queue_t *, struct request *, int);
--
1.4.1.ged0e0
--
Jens Axboe
* [PATCH 2/7] rbtree: fixed reversed RB_EMPTY_NODE and rb_next/prev
2006-07-12 8:03 [PATCHSET] 0/7 IO scheduler abstractions Jens Axboe
2006-07-12 8:04 ` [PATCH 1/7] elevator: move the backmerging logic into the elevator core Jens Axboe
@ 2006-07-12 8:04 ` Jens Axboe
2006-07-12 8:04 ` [PATCH 3/7] elevator: abstract out the rbtree sort handling Jens Axboe
` (4 subsequent siblings)
6 siblings, 0 replies; 8+ messages in thread
From: Jens Axboe @ 2006-07-12 8:04 UTC
To: linux-kernel; +Cc: nickpiggin
[PATCH] rbtree: fixed reversed RB_EMPTY_NODE and rb_next/prev
The conditions got reversed. Also make rb_next() and rb_prev() check
for the empty condition.
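
For context, RB_CLEAR_NODE() marks a detached node by pointing its parent
at itself, so "empty" means rb_parent(node) == node; the old macro tested
the opposite. Tiny illustration (made-up usage, not from the patch):

	struct rb_node node;

	RB_CLEAR_NODE(&node);		/* parent now points at the node itself */
	BUG_ON(!RB_EMPTY_NODE(&node));	/* holds with the fixed macro */

	/*
	 * rb_next()/rb_prev() on such a detached node now return NULL
	 * instead of walking a stale parent pointer.
	 */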
Signed-off-by: Jens Axboe <axboe@suse.de>
---
block/as-iosched.c | 4 ++--
include/linux/rbtree.h | 2 +-
lib/rbtree.c | 6 ++++++
3 files changed, 9 insertions(+), 3 deletions(-)
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 1c44ce3..d677029 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -336,7 +336,7 @@ static void as_add_arq_rb(struct as_data
static inline void as_del_arq_rb(struct as_data *ad, struct as_rq *arq)
{
- if (!RB_EMPTY_NODE(&arq->rb_node)) {
+ if (RB_EMPTY_NODE(&arq->rb_node)) {
WARN_ON(1);
return;
}
@@ -1039,7 +1039,7 @@ static void as_move_to_dispatch(struct a
struct request *rq = arq->request;
const int data_dir = arq->is_sync;
- BUG_ON(!RB_EMPTY_NODE(&arq->rb_node));
+ BUG_ON(RB_EMPTY_NODE(&arq->rb_node));
as_antic_stop(ad);
ad->antic_status = ANTIC_OFF;
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index 8d5382e..344bc34 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -133,7 +133,7 @@ #define RB_ROOT (struct rb_root) { NULL,
#define rb_entry(ptr, type, member) container_of(ptr, type, member)
#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)
-#define RB_EMPTY_NODE(node) (rb_parent(node) != node)
+#define RB_EMPTY_NODE(node) (rb_parent(node) == node)
#define RB_CLEAR_NODE(node) (rb_set_parent(node, node))
extern void rb_insert_color(struct rb_node *, struct rb_root *);
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 1e55ba1..48499c2 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -322,6 +322,9 @@ struct rb_node *rb_next(struct rb_node *
{
struct rb_node *parent;
+ if (rb_parent(node) == node)
+ return NULL;
+
/* If we have a right-hand child, go down and then left as far
as we can. */
if (node->rb_right) {
@@ -348,6 +351,9 @@ struct rb_node *rb_prev(struct rb_node *
{
struct rb_node *parent;
+ if (rb_parent(node) == node)
+ return NULL;
+
/* If we have a left-hand child, go down and then right as far
as we can. */
if (node->rb_left) {
--
1.4.1.ged0e0
--
Jens Axboe
* [PATCH 3/7] elevator: abstract out the rbtree sort handling
2006-07-12 8:03 [PATCHSET] 0/7 IO scheduler abstractions Jens Axboe
2006-07-12 8:04 ` [PATCH 1/7] elevator: move the backmerging logic into the elevator core Jens Axboe
2006-07-12 8:04 ` [PATCH 2/7] rbtree: fixed reversed RB_EMPTY_NODE and rb_next/prev Jens Axboe
@ 2006-07-12 8:04 ` Jens Axboe
2006-07-12 8:05 ` [PATCH 4/7] elevator: introduce a way to reuse rq for internal FIFO handling Jens Axboe
` (3 subsequent siblings)
6 siblings, 0 replies; 8+ messages in thread
From: Jens Axboe @ 2006-07-12 8:04 UTC
To: linux-kernel; +Cc: nickpiggin
[PATCH] elevator: abstract out the rbtree sort handling
The rbtree sort/lookup/reposition logic is mostly duplicated in
cfq/deadline/as, so move it to the elevator core. The io schedulers
still provide the actual rb root, as we don't want to impose any sort
of specific handling on the schedulers.
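
Per-scheduler usage then boils down to calling the new helpers on the
scheduler's own rb root; roughly like this, simplified from the deadline
hunks below:

	/*
	 * insert: elv_rb_add() returns an alias if a request with the same
	 * start sector is already queued (can happen with direct IO)
	 */
	while ((__alias = elv_rb_add(&dd->sort_list[rq_data_dir(rq)], rq)) != NULL)
		deadline_move_request(dd, RQ_DATA(__alias));

	/* front merge lookup: key is the bio's end sector */
	__rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)],
			   bio->bi_sector + bio_sectors(bio));

	/* removal */
	elv_rb_del(&dd->sort_list[rq_data_dir(rq)], rq);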
Signed-off-by: Jens Axboe <axboe@suse.de>
---
block/as-iosched.c | 180 +++++++---------------------------------------
block/cfq-iosched.c | 179 ++++++++++++----------------------------------
block/deadline-iosched.c | 170 ++++++++-----------------------------------
block/elevator.c | 86 +++++++++++++++++++++-
block/ll_rw_blk.c | 7 +-
include/linux/blkdev.h | 1
include/linux/elevator.h | 19 ++++-
7 files changed, 214 insertions(+), 428 deletions(-)
diff --git a/block/as-iosched.c b/block/as-iosched.c
index d677029..000e776 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -149,12 +149,6 @@ enum arq_state {
};
struct as_rq {
- /*
- * rbtree index, key is the starting offset
- */
- struct rb_node rb_node;
- sector_t rb_key;
-
struct request *request;
struct io_context *io_context; /* The submitting task */
@@ -268,101 +262,22 @@ static void as_put_io_context(struct as_
/*
* rb tree support functions
*/
-#define rb_entry_arq(node) rb_entry((node), struct as_rq, rb_node)
#define ARQ_RB_ROOT(ad, arq) (&(ad)->sort_list[(arq)->is_sync])
-#define rq_rb_key(rq) (rq)->sector
-
-/*
- * as_find_first_arq finds the first (lowest sector numbered) request
- * for the specified data_dir. Used to sweep back to the start of the disk
- * (1-way elevator) after we process the last (highest sector) request.
- */
-static struct as_rq *as_find_first_arq(struct as_data *ad, int data_dir)
-{
- struct rb_node *n = ad->sort_list[data_dir].rb_node;
-
- if (n == NULL)
- return NULL;
-
- for (;;) {
- if (n->rb_left == NULL)
- return rb_entry_arq(n);
-
- n = n->rb_left;
- }
-}
-
-/*
- * Add the request to the rb tree if it is unique. If there is an alias (an
- * existing request against the same sector), which can happen when using
- * direct IO, then return the alias.
- */
-static struct as_rq *__as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
-{
- struct rb_node **p = &ARQ_RB_ROOT(ad, arq)->rb_node;
- struct rb_node *parent = NULL;
- struct as_rq *__arq;
- struct request *rq = arq->request;
-
- arq->rb_key = rq_rb_key(rq);
-
- while (*p) {
- parent = *p;
- __arq = rb_entry_arq(parent);
-
- if (arq->rb_key < __arq->rb_key)
- p = &(*p)->rb_left;
- else if (arq->rb_key > __arq->rb_key)
- p = &(*p)->rb_right;
- else
- return __arq;
- }
-
- rb_link_node(&arq->rb_node, parent, p);
- rb_insert_color(&arq->rb_node, ARQ_RB_ROOT(ad, arq));
- return NULL;
-}
-
-static void as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
+static void as_add_arq_rb(struct as_data *ad, struct request *rq)
{
- struct as_rq *alias;
+ struct as_rq *arq = RQ_DATA(rq);
+ struct request *alias;
- while ((unlikely(alias = __as_add_arq_rb(ad, arq)))) {
- as_move_to_dispatch(ad, alias);
+ while ((unlikely(alias = elv_rb_add(ARQ_RB_ROOT(ad, arq), rq)))) {
+ as_move_to_dispatch(ad, RQ_DATA(alias));
as_antic_stop(ad);
}
}
-static inline void as_del_arq_rb(struct as_data *ad, struct as_rq *arq)
-{
- if (RB_EMPTY_NODE(&arq->rb_node)) {
- WARN_ON(1);
- return;
- }
-
- rb_erase(&arq->rb_node, ARQ_RB_ROOT(ad, arq));
- RB_CLEAR_NODE(&arq->rb_node);
-}
-
-static struct request *
-as_find_arq_rb(struct as_data *ad, sector_t sector, int data_dir)
+static inline void as_del_arq_rb(struct as_data *ad, struct request *rq)
{
- struct rb_node *n = ad->sort_list[data_dir].rb_node;
- struct as_rq *arq;
-
- while (n) {
- arq = rb_entry_arq(n);
-
- if (sector < arq->rb_key)
- n = n->rb_left;
- else if (sector > arq->rb_key)
- n = n->rb_right;
- else
- return arq->request;
- }
-
- return NULL;
+ elv_rb_del(ARQ_RB_ROOT(ad, RQ_DATA(rq)), rq);
}
/*
@@ -455,32 +370,29 @@ as_choose_req(struct as_data *ad, struct
* this with as_choose_req form the basis for how the scheduler chooses
* what request to process next. Anticipation works on top of this.
*/
-static struct as_rq *as_find_next_arq(struct as_data *ad, struct as_rq *last)
+static struct as_rq *as_find_next_arq(struct as_data *ad, struct as_rq *arq)
{
- const int data_dir = last->is_sync;
- struct as_rq *ret;
+ struct request *last = arq->request;
struct rb_node *rbnext = rb_next(&last->rb_node);
struct rb_node *rbprev = rb_prev(&last->rb_node);
- struct as_rq *arq_next, *arq_prev;
+ struct as_rq *next = NULL, *prev = NULL;
- BUG_ON(!RB_EMPTY_NODE(&last->rb_node));
+ BUG_ON(RB_EMPTY_NODE(&last->rb_node));
if (rbprev)
- arq_prev = rb_entry_arq(rbprev);
- else
- arq_prev = NULL;
+ prev = RQ_DATA(rb_entry_rq(rbprev));
if (rbnext)
- arq_next = rb_entry_arq(rbnext);
+ next = RQ_DATA(rb_entry_rq(rbnext));
else {
- arq_next = as_find_first_arq(ad, data_dir);
- if (arq_next == last)
- arq_next = NULL;
- }
+ const int data_dir = arq->is_sync;
- ret = as_choose_req(ad, arq_next, arq_prev);
+ rbnext = rb_first(&ad->sort_list[data_dir]);
+ if (rbnext && rbnext != &last->rb_node)
+ next = RQ_DATA(rb_entry_rq(rbnext));
+ }
- return ret;
+ return as_choose_req(ad, next, prev);
}
/*
@@ -982,7 +894,7 @@ static void as_remove_queued_request(req
ad->next_arq[data_dir] = as_find_next_arq(ad, arq);
list_del_init(&arq->fifo);
- as_del_arq_rb(ad, arq);
+ as_del_arq_rb(ad, rq);
}
/*
@@ -1039,7 +951,7 @@ static void as_move_to_dispatch(struct a
struct request *rq = arq->request;
const int data_dir = arq->is_sync;
- BUG_ON(RB_EMPTY_NODE(&arq->rb_node));
+ BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
as_antic_stop(ad);
ad->antic_status = ANTIC_OFF;
@@ -1064,8 +976,6 @@ static void as_move_to_dispatch(struct a
}
ad->ioc_finished = 0;
- ad->next_arq[data_dir] = as_find_next_arq(ad, arq);
-
/*
* take it off the sort and fifo list, add to dispatch queue
*/
@@ -1269,7 +1179,7 @@ static void as_add_request(request_queue
atomic_inc(&arq->io_context->aic->nr_queued);
}
- as_add_arq_rb(ad, arq);
+ as_add_arq_rb(ad, rq);
/*
* set expire time (only used for reads) and add to fifo list
@@ -1315,32 +1225,6 @@ static int as_queue_empty(request_queue_
&& list_empty(&ad->fifo_list[REQ_SYNC]);
}
-static struct request *as_former_request(request_queue_t *q,
- struct request *rq)
-{
- struct as_rq *arq = RQ_DATA(rq);
- struct rb_node *rbprev = rb_prev(&arq->rb_node);
- struct request *ret = NULL;
-
- if (rbprev)
- ret = rb_entry_arq(rbprev)->request;
-
- return ret;
-}
-
-static struct request *as_latter_request(request_queue_t *q,
- struct request *rq)
-{
- struct as_rq *arq = RQ_DATA(rq);
- struct rb_node *rbnext = rb_next(&arq->rb_node);
- struct request *ret = NULL;
-
- if (rbnext)
- ret = rb_entry_arq(rbnext)->request;
-
- return ret;
-}
-
static int
as_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
@@ -1351,7 +1235,7 @@ as_merge(request_queue_t *q, struct requ
/*
* check for front merge
*/
- __rq = as_find_arq_rb(ad, rb_key, bio_data_dir(bio));
+ __rq = elv_rb_find(&ad->sort_list[bio_data_dir(bio)], rb_key);
if (__rq && elv_rq_merge_ok(__rq, bio)) {
*req = __rq;
return ELEVATOR_FRONT_MERGE;
@@ -1360,7 +1244,7 @@ as_merge(request_queue_t *q, struct requ
return ELEVATOR_NO_MERGE;
}
-static void as_merged_request(request_queue_t *q, struct request *req)
+static void as_merged_request(request_queue_t *q, struct request *req, int type)
{
struct as_data *ad = q->elevator->elevator_data;
struct as_rq *arq = RQ_DATA(req);
@@ -1368,9 +1252,8 @@ static void as_merged_request(request_qu
/*
* if the merge was a front merge, we need to reposition request
*/
- if (rq_rb_key(req) != arq->rb_key) {
- as_del_arq_rb(ad, arq);
- as_add_arq_rb(ad, arq);
+ if (type == ELEVATOR_FRONT_MERGE) {
+ elv_rb_reposition(ARQ_RB_ROOT(ad, arq), req);
/*
* Note! At this stage of this and the next function, our next
* request may not be optimal - eg the request may have "grown"
@@ -1382,18 +1265,12 @@ static void as_merged_request(request_qu
static void as_merged_requests(request_queue_t *q, struct request *req,
struct request *next)
{
- struct as_data *ad = q->elevator->elevator_data;
struct as_rq *arq = RQ_DATA(req);
struct as_rq *anext = RQ_DATA(next);
BUG_ON(!arq);
BUG_ON(!anext);
- if (rq_rb_key(req) != arq->rb_key) {
- as_del_arq_rb(ad, arq);
- as_add_arq_rb(ad, arq);
- }
-
/*
* if anext expires before arq, assign its expire time to arq
* and move into anext position (anext will be deleted) in fifo
@@ -1468,7 +1345,6 @@ static int as_set_request(request_queue_
if (arq) {
memset(arq, 0, sizeof(*arq));
- RB_CLEAR_NODE(&arq->rb_node);
arq->request = rq;
arq->state = AS_RQ_PRESCHED;
arq->io_context = NULL;
@@ -1654,8 +1530,8 @@ static struct elevator_type iosched_as =
.elevator_deactivate_req_fn = as_deactivate_request,
.elevator_queue_empty_fn = as_queue_empty,
.elevator_completed_req_fn = as_completed_request,
- .elevator_former_req_fn = as_former_request,
- .elevator_latter_req_fn = as_latter_request,
+ .elevator_former_req_fn = elv_rb_former_request,
+ .elevator_latter_req_fn = elv_rb_latter_request,
.elevator_set_req_fn = as_set_request,
.elevator_put_req_fn = as_put_request,
.elevator_may_queue_fn = as_may_queue,
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 6fd8af1..95bc2e8 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -46,12 +46,6 @@ #define list_entry_fifo(ptr) list_entry(
#define RQ_DATA(rq) (rq)->elevator_private
-/*
- * rb-tree defines
- */
-#define rb_entry_crq(node) rb_entry((node), struct cfq_rq, rb_node)
-#define rq_rb_key(rq) (rq)->sector
-
static kmem_cache_t *crq_pool;
static kmem_cache_t *cfq_pool;
static kmem_cache_t *cfq_ioc_pool;
@@ -185,8 +179,6 @@ struct cfq_queue {
};
struct cfq_rq {
- struct rb_node rb_node;
- sector_t rb_key;
struct request *request;
struct cfq_queue *cfq_queue;
@@ -376,33 +368,27 @@ #define CFQ_RQ2_WRAP 0x02 /* request 2 w
*/
static struct cfq_rq *
cfq_find_next_crq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
- struct cfq_rq *last)
+ struct cfq_rq *last_crq)
{
- struct cfq_rq *crq_next = NULL, *crq_prev = NULL;
- struct rb_node *rbnext, *rbprev;
-
- if (!(rbnext = rb_next(&last->rb_node))) {
- rbnext = rb_first(&cfqq->sort_list);
- if (rbnext == &last->rb_node)
- rbnext = NULL;
- }
+ struct request *last = last_crq->request;
+ struct rb_node *rbnext = rb_next(&last->rb_node);
+ struct rb_node *rbprev = rb_prev(&last->rb_node);
+ struct cfq_rq *next = NULL, *prev = NULL;
- rbprev = rb_prev(&last->rb_node);
+ BUG_ON(RB_EMPTY_NODE(&last->rb_node));
if (rbprev)
- crq_prev = rb_entry_crq(rbprev);
- if (rbnext)
- crq_next = rb_entry_crq(rbnext);
-
- return cfq_choose_req(cfqd, crq_next, crq_prev);
-}
+ prev = RQ_DATA(rb_entry_rq(rbprev));
-static void cfq_update_next_crq(struct cfq_rq *crq)
-{
- struct cfq_queue *cfqq = crq->cfq_queue;
+ if (rbnext)
+ next = RQ_DATA(rb_entry_rq(rbnext));
+ else {
+ rbnext = rb_first(&cfqq->sort_list);
+ if (rbnext && rbnext != &last->rb_node)
+ next = RQ_DATA(rb_entry_rq(rbnext));
+ }
- if (cfqq->next_crq == crq)
- cfqq->next_crq = cfq_find_next_crq(cfqq->cfqd, cfqq, crq);
+ return cfq_choose_req(cfqd, next, prev);
}
static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
@@ -497,72 +483,27 @@ static inline void cfq_del_crq_rb(struct
BUG_ON(!cfqq->queued[sync]);
cfqq->queued[sync]--;
- cfq_update_next_crq(crq);
-
- rb_erase(&crq->rb_node, &cfqq->sort_list);
+ elv_rb_del(&cfqq->sort_list, crq->request);
if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
cfq_del_cfqq_rr(cfqd, cfqq);
}
-static struct cfq_rq *
-__cfq_add_crq_rb(struct cfq_rq *crq)
-{
- struct rb_node **p = &crq->cfq_queue->sort_list.rb_node;
- struct rb_node *parent = NULL;
- struct cfq_rq *__crq;
-
- while (*p) {
- parent = *p;
- __crq = rb_entry_crq(parent);
-
- if (crq->rb_key < __crq->rb_key)
- p = &(*p)->rb_left;
- else if (crq->rb_key > __crq->rb_key)
- p = &(*p)->rb_right;
- else
- return __crq;
- }
-
- rb_link_node(&crq->rb_node, parent, p);
- return NULL;
-}
-
static void cfq_add_crq_rb(struct cfq_rq *crq)
{
struct cfq_queue *cfqq = crq->cfq_queue;
struct cfq_data *cfqd = cfqq->cfqd;
struct request *rq = crq->request;
- struct cfq_rq *__alias;
+ struct request *__alias;
- crq->rb_key = rq_rb_key(rq);
cfqq->queued[cfq_crq_is_sync(crq)]++;
/*
* looks a little odd, but the first insert might return an alias.
* if that happens, put the alias on the dispatch list
*/
- while ((__alias = __cfq_add_crq_rb(crq)) != NULL)
- cfq_dispatch_insert(cfqd->queue, __alias);
-
- rb_insert_color(&crq->rb_node, &cfqq->sort_list);
-
- if (!cfq_cfqq_on_rr(cfqq))
- cfq_add_cfqq_rr(cfqd, cfqq);
-
- /*
- * check if this request is a better next-serve candidate
- */
- cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
-}
-
-static inline void
-cfq_reposition_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
-{
- rb_erase(&crq->rb_node, &cfqq->sort_list);
- cfqq->queued[cfq_crq_is_sync(crq)]--;
-
- cfq_add_crq_rb(crq);
+ while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
+ cfq_dispatch_insert(cfqd->queue, RQ_DATA(__alias));
}
static struct request *
@@ -570,28 +511,13 @@ cfq_find_rq_fmerge(struct cfq_data *cfqd
{
struct task_struct *tsk = current;
pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio));
+ sector_t sector = bio->bi_sector + bio_sectors(bio);
struct cfq_queue *cfqq;
- struct rb_node *n;
- sector_t sector;
cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
- if (!cfqq)
- goto out;
-
- sector = bio->bi_sector + bio_sectors(bio);
- n = cfqq->sort_list.rb_node;
- while (n) {
- struct cfq_rq *crq = rb_entry_crq(n);
-
- if (sector < crq->rb_key)
- n = n->rb_left;
- else if (sector > crq->rb_key)
- n = n->rb_right;
- else
- return crq->request;
- }
+ if (cfqq)
+ return elv_rb_find(&cfqq->sort_list, sector);
-out:
return NULL;
}
@@ -622,9 +548,20 @@ static void cfq_deactivate_request(reque
static void cfq_remove_request(struct request *rq)
{
struct cfq_rq *crq = RQ_DATA(rq);
+ struct cfq_queue *cfqq = crq->cfq_queue;
+
+ if (cfqq->next_crq == crq)
+ cfqq->next_crq = cfq_find_next_crq(cfqq->cfqd, cfqq, crq);
list_del_init(&rq->queuelist);
cfq_del_crq_rb(crq);
+
+ if (!cfqq->next_crq && !RB_EMPTY_ROOT(&cfqq->sort_list)) {
+ struct request *rq = rb_entry_rq(rb_first(&cfqq->sort_list));
+
+ cfqq->next_crq = RQ_DATA(rq);
+ printk("foo\n");
+ }
}
static int
@@ -642,15 +579,15 @@ cfq_merge(request_queue_t *q, struct req
return ELEVATOR_NO_MERGE;
}
-static void cfq_merged_request(request_queue_t *q, struct request *req)
+static void cfq_merged_request(request_queue_t *q, struct request *req,
+ int type)
{
struct cfq_rq *crq = RQ_DATA(req);
- if (rq_rb_key(req) != crq->rb_key) {
+ if (type == ELEVATOR_FRONT_MERGE) {
struct cfq_queue *cfqq = crq->cfq_queue;
- cfq_update_next_crq(crq);
- cfq_reposition_crq_rb(cfqq, crq);
+ elv_rb_reposition(&cfqq->sort_list, req);
}
}
@@ -658,8 +595,6 @@ static void
cfq_merged_requests(request_queue_t *q, struct request *rq,
struct request *next)
{
- cfq_merged_request(q, rq);
-
/*
* reposition in fifo if next is older than rq
*/
@@ -881,7 +816,6 @@ static void cfq_dispatch_insert(request_
struct cfq_queue *cfqq = crq->cfq_queue;
struct request *rq;
- cfqq->next_crq = cfq_find_next_crq(cfqd, cfqq, crq);
cfq_remove_request(crq->request);
cfqq->on_dispatch[cfq_crq_is_sync(crq)]++;
elv_dispatch_sort(q, crq->request);
@@ -1700,6 +1634,12 @@ cfq_crq_enqueued(struct cfq_data *cfqd,
struct cfq_io_context *cic = crq->io_context;
/*
+ * check if this request is a better next-serve candidate
+ */
+ cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
+ BUG_ON(!cfqq->next_crq);
+
+ /*
* we never wait for an async request and we don't allow preemption
* of an async request. so just return early
*/
@@ -1756,6 +1696,9 @@ static void cfq_insert_request(request_q
cfq_add_crq_rb(crq);
+ if (!cfq_cfqq_on_rr(cfqq))
+ cfq_add_cfqq_rr(cfqd, cfqq);
+
list_add_tail(&rq->queuelist, &cfqq->fifo);
cfq_crq_enqueued(cfqd, cfqq, crq);
@@ -1803,30 +1746,6 @@ static void cfq_completed_request(reques
}
}
-static struct request *
-cfq_former_request(request_queue_t *q, struct request *rq)
-{
- struct cfq_rq *crq = RQ_DATA(rq);
- struct rb_node *rbprev = rb_prev(&crq->rb_node);
-
- if (rbprev)
- return rb_entry_crq(rbprev)->request;
-
- return NULL;
-}
-
-static struct request *
-cfq_latter_request(request_queue_t *q, struct request *rq)
-{
- struct cfq_rq *crq = RQ_DATA(rq);
- struct rb_node *rbnext = rb_next(&crq->rb_node);
-
- if (rbnext)
- return rb_entry_crq(rbnext)->request;
-
- return NULL;
-}
-
/*
* we temporarily boost lower priority queues if they are holding fs exclusive
* resources. they are boosted to normal prio (CLASS_BE/4)
@@ -1982,8 +1901,6 @@ cfq_set_request(request_queue_t *q, stru
crq = mempool_alloc(cfqd->crq_pool, gfp_mask);
if (crq) {
- RB_CLEAR_NODE(&crq->rb_node);
- crq->rb_key = 0;
crq->request = rq;
crq->cfq_queue = cfqq;
crq->io_context = cic;
@@ -2345,8 +2262,8 @@ static struct elevator_type iosched_cfq
.elevator_deactivate_req_fn = cfq_deactivate_request,
.elevator_queue_empty_fn = cfq_queue_empty,
.elevator_completed_req_fn = cfq_completed_request,
- .elevator_former_req_fn = cfq_former_request,
- .elevator_latter_req_fn = cfq_latter_request,
+ .elevator_former_req_fn = elv_rb_former_request,
+ .elevator_latter_req_fn = elv_rb_latter_request,
.elevator_set_req_fn = cfq_set_request,
.elevator_put_req_fn = cfq_put_request,
.elevator_may_queue_fn = cfq_may_queue,
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index b66e820..ce86b1f 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -57,12 +57,6 @@ struct deadline_data {
* pre-request data.
*/
struct deadline_rq {
- /*
- * rbtree index, key is the starting offset
- */
- struct rb_node rb_node;
- sector_t rb_key;
-
struct request *request;
/*
@@ -78,108 +72,38 @@ static kmem_cache_t *drq_pool;
#define RQ_DATA(rq) ((struct deadline_rq *) (rq)->elevator_private)
-/*
- * rb tree support functions
- */
-#define rb_entry_drq(node) rb_entry((node), struct deadline_rq, rb_node)
-#define DRQ_RB_ROOT(dd, drq) (&(dd)->sort_list[rq_data_dir((drq)->request)])
-#define rq_rb_key(rq) (rq)->sector
-
-static struct deadline_rq *
-__deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
-{
- struct rb_node **p = &DRQ_RB_ROOT(dd, drq)->rb_node;
- struct rb_node *parent = NULL;
- struct deadline_rq *__drq;
-
- while (*p) {
- parent = *p;
- __drq = rb_entry_drq(parent);
-
- if (drq->rb_key < __drq->rb_key)
- p = &(*p)->rb_left;
- else if (drq->rb_key > __drq->rb_key)
- p = &(*p)->rb_right;
- else
- return __drq;
- }
-
- rb_link_node(&drq->rb_node, parent, p);
- return NULL;
-}
+#define RQ_RB_ROOT(dd, rq) (&(dd)->sort_list[rq_data_dir((rq))])
+#define DRQ_RB_ROOT(dd, drq) RQ_RB_ROOT((drq)->request)
static void
-deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
+deadline_add_drq_rb(struct deadline_data *dd, struct request *rq)
{
- struct deadline_rq *__alias;
-
- drq->rb_key = rq_rb_key(drq->request);
+ struct rb_root *root = RQ_RB_ROOT(dd, rq);
+ struct request *__alias;
retry:
- __alias = __deadline_add_drq_rb(dd, drq);
- if (!__alias) {
- rb_insert_color(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
- return;
+ __alias = elv_rb_add(root, rq);
+ if (unlikely(__alias)) {
+ deadline_move_request(dd, RQ_DATA(__alias));
+ goto retry;
}
-
- deadline_move_request(dd, __alias);
- goto retry;
}
static inline void
deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
- const int data_dir = rq_data_dir(drq->request);
+ struct request *rq = drq->request;
+ const int data_dir = rq_data_dir(rq);
if (dd->next_drq[data_dir] == drq) {
- struct rb_node *rbnext = rb_next(&drq->rb_node);
+ struct rb_node *rbnext = rb_next(&rq->rb_node);
dd->next_drq[data_dir] = NULL;
if (rbnext)
- dd->next_drq[data_dir] = rb_entry_drq(rbnext);
+ dd->next_drq[data_dir] = RQ_DATA(rb_entry_rq(rbnext));
}
- BUG_ON(!RB_EMPTY_NODE(&drq->rb_node));
- rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
- RB_CLEAR_NODE(&drq->rb_node);
-}
-
-static struct request *
-deadline_find_drq_rb(struct deadline_data *dd, sector_t sector, int data_dir)
-{
- struct rb_node *n = dd->sort_list[data_dir].rb_node;
- struct deadline_rq *drq;
-
- while (n) {
- drq = rb_entry_drq(n);
-
- if (sector < drq->rb_key)
- n = n->rb_left;
- else if (sector > drq->rb_key)
- n = n->rb_right;
- else
- return drq->request;
- }
-
- return NULL;
-}
-
-/*
- * deadline_find_first_drq finds the first (lowest sector numbered) request
- * for the specified data_dir. Used to sweep back to the start of the disk
- * (1-way elevator) after we process the last (highest sector) request.
- */
-static struct deadline_rq *
-deadline_find_first_drq(struct deadline_data *dd, int data_dir)
-{
- struct rb_node *n = dd->sort_list[data_dir].rb_node;
-
- for (;;) {
- if (n->rb_left == NULL)
- return rb_entry_drq(n);
-
- n = n->rb_left;
- }
+ elv_rb_del(RQ_RB_ROOT(dd, rq), rq);
}
/*
@@ -192,7 +116,7 @@ deadline_add_request(struct request_queu
struct deadline_rq *drq = RQ_DATA(rq);
const int data_dir = rq_data_dir(drq->request);
- deadline_add_drq_rb(dd, drq);
+ deadline_add_drq_rb(dd, rq);
/*
* set expire time (only used for reads) and add to fifo list
@@ -224,11 +148,11 @@ deadline_merge(request_queue_t *q, struc
* check for front merge
*/
if (dd->front_merges) {
- sector_t rb_key = bio->bi_sector + bio_sectors(bio);
+ sector_t sector = bio->bi_sector + bio_sectors(bio);
- __rq = deadline_find_drq_rb(dd, rb_key, bio_data_dir(bio));
+ __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
if (__rq) {
- BUG_ON(rb_key != rq_rb_key(__rq));
+ BUG_ON(sector != __rq->sector);
if (elv_rq_merge_ok(__rq, bio)) {
ret = ELEVATOR_FRONT_MERGE;
@@ -243,36 +167,28 @@ out:
return ret;
}
-static void deadline_merged_request(request_queue_t *q, struct request *req)
+static void deadline_merged_request(request_queue_t *q, struct request *req,
+ int type)
{
struct deadline_data *dd = q->elevator->elevator_data;
- struct deadline_rq *drq = RQ_DATA(req);
/*
* if the merge was a front merge, we need to reposition request
*/
- if (rq_rb_key(req) != drq->rb_key) {
- deadline_del_drq_rb(dd, drq);
- deadline_add_drq_rb(dd, drq);
- }
+ if (type == ELEVATOR_FRONT_MERGE)
+ elv_rb_reposition(RQ_RB_ROOT(dd, req), req);
}
static void
deadline_merged_requests(request_queue_t *q, struct request *req,
struct request *next)
{
- struct deadline_data *dd = q->elevator->elevator_data;
struct deadline_rq *drq = RQ_DATA(req);
struct deadline_rq *dnext = RQ_DATA(next);
BUG_ON(!drq);
BUG_ON(!dnext);
- if (rq_rb_key(req) != drq->rb_key) {
- deadline_del_drq_rb(dd, drq);
- deadline_add_drq_rb(dd, drq);
- }
-
/*
* if dnext expires before drq, assign its expire time to drq
* and move into dnext position (dnext will be deleted) in fifo
@@ -308,14 +224,15 @@ deadline_move_to_dispatch(struct deadlin
static void
deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq)
{
- const int data_dir = rq_data_dir(drq->request);
- struct rb_node *rbnext = rb_next(&drq->rb_node);
+ struct request *rq = drq->request;
+ const int data_dir = rq_data_dir(rq);
+ struct rb_node *rbnext = rb_next(&rq->rb_node);
dd->next_drq[READ] = NULL;
dd->next_drq[WRITE] = NULL;
if (rbnext)
- dd->next_drq[data_dir] = rb_entry_drq(rbnext);
+ dd->next_drq[data_dir] = RQ_DATA(rb_entry_rq(rbnext));
dd->last_sector = drq->request->sector + drq->request->nr_sectors;
@@ -426,13 +343,17 @@ dispatch_find_request:
*/
drq = dd->next_drq[data_dir];
} else {
+ struct rb_node *n;
+
/*
* The last req was the other direction or we have run out of
* higher-sectored requests. Go back to the lowest sectored
* request (1 way elevator) and start a new batch.
*/
dd->batching = 0;
- drq = deadline_find_first_drq(dd, data_dir);
+ n = rb_first(&dd->sort_list[data_dir]);
+ if (n)
+ drq = RQ_DATA(rb_entry_rq(n));
}
dispatch_request:
@@ -453,30 +374,6 @@ static int deadline_queue_empty(request_
&& list_empty(&dd->fifo_list[READ]);
}
-static struct request *
-deadline_former_request(request_queue_t *q, struct request *rq)
-{
- struct deadline_rq *drq = RQ_DATA(rq);
- struct rb_node *rbprev = rb_prev(&drq->rb_node);
-
- if (rbprev)
- return rb_entry_drq(rbprev)->request;
-
- return NULL;
-}
-
-static struct request *
-deadline_latter_request(request_queue_t *q, struct request *rq)
-{
- struct deadline_rq *drq = RQ_DATA(rq);
- struct rb_node *rbnext = rb_next(&drq->rb_node);
-
- if (rbnext)
- return rb_entry_drq(rbnext)->request;
-
- return NULL;
-}
-
static void deadline_exit_queue(elevator_t *e)
{
struct deadline_data *dd = e->elevator_data;
@@ -542,7 +439,6 @@ deadline_set_request(request_queue_t *q,
drq = mempool_alloc(dd->drq_pool, gfp_mask);
if (drq) {
memset(drq, 0, sizeof(*drq));
- RB_CLEAR_NODE(&drq->rb_node);
drq->request = rq;
INIT_LIST_HEAD(&drq->fifo);
@@ -633,8 +529,8 @@ static struct elevator_type iosched_dead
.elevator_dispatch_fn = deadline_dispatch_requests,
.elevator_add_req_fn = deadline_add_request,
.elevator_queue_empty_fn = deadline_queue_empty,
- .elevator_former_req_fn = deadline_former_request,
- .elevator_latter_req_fn = deadline_latter_request,
+ .elevator_former_req_fn = elv_rb_former_request,
+ .elevator_latter_req_fn = elv_rb_latter_request,
.elevator_set_req_fn = deadline_set_request,
.elevator_put_req_fn = deadline_put_request,
.elevator_init_fn = deadline_init_queue,
diff --git a/block/elevator.c b/block/elevator.c
index 3e40530..9cca1ac 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -298,6 +298,65 @@ static struct request *elv_rqhash_find(r
}
/*
+ * RB-tree support functions for inserting/lookup/removal of requests
+ * in a sorted RB tree.
+ */
+struct request *elv_rb_add(struct rb_root *root, struct request *rq)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct request *__rq;
+
+ while (*p) {
+ parent = *p;
+ __rq = rb_entry(parent, struct request, rb_node);
+
+ if (rq->sector < __rq->sector)
+ p = &(*p)->rb_left;
+ else if (rq->sector > __rq->sector)
+ p = &(*p)->rb_right;
+ else
+ return __rq;
+ }
+
+ rb_link_node(&rq->rb_node, parent, p);
+ rb_insert_color(&rq->rb_node, root);
+ return NULL;
+}
+
+void elv_rb_del(struct rb_root *root, struct request *rq)
+{
+ BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
+ rb_erase(&rq->rb_node, root);
+ RB_CLEAR_NODE(&rq->rb_node);
+}
+
+void elv_rb_reposition(struct rb_root *root, struct request *rq)
+{
+ elv_rb_del(root, rq);
+ elv_rb_add(root, rq);
+}
+
+struct request *elv_rb_find(struct rb_root *root, sector_t sector)
+{
+ struct rb_node *n = root->rb_node;
+ struct request *rq;
+
+ while (n) {
+ rq = rb_entry(n, struct request, rb_node);
+
+ if (sector < rq->sector)
+ n = n->rb_left;
+ else if (sector > rq->sector)
+ n = n->rb_right;
+ else
+ return rq;
+ }
+
+ return NULL;
+}
+
+/*
* Insert rq into dispatch queue of q. Queue lock must be held on
* entry. If sort != 0, rq is sort-inserted; otherwise, rq will be
* appended to the dispatch queue. To be used by specific elevators.
@@ -384,14 +443,15 @@ int elv_merge(request_queue_t *q, struct
return ELEVATOR_NO_MERGE;
}
-void elv_merged_request(request_queue_t *q, struct request *rq)
+void elv_merged_request(request_queue_t *q, struct request *rq, int type)
{
elevator_t *e = q->elevator;
if (e->ops->elevator_merged_fn)
- e->ops->elevator_merged_fn(q, rq);
+ e->ops->elevator_merged_fn(q, rq, type);
- elv_rqhash_reposition(q, rq);
+ if (type == ELEVATOR_BACK_MERGE)
+ elv_rqhash_reposition(q, rq);
q->last_merge = rq;
}
@@ -1024,6 +1084,26 @@ ssize_t elv_iosched_show(request_queue_t
return len;
}
+struct request *elv_rb_former_request(request_queue_t *q, struct request *rq)
+{
+ struct rb_node *rbprev = rb_prev(&rq->rb_node);
+
+ if (rbprev)
+ return rb_entry_rq(rbprev);
+
+ return NULL;
+}
+
+struct request *elv_rb_latter_request(request_queue_t *q, struct request *rq)
+{
+ struct rb_node *rbnext = rb_next(&rq->rb_node);
+
+ if (rbnext)
+ return rb_entry_rq(rbnext);
+
+ return NULL;
+}
+
EXPORT_SYMBOL(elv_dispatch_sort);
EXPORT_SYMBOL(elv_add_request);
EXPORT_SYMBOL(__elv_add_request);
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 84c7b1c..08c1615 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -281,11 +281,12 @@ static inline void rq_init(request_queue
{
INIT_LIST_HEAD(&rq->queuelist);
INIT_LIST_HEAD(&rq->donelist);
- INIT_HLIST_NODE(&rq->hash);
rq->errors = 0;
rq->rq_status = RQ_ACTIVE;
rq->bio = rq->biotail = NULL;
+ INIT_HLIST_NODE(&rq->hash);
+ RB_CLEAR_NODE(&rq->rb_node);
rq->ioprio = 0;
rq->buffer = NULL;
rq->ref_count = 1;
@@ -2896,7 +2897,7 @@ static int __make_request(request_queue_
req->ioprio = ioprio_best(req->ioprio, prio);
drive_stat_acct(req, nr_sectors, 0);
if (!attempt_back_merge(q, req))
- elv_merged_request(q, req);
+ elv_merged_request(q, req, el_ret);
goto out;
case ELEVATOR_FRONT_MERGE:
@@ -2923,7 +2924,7 @@ static int __make_request(request_queue_
req->ioprio = ioprio_best(req->ioprio, prio);
drive_stat_acct(req, nr_sectors, 0);
if (!attempt_front_merge(q, req))
- elv_merged_request(q, req);
+ elv_merged_request(q, req, el_ret);
goto out;
/* ELV_NO_MERGE: elevator says don't/can't merge. */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 9b23cbe..e296719 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -149,6 +149,7 @@ struct request {
struct bio *biotail;
struct hlist_node hash; /* merge hash */
+ struct rb_node rb_node; /* sort/lookup */
void *elevator_private;
void *completion_data;
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 2c270e9..38f0f0d 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -6,7 +6,7 @@ typedef int (elevator_merge_fn) (request
typedef void (elevator_merge_req_fn) (request_queue_t *, struct request *, struct request *);
-typedef void (elevator_merged_fn) (request_queue_t *, struct request *);
+typedef void (elevator_merged_fn) (request_queue_t *, struct request *, int);
typedef int (elevator_dispatch_fn) (request_queue_t *, int);
@@ -96,7 +96,7 @@ extern void elv_insert(request_queue_t *
extern int elv_merge(request_queue_t *, struct request **, struct bio *);
extern void elv_merge_requests(request_queue_t *, struct request *,
struct request *);
-extern void elv_merged_request(request_queue_t *, struct request *);
+extern void elv_merged_request(request_queue_t *, struct request *, int);
extern void elv_dequeue_request(request_queue_t *, struct request *);
extern void elv_requeue_request(request_queue_t *, struct request *);
extern int elv_queue_empty(request_queue_t *);
@@ -127,6 +127,20 @@ extern void elevator_exit(elevator_t *);
extern int elv_rq_merge_ok(struct request *, struct bio *);
/*
+ * Helper functions.
+ */
+extern struct request *elv_rb_former_request(request_queue_t *, struct request *);
+extern struct request *elv_rb_latter_request(request_queue_t *, struct request *);
+
+/*
+ * rb support functions.
+ */
+extern struct request *elv_rb_add(struct rb_root *, struct request *);
+extern void elv_rb_del(struct rb_root *, struct request *);
+extern struct request *elv_rb_find(struct rb_root *, sector_t);
+extern void elv_rb_reposition(struct rb_root *, struct request *);
+
+/*
* Return values from elevator merger
*/
#define ELEVATOR_NO_MERGE 0
@@ -151,5 +165,6 @@ enum {
};
#define rq_end_sector(rq) ((rq)->sector + (rq)->nr_sectors)
+#define rb_entry_rq(node) rb_entry((node), struct request, rb_node)
#endif
--
1.4.1.ged0e0
--
Jens Axboe
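A usage sketch for the new merge type (illustrative only, not part of the patch): a back merge grows the request at its end, so the elevator core refreshes the back-merge hash itself; a front merge changes rq->sector, so the io scheduler has to re-sort the request in its own tree. The example_* names and the per-direction sort_list are assumptions modelled on deadline; elv_rb_reposition(), ELEVATOR_FRONT_MERGE and the three-argument elevator_merged_fn come from the patch above.

static void example_merged_request(request_queue_t *q, struct request *rq,
				   int type)
{
	struct example_data *ed = q->elevator->elevator_data;

	/*
	 * The core already repositions the hash entry for back merges.
	 * A front merge moved rq->sector, so re-sort rq in our rbtree.
	 */
	if (type == ELEVATOR_FRONT_MERGE)
		elv_rb_reposition(&ed->sort_list[rq_data_dir(rq)], rq);
}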
* [PATCH 4/7] elevator: introduce a way to reuse rq for internal FIFO handling
2006-07-12 8:03 [PATCHSET] 0/7 IO scheduler abstractions Jens Axboe
` (2 preceding siblings ...)
2006-07-12 8:04 ` [PATCH 3/7] elevator: abstract out the rbtree sort handling Jens Axboe
@ 2006-07-12 8:05 ` Jens Axboe
2006-07-12 8:05 ` [PATCH 5/7] deadline-iosched: remove elevator private drq request type Jens Axboe
` (2 subsequent siblings)
6 siblings, 0 replies; 8+ messages in thread
From: Jens Axboe @ 2006-07-12 8:05 UTC (permalink / raw)
To: linux-kernel; +Cc: nickpiggin
[PATCH] elevator: introduce a way to reuse rq for internal FIFO handling
The io schedulers can use this to hold the FIFO expiry time instead of
having to allocate space for it themselves.
Signed-off-by: Jens Axboe <axboe@suse.de>
---
include/linux/elevator.h | 12 ++++++++++++
1 files changed, 12 insertions(+), 0 deletions(-)
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 38f0f0d..6e1c903 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -167,4 +167,16 @@ enum {
#define rq_end_sector(rq) ((rq)->sector + (rq)->nr_sectors)
#define rb_entry_rq(node) rb_entry((node), struct request, rb_node)
+/*
+ * Hack to reuse the donelist list_head as the fifo time holder while
+ * the request is in the io scheduler. Saves an unsigned long in rq.
+ */
+#define rq_fifo_time(rq) ((unsigned long) (rq)->donelist.next)
+#define rq_set_fifo_time(rq,exp) ((rq)->donelist.next = (void *) (exp))
+#define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist)
+#define rq_fifo_clear(rq) do { \
+ list_del_init(&(rq)->queuelist); \
+ INIT_LIST_HEAD(&(rq)->donelist); \
+ } while (0)
+
#endif
--
1.4.1.ged0e0
--
Jens Axboe
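Usage sketch for the new defines (illustrative only, not part of the patch): while a request sits in the io scheduler, ->queuelist carries the fifo linkage and ->donelist.next doubles as the expiry time. The example_* names and the single fifo_list/fifo_expire fields are assumptions; rq_set_fifo_time(), rq_entry_fifo() and rq_fifo_time() are the macros added above, and rq_fifo_clear() is what a scheduler calls before handing the request back, since it reinitialises donelist for the completion path.

static void example_add_request(struct example_data *ed, struct request *rq)
{
	/* stamp the expiry time, then queue in fifo order */
	rq_set_fifo_time(rq, jiffies + ed->fifo_expire);
	list_add_tail(&rq->queuelist, &ed->fifo_list);
}

static int example_fifo_expired(struct example_data *ed)
{
	struct request *rq;

	if (list_empty(&ed->fifo_list))
		return 0;

	/* oldest request sits at the head of the fifo */
	rq = rq_entry_fifo(ed->fifo_list.next);
	return time_after(jiffies, rq_fifo_time(rq));
}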
* [PATCH 5/7] deadline-iosched: remove elevator private drq request type
2006-07-12 8:03 [PATCHSET] 0/7 IO scheduler abstractions Jens Axboe
` (3 preceding siblings ...)
2006-07-12 8:05 ` [PATCH 4/7] elevator: introduce a way to reuse rq for internal FIFO handling Jens Axboe
@ 2006-07-12 8:05 ` Jens Axboe
2006-07-12 8:06 ` [PATCH 6/7] cfq-iosched: convert to using the FIFO elevator defines Jens Axboe
2006-07-12 8:06 ` [PATCH 7/7] as-iosched: reuse rq for fifo Jens Axboe
6 siblings, 0 replies; 8+ messages in thread
From: Jens Axboe @ 2006-07-12 8:05 UTC (permalink / raw)
To: linux-kernel; +Cc: nickpiggin
[PATCH] deadline-iosched: remove elevator private drq request type
A big win: we now save an allocation/free on each request! With the
rb/hash abstractions from the previous patches, we can just reuse
queuelist/donelist for the FIFO data and be done with it.
Signed-off-by: Jens Axboe <axboe@suse.de>
---
block/deadline-iosched.c | 192 ++++++++++++----------------------------------
1 files changed, 51 insertions(+), 141 deletions(-)
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index ce86b1f..15da9b2 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -37,7 +37,7 @@ struct deadline_data {
/*
* next in sort order. read, write or both are NULL
*/
- struct deadline_rq *next_drq[2];
+ struct request *next_rq[2];
unsigned int batching; /* number of sequential requests made */
sector_t last_sector; /* head position */
unsigned int starved; /* times reads have starved writes */
@@ -49,34 +49,14 @@ struct deadline_data {
int fifo_batch;
int writes_starved;
int front_merges;
-
- mempool_t *drq_pool;
};
-/*
- * pre-request data.
- */
-struct deadline_rq {
- struct request *request;
-
- /*
- * expire fifo
- */
- struct list_head fifo;
- unsigned long expires;
-};
-
-static void deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq);
-
-static kmem_cache_t *drq_pool;
-
-#define RQ_DATA(rq) ((struct deadline_rq *) (rq)->elevator_private)
+static void deadline_move_request(struct deadline_data *, struct request *);
#define RQ_RB_ROOT(dd, rq) (&(dd)->sort_list[rq_data_dir((rq))])
-#define DRQ_RB_ROOT(dd, drq) RQ_RB_ROOT((drq)->request)
static void
-deadline_add_drq_rb(struct deadline_data *dd, struct request *rq)
+deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
{
struct rb_root *root = RQ_RB_ROOT(dd, rq);
struct request *__alias;
@@ -84,45 +64,43 @@ deadline_add_drq_rb(struct deadline_data
retry:
__alias = elv_rb_add(root, rq);
if (unlikely(__alias)) {
- deadline_move_request(dd, RQ_DATA(__alias));
+ deadline_move_request(dd, __alias);
goto retry;
}
}
static inline void
-deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
+deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
{
- struct request *rq = drq->request;
const int data_dir = rq_data_dir(rq);
- if (dd->next_drq[data_dir] == drq) {
+ if (dd->next_rq[data_dir] == rq) {
struct rb_node *rbnext = rb_next(&rq->rb_node);
- dd->next_drq[data_dir] = NULL;
+ dd->next_rq[data_dir] = NULL;
if (rbnext)
- dd->next_drq[data_dir] = RQ_DATA(rb_entry_rq(rbnext));
+ dd->next_rq[data_dir] = rb_entry_rq(rbnext);
}
elv_rb_del(RQ_RB_ROOT(dd, rq), rq);
}
/*
- * add drq to rbtree and fifo
+ * add rq to rbtree and fifo
*/
static void
deadline_add_request(struct request_queue *q, struct request *rq)
{
struct deadline_data *dd = q->elevator->elevator_data;
- struct deadline_rq *drq = RQ_DATA(rq);
- const int data_dir = rq_data_dir(drq->request);
+ const int data_dir = rq_data_dir(rq);
- deadline_add_drq_rb(dd, rq);
+ deadline_add_rq_rb(dd, rq);
/*
* set expire time (only used for reads) and add to fifo list
*/
- drq->expires = jiffies + dd->fifo_expire[data_dir];
- list_add_tail(&drq->fifo, &dd->fifo_list[data_dir]);
+ rq_set_fifo_time(rq, jiffies + dd->fifo_expire[data_dir]);
+ list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
}
/*
@@ -130,11 +108,10 @@ deadline_add_request(struct request_queu
*/
static void deadline_remove_request(request_queue_t *q, struct request *rq)
{
- struct deadline_rq *drq = RQ_DATA(rq);
struct deadline_data *dd = q->elevator->elevator_data;
- list_del_init(&drq->fifo);
- deadline_del_drq_rb(dd, drq);
+ rq_fifo_clear(rq);
+ deadline_del_rq_rb(dd, rq);
}
static int
@@ -183,20 +160,14 @@ static void
deadline_merged_requests(request_queue_t *q, struct request *req,
struct request *next)
{
- struct deadline_rq *drq = RQ_DATA(req);
- struct deadline_rq *dnext = RQ_DATA(next);
-
- BUG_ON(!drq);
- BUG_ON(!dnext);
-
/*
- * if dnext expires before drq, assign its expire time to drq
- * and move into dnext position (dnext will be deleted) in fifo
+ * if next expires before rq, assign its expire time to rq
+ * and move into next position (next will be deleted) in fifo
*/
- if (!list_empty(&drq->fifo) && !list_empty(&dnext->fifo)) {
- if (time_before(dnext->expires, drq->expires)) {
- list_move(&drq->fifo, &dnext->fifo);
- drq->expires = dnext->expires;
+ if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
+ if (time_before(rq_fifo_time(next), rq_fifo_time(req))) {
+ list_move(&req->queuelist, &next->queuelist);
+ rq_set_fifo_time(req, rq_fifo_time(next));
}
}
@@ -210,53 +181,50 @@ deadline_merged_requests(request_queue_t
* move request from sort list to dispatch queue.
*/
static inline void
-deadline_move_to_dispatch(struct deadline_data *dd, struct deadline_rq *drq)
+deadline_move_to_dispatch(struct deadline_data *dd, struct request *rq)
{
- request_queue_t *q = drq->request->q;
+ request_queue_t *q = rq->q;
- deadline_remove_request(q, drq->request);
- elv_dispatch_add_tail(q, drq->request);
+ deadline_remove_request(q, rq);
+ elv_dispatch_add_tail(q, rq);
}
/*
* move an entry to dispatch queue
*/
static void
-deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq)
+deadline_move_request(struct deadline_data *dd, struct request *rq)
{
- struct request *rq = drq->request;
const int data_dir = rq_data_dir(rq);
struct rb_node *rbnext = rb_next(&rq->rb_node);
- dd->next_drq[READ] = NULL;
- dd->next_drq[WRITE] = NULL;
+ dd->next_rq[READ] = NULL;
+ dd->next_rq[WRITE] = NULL;
if (rbnext)
- dd->next_drq[data_dir] = RQ_DATA(rb_entry_rq(rbnext));
+ dd->next_rq[data_dir] = rb_entry_rq(rbnext);
- dd->last_sector = drq->request->sector + drq->request->nr_sectors;
+ dd->last_sector = rq->sector + rq->nr_sectors;
/*
* take it off the sort and fifo list, move
* to dispatch queue
*/
- deadline_move_to_dispatch(dd, drq);
+ deadline_move_to_dispatch(dd, rq);
}
-#define list_entry_fifo(ptr) list_entry((ptr), struct deadline_rq, fifo)
-
/*
* deadline_check_fifo returns 0 if there are no expired reads on the fifo,
* 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
*/
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
- struct deadline_rq *drq = list_entry_fifo(dd->fifo_list[ddir].next);
+ struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);
/*
- * drq is expired!
+ * rq is expired!
*/
- if (time_after(jiffies, drq->expires))
+ if (time_after(jiffies, rq_fifo_time(rq)))
return 1;
return 0;
@@ -271,21 +239,21 @@ static int deadline_dispatch_requests(re
struct deadline_data *dd = q->elevator->elevator_data;
const int reads = !list_empty(&dd->fifo_list[READ]);
const int writes = !list_empty(&dd->fifo_list[WRITE]);
- struct deadline_rq *drq;
+ struct request *rq;
int data_dir;
/*
* batches are currently reads XOR writes
*/
- if (dd->next_drq[WRITE])
- drq = dd->next_drq[WRITE];
+ if (dd->next_rq[WRITE])
+ rq = dd->next_rq[WRITE];
else
- drq = dd->next_drq[READ];
+ rq = dd->next_rq[READ];
- if (drq) {
+ if (rq) {
/* we have a "next request" */
- if (dd->last_sector != drq->request->sector)
+ if (dd->last_sector != rq->sector)
/* end the batch on a non sequential request */
dd->batching += dd->fifo_batch;
@@ -334,34 +302,33 @@ dispatch_find_request:
if (deadline_check_fifo(dd, data_dir)) {
/* An expired request exists - satisfy it */
dd->batching = 0;
- drq = list_entry_fifo(dd->fifo_list[data_dir].next);
+ rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
- } else if (dd->next_drq[data_dir]) {
+ } else if (dd->next_rq[data_dir]) {
/*
* The last req was the same dir and we have a next request in
* sort order. No expired requests so continue on from here.
*/
- drq = dd->next_drq[data_dir];
+ rq = dd->next_rq[data_dir];
} else {
- struct rb_node *n;
-
+ struct rb_node *node;
/*
* The last req was the other direction or we have run out of
* higher-sectored requests. Go back to the lowest sectored
* request (1 way elevator) and start a new batch.
*/
dd->batching = 0;
- n = rb_first(&dd->sort_list[data_dir]);
- if (n)
- drq = RQ_DATA(rb_entry_rq(n));
+ node = rb_first(&dd->sort_list[data_dir]);
+ if (node)
+ rq = rb_entry_rq(node);
}
dispatch_request:
/*
- * drq is the selected appropriate request.
+ * rq is the selected appropriate request.
*/
dd->batching++;
- deadline_move_request(dd, drq);
+ deadline_move_request(dd, rq);
return 1;
}
@@ -381,33 +348,21 @@ static void deadline_exit_queue(elevator
BUG_ON(!list_empty(&dd->fifo_list[READ]));
BUG_ON(!list_empty(&dd->fifo_list[WRITE]));
- mempool_destroy(dd->drq_pool);
kfree(dd);
}
/*
- * initialize elevator private data (deadline_data), and alloc a drq for
- * each request on the free lists
+ * initialize elevator private data (deadline_data).
*/
static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
{
struct deadline_data *dd;
- if (!drq_pool)
- return NULL;
-
dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
if (!dd)
return NULL;
memset(dd, 0, sizeof(*dd));
- dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
- mempool_free_slab, drq_pool, q->node);
- if (!dd->drq_pool) {
- kfree(dd);
- return NULL;
- }
-
INIT_LIST_HEAD(&dd->fifo_list[READ]);
INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
dd->sort_list[READ] = RB_ROOT;
@@ -420,36 +375,6 @@ static void *deadline_init_queue(request
return dd;
}
-static void deadline_put_request(request_queue_t *q, struct request *rq)
-{
- struct deadline_data *dd = q->elevator->elevator_data;
- struct deadline_rq *drq = RQ_DATA(rq);
-
- mempool_free(drq, dd->drq_pool);
- rq->elevator_private = NULL;
-}
-
-static int
-deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
- gfp_t gfp_mask)
-{
- struct deadline_data *dd = q->elevator->elevator_data;
- struct deadline_rq *drq;
-
- drq = mempool_alloc(dd->drq_pool, gfp_mask);
- if (drq) {
- memset(drq, 0, sizeof(*drq));
- drq->request = rq;
-
- INIT_LIST_HEAD(&drq->fifo);
-
- rq->elevator_private = drq;
- return 0;
- }
-
- return 1;
-}
-
/*
* sysfs parts below
*/
@@ -531,8 +456,6 @@ static struct elevator_type iosched_dead
.elevator_queue_empty_fn = deadline_queue_empty,
.elevator_former_req_fn = elv_rb_former_request,
.elevator_latter_req_fn = elv_rb_latter_request,
- .elevator_set_req_fn = deadline_set_request,
- .elevator_put_req_fn = deadline_put_request,
.elevator_init_fn = deadline_init_queue,
.elevator_exit_fn = deadline_exit_queue,
},
@@ -544,24 +467,11 @@ static struct elevator_type iosched_dead
static int __init deadline_init(void)
{
- int ret;
-
- drq_pool = kmem_cache_create("deadline_drq", sizeof(struct deadline_rq),
- 0, 0, NULL, NULL);
-
- if (!drq_pool)
- return -ENOMEM;
-
- ret = elv_register(&iosched_deadline);
- if (ret)
- kmem_cache_destroy(drq_pool);
-
- return ret;
+ return elv_register(&iosched_deadline);
}
static void __exit deadline_exit(void)
{
- kmem_cache_destroy(drq_pool);
elv_unregister(&iosched_deadline);
}
--
1.4.1.ged0e0
--
Jens Axboe
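One detail worth spelling out (sketch is illustrative, not from the patch): elv_rb_add() keys requests by ->sector alone, so a second request at the same sector is not inserted - the existing request is returned instead, and the caller has to get it out of the tree and retry. That is what the retry loop in deadline_add_rq_rb() above does by sending the alias to the dispatch queue. The same caller-side pattern as a loop, with made-up example_* names (example_move_to_dispatch() stands in for whatever the scheduler does with the alias):

static void example_add_rq_rb(struct example_data *ed, struct request *rq)
{
	struct rb_root *root = &ed->sort_list[rq_data_dir(rq)];
	struct request *alias;

	/* a request already occupies this sector: move it on and retry */
	while ((alias = elv_rb_add(root, rq)) != NULL)
		example_move_to_dispatch(ed, alias);
}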
* [PATCH 6/7] cfq-iosched: convert to using the FIFO elevator defines
2006-07-12 8:03 [PATCHSET] 0/7 IO scheduler abstractions Jens Axboe
` (4 preceding siblings ...)
2006-07-12 8:05 ` [PATCH 5/7] deadline-iosched: remove elevator private drq request type Jens Axboe
@ 2006-07-12 8:06 ` Jens Axboe
2006-07-12 8:06 ` [PATCH 7/7] as-iosched: reuse rq for fifo Jens Axboe
6 siblings, 0 replies; 8+ messages in thread
From: Jens Axboe @ 2006-07-12 8:06 UTC (permalink / raw)
To: linux-kernel; +Cc: nickpiggin
[PATCH] cfq-iosched: convert to using the FIFO elevator defines
Signed-off-by: Jens Axboe <axboe@suse.de>
---
block/cfq-iosched.c | 3 +--
1 files changed, 1 insertions(+), 2 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 95bc2e8..54cc8e3 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -42,7 +42,6 @@ #define CFQ_QHASH_ENTRIES (1 << CFQ_QHAS
#define list_entry_qhash(entry) hlist_entry((entry), struct cfq_queue, cfq_hash)
#define list_entry_cfqq(ptr) list_entry((ptr), struct cfq_queue, cfq_list)
-#define list_entry_fifo(ptr) list_entry((ptr), struct request, queuelist)
#define RQ_DATA(rq) (rq)->elevator_private
@@ -839,7 +838,7 @@ static inline struct cfq_rq *cfq_check_f
if (!list_empty(&cfqq->fifo)) {
int fifo = cfq_cfqq_class_sync(cfqq);
- crq = RQ_DATA(list_entry_fifo(cfqq->fifo.next));
+ crq = RQ_DATA(rq_entry_fifo(cfqq->fifo.next));
rq = crq->request;
if (time_after(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo])) {
cfq_mark_cfqq_fifo_expire(cfqq);
--
1.4.1.ged0e0
--
Jens Axboe
* [PATCH 7/7] as-iosched: reuse rq for fifo
2006-07-12 8:03 [PATCHSET] 0/7 IO scheduler abstractions Jens Axboe
` (5 preceding siblings ...)
2006-07-12 8:06 ` [PATCH 6/7] cfq-iosched: convert to using the FIFO elevator defines Jens Axboe
@ 2006-07-12 8:06 ` Jens Axboe
6 siblings, 0 replies; 8+ messages in thread
From: Jens Axboe @ 2006-07-12 8:06 UTC (permalink / raw)
To: linux-kernel; +Cc: nickpiggin
[PATCH] as-iosched: reuse rq for fifo
Saves some space in arq.
Signed-off-by: Jens Axboe <axboe@suse.de>
---
block/as-iosched.c | 32 ++++++++++++--------------------
1 files changed, 12 insertions(+), 20 deletions(-)
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 000e776..66bd0bc 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -153,12 +153,6 @@ struct as_rq {
struct io_context *io_context; /* The submitting task */
- /*
- * expire fifo
- */
- struct list_head fifo;
- unsigned long expires;
-
unsigned int is_sync;
enum arq_state state;
};
@@ -893,7 +887,7 @@ static void as_remove_queued_request(req
if (ad->next_arq[data_dir] == arq)
ad->next_arq[data_dir] = as_find_next_arq(ad, arq);
- list_del_init(&arq->fifo);
+ rq_fifo_clear(rq);
as_del_arq_rb(ad, rq);
}
@@ -907,7 +901,7 @@ static void as_remove_queued_request(req
*/
static int as_fifo_expired(struct as_data *ad, int adir)
{
- struct as_rq *arq;
+ struct request *rq;
long delta_jif;
delta_jif = jiffies - ad->last_check_fifo[adir];
@@ -921,9 +915,9 @@ static int as_fifo_expired(struct as_dat
if (list_empty(&ad->fifo_list[adir]))
return 0;
- arq = list_entry_fifo(ad->fifo_list[adir].next);
+ rq = rq_entry_fifo(ad->fifo_list[adir].next);
- return time_after(jiffies, arq->expires);
+ return time_after(jiffies, rq_fifo_time(rq));
}
/*
@@ -1087,7 +1081,7 @@ static int as_dispatch_request(request_q
ad->changed_batch = 1;
}
ad->batch_data_dir = REQ_SYNC;
- arq = list_entry_fifo(ad->fifo_list[ad->batch_data_dir].next);
+ arq = RQ_DATA(rq_entry_fifo(ad->fifo_list[REQ_SYNC].next));
ad->last_check_fifo[ad->batch_data_dir] = jiffies;
goto dispatch_request;
}
@@ -1127,8 +1121,7 @@ dispatch_request:
if (as_fifo_expired(ad, ad->batch_data_dir)) {
fifo_expired:
- arq = list_entry_fifo(ad->fifo_list[ad->batch_data_dir].next);
- BUG_ON(arq == NULL);
+ arq = RQ_DATA(rq_entry_fifo(ad->fifo_list[ad->batch_data_dir].next));
}
if (ad->changed_batch) {
@@ -1184,8 +1177,8 @@ static void as_add_request(request_queue
/*
* set expire time (only used for reads) and add to fifo list
*/
- arq->expires = jiffies + ad->fifo_expire[data_dir];
- list_add_tail(&arq->fifo, &ad->fifo_list[data_dir]);
+ rq_set_fifo_time(rq, jiffies + ad->fifo_expire[data_dir]);
+ list_add_tail(&rq->queuelist, &ad->fifo_list[data_dir]);
as_update_arq(ad, arq); /* keep state machine up to date */
arq->state = AS_RQ_QUEUED;
@@ -1275,10 +1268,10 @@ static void as_merged_requests(request_q
* if anext expires before arq, assign its expire time to arq
* and move into anext position (anext will be deleted) in fifo
*/
- if (!list_empty(&arq->fifo) && !list_empty(&anext->fifo)) {
- if (time_before(anext->expires, arq->expires)) {
- list_move(&arq->fifo, &anext->fifo);
- arq->expires = anext->expires;
+ if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
+ if (time_before(rq_fifo_time(next), rq_fifo_time(req))) {
+ list_move(&req->queuelist, &next->queuelist);
+ rq_set_fifo_time(req, rq_fifo_time(next));
/*
* Don't copy here but swap, because when anext is
* removed below, it must contain the unused context
@@ -1348,7 +1341,6 @@ static int as_set_request(request_queue_
arq->request = rq;
arq->state = AS_RQ_PRESCHED;
arq->io_context = NULL;
- INIT_LIST_HEAD(&arq->fifo);
rq->elevator_private = arq;
return 0;
}
--
1.4.1.ged0e0
--
Jens Axboe