From: Nick Piggin <nickpiggin@yahoo.com.au>
To: Jens Axboe <axboe@suse.de>
Cc: "Chen, Kenneth W" <kenneth.w.chen@intel.com>,
linux-kernel@vger.kernel.org
Subject: Re: [patch] use cheaper elv_queue_empty when unplug a device
Date: Tue, 29 Mar 2005 20:06:25 +1000 [thread overview]
Message-ID: <424928A1.8060400@yahoo.com.au> (raw)
In-Reply-To: <20050329092819.GK16636@suse.de>
[-- Attachment #1: Type: text/plain, Size: 520 bytes --]
Jens Axboe wrote:
> Looks good, I've been toying with something very similar for a long time
> myself.
>
Here is another thing I just noticed that should further reduce the
locking by at least 1, sometimes 2 lock/unlock pairs per request.
This comes at the cost of uglifying the code somewhat. It is pretty
nicely contained, though, so Jens you might consider it acceptable as
is, or we could investigate how to make it nicer if Kenneth reports
some improvement.
Note: this isn't runtime-tested - it could easily have a bug.
[-- Attachment #2: blk-reduce-locking.patch --]
[-- Type: text/plain, Size: 3627 bytes --]
---
linux-2.6-npiggin/drivers/block/ll_rw_blk.c | 31 +++++++++++++++++++---------
1 files changed, 22 insertions(+), 9 deletions(-)
diff -puN drivers/block/ll_rw_blk.c~blk-reduce-locking drivers/block/ll_rw_blk.c
--- linux-2.6/drivers/block/ll_rw_blk.c~blk-reduce-locking 2005-03-29 19:51:30.000000000 +1000
+++ linux-2.6-npiggin/drivers/block/ll_rw_blk.c 2005-03-29 20:03:12.000000000 +1000
@@ -1859,10 +1859,13 @@ static void freed_request(request_queue_
#define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
/*
- * Get a free request, queue_lock must not be held
+ * Get a free request, queue_lock must be held.
+ * Returns NULL on failure, with queue_lock held.
+ * Returns !NULL on success, with queue_lock *not held*.
*/
static struct request *get_request(request_queue_t *q, int rw, int gfp_mask)
{
+ int batching;
struct request *rq = NULL;
struct request_list *rl = &q->rq;
struct io_context *ioc = get_io_context(gfp_mask);
@@ -1870,7 +1873,6 @@ static struct request *get_request(reque
if (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)))
goto out;
- spin_lock_irq(q->queue_lock);
if (rl->count[rw]+1 >= q->nr_requests) {
/*
* The queue will fill after this allocation, so set it as
@@ -1883,6 +1885,8 @@ static struct request *get_request(reque
blk_set_queue_full(q, rw);
}
}
+
+ batching = ioc_batching(q, ioc);
switch (elv_may_queue(q, rw)) {
case ELV_MQUEUE_NO:
@@ -1893,12 +1897,11 @@ static struct request *get_request(reque
goto get_rq;
}
- if (blk_queue_full(q, rw) && !ioc_batching(q, ioc)) {
+ if (blk_queue_full(q, rw) && !batching) {
/*
* The queue is full and the allocating process is not a
* "batcher", and not exempted by the IO scheduler
*/
- spin_unlock_irq(q->queue_lock);
goto out;
}
@@ -1932,11 +1935,10 @@ rq_starved:
if (unlikely(rl->count[rw] == 0))
rl->starved[rw] = 1;
- spin_unlock_irq(q->queue_lock);
goto out;
}
- if (ioc_batching(q, ioc))
+ if (batching)
ioc->nr_batch_requests--;
rq_init(q, rq);
@@ -1949,6 +1951,8 @@ out:
/*
* No available requests for this queue, unplug the device and wait for some
* requests to become available.
+ *
+ * Called with q->queue_lock held, and returns with it unlocked.
*/
static struct request *get_request_wait(request_queue_t *q, int rw)
{
@@ -1966,7 +1970,8 @@ static struct request *get_request_wait(
if (!rq) {
struct io_context *ioc;
- generic_unplug_device(q);
+ __generic_unplug_device(q);
+ spin_unlock_irq(q->queue_lock);
io_schedule();
/*
@@ -1978,6 +1983,8 @@ static struct request *get_request_wait(
ioc = get_io_context(GFP_NOIO);
ioc_set_batching(q, ioc);
put_io_context(ioc);
+
+ spin_lock_irq(q->queue_lock);
}
finish_wait(&rl->wait[rw], &wait);
} while (!rq);
@@ -1991,10 +1998,15 @@ struct request *blk_get_request(request_
BUG_ON(rw != READ && rw != WRITE);
+ spin_lock_irq(q->queue_lock);
if (gfp_mask & __GFP_WAIT)
rq = get_request_wait(q, rw);
- else
+ else {
rq = get_request(q, rw, gfp_mask);
+ if (!rq)
+ spin_unlock_irq(q->queue_lock);
+ }
+ /* q->queue_lock is unlocked at this point */
return rq;
}
@@ -2639,9 +2651,10 @@ static int __make_request(request_queue_
get_rq:
/*
* Grab a free request. This is might sleep but can not fail.
+ * Returns with the queue unlocked.
*/
- spin_unlock_irq(q->queue_lock);
req = get_request_wait(q, rw);
+
/*
* After dropping the lock and possibly sleeping here, our request
* may now be mergeable after it had proven unmergeable (above).
_
next prev parent reply other threads:[~2005-03-29 10:06 UTC|newest]
Thread overview: 19+ messages / expand[flat|nested] mbox.gz Atom feed top
2005-03-29 2:53 [patch] use cheaper elv_queue_empty when unplug a device Chen, Kenneth W
2005-03-29 8:06 ` Jens Axboe
2005-03-29 9:19 ` Nick Piggin
2005-03-29 9:21 ` Nick Piggin
2005-03-29 9:28 ` Jens Axboe
2005-03-29 9:50 ` Nick Piggin
2005-03-29 10:06 ` Nick Piggin [this message]
2005-03-30 0:57 ` Nick Piggin
2005-03-30 8:11 ` Jens Axboe
2005-04-08 9:45 ` Jens Axboe
2005-04-08 9:55 ` Nick Piggin
2005-04-08 10:02 ` Jens Axboe
2005-04-08 10:22 ` Nick Piggin
2005-03-29 10:10 ` Arjan van de Ven
2005-03-29 10:19 ` Jens Axboe
2005-03-29 10:23 ` Nick Piggin
2005-03-29 13:15 ` Jens Axboe
2005-03-30 0:07 ` Nick Piggin
2005-03-29 19:02 ` Chen, Kenneth W
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=424928A1.8060400@yahoo.com.au \
--to=nickpiggin@yahoo.com.au \
--cc=axboe@suse.de \
--cc=kenneth.w.chen@intel.com \
--cc=linux-kernel@vger.kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox