From: Nick Piggin <nickpiggin@yahoo.com.au>
To: Andrew Morton <akpm@osdl.org>
Cc: Jens Axboe <axboe@suse.de>,
linux-kernel <linux-kernel@vger.kernel.org>,
"Chen, Kenneth W" <kenneth.w.chen@intel.com>
Subject: [patch 0/9] blk: reduce locking
Date: Tue, 12 Apr 2005 22:52:07 +1000 [thread overview]
Message-ID: <425BC477.4020002@yahoo.com.au> (raw)
In-Reply-To: <425BC262.1070500@yahoo.com.au>
[-- Attachment #1: Type: text/plain, Size: 32 bytes --]
8/9
--
SUSE Labs, Novell Inc.
[-- Attachment #2: blk-reduce-locking.patch --]
[-- Type: text/plain, Size: 3632 bytes --]
Rearrange the locking a bit, resulting in 1-2 fewer spin lock/unlock
pairs in the request submission paths.
Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Index: linux-2.6/drivers/block/ll_rw_blk.c
===================================================================
--- linux-2.6.orig/drivers/block/ll_rw_blk.c 2005-04-12 22:26:14.000000000 +1000
+++ linux-2.6/drivers/block/ll_rw_blk.c 2005-04-12 22:26:15.000000000 +1000
@@ -1859,18 +1859,20 @@ static void freed_request(request_queue_
#define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
/*
- * Get a free request, queue_lock must not be held
+ * Get a free request, queue_lock must be held.
+ * Returns NULL on failure, with queue_lock held.
+ * Returns !NULL on success, with queue_lock *not held*.
*/
static struct request *get_request(request_queue_t *q, int rw, int gfp_mask)
{
+ int batching;
struct request *rq = NULL;
struct request_list *rl = &q->rq;
- struct io_context *ioc = get_io_context(gfp_mask);
+ struct io_context *ioc = get_io_context(GFP_ATOMIC);
if (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)))
goto out;
- spin_lock_irq(q->queue_lock);
if (rl->count[rw]+1 >= q->nr_requests) {
/*
* The queue will fill after this allocation, so set it as
@@ -1883,6 +1885,8 @@ static struct request *get_request(reque
blk_set_queue_full(q, rw);
}
}
+
+ batching = ioc_batching(q, ioc);
switch (elv_may_queue(q, rw)) {
case ELV_MQUEUE_NO:
@@ -1893,12 +1897,11 @@ static struct request *get_request(reque
goto get_rq;
}
- if (blk_queue_full(q, rw) && !ioc_batching(q, ioc)) {
+ if (blk_queue_full(q, rw) && !batching) {
/*
* The queue is full and the allocating process is not a
* "batcher", and not exempted by the IO scheduler
*/
- spin_unlock_irq(q->queue_lock);
goto out;
}
@@ -1932,11 +1935,10 @@ rq_starved:
if (unlikely(rl->count[rw] == 0))
rl->starved[rw] = 1;
- spin_unlock_irq(q->queue_lock);
goto out;
}
- if (ioc_batching(q, ioc))
+ if (batching)
ioc->nr_batch_requests--;
rq_init(q, rq);
@@ -1949,6 +1951,8 @@ out:
/*
* No available requests for this queue, unplug the device and wait for some
* requests to become available.
+ *
+ * Called with q->queue_lock held, and returns with it unlocked.
*/
static struct request *get_request_wait(request_queue_t *q, int rw)
{
@@ -1967,7 +1971,8 @@ static struct request *get_request_wait(
if (!rq) {
struct io_context *ioc;
- generic_unplug_device(q);
+ __generic_unplug_device(q);
+ spin_unlock_irq(q->queue_lock);
io_schedule();
/*
@@ -1979,6 +1984,8 @@ static struct request *get_request_wait(
ioc = get_io_context(GFP_NOIO);
ioc_set_batching(q, ioc);
put_io_context(ioc);
+
+ spin_lock_irq(q->queue_lock);
}
finish_wait(&rl->wait[rw], &wait);
}
@@ -1992,10 +1999,15 @@ struct request *blk_get_request(request_
BUG_ON(rw != READ && rw != WRITE);
+ spin_lock_irq(q->queue_lock);
if (gfp_mask & __GFP_WAIT)
rq = get_request_wait(q, rw);
- else
+ else {
rq = get_request(q, rw, gfp_mask);
+ if (!rq)
+ spin_unlock_irq(q->queue_lock);
+ }
+ /* q->queue_lock is unlocked at this point */
return rq;
}
@@ -2636,9 +2648,10 @@ static int __make_request(request_queue_
get_rq:
/*
* Grab a free request. This is might sleep but can not fail.
+ * Returns with the queue unlocked.
*/
- spin_unlock_irq(q->queue_lock);
req = get_request_wait(q, rw);
+
/*
* After dropping the lock and possibly sleeping here, our request
* may now be mergeable after it had proven unmergeable (above).
next prev parent reply other threads:[~2005-04-12 13:43 UTC|newest]
Thread overview: 20+ messages / expand[flat|nested] mbox.gz Atom feed top
2005-04-12 12:43 [patch 0/9] various (mainly mempool fixes and block layer improvements) Nick Piggin
2005-04-12 12:48 ` [patch 1/9] GFP_ZERO fix Nick Piggin
2005-04-12 19:47 ` Andrew Morton
2005-04-13 1:02 ` Nick Piggin
2005-04-14 10:59 ` Manfred Spraul
2005-04-12 12:48 ` [patch 2/9] mempool gfp flag Nick Piggin
2005-04-12 19:50 ` Andrew Morton
2005-04-13 1:03 ` Nick Piggin
2005-04-12 12:49 ` [patch 3/9] no PF_MEMALLOC tinkering Nick Piggin
2005-04-12 19:57 ` Andrew Morton
2005-04-13 1:13 ` Nick Piggin
2005-04-12 12:49 ` [patch 4/9] blk: no memory barrier Nick Piggin
2005-04-12 12:50 ` [patch 5/9] blk: branch hints Nick Piggin
2005-04-12 12:50 ` [patch 6/9] blk: unplug later Nick Piggin
2005-04-12 19:58 ` Andrew Morton
2005-04-13 1:32 ` Nick Piggin
2005-04-13 10:20 ` Jens Axboe
2005-04-12 12:51 ` [patch 7/9] blk: efficiency improvements Nick Piggin
2005-04-12 12:52 ` Nick Piggin [this message]
2005-04-12 12:53 ` [patch doh/9] mempool simplify alloc Nick Piggin
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=425BC477.4020002@yahoo.com.au \
--to=nickpiggin@yahoo.com.au \
--cc=akpm@osdl.org \
--cc=axboe@suse.de \
--cc=kenneth.w.chen@intel.com \
--cc=linux-kernel@vger.kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox