From: Tejun Heo <tj@kernel.org>
To: axboe@kernel.dk
Cc: linux-kernel@vger.kernel.org,
	cgroups@vger.kernel.org,
	vgoyal@redhat.com,
	avanzini.arianna@gmail.com,
	Tejun Heo <tj@kernel.org>
Subject: [PATCH 5/8] cfq-iosched: remove @gfp_mask from cfq_find_alloc_queue()
Date: Mon, 8 Jun 2015 17:59:30 +0900
Message-ID: <1433753973-23684-6-git-send-email-tj@kernel.org>
In-Reply-To: <1433753973-23684-1-git-send-email-tj@kernel.org>

Even when allocations fail, cfq_find_alloc_queue() always returns a
valid cfq_queue by falling back to the oom cfq_queue. As such, there
isn't much point in taking @gfp_mask and trying "harder" if __GFP_WAIT
is set. GFP_ATOMIC allocations don't fail often, and even when they
do, the degraded behavior is acceptable and temporary.
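
Concretely, the allocate-or-fall-back pattern this patch settles on
looks like this (a condensed sketch of the resulting code, not a
verbatim excerpt):

	cfqq = kmem_cache_alloc_node(cfq_pool, GFP_ATOMIC | __GFP_ZERO,
				     cfqd->queue->node);
	if (cfqq) {
		/* initialize the newly allocated queue */
	} else {
		/* degrade temporarily to the embedded, always-valid queue */
		cfqq = &cfqd->oom_cfqq;
	}
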
After all, the only reason get_request(), which ultimately determines
the gfp_mask, cares about __GFP_WAIT is to guarantee request
allocation, assuming IO forward progress, for callers which are
willing to wait. There's no reason for cfq_find_alloc_queue() to
behave differently on __GFP_WAIT when it already has a fallback
mechanism.
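
For reference, the only thing __GFP_WAIT bought here was the
unlock/alloc/relock retry dance below (paraphrased from the hunk this
patch deletes), all to avoid an allocation failure that the oom_cfqq
fallback already absorbs:

	} else if (gfp_mask & __GFP_WAIT) {
		/* drop the locks, block on the allocation, then retry */
		rcu_read_unlock();
		spin_unlock_irq(cfqd->queue->queue_lock);
		new_cfqq = kmem_cache_alloc_node(cfq_pool,
						 gfp_mask | __GFP_ZERO,
						 cfqd->queue->node);
		spin_lock_irq(cfqd->queue->queue_lock);
		if (new_cfqq)
			goto retry;
		else
			return &cfqd->oom_cfqq;
	}
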
Remove @gfp_mask from cfq_find_alloc_queue() and propagate the changes
to its callers. This simplifies the function quite a bit and will
help make async queues per-cfq_group.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: Arianna Avanzini <avanzini.arianna@gmail.com>
---
 block/cfq-iosched.c | 45 ++++++++++-----------------------------------
 1 file changed, 10 insertions(+), 35 deletions(-)

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 90d5a87..b8e83cd 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -858,8 +858,7 @@ static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
 
 static void cfq_dispatch_insert(struct request_queue *, struct request *);
 static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
-				       struct cfq_io_cq *cic, struct bio *bio,
-				       gfp_t gfp_mask);
+				       struct cfq_io_cq *cic, struct bio *bio);
 
 static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
 {
@@ -3507,7 +3506,7 @@ static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
 	cfqq = cic_to_cfqq(cic, false);
 	if (cfqq) {
 		cfq_put_queue(cfqq);
-		cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio, GFP_ATOMIC);
+		cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio);
 		cic_set_cfqq(cic, cfqq, false);
 	}
 
@@ -3575,13 +3574,12 @@ static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) {
 
 static struct cfq_queue *
 cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
-		     struct bio *bio, gfp_t gfp_mask)
+		     struct bio *bio)
 {
 	struct blkcg *blkcg;
-	struct cfq_queue *cfqq, *new_cfqq = NULL;
+	struct cfq_queue *cfqq;
 	struct cfq_group *cfqg;
 
-retry:
 	rcu_read_lock();
 
 	blkcg = bio_blkcg(bio);
@@ -3598,27 +3596,9 @@ cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
 	 * originally, since it should just be a temporary situation.
 	 */
 	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
-		cfqq = NULL;
-		if (new_cfqq) {
-			cfqq = new_cfqq;
-			new_cfqq = NULL;
-		} else if (gfp_mask & __GFP_WAIT) {
-			rcu_read_unlock();
-			spin_unlock_irq(cfqd->queue->queue_lock);
-			new_cfqq = kmem_cache_alloc_node(cfq_pool,
-							 gfp_mask | __GFP_ZERO,
-							 cfqd->queue->node);
-			spin_lock_irq(cfqd->queue->queue_lock);
-			if (new_cfqq)
-				goto retry;
-			else
-				return &cfqd->oom_cfqq;
-		} else {
-			cfqq = kmem_cache_alloc_node(cfq_pool,
-						     gfp_mask | __GFP_ZERO,
-						     cfqd->queue->node);
-		}
-
+		cfqq = kmem_cache_alloc_node(cfq_pool,
+					     GFP_ATOMIC | __GFP_ZERO,
+					     cfqd->queue->node);
 		if (cfqq) {
 			cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
 			cfq_init_prio_data(cfqq, cic);
@@ -3628,9 +3608,6 @@ cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
 			cfqq = &cfqd->oom_cfqq;
 	}
 out:
-	if (new_cfqq)
-		kmem_cache_free(cfq_pool, new_cfqq);
-
 	rcu_read_unlock();
 	return cfqq;
 }
@@ -3655,7 +3632,7 @@ cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
 
 static struct cfq_queue *
 cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
-	      struct bio *bio, gfp_t gfp_mask)
+	      struct bio *bio)
 {
 	int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
 	int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
@@ -3674,7 +3651,7 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
 		goto out;
 	}
 
-	cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask);
+	cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio);
 
 	/*
 	 * pin the queue now that it's allocated, scheduler exit will prune it
@@ -4218,8 +4195,6 @@ cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
 	const bool is_sync = rq_is_sync(rq);
 	struct cfq_queue *cfqq;
 
-	might_sleep_if(gfp_mask & __GFP_WAIT);
-
 	spin_lock_irq(q->queue_lock);
 
 	check_ioprio_changed(cic, bio);
@@ -4229,7 +4204,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
 	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
 		if (cfqq)
 			cfq_put_queue(cfqq);
-		cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask);
+		cfqq = cfq_get_queue(cfqd, is_sync, cic, bio);
 		cic_set_cfqq(cic, cfqq, is_sync);
 	} else {
 		/*
--
2.4.2