From: Christoph Hellwig <hch@lst.de>
To: Jens Axboe <axboe@kernel.dk>, Tejun Heo <tj@kernel.org>,
Josef Bacik <josef@toxicpanda.com>
Cc: Ming Lei <ming.lei@redhat.com>,
cgroups@vger.kernel.org, linux-block@vger.kernel.org
Subject: [PATCH 1/3] blk-throttle: store a gendisk in struct throtl_data
Date: Mon, 13 Feb 2023 11:41:32 +0100 [thread overview]
Message-ID: <20230213104134.475204-2-hch@lst.de> (raw)
In-Reply-To: <20230213104134.475204-1-hch@lst.de>
We generally need a gendisk for core cgroup helpers, so store that
and derive the queue from it where needed.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
block/blk-throttle.c | 52 ++++++++++++++++++++------------------------
1 file changed, 23 insertions(+), 29 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index e7bd7050d68402..6a8b82939a38ad 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -61,7 +61,7 @@ struct throtl_data
/* service tree for active throtl groups */
struct throtl_service_queue service_queue;
- struct request_queue *queue;
+ struct gendisk *disk;
/* Total Number of queued bios on READ and WRITE lists */
unsigned int nr_queued[2];
@@ -223,13 +223,13 @@ static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
struct throtl_data *__td = sq_to_td((sq)); \
\
(void)__td; \
- if (likely(!blk_trace_note_message_enabled(__td->queue))) \
+ if (likely(!blk_trace_note_message_enabled(__td->disk->queue))) \
break; \
if ((__tg)) { \
- blk_add_cgroup_trace_msg(__td->queue, \
+ blk_add_cgroup_trace_msg(__td->disk->queue, \
&tg_to_blkg(__tg)->blkcg->css, "throtl " fmt, ##args);\
} else { \
- blk_add_trace_msg(__td->queue, "throtl " fmt, ##args); \
+ blk_add_trace_msg(__td->disk->queue, "throtl " fmt, ##args); \
} \
} while (0)
@@ -451,8 +451,7 @@ static void blk_throtl_update_limit_valid(struct throtl_data *td)
bool low_valid = false;
rcu_read_lock();
- blkg_for_each_descendant_post(blkg, pos_css,
- td->queue->disk->root_blkg) {
+ blkg_for_each_descendant_post(blkg, pos_css, td->disk->root_blkg) {
struct throtl_grp *tg = blkg_to_tg(blkg);
if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
@@ -1169,19 +1168,19 @@ static void throtl_pending_timer_fn(struct timer_list *t)
struct throtl_grp *tg = sq_to_tg(sq);
struct throtl_data *td = sq_to_td(sq);
struct throtl_service_queue *parent_sq;
- struct request_queue *q;
+ struct gendisk *disk;
bool dispatched;
int ret;
/* throtl_data may be gone, so figure out request queue by blkg */
if (tg)
- q = tg->pd.blkg->disk->queue;
+ disk = tg->pd.blkg->disk;
else
- q = td->queue;
+ disk = td->disk;
- spin_lock_irq(&q->queue_lock);
+ spin_lock_irq(&disk->queue->queue_lock);
- if (!q->disk->root_blkg)
+ if (!disk->root_blkg)
goto out_unlock;
if (throtl_can_upgrade(td, NULL))
@@ -1206,9 +1205,9 @@ static void throtl_pending_timer_fn(struct timer_list *t)
break;
/* this dispatch windows is still open, relax and repeat */
- spin_unlock_irq(&q->queue_lock);
+ spin_unlock_irq(&disk->queue->queue_lock);
cpu_relax();
- spin_lock_irq(&q->queue_lock);
+ spin_lock_irq(&disk->queue->queue_lock);
}
if (!dispatched)
@@ -1230,7 +1229,7 @@ static void throtl_pending_timer_fn(struct timer_list *t)
queue_work(kthrotld_workqueue, &td->dispatch_work);
}
out_unlock:
- spin_unlock_irq(&q->queue_lock);
+ spin_unlock_irq(&disk->queue->queue_lock);
}
/**
@@ -1246,7 +1245,6 @@ static void blk_throtl_dispatch_work_fn(struct work_struct *work)
struct throtl_data *td = container_of(work, struct throtl_data,
dispatch_work);
struct throtl_service_queue *td_sq = &td->service_queue;
- struct request_queue *q = td->queue;
struct bio_list bio_list_on_stack;
struct bio *bio;
struct blk_plug plug;
@@ -1254,11 +1252,11 @@ static void blk_throtl_dispatch_work_fn(struct work_struct *work)
bio_list_init(&bio_list_on_stack);
- spin_lock_irq(&q->queue_lock);
+ spin_lock_irq(&td->disk->queue->queue_lock);
for (rw = READ; rw <= WRITE; rw++)
while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
bio_list_add(&bio_list_on_stack, bio);
- spin_unlock_irq(&q->queue_lock);
+ spin_unlock_irq(&td->disk->queue->queue_lock);
if (!bio_list_empty(&bio_list_on_stack)) {
blk_start_plug(&plug);
@@ -1323,8 +1321,7 @@ static void tg_conf_updated(struct throtl_grp *tg, bool global)
* blk-throttle.
*/
blkg_for_each_descendant_pre(blkg, pos_css,
- global ? tg->td->queue->disk->root_blkg :
- tg_to_blkg(tg)) {
+ global ? tg->td->disk->root_blkg : tg_to_blkg(tg)) {
struct throtl_grp *this_tg = blkg_to_tg(blkg);
struct throtl_grp *parent_tg;
@@ -1873,8 +1870,7 @@ static bool throtl_can_upgrade(struct throtl_data *td,
return false;
rcu_read_lock();
- blkg_for_each_descendant_post(blkg, pos_css,
- td->queue->disk->root_blkg) {
+ blkg_for_each_descendant_post(blkg, pos_css, td->disk->root_blkg) {
struct throtl_grp *tg = blkg_to_tg(blkg);
if (tg == this_tg)
@@ -1920,8 +1916,7 @@ static void throtl_upgrade_state(struct throtl_data *td)
td->low_upgrade_time = jiffies;
td->scale = 0;
rcu_read_lock();
- blkg_for_each_descendant_post(blkg, pos_css,
- td->queue->disk->root_blkg) {
+ blkg_for_each_descendant_post(blkg, pos_css, td->disk->root_blkg) {
struct throtl_grp *tg = blkg_to_tg(blkg);
struct throtl_service_queue *sq = &tg->service_queue;
@@ -2068,7 +2063,7 @@ static void throtl_update_latency_buckets(struct throtl_data *td)
unsigned long last_latency[2] = { 0 };
unsigned long latency[2];
- if (!blk_queue_nonrot(td->queue) || !td->limit_valid[LIMIT_LOW])
+ if (!blk_queue_nonrot(td->disk->queue) || !td->limit_valid[LIMIT_LOW])
return;
if (time_before(jiffies, td->last_calculate_time + HZ))
return;
@@ -2288,7 +2283,7 @@ static void throtl_track_latency(struct throtl_data *td, sector_t size,
if (!td || td->limit_index != LIMIT_LOW ||
!(op == REQ_OP_READ || op == REQ_OP_WRITE) ||
- !blk_queue_nonrot(td->queue))
+ !blk_queue_nonrot(td->disk->queue))
return;
index = request_bucket_index(size);
@@ -2365,11 +2360,10 @@ void blk_throtl_bio_endio(struct bio *bio)
int blk_throtl_init(struct gendisk *disk)
{
- struct request_queue *q = disk->queue;
struct throtl_data *td;
int ret;
- td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
+ td = kzalloc_node(sizeof(*td), GFP_KERNEL, disk->queue->node);
if (!td)
return -ENOMEM;
td->latency_buckets[READ] = __alloc_percpu(sizeof(struct latency_bucket) *
@@ -2389,8 +2383,8 @@ int blk_throtl_init(struct gendisk *disk)
INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
throtl_service_queue_init(&td->service_queue);
- q->td = td;
- td->queue = q;
+ disk->queue->td = td;
+ td->disk = disk;
td->limit_valid[LIMIT_MAX] = true;
td->limit_index = LIMIT_MAX;
--
2.39.1
next prev parent reply other threads:[~2023-02-13 10:41 UTC|newest]
Thread overview: 5+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-02-13 10:41 fix circular disk reference in blk-cgroup Christoph Hellwig
2023-02-13 10:41 ` Christoph Hellwig [this message]
[not found] ` <20230213104134.475204-1-hch-jcswGhMUV9g@public.gmane.org>
2023-02-13 10:41 ` [PATCH 2/3] blk-throttle: move the throtl_data pointer from the request_queue to struct gendisk Christoph Hellwig
2023-02-13 10:41 ` [PATCH 3/3] blk-cgroup: only grab an inode reference to the disk for each blkg Christoph Hellwig
2023-02-13 12:11 ` Ming Lei
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230213104134.475204-2-hch@lst.de \
--to=hch@lst.de \
--cc=axboe@kernel.dk \
--cc=cgroups@vger.kernel.org \
--cc=josef@toxicpanda.com \
--cc=linux-block@vger.kernel.org \
--cc=ming.lei@redhat.com \
--cc=tj@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox