From: Andreas Herrmann <aherrmann-l3A5Bk7waGM@public.gmane.org>
To: Christoph Hellwig <hch-jcswGhMUV9g@public.gmane.org>
Cc: Jens Axboe <axboe-tSWWG44O7X1aa/9Udqfwiw@public.gmane.org>,
Tejun Heo <tj-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>,
Josef Bacik <josef-DigfWCa+lFGyeJad7bwFQA@public.gmane.org>,
linux-block-u79uwXL29TY76Z2rM5mHXA@public.gmane.org,
cgroups-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
Subject: Re: [PATCH 09/15] blk-rq-qos: make rq_qos_add and rq_qos_del more useful
Date: Fri, 20 Jan 2023 10:57:23 +0100 [thread overview]
Message-ID: <Y8plg6OAa4lrnyZZ@suselix> (raw)
In-Reply-To: <20230117081257.3089859-10-hch-jcswGhMUV9g@public.gmane.org>
On Tue, Jan 17, 2023 at 09:12:51AM +0100, Christoph Hellwig wrote:
> Switch to passing a gendisk, and make rq_qos_add initialize all required
> fields and drop the not required q argument from rq_qos_del. Also move
> the code out of line given how large it is.
>
> Signed-off-by: Christoph Hellwig <hch-jcswGhMUV9g@public.gmane.org>
> ---
> block/blk-iocost.c | 13 ++-------
> block/blk-iolatency.c | 14 +++------
> block/blk-rq-qos.c | 67 +++++++++++++++++++++++++++++++++++++++++++
> block/blk-rq-qos.h | 62 ++-------------------------------------
> block/blk-wbt.c | 5 +---
> 5 files changed, 78 insertions(+), 83 deletions(-)
>
> diff --git a/block/blk-iocost.c b/block/blk-iocost.c
> index 6f39ca99e9d76f..9b5c0d23c9ce8b 100644
> --- a/block/blk-iocost.c
> +++ b/block/blk-iocost.c
> @@ -2836,9 +2836,7 @@ static struct rq_qos_ops ioc_rqos_ops = {
>
> static int blk_iocost_init(struct gendisk *disk)
> {
> - struct request_queue *q = disk->queue;
> struct ioc *ioc;
> - struct rq_qos *rqos;
> int i, cpu, ret;
>
> ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
> @@ -2861,11 +2859,6 @@ static int blk_iocost_init(struct gendisk *disk)
> local64_set(&ccs->rq_wait_ns, 0);
> }
>
> - rqos = &ioc->rqos;
> - rqos->id = RQ_QOS_COST;
> - rqos->ops = &ioc_rqos_ops;
> - rqos->q = q;
> -
> spin_lock_init(&ioc->lock);
> timer_setup(&ioc->timer, ioc_timer_fn, 0);
> INIT_LIST_HEAD(&ioc->active_iocgs);
> @@ -2889,17 +2882,17 @@ static int blk_iocost_init(struct gendisk *disk)
> * called before policy activation completion, can't assume that the
> * target bio has an iocg associated and need to test for NULL iocg.
> */
> - ret = rq_qos_add(q, rqos);
> + ret = rq_qos_add(&ioc->rqos, disk, RQ_QOS_COST, &ioc_rqos_ops);
> if (ret)
> goto err_free_ioc;
>
> - ret = blkcg_activate_policy(q, &blkcg_policy_iocost);
> + ret = blkcg_activate_policy(disk->queue, &blkcg_policy_iocost);
> if (ret)
> goto err_del_qos;
> return 0;
>
> err_del_qos:
> - rq_qos_del(q, rqos);
> + rq_qos_del(&ioc->rqos);
> err_free_ioc:
> free_percpu(ioc->pcpu_stat);
> kfree(ioc);
> diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
> index b55eac2cf91944..1c394bd77aa0b4 100644
> --- a/block/blk-iolatency.c
> +++ b/block/blk-iolatency.c
> @@ -757,24 +757,18 @@ static void blkiolatency_enable_work_fn(struct work_struct *work)
>
> int blk_iolatency_init(struct gendisk *disk)
> {
> - struct request_queue *q = disk->queue;
> struct blk_iolatency *blkiolat;
> - struct rq_qos *rqos;
> int ret;
>
> blkiolat = kzalloc(sizeof(*blkiolat), GFP_KERNEL);
> if (!blkiolat)
> return -ENOMEM;
>
> - rqos = &blkiolat->rqos;
> - rqos->id = RQ_QOS_LATENCY;
> - rqos->ops = &blkcg_iolatency_ops;
> - rqos->q = q;
> -
> - ret = rq_qos_add(q, rqos);
> + ret = rq_qos_add(&blkiolat->rqos, disk, RQ_QOS_LATENCY,
> + &blkcg_iolatency_ops);
> if (ret)
> goto err_free;
> - ret = blkcg_activate_policy(q, &blkcg_policy_iolatency);
> + ret = blkcg_activate_policy(disk->queue, &blkcg_policy_iolatency);
> if (ret)
> goto err_qos_del;
>
> @@ -784,7 +778,7 @@ int blk_iolatency_init(struct gendisk *disk)
> return 0;
>
> err_qos_del:
> - rq_qos_del(q, rqos);
> + rq_qos_del(&blkiolat->rqos);
> err_free:
> kfree(blkiolat);
> return ret;
> diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
> index 88f0fe7dcf5451..14bee1bd761362 100644
> --- a/block/blk-rq-qos.c
> +++ b/block/blk-rq-qos.c
> @@ -294,3 +294,70 @@ void rq_qos_exit(struct request_queue *q)
> rqos->ops->exit(rqos);
> }
> }
> +
> +int rq_qos_add(struct rq_qos *rqos, struct gendisk *disk, enum rq_qos_id id,
> + struct rq_qos_ops *ops)
> +{
> + struct request_queue *q = disk->queue;
> +
> + rqos->q = q;
> + rqos->id = id;
> + rqos->ops = ops;
> +
> + /*
> + * No IO can be in-flight when adding rqos, so freeze queue, which
> + * is fine since we only support rq_qos for blk-mq queue.
> + *
> + * Reuse ->queue_lock for protecting against other concurrent
> + * rq_qos adding/deleting
> + */
> + blk_mq_freeze_queue(q);
> +
> + spin_lock_irq(&q->queue_lock);
> + if (rq_qos_id(q, rqos->id))
> + goto ebusy;
> + rqos->next = q->rq_qos;
> + q->rq_qos = rqos;
> + spin_unlock_irq(&q->queue_lock);
> +
> + blk_mq_unfreeze_queue(q);
> +
> + if (rqos->ops->debugfs_attrs) {
> + mutex_lock(&q->debugfs_mutex);
> + blk_mq_debugfs_register_rqos(rqos);
> + mutex_unlock(&q->debugfs_mutex);
> + }
> +
> + return 0;
> +ebusy:
> + spin_unlock_irq(&q->queue_lock);
> + blk_mq_unfreeze_queue(q);
> + return -EBUSY;
> +}
> +
> +void rq_qos_del(struct rq_qos *rqos)
> +{
> + struct request_queue *q = rqos->q;
> + struct rq_qos **cur;
> +
> + /*
> + * See comment in rq_qos_add() about freezing queue & using
> + * ->queue_lock.
> + */
> + blk_mq_freeze_queue(q);
> +
> + spin_lock_irq(&q->queue_lock);
> + for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
> + if (*cur == rqos) {
> + *cur = rqos->next;
> + break;
> + }
> + }
> + spin_unlock_irq(&q->queue_lock);
> +
> + blk_mq_unfreeze_queue(q);
> +
> + mutex_lock(&q->debugfs_mutex);
> + blk_mq_debugfs_unregister_rqos(rqos);
> + mutex_unlock(&q->debugfs_mutex);
> +}
> diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
> index 1ef1f7d4bc3cbc..22552785aa31ed 100644
> --- a/block/blk-rq-qos.h
> +++ b/block/blk-rq-qos.h
> @@ -85,65 +85,9 @@ static inline void rq_wait_init(struct rq_wait *rq_wait)
> init_waitqueue_head(&rq_wait->wait);
> }
>
> -static inline int rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
> -{
> - /*
> - * No IO can be in-flight when adding rqos, so freeze queue, which
> - * is fine since we only support rq_qos for blk-mq queue.
> - *
> - * Reuse ->queue_lock for protecting against other concurrent
> - * rq_qos adding/deleting
> - */
> - blk_mq_freeze_queue(q);
> -
> - spin_lock_irq(&q->queue_lock);
> - if (rq_qos_id(q, rqos->id))
> - goto ebusy;
> - rqos->next = q->rq_qos;
> - q->rq_qos = rqos;
> - spin_unlock_irq(&q->queue_lock);
> -
> - blk_mq_unfreeze_queue(q);
> -
> - if (rqos->ops->debugfs_attrs) {
> - mutex_lock(&q->debugfs_mutex);
> - blk_mq_debugfs_register_rqos(rqos);
> - mutex_unlock(&q->debugfs_mutex);
> - }
> -
> - return 0;
> -ebusy:
> - spin_unlock_irq(&q->queue_lock);
> - blk_mq_unfreeze_queue(q);
> - return -EBUSY;
> -
> -}
> -
> -static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
> -{
> - struct rq_qos **cur;
> -
> - /*
> - * See comment in rq_qos_add() about freezing queue & using
> - * ->queue_lock.
> - */
> - blk_mq_freeze_queue(q);
> -
> - spin_lock_irq(&q->queue_lock);
> - for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
> - if (*cur == rqos) {
> - *cur = rqos->next;
> - break;
> - }
> - }
> - spin_unlock_irq(&q->queue_lock);
> -
> - blk_mq_unfreeze_queue(q);
> -
> - mutex_lock(&q->debugfs_mutex);
> - blk_mq_debugfs_unregister_rqos(rqos);
> - mutex_unlock(&q->debugfs_mutex);
> -}
> +int rq_qos_add(struct rq_qos *rqos, struct gendisk *disk, enum rq_qos_id id,
> + struct rq_qos_ops *ops);
> +void rq_qos_del(struct rq_qos *rqos);
>
> typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data);
> typedef void (cleanup_cb_t)(struct rq_wait *rqw, void *private_data);
> diff --git a/block/blk-wbt.c b/block/blk-wbt.c
> index 473ae72befaf1a..97149a4f10e600 100644
> --- a/block/blk-wbt.c
> +++ b/block/blk-wbt.c
> @@ -855,9 +855,6 @@ int wbt_init(struct gendisk *disk)
> for (i = 0; i < WBT_NUM_RWQ; i++)
> rq_wait_init(&rwb->rq_wait[i]);
>
> - rwb->rqos.id = RQ_QOS_WBT;
> - rwb->rqos.ops = &wbt_rqos_ops;
> - rwb->rqos.q = q;
> rwb->last_comp = rwb->last_issue = jiffies;
> rwb->win_nsec = RWB_WINDOW_NSEC;
> rwb->enable_state = WBT_STATE_ON_DEFAULT;
> @@ -870,7 +867,7 @@ int wbt_init(struct gendisk *disk)
> /*
> * Assign rwb and add the stats callback.
> */
> - ret = rq_qos_add(q, &rwb->rqos);
> + ret = rq_qos_add(&rwb->rqos, q->disk, RQ_QOS_WBT, &wbt_rqos_ops);
              ^^^^^^^
Shouldn't this simply be `disk` here, since it is already available as the function argument?
> if (ret)
> goto err_free;
>
> --
> 2.39.0
>
Otherwise looks good to me. Feel free to add
Reviewed-by: Andreas Herrmann <aherrmann-l3A5Bk7waGM@public.gmane.org>
--
Regards,
Andreas
SUSE Software Solutions Germany GmbH
Frankenstrasse 146, 90461 Nürnberg, Germany
GF: Ivo Totev, Andrew Myers, Andrew McDonald, Martje Boudien Moerman
(HRB 36809, AG Nürnberg)
next prev parent reply other threads:[~2023-01-20 9:57 UTC|newest]
Thread overview: 53+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-01-17 8:12 switch blk-cgroup to work on gendisk Christoph Hellwig
[not found] ` <20230117081257.3089859-1-hch-jcswGhMUV9g@public.gmane.org>
2023-01-17 8:12 ` [PATCH 01/15] blk-cgroup: don't defer blkg_free to a workqueue Christoph Hellwig
2023-01-20 8:54 ` Andreas Herrmann
2023-01-27 6:59 ` Hannes Reinecke
[not found] ` <b4622942-67e7-969b-4439-0aea7c5bd165-l3A5Bk7waGM@public.gmane.org>
2023-01-27 7:07 ` Christoph Hellwig
2023-01-27 7:43 ` Hannes Reinecke
2023-01-17 8:12 ` [PATCH 02/15] block: don't call blk_throtl_stat_add for non-READ/WRITE commands Christoph Hellwig
[not found] ` <20230117081257.3089859-3-hch-jcswGhMUV9g@public.gmane.org>
2023-01-20 8:55 ` Andreas Herrmann
2023-01-27 7:00 ` Hannes Reinecke
2023-01-17 8:12 ` [PATCH 04/15] blk-cgroup: pin the gendisk in struct blkcg_gq Christoph Hellwig
[not found] ` <20230117081257.3089859-5-hch-jcswGhMUV9g@public.gmane.org>
2023-01-20 8:57 ` Andreas Herrmann
2023-01-27 7:02 ` Hannes Reinecke
2023-01-17 8:12 ` [PATCH 06/15] blk-wbt: pass a gendisk to wbt_{enable,disable}_default Christoph Hellwig
2023-01-20 9:10 ` Andreas Herrmann
2023-01-27 7:04 ` Hannes Reinecke
2023-01-17 8:12 ` [PATCH 07/15] blk-wbt: pass a gendisk to wbt_init Christoph Hellwig
2023-01-20 9:53 ` Andreas Herrmann
[not found] ` <20230117081257.3089859-8-hch-jcswGhMUV9g@public.gmane.org>
2023-01-27 7:05 ` Hannes Reinecke
2023-01-17 8:12 ` [PATCH 08/15] blk-wbt: open code wbt_queue_depth_changed in wbt_update_limits Christoph Hellwig
2023-01-20 9:21 ` Andreas Herrmann
[not found] ` <20230117081257.3089859-9-hch-jcswGhMUV9g@public.gmane.org>
2023-01-27 7:07 ` Hannes Reinecke
2023-01-17 8:12 ` [PATCH 11/15] blk-rq-qos: store a gendisk instead of request_queue in struct rq_qos Christoph Hellwig
2023-01-20 10:19 ` Andreas Herrmann
2023-01-27 7:09 ` Hannes Reinecke
2023-01-17 8:12 ` [PATCH 03/15] blk-cgroup: delay blk-cgroup initialization until add_disk Christoph Hellwig
[not found] ` <20230117081257.3089859-4-hch-jcswGhMUV9g@public.gmane.org>
2023-01-20 8:56 ` Andreas Herrmann
2023-01-27 7:01 ` Hannes Reinecke
2023-01-17 8:12 ` [PATCH 05/15] blk-cgroup: store a gendisk to throttle in struct task_struct Christoph Hellwig
2023-01-20 9:00 ` Andreas Herrmann
2023-01-27 7:03 ` Hannes Reinecke
2023-01-17 8:12 ` [PATCH 09/15] blk-rq-qos: make rq_qos_add and rq_qos_del more useful Christoph Hellwig
[not found] ` <20230117081257.3089859-10-hch-jcswGhMUV9g@public.gmane.org>
2023-01-20 9:57 ` Andreas Herrmann [this message]
2023-01-27 7:08 ` Hannes Reinecke
2023-01-17 8:12 ` [PATCH 10/15] blk-rq-qos: constify rq_qos_ops Christoph Hellwig
2023-01-20 10:01 ` Andreas Herrmann
[not found] ` <20230117081257.3089859-11-hch-jcswGhMUV9g@public.gmane.org>
2023-01-27 7:08 ` Hannes Reinecke
2023-01-17 8:12 ` [PATCH 12/15] blk-cgroup: pass a gendisk to blkcg_{de,}activate_policy Christoph Hellwig
2023-01-20 10:26 ` Andreas Herrmann
2023-01-27 7:10 ` Hannes Reinecke
2023-01-17 8:12 ` [PATCH 13/15] blk-cgroup: pass a gendisk to pd_alloc_fn Christoph Hellwig
[not found] ` <20230117081257.3089859-14-hch-jcswGhMUV9g@public.gmane.org>
2023-01-20 10:37 ` Andreas Herrmann
2023-01-27 7:10 ` Hannes Reinecke
2023-01-17 8:12 ` [PATCH 14/15] blk-cgroup: pass a gendisk to blkg_lookup Christoph Hellwig
[not found] ` <20230117081257.3089859-15-hch-jcswGhMUV9g@public.gmane.org>
2023-01-20 11:07 ` Andreas Herrmann
2023-01-27 7:11 ` Hannes Reinecke
2023-01-17 8:12 ` [PATCH 15/15] blk-cgroup: move the cgroup information to struct gendisk Christoph Hellwig
[not found] ` <20230117081257.3089859-16-hch-jcswGhMUV9g@public.gmane.org>
2023-01-20 11:07 ` Andreas Herrmann
2023-01-27 7:12 ` Hannes Reinecke
2023-01-19 17:03 ` switch blk-cgroup to work on gendisk Tejun Heo
[not found] ` <Y8l34/qeHPLV4rKJ-NiLfg/pYEd1N0TnZuCh8vA@public.gmane.org>
2023-01-19 17:05 ` Christoph Hellwig
[not found] ` <20230119170526.GA5050-jcswGhMUV9g@public.gmane.org>
2023-01-19 17:13 ` Tejun Heo
-- strict thread matches above, loose matches on Subject: below --
2023-01-24 6:57 switch blk-cgroup to work on gendisk v2 Christoph Hellwig
2023-01-24 6:57 ` [PATCH 09/15] blk-rq-qos: make rq_qos_add and rq_qos_del more useful Christoph Hellwig
2023-01-28 0:01 ` Tejun Heo
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=Y8plg6OAa4lrnyZZ@suselix \
--to=aherrmann-l3a5bk7wagm@public.gmane.org \
--cc=axboe-tSWWG44O7X1aa/9Udqfwiw@public.gmane.org \
--cc=cgroups-u79uwXL29TY76Z2rM5mHXA@public.gmane.org \
--cc=hch-jcswGhMUV9g@public.gmane.org \
--cc=josef-DigfWCa+lFGyeJad7bwFQA@public.gmane.org \
--cc=linux-block-u79uwXL29TY76Z2rM5mHXA@public.gmane.org \
--cc=tj-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox