linux-block.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Ming Lei <ming.lei@redhat.com>
To: Yu Kuai <yukuai@fnnas.com>
Cc: axboe@kernel.dk, linux-block@vger.kernel.org, tj@kernel.org,
	nilay@linux.ibm.com
Subject: Re: [PATCH v5 02/13] blk-wbt: fix possible deadlock to nest pcpu_alloc_mutex under q_usage_counter
Date: Tue, 16 Dec 2025 19:03:49 +0800	[thread overview]
Message-ID: <aUE8lSlWa6DnCYMa@fedora> (raw)
In-Reply-To: <20251214101409.1723751-3-yukuai@fnnas.com>

On Sun, Dec 14, 2025 at 06:13:57PM +0800, Yu Kuai wrote:
> If wbt is disabled by default and user configures wbt by sysfs, queue
> will be frozen first and then pcpu_alloc_mutex will be held in
> blk_stat_alloc_callback().
> 
> Fix this problem by allocating memory first before queue frozen.
> 
> Signed-off-by: Yu Kuai <yukuai@fnnas.com>
> ---
>  block/blk-wbt.c | 106 ++++++++++++++++++++++++++++--------------------
>  1 file changed, 61 insertions(+), 45 deletions(-)
> 
> diff --git a/block/blk-wbt.c b/block/blk-wbt.c
> index ba807649b29a..696baa681717 100644
> --- a/block/blk-wbt.c
> +++ b/block/blk-wbt.c
> @@ -93,7 +93,7 @@ struct rq_wb {
>  	struct rq_depth rq_depth;
>  };
>  
> -static int wbt_init(struct gendisk *disk);
> +static int wbt_init(struct gendisk *disk, struct rq_wb *rwb);
>  
>  static inline struct rq_wb *RQWB(struct rq_qos *rqos)
>  {
> @@ -698,6 +698,41 @@ static void wbt_requeue(struct rq_qos *rqos, struct request *rq)
>  	}
>  }
>  
> +static int wbt_data_dir(const struct request *rq)
> +{
> +	const enum req_op op = req_op(rq);
> +
> +	if (op == REQ_OP_READ)
> +		return READ;
> +	else if (op_is_write(op))
> +		return WRITE;
> +
> +	/* don't account */
> +	return -1;
> +}
> +
> +static struct rq_wb *wbt_alloc(void)
> +{
> +	struct rq_wb *rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
> +
> +	if (!rwb)
> +		return NULL;
> +
> +	rwb->cb = blk_stat_alloc_callback(wb_timer_fn, wbt_data_dir, 2, rwb);
> +	if (!rwb->cb) {
> +		kfree(rwb);
> +		return NULL;
> +	}
> +
> +	return rwb;
> +}
> +
> +static void wbt_free(struct rq_wb *rwb)
> +{
> +	blk_stat_free_callback(rwb->cb);
> +	kfree(rwb);
> +}
> +
>  /*
>   * Enable wbt if defaults are configured that way
>   */
> @@ -726,8 +761,15 @@ void wbt_enable_default(struct gendisk *disk)
>  	if (!blk_queue_registered(q))
>  		return;
>  
> -	if (queue_is_mq(q) && enable)
> -		wbt_init(disk);
> +	if (queue_is_mq(q) && enable) {
> +		struct rq_wb *rwb = wbt_alloc();
> +
> +		if (WARN_ON_ONCE(!rwb))
> +			return;
> +
> +		if (WARN_ON_ONCE(wbt_init(disk, rwb)))
> +			wbt_free(rwb);
> +	}
>  }
>  EXPORT_SYMBOL_GPL(wbt_enable_default);
>  
> @@ -743,19 +785,6 @@ static u64 wbt_default_latency_nsec(struct request_queue *q)
>  		return 75000000ULL;
>  }
>  
> -static int wbt_data_dir(const struct request *rq)
> -{
> -	const enum req_op op = req_op(rq);
> -
> -	if (op == REQ_OP_READ)
> -		return READ;
> -	else if (op_is_write(op))
> -		return WRITE;
> -
> -	/* don't account */
> -	return -1;
> -}
> -
>  static void wbt_queue_depth_changed(struct rq_qos *rqos)
>  {
>  	RQWB(rqos)->rq_depth.queue_depth = blk_queue_depth(rqos->disk->queue);
> @@ -767,8 +796,7 @@ static void wbt_exit(struct rq_qos *rqos)
>  	struct rq_wb *rwb = RQWB(rqos);
>  
>  	blk_stat_remove_callback(rqos->disk->queue, rwb->cb);
> -	blk_stat_free_callback(rwb->cb);
> -	kfree(rwb);
> +	wbt_free(rwb);
>  }
>  
>  /*
> @@ -892,22 +920,11 @@ static const struct rq_qos_ops wbt_rqos_ops = {
>  #endif
>  };
>  
> -static int wbt_init(struct gendisk *disk)
> +static int wbt_init(struct gendisk *disk, struct rq_wb *rwb)
>  {
>  	struct request_queue *q = disk->queue;
> -	struct rq_wb *rwb;
> -	int i;
>  	int ret;
> -
> -	rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
> -	if (!rwb)
> -		return -ENOMEM;
> -
> -	rwb->cb = blk_stat_alloc_callback(wb_timer_fn, wbt_data_dir, 2, rwb);
> -	if (!rwb->cb) {
> -		kfree(rwb);
> -		return -ENOMEM;
> -	}
> +	int i;
>  
>  	for (i = 0; i < WBT_NUM_RWQ; i++)
>  		rq_wait_init(&rwb->rq_wait[i]);
> @@ -927,38 +944,38 @@ static int wbt_init(struct gendisk *disk)
>  	ret = rq_qos_add(&rwb->rqos, disk, RQ_QOS_WBT, &wbt_rqos_ops);
>  	mutex_unlock(&q->rq_qos_mutex);
>  	if (ret)
> -		goto err_free;
> +		return ret;
>  
>  	blk_stat_add_callback(q, rwb->cb);
> -
>  	return 0;
> -
> -err_free:
> -	blk_stat_free_callback(rwb->cb);
> -	kfree(rwb);
> -	return ret;
> -
>  }
>  
>  int wbt_set_lat(struct gendisk *disk, s64 val)
>  {
>  	struct request_queue *q = disk->queue;
> +	struct rq_qos *rqos = wbt_rq_qos(q);
> +	struct rq_wb *rwb = NULL;
>  	unsigned int memflags;
> -	struct rq_qos *rqos;
>  	int ret = 0;
>  
> +	if (!rqos) {
> +		rwb = wbt_alloc();
> +		if (!rwb)
> +			return -ENOMEM;
> +	}
> +
>  	/*
>  	 * Ensure that the queue is idled, in case the latency update
>  	 * ends up either enabling or disabling wbt completely. We can't
>  	 * have IO inflight if that happens.
>  	 */
>  	memflags = blk_mq_freeze_queue(q);
> -
> -	rqos = wbt_rq_qos(q);
>  	if (!rqos) {
> -		ret = wbt_init(disk);
> -		if (ret)
> +		ret = wbt_init(disk, rwb);
> +		if (ret) {
> +			wbt_free(rwb);
>  			goto out;
> +		}

Here it actually depends on the patch "block: fix race between wbt_enable_default and IO submission",
which removes the wbt_init() calls from the bfq & iocost code paths; otherwise you may
have to handle -EBUSY from wbt_init().

With the mentioned patch, this fix looks fine:

Reviewed-by: Ming Lei <ming.lei@redhat.com>


Thanks,
Ming


  reply	other threads:[~2025-12-16 11:04 UTC|newest]

Thread overview: 27+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-12-14 10:13 [PATCH v5 00/13] blk-mq: fix possible deadlocks Yu Kuai
2025-12-14 10:13 ` [PATCH v5 01/13] blk-wbt: factor out a helper wbt_set_lat() Yu Kuai
2025-12-16 10:19   ` Ming Lei
2025-12-18 14:18   ` Nilay Shroff
2025-12-14 10:13 ` [PATCH v5 02/13] blk-wbt: fix possible deadlock to nest pcpu_alloc_mutex under q_usage_counter Yu Kuai
2025-12-16 11:03   ` Ming Lei [this message]
2025-12-16 11:29     ` Yu Kuai
2025-12-18 14:20   ` Nilay Shroff
2025-12-14 10:13 ` [PATCH v5 03/13] blk-mq-debugfs: factor out a helper to register debugfs for all rq_qos Yu Kuai
2025-12-18 11:06   ` Ming Lei
2025-12-18 14:28   ` Nilay Shroff
2025-12-14 10:13 ` [PATCH v5 04/13] blk-rq-qos: fix possible debugfs_mutex deadlock Yu Kuai
2025-12-18 11:11   ` Ming Lei
2025-12-18 14:33   ` Nilay Shroff
2025-12-14 10:14 ` [PATCH v5 05/13] blk-mq-debugfs: make blk_mq_debugfs_register_rqos() static Yu Kuai
2025-12-18 14:52   ` Nilay Shroff
2025-12-25  9:30     ` Yu Kuai
2025-12-14 10:14 ` [PATCH v5 06/13] blk-mq-debugfs: warn about possible deadlock Yu Kuai
2025-12-18 18:26   ` Nilay Shroff
2025-12-25  9:37     ` Yu Kuai
2025-12-14 10:14 ` [PATCH v5 07/13] blk-throttle: convert to GFP_NOIO in blk_throtl_init() Yu Kuai
2025-12-14 10:14 ` [PATCH v5 08/13] block/blk-rq-qos: add a new helper rq_qos_add_frozen() Yu Kuai
2025-12-14 10:14 ` [PATCH v5 09/13] blk-wbt: fix incorrect lock order for rq_qos_mutex and freeze queue Yu Kuai
2025-12-14 10:14 ` [PATCH v5 10/13] blk-iocost: " Yu Kuai
2025-12-14 10:14 ` [PATCH v5 11/13] blk-iolatency: " Yu Kuai
2025-12-14 10:14 ` [PATCH v5 12/13] blk-throttle: remove useless queue frozen Yu Kuai
2025-12-14 10:14 ` [PATCH v5 13/13] block/blk-rq-qos: cleanup rq_qos_add() Yu Kuai

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=aUE8lSlWa6DnCYMa@fedora \
    --to=ming.lei@redhat.com \
    --cc=axboe@kernel.dk \
    --cc=linux-block@vger.kernel.org \
    --cc=nilay@linux.ibm.com \
    --cc=tj@kernel.org \
    --cc=yukuai@fnnas.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).