public inbox for linux-block@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH] sbitmap: fix batched wait_cnt accounting
@ 2022-08-24 17:00 Keith Busch
  2022-08-24 19:30 ` Keith Busch
  0 siblings, 1 reply; 2+ messages in thread
From: Keith Busch @ 2022-08-24 17:00 UTC (permalink / raw)
  To: axboe, linux-block; +Cc: Keith Busch

From: Keith Busch <kbusch@kernel.org>

Batched completions can clear multiple bits, but we're only decrementing
the wait_cnt by one each time. This can cause waiters to never be woken,
stalling IO. Use the batched count instead.

Link: https://bugzilla.kernel.org/show_bug.cgi?id=215679
Signed-off-by: Keith Busch <kbusch@kernel.org>
---
 block/blk-mq-tag.c      |  2 +-
 include/linux/sbitmap.h |  3 ++-
 lib/sbitmap.c           | 23 ++++++++++++-----------
 3 files changed, 15 insertions(+), 13 deletions(-)

diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 8e3b36d1cb57..9eb968e14d31 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -196,7 +196,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
 		 * other allocations on previous queue won't be starved.
 		 */
 		if (bt != bt_prev)
-			sbitmap_queue_wake_up(bt_prev);
+			sbitmap_queue_wake_up(bt_prev, 1);
 
 		ws = bt_wait_ptr(bt, data->hctx);
 	} while (1);
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index 8f5a86e210b9..3062ce6534fb 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -575,8 +575,9 @@ void sbitmap_queue_wake_all(struct sbitmap_queue *sbq);
  * sbitmap_queue_wake_up() - Wake up some of waiters in one waitqueue
  * on a &struct sbitmap_queue.
  * @sbq: Bitmap queue to wake up.
+ * @nr: Number of tags being released.
  */
-void sbitmap_queue_wake_up(struct sbitmap_queue *sbq);
+void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr);
 
 /**
  * sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 1f31147872e6..b1fd7cab6f1a 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -600,17 +600,20 @@ static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
 	return NULL;
 }
 
-static bool __sbq_wake_up(struct sbitmap_queue *sbq)
+static bool __sbq_wake_up(struct sbitmap_queue *sbq, int *nr)
 {
 	struct sbq_wait_state *ws;
-	unsigned int wake_batch;
-	int wait_cnt;
+	int wake_batch, wait_cnt, sub;
 
 	ws = sbq_wake_ptr(sbq);
-	if (!ws)
+	if (!ws || !(*nr))
 		return false;
 
-	wait_cnt = atomic_dec_return(&ws->wait_cnt);
+	wake_batch = READ_ONCE(sbq->wake_batch);
+	sub = min3(wake_batch, *nr, atomic_read(&ws->wait_cnt));
+	wait_cnt = atomic_sub_return(sub, &ws->wait_cnt);
+	*nr -= sub;
+
 	/*
 	 * For concurrent callers of this, callers should call this function
 	 * again to wakeup a new batch on a different 'ws'.
@@ -621,7 +624,6 @@ static bool __sbq_wake_up(struct sbitmap_queue *sbq)
 	if (wait_cnt > 0)
 		return false;
 
-	wake_batch = READ_ONCE(sbq->wake_batch);
 
 	/*
 	 * Wake up first in case that concurrent callers decrease wait_cnt
@@ -652,12 +654,11 @@ static bool __sbq_wake_up(struct sbitmap_queue *sbq)
 	return false;
 }
 
-void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
+void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
 {
-	while (__sbq_wake_up(sbq))
+	while (__sbq_wake_up(sbq, &nr))
 		;
 }
-EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
 
 static inline void sbitmap_update_cpu_hint(struct sbitmap *sb, int cpu, int tag)
 {
@@ -694,7 +695,7 @@ void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
 		atomic_long_andnot(mask, (atomic_long_t *) addr);
 
 	smp_mb__after_atomic();
-	sbitmap_queue_wake_up(sbq);
+	sbitmap_queue_wake_up(sbq, nr_tags);
 	sbitmap_update_cpu_hint(&sbq->sb, raw_smp_processor_id(),
 					tags[nr_tags - 1] - offset);
 }
@@ -722,7 +723,7 @@ void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
 	 * waiter. See the comment on waitqueue_active().
 	 */
 	smp_mb__after_atomic();
-	sbitmap_queue_wake_up(sbq);
+	sbitmap_queue_wake_up(sbq, 1);
 	sbitmap_update_cpu_hint(&sbq->sb, cpu, nr);
 }
 EXPORT_SYMBOL_GPL(sbitmap_queue_clear);
-- 
2.30.2


^ permalink raw reply related	[flat|nested] 2+ messages in thread

* Re: [PATCH] sbitmap: fix batched wait_cnt accounting
  2022-08-24 17:00 [PATCH] sbitmap: fix batched wait_cnt accounting Keith Busch
@ 2022-08-24 19:30 ` Keith Busch
  0 siblings, 0 replies; 2+ messages in thread
From: Keith Busch @ 2022-08-24 19:30 UTC (permalink / raw)
  To: Keith Busch; +Cc: axboe, linux-block

On Wed, Aug 24, 2022 at 10:00:23AM -0700, Keith Busch wrote:
> +static bool __sbq_wake_up(struct sbitmap_queue *sbq, int *nr)
>  {
>  	struct sbq_wait_state *ws;
> -	unsigned int wake_batch;
> -	int wait_cnt;
> +	int wake_batch, wait_cnt, sub;
>  
>  	ws = sbq_wake_ptr(sbq);
> -	if (!ws)
> +	if (!ws || !(*nr))
>  		return false;
>  
> -	wait_cnt = atomic_dec_return(&ws->wait_cnt);
> +	wake_batch = READ_ONCE(sbq->wake_batch);
> +	sub = min3(wake_batch, *nr, atomic_read(&ws->wait_cnt));
> +	wait_cnt = atomic_sub_return(sub, &ws->wait_cnt);
> +	*nr -= sub;
> +
>  	/*
>  	 * For concurrent callers of this, callers should call this function
>  	 * again to wakeup a new batch on a different 'ws'.

I'll need to send a new version. The code expects 'wait_cnt' to be 0 in order
to wake up waiters, but if two batched completions have different amounts of
cleared bits, one thread may see > 0, the other may see < 0, and no one will
make progress. 

I think we may need to do atomic_dec_return() in a loop so that a completer is
guaranteed to eventually see '0'.

^ permalink raw reply	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2022-08-24 19:31 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2022-08-24 17:00 [PATCH] sbitmap: fix batched wait_cnt accounting Keith Busch
2022-08-24 19:30 ` Keith Busch

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox