From: Christoph Hellwig <hch@lst.de>
To: Jens Axboe <axboe@kernel.dk>
Cc: Al Viro <viro@zeniv.linux.org.uk>,
	Christian Brauner <brauner@kernel.org>,
	"Darrick J. Wong" <djwong@kernel.org>,
	linux-block@vger.kernel.org, linux-fsdevel@vger.kernel.org,
	linux-xfs@vger.kernel.org
Subject: [PATCH 4/9] block: turn bdev_lock into a mutex
Date: Fri,  5 May 2023 13:51:27 -0400	[thread overview]
Message-ID: <20230505175132.2236632-5-hch@lst.de> (raw)
In-Reply-To: <20230505175132.2236632-1-hch@lst.de>

There is no reason for this lock to spin, and being able to sleep under
it will come in handy soon.
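
A minimal sketch of what this enables (illustrative only, not part of
this patch; the lock and function names are made up): a critical
section that may block, for example on a GFP_KERNEL allocation, is
legal while holding a mutex but would be a "sleeping while atomic"
bug under a spinlock.

	#include <linux/mutex.h>
	#include <linux/slab.h>

	static DEFINE_MUTEX(example_lock);	/* stand-in for bdev_lock */

	static void *example_critical_section(size_t size)
	{
		void *p;

		mutex_lock(&example_lock);
		/* may sleep: allowed under a mutex, forbidden under a spinlock */
		p = kzalloc(size, GFP_KERNEL);
		mutex_unlock(&example_lock);
		return p;
	}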

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/bdev.c | 27 +++++++++++++--------------
 1 file changed, 13 insertions(+), 14 deletions(-)

diff --git a/block/bdev.c b/block/bdev.c
index f2c7181b0bba7d..bad75f6cf8edcd 100644
--- a/block/bdev.c
+++ b/block/bdev.c
@@ -308,7 +308,7 @@ EXPORT_SYMBOL(thaw_bdev);
  * pseudo-fs
  */
 
-static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
+static  __cacheline_aligned_in_smp DEFINE_MUTEX(bdev_lock);
 static struct kmem_cache * bdev_cachep __read_mostly;
 
 static struct inode *bdev_alloc_inode(struct super_block *sb)
@@ -457,15 +457,14 @@ long nr_blockdev_pages(void)
  *
  * Test whether @bdev can be claimed by @holder.
  *
- * CONTEXT:
- * spin_lock(&bdev_lock).
- *
  * RETURNS:
  * %true if @bdev can be claimed, %false otherwise.
  */
 static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
 			 void *holder)
 {
+	lockdep_assert_held(&bdev_lock);
+
 	if (bdev->bd_holder == holder)
 		return true;	 /* already a holder */
 	else if (bdev->bd_holder != NULL)
@@ -500,10 +499,10 @@ int bd_prepare_to_claim(struct block_device *bdev, void *holder)
 	if (WARN_ON_ONCE(!holder))
 		return -EINVAL;
 retry:
-	spin_lock(&bdev_lock);
+	mutex_lock(&bdev_lock);
 	/* if someone else claimed, fail */
 	if (!bd_may_claim(bdev, whole, holder)) {
-		spin_unlock(&bdev_lock);
+		mutex_unlock(&bdev_lock);
 		return -EBUSY;
 	}
 
@@ -513,7 +512,7 @@ int bd_prepare_to_claim(struct block_device *bdev, void *holder)
 		DEFINE_WAIT(wait);
 
 		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
-		spin_unlock(&bdev_lock);
+		mutex_unlock(&bdev_lock);
 		schedule();
 		finish_wait(wq, &wait);
 		goto retry;
@@ -521,7 +520,7 @@ int bd_prepare_to_claim(struct block_device *bdev, void *holder)
 
 	/* yay, all mine */
 	whole->bd_claiming = holder;
-	spin_unlock(&bdev_lock);
+	mutex_unlock(&bdev_lock);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(bd_prepare_to_claim); /* only for the loop driver */
@@ -547,7 +546,7 @@ static void bd_finish_claiming(struct block_device *bdev, void *holder)
 {
 	struct block_device *whole = bdev_whole(bdev);
 
-	spin_lock(&bdev_lock);
+	mutex_lock(&bdev_lock);
 	BUG_ON(!bd_may_claim(bdev, whole, holder));
 	/*
 	 * Note that for a whole device bd_holders will be incremented twice,
@@ -558,7 +557,7 @@ static void bd_finish_claiming(struct block_device *bdev, void *holder)
 	bdev->bd_holders++;
 	bdev->bd_holder = holder;
 	bd_clear_claiming(whole, holder);
-	spin_unlock(&bdev_lock);
+	mutex_unlock(&bdev_lock);
 }
 
 /**
@@ -572,9 +571,9 @@ static void bd_finish_claiming(struct block_device *bdev, void *holder)
  */
 void bd_abort_claiming(struct block_device *bdev, void *holder)
 {
-	spin_lock(&bdev_lock);
+	mutex_lock(&bdev_lock);
 	bd_clear_claiming(bdev_whole(bdev), holder);
-	spin_unlock(&bdev_lock);
+	mutex_unlock(&bdev_lock);
 }
 EXPORT_SYMBOL(bd_abort_claiming);
 
@@ -587,7 +586,7 @@ static void bd_end_claim(struct block_device *bdev)
 	 * Release a claim on the device.  The holder fields are protected with
 	 * bdev_lock.  open_mutex is used to synchronize disk_holder unlinking.
 	 */
-	spin_lock(&bdev_lock);
+	mutex_lock(&bdev_lock);
 	WARN_ON_ONCE(--bdev->bd_holders < 0);
 	WARN_ON_ONCE(--whole->bd_holders < 0);
 	if (!bdev->bd_holders) {
@@ -597,7 +596,7 @@ static void bd_end_claim(struct block_device *bdev)
 	}
 	if (!whole->bd_holders)
 		whole->bd_holder = NULL;
-	spin_unlock(&bdev_lock);
+	mutex_unlock(&bdev_lock);
 
 	/*
 	 * If this was the last claim, remove holder link and unblock evpoll if
-- 
2.39.2



Thread overview: 52+ messages
2023-05-05 17:51 introduce bdev holder ops and a file system shutdown method Christoph Hellwig
2023-05-05 17:51 ` [PATCH 1/9] block: consolidate the shutdown logic in blk_mark_disk_dead and del_gendisk Christoph Hellwig
2023-05-07 19:08   ` Jan Kara
2023-05-09 13:35     ` Christoph Hellwig
2023-05-05 17:51 ` [PATCH 2/9] block: avoid repeated work in blk_mark_disk_dead Christoph Hellwig
2023-05-07 19:05   ` Jan Kara
2023-05-16 16:29   ` Christian Brauner
2023-05-05 17:51 ` [PATCH 3/9] block: factor out a bd_end_claim helper from blkdev_put Christoph Hellwig
2023-05-07 19:08   ` Jan Kara
2023-05-16 16:29   ` Christian Brauner
2023-05-05 17:51 ` Christoph Hellwig [this message]
2023-05-07 19:09   ` [PATCH 4/9] block: turn bdev_lock into a mutex Jan Kara
2023-05-16 16:24   ` Christian Brauner
2023-05-05 17:51 ` [PATCH 5/9] block: introduce holder ops Christoph Hellwig
2023-05-05 18:51   ` Darrick J. Wong
2023-05-09 13:35     ` Christoph Hellwig
2023-05-09 22:19       ` Dave Chinner
2023-05-10  1:38         ` Darrick J. Wong
2023-05-10 15:13         ` Christoph Hellwig
2023-05-07 19:12   ` Jan Kara
2023-05-16 11:02   ` Ming Lei
2023-05-16 14:36     ` Darrick J. Wong
2023-05-17  7:29       ` Christoph Hellwig
2023-05-16 16:00   ` Christian Brauner
2023-05-17  7:30     ` Christoph Hellwig
2023-05-17  7:57       ` Christian Brauner
2023-05-17  8:06         ` Christoph Hellwig
2023-05-17  8:42           ` Christian Brauner
2023-05-17 12:02             ` Christoph Hellwig
2023-05-17 13:14               ` Christian Brauner
2023-05-17 14:26                 ` Christoph Hellwig
2023-05-18  8:13                   ` Christian Brauner
2023-05-18 13:12                     ` Christoph Hellwig
2023-05-18 13:13                       ` Christoph Hellwig
2023-05-18 13:56                       ` Christian Brauner
2023-05-05 17:51 ` [PATCH 6/9] block: add a mark_dead holder operation Christoph Hellwig
2023-05-05 18:37   ` Darrick J. Wong
2023-05-09 13:30     ` Christoph Hellwig
2023-05-07 19:19   ` Jan Kara
2023-05-09 13:32     ` Christoph Hellwig
2023-05-16 16:17       ` Christian Brauner
2023-05-05 17:51 ` [PATCH 7/9] fs: add a method to shut down the file system Christoph Hellwig
2023-05-05 18:39   ` Darrick J. Wong
2023-05-07 19:20   ` Jan Kara
2023-05-16 16:20   ` Christian Brauner
2023-05-17  7:27     ` Christoph Hellwig
2023-05-05 17:51 ` [PATCH 8/9] xfs: wire up sops->shutdown Christoph Hellwig
2023-05-05 18:23   ` Darrick J. Wong
2023-05-09 13:28     ` Christoph Hellwig
2023-05-05 17:51 ` [PATCH 9/9] xfs: wire up the ->mark_dead holder operation for log and RT devices Christoph Hellwig
2023-05-05 18:32   ` Darrick J. Wong
2023-05-08 15:20 ` introduce bdev holder ops and a file system shutdown method Christoph Hellwig
