From: Josef Bacik <josef@toxicpanda.com>
To: linux-btrfs@vger.kernel.org
Subject: [PATCH 05/25] btrfs: migrate nocow and reservation helpers
Date: Thu, 20 Jun 2019 15:37:47 -0400
Message-ID: <20190620193807.29311-6-josef@toxicpanda.com>
In-Reply-To: <20190620193807.29311-1-josef@toxicpanda.com>

These are relatively straightforward as well: move the nocow writer helpers
(btrfs_inc/dec/wait_nocow_writers) and the block group reservation helpers
(btrfs_dec/wait_block_group_reservations) out of extent-tree.c and into
block-group.c.  No functional changes.
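
For reference, a minimal sketch of how a NOCOW write path is expected to
pair the nocow helpers.  The wrapper function, its error handling and the
EAGAIN fallback are illustrative only; the btrfs_inc/dec_nocow_writers()
calls are the helpers moved by this patch:

/* Sketch only, not part of this patch. */
static int sketch_nocow_write(struct btrfs_fs_info *fs_info, u64 disk_bytenr)
{
	/*
	 * Pin the block group so it cannot be flipped read-only while we
	 * overwrite the existing extent.  On success the block group
	 * reference taken by the internal lookup is kept; it is dropped
	 * later by btrfs_dec_nocow_writers().
	 */
	if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr))
		return -EAGAIN;	/* e.g. fall back to a COW write */

	/* ... do the NOCOW write and create the ordered extent ... */

	/*
	 * Drop nocow_writers, waking anyone in btrfs_wait_nocow_writers(),
	 * and put the block group twice: once for the lookup done here and
	 * once for the lookup done in btrfs_inc_nocow_writers().
	 */
	btrfs_dec_nocow_writers(fs_info, disk_bytenr);
	return 0;
}

btrfs_wait_nocow_writers() is the other side of this: after a block group
has been set read-only it lets callers such as relocation wait until every
in-flight writer has called btrfs_dec_nocow_writers().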

Signed-off-by: Josef Bacik <josef@toxicpanda.com>
---
 fs/btrfs/block-group.c | 83 ++++++++++++++++++++++++++++++++++++++++++++++++++
 fs/btrfs/block-group.h |  6 ++++
 fs/btrfs/ctree.h       |  6 ----
 fs/btrfs/extent-tree.c | 82 -------------------------------------------------
 4 files changed, 89 insertions(+), 88 deletions(-)
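
Note for reviewers (not part of the commit message): the empty
down_write()/up_write() pair in btrfs_wait_block_group_reservations() only
works because of the allocation-side ordering sketched below.  The wrapper
function is hypothetical; btrfs_inc/dec_block_group_reservations() and
groups_sem are real:

/* Sketch only, not part of this patch. */
static void sketch_reserve_data_range(struct btrfs_fs_info *fs_info,
				      struct btrfs_space_info *space_info,
				      struct btrfs_block_group_cache *bg,
				      u64 start)
{
	/*
	 * Allocation side: bg->reservations is raised while groups_sem is
	 * held for reading ...
	 */
	down_read(&space_info->groups_sem);
	btrfs_inc_block_group_reservations(bg);
	up_read(&space_info->groups_sem);

	/* ... the ordered extent for the reserved range is created ... */

	/* ... and only then is the counter dropped again. */
	btrfs_dec_block_group_reservations(fs_info, start);
}

Once the write lock in btrfs_wait_block_group_reservations() has been
obtained, any task that took the read lock before the group went read-only
has already released it and done its increment, so the following
wait_var_event() cannot miss a reservation that is still about to show up.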

diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index b15d7070bcfd..aeb2c806b2b0 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -4,6 +4,7 @@
  */
 #include "ctree.h"
 #include "block-group.h"
+#include "space-info.h"
 
 void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
 {
@@ -121,3 +122,85 @@ btrfs_next_block_group(struct btrfs_block_group_cache *cache)
 	spin_unlock(&fs_info->block_group_cache_lock);
 	return cache;
 }
+
+bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
+{
+	struct btrfs_block_group_cache *bg;
+	bool ret = true;
+
+	bg = btrfs_lookup_block_group(fs_info, bytenr);
+	if (!bg)
+		return false;
+
+	spin_lock(&bg->lock);
+	if (bg->ro)
+		ret = false;
+	else
+		atomic_inc(&bg->nocow_writers);
+	spin_unlock(&bg->lock);
+
+	/* no put on block group, done by btrfs_dec_nocow_writers */
+	if (!ret)
+		btrfs_put_block_group(bg);
+
+	return ret;
+
+}
+
+void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
+{
+	struct btrfs_block_group_cache *bg;
+
+	bg = btrfs_lookup_block_group(fs_info, bytenr);
+	ASSERT(bg);
+	if (atomic_dec_and_test(&bg->nocow_writers))
+		wake_up_var(&bg->nocow_writers);
+	/*
+	 * Once for our lookup and once for the lookup done by a previous call
+	 * to btrfs_inc_nocow_writers()
+	 */
+	btrfs_put_block_group(bg);
+	btrfs_put_block_group(bg);
+}
+
+void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg)
+{
+	wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
+}
+
+void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
+					const u64 start)
+{
+	struct btrfs_block_group_cache *bg;
+
+	bg = btrfs_lookup_block_group(fs_info, start);
+	ASSERT(bg);
+	if (atomic_dec_and_test(&bg->reservations))
+		wake_up_var(&bg->reservations);
+	btrfs_put_block_group(bg);
+}
+
+void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
+{
+	struct btrfs_space_info *space_info = bg->space_info;
+
+	ASSERT(bg->ro);
+
+	if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
+		return;
+
+	/*
+	 * Our block group is read only but before we set it to read only,
+	 * some task might have had allocated an extent from it already, but it
+	 * has not yet created a respective ordered extent (and added it to a
+	 * root's list of ordered extents).
+	 * Therefore wait for any task currently allocating extents, since the
+	 * block group's reservations counter is incremented while a read lock
+	 * on the groups' semaphore is held and decremented after releasing
+	 * the read access on that semaphore and creating the ordered extent.
+	 */
+	down_write(&space_info->groups_sem);
+	up_write(&space_info->groups_sem);
+
+	wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
+}
diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
index ddd91c7ed44a..bc2ed52210a3 100644
--- a/fs/btrfs/block-group.h
+++ b/fs/btrfs/block-group.h
@@ -161,5 +161,11 @@ struct btrfs_block_group_cache *
 btrfs_next_block_group(struct btrfs_block_group_cache *cache);
 void btrfs_get_block_group(struct btrfs_block_group_cache *cache);
 void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
+void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
+					 const u64 start);
+void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg);
+bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
+void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
+void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg);
 
 #endif /* BTRFS_BLOCK_GROUP_H */
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 4c6e643bc65d..c4ae6714e3d4 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2460,12 +2460,6 @@ static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_fs_info *fs_info,
 	return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
 }
 
-void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
-					 const u64 start);
-void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg);
-bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
-void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
-void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg);
 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 			   unsigned long count);
 void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 01a45674382e..63b594532b92 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3560,51 +3560,6 @@ int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
 	return readonly;
 }
 
-bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
-{
-	struct btrfs_block_group_cache *bg;
-	bool ret = true;
-
-	bg = btrfs_lookup_block_group(fs_info, bytenr);
-	if (!bg)
-		return false;
-
-	spin_lock(&bg->lock);
-	if (bg->ro)
-		ret = false;
-	else
-		atomic_inc(&bg->nocow_writers);
-	spin_unlock(&bg->lock);
-
-	/* no put on block group, done by btrfs_dec_nocow_writers */
-	if (!ret)
-		btrfs_put_block_group(bg);
-
-	return ret;
-
-}
-
-void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
-{
-	struct btrfs_block_group_cache *bg;
-
-	bg = btrfs_lookup_block_group(fs_info, bytenr);
-	ASSERT(bg);
-	if (atomic_dec_and_test(&bg->nocow_writers))
-		wake_up_var(&bg->nocow_writers);
-	/*
-	 * Once for our lookup and once for the lookup done by a previous call
-	 * to btrfs_inc_nocow_writers()
-	 */
-	btrfs_put_block_group(bg);
-	btrfs_put_block_group(bg);
-}
-
-void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg)
-{
-	wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
-}
-
 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
 {
 	u64 extra_flags = chunk_to_extended(flags) &
@@ -4279,43 +4234,6 @@ btrfs_inc_block_group_reservations(struct btrfs_block_group_cache *bg)
 	atomic_inc(&bg->reservations);
 }
 
-void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
-					const u64 start)
-{
-	struct btrfs_block_group_cache *bg;
-
-	bg = btrfs_lookup_block_group(fs_info, start);
-	ASSERT(bg);
-	if (atomic_dec_and_test(&bg->reservations))
-		wake_up_var(&bg->reservations);
-	btrfs_put_block_group(bg);
-}
-
-void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
-{
-	struct btrfs_space_info *space_info = bg->space_info;
-
-	ASSERT(bg->ro);
-
-	if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
-		return;
-
-	/*
-	 * Our block group is read only but before we set it to read only,
-	 * some task might have had allocated an extent from it already, but it
-	 * has not yet created a respective ordered extent (and added it to a
-	 * root's list of ordered extents).
-	 * Therefore wait for any task currently allocating extents, since the
-	 * block group's reservations counter is incremented while a read lock
-	 * on the groups' semaphore is held and decremented after releasing
-	 * the read access on that semaphore and creating the ordered extent.
-	 */
-	down_write(&space_info->groups_sem);
-	up_write(&space_info->groups_sem);
-
-	wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
-}
-
 /**
  * btrfs_add_reserved_bytes - update the block_group and space info counters
  * @cache:	The cache we are manipulating
-- 
2.14.3



Thread overview: 30+ messages
2019-06-20 19:37 [PATCH 00/25] btrfs: migrate the block group code Josef Bacik
2019-06-20 19:37 ` [PATCH 01/25] btrfs: move btrfs_add_free_space out of a header file Josef Bacik
2019-06-20 19:37 ` [PATCH 02/25] btrfs: move basic block_group definitions to their own header Josef Bacik
2019-06-20 19:37 ` [PATCH 03/25] btrfs: migrate the block group lookup code Josef Bacik
2019-06-20 19:37 ` [PATCH 04/25] btrfs: migrate the block group ref counting stuff Josef Bacik
2019-06-20 19:37 ` [PATCH 05/25] btrfs: migrate nocow and reservation helpers Josef Bacik [this message]
2019-06-20 19:37 ` [PATCH 06/25] btrfs: export the block group caching helpers Josef Bacik
2019-06-20 19:37 ` [PATCH 07/25] btrfs: export the excluded extents helpers Josef Bacik
2019-06-20 19:37 ` [PATCH 08/25] btrfs: export the caching control helpers Josef Bacik
2019-06-20 19:37 ` [PATCH 09/25] btrfs: temporarily export fragment_free_space Josef Bacik
2019-06-20 19:37 ` [PATCH 10/25] btrfs: make caching_thread use btrfs_find_next_key Josef Bacik
2019-06-20 19:37 ` [PATCH 11/25] btrfs: migrate the block group caching code Josef Bacik
2019-06-20 19:37 ` [PATCH 12/25] btrfs: temporarily export inc_block_group_ro Josef Bacik
2019-06-20 19:37 ` [PATCH 13/25] btrfs: migrate the block group removal code Josef Bacik
2019-06-20 19:37 ` [PATCH 14/25] btrfs: export get_alloc_profile Josef Bacik
2019-06-20 19:37 ` [PATCH 15/25] btrfs: migrate the block group read/creation code Josef Bacik
2019-06-20 19:37 ` [PATCH 16/25] btrfs: temporarily export btrfs_get_restripe_target Josef Bacik
2019-06-20 19:37 ` [PATCH 17/25] btrfs: migrate inc/dec_block_group_ro code Josef Bacik
2019-06-20 19:38 ` [PATCH 18/25] btrfs: migrate the dirty bg writeout code Josef Bacik
2019-06-20 19:38 ` [PATCH 19/25] btrfs: export block group accounting helpers Josef Bacik
2019-06-20 19:38 ` [PATCH 20/25] btrfs: migrate the block group space accounting helpers Josef Bacik
2019-06-20 19:38 ` [PATCH 21/25] btrfs: migrate btrfs_can_relocate Josef Bacik
2019-06-20 19:38 ` [PATCH 22/25] btrfs: migrate the chunk allocation code Josef Bacik
2019-06-20 19:38 ` [PATCH 23/25] btrfs: migrate the alloc_profile helpers Josef Bacik
2019-06-20 19:38 ` [PATCH 24/25] btrfs: migrate the block group cleanup code Josef Bacik
2019-06-20 19:38 ` [PATCH 25/25] btrfs: unexport the temporary exported functions Josef Bacik
2019-08-02 13:56 ` [PATCH 00/25] btrfs: migrate the block group code David Sterba
2019-08-02 14:07   ` Josef Bacik
2019-08-02 18:05     ` David Sterba
2019-08-06 16:43   ` David Sterba
