From: Alan Huang <mmpgouride@gmail.com>
To: kent.overstreet@linux.dev
Cc: linux-bcachefs@vger.kernel.org, Alan Huang <mmpgouride@gmail.com>
Subject: [PATCH 2/4] bcachefs: mark_btree_node_locked_noreset -> mark_btree_node_locked_reset
Date: Tue, 15 Apr 2025 13:33:05 +0800
Message-ID: <20250415053307.81354-3-mmpgouride@gmail.com>
In-Reply-To: <20250415053307.81354-1-mmpgouride@gmail.com>

The semantics are now "reset"; rename the function to reflect that.

Signed-off-by: Alan Huang <mmpgouride@gmail.com>
---
 fs/bcachefs/btree_key_cache.c       |  6 +++---
 fs/bcachefs/btree_locking.c         |  6 +++---
 fs/bcachefs/btree_locking.h         | 10 +++++-----
 fs/bcachefs/btree_update_interior.c |  4 ++--
 4 files changed, 13 insertions(+), 13 deletions(-)
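
As background, here is a minimal, self-contained sketch of what "reset"
semantics can mean for a per-level lock-state word. The struct, constants,
and helper names below are illustrative assumptions made up for this sketch,
not the actual bcachefs definitions; the point is only that the helper clears
the slot for the given level before storing the new state, instead of assuming
the slot is already clear.

	/*
	 * Illustrative sketch only -- NOT the bcachefs implementation.
	 * Models a lock-state word with two bits per btree level and a
	 * helper with "reset" semantics: it overwrites whatever state
	 * the level previously held.
	 */
	#include <stdint.h>
	#include <stdio.h>

	enum node_locked_type {
		NODE_UNLOCKED      = -1,
		NODE_READ_LOCKED   = 0,
		NODE_INTENT_LOCKED = 1,
		NODE_WRITE_LOCKED  = 2,
	};

	struct path_sketch {
		uint32_t nodes_locked;	/* two bits of lock state per level */
	};

	static void mark_node_locked_reset(struct path_sketch *path, unsigned level,
					   enum node_locked_type type)
	{
		/* clear the two bits for this level, then store the new state */
		path->nodes_locked &= ~(3U << (level << 1));
		path->nodes_locked |= (unsigned)(type + 1) << (level << 1);
	}

	static enum node_locked_type node_locked_type(const struct path_sketch *path,
						      unsigned level)
	{
		int bits = (int)((path->nodes_locked >> (level << 1)) & 3U);

		return (enum node_locked_type)(bits - 1);
	}

	int main(void)
	{
		struct path_sketch p = { 0 };

		mark_node_locked_reset(&p, 2, NODE_INTENT_LOCKED);
		/* "reset": downgrading simply overwrites the previous intent state */
		mark_node_locked_reset(&p, 2, NODE_READ_LOCKED);

		printf("level 2 state: %d\n", node_locked_type(&p, 2));	/* 0 == read */
		return 0;
	}

The real helpers in fs/bcachefs/btree_locking.h operate on struct btree_path
and the BTREE_NODE_* lock types shown in the hunks below; the sketch above
only mirrors the general shape.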

diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index 2b186584a291..c7ad01c1355c 100644
--- a/fs/bcachefs/btree_key_cache.c
+++ b/fs/bcachefs/btree_key_cache.c
@@ -240,7 +240,7 @@ static int btree_key_cache_create(struct btree_trans *trans,
 	ck->flags		= 1U << BKEY_CACHED_ACCESSED;
 
 	if (unlikely(key_u64s > ck->u64s)) {
-		mark_btree_node_locked_noreset(ck_path, 0, BTREE_NODE_UNLOCKED);
+		mark_btree_node_locked_reset(ck_path, 0, BTREE_NODE_UNLOCKED);
 
 		struct bkey_i *new_k = allocate_dropping_locks(trans, ret,
 				kmalloc(key_u64s * sizeof(u64), _gfp));
@@ -282,7 +282,7 @@ static int btree_key_cache_create(struct btree_trans *trans,
 	return 0;
 err:
 	bkey_cached_free(bc, ck);
-	mark_btree_node_locked_noreset(ck_path, 0, BTREE_NODE_UNLOCKED);
+	mark_btree_node_locked_reset(ck_path, 0, BTREE_NODE_UNLOCKED);
 
 	return ret;
 }
@@ -500,7 +500,7 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
 			atomic_long_dec(&c->btree_key_cache.nr_dirty);
 		}
 
-		mark_btree_node_locked_noreset(path, 0, BTREE_NODE_UNLOCKED);
+		mark_btree_node_locked_reset(path, 0, BTREE_NODE_UNLOCKED);
 		if (bkey_cached_evict(&c->btree_key_cache, ck)) {
 			bkey_cached_free(&c->btree_key_cache, ck);
 		} else {
diff --git a/fs/bcachefs/btree_locking.c b/fs/bcachefs/btree_locking.c
index f4f563944340..71dbac0bcd58 100644
--- a/fs/bcachefs/btree_locking.c
+++ b/fs/bcachefs/btree_locking.c
@@ -435,7 +435,7 @@ int __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree_path *p
 	six_lock_readers_add(&b->lock, readers);
 
 	if (ret)
-		mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_INTENT_LOCKED);
+		mark_btree_node_locked_reset(path, b->level, BTREE_NODE_INTENT_LOCKED);
 
 	return ret;
 }
@@ -564,7 +564,7 @@ bool bch2_btree_node_upgrade(struct btree_trans *trans,
 	trace_and_count(trans->c, btree_path_upgrade_fail, trans, _RET_IP_, path, level);
 	return false;
 success:
-	mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED);
+	mark_btree_node_locked_reset(path, level, BTREE_NODE_INTENT_LOCKED);
 	return true;
 }
 
@@ -693,7 +693,7 @@ void __bch2_btree_path_downgrade(struct btree_trans *trans,
 		} else {
 			if (btree_node_intent_locked(path, l)) {
 				six_lock_downgrade(&path->l[l].b->c.lock);
-				mark_btree_node_locked_noreset(path, l, BTREE_NODE_READ_LOCKED);
+				mark_btree_node_locked_reset(path, l, BTREE_NODE_READ_LOCKED);
 			}
 			break;
 		}
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index 66b27c0853a5..8978f7969bef 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -63,7 +63,7 @@ static inline bool btree_node_locked(struct btree_path *path, unsigned level)
 	return btree_node_locked_type(path, level) != BTREE_NODE_UNLOCKED;
 }
 
-static inline void mark_btree_node_locked_noreset(struct btree_path *path,
+static inline void mark_btree_node_locked_reset(struct btree_path *path,
 						  unsigned level,
 						  enum btree_node_locked_type type)
 {
@@ -80,7 +80,7 @@ static inline void mark_btree_node_locked(struct btree_trans *trans,
 					  unsigned level,
 					  enum btree_node_locked_type type)
 {
-	mark_btree_node_locked_noreset(path, level, (enum btree_node_locked_type) type);
+	mark_btree_node_locked_reset(path, level, (enum btree_node_locked_type) type);
 #ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
 	path->l[level].lock_taken_time = local_clock();
 #endif
@@ -134,7 +134,7 @@ static inline void btree_node_unlock(struct btree_trans *trans,
 		}
 		six_unlock_type(&path->l[level].b->c.lock, lock_type);
 		btree_trans_lock_hold_time_update(trans, path, level);
-		mark_btree_node_locked_noreset(path, level, BTREE_NODE_UNLOCKED);
+		mark_btree_node_locked_reset(path, level, BTREE_NODE_UNLOCKED);
 	}
 }
 
@@ -183,7 +183,7 @@ bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_pat
 	EBUG_ON(path->l[b->c.level].lock_seq != six_lock_seq(&b->c.lock));
 	EBUG_ON(btree_node_locked_type(path, b->c.level) != SIX_LOCK_write);
 
-	mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED);
+	mark_btree_node_locked_reset(path, b->c.level, BTREE_NODE_INTENT_LOCKED);
 	__bch2_btree_node_unlock_write(trans, b);
 }
 
@@ -315,7 +315,7 @@ static inline int __btree_node_lock_write(struct btree_trans *trans,
 	 * write lock: thus, we need to tell the cycle detector we have a write
 	 * lock _before_ taking the lock:
 	 */
-	mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_WRITE_LOCKED);
+	mark_btree_node_locked_reset(path, b->level, BTREE_NODE_WRITE_LOCKED);
 
 	return likely(six_trylock_write(&b->lock))
 		? 0
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index 55fbeeb8eaaa..29e03408a019 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -245,7 +245,7 @@ static void bch2_btree_node_free_inmem(struct btree_trans *trans,
 	mutex_unlock(&c->btree_cache.lock);
 
 	six_unlock_write(&b->c.lock);
-	mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED);
+	mark_btree_node_locked_reset(path, b->c.level, BTREE_NODE_INTENT_LOCKED);
 
 	bch2_trans_node_drop(trans, b);
 }
@@ -788,7 +788,7 @@ static void btree_update_nodes_written(struct btree_update *as)
 
 		mutex_unlock(&c->btree_interior_update_lock);
 
-		mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED);
+		mark_btree_node_locked_reset(path, b->c.level, BTREE_NODE_INTENT_LOCKED);
 		six_unlock_write(&b->c.lock);
 
 		btree_node_write_if_need(trans, b, SIX_LOCK_intent);
-- 
2.48.1


Thread overview: 11+ messages
2025-04-15  5:33 [PATCH 0/4] bcachefs: Random cleanup Alan Huang
2025-04-15  5:33 ` [PATCH 1/4] bcachefs: Kill bch2_trans_unlock_noassert Alan Huang
2025-04-15 14:34   ` Kent Overstreet
2025-04-15  5:33 ` Alan Huang [this message]
2025-04-15 14:34   ` [PATCH 2/4] bcachefs: mark_btree_node_locked_noreset -> mark_btree_node_locked_reset Kent Overstreet
2025-04-15 14:40     ` Alan Huang
2025-04-15 15:10       ` Kent Overstreet
2025-04-15  5:33 ` [PATCH 3/4] bcachefs: Remove spurious +1/-1 operation Alan Huang
2025-04-15 14:35   ` Kent Overstreet
2025-04-15  5:33 ` [PATCH 4/4] bcachefs: Simplify logic Alan Huang
2025-04-15 14:36   ` Kent Overstreet
