public inbox for linux-bcachefs@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH] bcachefs: Refactor bch2_bset_fix_lookup_table
@ 2024-08-16  4:40 Alan Huang
  0 siblings, 0 replies; only message in thread
From: Alan Huang @ 2024-08-16  4:40 UTC (permalink / raw)
  To: kent.overstreet; +Cc: linux-bcachefs, Alan Huang

bch2_bset_fix_lookup_table is too complicated to be easily understood,
and the comment "l now > where" there is also incorrect when where ==
t->end_offset. This patch therefore refactors the function. The idea is
that when where >= rw_aux_tree(b, t)[t->size - 1].offset, we don't need
to adjust the rw aux tree.

Signed-off-by: Alan Huang <mmpgouride@gmail.com>
---
 fs/bcachefs/bset.c | 116 +++++++++++++++++++++++++--------------------
 1 file changed, 65 insertions(+), 51 deletions(-)

diff --git a/fs/bcachefs/bset.c b/fs/bcachefs/bset.c
index 1b66b2c7e018..c5fffed88c46 100644
--- a/fs/bcachefs/bset.c
+++ b/fs/bcachefs/bset.c
@@ -885,6 +885,34 @@ struct bkey_packed *bch2_bkey_prev_filter(struct btree *b,
 
 /* Insert */
 
+static void rw_aux_tree_build_entry(struct btree *b,
+				    struct bset_tree *t,
+				    struct bkey_packed *start,
+				    struct bkey_packed *end,
+				    unsigned l)
+{
+	if (t->size < bset_rw_tree_capacity(b, t) &&
+	    (void *) end - (void *) start > L1_CACHE_BYTES) {
+		struct bkey_packed *k = start;
+
+		while (1) {
+			k = bkey_p_next(k);
+			if (k == end)
+				break;
+
+			if ((void *) k - (void *) start >= L1_CACHE_BYTES) {
+				memmove(&rw_aux_tree(b, t)[l + 1],
+					&rw_aux_tree(b, t)[l],
+					(void *) &rw_aux_tree(b, t)[t->size] -
+					(void *) &rw_aux_tree(b, t)[l]);
+				t->size++;
+				rw_aux_tree_set(b, t, l, k);
+				break;
+			}
+		}
+	}
+}
+
 static void bch2_bset_fix_lookup_table(struct btree *b,
 				       struct bset_tree *t,
 				       struct bkey_packed *_where,
@@ -899,34 +927,42 @@ static void bch2_bset_fix_lookup_table(struct btree *b,
 	if (!bset_has_rw_aux_tree(t))
 		return;
 
+	if (where > rw_aux_tree(b, t)[t->size - 1].offset) {
+build_entry:
+		rw_aux_tree_build_entry(b, t,
+					rw_aux_to_bkey(b, t, t->size - 1),
+					btree_bkey_last(b, t),
+					t->size);
+		return;
+	}
+
 	/* returns first entry >= where */
 	l = rw_aux_tree_bsearch(b, t, where);
 
-	if (!l) /* never delete first entry */
-		l++;
-	else if (l < t->size &&
-		 where < t->end_offset &&
-		 rw_aux_tree(b, t)[l].offset == where)
-		rw_aux_tree_set(b, t, l++, _where);
+	if (rw_aux_tree(b, t)[l].offset == where) {
+		if (!l) { /* never delete first entry */
+			l++;
+		} else if (where < t->end_offset) {
+			rw_aux_tree_set(b, t, l++, _where);
+		} else {
+			/* where == t->end_offset */
+			EBUG_ON(where > t->end_offset);
+			t->size--;
+			goto build_entry;
+		}
+	}
 
 	/* l now > where */
-
-	for (j = l;
-	     j < t->size &&
-	     rw_aux_tree(b, t)[j].offset < where + clobber_u64s;
-	     j++)
-		;
-
-	if (j < t->size &&
-	    rw_aux_tree(b, t)[j].offset + shift ==
-	    rw_aux_tree(b, t)[l - 1].offset)
-		j++;
-
-	memmove(&rw_aux_tree(b, t)[l],
-		&rw_aux_tree(b, t)[j],
-		(void *) &rw_aux_tree(b, t)[t->size] -
-		(void *) &rw_aux_tree(b, t)[j]);
-	t->size -= j - l;
+	EBUG_ON(l < t->size && rw_aux_tree(b, t)[l].offset <= where);
+	if (l < t->size &&
+	    rw_aux_tree(b, t)[l].offset + shift ==
+	    rw_aux_tree(b, t)[l - 1].offset) {
+		memmove(&rw_aux_tree(b, t)[l],
+			&rw_aux_tree(b, t)[l + 1],
+			(void *) &rw_aux_tree(b, t)[t->size] -
+			(void *) &rw_aux_tree(b, t)[l + 1]);
+		t->size -= 1;
+	}
 
 	for (j = l; j < t->size; j++)
 		rw_aux_tree(b, t)[j].offset += shift;
@@ -935,34 +971,12 @@ static void bch2_bset_fix_lookup_table(struct btree *b,
 		rw_aux_tree(b, t)[l].offset ==
 		rw_aux_tree(b, t)[l - 1].offset);
 
-	if (t->size < bset_rw_tree_capacity(b, t) &&
-	    (l < t->size
-	     ? rw_aux_tree(b, t)[l].offset
-	     : t->end_offset) -
-	    rw_aux_tree(b, t)[l - 1].offset >
-	    L1_CACHE_BYTES / sizeof(u64)) {
-		struct bkey_packed *start = rw_aux_to_bkey(b, t, l - 1);
-		struct bkey_packed *end = l < t->size
-			? rw_aux_to_bkey(b, t, l)
-			: btree_bkey_last(b, t);
-		struct bkey_packed *k = start;
-
-		while (1) {
-			k = bkey_p_next(k);
-			if (k == end)
-				break;
-
-			if ((void *) k - (void *) start >= L1_CACHE_BYTES) {
-				memmove(&rw_aux_tree(b, t)[l + 1],
-					&rw_aux_tree(b, t)[l],
-					(void *) &rw_aux_tree(b, t)[t->size] -
-					(void *) &rw_aux_tree(b, t)[l]);
-				t->size++;
-				rw_aux_tree_set(b, t, l, k);
-				break;
-			}
-		}
-	}
+	rw_aux_tree_build_entry(b, t,
+				rw_aux_to_bkey(b, t, l - 1),
+				l < t->size
+				? rw_aux_to_bkey(b, t, l)
+				: btree_bkey_last(b, t),
+				l);
 
 	bch2_bset_verify_rw_aux_tree(b, t);
 	bset_aux_tree_verify(b);
-- 
2.45.2


^ permalink raw reply related	[flat|nested] only message in thread

only message in thread, other threads:[~2024-08-16  4:40 UTC | newest]

Thread overview: (only message) (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2024-08-16  4:40 [PATCH] bcachefs: Refactor bch2_bset_fix_lookup_table Alan Huang

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox