* [PATCH v5 1/3] btrfs: backref: Introduce the skeleton of btrfs_backref_iter
2020-02-18 9:01 [PATCH v5 0/3] Btrfs: relocation: Refactor build_backref_tree() using btrfs_backref_iterator infrastructure Qu Wenruo
@ 2020-02-18 9:01 ` Qu Wenruo
2020-02-19 8:58 ` Nikolay Borisov
2020-02-20 15:07 ` Josef Bacik
2020-02-18 9:01 ` [PATCH v5 2/3] btrfs: backref: Implement btrfs_backref_iter_next() Qu Wenruo
` (2 subsequent siblings)
3 siblings, 2 replies; 10+ messages in thread
From: Qu Wenruo @ 2020-02-18 9:01 UTC (permalink / raw)
To: linux-btrfs; +Cc: Johannes Thumshirn
Due to the complex nature of btrfs extent tree, when we want to iterate
all backrefs of one extent, it involves quite a lot of work, like
searching the EXTENT_ITEM/METADATA_ITEM, and iterating through inline and
keyed backrefs.
Normally this would result in pretty complex code, something like:
btrfs_search_slot()
/* Ensure we are at EXTENT_ITEM/METADATA_ITEM */
while (1) { /* Loop for extent tree items */
while (ptr < end) { /* Loop for inlined items */
/* REAL WORK HERE */
}
next:
ret = btrfs_next_item()
/* Ensure we're still at keyed item for specified bytenr */
}
The idea of btrfs_backref_iter is to avoid such complex and hard to
read code structure, but something like the following:
iter = btrfs_backref_iter_alloc();
ret = btrfs_backref_iter_start(iter, bytenr);
if (ret < 0)
goto out;
for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
/* REAL WORK HERE */
}
out:
btrfs_backref_iter_free(iter);
This patch is just the skeleton + btrfs_backref_iter_start() code.
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
---
fs/btrfs/backref.c | 87 ++++++++++++++++++++++++++++++++++++++++++++++
fs/btrfs/backref.h | 60 ++++++++++++++++++++++++++++++++
2 files changed, 147 insertions(+)
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index e5d85311d5d5..c78d15bb999d 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -2252,3 +2252,90 @@ void free_ipath(struct inode_fs_paths *ipath)
kvfree(ipath->fspath);
kfree(ipath);
}
+
+int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
+{
+ struct btrfs_fs_info *fs_info = iter->fs_info;
+ struct btrfs_path *path = iter->path;
+ struct btrfs_extent_item *ei;
+ struct btrfs_key key;
+ int ret;
+
+ key.objectid = bytenr;
+ key.type = BTRFS_METADATA_ITEM_KEY;
+ key.offset = (u64)-1;
+ iter->bytenr = bytenr;
+
+ ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
+ if (ret < 0)
+ return ret;
+ if (ret == 0) {
+ ret = -EUCLEAN;
+ goto release;
+ }
+ if (path->slots[0] == 0) {
+ WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
+ ret = -EUCLEAN;
+ goto release;
+ }
+ path->slots[0]--;
+
+ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+ if (!(key.type == BTRFS_EXTENT_ITEM_KEY ||
+ key.type == BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) {
+ ret = -ENOENT;
+ goto release;
+ }
+ memcpy(&iter->cur_key, &key, sizeof(key));
+ iter->item_ptr = btrfs_item_ptr_offset(path->nodes[0],
+ path->slots[0]);
+ iter->end_ptr = iter->item_ptr + btrfs_item_size_nr(path->nodes[0],
+ path->slots[0]);
+ ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_extent_item);
+
+ /*
+ * Only support iteration on tree backrefs for now.
+ *
+ * This is an extra precaution for non skinny-metadata, where
+ * EXTENT_ITEM is also used for tree blocks, so we can only use
+ * extent flags to determine if it's a tree block.
+ */
+ if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
+ ret = -ENOTTY;
+ goto release;
+ }
+ iter->cur_ptr = iter->item_ptr + sizeof(*ei);
+
+ /* If there is no inline backref, go search for keyed backref */
+ if (iter->cur_ptr >= iter->end_ptr) {
+ ret = btrfs_next_item(fs_info->extent_root, path);
+
+ /* No inline nor keyed ref */
+ if (ret > 0) {
+ ret = -ENOENT;
+ goto release;
+ }
+ if (ret < 0)
+ goto release;
+
+ btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
+ path->slots[0]);
+ if (iter->cur_key.objectid != bytenr ||
+ (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
+ iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
+ ret = -ENOENT;
+ goto release;
+ }
+ iter->cur_ptr = btrfs_item_ptr_offset(path->nodes[0],
+ path->slots[0]);
+ iter->item_ptr = iter->cur_ptr;
+ iter->end_ptr = iter->item_ptr + btrfs_item_size_nr(
+ path->nodes[0], path->slots[0]);
+ }
+
+ return 0;
+release:
+ btrfs_backref_iter_release(iter);
+ return ret;
+}
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
index 777f61dc081e..8b1ec11d4b28 100644
--- a/fs/btrfs/backref.h
+++ b/fs/btrfs/backref.h
@@ -74,4 +74,64 @@ struct prelim_ref {
u64 wanted_disk_byte;
};
+/*
+ * Helper structure to help iterate backrefs of one extent.
+ *
+ * Now it only supports iterating tree blocks in the commit root.
+ */
+struct btrfs_backref_iter {
+ u64 bytenr;
+ struct btrfs_path *path;
+ struct btrfs_fs_info *fs_info;
+ struct btrfs_key cur_key;
+ unsigned long item_ptr;
+ unsigned long cur_ptr;
+ unsigned long end_ptr;
+};
+
+static inline struct btrfs_backref_iter *
+btrfs_backref_iter_alloc(struct btrfs_fs_info *fs_info, gfp_t gfp_flag)
+{
+ struct btrfs_backref_iter *ret;
+
+ ret = kzalloc(sizeof(*ret), gfp_flag);
+ if (!ret)
+ return NULL;
+
+ ret->path = btrfs_alloc_path();
+ if (!ret->path) {
+ kfree(ret);
+ return NULL;
+ }
+
+ /* Current backref iterator only supports iteration in commit root */
+ ret->path->search_commit_root = 1;
+ ret->path->skip_locking = 1;
+ ret->path->reada = READA_FORWARD;
+ ret->fs_info = fs_info;
+
+ return ret;
+}
+
+static inline void btrfs_backref_iter_free(struct btrfs_backref_iter *iter)
+{
+ if (!iter)
+ return;
+ btrfs_free_path(iter->path);
+ kfree(iter);
+}
+
+int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr);
+
+static inline void
+btrfs_backref_iter_release(struct btrfs_backref_iter *iter)
+{
+ iter->bytenr = 0;
+ iter->item_ptr = 0;
+ iter->cur_ptr = 0;
+ iter->end_ptr = 0;
+ btrfs_release_path(iter->path);
+ memset(&iter->cur_key, 0, sizeof(iter->cur_key));
+}
+
#endif
--
2.25.0
^ permalink raw reply related [flat|nested] 10+ messages in thread* [PATCH v5 3/3] btrfs: relocation: Use btrfs_backref_iter infrastructure
2020-02-18 9:01 [PATCH v5 0/3] Btrfs: relocation: Refactor build_backref_tree() using btrfs_backref_iterator infrastructure Qu Wenruo
2020-02-18 9:01 ` [PATCH v5 1/3] btrfs: backref: Introduce the skeleton of btrfs_backref_iter Qu Wenruo
2020-02-18 9:01 ` [PATCH v5 2/3] btrfs: backref: Implement btrfs_backref_iter_next() Qu Wenruo
@ 2020-02-18 9:01 ` Qu Wenruo
2020-02-20 15:09 ` Josef Bacik
2020-02-19 9:01 ` [PATCH v5 0/3] Btrfs: relocation: Refactor build_backref_tree() using btrfs_backref_iterator infrastructure Nikolay Borisov
3 siblings, 1 reply; 10+ messages in thread
From: Qu Wenruo @ 2020-02-18 9:01 UTC (permalink / raw)
To: linux-btrfs; +Cc: Johannes Thumshirn
In the core function of relocation, build_backref_tree, it needs to
iterate all backref items of one tree block.
We don't really want to spend our code and reviewers' time going
through tons of supporting code just for the backref walk.
Use btrfs_backref_iter infrastructure to do the loop.
The backref item iteration loop would be much easier to read:
ret = btrfs_backref_iter_start(iter, cur->bytenr);
for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
/* The really important work */
}
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
---
fs/btrfs/relocation.c | 193 ++++++++++++++----------------------------
1 file changed, 62 insertions(+), 131 deletions(-)
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index b1365a516a25..1fe34d8eef6d 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -22,6 +22,7 @@
#include "print-tree.h"
#include "delalloc-space.h"
#include "block-group.h"
+#include "backref.h"
/*
* backref_node, mapping_node and tree_block start with this
@@ -604,48 +605,6 @@ static struct btrfs_root *read_fs_root(struct btrfs_fs_info *fs_info,
return btrfs_get_fs_root(fs_info, &key, false);
}
-static noinline_for_stack
-int find_inline_backref(struct extent_buffer *leaf, int slot,
- unsigned long *ptr, unsigned long *end)
-{
- struct btrfs_key key;
- struct btrfs_extent_item *ei;
- struct btrfs_tree_block_info *bi;
- u32 item_size;
-
- btrfs_item_key_to_cpu(leaf, &key, slot);
-
- item_size = btrfs_item_size_nr(leaf, slot);
- if (item_size < sizeof(*ei)) {
- btrfs_print_v0_err(leaf->fs_info);
- btrfs_handle_fs_error(leaf->fs_info, -EINVAL, NULL);
- return 1;
- }
- ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
- WARN_ON(!(btrfs_extent_flags(leaf, ei) &
- BTRFS_EXTENT_FLAG_TREE_BLOCK));
-
- if (key.type == BTRFS_EXTENT_ITEM_KEY &&
- item_size <= sizeof(*ei) + sizeof(*bi)) {
- WARN_ON(item_size < sizeof(*ei) + sizeof(*bi));
- return 1;
- }
- if (key.type == BTRFS_METADATA_ITEM_KEY &&
- item_size <= sizeof(*ei)) {
- WARN_ON(item_size < sizeof(*ei));
- return 1;
- }
-
- if (key.type == BTRFS_EXTENT_ITEM_KEY) {
- bi = (struct btrfs_tree_block_info *)(ei + 1);
- *ptr = (unsigned long)(bi + 1);
- } else {
- *ptr = (unsigned long)(ei + 1);
- }
- *end = (unsigned long)ei + item_size;
- return 0;
-}
-
/*
* build backref tree for a given tree block. root of the backref tree
* corresponds the tree block, leaves of the backref tree correspond
@@ -665,10 +624,9 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
struct btrfs_key *node_key,
int level, u64 bytenr)
{
+ struct btrfs_backref_iter *iter;
struct backref_cache *cache = &rc->backref_cache;
- struct btrfs_path *path1; /* For searching extent root */
- struct btrfs_path *path2; /* For searching parent of TREE_BLOCK_REF */
- struct extent_buffer *eb;
+ struct btrfs_path *path; /* For searching parent of TREE_BLOCK_REF */
struct btrfs_root *root;
struct backref_node *cur;
struct backref_node *upper;
@@ -677,9 +635,6 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
struct backref_node *exist = NULL;
struct backref_edge *edge;
struct rb_node *rb_node;
- struct btrfs_key key;
- unsigned long end;
- unsigned long ptr;
LIST_HEAD(list); /* Pending edge list, upper node needs to be checked */
LIST_HEAD(useless);
int cowonly;
@@ -687,14 +642,15 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
int err = 0;
bool need_check = true;
- path1 = btrfs_alloc_path();
- path2 = btrfs_alloc_path();
- if (!path1 || !path2) {
+ iter = btrfs_backref_iter_alloc(rc->extent_root->fs_info, GFP_NOFS);
+ if (!iter)
+ return ERR_PTR(-ENOMEM);
+ path = btrfs_alloc_path();
+ if (!path) {
err = -ENOMEM;
goto out;
}
- path1->reada = READA_FORWARD;
- path2->reada = READA_FORWARD;
+ path->reada = READA_FORWARD;
node = alloc_backref_node(cache);
if (!node) {
@@ -707,25 +663,28 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
node->lowest = 1;
cur = node;
again:
- end = 0;
- ptr = 0;
- key.objectid = cur->bytenr;
- key.type = BTRFS_METADATA_ITEM_KEY;
- key.offset = (u64)-1;
-
- path1->search_commit_root = 1;
- path1->skip_locking = 1;
- ret = btrfs_search_slot(NULL, rc->extent_root, &key, path1,
- 0, 0);
+ ret = btrfs_backref_iter_start(iter, cur->bytenr);
if (ret < 0) {
err = ret;
goto out;
}
- ASSERT(ret);
- ASSERT(path1->slots[0]);
-
- path1->slots[0]--;
+ /*
+ * We skip the first btrfs_tree_block_info, as we don't use the key
+ * stored in it, but fetch it from the tree block.
+ */
+ if (btrfs_backref_has_tree_block_info(iter)) {
+ ret = btrfs_backref_iter_next(iter);
+ if (ret < 0) {
+ err = ret;
+ goto out;
+ }
+ /* No extra backref? This means the tree block is corrupted */
+ if (ret > 0) {
+ err = -EUCLEAN;
+ goto out;
+ }
+ }
WARN_ON(cur->checked);
if (!list_empty(&cur->upper)) {
/*
@@ -747,42 +706,20 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
exist = NULL;
}
- while (1) {
- cond_resched();
- eb = path1->nodes[0];
-
- if (ptr >= end) {
- if (path1->slots[0] >= btrfs_header_nritems(eb)) {
- ret = btrfs_next_leaf(rc->extent_root, path1);
- if (ret < 0) {
- err = ret;
- goto out;
- }
- if (ret > 0)
- break;
- eb = path1->nodes[0];
- }
+ for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
+ struct extent_buffer *eb;
+ struct btrfs_key key;
+ int type;
- btrfs_item_key_to_cpu(eb, &key, path1->slots[0]);
- if (key.objectid != cur->bytenr) {
- WARN_ON(exist);
- break;
- }
+ cond_resched();
+ eb = btrfs_backref_get_eb(iter);
- if (key.type == BTRFS_EXTENT_ITEM_KEY ||
- key.type == BTRFS_METADATA_ITEM_KEY) {
- ret = find_inline_backref(eb, path1->slots[0],
- &ptr, &end);
- if (ret)
- goto next;
- }
- }
+ key.objectid = iter->bytenr;
+ if (btrfs_backref_iter_is_inline_ref(iter)) {
+ struct btrfs_extent_inline_ref *iref;
- if (ptr < end) {
/* update key for inline back ref */
- struct btrfs_extent_inline_ref *iref;
- int type;
- iref = (struct btrfs_extent_inline_ref *)ptr;
+ iref = (struct btrfs_extent_inline_ref *)iter->cur_ptr;
type = btrfs_get_extent_inline_ref_type(eb, iref,
BTRFS_REF_TYPE_BLOCK);
if (type == BTRFS_REF_TYPE_INVALID) {
@@ -791,9 +728,9 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
}
key.type = type;
key.offset = btrfs_extent_inline_ref_offset(eb, iref);
-
- WARN_ON(key.type != BTRFS_TREE_BLOCK_REF_KEY &&
- key.type != BTRFS_SHARED_BLOCK_REF_KEY);
+ } else {
+ key.type = iter->cur_key.type;
+ key.offset = iter->cur_key.offset;
}
/*
@@ -806,7 +743,7 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
(key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
exist->bytenr == key.offset))) {
exist = NULL;
- goto next;
+ continue;
}
/* SHARED_BLOCK_REF means key.offset is the parent bytenr */
@@ -852,7 +789,7 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
edge->node[LOWER] = cur;
edge->node[UPPER] = upper;
- goto next;
+ continue;
} else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
err = -EINVAL;
btrfs_print_v0_err(rc->extent_root->fs_info);
@@ -860,7 +797,7 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
NULL);
goto out;
} else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
- goto next;
+ continue;
}
/*
@@ -891,20 +828,20 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
level = cur->level + 1;
/* Search the tree to find parent blocks referring the block. */
- path2->search_commit_root = 1;
- path2->skip_locking = 1;
- path2->lowest_level = level;
- ret = btrfs_search_slot(NULL, root, node_key, path2, 0, 0);
- path2->lowest_level = 0;
+ path->search_commit_root = 1;
+ path->skip_locking = 1;
+ path->lowest_level = level;
+ ret = btrfs_search_slot(NULL, root, node_key, path, 0, 0);
+ path->lowest_level = 0;
if (ret < 0) {
err = ret;
goto out;
}
- if (ret > 0 && path2->slots[level] > 0)
- path2->slots[level]--;
+ if (ret > 0 && path->slots[level] > 0)
+ path->slots[level]--;
- eb = path2->nodes[level];
- if (btrfs_node_blockptr(eb, path2->slots[level]) !=
+ eb = path->nodes[level];
+ if (btrfs_node_blockptr(eb, path->slots[level]) !=
cur->bytenr) {
btrfs_err(root->fs_info,
"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
@@ -920,7 +857,7 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
/* Add all nodes and edges in the path */
for (; level < BTRFS_MAX_LEVEL; level++) {
- if (!path2->nodes[level]) {
+ if (!path->nodes[level]) {
ASSERT(btrfs_root_bytenr(&root->root_item) ==
lower->bytenr);
if (should_ignore_root(root))
@@ -936,7 +873,7 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
goto out;
}
- eb = path2->nodes[level];
+ eb = path->nodes[level];
rb_node = tree_search(&cache->rb_root, eb->start);
if (!rb_node) {
upper = alloc_backref_node(cache);
@@ -993,20 +930,14 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
lower = upper;
upper = NULL;
}
- btrfs_release_path(path2);
-next:
- if (ptr < end) {
- ptr += btrfs_extent_inline_ref_size(key.type);
- if (ptr >= end) {
- WARN_ON(ptr > end);
- ptr = 0;
- end = 0;
- }
- }
- if (ptr >= end)
- path1->slots[0]++;
+ btrfs_release_path(path);
+ }
+ if (ret < 0) {
+ err = ret;
+ goto out;
}
- btrfs_release_path(path1);
+ ret = 0;
+ btrfs_backref_iter_release(iter);
cur->checked = 1;
WARN_ON(exist);
@@ -1124,8 +1055,8 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
}
}
out:
- btrfs_free_path(path1);
- btrfs_free_path(path2);
+ btrfs_backref_iter_free(iter);
+ btrfs_free_path(path);
if (err) {
while (!list_empty(&useless)) {
lower = list_entry(useless.next,
--
2.25.0
^ permalink raw reply related [flat|nested] 10+ messages in thread* Re: [PATCH v5 0/3] Btrfs: relocation: Refactor build_backref_tree() using btrfs_backref_iterator infrastructure
2020-02-18 9:01 [PATCH v5 0/3] Btrfs: relocation: Refactor build_backref_tree() using btrfs_backref_iterator infrastructure Qu Wenruo
` (2 preceding siblings ...)
2020-02-18 9:01 ` [PATCH v5 3/3] btrfs: relocation: Use btrfs_backref_iter infrastructure Qu Wenruo
@ 2020-02-19 9:01 ` Nikolay Borisov
3 siblings, 0 replies; 10+ messages in thread
From: Nikolay Borisov @ 2020-02-19 9:01 UTC (permalink / raw)
To: Qu Wenruo, linux-btrfs
On 18.02.20 г. 11:01 ч., Qu Wenruo wrote:
> This is part 1 of the incoming refactor patches for build_backref_tree()
>
> [THE PLAN]
> The overall plan of refactoring build_backref_tree() is:
> - Refactor how we iterate through backref items
> This patchset, the smallest I guess.
>
> - Make build_backref_tree() easier to read.
> In short, that function is doing breadth-first-search to build a map
> which starts from one tree block, to all root nodes referring it.
>
> It involves backref iteration part, and a lot of backref cache only
> works.
> At least I hope to make this function less bulky and more structured.
>
> - Make build_backref_tree() independent from relocation
> The hardest I guess.
>
> Current it even accepts reloc_control as its first parameter.
> Don't have a clear plan yet, but I guess at least I should make
> build_backref_tree() to do some more coverage, other than taking
> certain relocation-dependent shortcut.
>
> [THIS PATCHSET]
> For the patchset itself, the main purpose is to change how we iterate
> through all backref items of one tree block.
>
> The old way:
>
> path->search_commit_root = 1;
> path->skip_locking = 1;
> ret = btrfs_search_slot(NULL, extent_root, path, &key, 0, 0);
> ptr = btrfs_item_offset_nr()
> end = btrfs_item_end_nr()
> /* Inline item loop */
> while (ptr < end) {
> /* Handle each inline item here */
> }
> while (1) {
> ret = btrfs_next_item();
> btrfs_item_key_to_cpu()
> if (key.objectid != bytenr ||
> !(key.type == XXX || key.type == YYY))
> break;
> /* Handle each keyed item here */
> }
>
> The new way:
>
> iterator = btrfs_backref_iterator_alloc();
> for (ret = btrfs_backref_iterator_start(iterator, bytenr);
> ret == 0; ret = btrfs_backref_iterator_next(iterator)) {
> /*
> * Handle both keyed and inlined item here.
> *
> * We can use iterator->key to determine if it's inlined or
> * keyed.
> * Even for inlined item, it can be easily converted to keyed
> * item, just like we did in build_backref_tree().
> */
> }
>
> Currently, only build_backref_tree() can utilize this infrastructure.
>
> Backref.c has more requirement, as it needs to handle iteration for both
> data and metadata, both commit root and current root.
> And more importantly, backref.c uses depth first search, thus not a
> perfect match for btrfs_backref_iterator.
>
> Extra naming suggestion is welcomed.
> The current naming, btrfs_backref_iterator_* looks pretty long to me
> already.
> Shorter naming would be much better.
>
> Changelog:
> v2:
> - Fix a completion bug in btrfs_backref_iterator_next()
> It should be btrfs_extent_inline_ref_type().
>
> v3:
> - Comment and commit message update
> - Move helper definitions to where they get first used
> - Use helpers to replace some internal open code
>
> v4:
> - Fix a bug in end_ptr calculation
> The old btrfs_item_end_nr() doesn't take LEAF_DATA_OFFSET into
> consideration, thus causes failure in btrfs/003.
>
> - Add extra check for keyed only backrefs
> btrfs_backref_iter_start() doesn't handle keyed only backrefs well.
> Add extra check to ensure callers get the correct cur_ptr set.
>
> - Shorten the name, iterator->iter
>
> v5:
> - Add the missing assignment for iter->bytenr
> This makes keyed backref not checked, causing random dead loop for
> btrfs/187.
>
> - Add comment for btrfs_backref_iter_next()
> Mostly for the return value.
>
>
> Qu Wenruo (3):
> btrfs: backref: Introduce the skeleton of btrfs_backref_iter
> btrfs: backref: Implement btrfs_backref_iter_next()
> btrfs: relocation: Use btrfs_backref_iter infrastructure
>
> fs/btrfs/backref.c | 145 +++++++++++++++++++++++++++++++
> fs/btrfs/backref.h | 94 ++++++++++++++++++++
> fs/btrfs/relocation.c | 193 ++++++++++++++----------------------------
> 3 files changed, 301 insertions(+), 131 deletions(-)
>
I tested the patchset on btrfs/balance group of tests and didn't observe
any of the regressions present in the previous version so:
Tested-by: Nikolay Borisov <nborisov@suse.com>
^ permalink raw reply [flat|nested] 10+ messages in thread