This is the second major part of the generic backref cache. Move
finish_upper_links() to backref.c and rename it to
backref_cache_finish_upper_links() so we can reuse it.
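
For reference, the only in-tree caller so far is build_backref_tree() in
relocation.c; after this patch the call site only needs the new name:

	/* Finish the upper linkage of newly added edges/nodes */
	ret = backref_cache_finish_upper_links(cache, node);
	if (ret < 0) {
		err = ret;
		goto out;
	}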
Signed-off-by: Qu Wenruo <wqu@xxxxxxxx>
---
fs/btrfs/backref.c | 114 +++++++++++++++++++++++++++++++++++++++++
fs/btrfs/backref.h | 2 +
fs/btrfs/relocation.c | 115 +-----------------------------------------
3 files changed, 117 insertions(+), 114 deletions(-)
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 013e833bf5bc..0a1cfa4433d3 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -2938,3 +2938,117 @@ int backref_cache_add_tree_block(struct backref_cache *cache,
btrfs_backref_iter_release(iter);
return ret;
}
+
+/*
+ * In backref_cache_add_tree_block(), we have only linked the lower node to
+ * the edge, but the upper node hasn't been linked to the edge yet.
+ * This means we can only iterate through backref_node::upper to reach parent
+ * edges, but not through backref_node::lower to reach child edges.
+ *
+ * This function finishes linking backref_node::lower to the related edges,
+ * so that the backref cache can be iterated in both directions.
+ *
+ * Also, this adds the nodes to the backref cache for the next run.
+ */
+int backref_cache_finish_upper_links(struct backref_cache *cache,
+ struct backref_node *start)
+{
+ struct list_head *useless_node = &cache->useless_node;
+ struct backref_edge *edge;
+ struct rb_node *rb_node;
+ LIST_HEAD(pending_edge);
+
+ ASSERT(start->checked);
+
+ /* Insert this node to cache if it's not cowonly */
+ if (!start->cowonly) {
+ rb_node = simple_insert(&cache->rb_root, start->bytenr,
+ &start->rb_node);
+ if (rb_node)
+ backref_cache_panic(cache->fs_info, start->bytenr,
+ -EEXIST);
+ list_add_tail(&start->lower, &cache->leaves);
+ }
+
+ /*
+ * Use breadth first search to iterate all related edges.
+ *
+ * The starting points are all the edges of this node.
+ */
+ list_for_each_entry(edge, &start->upper, list[LOWER])
+ list_add_tail(&edge->list[UPPER], &pending_edge);
+
+ while (!list_empty(&pending_edge)) {
+ struct backref_node *upper;
+ struct backref_node *lower;
+ struct rb_node *rb_node;
+
+ edge = list_first_entry(&pending_edge, struct backref_edge,
+ list[UPPER]);
+ list_del_init(&edge->list[UPPER]);
+ upper = edge->node[UPPER];
+ lower = edge->node[LOWER];
+
+ /* Parent is detached, no need to keep any edges */
+ if (upper->detached) {
+ list_del(&edge->list[LOWER]);
+ free_backref_edge(cache, edge);
+
+ /* Lower node is orphan, queue for cleanup */
+ if (list_empty(&lower->upper))
+ list_add(&lower->list, useless_node);
+ continue;
+ }
+
+ /*
+ * All new nodes added in the current build_backref_tree() call
+ * haven't been linked to the cache rb tree.
+ * So if we have upper->rb_node populated, this means a cache
+ * hit. We only need to link the edge, as @upper and all its
+ * parents have already been linked.
+ */
+ if (!RB_EMPTY_NODE(&upper->rb_node)) {
+ if (upper->lowest) {
+ list_del_init(&upper->lower);
+ upper->lowest = 0;
+ }
+
+ list_add_tail(&edge->list[UPPER], &upper->lower);
+ continue;
+ }
+
+ /* Sanity check, we shouldn't have any unchecked nodes */
+ if (!upper->checked) {
+ ASSERT(0);
+ return -EUCLEAN;
+ }
+
+ /* Sanity check, cowonly node has non-cowonly parent */
+ if (start->cowonly != upper->cowonly) {
+ ASSERT(0);
+ return -EUCLEAN;
+ }
+
+ /* Only cache non-cowonly (subvolume trees) tree blocks */
+ if (!upper->cowonly) {
+ rb_node = simple_insert(&cache->rb_root, upper->bytenr,
+ &upper->rb_node);
+ if (rb_node) {
+ backref_cache_panic(cache->fs_info,
+ upper->bytenr, -EEXIST);
+ return -EUCLEAN;
+ }
+ }
+
+ list_add_tail(&edge->list[UPPER], &upper->lower);
+
+ /*
+ * Also queue all the parent edges of this uncached node
+ * to finish the upper linkage
+ */
+ list_for_each_entry(edge, &upper->upper, list[LOWER])
+ list_add_tail(&edge->list[UPPER], &pending_edge);
+ }
+ return 0;
+}
+
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
index b17bc8d5411a..c6c9f536c359 100644
--- a/fs/btrfs/backref.h
+++ b/fs/btrfs/backref.h
@@ -371,4 +371,6 @@ int backref_cache_add_tree_block(struct backref_cache *cache,
struct btrfs_backref_iter *iter,
struct btrfs_key *node_key,
struct backref_node *cur);
+int backref_cache_finish_upper_links(struct backref_cache *cache,
+ struct backref_node *start);
#endif
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index d133a7df90cf..ad3896dcdb48 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -383,119 +383,6 @@ static struct btrfs_root *read_fs_root(struct btrfs_fs_info *fs_info,
return btrfs_get_fs_root(fs_info, &key, false);
}
-/*
- * In backref_cache_add_tree_block(), we have only linked the lower node to the
- * edge, but the upper node hasn't been linked to the edge.
- * This means we can only iterate through backref_node::upper to reach parent
- * edges, but not through backref_node::lower to reach children edges.
- *
- * This function will finish the backref_node::lower to related edges, so that
- * backref cache can be bi-directionally iterated.
- *
- * Also, this will add the nodes to backref cache for next run.
- */
-static int finish_upper_links(struct backref_cache *cache,
- struct backref_node *start)
-{
- struct list_head *useless_node = &cache->useless_node;
- struct backref_edge *edge;
- struct rb_node *rb_node;
- LIST_HEAD(pending_edge);
-
- ASSERT(start->checked);
-
- /* Insert this node to cache if it's not cowonly */
- if (!start->cowonly) {
- rb_node = simple_insert(&cache->rb_root, start->bytenr,
- &start->rb_node);
- if (rb_node)
- backref_cache_panic(cache->fs_info, start->bytenr,
- -EEXIST);
- list_add_tail(&start->lower, &cache->leaves);
- }
-
- /*
- * Use breadth first search to iterate all related edges.
- *
- * The start point is all the edges of this node
- */
- list_for_each_entry(edge, &start->upper, list[LOWER])
- list_add_tail(&edge->list[UPPER], &pending_edge);
-
- while (!list_empty(&pending_edge)) {
- struct backref_node *upper;
- struct backref_node *lower;
- struct rb_node *rb_node;
-
- edge = list_first_entry(&pending_edge, struct backref_edge,
- list[UPPER]);
- list_del_init(&edge->list[UPPER]);
- upper = edge->node[UPPER];
- lower = edge->node[LOWER];
-
- /* Parent is detached, no need to keep any edges */
- if (upper->detached) {
- list_del(&edge->list[LOWER]);
- free_backref_edge(cache, edge);
-
- /* Lower node is orphan, queue for cleanup */
- if (list_empty(&lower->upper))
- list_add(&lower->list, useless_node);
- continue;
- }
-
- /*
- * All new nodes added in current build_backref_tree() haven't
- * been linked to the cache rb tree.
- * So if we have upper->rb_node populated, this means a cache
- * hit. We only need to link the edge, as @upper and all its
- * parent have already been linked.
- */
- if (!RB_EMPTY_NODE(&upper->rb_node)) {
- if (upper->lowest) {
- list_del_init(&upper->lower);
- upper->lowest = 0;
- }
-
- list_add_tail(&edge->list[UPPER], &upper->lower);
- continue;
- }
-
- /* Sanity check, we shouldn't have any unchecked nodes */
- if (!upper->checked) {
- ASSERT(0);
- return -EUCLEAN;
- }
-
- /* Sanity check, cowonly node has non-cowonly parent */
- if (start->cowonly != upper->cowonly) {
- ASSERT(0);
- return -EUCLEAN;
- }
-
- /* Only cache non-cowonly (subvolume trees) tree blocks */
- if (!upper->cowonly) {
- rb_node = simple_insert(&cache->rb_root, upper->bytenr,
- &upper->rb_node);
- if (rb_node) {
- backref_cache_panic(cache->fs_info,
- upper->bytenr, -EEXIST);
- return -EUCLEAN;
- }
- }
-
- list_add_tail(&edge->list[UPPER], &upper->lower);
-
- /*
- * Also queue all the parent edges of this uncached node
- * to finish the upper linkage
- */
- list_for_each_entry(edge, &upper->upper, list[LOWER])
- list_add_tail(&edge->list[UPPER], &pending_edge);
- }
- return 0;
-}
-
/*
* For useless nodes, do two major clean ups:
* - Cleanup the children edges and nodes
@@ -636,7 +523,7 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
} while (edge);
/* Finish the upper linkage of newly added edges/nodes */
- ret = finish_upper_links(cache, node);
+ ret = backref_cache_finish_upper_links(cache, node);
if (ret < 0) {
err = ret;
goto out;
--
2.25.1