
btrfs: qgroup: Cleanup old subtree swap code

Since it's replaced by new delayed subtree swap code, remove the
original code.

The cleanup is small since most of its core functionality is still used by
the delayed subtree swap tracing code.

Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Qu Wenruo 2019-01-23 15:15:18 +08:00 committed by David Sterba
parent f616f5cd9d
commit 9627736b75
2 changed files with 0 additions and 100 deletions
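For context on what replaces the removed function: the delayed subtree swap approach records the swapped node pair during balance and only performs the qgroup trace when the recorded block is later COWed, so subtrees that are never modified again are never walked. Below is a minimal user-space sketch of that idea; the struct and function names (swapped_block, record_swapped_block, trace_subtree_on_cow) are invented for illustration and are not the kernel's API.

/*
 * Illustrative sketch only (user-space C, invented names): record the
 * swapped (file tree, reloc tree) node pair at balance time and defer
 * the qgroup subtree trace until the recorded block is COWed again.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct swapped_block {
	uint64_t subvol_bytenr;		/* block in the file (subvolume) tree */
	uint64_t reloc_bytenr;		/* its counterpart in the reloc tree */
	uint64_t last_snapshot;		/* generations below this need no tracing */
	struct swapped_block *next;
};

static struct swapped_block *swapped_blocks;

/* Balance time: O(1) bookkeeping per swapped node pair, no tree walk. */
static int record_swapped_block(uint64_t subvol_bytenr, uint64_t reloc_bytenr,
				uint64_t last_snapshot)
{
	struct swapped_block *blk = malloc(sizeof(*blk));

	if (!blk)
		return -1;
	blk->subvol_bytenr = subvol_bytenr;
	blk->reloc_bytenr = reloc_bytenr;
	blk->last_snapshot = last_snapshot;
	blk->next = swapped_blocks;
	swapped_blocks = blk;
	return 0;
}

/*
 * COW time: only a block that is actually rewritten triggers the
 * generation-aware subtree trace for its recorded counterpart.
 */
static void trace_subtree_on_cow(uint64_t bytenr)
{
	struct swapped_block **p = &swapped_blocks;

	while (*p) {
		struct swapped_block *blk = *p;

		if (blk->subvol_bytenr == bytenr) {
			printf("trace swapped subtrees %llu <-> %llu (last_snapshot %llu)\n",
			       (unsigned long long)blk->subvol_bytenr,
			       (unsigned long long)blk->reloc_bytenr,
			       (unsigned long long)blk->last_snapshot);
			*p = blk->next;
			free(blk);
			return;
		}
		p = &blk->next;
	}
}

int main(void)
{
	record_swapped_block(30457856, 30670848, 1024);
	/* Only the COWed block is traced; untouched subtrees never are. */
	trace_subtree_on_cow(30457856);
	return 0;
}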

fs/btrfs/qgroup.c

@@ -2070,100 +2070,6 @@ out:
	return ret;
}

/*
 * Inform qgroup to trace subtree swap used in balance.
 *
 * Unlike btrfs_qgroup_trace_subtree(), this function will only trace
 * new tree blocks whose generation is equal to (or larger than) @last_snapshot.
 *
 * Will go down the tree block pointed by @dst_eb (pointed by @dst_parent and
 * @dst_slot), and find any tree blocks whose generation is at @last_snapshot,
 * and then go down @src_eb (pointed by @src_parent and @src_slot) to find
 * the counterpart of the tree block, then mark both tree blocks as qgroup dirty,
 * and skip all tree blocks whose generation is smaller than last_snapshot.
 *
 * This would skip tons of tree blocks of original btrfs_qgroup_trace_subtree(),
 * which could be the cause of very slow balance if the file tree is large.
 *
 * @src_parent, @src_slot: pointer to src (file tree) eb.
 * @dst_parent, @dst_slot: pointer to dst (reloc tree) eb.
 */
int btrfs_qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
				struct btrfs_block_group_cache *bg_cache,
				struct extent_buffer *src_parent, int src_slot,
				struct extent_buffer *dst_parent, int dst_slot,
				u64 last_snapshot)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_key first_key;
	struct extent_buffer *src_eb = NULL;
	struct extent_buffer *dst_eb = NULL;
	bool trace_leaf = false;
	u64 child_gen;
	u64 child_bytenr;
	int ret;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	/* Check parameter order */
	if (btrfs_node_ptr_generation(src_parent, src_slot) >
	    btrfs_node_ptr_generation(dst_parent, dst_slot)) {
		btrfs_err_rl(fs_info,
		"%s: bad parameter order, src_gen=%llu dst_gen=%llu", __func__,
			btrfs_node_ptr_generation(src_parent, src_slot),
			btrfs_node_ptr_generation(dst_parent, dst_slot));
		return -EUCLEAN;
	}

	/*
	 * Only trace leaf if we're relocating data block groups, this could
	 * reduce tons of data extents tracing for meta/sys bg relocation.
	 */
	if (bg_cache->flags & BTRFS_BLOCK_GROUP_DATA)
		trace_leaf = true;

	/* Read out real @src_eb, pointed by @src_parent and @src_slot */
	child_bytenr = btrfs_node_blockptr(src_parent, src_slot);
	child_gen = btrfs_node_ptr_generation(src_parent, src_slot);
	btrfs_node_key_to_cpu(src_parent, &first_key, src_slot);

	src_eb = read_tree_block(fs_info, child_bytenr, child_gen,
			btrfs_header_level(src_parent) - 1, &first_key);
	if (IS_ERR(src_eb)) {
		ret = PTR_ERR(src_eb);
		goto out;
	}

	/* Read out real @dst_eb, pointed by @dst_parent and @dst_slot */
	child_bytenr = btrfs_node_blockptr(dst_parent, dst_slot);
	child_gen = btrfs_node_ptr_generation(dst_parent, dst_slot);
	btrfs_node_key_to_cpu(dst_parent, &first_key, dst_slot);

	dst_eb = read_tree_block(fs_info, child_bytenr, child_gen,
			btrfs_header_level(dst_parent) - 1, &first_key);
	if (IS_ERR(dst_eb)) {
		ret = PTR_ERR(dst_eb);
		goto out;
	}

	if (!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb)) {
		ret = -EINVAL;
		goto out;
	}

	/* Do the generation aware breadth-first search */
	ret = qgroup_trace_subtree_swap(trans, src_eb, dst_eb, last_snapshot,
					trace_leaf);
	if (ret < 0)
		goto out;
	ret = 0;
out:
	free_extent_buffer(src_eb);
	free_extent_buffer(dst_eb);
	return ret;
}

int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
			       struct extent_buffer *root_eb,
			       u64 root_gen, int root_level)

fs/btrfs/qgroup.h

@@ -316,12 +316,6 @@ int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
			       struct extent_buffer *root_eb,
			       u64 root_gen, int root_level);
int btrfs_qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
				struct btrfs_block_group_cache *bg_cache,
				struct extent_buffer *src_parent, int src_slot,
				struct extent_buffer *dst_parent, int dst_slot,
				u64 last_snapshot);
int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
				u64 num_bytes, struct ulist *old_roots,
				struct ulist *new_roots);