mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-15 15:04:27 +08:00
btrfs: avoid useless rbtree iterations when attempting to merge extent map
When trying to merge an extent map that was just inserted or unpinned, we will try to merge it with any adjacent extent map that is suitable. However we will only check if our extent map is mergeable after searching for the previous and next extent maps in the rbtree, meaning that we are doing unnecessary calls to rb_prev() and rb_next() in case our extent map is not mergeable (it's compressed, in the list of modified extents, being logged or pinned), wasting CPU time chasing rbtree pointers and pulling in unnecessary cache lines. So change the logic to check first if an extent map is mergeable before searching for the next and previous extent maps in the rbtree. Signed-off-by: Filipe Manana <fdmanana@suse.com> Reviewed-by: David Sterba <dsterba@suse.com> Signed-off-by: David Sterba <dsterba@suse.com>
This commit is contained in:
parent
00deaf04df
commit
1a9fb16c60
@ -187,31 +187,32 @@ static inline u64 extent_map_block_end(const struct extent_map *em)
|
||||
return em->block_start + em->block_len;
|
||||
}
|
||||
|
||||
/* Check to see if two extent_map structs are adjacent and safe to merge. */
|
||||
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
|
||||
static bool can_merge_extent_map(const struct extent_map *em)
|
||||
{
|
||||
if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
|
||||
return 0;
|
||||
if (test_bit(EXTENT_FLAG_PINNED, &em->flags))
|
||||
return false;
|
||||
|
||||
/*
|
||||
* don't merge compressed extents, we need to know their
|
||||
* actual size
|
||||
*/
|
||||
if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
|
||||
return 0;
|
||||
/* Don't merge compressed extents, we need to know their actual size. */
|
||||
if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
|
||||
return false;
|
||||
|
||||
if (test_bit(EXTENT_FLAG_LOGGING, &prev->flags) ||
|
||||
test_bit(EXTENT_FLAG_LOGGING, &next->flags))
|
||||
return 0;
|
||||
if (test_bit(EXTENT_FLAG_LOGGING, &em->flags))
|
||||
return false;
|
||||
|
||||
/*
|
||||
* We don't want to merge stuff that hasn't been written to the log yet
|
||||
* since it may not reflect exactly what is on disk, and that would be
|
||||
* bad.
|
||||
*/
|
||||
if (!list_empty(&prev->list) || !list_empty(&next->list))
|
||||
return 0;
|
||||
if (!list_empty(&em->list))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Check to see if two extent_map structs are adjacent and safe to merge. */
|
||||
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
|
||||
{
|
||||
if (extent_map_end(prev) == next->start &&
|
||||
prev->flags == next->flags &&
|
||||
((next->block_start == EXTENT_MAP_HOLE &&
|
||||
@ -241,11 +242,14 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
|
||||
if (refcount_read(&em->refs) > 2)
|
||||
return;
|
||||
|
||||
if (!can_merge_extent_map(em))
|
||||
return;
|
||||
|
||||
if (em->start != 0) {
|
||||
rb = rb_prev(&em->rb_node);
|
||||
if (rb)
|
||||
merge = rb_entry(rb, struct extent_map, rb_node);
|
||||
if (rb && mergable_maps(merge, em)) {
|
||||
if (rb && can_merge_extent_map(merge) && mergable_maps(merge, em)) {
|
||||
em->start = merge->start;
|
||||
em->orig_start = merge->orig_start;
|
||||
em->len += merge->len;
|
||||
@ -265,7 +269,7 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
|
||||
rb = rb_next(&em->rb_node);
|
||||
if (rb)
|
||||
merge = rb_entry(rb, struct extent_map, rb_node);
|
||||
if (rb && mergable_maps(em, merge)) {
|
||||
if (rb && can_merge_extent_map(merge) && mergable_maps(em, merge)) {
|
||||
em->len += merge->len;
|
||||
em->block_len += merge->block_len;
|
||||
rb_erase_cached(&merge->rb_node, &tree->map);
|
||||
|
Loading…
Reference in New Issue
Block a user