
Btrfs: Allocator improvements

Reduce CPU time searching for free blocks by optimizing find_first_extent_bit

Fix find_free_extent to make better use of the last_alloc hint.  Before,
it often found blocks just before the hint.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
commit d7fc640e6f
parent 9afbb0b752
Chris Mason <chris.mason@oracle.com>, 2008-02-18 12:12:38 -05:00

4 changed files with 70 additions and 15 deletions
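The heart of the CPU-time fix: find_first_extent_bit() starts a fresh rb-tree
search from the root on every call, so the old find_search_start() loop paid an
O(log n) lookup for every free-space extent it examined.  The new
find_first_extent_bit_state()/extent_state_next() pair does the root-to-leaf
search once and then steps to in-order neighbors with rb_next(), which is O(1)
amortized per step.  A minimal userspace sketch of the same
search-once-then-walk pattern, using a toy parent-pointer BST as a stand-in for
the kernel rb-tree (types and names here are illustrative, not the kernel's):

	/* Sketch only: search once, then walk, instead of re-searching
	 * the tree from the root for every candidate extent. */
	#include <stddef.h>

	struct node {
		unsigned long start, end;          /* non-overlapping range, keyed by start */
		int dirty;                         /* stands in for EXTENT_DIRTY */
		struct node *left, *right, *parent;
	};

	/* in-order successor: the moral equivalent of rb_next()/extent_state_next() */
	static struct node *next_node(struct node *n)
	{
		if (n->right) {
			n = n->right;
			while (n->left)
				n = n->left;
			return n;
		}
		while (n->parent && n == n->parent->right)
			n = n->parent;
		return n->parent;
	}

	/* first node ending at or after 'start' with the dirty bit set:
	 * one O(log n) descent, then cheap successor steps */
	static struct node *first_dirty_from(struct node *root, unsigned long start)
	{
		struct node *n = root, *candidate = NULL;

		while (n) {                        /* single descent from the root */
			if (n->end >= start) {
				candidate = n;     /* remember leftmost match so far */
				n = n->left;
			} else {
				n = n->right;
			}
		}
		while (candidate && !candidate->dirty)  /* walk, don't re-search */
			candidate = next_node(candidate);
		return candidate;
	}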

fs/btrfs/disk-io.c

@@ -951,7 +951,7 @@ void btrfs_throttle(struct btrfs_root *root)
 void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
 {
 	balance_dirty_pages_ratelimited_nr(
-			root->fs_info->btree_inode->i_mapping, 1);
+				   root->fs_info->btree_inode->i_mapping, 1);
 }
 
 void btrfs_set_buffer_defrag(struct extent_buffer *buf)

fs/btrfs/extent-tree.c

@@ -188,9 +188,10 @@ static u64 noinline find_search_start(struct btrfs_root *root,
 {
 	int ret;
 	struct btrfs_block_group_cache *cache = *cache_ret;
+	struct extent_io_tree *free_space_cache;
+	struct extent_state *state;
 	u64 last;
 	u64 start = 0;
-	u64 end = 0;
 	u64 cache_miss = 0;
 	u64 total_fs_bytes;
 	int wrapped = 0;
@@ -199,6 +200,8 @@
 		goto out;
 	}
 	total_fs_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
+	free_space_cache = &root->fs_info->free_space_cache;
+
 again:
 	ret = cache_block_group(root, cache);
 	if (ret)
@@ -206,22 +209,27 @@ again:
 	last = max(search_start, cache->key.objectid);
 
+	spin_lock_irq(&free_space_cache->lock);
+	state = find_first_extent_bit_state(free_space_cache, last, EXTENT_DIRTY);
 	while(1) {
-		ret = find_first_extent_bit(&root->fs_info->free_space_cache,
-					    last, &start, &end, EXTENT_DIRTY);
-		if (ret) {
+		if (!state) {
 			if (!cache_miss)
 				cache_miss = last;
+			spin_unlock_irq(&free_space_cache->lock);
 			goto new_group;
 		}
 
-		start = max(last, start);
-		last = end + 1;
+		start = max(last, state->start);
+		last = state->end + 1;
 		if (last - start < num) {
 			if (last == cache->key.objectid + cache->key.offset)
 				cache_miss = start;
+			do {
+				state = extent_state_next(state);
+			} while(state && !(state->state & EXTENT_DIRTY));
 			continue;
 		}
+		spin_unlock_irq(&free_space_cache->lock);
 
 		if (data != BTRFS_BLOCK_GROUP_MIXED &&
 		    start + num > cache->key.objectid + cache->key.offset)
 			goto new_group;
@@ -1420,6 +1428,7 @@ static int noinline find_free_extent(struct btrfs_trans_handle *trans,
 	struct btrfs_block_group_cache *block_group;
 	int full_scan = 0;
 	int wrapped = 0;
+	int empty_cluster;
 	u64 cached_start;
 
 	WARN_ON(num_bytes < root->sectorsize);
@ -1431,11 +1440,15 @@ static int noinline find_free_extent(struct btrfs_trans_handle *trans,
data = BTRFS_BLOCK_GROUP_MIXED;
}
if (!data)
if (!data) {
last_ptr = &root->fs_info->last_alloc;
empty_cluster = 128 * 1024;
}
if (data && btrfs_test_opt(root, SSD))
if (data && btrfs_test_opt(root, SSD)) {
last_ptr = &root->fs_info->last_data_alloc;
empty_cluster = 2 * 1024 * 1024;
}
if (last_ptr) {
if (*last_ptr)
@@ -1443,8 +1456,9 @@ static int noinline find_free_extent(struct btrfs_trans_handle *trans,
 		else {
 			hint_byte = hint_byte &
 				~((u64)BTRFS_BLOCK_GROUP_SIZE - 1);
-			empty_size += 2 * 1024 * 1024;
+			empty_size += empty_cluster;
 		}
+
 		search_start = max(search_start, hint_byte);
 	}
 	search_end = min(search_end,
@@ -1476,7 +1490,7 @@ check_failed:
 	if (last_ptr && *last_ptr && search_start != *last_ptr) {
 		*last_ptr = 0;
 		if (!empty_size) {
-			empty_size += 2 * 1024 * 1024;
+			empty_size += empty_cluster;
 			total_needed += empty_size;
 		}
 		search_start = find_search_start(root, &block_group,

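Taken together, the find_free_extent() hunks above make the hint
self-correcting: metadata and data keep separate last-allocation pointers, and
whenever the search cannot continue right at the hint, the hint is dropped and
empty_size is padded by a per-type cluster (128K for metadata, 2M for data on
SSD) so the next hint is re-established inside a hole big enough for several
allocations, rather than in small holes just before the old hint.  A condensed
sketch of that policy (helper name and types are illustrative, not the kernel
code):

	/* Hypothetical condensation of the retry logic in find_free_extent():
	 * if allocation could not happen right at the hint, drop the hint and
	 * demand a whole empty cluster so the new hint starts in open space.
	 * empty_cluster is 128K for metadata, 2M for data on SSD. */
	static void drop_stale_hint(unsigned long long *last_ptr,
				    unsigned long long *empty_size,
				    unsigned long long empty_cluster,
				    unsigned long long search_start)
	{
		if (*last_ptr && search_start != *last_ptr) {
			*last_ptr = 0;                        /* hint no longer pays off */
			if (!*empty_size)
				*empty_size += empty_cluster; /* require a big hole */
		}
	}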
fs/btrfs/extent_io.c

@@ -255,7 +255,7 @@ static int merge_state(struct extent_io_tree *tree,
 			state->start = other->start;
 			other->tree = NULL;
 			if (tree->last == other)
-				tree->last = NULL;
+				tree->last = state;
 			rb_erase(&other->rb_node, &tree->state);
 			free_extent_state(other);
 		}
@@ -268,7 +268,7 @@ static int merge_state(struct extent_io_tree *tree,
 			other->start = state->start;
 			state->tree = NULL;
 			if (tree->last == state)
-				tree->last = NULL;
+				tree->last = other;
 			rb_erase(&state->rb_node, &tree->state);
 			free_extent_state(state);
 		}
@@ -397,8 +397,9 @@ static int clear_state_bit(struct extent_io_tree *tree,
 	if (delete || state->state == 0) {
 		if (state->tree) {
 			clear_state_cb(tree, state, state->state);
-			if (tree->last == state)
-				tree->last = NULL;
+			if (tree->last == state) {
+				tree->last = extent_state_next(state);
+			}
 			rb_erase(&state->rb_node, &tree->state);
 			state->tree = NULL;
 			free_extent_state(state);
@@ -962,6 +963,35 @@ out:
 }
 EXPORT_SYMBOL(find_first_extent_bit);
 
+struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
+						 u64 start, int bits)
+{
+	struct rb_node *node;
+	struct extent_state *state;
+
+	/*
+	 * this search will find all the extents that end after
+	 * our range starts.
+	 */
+	node = tree_search(tree, start);
+	if (!node || IS_ERR(node)) {
+		goto out;
+	}
+
+	while(1) {
+		state = rb_entry(node, struct extent_state, rb_node);
+		if (state->end >= start && (state->state & bits)) {
+			return state;
+		}
+		node = rb_next(node);
+		if (!node)
+			break;
+	}
+out:
+	return NULL;
+}
+EXPORT_SYMBOL(find_first_extent_bit_state);
+
 u64 find_lock_delalloc_range(struct extent_io_tree *tree,
 			     u64 *start, u64 *end, u64 max_bytes)
 {

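The merge_state() and clear_state_bit() hunks above serve the same goal from
the other side: tree->last is a one-entry cache of the most recently used
extent_state, and instead of invalidating it whenever the cached node is merged
or erased, the code now repoints it at the surviving neighbor or in-order
successor, so the next lookup can start from a warm pointer instead of a full
root-to-leaf search.  The pattern in miniature (context simplified from the
diff above):

	/* On erase, keep the cache warm by pointing it at the successor
	 * (which may be NULL at the end of the tree) rather than clearing it. */
	if (tree->last == state)
		tree->last = extent_state_next(state);
	rb_erase(&state->rb_node, &tree->state);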
fs/btrfs/extent_io.h

@@ -81,6 +81,15 @@ struct extent_buffer {
 
 struct extent_map_tree;
 
+static inline struct extent_state *extent_state_next(struct extent_state *state)
+{
+	struct rb_node *node;
+	node = rb_next(&state->rb_node);
+	if (!node)
+		return NULL;
+	return rb_entry(node, struct extent_state, rb_node);
+}
+
 typedef struct extent_map *(get_extent_t)(struct inode *inode,
 					  struct page *page,
 					  size_t page_offset,
@@ -122,6 +131,8 @@ int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
 		       gfp_t mask);
 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
 			  u64 *start_ret, u64 *end_ret, int bits);
+struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
+						 u64 start, int bits);
 int extent_invalidatepage(struct extent_io_tree *tree,
 			  struct page *page, unsigned long offset);
 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,