btrfs: load free space cache asynchronously
While documenting the usage of the commit_root_sem, I noticed that we do not actually take the commit_root_sem in the case of the free space cache. This is problematic because we're supposed to hold that sem while we're reading the commit roots, which is exactly what we do when loading the free space cache.

The reason I did it inline when I originally wrote the code was the unpinning case, where we need to make sure the free space cache is loaded before we use it. But we can accomplish the same thing by simply waiting for the cache to be loaded.

Rework this code to load the free space cache asynchronously. This lets us greatly clean up the caching code, because it is now all shared by the various caching methods. It also puts us in a position to hold the commit_root_sem while we are loading the free space cache. Finally, our modification of ->last_byte_to_unpin is removed, because it can be handled in the proper way on commit.

Some care must be taken when replaying the log: we expect the free space cache to be read entirely before we start excluding space to replay, as anything else could lead to overwriting space during replay.

Reviewed-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 4d7240f0ab
commit e747853cae
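The heart of the patch is the new waiter pair space_cache_v1_done() / btrfs_wait_space_cache_v1_finished() in the diff below: a condition checked under a spinlock, plus a waitqueue that the caching thread kicks whenever the state changes. For readers who want that pattern in isolation, here is a minimal, self-contained userspace sketch; the names and the pthread translation are illustrative only, since the kernel code uses waitqueues (wait_event()/wake_up()) rather than condition variables.

/*
 * Userspace sketch of the wait-for-state pattern introduced below.
 * Illustrative names; not kernel code.
 */
#include <pthread.h>

enum cache_state { CACHE_FAST, CACHE_STARTED, CACHE_FINISHED, CACHE_ERROR };

struct block_group {
	pthread_mutex_t lock;
	pthread_cond_t wait;		/* stands in for caching_ctl->wait */
	enum cache_state cached;
};

/* Analogue of btrfs_wait_space_cache_v1_finished(): block until the
 * caching thread has moved the group out of CACHE_FAST. */
static void wait_space_cache_v1_finished(struct block_group *bg)
{
	pthread_mutex_lock(&bg->lock);
	while (bg->cached == CACHE_FAST)
		pthread_cond_wait(&bg->wait, &bg->lock);
	pthread_mutex_unlock(&bg->lock);
}

/* Analogue of the caching thread's side: publish the new state under
 * the lock, then wake all waiters (wake_up() in the kernel). */
static void finish_v1_load(struct block_group *bg, enum cache_state next)
{
	pthread_mutex_lock(&bg->lock);
	bg->cached = next;
	pthread_mutex_unlock(&bg->lock);
	pthread_cond_broadcast(&bg->wait);
}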
fs/btrfs/block-group.c

@@ -424,6 +424,23 @@ int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache)
 	return ret;
 }
 
+static bool space_cache_v1_done(struct btrfs_block_group *cache)
+{
+	bool ret;
+
+	spin_lock(&cache->lock);
+	ret = cache->cached != BTRFS_CACHE_FAST;
+	spin_unlock(&cache->lock);
+
+	return ret;
+}
+
+void btrfs_wait_space_cache_v1_finished(struct btrfs_block_group *cache,
+				struct btrfs_caching_control *caching_ctl)
+{
+	wait_event(caching_ctl->wait, space_cache_v1_done(cache));
+}
+
 #ifdef CONFIG_BTRFS_DEBUG
 static void fragment_free_space(struct btrfs_block_group *block_group)
 {
@@ -639,11 +656,28 @@ static noinline void caching_thread(struct btrfs_work *work)
 	mutex_lock(&caching_ctl->mutex);
 	down_read(&fs_info->commit_root_sem);
 
+	if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
+		ret = load_free_space_cache(block_group);
+		if (ret == 1) {
+			ret = 0;
+			goto done;
+		}
+
+		/*
+		 * We failed to load the space cache, set ourselves to
+		 * CACHE_STARTED and carry on.
+		 */
+		spin_lock(&block_group->lock);
+		block_group->cached = BTRFS_CACHE_STARTED;
+		spin_unlock(&block_group->lock);
+		wake_up(&caching_ctl->wait);
+	}
+
 	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
 		ret = load_free_space_tree(caching_ctl);
 	else
 		ret = load_extent_tree_free(caching_ctl);
-
+done:
 	spin_lock(&block_group->lock);
 	block_group->caching_ctl = NULL;
 	block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
@@ -679,7 +713,7 @@ int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only
 {
 	DEFINE_WAIT(wait);
 	struct btrfs_fs_info *fs_info = cache->fs_info;
-	struct btrfs_caching_control *caching_ctl;
+	struct btrfs_caching_control *caching_ctl = NULL;
 	int ret = 0;
 
 	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
@@ -691,84 +725,28 @@ int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only
 	init_waitqueue_head(&caching_ctl->wait);
 	caching_ctl->block_group = cache;
 	caching_ctl->progress = cache->start;
-	refcount_set(&caching_ctl->count, 1);
+	refcount_set(&caching_ctl->count, 2);
 	btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);
 
 	spin_lock(&cache->lock);
 	if (cache->cached != BTRFS_CACHE_NO) {
-		spin_unlock(&cache->lock);
 		kfree(caching_ctl);
-		return 0;
+
+		caching_ctl = cache->caching_ctl;
+		if (caching_ctl)
+			refcount_inc(&caching_ctl->count);
+		spin_unlock(&cache->lock);
+		goto out;
 	}
 	WARN_ON(cache->caching_ctl);
 	cache->caching_ctl = caching_ctl;
-	cache->cached = BTRFS_CACHE_FAST;
+	if (btrfs_test_opt(fs_info, SPACE_CACHE))
+		cache->cached = BTRFS_CACHE_FAST;
+	else
+		cache->cached = BTRFS_CACHE_STARTED;
+	cache->has_caching_ctl = 1;
 	spin_unlock(&cache->lock);
 
-	if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
-		mutex_lock(&caching_ctl->mutex);
-		ret = load_free_space_cache(cache);
-
-		spin_lock(&cache->lock);
-		if (ret == 1) {
-			cache->caching_ctl = NULL;
-			cache->cached = BTRFS_CACHE_FINISHED;
-			cache->last_byte_to_unpin = (u64)-1;
-			caching_ctl->progress = (u64)-1;
-		} else {
-			if (load_cache_only) {
-				cache->caching_ctl = NULL;
-				cache->cached = BTRFS_CACHE_NO;
-			} else {
-				cache->cached = BTRFS_CACHE_STARTED;
-				cache->has_caching_ctl = 1;
-			}
-		}
-		spin_unlock(&cache->lock);
-#ifdef CONFIG_BTRFS_DEBUG
-		if (ret == 1 &&
-		    btrfs_should_fragment_free_space(cache)) {
-			u64 bytes_used;
-
-			spin_lock(&cache->space_info->lock);
-			spin_lock(&cache->lock);
-			bytes_used = cache->length - cache->used;
-			cache->space_info->bytes_used += bytes_used >> 1;
-			spin_unlock(&cache->lock);
-			spin_unlock(&cache->space_info->lock);
-			fragment_free_space(cache);
-		}
-#endif
-		mutex_unlock(&caching_ctl->mutex);
-
-		wake_up(&caching_ctl->wait);
-		if (ret == 1) {
-			btrfs_put_caching_control(caching_ctl);
-			btrfs_free_excluded_extents(cache);
-			return 0;
-		}
-	} else {
-		/*
-		 * We're either using the free space tree or no caching at all.
-		 * Set cached to the appropriate value and wakeup any waiters.
-		 */
-		spin_lock(&cache->lock);
-		if (load_cache_only) {
-			cache->caching_ctl = NULL;
-			cache->cached = BTRFS_CACHE_NO;
-		} else {
-			cache->cached = BTRFS_CACHE_STARTED;
-			cache->has_caching_ctl = 1;
-		}
-		spin_unlock(&cache->lock);
-		wake_up(&caching_ctl->wait);
-	}
-
-	if (load_cache_only) {
-		btrfs_put_caching_control(caching_ctl);
-		return 0;
-	}
-
 	down_write(&fs_info->commit_root_sem);
 	refcount_inc(&caching_ctl->count);
 	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
@@ -777,6 +755,11 @@ int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only
 	btrfs_get_block_group(cache);
 
 	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
+out:
+	if (load_cache_only && caching_ctl)
+		btrfs_wait_space_cache_v1_finished(cache, caching_ctl);
+	if (caching_ctl)
+		btrfs_put_caching_control(caching_ctl);
 
 	return ret;
 }
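One subtlety in the hunks above: refcount_set(&caching_ctl->count, 2) now starts the control structure with two references, one owned by cache->caching_ctl and one by the local caching_ctl pointer in btrfs_cache_block_group(), which is dropped at the new out: label after an optional synchronous wait. A self-contained userspace sketch of that two-owner lifetime, with hypothetical helper names standing in for refcount_set()/refcount_inc()/btrfs_put_caching_control():

/*
 * Userspace sketch of the caching_ctl reference counting above.
 * Hypothetical names; not the btrfs implementation.
 */
#include <stdatomic.h>
#include <stdlib.h>

struct caching_ctl {
	atomic_int count;
	/* ... caching state, waitqueue, work item ... */
};

static struct caching_ctl *caching_ctl_alloc(void)
{
	struct caching_ctl *ctl = calloc(1, sizeof(*ctl));

	/* Two owners from the start: the block group's ->caching_ctl
	 * pointer and the local caller that reaches the out: label. */
	if (ctl)
		atomic_init(&ctl->count, 2);
	return ctl;
}

static void caching_ctl_put(struct caching_ctl *ctl)
{
	/* The last owner to drop its reference frees the structure. */
	if (ctl && atomic_fetch_sub(&ctl->count, 1) == 1)
		free(ctl);
}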
fs/btrfs/block-group.h

@@ -268,6 +268,8 @@ void check_system_chunk(struct btrfs_trans_handle *trans, const u64 type);
 u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags);
 void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
 int btrfs_free_block_groups(struct btrfs_fs_info *info);
+void btrfs_wait_space_cache_v1_finished(struct btrfs_block_group *cache,
+				struct btrfs_caching_control *caching_ctl);
 
 static inline u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info)
 {
fs/btrfs/extent-tree.c

@@ -2641,6 +2641,11 @@ static int __exclude_logged_extent(struct btrfs_fs_info *fs_info,
 		BUG_ON(!btrfs_block_group_done(block_group));
 		ret = btrfs_remove_free_space(block_group, start, num_bytes);
 	} else {
+		/*
+		 * We must wait for v1 caching to finish, otherwise we may not
+		 * remove our space.
+		 */
+		btrfs_wait_space_cache_v1_finished(block_group, caching_ctl);
 		mutex_lock(&caching_ctl->mutex);
 
 		if (start >= caching_ctl->progress) {
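Taken together, the hunks above leave the block group caching states flowing roughly as in the sketch below. The enum mirrors the BTRFS_CACHE_* values; the transition helper itself is illustrative, not kernel code, and the CACHE_NO-to-FAST/STARTED step happens in btrfs_cache_block_group() depending on the SPACE_CACHE mount option.

#include <stdbool.h>

enum cache_state { CACHE_NO, CACHE_FAST, CACHE_STARTED, CACHE_FINISHED, CACHE_ERROR };

/*
 * Illustrative summary of the reworked caching_thread(): v1 space cache
 * groups start caching in CACHE_FAST; a successful load jumps straight
 * to done (CACHE_FINISHED), while a failed load falls back to
 * CACHE_STARTED and scans the free space tree or extent tree instead.
 */
static enum cache_state next_state(enum cache_state cur, bool step_ok)
{
	switch (cur) {
	case CACHE_FAST:
		return step_ok ? CACHE_FINISHED : CACHE_STARTED;
	case CACHE_STARTED:
		return step_ok ? CACHE_FINISHED : CACHE_ERROR;
	default:
		return cur;	/* CACHE_NO starts in btrfs_cache_block_group() */
	}
}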