btrfs: rename delete_unused_bgs_mutex to reclaim_bgs_lock

As a preparation for extending the block group deletion use case, rename
the delete_unused_bgs_mutex to reclaim_bgs_lock.

Reviewed-by: Filipe Manana <fdmanana@suse.com>
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Author: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Date:   2021-04-19 16:41:01 +09:00
Commit: f33720657d (parent 01e86008aa), committed by David Sterba
4 changed files with 31 additions and 30 deletions

--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c

@@ -1289,7 +1289,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
 	 * Long running balances can keep us blocked here for eternity, so
 	 * simply skip deletion if we're unable to get the mutex.
 	 */
-	if (!mutex_trylock(&fs_info->delete_unused_bgs_mutex))
+	if (!mutex_trylock(&fs_info->reclaim_bgs_lock))
 		return;

 	spin_lock(&fs_info->unused_bgs_lock);
@@ -1462,12 +1462,12 @@ next:
 		spin_lock(&fs_info->unused_bgs_lock);
 	}
 	spin_unlock(&fs_info->unused_bgs_lock);
-	mutex_unlock(&fs_info->delete_unused_bgs_mutex);
+	mutex_unlock(&fs_info->reclaim_bgs_lock);
 	return;

 flip_async:
 	btrfs_end_transaction(trans);
-	mutex_unlock(&fs_info->delete_unused_bgs_mutex);
+	mutex_unlock(&fs_info->reclaim_bgs_lock);
 	btrfs_put_block_group(block_group);
 	btrfs_discard_punt_unused_bgs_list(fs_info);
 }
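
For orientation: after this change, btrfs_delete_unused_bgs() keeps its trylock shape, simply skipping a pass whenever relocation currently holds the lock. A heavily condensed sketch of that shape under the new name (not the full function):

	void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
	{
		/* Skip this pass entirely if relocation holds the lock. */
		if (!mutex_trylock(&fs_info->reclaim_bgs_lock))
			return;

		/* ... walk fs_info->unused_bgs and delete empty block groups ... */

		mutex_unlock(&fs_info->reclaim_bgs_lock);
	}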

--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h

@@ -963,7 +963,8 @@ struct btrfs_fs_info {
 	spinlock_t unused_bgs_lock;
 	struct list_head unused_bgs;
 	struct mutex unused_bg_unpin_mutex;
-	struct mutex delete_unused_bgs_mutex;
+	/* Protect block groups that are going to be deleted */
+	struct mutex reclaim_bgs_lock;

 	/* Cached block sizes */
 	u32 nodesize;

--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c

@@ -1890,10 +1890,10 @@ static int cleaner_kthread(void *arg)
 		btrfs_run_defrag_inodes(fs_info);

 		/*
-		 * Acquires fs_info->delete_unused_bgs_mutex to avoid racing
+		 * Acquires fs_info->reclaim_bgs_lock to avoid racing
 		 * with relocation (btrfs_relocate_chunk) and relocation
 		 * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
-		 * after acquiring fs_info->delete_unused_bgs_mutex. So we
+		 * after acquiring fs_info->reclaim_bgs_lock. So we
 		 * can't hold, nor need to, fs_info->cleaner_mutex when deleting
 		 * unused block groups.
 		 */
@@ -2876,7 +2876,7 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
 	spin_lock_init(&fs_info->treelog_bg_lock);
 	rwlock_init(&fs_info->tree_mod_log_lock);
 	mutex_init(&fs_info->unused_bg_unpin_mutex);
-	mutex_init(&fs_info->delete_unused_bgs_mutex);
+	mutex_init(&fs_info->reclaim_bgs_lock);
 	mutex_init(&fs_info->reloc_mutex);
 	mutex_init(&fs_info->delalloc_root_mutex);
 	mutex_init(&fs_info->zoned_meta_io_lock);
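
The cleaner_kthread() comment above encodes a lock ordering rule: relocation takes reclaim_bgs_lock first and cleaner_mutex second, so the cleaner must not hold cleaner_mutex while taking reclaim_bgs_lock. A rough sketch of the two sides (simplified, not verbatim kernel code):

	/* Relocation side (btrfs_relocate_chunk -> btrfs_relocate_block_group): */
	mutex_lock(&fs_info->reclaim_bgs_lock);
	mutex_lock(&fs_info->cleaner_mutex);	/* always taken second */
	/* ... relocate the block group ... */
	mutex_unlock(&fs_info->cleaner_mutex);
	mutex_unlock(&fs_info->reclaim_bgs_lock);

	/*
	 * Cleaner side (cleaner_kthread): cleaner_mutex is dropped before
	 * btrfs_delete_unused_bgs() trylocks reclaim_bgs_lock, so the two
	 * locks are never acquired in the opposite order.
	 */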

--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c

@@ -3118,7 +3118,7 @@ static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
 	 * we release the path used to search the chunk/dev tree and before
 	 * the current task acquires this mutex and calls us.
 	 */
-	lockdep_assert_held(&fs_info->delete_unused_bgs_mutex);
+	lockdep_assert_held(&fs_info->reclaim_bgs_lock);

 	/* step one, relocate all the extents inside this chunk */
 	btrfs_scrub_pause(fs_info);
@@ -3188,10 +3188,10 @@ again:
 	key.type = BTRFS_CHUNK_ITEM_KEY;

 	while (1) {
-		mutex_lock(&fs_info->delete_unused_bgs_mutex);
+		mutex_lock(&fs_info->reclaim_bgs_lock);
 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
 		if (ret < 0) {
-			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
+			mutex_unlock(&fs_info->reclaim_bgs_lock);
 			goto error;
 		}
 		BUG_ON(ret == 0); /* Corruption */
@@ -3199,7 +3199,7 @@ again:
 		ret = btrfs_previous_item(chunk_root, path, key.objectid,
 					  key.type);
 		if (ret)
-			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
+			mutex_unlock(&fs_info->reclaim_bgs_lock);
 		if (ret < 0)
 			goto error;
 		if (ret > 0)
@@ -3220,7 +3220,7 @@ again:
 			else
 				BUG_ON(ret);
 		}
-		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
+		mutex_unlock(&fs_info->reclaim_bgs_lock);

 		if (found_key.offset == 0)
 			break;
@@ -3760,10 +3760,10 @@ again:
 			goto error;
 		}

-		mutex_lock(&fs_info->delete_unused_bgs_mutex);
+		mutex_lock(&fs_info->reclaim_bgs_lock);
 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
 		if (ret < 0) {
-			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
+			mutex_unlock(&fs_info->reclaim_bgs_lock);
 			goto error;
 		}
@@ -3777,7 +3777,7 @@ again:
 		ret = btrfs_previous_item(chunk_root, path, 0,
 					  BTRFS_CHUNK_ITEM_KEY);
 		if (ret) {
-			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
+			mutex_unlock(&fs_info->reclaim_bgs_lock);
 			ret = 0;
 			break;
 		}
@@ -3787,7 +3787,7 @@ again:
 		btrfs_item_key_to_cpu(leaf, &found_key, slot);

 		if (found_key.objectid != key.objectid) {
-			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
+			mutex_unlock(&fs_info->reclaim_bgs_lock);
 			break;
 		}
@@ -3804,12 +3804,12 @@ again:
 		btrfs_release_path(path);
 		if (!ret) {
-			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
+			mutex_unlock(&fs_info->reclaim_bgs_lock);
 			goto loop;
 		}

 		if (counting) {
-			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
+			mutex_unlock(&fs_info->reclaim_bgs_lock);
 			spin_lock(&fs_info->balance_lock);
 			bctl->stat.expected++;
 			spin_unlock(&fs_info->balance_lock);
@@ -3834,7 +3834,7 @@ again:
 		      count_meta < bctl->meta.limit_min)
 		     || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
 		      count_sys < bctl->sys.limit_min)) {
-			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
+			mutex_unlock(&fs_info->reclaim_bgs_lock);
 			goto loop;
 		}
@@ -3848,7 +3848,7 @@ again:
 			ret = btrfs_may_alloc_data_chunk(fs_info,
 							 found_key.offset);
 			if (ret < 0) {
-				mutex_unlock(&fs_info->delete_unused_bgs_mutex);
+				mutex_unlock(&fs_info->reclaim_bgs_lock);
 				goto error;
 			} else if (ret == 1) {
 				chunk_reserved = 1;
@@ -3856,7 +3856,7 @@ again:
 		}

 		ret = btrfs_relocate_chunk(fs_info, found_key.offset);
-		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
+		mutex_unlock(&fs_info->reclaim_bgs_lock);
 		if (ret == -ENOSPC) {
 			enospc_errors++;
 		} else if (ret == -ETXTBSY) {
@@ -4741,16 +4741,16 @@ again:
 	key.type = BTRFS_DEV_EXTENT_KEY;

 	do {
-		mutex_lock(&fs_info->delete_unused_bgs_mutex);
+		mutex_lock(&fs_info->reclaim_bgs_lock);
 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 		if (ret < 0) {
-			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
+			mutex_unlock(&fs_info->reclaim_bgs_lock);
 			goto done;
 		}

 		ret = btrfs_previous_item(root, path, 0, key.type);
 		if (ret) {
-			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
+			mutex_unlock(&fs_info->reclaim_bgs_lock);
 			if (ret < 0)
 				goto done;
 			ret = 0;
@@ -4763,7 +4763,7 @@ again:
 		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

 		if (key.objectid != device->devid) {
-			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
+			mutex_unlock(&fs_info->reclaim_bgs_lock);
 			btrfs_release_path(path);
 			break;
 		}
@@ -4772,7 +4772,7 @@ again:
 		length = btrfs_dev_extent_length(l, dev_extent);

 		if (key.offset + length <= new_size) {
-			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
+			mutex_unlock(&fs_info->reclaim_bgs_lock);
 			btrfs_release_path(path);
 			break;
 		}
@@ -4788,12 +4788,12 @@ again:
 		 */
 		ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
 		if (ret < 0) {
-			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
+			mutex_unlock(&fs_info->reclaim_bgs_lock);
 			goto done;
 		}

 		ret = btrfs_relocate_chunk(fs_info, chunk_offset);
-		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
+		mutex_unlock(&fs_info->reclaim_bgs_lock);
 		if (ret == -ENOSPC) {
 			failed++;
 		} else if (ret) {
@@ -8068,7 +8068,7 @@ static int relocating_repair_kthread(void *data)
 		return -EBUSY;
 	}

-	mutex_lock(&fs_info->delete_unused_bgs_mutex);
+	mutex_lock(&fs_info->reclaim_bgs_lock);

 	/* Ensure block group still exists */
 	cache = btrfs_lookup_block_group(fs_info, target);
@@ -8090,7 +8090,7 @@ static int relocating_repair_kthread(void *data)
 out:
 	if (cache)
 		btrfs_put_block_group(cache);
-	mutex_unlock(&fs_info->delete_unused_bgs_mutex);
+	mutex_unlock(&fs_info->reclaim_bgs_lock);
 	btrfs_exclop_finish(fs_info);

 	return ret;
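
All of the volumes.c call sites follow the pattern that the lockdep_assert_held() in btrfs_relocate_chunk() documents: reclaim_bgs_lock is taken around the chunk/dev tree search plus the btrfs_relocate_chunk() call, and released on every exit path. A condensed sketch of that loop, with search details and error handling elided:

	while (1) {
		mutex_lock(&fs_info->reclaim_bgs_lock);
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&fs_info->reclaim_bgs_lock);
			goto error;
		}
		/* ... decide whether this chunk should be relocated ... */
		ret = btrfs_relocate_chunk(fs_info, found_key.offset);
		mutex_unlock(&fs_info->reclaim_bgs_lock);
		/* ... handle -ENOSPC / -ETXTBSY and advance the search key ... */
	}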