btrfs: improve global reserve stealing logic
[ Upstream commit 7f9fe61440 ]
For unlink transactions and block group removal,
btrfs_start_transaction_fallback_global_rsv will first try to start an
ordinary transaction, and if that fails it will fall back to reserving the
required amount by stealing from the global reserve. This is problematic
for all the same reasons we had with previous iterations of the ENOSPC
handling: the thundering herd. We get a bunch of failures all at once,
everybody tries to allocate from the global reserve, some win and some
lose, and we get an ENOSPC.

Fix this behavior by introducing BTRFS_RESERVE_FLUSH_ALL_STEAL, which is
used to mark unlink reservations, and integrate the stealing logic into
the normal ENOSPC infrastructure. We still go through all of the normal
flushing work, and at the moment we begin to fail all the tickets we try
to satisfy any tickets that are allowed to steal by stealing from the
global reserve. If this works, we start the flushing system over again,
just like we would with a normal ticket satisfaction. This serializes our
global reserve stealing, so we don't have the thundering herd problem.
Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Tested-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent 590aad8835
commit 1e42a1857b
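The commit message above describes the new ordering in prose; the hunks below
contain the actual implementation. As a rough orientation only, here is a
minimal userspace sketch of the idea: tickets are handled head-first, and a
ticket marked for stealing gets one serialized chance at the global reserve
before tickets start failing. All names and numbers in it (try_steal,
struct ticket, the 512 MiB reserve) are invented stand-ins for illustration,
not the kernel API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Invented stand-ins for the kernel structures touched by this commit. */
struct ticket {
        uint64_t bytes;         /* bytes still owed to this reservation */
        bool steal;             /* set for BTRFS_RESERVE_FLUSH_ALL_STEAL callers */
};

struct global_rsv {
        uint64_t size;          /* target size of the global reserve */
        uint64_t reserved;      /* bytes currently backing it */
};

/* One serialized attempt at the global reserve for the head ticket. */
static bool try_steal(struct global_rsv *rsv, struct ticket *t)
{
        uint64_t min_bytes = rsv->size * 5 / 10;        /* keep at least half intact */

        if (!t->steal || rsv->reserved < min_bytes + t->bytes)
                return false;
        rsv->reserved -= t->bytes;
        t->bytes = 0;
        return true;
}

int main(void)
{
        struct global_rsv rsv = { .size = 512ULL << 20, .reserved = 300ULL << 20 };
        struct ticket queue[] = {
                { .bytes = 8ULL << 20,  .steal = true },        /* e.g. an unlink */
                { .bytes = 32ULL << 20, .steal = false },
        };

        /*
         * Flushing has been exhausted. Walk the queue head-first: a ticket
         * that may steal and fits under the floor is satisfied and flushing
         * restarts; any other ticket fails with ENOSPC.
         */
        for (unsigned int i = 0; i < sizeof(queue) / sizeof(queue[0]); i++) {
                if (try_steal(&rsv, &queue[i])) {
                        printf("ticket %u satisfied from the global reserve\n", i);
                        break;  /* back to the normal flushing state machine */
                }
                printf("ticket %u fails with ENOSPC\n", i);
        }
        return 0;
}

In the real patch the serialization point is maybe_fail_all_tickets(), which
only ever tries the head of the ticket list, as shown in the hunks below.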
@@ -1167,7 +1167,7 @@ struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
         free_extent_map(em);
 
         return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
-                                                           num_items, 1);
+                                                           num_items);
 }
 
 /*
@@ -2465,6 +2465,7 @@ enum btrfs_reserve_flush_enum {
         BTRFS_RESERVE_FLUSH_LIMIT,
         BTRFS_RESERVE_FLUSH_EVICT,
         BTRFS_RESERVE_FLUSH_ALL,
+        BTRFS_RESERVE_FLUSH_ALL_STEAL,
 };
 
 enum btrfs_flush_state {
@@ -4250,7 +4250,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir)
          * 1 for the inode ref
          * 1 for the inode
          */
-        return btrfs_start_transaction_fallback_global_rsv(root, 5, 5);
+        return btrfs_start_transaction_fallback_global_rsv(root, 5);
 }
 
 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
@@ -689,6 +689,34 @@ static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info,
                !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
 }
 
+static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
+                                  struct btrfs_space_info *space_info,
+                                  struct reserve_ticket *ticket)
+{
+        struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
+        u64 min_bytes;
+
+        if (global_rsv->space_info != space_info)
+                return false;
+
+        spin_lock(&global_rsv->lock);
+        min_bytes = div_factor(global_rsv->size, 5);
+        if (global_rsv->reserved < min_bytes + ticket->bytes) {
+                spin_unlock(&global_rsv->lock);
+                return false;
+        }
+        global_rsv->reserved -= ticket->bytes;
+        ticket->bytes = 0;
+        list_del_init(&ticket->list);
+        wake_up(&ticket->wait);
+        space_info->tickets_id++;
+        if (global_rsv->reserved < global_rsv->size)
+                global_rsv->full = 0;
+        spin_unlock(&global_rsv->lock);
+
+        return true;
+}
+
 /*
  * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets
  * @fs_info - fs_info for this fs
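One detail in this hunk that is easy to miss: div_factor() is the btrfs
helper that multiplies by the factor and divides by ten, so
div_factor(global_rsv->size, 5) works out to half the reserve's size, and
stealing is refused unless at least half of the global reserve would remain
reserved afterwards. A small standalone illustration of that threshold
(userspace C with invented numbers, not the kernel helper itself):

#include <stdint.h>
#include <stdio.h>

/* Userspace copy of the div_factor() arithmetic: num * factor / 10. */
static uint64_t div_factor(uint64_t num, int factor)
{
        return num * factor / 10;
}

int main(void)
{
        uint64_t size = 512ULL << 20;           /* 512 MiB global reserve */
        uint64_t reserved = 280ULL << 20;       /* currently reserved */
        uint64_t ticket_bytes = 16ULL << 20;    /* ticket that wants to steal */
        uint64_t min_bytes = div_factor(size, 5);       /* 256 MiB floor */

        /* Same refusal condition as steal_from_global_rsv() above. */
        if (reserved < min_bytes + ticket_bytes)
                printf("steal refused: only %llu MiB reserved\n",
                       (unsigned long long)(reserved >> 20));
        else
                printf("steal allowed: %llu MiB would remain\n",
                       (unsigned long long)((reserved - ticket_bytes) >> 20));
        return 0;
}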
@@ -721,6 +749,10 @@ static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
                 ticket = list_first_entry(&space_info->tickets,
                                           struct reserve_ticket, list);
 
+                if (ticket->steal &&
+                    steal_from_global_rsv(fs_info, space_info, ticket))
+                        return true;
+
                 /*
                  * may_commit_transaction will avoid committing the transaction
                  * if it doesn't feel like the space reclaimed by the commit
@@ -940,6 +972,7 @@ static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
 
         switch (flush) {
         case BTRFS_RESERVE_FLUSH_ALL:
+        case BTRFS_RESERVE_FLUSH_ALL_STEAL:
                 wait_reserve_ticket(fs_info, space_info, ticket);
                 break;
         case BTRFS_RESERVE_FLUSH_LIMIT:
@@ -1039,7 +1072,9 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
                 ticket.bytes = orig_bytes;
                 ticket.error = 0;
                 init_waitqueue_head(&ticket.wait);
-                if (flush == BTRFS_RESERVE_FLUSH_ALL) {
+                ticket.steal = (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
+                if (flush == BTRFS_RESERVE_FLUSH_ALL ||
+                    flush == BTRFS_RESERVE_FLUSH_ALL_STEAL) {
                         list_add_tail(&ticket.list, &space_info->tickets);
                         if (!space_info->flush) {
                                 space_info->flush = 1;
@@ -72,6 +72,7 @@ struct btrfs_space_info {
 struct reserve_ticket {
         u64 bytes;
         int error;
+        bool steal;
         struct list_head list;
         wait_queue_head_t wait;
 };
@@ -491,7 +491,8 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
          * refill that amount for whatever is missing in the reserve.
          */
         num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items);
-        if (delayed_refs_rsv->full == 0) {
+        if (flush == BTRFS_RESERVE_FLUSH_ALL &&
+            delayed_refs_rsv->full == 0) {
                 delayed_refs_bytes = num_bytes;
                 num_bytes <<= 1;
         }
@@ -627,43 +628,10 @@ struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
 
 struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
                                         struct btrfs_root *root,
-                                        unsigned int num_items,
-                                        int min_factor)
+                                        unsigned int num_items)
 {
-        struct btrfs_fs_info *fs_info = root->fs_info;
-        struct btrfs_trans_handle *trans;
-        u64 num_bytes;
-        int ret;
-
-        /*
-         * We have two callers: unlink and block group removal. The
-         * former should succeed even if we will temporarily exceed
-         * quota and the latter operates on the extent root so
-         * qgroup enforcement is ignored anyway.
-         */
-        trans = start_transaction(root, num_items, TRANS_START,
-                                  BTRFS_RESERVE_FLUSH_ALL, false);
-        if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
-                return trans;
-
-        trans = btrfs_start_transaction(root, 0);
-        if (IS_ERR(trans))
-                return trans;
-
-        num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items);
-        ret = btrfs_cond_migrate_bytes(fs_info, &fs_info->trans_block_rsv,
-                                       num_bytes, min_factor);
-        if (ret) {
-                btrfs_end_transaction(trans);
-                return ERR_PTR(ret);
-        }
-
-        trans->block_rsv = &fs_info->trans_block_rsv;
-        trans->bytes_reserved = num_bytes;
-        trace_btrfs_space_reservation(fs_info, "transaction",
-                                      trans->transid, num_bytes, 1);
-
-        return trans;
+        return start_transaction(root, num_items, TRANS_START,
+                                 BTRFS_RESERVE_FLUSH_ALL_STEAL, false);
 }
 
 struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
@@ -181,8 +181,7 @@ struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
                 unsigned int num_items);
 struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
                                         struct btrfs_root *root,
-                                        unsigned int num_items,
-                                        int min_factor);
+                                        unsigned int num_items);
 struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
 struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root);
 struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root);