btrfs: handle allocation failure in btrfs_wq_submit_bio gracefully
btrfs_wq_submit_bio is used for writeback under memory pressure. Instead of
failing the I/O when we can't allocate the async_submit_bio, just punt back
to the synchronous submission path.

Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Tested-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 82443fd55c
commit ea1f0cedef
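As a minimal sketch of the fallback described above (not btrfs code: try_async_submit() and submit_sync() are hypothetical stand-ins for btrfs_wq_submit_bio() and the synchronous submission path), the idea is simply that an allocation failure in the async helper degrades to submitting in the caller's context instead of failing the I/O:

/*
 * Minimal userspace sketch of the fallback pattern, not kernel code:
 * try_async_submit() and submit_sync() are hypothetical stand-ins for
 * btrfs_wq_submit_bio() and the synchronous submission path.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_bio {
	const char *data;
};

/*
 * Pretend to queue the bio to a worker.  Returns false when the helper
 * structure cannot be allocated (the GFP_NOFS kmalloc failure case in the
 * patch), so the caller can fall back instead of failing the I/O.
 */
static bool try_async_submit(struct fake_bio *bio)
{
	void *async = malloc(64);	/* stands in for async_submit_bio */

	if (!async)
		return false;
	printf("async submit: %s\n", bio->data);
	free(async);
	return true;
}

/* Synchronous path: do the work directly in the caller's context. */
static void submit_sync(struct fake_bio *bio)
{
	printf("sync submit: %s\n", bio->data);
}

int main(void)
{
	struct fake_bio bio = { .data = "metadata write" };

	/* Return early only when the async submission really was queued. */
	if (try_async_submit(&bio))
		return 0;

	submit_sync(&bio);
	return 0;
}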
fs/btrfs/disk-io.c

@@ -759,16 +759,23 @@ static void run_one_async_free(struct btrfs_work *work)
 	kfree(async);
 }
 
-blk_status_t btrfs_wq_submit_bio(struct inode *inode, struct bio *bio,
-				 int mirror_num, u64 dio_file_offset,
-				 extent_submit_bio_start_t *submit_bio_start)
+/*
+ * Submit bio to an async queue.
+ *
+ * Return:
+ * - true if the work has been successfully submitted
+ * - false in case of error
+ */
+bool btrfs_wq_submit_bio(struct inode *inode, struct bio *bio, int mirror_num,
+			 u64 dio_file_offset,
+			 extent_submit_bio_start_t *submit_bio_start)
 {
 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
 	struct async_submit_bio *async;
 
 	async = kmalloc(sizeof(*async), GFP_NOFS);
 	if (!async)
-		return BLK_STS_RESOURCE;
+		return false;
 
 	async->inode = inode;
 	async->bio = bio;
@@ -786,7 +793,7 @@ blk_status_t btrfs_wq_submit_bio(struct inode *inode, struct bio *bio,
 		btrfs_queue_work(fs_info->hipri_workers, &async->work);
 	else
 		btrfs_queue_work(fs_info->workers, &async->work);
-	return 0;
+	return true;
 }
 
 static blk_status_t btree_csum_one_bio(struct bio *bio)
@@ -840,25 +847,23 @@ void btrfs_submit_metadata_bio(struct inode *inode, struct bio *bio, int mirror_
 		btrfs_submit_bio(fs_info, bio, mirror_num);
 		return;
 	}
-	if (!should_async_write(fs_info, BTRFS_I(inode))) {
-		ret = btree_csum_one_bio(bio);
-		if (!ret) {
-			btrfs_submit_bio(fs_info, bio, mirror_num);
-			return;
-		}
-	} else {
-		/*
-		 * kthread helpers are used to submit writes so that
-		 * checksumming can happen in parallel across all CPUs
-		 */
-		ret = btrfs_wq_submit_bio(inode, bio, mirror_num, 0,
-					  btree_submit_bio_start);
-	}
 
+	/*
+	 * Kthread helpers are used to submit writes so that checksumming can
+	 * happen in parallel across all CPUs.
+	 */
+	if (should_async_write(fs_info, BTRFS_I(inode)) &&
+	    btrfs_wq_submit_bio(inode, bio, mirror_num, 0, btree_submit_bio_start))
+		return;
+
+	ret = btree_csum_one_bio(bio);
 	if (ret) {
 		bio->bi_status = ret;
 		bio_endio(bio);
+		return;
 	}
+
+	btrfs_submit_bio(fs_info, bio, mirror_num);
 }
 
 #ifdef CONFIG_MIGRATION
fs/btrfs/disk-io.h

@@ -114,9 +114,9 @@ int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
 			  int atomic);
 int btrfs_read_extent_buffer(struct extent_buffer *buf, u64 parent_transid,
 			     int level, struct btrfs_key *first_key);
-blk_status_t btrfs_wq_submit_bio(struct inode *inode, struct bio *bio,
-				 int mirror_num, u64 dio_file_offset,
-				 extent_submit_bio_start_t *submit_bio_start);
+bool btrfs_wq_submit_bio(struct inode *inode, struct bio *bio, int mirror_num,
+			 u64 dio_file_offset,
+			 extent_submit_bio_start_t *submit_bio_start);
 blk_status_t btrfs_submit_bio_done(void *private_data, struct bio *bio,
 				   int mirror_num);
 int btrfs_alloc_log_tree_node(struct btrfs_trans_handle *trans,
fs/btrfs/inode.c

@@ -2674,11 +2674,10 @@ void btrfs_submit_data_write_bio(struct inode *inode, struct bio *bio, int mirro
 	if (!(bi->flags & BTRFS_INODE_NODATASUM) &&
 	    !test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state) &&
 	    !btrfs_is_data_reloc_root(bi->root)) {
-		if (!atomic_read(&bi->sync_writers)) {
-			ret = btrfs_wq_submit_bio(inode, bio, mirror_num, 0,
-					btrfs_submit_bio_start);
-			goto out;
-		}
+		if (!atomic_read(&bi->sync_writers) &&
+		    btrfs_wq_submit_bio(inode, bio, mirror_num, 0,
+					btrfs_submit_bio_start))
+			return;
 
 		ret = btrfs_csum_one_bio(bi, bio, (u64)-1, false);
 		if (ret)
@@ -8027,9 +8026,11 @@ static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio,
 
 	if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
 		/* Check btrfs_submit_data_write_bio() for async submit rules */
-		if (async_submit && !atomic_read(&BTRFS_I(inode)->sync_writers))
-			return btrfs_wq_submit_bio(inode, bio, 0, file_offset,
-					btrfs_submit_bio_start_direct_io);
+		if (async_submit && !atomic_read(&BTRFS_I(inode)->sync_writers) &&
+		    btrfs_wq_submit_bio(inode, bio, 0, file_offset,
+					btrfs_submit_bio_start_direct_io))
+			return BLK_STS_OK;
+
 		/*
 		 * If we aren't doing async submit, calculate the csum of the
 		 * bio now.