
btrfs: remove btrfs_bio_alloc() helper

The helper btrfs_bio_alloc() is almost the same as btrfs_io_bio_alloc(),
except that it always allocates with BIO_MAX_VECS as @nr_iovecs and also
initializes bio->bi_iter.bi_sector.

However, the name does not use "btrfs_io_bio" to indicate that the bio it
allocates is backed by "struct btrfs_io_bio", so it can easily be confused
with "struct btrfs_bio".

Considering that assigning bio->bi_iter.bi_sector is such simple work, and
there are already plenty of call sites doing it manually, there is no need
to do it in a helper.

Remove the btrfs_bio_alloc() helper, and tighten btrfs_io_bio_alloc() so
that it asserts its @nr_iovecs argument is within a sane range
(1 to BIO_MAX_VECS).
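
To illustrate the tightened contract (a sketch only, based on the
fs/btrfs/scrub.c hunks below, not code added by this patch): @nr_iovecs
must now be an explicit count in the range 1..BIO_MAX_VECS, so call sites
that used to pass 0 switch to BIO_MAX_VECS.

	/* Old: asked for a bio with zero inline bio_vecs. */
	bio = btrfs_io_bio_alloc(0);

	/* New: the count must be 1..BIO_MAX_VECS, or the new ASSERT() fires. */
	bio = btrfs_io_bio_alloc(BIO_MAX_VECS);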

And then replace all btrfs_bio_alloc() callers with
btrfs_io_bio_alloc().
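
For illustration, the conversion pattern applied at each call site (a
sketch distilled from the fs/btrfs/compression.c hunks below; first_byte
is the disk byte offset variable used there):

	/* Old: the helper hard-coded BIO_MAX_VECS and derived the sector itself. */
	bio = btrfs_bio_alloc(first_byte);

	/* New: the caller picks the iovec count and sets the sector (512-byte units). */
	bio = btrfs_io_bio_alloc(BIO_MAX_VECS);
	bio->bi_iter.bi_sector = first_byte >> SECTOR_SHIFT;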

Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Qu Wenruo 2021-09-15 15:17:17 +08:00 committed by David Sterba
parent 4c66461179
commit cd8e0cca95
4 changed files with 19 additions and 25 deletions

fs/btrfs/compression.c

@@ -418,7 +418,8 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
 	cb->orig_bio = NULL;
 	cb->nr_pages = nr_pages;
 
-	bio = btrfs_bio_alloc(first_byte);
+	bio = btrfs_io_bio_alloc(BIO_MAX_VECS);
+	bio->bi_iter.bi_sector = first_byte >> SECTOR_SHIFT;
 	bio->bi_opf = bio_op | write_flags;
 	bio->bi_private = cb;
 	bio->bi_end_io = end_compressed_bio_write;
@@ -490,7 +491,8 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
 				bio_endio(bio);
 			}
 
-			bio = btrfs_bio_alloc(first_byte);
+			bio = btrfs_io_bio_alloc(BIO_MAX_VECS);
+			bio->bi_iter.bi_sector = first_byte >> SECTOR_SHIFT;
 			bio->bi_opf = bio_op | write_flags;
 			bio->bi_private = cb;
 			bio->bi_end_io = end_compressed_bio_write;
@@ -748,7 +750,8 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	/* include any pages we added in add_ra-bio_pages */
 	cb->len = bio->bi_iter.bi_size;
 
-	comp_bio = btrfs_bio_alloc(cur_disk_byte);
+	comp_bio = btrfs_io_bio_alloc(BIO_MAX_VECS);
+	comp_bio->bi_iter.bi_sector = cur_disk_byte >> SECTOR_SHIFT;
 	comp_bio->bi_opf = REQ_OP_READ;
 	comp_bio->bi_private = cb;
 	comp_bio->bi_end_io = end_compressed_bio_read;
@@ -806,7 +809,8 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 				bio_endio(comp_bio);
 			}
 
-			comp_bio = btrfs_bio_alloc(cur_disk_byte);
+			comp_bio = btrfs_io_bio_alloc(BIO_MAX_VECS);
+			comp_bio->bi_iter.bi_sector = cur_disk_byte >> SECTOR_SHIFT;
 			comp_bio->bi_opf = REQ_OP_READ;
 			comp_bio->bi_private = cb;
 			comp_bio->bi_end_io = end_compressed_bio_read;

fs/btrfs/extent_io.c

@@ -3121,16 +3121,16 @@ static inline void btrfs_io_bio_init(struct btrfs_io_bio *btrfs_bio)
 }
 
 /*
- * The following helpers allocate a bio. As it's backed by a bioset, it'll
- * never fail. We're returning a bio right now but you can call btrfs_io_bio
- * for the appropriate container_of magic
+ * Allocate a btrfs_io_bio, with @nr_iovecs as maximum number of iovecs.
+ *
+ * The bio allocation is backed by bioset and does not fail.
  */
-struct bio *btrfs_bio_alloc(u64 first_byte)
+struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs)
 {
 	struct bio *bio;
 
-	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_VECS, &btrfs_bioset);
-	bio->bi_iter.bi_sector = first_byte >> 9;
+	ASSERT(0 < nr_iovecs && nr_iovecs <= BIO_MAX_VECS);
+	bio = bio_alloc_bioset(GFP_NOFS, nr_iovecs, &btrfs_bioset);
 	btrfs_io_bio_init(btrfs_io_bio(bio));
 	return bio;
 }
@@ -3148,16 +3148,6 @@ struct bio *btrfs_bio_clone(struct bio *bio)
 	return new;
 }
 
-struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs)
-{
-	struct bio *bio;
-
-	/* Bio allocation backed by a bioset does not fail */
-	bio = bio_alloc_bioset(GFP_NOFS, nr_iovecs, &btrfs_bioset);
-	btrfs_io_bio_init(btrfs_io_bio(bio));
-	return bio;
-}
-
 struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size)
 {
 	struct bio *bio;
@@ -3307,14 +3297,15 @@ static int alloc_new_bio(struct btrfs_inode *inode,
 	struct bio *bio;
 	int ret;
 
+	bio = btrfs_io_bio_alloc(BIO_MAX_VECS);
 	/*
 	 * For compressed page range, its disk_bytenr is always @disk_bytenr
 	 * passed in, no matter if we have added any range into previous bio.
 	 */
 	if (bio_flags & EXTENT_BIO_COMPRESSED)
-		bio = btrfs_bio_alloc(disk_bytenr);
+		bio->bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
 	else
-		bio = btrfs_bio_alloc(disk_bytenr + offset);
+		bio->bi_iter.bi_sector = (disk_bytenr + offset) >> SECTOR_SHIFT;
 	bio_ctrl->bio = bio;
 	bio_ctrl->bio_flags = bio_flags;
 	bio->bi_end_io = end_io_func;

fs/btrfs/extent_io.h

@@ -278,7 +278,6 @@ void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
 void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
				   struct page *locked_page,
				   u32 bits_to_clear, unsigned long page_ops);
-struct bio *btrfs_bio_alloc(u64 first_byte);
 struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs);
 struct bio *btrfs_bio_clone(struct bio *bio);
 struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size);

fs/btrfs/scrub.c

@@ -2225,7 +2225,7 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock)
 		goto bioc_out;
 	}
 
-	bio = btrfs_io_bio_alloc(0);
+	bio = btrfs_io_bio_alloc(BIO_MAX_VECS);
 	bio->bi_iter.bi_sector = logical >> 9;
 	bio->bi_private = sblock;
 	bio->bi_end_io = scrub_missing_raid56_end_io;
@@ -2841,7 +2841,7 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
 	if (ret || !bioc || !bioc->raid_map)
 		goto bioc_out;
 
-	bio = btrfs_io_bio_alloc(0);
+	bio = btrfs_io_bio_alloc(BIO_MAX_VECS);
 	bio->bi_iter.bi_sector = sparity->logic_start >> 9;
 	bio->bi_private = sparity;
 	bio->bi_end_io = scrub_parity_bio_endio;