btrfs: simplify parameters of btrfs_lookup_bio_sums

The csums argument is always NULL now, so remove it and always allocate
the csums array in the btrfs_bio.  Also pass the btrfs_bio instead of
inode + bio to document that this function requires a btrfs_bio and
not just any bio.

Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Anand Jain <anand.jain@oracle.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
commit 4ae2edf12d (parent 5fa356531e)
Author:    Christoph Hellwig, 2023-01-21 07:50:02 +01:00
Committer: David Sterba
4 changed files with 25 additions and 37 deletions
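
To make the shape of the interface change easier to see before reading the diff, here is a small stand-alone C sketch of the same idea. All names (demo_bio, demo_inode, demo_lookup_sums) are hypothetical, not the real kernel types: the point is only that the lookup helper now takes the container object, which already carries a pointer to its inode and owns the checksum buffer, so the old (inode, bio, dst) parameter triple disappears.

/* Stand-alone sketch, not kernel code: all names are hypothetical. */
#include <stdio.h>

struct demo_inode {
	int nodatasum;			/* stands in for BTRFS_INODE_NODATASUM */
};

struct demo_bio {
	struct demo_inode *inode;	/* the container knows its inode ... */
	unsigned char csum[32];		/* ... and owns its checksum buffer */
};

/* Before: demo_lookup_sums(inode, bio, dst).  After: just the container. */
static int demo_lookup_sums(struct demo_bio *bbio)
{
	if (bbio->inode->nodatasum)
		return 0;		/* nothing to look up */
	bbio->csum[0] = 0xab;		/* pretend a checksum was found */
	return 0;
}

int main(void)
{
	struct demo_inode inode = { .nodatasum = 0 };
	struct demo_bio bbio = { .inode = &inode };

	printf("ret=%d csum[0]=0x%02x\n", demo_lookup_sums(&bbio), bbio.csum[0]);
	return 0;
}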

fs/btrfs/compression.c

@@ -801,7 +801,7 @@ void btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 		 */
 		btrfs_bio(comp_bio)->file_offset = file_offset;
 
-		ret = btrfs_lookup_bio_sums(inode, comp_bio, NULL);
+		ret = btrfs_lookup_bio_sums(btrfs_bio(comp_bio));
 		if (ret) {
 			btrfs_bio_end_io(btrfs_bio(comp_bio), ret);
 			break;

fs/btrfs/file-item.c

@@ -380,32 +380,25 @@ static int search_file_offset_in_bio(struct bio *bio, struct inode *inode,
 /*
  * Lookup the checksum for the read bio in csum tree.
  *
- * @inode: inode that the bio is for.
- * @bio: bio to look up.
- * @dst: Buffer of size nblocks * btrfs_super_csum_size() used to return
- *       checksum (nblocks = bio->bi_iter.bi_size / fs_info->sectorsize). If
- *       NULL, the checksum buffer is allocated and returned in
- *       btrfs_bio(bio)->csum instead.
- *
  * Return: BLK_STS_RESOURCE if allocating memory fails, BLK_STS_OK otherwise.
  */
-blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u8 *dst)
+blk_status_t btrfs_lookup_bio_sums(struct btrfs_bio *bbio)
 {
-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
-	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
-	struct btrfs_bio *bbio = NULL;
+	struct btrfs_inode *inode = bbio->inode;
+	struct btrfs_fs_info *fs_info = inode->root->fs_info;
+	struct extent_io_tree *io_tree = &inode->io_tree;
+	struct bio *bio = &bbio->bio;
 	struct btrfs_path *path;
 	const u32 sectorsize = fs_info->sectorsize;
 	const u32 csum_size = fs_info->csum_size;
 	u32 orig_len = bio->bi_iter.bi_size;
 	u64 orig_disk_bytenr = bio->bi_iter.bi_sector << SECTOR_SHIFT;
 	u64 cur_disk_bytenr;
-	u8 *csum;
 	const unsigned int nblocks = orig_len >> fs_info->sectorsize_bits;
 	int count = 0;
 	blk_status_t ret = BLK_STS_OK;
 
-	if ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) ||
+	if ((inode->flags & BTRFS_INODE_NODATASUM) ||
 	    test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state))
 		return BLK_STS_OK;
@@ -426,21 +419,14 @@ blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u8 *dst
 	if (!path)
 		return BLK_STS_RESOURCE;
 
-	if (!dst) {
-		bbio = btrfs_bio(bio);
-
-		if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
-			bbio->csum = kmalloc_array(nblocks, csum_size, GFP_NOFS);
-			if (!bbio->csum) {
-				btrfs_free_path(path);
-				return BLK_STS_RESOURCE;
-			}
-		} else {
-			bbio->csum = bbio->csum_inline;
+	if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
+		bbio->csum = kmalloc_array(nblocks, csum_size, GFP_NOFS);
+		if (!bbio->csum) {
+			btrfs_free_path(path);
+			return BLK_STS_RESOURCE;
 		}
-		csum = bbio->csum;
 	} else {
-		csum = dst;
+		bbio->csum = bbio->csum_inline;
 	}
 
 	/*
@@ -456,7 +442,7 @@ blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u8 *dst
 	 * read from the commit root and sidestep a nasty deadlock
 	 * between reading the free space cache and updating the csum tree.
 	 */
-	if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
+	if (btrfs_is_free_space_inode(inode)) {
 		path->search_commit_root = 1;
 		path->skip_locking = 1;
 	}
@@ -479,14 +465,15 @@ blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u8 *dst
 		ASSERT(cur_disk_bytenr - orig_disk_bytenr < UINT_MAX);
 		sector_offset = (cur_disk_bytenr - orig_disk_bytenr) >>
 				fs_info->sectorsize_bits;
-		csum_dst = csum + sector_offset * csum_size;
+		csum_dst = bbio->csum + sector_offset * csum_size;
 
 		count = search_csum_tree(fs_info, path, cur_disk_bytenr,
 					 search_len, csum_dst);
 		if (count < 0) {
 			ret = errno_to_blk_status(count);
-			if (bbio)
-				btrfs_bio_free_csum(bbio);
+			if (bbio->csum != bbio->csum_inline)
+				kfree(bbio->csum);
+			bbio->csum = NULL;
 			break;
 		}
@@ -504,12 +491,13 @@ blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u8 *dst
 			memset(csum_dst, 0, csum_size);
 			count = 1;
 
-			if (BTRFS_I(inode)->root->root_key.objectid ==
+			if (inode->root->root_key.objectid ==
 			    BTRFS_DATA_RELOC_TREE_OBJECTID) {
 				u64 file_offset;
 				int ret;
 
-				ret = search_file_offset_in_bio(bio, inode,
+				ret = search_file_offset_in_bio(bio,
+						&inode->vfs_inode,
 						cur_disk_bytenr, &file_offset);
 				if (ret)
 					set_extent_bits(io_tree, file_offset,
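
The hunks above also expose the allocation policy that used to hide behind the dst == NULL case: when the per-bio checksum array fits in the inline buffer embedded in the btrfs_bio it is used directly, otherwise a heap buffer is allocated with kmalloc_array() and, on the error path, freed again only if it is not the inline one. Below is a stand-alone C sketch of that inline-or-heap buffer pattern, using hypothetical names (demo_bbio, demo_alloc_csums, demo_free_csums) and plain calloc/free in place of the kernel allocators.

#include <stdio.h>
#include <stdlib.h>

#define DEMO_INLINE_CSUM_SIZE 64	/* plays the role of BTRFS_BIO_INLINE_CSUM_SIZE */

struct demo_bbio {
	unsigned char *csum;				/* points at inline or heap storage */
	unsigned char csum_inline[DEMO_INLINE_CSUM_SIZE];
};

static int demo_alloc_csums(struct demo_bbio *bbio, size_t nblocks, size_t csum_size)
{
	if (nblocks * csum_size > DEMO_INLINE_CSUM_SIZE) {
		bbio->csum = calloc(nblocks, csum_size);	/* large array: fall back to the heap */
		if (!bbio->csum)
			return -1;				/* like returning BLK_STS_RESOURCE */
	} else {
		bbio->csum = bbio->csum_inline;			/* small array: reuse the embedded buffer */
	}
	return 0;
}

static void demo_free_csums(struct demo_bbio *bbio)
{
	/* mirrors the error path above: only free what was heap allocated */
	if (bbio->csum != bbio->csum_inline)
		free(bbio->csum);
	bbio->csum = NULL;
}

int main(void)
{
	struct demo_bbio bbio = { 0 };

	if (demo_alloc_csums(&bbio, 256, 32) == 0) {	/* 8 KiB of csums, forces the heap path */
		printf("heap buffer? %s\n",
		       bbio.csum != bbio.csum_inline ? "yes" : "no");
		demo_free_csums(&bbio);
	}
	return 0;
}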

fs/btrfs/file-item.h

@@ -38,7 +38,7 @@ static inline u32 btrfs_file_extent_calc_inline_size(u32 datasize)
 int btrfs_del_csums(struct btrfs_trans_handle *trans,
 		    struct btrfs_root *root, u64 bytenr, u64 len);
-blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u8 *dst);
+blk_status_t btrfs_lookup_bio_sums(struct btrfs_bio *bbio);
 int btrfs_insert_hole_extent(struct btrfs_trans_handle *trans,
 			     struct btrfs_root *root, u64 objectid, u64 pos,
 			     u64 num_bytes);

fs/btrfs/inode.c

@@ -2780,7 +2780,7 @@ void btrfs_submit_data_read_bio(struct btrfs_inode *inode, struct bio *bio,
 	 * Lookup bio sums does extra checks around whether we need to csum or
 	 * not, which is why we ignore skip_sum here.
 	 */
-	ret = btrfs_lookup_bio_sums(&inode->vfs_inode, bio, NULL);
+	ret = btrfs_lookup_bio_sums(btrfs_bio(bio));
 	if (ret) {
 		btrfs_bio_end_io(btrfs_bio(bio), ret);
 		return;
@@ -8012,7 +8012,7 @@ static void btrfs_submit_dio_bio(struct bio *bio, struct btrfs_inode *inode,
 			return;
 		}
 	} else {
-		ret = btrfs_lookup_bio_sums(&inode->vfs_inode, bio, NULL);
+		ret = btrfs_lookup_bio_sums(btrfs_bio(bio));
 		if (ret) {
 			btrfs_bio_end_io(btrfs_bio(bio), ret);
 			return;
@@ -10279,7 +10279,7 @@ static blk_status_t submit_encoded_read_bio(struct btrfs_inode *inode,
 	blk_status_t ret;
 
 	if (!priv->skip_csum) {
-		ret = btrfs_lookup_bio_sums(&inode->vfs_inode, bio, NULL);
+		ret = btrfs_lookup_bio_sums(btrfs_bio(bio));
 		if (ret)
 			return ret;
 	}