btrfs: use bio op accessors
These are the easier cases of converting btrfs to bio_set_op_attrs/bio_op: mostly straightforward cut-and-replace changes.

Signed-off-by: Mike Christie <mchristi@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 1f7ad75b13
commit 37226b2111
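For context, the conversion replaces direct reads and writes of bio->bi_rw with the bio_op()/bio_set_op_attrs() accessors, which keep the request operation (REQ_OP_*) separate from the modifier flags. A minimal sketch of the pattern, assuming the bio API of this kernel era; the handle_write() helper and the REQ_SYNC flag in the example are purely illustrative, not taken from this diff:

    /* Old style: the operation and the flags were both encoded in bi_rw,
     * so callers assigned and tested the raw bitmask directly. */
    bio->bi_rw = WRITE | REQ_SYNC;
    if (bio->bi_rw & REQ_WRITE)
            handle_write(bio);          /* hypothetical helper */

    /* New style: bio_set_op_attrs() records the REQ_OP_* operation plus
     * its flags, and bio_op() queries the operation back. */
    bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC);
    if (bio_op(bio) == REQ_OP_WRITE)
            handle_write(bio);

The diff below applies exactly this substitution throughout btrfs.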
fs/btrfs/check-integrity.c
@@ -1673,7 +1673,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
 }
 bio->bi_bdev = block_ctx->dev->bdev;
 bio->bi_iter.bi_sector = dev_bytenr >> 9;
-bio->bi_rw = READ;
+bio_set_op_attrs(bio, REQ_OP_READ, 0);

 for (j = i; j < num_pages; j++) {
 ret = bio_add_page(bio, block_ctx->pagev[j],
@@ -2922,7 +2922,6 @@ int btrfsic_submit_bh(int op, int op_flags, struct buffer_head *bh)
 static void __btrfsic_submit_bio(struct bio *bio)
 {
 struct btrfsic_dev_state *dev_state;
-int rw = bio->bi_rw;

 if (!btrfsic_is_initialized)
 return;
@@ -2932,7 +2931,7 @@ static void __btrfsic_submit_bio(struct bio *bio)
 * btrfsic_mount(), this might return NULL */
 dev_state = btrfsic_dev_state_lookup(bio->bi_bdev);
 if (NULL != dev_state &&
-(rw & WRITE) && NULL != bio->bi_io_vec) {
+(bio_op(bio) == REQ_OP_WRITE) && NULL != bio->bi_io_vec) {
 unsigned int i;
 u64 dev_bytenr;
 u64 cur_bytenr;
@@ -2944,9 +2943,9 @@ static void __btrfsic_submit_bio(struct bio *bio)
 if (dev_state->state->print_mask &
 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
 printk(KERN_INFO
-"submit_bio(rw=0x%x, bi_vcnt=%u,"
+"submit_bio(rw=%d,0x%lx, bi_vcnt=%u,"
 " bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n",
-rw, bio->bi_vcnt,
+bio_op(bio), bio->bi_rw, bio->bi_vcnt,
 (unsigned long long)bio->bi_iter.bi_sector,
 dev_bytenr, bio->bi_bdev);

@@ -2977,18 +2976,18 @@ static void __btrfsic_submit_bio(struct bio *bio)
 btrfsic_process_written_block(dev_state, dev_bytenr,
 mapped_datav, bio->bi_vcnt,
 bio, &bio_is_patched,
-NULL, rw);
+NULL, bio->bi_rw);
 while (i > 0) {
 i--;
 kunmap(bio->bi_io_vec[i].bv_page);
 }
 kfree(mapped_datav);
-} else if (NULL != dev_state && (rw & REQ_FLUSH)) {
+} else if (NULL != dev_state && (bio->bi_rw & REQ_FLUSH)) {
 if (dev_state->state->print_mask &
 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
 printk(KERN_INFO
-"submit_bio(rw=0x%x FLUSH, bdev=%p)\n",
-rw, bio->bi_bdev);
+"submit_bio(rw=%d,0x%lx FLUSH, bdev=%p)\n",
+bio_op(bio), bio->bi_rw, bio->bi_bdev);
 if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
 if ((dev_state->state->print_mask &
 (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
@@ -3006,7 +3005,7 @@ static void __btrfsic_submit_bio(struct bio *bio)
 block->never_written = 0;
 block->iodone_w_error = 0;
 block->flush_gen = dev_state->last_flush_gen + 1;
-block->submit_bio_bh_rw = rw;
+block->submit_bio_bh_rw = bio->bi_rw;
 block->orig_bio_bh_private = bio->bi_private;
 block->orig_bio_bh_end_io.bio = bio->bi_end_io;
 block->next_in_same_bio = NULL;
fs/btrfs/compression.c
@@ -363,6 +363,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 kfree(cb);
 return -ENOMEM;
 }
+bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 bio->bi_private = cb;
 bio->bi_end_io = end_compressed_bio_write;
 atomic_inc(&cb->pending_bios);
@@ -408,6 +409,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,

 bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
 BUG_ON(!bio);
+bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 bio->bi_private = cb;
 bio->bi_end_io = end_compressed_bio_write;
 bio_add_page(bio, page, PAGE_SIZE, 0);
@@ -646,6 +648,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
 if (!comp_bio)
 goto fail2;
+bio_set_op_attrs (comp_bio, REQ_OP_READ, 0);
 comp_bio->bi_private = cb;
 comp_bio->bi_end_io = end_compressed_bio_read;
 atomic_inc(&cb->pending_bios);
@@ -699,6 +702,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 comp_bio = compressed_bio_alloc(bdev, cur_disk_byte,
 GFP_NOFS);
 BUG_ON(!comp_bio);
+bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
 comp_bio->bi_private = cb;
 comp_bio->bi_end_io = end_compressed_bio_read;

fs/btrfs/disk-io.c
@@ -727,7 +727,7 @@ static void end_workqueue_bio(struct bio *bio)
 fs_info = end_io_wq->info;
 end_io_wq->error = bio->bi_error;

-if (bio->bi_rw & REQ_WRITE) {
+if (bio_op(bio) == REQ_OP_WRITE) {
 if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
 wq = fs_info->endio_meta_write_workers;
 func = btrfs_endio_meta_write_helper;
@@ -873,7 +873,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,

 atomic_inc(&fs_info->nr_async_submits);

-if (rw & REQ_SYNC)
+if (bio->bi_rw & REQ_SYNC)
 btrfs_set_work_high_priority(&async->work);

 btrfs_queue_work(fs_info->workers, &async->work);
@@ -951,7 +951,7 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
 int async = check_async_write(inode, bio_flags);
 int ret;

-if (!(rw & REQ_WRITE)) {
+if (bio_op(bio) != REQ_OP_WRITE) {
 /*
 * called for a read, do the setup so that checksum validation
 * can happen in the async kernel threads
@@ -3486,7 +3486,7 @@ static int write_dev_flush(struct btrfs_device *device, int wait)

 bio->bi_end_io = btrfs_end_empty_barrier;
 bio->bi_bdev = device->bdev;
-bio->bi_rw = WRITE_FLUSH;
+bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
 init_completion(&device->flush_wait);
 bio->bi_private = &device->flush_wait;
 device->flush_bio = bio;
fs/btrfs/inode.c
@@ -1910,7 +1910,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
 if (btrfs_is_free_space_inode(inode))
 metadata = BTRFS_WQ_ENDIO_FREE_SPACE;

-if (!(rw & REQ_WRITE)) {
+if (bio_op(bio) != REQ_OP_WRITE) {
 ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
 if (ret)
 goto out;
@@ -7783,7 +7783,7 @@ static inline int submit_dio_repair_bio(struct inode *inode, struct bio *bio,
 struct btrfs_root *root = BTRFS_I(inode)->root;
 int ret;

-BUG_ON(rw & REQ_WRITE);
+BUG_ON(bio_op(bio) == REQ_OP_WRITE);

 bio_get(bio);

@@ -7843,7 +7843,7 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio,
 int read_mode;
 int ret;

-BUG_ON(failed_bio->bi_rw & REQ_WRITE);
+BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);

 ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
 if (ret)
@@ -7871,6 +7871,7 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio,
 free_io_failure(inode, failrec);
 return -EIO;
 }
+bio_set_op_attrs(bio, REQ_OP_READ, read_mode);

 btrfs_debug(BTRFS_I(inode)->root->fs_info,
 "Repair DIO Read Error: submitting new dio read[%#x] to this_mirror=%d, in_validation=%d\n",
@@ -8185,8 +8186,8 @@ static void btrfs_end_dio_bio(struct bio *bio)

 if (err)
 btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
-"direct IO failed ino %llu rw %lu sector %#Lx len %u err no %d",
-btrfs_ino(dip->inode), bio->bi_rw,
+"direct IO failed ino %llu rw %d,%lu sector %#Lx len %u err no %d",
+btrfs_ino(dip->inode), bio_op(bio), bio->bi_rw,
 (unsigned long long)bio->bi_iter.bi_sector,
 bio->bi_iter.bi_size, err);

@@ -8264,7 +8265,7 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
 int async_submit)
 {
 struct btrfs_dio_private *dip = bio->bi_private;
-int write = rw & REQ_WRITE;
+bool write = bio_op(bio) == REQ_OP_WRITE;
 struct btrfs_root *root = BTRFS_I(inode)->root;
 int ret;

@@ -8330,8 +8331,8 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
 int i;

 map_length = orig_bio->bi_iter.bi_size;
-ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
-&map_length, NULL, 0);
+ret = btrfs_map_block(root->fs_info, bio_op(orig_bio),
+start_sector << 9, &map_length, NULL, 0);
 if (ret)
 return -EIO;

@@ -8351,6 +8352,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
 if (!bio)
 return -ENOMEM;

+bio_set_op_attrs(bio, bio_op(orig_bio), orig_bio->bi_rw);
 bio->bi_private = dip;
 bio->bi_end_io = btrfs_end_dio_bio;
 btrfs_io_bio(bio)->logical = file_offset;
@@ -8388,12 +8390,13 @@ next_block:
 start_sector, GFP_NOFS);
 if (!bio)
 goto out_err;
+bio_set_op_attrs(bio, bio_op(orig_bio), orig_bio->bi_rw);
 bio->bi_private = dip;
 bio->bi_end_io = btrfs_end_dio_bio;
 btrfs_io_bio(bio)->logical = file_offset;

 map_length = orig_bio->bi_iter.bi_size;
-ret = btrfs_map_block(root->fs_info, rw,
+ret = btrfs_map_block(root->fs_info, bio_op(orig_bio),
 start_sector << 9,
 &map_length, NULL, 0);
 if (ret) {
fs/btrfs/raid56.c
@@ -1320,7 +1320,7 @@ write_data:

 bio->bi_private = rbio;
 bio->bi_end_io = raid_write_end_io;
-bio->bi_rw = WRITE;
+bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

 submit_bio(bio);
 }
@@ -1575,7 +1575,7 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)

 bio->bi_private = rbio;
 bio->bi_end_io = raid_rmw_end_io;
-bio->bi_rw = READ;
+bio_set_op_attrs(bio, REQ_OP_READ, 0);

 btrfs_bio_wq_end_io(rbio->fs_info, bio,
 BTRFS_WQ_ENDIO_RAID56);
@@ -2100,7 +2100,7 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)

 bio->bi_private = rbio;
 bio->bi_end_io = raid_recover_end_io;
-bio->bi_rw = READ;
+bio_set_op_attrs(bio, REQ_OP_READ, 0);

 btrfs_bio_wq_end_io(rbio->fs_info, bio,
 BTRFS_WQ_ENDIO_RAID56);
@@ -2437,7 +2437,7 @@ submit_write:

 bio->bi_private = rbio;
 bio->bi_end_io = raid_write_end_io;
-bio->bi_rw = WRITE;
+bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

 submit_bio(bio);
 }
@@ -2616,7 +2616,7 @@ static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)

 bio->bi_private = rbio;
 bio->bi_end_io = raid56_parity_scrub_end_io;
-bio->bi_rw = READ;
+bio_set_op_attrs(bio, REQ_OP_READ, 0);

 btrfs_bio_wq_end_io(rbio->fs_info, bio,
 BTRFS_WQ_ENDIO_RAID56);
fs/btrfs/scrub.c
@@ -1504,7 +1504,7 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
 sblock->no_io_error_seen = 0;
 } else {
 bio->bi_iter.bi_sector = page->physical >> 9;
-bio->bi_rw = READ;
+bio_set_op_attrs(bio, REQ_OP_READ, 0);

 if (btrfsic_submit_bio_wait(bio))
 sblock->no_io_error_seen = 0;
@@ -1584,7 +1584,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
 return -EIO;
 bio->bi_bdev = page_bad->dev->bdev;
 bio->bi_iter.bi_sector = page_bad->physical >> 9;
-bio->bi_rw = WRITE;
+bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

 ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
 if (PAGE_SIZE != ret) {
@@ -1686,7 +1686,7 @@ again:
 bio->bi_end_io = scrub_wr_bio_end_io;
 bio->bi_bdev = sbio->dev->bdev;
 bio->bi_iter.bi_sector = sbio->physical >> 9;
-bio->bi_rw = WRITE;
+bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 sbio->err = 0;
 } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
 spage->physical_for_dev_replace ||
@@ -2091,7 +2091,7 @@ again:
 bio->bi_end_io = scrub_bio_end_io;
 bio->bi_bdev = sbio->dev->bdev;
 bio->bi_iter.bi_sector = sbio->physical >> 9;
-bio->bi_rw = READ;
+bio_set_op_attrs(bio, REQ_OP_READ, 0);
 sbio->err = 0;
 } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
 spage->physical ||
@@ -4440,7 +4440,7 @@ static int write_page_nocow(struct scrub_ctx *sctx,
 bio->bi_iter.bi_size = 0;
 bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
 bio->bi_bdev = dev->bdev;
-bio->bi_rw = WRITE_SYNC;
+bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC);
 ret = bio_add_page(bio, page, PAGE_SIZE, 0);
 if (ret != PAGE_SIZE) {
 leave_with_eio:
fs/btrfs/volumes.c
@@ -5942,7 +5942,7 @@ static void btrfs_end_bio(struct bio *bio)
 BUG_ON(stripe_index >= bbio->num_stripes);
 dev = bbio->stripes[stripe_index].dev;
 if (dev->bdev) {
-if (bio->bi_rw & WRITE)
+if (bio_op(bio) == REQ_OP_WRITE)
 btrfs_dev_stat_inc(dev,
 BTRFS_DEV_STAT_WRITE_ERRS);
 else
@@ -6007,7 +6007,7 @@ static noinline void btrfs_schedule_bio(struct btrfs_root *root,
 }

 /* don't bother with additional async steps for reads, right now */
-if (!(bio->bi_rw & REQ_WRITE)) {
+if (bio_op(bio) == REQ_OP_READ) {
 bio_get(bio);
 btrfsic_submit_bio(bio);
 bio_put(bio);
@@ -6111,8 +6111,8 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
 map_length = length;

 btrfs_bio_counter_inc_blocked(root->fs_info);
-ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
-mirror_num, 1);
+ret = __btrfs_map_block(root->fs_info, bio_op(bio), logical,
+&map_length, &bbio, mirror_num, 1);
 if (ret) {
 btrfs_bio_counter_dec(root->fs_info);
 return ret;
@@ -6126,10 +6126,10 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
 atomic_set(&bbio->stripes_pending, bbio->num_stripes);

 if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
-((rw & WRITE) || (mirror_num > 1))) {
+((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) {
 /* In this case, map_length has been set to the length of
 a single stripe; not the whole write */
-if (rw & WRITE) {
+if (bio_op(bio) == REQ_OP_WRITE) {
 ret = raid56_parity_write(root, bio, bbio, map_length);
 } else {
 ret = raid56_parity_recover(root, bio, bbio, map_length,
@@ -6148,7 +6148,8 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,

 for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
 dev = bbio->stripes[dev_nr].dev;
-if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
+if (!dev || !dev->bdev ||
+(bio_op(bio) == REQ_OP_WRITE && !dev->writeable)) {
 bbio_error(bbio, first_bio, logical);
 continue;
 }