btrfs: zoned: use ZONE_APPEND write for zoned mode
Enable zone append writing for zoned mode. When using zone append, a bio is issued to the start of a target zone and the device decides where to place it inside the zone. Upon completion the device reports the actual written position back to the host.

Three parts are necessary to enable zone append mode. First, modify the bio to use REQ_OP_ZONE_APPEND in btrfs_submit_bio_hook() and adjust bi_sector to point to the beginning of the zone.

Second, record the returned physical address (and disk/partno) in the ordered extent in end_bio_extent_writepage() after the bio has completed. We can neither take locks nor allocate a buffer in this end_bio context, so we cannot resolve the physical address to the logical address there; instead, record the physical address and resolve it later in btrfs_finish_ordered_io().

Finally, rewrite the logical addresses of the extent mapping and checksum data according to the physical address using btrfs_rmap_block. If the returned address matches the originally allocated address, this rewriting step can be skipped.

Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
commit d8e3fb106f (parent 24533f6a9a)
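For orientation before the diff: the mechanism the patch relies on is that a ZONE_APPEND bio is submitted pointing at the start of the zone, and on completion the block layer hands back the device-chosen write position in bio->bi_iter.bi_sector. The following minimal sketch is not part of this commit (the function name is illustrative); it only shows that reporting convention, which btrfs_record_physical_zoned() in the diff below converts to a byte address.

#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Illustrative end_io handler (not in the patch): for a REQ_OP_ZONE_APPEND
 * bio, bi_iter.bi_sector holds the sector the device actually wrote to,
 * not the sector that was submitted.
 */
static void example_zone_append_end_io(struct bio *bio)
{
	/* Device-chosen position, converted from 512-byte sectors to bytes */
	u64 physical = (u64)bio->bi_iter.bi_sector << SECTOR_SHIFT;

	pr_debug("zone append landed at byte offset %llu\n", physical);
	bio_put(bio);
}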
@@ -2735,6 +2735,7 @@ static void end_bio_extent_writepage(struct bio *bio)
 	u64 start;
 	u64 end;
 	struct bvec_iter_all iter_all;
+	bool first_bvec = true;
 
 	ASSERT(!bio_flagged(bio, BIO_CLONED));
 	bio_for_each_segment_all(bvec, bio, iter_all) {
@@ -2761,6 +2762,11 @@ static void end_bio_extent_writepage(struct bio *bio)
 		start = page_offset(page);
 		end = start + bvec->bv_offset + bvec->bv_len - 1;
 
+		if (first_bvec) {
+			btrfs_record_physical_zoned(inode, start, bio);
+			first_bvec = false;
+		}
+
 		end_extent_writepage(page, error, start, end);
 		end_page_writeback(page);
 	}
@@ -3664,6 +3670,7 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
 	struct extent_map *em;
 	int ret = 0;
 	int nr = 0;
+	u32 opf = REQ_OP_WRITE;
 	const unsigned int write_flags = wbc_to_write_flags(wbc);
 	bool compressed;
 
@@ -3710,6 +3717,10 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
 
 		/* Note that em_end from extent_map_end() is exclusive */
 		iosize = min(em_end, end + 1) - cur;
+
+		if (btrfs_use_zone_append(inode, em))
+			opf = REQ_OP_ZONE_APPEND;
+
 		free_extent_map(em);
 		em = NULL;
 
@@ -3735,8 +3746,8 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
 			       page->index, cur, end);
 		}
 
-		ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
-					 page, disk_bytenr, iosize,
+		ret = submit_extent_page(opf | write_flags, wbc, page,
+					 disk_bytenr, iosize,
 					 cur - page_offset(page), &epd->bio,
 					 end_bio_extent_writepage,
 					 0, 0, 0, false);
@@ -2168,8 +2168,12 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	 * commit waits for their completion, to avoid data loss if we fsync,
 	 * the current transaction commits before the ordered extents complete
 	 * and a power failure happens right after that.
+	 *
+	 * For zoned filesystem, if a write IO uses a ZONE_APPEND command, the
+	 * logical address recorded in the ordered extent may change. We need
+	 * to wait for the IO to stabilize the logical address.
 	 */
-	if (full_sync) {
+	if (full_sync || btrfs_is_zoned(fs_info)) {
 		ret = btrfs_wait_ordered_range(inode, start, len);
 	} else {
 		/*
@@ -50,6 +50,7 @@
 #include "delalloc-space.h"
 #include "block-group.h"
 #include "space-info.h"
+#include "zoned.h"
 
 struct btrfs_iget_args {
 	u64 ino;
@@ -2874,6 +2875,9 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
 		goto out;
 	}
 
+	if (ordered_extent->disk)
+		btrfs_rewrite_logical_zoned(ordered_extent);
+
 	btrfs_free_io_failure_record(inode, start, end);
 
 	if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
@@ -199,6 +199,9 @@ static int __btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset
 	entry->compress_type = compress_type;
 	entry->truncated_len = (u64)-1;
 	entry->qgroup_rsv = ret;
+	entry->physical = (u64)-1;
+	entry->disk = NULL;
+	entry->partno = (u8)-1;
 
 	ASSERT(type == BTRFS_ORDERED_REGULAR ||
 	       type == BTRFS_ORDERED_NOCOW ||
@@ -139,6 +139,14 @@ struct btrfs_ordered_extent {
 	struct completion completion;
 	struct btrfs_work flush_work;
 	struct list_head work_list;
+
+	/*
+	 * Used to reverse-map physical address returned from ZONE_APPEND write
+	 * command in a workqueue context
+	 */
+	u64 physical;
+	struct gendisk *disk;
+	u8 partno;
 };
 
 /*
@@ -6500,6 +6500,20 @@ static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
 	btrfs_io_bio(bio)->device = dev;
 	bio->bi_end_io = btrfs_end_bio;
 	bio->bi_iter.bi_sector = physical >> 9;
+	/*
+	 * For zone append writing, bi_sector must point the beginning of the
+	 * zone
+	 */
+	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
+		if (btrfs_dev_is_sequential(dev, physical)) {
+			u64 zone_start = round_down(physical, fs_info->zone_size);
+
+			bio->bi_iter.bi_sector = zone_start >> SECTOR_SHIFT;
+		} else {
+			bio->bi_opf &= ~REQ_OP_ZONE_APPEND;
+			bio->bi_opf |= REQ_OP_WRITE;
+		}
+	}
 	btrfs_debug_in_rcu(fs_info,
 	"btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
 		bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector,
@@ -1247,3 +1247,73 @@ bool btrfs_use_zone_append(struct btrfs_inode *inode, struct extent_map *em)
 
 	return ret;
 }
+
+void btrfs_record_physical_zoned(struct inode *inode, u64 file_offset,
+				 struct bio *bio)
+{
+	struct btrfs_ordered_extent *ordered;
+	const u64 physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
+
+	if (bio_op(bio) != REQ_OP_ZONE_APPEND)
+		return;
+
+	ordered = btrfs_lookup_ordered_extent(BTRFS_I(inode), file_offset);
+	if (WARN_ON(!ordered))
+		return;
+
+	ordered->physical = physical;
+	ordered->disk = bio->bi_disk;
+	ordered->partno = bio->bi_partno;
+
+	btrfs_put_ordered_extent(ordered);
+}
+
+void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered)
+{
+	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
+	struct btrfs_fs_info *fs_info = inode->root->fs_info;
+	struct extent_map_tree *em_tree;
+	struct extent_map *em;
+	struct btrfs_ordered_sum *sum;
+	struct block_device *bdev;
+	u64 orig_logical = ordered->disk_bytenr;
+	u64 *logical = NULL;
+	int nr, stripe_len;
+
+	/* Zoned devices should not have partitions. So, we can assume it is 0 */
+	ASSERT(ordered->partno == 0);
+	bdev = bdgrab(ordered->disk->part0);
+	if (WARN_ON(!bdev))
+		return;
+
+	if (WARN_ON(btrfs_rmap_block(fs_info, orig_logical, bdev,
+				     ordered->physical, &logical, &nr,
+				     &stripe_len)))
+		goto out;
+
+	WARN_ON(nr != 1);
+
+	if (orig_logical == *logical)
+		goto out;
+
+	ordered->disk_bytenr = *logical;
+
+	em_tree = &inode->extent_tree;
+	write_lock(&em_tree->lock);
+	em = search_extent_mapping(em_tree, ordered->file_offset,
+				   ordered->num_bytes);
+	em->block_start = *logical;
+	free_extent_map(em);
+	write_unlock(&em_tree->lock);
+
+	list_for_each_entry(sum, &ordered->list, list) {
+		if (*logical < orig_logical)
+			sum->bytenr -= orig_logical - *logical;
+		else
+			sum->bytenr += *logical - orig_logical;
+	}
+
+out:
+	kfree(logical);
+	bdput(bdev);
+}
@@ -47,6 +47,9 @@ void btrfs_redirty_list_add(struct btrfs_transaction *trans,
 			    struct extent_buffer *eb);
 void btrfs_free_redirty_list(struct btrfs_transaction *trans);
 bool btrfs_use_zone_append(struct btrfs_inode *inode, struct extent_map *em);
+void btrfs_record_physical_zoned(struct inode *inode, u64 file_offset,
+				 struct bio *bio);
+void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered);
 #else /* CONFIG_BLK_DEV_ZONED */
 static inline int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
 				     struct blk_zone *zone)
@@ -139,6 +142,15 @@ static inline bool btrfs_use_zone_append(struct btrfs_inode *inode,
 {
 	return false;
 }
+
+static inline void btrfs_record_physical_zoned(struct inode *inode,
+					       u64 file_offset, struct bio *bio)
+{
+}
+
+static inline void btrfs_rewrite_logical_zoned(
+				struct btrfs_ordered_extent *ordered) { }
+
 #endif
 
 static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos)