for-6.8-rc1-tag

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEE8rQSAMVO+zA4DBdWxWXV+ddtWDsFAmWurp4ACgkQxWXV+ddt
WDsqSg/+OS5/1Cr2W6/3ns2hannEeAzYUeoRDNhNHluHOSufXS52QTckQdiA62BO
iMKGoIxZIn9BQPlvil1hi+jIEt/9qsRt/Qc6oBnzvlto21tJCoS486PJAShu6Sj5
jXKxtR7d6WrJEfk65uzatk1SbRguRKFxSrFlkaOeOHAmWsD54p/BnsZ/pqxPjF8W
LOFvwdhbTw3pzQ873b+hJg16rm4IenAnuazZNmXRdSufgdPEcArv0l7fMr4xTBvO
DBQXoM5GBGVHV2+IsrZiK39p7khz9ej2Ob4rps/x6PduC+GPxGtm6iLy8dZts+hV
D1FOHh3fqWmV2LQIzLNNu9N7sj5sF5dNFRZHSkq4qFNVNQYfvyFg43iJKfUnMY/s
puUm7ElSF3tLC2pRys0m/jDfkykZVFFZzbayfYQn+jRKuUASyXnWqmCKlljkLJD5
ekFXPpor+SQzQso9x0OpAjkSIUmmYFqSvoJCCczPFoo/3EDPv4C6VGOPEQyN6dDH
nBjn7fLXmn4hpdEKia+LU1MhajFis+SUlmjaoTh7UfCCzXDosDOPThRC1Kx0rNlY
t4KON8pMUCK3iGEce+7iOSwEImDDU4B7DUARey/sF0C8cs7jRsX8bf8eFTrEId8M
4C2sLmTw0JJ5n2I2soyTi9fHrGJnJamUlzp/hLrp8JyMzy6qBrs=
=38MW
-----END PGP SIGNATURE-----

Merge tag 'for-6.8-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs fixes from David Sterba:

 - zoned mode fixes:
     - fix a slowdown when writing large files sequentially, by looking
       up block groups with enough free space faster
     - locking fixes when activating a zone

 - new mount API fixes:
     - preserve mount options for a ro/rw mount of the same subvolume

 - scrub fixes:
     - fix use-after-free when the chunk length is not aligned to 64K;
       this does not happen normally but has been reported on images
       converted from ext4
     - a similar alignment check was missing with raid-stripe-tree

 - subvolume deletion fixes:
     - prevent calling ioctls on an already deleted subvolume
     - properly track the flag that marks a subvolume as deleted

 - in subpage mode, fix decompression of an inline extent (zlib, lzo,
   zstd)

 - fix a crash when starting writeback on a folio; after integration
   with recent MM changes this needs to be started conditionally

 - reject unknown flags in the defrag ioctl

 - error handling, API fixes, minor warning fixes

* tag 'for-6.8-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: scrub: limit RST scrub to chunk boundary
  btrfs: scrub: avoid use-after-free when chunk length is not 64K aligned
  btrfs: don't unconditionally call folio_start_writeback in subpage
  btrfs: use the original mount's mount options for the legacy reconfigure
  btrfs: don't warn if discard range is not aligned to sector
  btrfs: tree-checker: fix inline ref size in error messages
  btrfs: zstd: fix and simplify the inline extent decompression
  btrfs: lzo: fix and simplify the inline extent decompression
  btrfs: zlib: fix and simplify the inline extent decompression
  btrfs: defrag: reject unknown flags of btrfs_ioctl_defrag_range_args
  btrfs: avoid copying BTRFS_ROOT_SUBVOL_DEAD flag to snapshot of subvolume being deleted
  btrfs: don't abort filesystem when attempting to snapshot deleted subvolume
  btrfs: zoned: fix lock ordering in btrfs_zone_activate()
  btrfs: fix unbalanced unlock of mapping_tree_lock
  btrfs: ref-verify: free ref cache before clearing mount opt
  btrfs: fix kvcalloc() arguments order in btrfs_ioctl_send()
  btrfs: zoned: optimize hint byte for zoned allocator
  btrfs: zoned: factor out prepare_allocation_zoned()
This commit is contained in: 5d9248eed4
fs/btrfs/compression.c:

@@ -141,16 +141,16 @@ static int compression_decompress_bio(struct list_head *ws,
 }
 
 static int compression_decompress(int type, struct list_head *ws,
-		const u8 *data_in, struct page *dest_page,
-		unsigned long start_byte, size_t srclen, size_t destlen)
+		const u8 *data_in, struct page *dest_page,
+		unsigned long dest_pgoff, size_t srclen, size_t destlen)
 {
 	switch (type) {
 	case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
-				start_byte, srclen, destlen);
+				dest_pgoff, srclen, destlen);
 	case BTRFS_COMPRESS_LZO:  return lzo_decompress(ws, data_in, dest_page,
-				start_byte, srclen, destlen);
+				dest_pgoff, srclen, destlen);
 	case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
-				start_byte, srclen, destlen);
+				dest_pgoff, srclen, destlen);
 	case BTRFS_COMPRESS_NONE:
 	default:
 		/*
@@ -1037,14 +1037,23 @@ static int btrfs_decompress_bio(struct compressed_bio *cb)
  * start_byte tells us the offset into the compressed data we're interested in
  */
 int btrfs_decompress(int type, const u8 *data_in, struct page *dest_page,
-		unsigned long start_byte, size_t srclen, size_t destlen)
+		unsigned long dest_pgoff, size_t srclen, size_t destlen)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(dest_page->mapping->host->i_sb);
 	struct list_head *workspace;
+	const u32 sectorsize = fs_info->sectorsize;
 	int ret;
 
+	/*
+	 * The full destination page range should not exceed the page size.
+	 * And the @destlen should not exceed sectorsize, as this is only called for
+	 * inline file extents, which should not exceed sectorsize.
+	 */
+	ASSERT(dest_pgoff + destlen <= PAGE_SIZE && destlen <= sectorsize);
+
 	workspace = get_workspace(type, 0);
 	ret = compression_decompress(type, workspace, data_in, dest_page,
-				     start_byte, srclen, destlen);
+				     dest_pgoff, srclen, destlen);
 	put_workspace(type, workspace);
 
 	return ret;
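
The new ASSERT() makes the inline-extent contract explicit: the copy must stay inside one page and one sector. A minimal userspace sketch of the same invariant (4K page and sector sizes are assumed here; on btrfs the sector size comes from the superblock):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Assumed: 4K sectors; on btrfs, sectorsize comes from fs_info. */
static const size_t sectorsize = 4096;

static void check_inline_extent_bounds(unsigned long dest_pgoff, size_t destlen)
{
	/* Mirrors: ASSERT(dest_pgoff + destlen <= PAGE_SIZE && destlen <= sectorsize); */
	assert(dest_pgoff + destlen <= PAGE_SIZE);
	assert(destlen <= sectorsize);
}

int main(void)
{
	check_inline_extent_bounds(512, 2048); /* fits: no assertion fires */
	puts("bounds ok");
	return 0;
}
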
fs/btrfs/compression.h:

@@ -148,7 +148,7 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
 		unsigned long *total_in, unsigned long *total_out);
 int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
 int zlib_decompress(struct list_head *ws, const u8 *data_in,
-		struct page *dest_page, unsigned long start_byte, size_t srclen,
+		struct page *dest_page, unsigned long dest_pgoff, size_t srclen,
 		size_t destlen);
 struct list_head *zlib_alloc_workspace(unsigned int level);
 void zlib_free_workspace(struct list_head *ws);
@@ -159,7 +159,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
 		unsigned long *total_in, unsigned long *total_out);
 int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
 int lzo_decompress(struct list_head *ws, const u8 *data_in,
-		struct page *dest_page, unsigned long start_byte, size_t srclen,
+		struct page *dest_page, unsigned long dest_pgoff, size_t srclen,
 		size_t destlen);
 struct list_head *lzo_alloc_workspace(unsigned int level);
 void lzo_free_workspace(struct list_head *ws);
@@ -169,7 +169,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
 		unsigned long *total_in, unsigned long *total_out);
 int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
 int zstd_decompress(struct list_head *ws, const u8 *data_in,
-		struct page *dest_page, unsigned long start_byte, size_t srclen,
+		struct page *dest_page, unsigned long dest_pgoff, size_t srclen,
 		size_t destlen);
 void zstd_init_workspace_manager(void);
 void zstd_cleanup_workspace_manager(void);
fs/btrfs/extent-tree.c:

@@ -1260,7 +1260,8 @@ static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
 	u64 bytes_left, end;
 	u64 aligned_start = ALIGN(start, 1 << SECTOR_SHIFT);
 
-	if (WARN_ON(start != aligned_start)) {
+	/* Adjust the range to be aligned to 512B sectors if necessary. */
+	if (start != aligned_start) {
 		len -= aligned_start - start;
 		len = round_down(len, 1 << SECTOR_SHIFT);
 		start = aligned_start;
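
What the removed WARN_ON() used to complain about is now handled silently. A runnable userspace sketch of the same rounding, with ALIGN_UP/ROUND_DOWN re-implemented locally (the kernel gets them from its own headers), applied to an arbitrary unaligned range:

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9
/* Both macros assume a power-of-two alignment, as in the kernel. */
#define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((uint64_t)(a) - 1))
#define ROUND_DOWN(x, a) ((x) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t start = 1000, len = 5000; /* arbitrary unaligned example */
	uint64_t aligned_start = ALIGN_UP(start, 1 << SECTOR_SHIFT);

	if (start != aligned_start) {
		/* shrink the range to the 512B-aligned interior */
		len -= aligned_start - start;
		len = ROUND_DOWN(len, 1 << SECTOR_SHIFT);
		start = aligned_start;
	}
	printf("discard %llu..%llu\n", (unsigned long long)start,
	       (unsigned long long)(start + len));
	return 0;
}
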
fs/btrfs/extent-tree.c:

@@ -4298,6 +4299,42 @@ static int prepare_allocation_clustered(struct btrfs_fs_info *fs_info,
 	return 0;
 }
 
+static int prepare_allocation_zoned(struct btrfs_fs_info *fs_info,
+				    struct find_free_extent_ctl *ffe_ctl)
+{
+	if (ffe_ctl->for_treelog) {
+		spin_lock(&fs_info->treelog_bg_lock);
+		if (fs_info->treelog_bg)
+			ffe_ctl->hint_byte = fs_info->treelog_bg;
+		spin_unlock(&fs_info->treelog_bg_lock);
+	} else if (ffe_ctl->for_data_reloc) {
+		spin_lock(&fs_info->relocation_bg_lock);
+		if (fs_info->data_reloc_bg)
+			ffe_ctl->hint_byte = fs_info->data_reloc_bg;
+		spin_unlock(&fs_info->relocation_bg_lock);
+	} else if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) {
+		struct btrfs_block_group *block_group;
+
+		spin_lock(&fs_info->zone_active_bgs_lock);
+		list_for_each_entry(block_group, &fs_info->zone_active_bgs, active_bg_list) {
+			/*
+			 * No lock is OK here because avail is monotonically
+			 * decreasing, and this is just a hint.
+			 */
+			u64 avail = block_group->zone_capacity - block_group->alloc_offset;
+
+			if (block_group_bits(block_group, ffe_ctl->flags) &&
+			    avail >= ffe_ctl->num_bytes) {
+				ffe_ctl->hint_byte = block_group->start;
+				break;
+			}
+		}
+		spin_unlock(&fs_info->zone_active_bgs_lock);
+	}
+
+	return 0;
+}
+
 static int prepare_allocation(struct btrfs_fs_info *fs_info,
 			      struct find_free_extent_ctl *ffe_ctl,
 			      struct btrfs_space_info *space_info,
@@ -4308,19 +4345,7 @@ static int prepare_allocation(struct btrfs_fs_info *fs_info,
 		return prepare_allocation_clustered(fs_info, ffe_ctl,
 						    space_info, ins);
 	case BTRFS_EXTENT_ALLOC_ZONED:
-		if (ffe_ctl->for_treelog) {
-			spin_lock(&fs_info->treelog_bg_lock);
-			if (fs_info->treelog_bg)
-				ffe_ctl->hint_byte = fs_info->treelog_bg;
-			spin_unlock(&fs_info->treelog_bg_lock);
-		}
-		if (ffe_ctl->for_data_reloc) {
-			spin_lock(&fs_info->relocation_bg_lock);
-			if (fs_info->data_reloc_bg)
-				ffe_ctl->hint_byte = fs_info->data_reloc_bg;
-			spin_unlock(&fs_info->relocation_bg_lock);
-		}
-		return 0;
+		return prepare_allocation_zoned(fs_info, ffe_ctl);
 	default:
 		BUG();
 	}
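
For data block groups, prepare_allocation_zoned() now scans the active block groups and hints at the first one that can still hold the allocation, which is what speeds up sequential writes of large files. A simplified userspace sketch of that selection loop (the struct and array are stand-ins, not the kernel types):

#include <stdint.h>
#include <stdio.h>

struct block_group {
	uint64_t start;
	uint64_t zone_capacity;
	uint64_t alloc_offset;
};

static uint64_t pick_hint(const struct block_group *bgs, int nr, uint64_t num_bytes)
{
	for (int i = 0; i < nr; i++) {
		/* avail only shrinks, so an unlocked read is fine for a hint */
		uint64_t avail = bgs[i].zone_capacity - bgs[i].alloc_offset;

		if (avail >= num_bytes)
			return bgs[i].start;
	}
	return 0; /* no hint */
}

int main(void)
{
	struct block_group bgs[] = {
		{ .start = 1 << 20, .zone_capacity = 256 << 10, .alloc_offset = 250 << 10 },
		{ .start = 2 << 20, .zone_capacity = 256 << 10, .alloc_offset = 64 << 10 },
	};

	/* first group has only 6K left, so the hint lands on the second */
	printf("hint byte: %llu\n",
	       (unsigned long long)pick_hint(bgs, 2, 16 << 10));
	return 0;
}
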
fs/btrfs/inode.c:

@@ -4458,6 +4458,8 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
 	u64 root_flags;
 	int ret;
 
+	down_write(&fs_info->subvol_sem);
+
 	/*
 	 * Don't allow to delete a subvolume with send in progress. This is
 	 * inside the inode lock so the error handling that has to drop the bit
@@ -4469,25 +4471,25 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
 		btrfs_warn(fs_info,
 			   "attempt to delete subvolume %llu during send",
 			   dest->root_key.objectid);
-		return -EPERM;
+		ret = -EPERM;
+		goto out_up_write;
 	}
 	if (atomic_read(&dest->nr_swapfiles)) {
 		spin_unlock(&dest->root_item_lock);
 		btrfs_warn(fs_info,
 			   "attempt to delete subvolume %llu with active swapfile",
 			   root->root_key.objectid);
-		return -EPERM;
+		ret = -EPERM;
+		goto out_up_write;
 	}
 	root_flags = btrfs_root_flags(&dest->root_item);
 	btrfs_set_root_flags(&dest->root_item,
 			     root_flags | BTRFS_ROOT_SUBVOL_DEAD);
 	spin_unlock(&dest->root_item_lock);
 
-	down_write(&fs_info->subvol_sem);
-
 	ret = may_destroy_subvol(dest);
 	if (ret)
-		goto out_up_write;
+		goto out_undead;
 
 	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
 	/*
@@ -4497,7 +4499,7 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
 	 */
 	ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
 	if (ret)
-		goto out_up_write;
+		goto out_undead;
 
 	trans = btrfs_start_transaction(root, 0);
 	if (IS_ERR(trans)) {
@@ -4563,15 +4565,17 @@ out_end_trans:
 	inode->i_flags |= S_DEAD;
 out_release:
 	btrfs_subvolume_release_metadata(root, &block_rsv);
-out_up_write:
-	up_write(&fs_info->subvol_sem);
+out_undead:
 	if (ret) {
 		spin_lock(&dest->root_item_lock);
 		root_flags = btrfs_root_flags(&dest->root_item);
 		btrfs_set_root_flags(&dest->root_item,
 				     root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
 		spin_unlock(&dest->root_item_lock);
-	} else {
 	}
+out_up_write:
+	up_write(&fs_info->subvol_sem);
+	if (!ret) {
 		d_invalidate(dentry);
 		btrfs_prune_dentries(dest);
 		ASSERT(dest->send_in_progress == 0);
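
The shape of the fix: BTRFS_ROOT_SUBVOL_DEAD is still set up front, but every failure path now funnels through out_undead to clear it again before the subvolume semaphore is released. A toy userspace sketch of that set-then-rollback pattern, with a pthread mutex standing in for the kernel locks:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define SUBVOL_DEAD 0x1

static pthread_mutex_t item_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int root_flags;

static int delete_subvolume(bool fail_midway)
{
	int ret = 0;

	pthread_mutex_lock(&item_lock);
	root_flags |= SUBVOL_DEAD; /* mark dead up front */
	pthread_mutex_unlock(&item_lock);

	if (fail_midway) {
		ret = -1;
		goto out_undead;
	}
	/* ... the actual deletion work would run here ... */

out_undead:
	if (ret) {
		/* roll the flag back so the subvolume stays usable */
		pthread_mutex_lock(&item_lock);
		root_flags &= ~SUBVOL_DEAD;
		pthread_mutex_unlock(&item_lock);
	}
	return ret;
}

int main(void)
{
	delete_subvolume(true);
	printf("flags after failed delete: %#x\n", root_flags);
	return 0;
}
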
fs/btrfs/ioctl.c:

@@ -790,6 +790,9 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
 		return -EOPNOTSUPP;
 	}
 
+	if (btrfs_root_refs(&root->root_item) == 0)
+		return -ENOENT;
+
 	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
 		return -EINVAL;
 
@@ -2608,6 +2611,10 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
 			ret = -EFAULT;
 			goto out;
 		}
+		if (range.flags & ~BTRFS_DEFRAG_RANGE_FLAGS_SUPP) {
+			ret = -EOPNOTSUPP;
+			goto out;
+		}
 		/* compression requires us to start the IO */
 		if ((range.flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
 			range.flags |= BTRFS_DEFRAG_RANGE_START_IO;
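
The defrag change is the classic "reject unknown flags" rule for ioctls: unsupported bits fail with EOPNOTSUPP instead of being silently ignored, which keeps them usable for future extensions. A runnable userspace sketch using the same mask the uapi header now defines:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define BTRFS_DEFRAG_RANGE_COMPRESS 1
#define BTRFS_DEFRAG_RANGE_START_IO 2
#define BTRFS_DEFRAG_RANGE_FLAGS_SUPP (BTRFS_DEFRAG_RANGE_COMPRESS | \
				       BTRFS_DEFRAG_RANGE_START_IO)

static int check_defrag_flags(uint64_t flags)
{
	/* any bit outside the supported mask is rejected */
	if (flags & ~(uint64_t)BTRFS_DEFRAG_RANGE_FLAGS_SUPP)
		return -EOPNOTSUPP;
	return 0;
}

int main(void)
{
	printf("flags=0x3 -> %d\n", check_defrag_flags(0x3)); /* 0: supported */
	printf("flags=0x8 -> %d\n", check_defrag_flags(0x8)); /* -EOPNOTSUPP */
	return 0;
}
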
fs/btrfs/lzo.c:

@@ -425,16 +425,16 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 }
 
 int lzo_decompress(struct list_head *ws, const u8 *data_in,
-		struct page *dest_page, unsigned long start_byte, size_t srclen,
+		struct page *dest_page, unsigned long dest_pgoff, size_t srclen,
 		size_t destlen)
 {
 	struct workspace *workspace = list_entry(ws, struct workspace, list);
+	struct btrfs_fs_info *fs_info = btrfs_sb(dest_page->mapping->host->i_sb);
+	const u32 sectorsize = fs_info->sectorsize;
 	size_t in_len;
 	size_t out_len;
 	size_t max_segment_len = WORKSPACE_BUF_LENGTH;
 	int ret = 0;
-	char *kaddr;
-	unsigned long bytes;
 
 	if (srclen < LZO_LEN || srclen > max_segment_len + LZO_LEN * 2)
 		return -EUCLEAN;
@@ -451,7 +451,7 @@ int lzo_decompress(struct list_head *ws, const u8 *data_in,
 	}
 	data_in += LZO_LEN;
 
-	out_len = PAGE_SIZE;
+	out_len = sectorsize;
 	ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
 	if (ret != LZO_E_OK) {
 		pr_warn("BTRFS: decompress failed!\n");
@@ -459,29 +459,13 @@ int lzo_decompress(struct list_head *ws, const u8 *data_in,
 		goto out;
 	}
 
-	if (out_len < start_byte) {
+	ASSERT(out_len <= sectorsize);
+	memcpy_to_page(dest_page, dest_pgoff, workspace->buf, out_len);
+	/* Early end, considered as an error. */
+	if (unlikely(out_len < destlen)) {
 		ret = -EIO;
-		goto out;
+		memzero_page(dest_page, dest_pgoff + out_len, destlen - out_len);
 	}
-
-	/*
-	 * the caller is already checking against PAGE_SIZE, but lets
-	 * move this check closer to the memcpy/memset
-	 */
-	destlen = min_t(unsigned long, destlen, PAGE_SIZE);
-	bytes = min_t(unsigned long, destlen, out_len - start_byte);
-
-	kaddr = kmap_local_page(dest_page);
-	memcpy(kaddr, workspace->buf + start_byte, bytes);
-
-	/*
-	 * btrfs_getblock is doing a zero on the tail of the page too,
-	 * but this will cover anything missing from the decompressed
-	 * data.
-	 */
-	if (bytes < destlen)
-		memset(kaddr + bytes, 0, destlen - bytes);
-	kunmap_local(kaddr);
 out:
 	return ret;
 }
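
The rewritten tail of lzo_decompress() copies whatever one sector's worth of decompression produced and zeroes the remainder if the output ended early. A userspace sketch of that copy-then-zero-tail step, with plain buffers standing in for pages:

#include <errno.h>
#include <stdio.h>
#include <string.h>

static int copy_inline_result(char *dest, size_t destlen,
			      const char *buf, size_t out_len)
{
	int ret = 0;

	memcpy(dest, buf, out_len < destlen ? out_len : destlen);
	if (out_len < destlen) {
		/* early end: report an error, but leave defined contents */
		ret = -EIO;
		memset(dest + out_len, 0, destlen - out_len);
	}
	return ret;
}

int main(void)
{
	char page[16];
	const char buf[8] = "short";

	printf("ret = %d\n", copy_inline_result(page, sizeof(page), buf, 6));
	return 0;
}
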
fs/btrfs/ref-verify.c:

@@ -889,8 +889,10 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
 out_unlock:
 	spin_unlock(&fs_info->ref_verify_lock);
 out:
-	if (ret)
+	if (ret) {
+		btrfs_free_ref_cache(fs_info);
 		btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
+	}
 	return ret;
 }
 
@@ -1021,8 +1023,8 @@ int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
 		}
 	}
 	if (ret) {
-		btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
 		btrfs_free_ref_cache(fs_info);
+		btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
 	}
 	btrfs_free_path(path);
 	return ret;
fs/btrfs/scrub.c:

@@ -1098,12 +1098,22 @@ out:
 static void scrub_read_endio(struct btrfs_bio *bbio)
 {
 	struct scrub_stripe *stripe = bbio->private;
+	struct bio_vec *bvec;
+	int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
+	int num_sectors;
+	u32 bio_size = 0;
+	int i;
+
+	ASSERT(sector_nr < stripe->nr_sectors);
+	bio_for_each_bvec_all(bvec, &bbio->bio, i)
+		bio_size += bvec->bv_len;
+	num_sectors = bio_size >> stripe->bg->fs_info->sectorsize_bits;
 
 	if (bbio->bio.bi_status) {
-		bitmap_set(&stripe->io_error_bitmap, 0, stripe->nr_sectors);
-		bitmap_set(&stripe->error_bitmap, 0, stripe->nr_sectors);
+		bitmap_set(&stripe->io_error_bitmap, sector_nr, num_sectors);
+		bitmap_set(&stripe->error_bitmap, sector_nr, num_sectors);
 	} else {
-		bitmap_clear(&stripe->io_error_bitmap, 0, stripe->nr_sectors);
+		bitmap_clear(&stripe->io_error_bitmap, sector_nr, num_sectors);
 	}
 	bio_put(&bbio->bio);
 	if (atomic_dec_and_test(&stripe->pending_io)) {
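
Instead of marking the whole stripe on a bio error, the endio handler now derives the exact sector span the bio covered from its segment lengths. A userspace sketch of that bookkeeping, with a u64 standing in for the kernel bitmap and 4K sectors assumed:

#include <stdint.h>
#include <stdio.h>

#define SECTORSIZE_BITS 12 /* assumed 4K sectors */

static void mark_error_range(uint64_t *bitmap, int first_sector,
			     const uint32_t *seg_len, int nr_segs)
{
	uint32_t bio_size = 0;

	/* total bytes the bio carried, summed over its segments */
	for (int i = 0; i < nr_segs; i++)
		bio_size += seg_len[i];

	int num_sectors = bio_size >> SECTORSIZE_BITS;

	/* mark only [first_sector, first_sector + num_sectors) */
	for (int s = first_sector; s < first_sector + num_sectors; s++)
		*bitmap |= 1ULL << s;
}

int main(void)
{
	uint64_t error_bitmap = 0;
	uint32_t segs[] = { 4096, 8192 }; /* one bio covering 3 sectors */

	mark_error_range(&error_bitmap, 5, segs, 2);
	printf("error bitmap: %#llx\n", (unsigned long long)error_bitmap);
	return 0;
}
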
fs/btrfs/scrub.c:

@@ -1636,6 +1646,9 @@ static void scrub_submit_extent_sector_read(struct scrub_ctx *sctx,
 {
 	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
 	struct btrfs_bio *bbio = NULL;
+	unsigned int nr_sectors = min(BTRFS_STRIPE_LEN, stripe->bg->start +
+				      stripe->bg->length - stripe->logical) >>
+				  fs_info->sectorsize_bits;
 	u64 stripe_len = BTRFS_STRIPE_LEN;
 	int mirror = stripe->mirror_num;
 	int i;
@@ -1646,6 +1659,10 @@ static void scrub_submit_extent_sector_read(struct scrub_ctx *sctx,
 		struct page *page = scrub_stripe_get_page(stripe, i);
 		unsigned int pgoff = scrub_stripe_get_page_offset(stripe, i);
 
+		/* We're beyond the chunk boundary, no need to read anymore. */
+		if (i >= nr_sectors)
+			break;
+
 		/* The current sector cannot be merged, submit the bio. */
 		if (bbio &&
 		    ((i > 0 &&
@@ -1701,6 +1718,9 @@ static void scrub_submit_initial_read(struct scrub_ctx *sctx,
 {
 	struct btrfs_fs_info *fs_info = sctx->fs_info;
 	struct btrfs_bio *bbio;
+	unsigned int nr_sectors = min(BTRFS_STRIPE_LEN, stripe->bg->start +
+				      stripe->bg->length - stripe->logical) >>
+				  fs_info->sectorsize_bits;
 	int mirror = stripe->mirror_num;
 
 	ASSERT(stripe->bg);
@@ -1715,14 +1735,16 @@ static void scrub_submit_initial_read(struct scrub_ctx *sctx,
 	bbio = btrfs_bio_alloc(SCRUB_STRIPE_PAGES, REQ_OP_READ, fs_info,
 			       scrub_read_endio, stripe);
 
-	/* Read the whole stripe. */
 	bbio->bio.bi_iter.bi_sector = stripe->logical >> SECTOR_SHIFT;
-	for (int i = 0; i < BTRFS_STRIPE_LEN >> PAGE_SHIFT; i++) {
+	/* Read the whole range inside the chunk boundary. */
+	for (unsigned int cur = 0; cur < nr_sectors; cur++) {
+		struct page *page = scrub_stripe_get_page(stripe, cur);
+		unsigned int pgoff = scrub_stripe_get_page_offset(stripe, cur);
 		int ret;
 
-		ret = bio_add_page(&bbio->bio, stripe->pages[i], PAGE_SIZE, 0);
+		ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
 		/* We should have allocated enough bio vectors. */
-		ASSERT(ret == PAGE_SIZE);
+		ASSERT(ret == fs_info->sectorsize);
 	}
 	atomic_inc(&stripe->pending_io);
 
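
Both scrub paths rely on the same clamp: a stripe is nominally BTRFS_STRIPE_LEN (64K), but the last stripe of a chunk whose length is not 64K-aligned must stop at the chunk boundary, otherwise reads run past the allocated pages (the reported use-after-free). A runnable sketch of the arithmetic, with 4K sectors assumed:

#include <stdint.h>
#include <stdio.h>

#define BTRFS_STRIPE_LEN (64 * 1024)
#define SECTORSIZE_BITS 12 /* assumed 4K sectors */

static unsigned int stripe_nr_sectors(uint64_t bg_start, uint64_t bg_len,
				      uint64_t logical)
{
	/* bytes left until the chunk boundary, capped at one stripe */
	uint64_t to_end = bg_start + bg_len - logical;
	uint64_t len = to_end < BTRFS_STRIPE_LEN ? to_end : BTRFS_STRIPE_LEN;

	return len >> SECTORSIZE_BITS;
}

int main(void)
{
	/* a 96K chunk at 1M: the second stripe only has 32K left to read */
	printf("%u sectors\n",
	       stripe_nr_sectors(1 << 20, 96 * 1024, (1 << 20) + 64 * 1024));
	return 0;
}
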
fs/btrfs/send.c:

@@ -8205,8 +8205,8 @@ long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg)
 		goto out;
 	}
 
-	sctx->clone_roots = kvcalloc(sizeof(*sctx->clone_roots),
-				     arg->clone_sources_count + 1,
+	sctx->clone_roots = kvcalloc(arg->clone_sources_count + 1,
+				     sizeof(*sctx->clone_roots),
 				     GFP_KERNEL);
 	if (!sctx->clone_roots) {
 		ret = -ENOMEM;
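
kvcalloc(), like calloc(), takes the element count first and the element size second. The swapped order produced the same allocation size, but the fixed order is what overflow checking and static analyzers expect. The userspace analog:

#include <stdio.h>
#include <stdlib.h>

struct clone_root { int dummy; };

int main(void)
{
	size_t clone_sources_count = 8;

	/* correct: count first, size second */
	struct clone_root *roots = calloc(clone_sources_count + 1,
					  sizeof(*roots));
	if (!roots)
		return 1;
	printf("allocated %zu elements\n", clone_sources_count + 1);
	free(roots);
	return 0;
}
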
fs/btrfs/subpage.c:

@@ -475,7 +475,8 @@ void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info,
 
 	spin_lock_irqsave(&subpage->lock, flags);
 	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
-	folio_start_writeback(folio);
+	if (!folio_test_writeback(folio))
+		folio_start_writeback(folio);
 	spin_unlock_irqrestore(&subpage->lock, flags);
 }
 
fs/btrfs/super.c:

@@ -1457,6 +1457,14 @@ static int btrfs_reconfigure(struct fs_context *fc)
 
 	btrfs_info_to_ctx(fs_info, &old_ctx);
 
+	/*
+	 * This is our "bind mount" trick, we don't want to allow the user to do
+	 * anything other than mount a different ro/rw and a different subvol,
+	 * all of the mount options should be maintained.
+	 */
+	if (mount_reconfigure)
+		ctx->mount_opt = old_ctx.mount_opt;
+
 	sync_filesystem(sb);
 	set_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
 
fs/btrfs/tree-checker.c:

@@ -1436,7 +1436,7 @@ static int check_extent_item(struct extent_buffer *leaf,
 		if (unlikely(ptr + btrfs_extent_inline_ref_size(inline_type) > end)) {
 			extent_err(leaf, slot,
 "inline ref item overflows extent item, ptr %lu iref size %u end %lu",
-				   ptr, inline_type, end);
+				   ptr, btrfs_extent_inline_ref_size(inline_type), end);
 			return -EUCLEAN;
 		}
 
fs/btrfs/volumes.c:

@@ -3087,7 +3087,6 @@ struct btrfs_chunk_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
 	map = btrfs_find_chunk_map(fs_info, logical, length);
 
 	if (unlikely(!map)) {
-		read_unlock(&fs_info->mapping_tree_lock);
 		btrfs_crit(fs_info,
 			   "unable to find chunk map for logical %llu length %llu",
 			   logical, length);
@@ -3095,7 +3094,6 @@ struct btrfs_chunk_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
 	}
 
 	if (unlikely(map->start > logical || map->start + map->chunk_len <= logical)) {
-		read_unlock(&fs_info->mapping_tree_lock);
 		btrfs_crit(fs_info,
 			   "found a bad chunk map, wanted %llu-%llu, found %llu-%llu",
 			   logical, logical + length, map->start,
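
The unbalanced-unlock pattern fixed here: btrfs_find_chunk_map() takes and releases mapping_tree_lock internally, so the caller's error paths must not unlock again. A pthread sketch of the corrected calling convention (the names mirror the kernel ones, but the types are stand-ins):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t mapping_tree_lock = PTHREAD_RWLOCK_INITIALIZER;

static void *find_chunk_map(void)
{
	pthread_rwlock_rdlock(&mapping_tree_lock);
	/* ... lookup would happen here ... */
	pthread_rwlock_unlock(&mapping_tree_lock); /* lock released here */
	return NULL; /* not found */
}

static int get_chunk_map(void)
{
	void *map = find_chunk_map();

	if (!map) {
		/* The bug (before the fix): unlocking again here is
		 * unbalanced, since find_chunk_map() already dropped it. */
		fprintf(stderr, "chunk map not found\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	return get_chunk_map() ? 1 : 0;
}
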
fs/btrfs/zlib.c:

@@ -354,18 +354,13 @@ done:
 }
 
 int zlib_decompress(struct list_head *ws, const u8 *data_in,
-		struct page *dest_page, unsigned long start_byte, size_t srclen,
+		struct page *dest_page, unsigned long dest_pgoff, size_t srclen,
 		size_t destlen)
 {
 	struct workspace *workspace = list_entry(ws, struct workspace, list);
 	int ret = 0;
 	int wbits = MAX_WBITS;
-	unsigned long bytes_left;
-	unsigned long total_out = 0;
-	unsigned long pg_offset = 0;
-
-	destlen = min_t(unsigned long, destlen, PAGE_SIZE);
-	bytes_left = destlen;
+	unsigned long to_copy;
 
 	workspace->strm.next_in = data_in;
 	workspace->strm.avail_in = srclen;
@@ -390,60 +385,30 @@ int zlib_decompress(struct list_head *ws, const u8 *data_in,
 		return -EIO;
 	}
 
-	while (bytes_left > 0) {
-		unsigned long buf_start;
-		unsigned long buf_offset;
-		unsigned long bytes;
-
-		ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
-		if (ret != Z_OK && ret != Z_STREAM_END)
-			break;
-
-		buf_start = total_out;
-		total_out = workspace->strm.total_out;
-
-		if (total_out == buf_start) {
-			ret = -EIO;
-			break;
-		}
-
-		if (total_out <= start_byte)
-			goto next;
-
-		if (total_out > start_byte && buf_start < start_byte)
-			buf_offset = start_byte - buf_start;
-		else
-			buf_offset = 0;
-
-		bytes = min(PAGE_SIZE - pg_offset,
-			    PAGE_SIZE - (buf_offset % PAGE_SIZE));
-		bytes = min(bytes, bytes_left);
-
-		memcpy_to_page(dest_page, pg_offset,
-			       workspace->buf + buf_offset, bytes);
-
-		pg_offset += bytes;
-		bytes_left -= bytes;
-next:
-		workspace->strm.next_out = workspace->buf;
-		workspace->strm.avail_out = workspace->buf_size;
-	}
-
-	if (ret != Z_STREAM_END && bytes_left != 0)
+	/*
+	 * Everything (in/out buf) should be at most one sector, there should
+	 * be no need to switch any input/output buffer.
+	 */
+	ret = zlib_inflate(&workspace->strm, Z_FINISH);
+	to_copy = min(workspace->strm.total_out, destlen);
+	if (ret != Z_STREAM_END)
+		goto out;
+
+	memcpy_to_page(dest_page, dest_pgoff, workspace->buf, to_copy);
+
+out:
+	if (unlikely(to_copy != destlen)) {
+		pr_warn_ratelimited("BTRFS: inflate failed, decompressed=%lu expected=%zu\n",
+				    to_copy, destlen);
 		ret = -EIO;
-	else
+	} else {
 		ret = 0;
+	}
 
 	zlib_inflateEnd(&workspace->strm);
 
-	/*
-	 * this should only happen if zlib returned fewer bytes than we
-	 * expected. btrfs_get_block is responsible for zeroing from the
-	 * end of the inline extent (destlen) to the end of the page
-	 */
-	if (pg_offset < destlen) {
-		memzero_page(dest_page, pg_offset, destlen - pg_offset);
-	}
+	if (unlikely(to_copy < destlen))
+		memzero_page(dest_page, dest_pgoff + to_copy, destlen - to_copy);
 	return ret;
 }
 
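
The zlib rewrite relies on the fact that an inline extent fits one sector, so a single zlib_inflate(..., Z_FINISH) call must consume the whole stream and the old output-switching loop can go away. The same single-shot pattern in userspace zlib (build with -lz):

#include <stdio.h>
#include <string.h>
#include <zlib.h>

int main(void)
{
	const char *msg = "hello inline extent";
	unsigned char comp[128];
	char out[128];
	uLongf comp_len = sizeof(comp);
	z_stream strm = { 0 }; /* zeroed: zalloc/zfree default to Z_NULL */

	compress(comp, &comp_len, (const Bytef *)msg, strlen(msg) + 1);

	if (inflateInit(&strm) != Z_OK)
		return 1;
	strm.next_in = comp;
	strm.avail_in = comp_len;
	strm.next_out = (Bytef *)out;
	strm.avail_out = sizeof(out);

	/* One call, whole frame: anything but Z_STREAM_END is an error. */
	if (inflate(&strm, Z_FINISH) != Z_STREAM_END) {
		inflateEnd(&strm);
		return 1;
	}
	printf("decompressed %lu bytes: %s\n", strm.total_out, out);
	inflateEnd(&strm);
	return 0;
}
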
fs/btrfs/zoned.c:

@@ -2055,6 +2055,7 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
 
 	map = block_group->physical_map;
 
+	spin_lock(&fs_info->zone_active_bgs_lock);
 	spin_lock(&block_group->lock);
 	if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
 		ret = true;
@@ -2067,7 +2068,6 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
 		goto out_unlock;
 	}
 
-	spin_lock(&fs_info->zone_active_bgs_lock);
 	for (i = 0; i < map->num_stripes; i++) {
 		struct btrfs_zoned_device_info *zinfo;
 		int reserved = 0;
@@ -2087,20 +2087,17 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
 		 */
 		if (atomic_read(&zinfo->active_zones_left) <= reserved) {
 			ret = false;
-			spin_unlock(&fs_info->zone_active_bgs_lock);
 			goto out_unlock;
 		}
 
 		if (!btrfs_dev_set_active_zone(device, physical)) {
 			/* Cannot activate the zone */
 			ret = false;
-			spin_unlock(&fs_info->zone_active_bgs_lock);
 			goto out_unlock;
 		}
 		if (!is_data)
 			zinfo->reserved_active_zones--;
 	}
-	spin_unlock(&fs_info->zone_active_bgs_lock);
 
 	/* Successfully activated all the zones */
 	set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
@@ -2108,8 +2105,6 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
 
 	/* For the active block group list */
 	btrfs_get_block_group(block_group);
-
-	spin_lock(&fs_info->zone_active_bgs_lock);
 	list_add_tail(&block_group->active_bg_list, &fs_info->zone_active_bgs);
 	spin_unlock(&fs_info->zone_active_bgs_lock);
 
@@ -2117,6 +2112,7 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
 
 out_unlock:
 	spin_unlock(&block_group->lock);
+	spin_unlock(&fs_info->zone_active_bgs_lock);
 	return ret;
 }
 
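
The locking fix makes btrfs_zone_activate() always take zone_active_bgs_lock before block_group->lock, instead of acquiring the list lock mid-function while already holding the group lock, which could deadlock against code locking in the opposite order. A minimal pthread illustration of the fixed ordering:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t zone_active_bgs_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t block_group_lock = PTHREAD_MUTEX_INITIALIZER;

static int activate(void)
{
	/* Fixed order: the list lock first, then the block group lock;
	 * every path through the function unlocks in reverse order. */
	pthread_mutex_lock(&zone_active_bgs_lock);
	pthread_mutex_lock(&block_group_lock);

	/* ... check flags, reserve zones, add to the active list ... */

	pthread_mutex_unlock(&block_group_lock);
	pthread_mutex_unlock(&zone_active_bgs_lock);
	return 0;
}

int main(void)
{
	activate();
	puts("activated");
	return 0;
}
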
fs/btrfs/zstd.c:

@@ -20,6 +20,7 @@
 #include "misc.h"
 #include "compression.h"
 #include "ctree.h"
+#include "super.h"
 
 #define ZSTD_BTRFS_MAX_WINDOWLOG 17
 #define ZSTD_BTRFS_MAX_INPUT (1 << ZSTD_BTRFS_MAX_WINDOWLOG)
@@ -618,80 +619,48 @@ done:
 }
 
 int zstd_decompress(struct list_head *ws, const u8 *data_in,
-		struct page *dest_page, unsigned long start_byte, size_t srclen,
+		struct page *dest_page, unsigned long dest_pgoff, size_t srclen,
 		size_t destlen)
 {
 	struct workspace *workspace = list_entry(ws, struct workspace, list);
+	struct btrfs_fs_info *fs_info = btrfs_sb(dest_page->mapping->host->i_sb);
+	const u32 sectorsize = fs_info->sectorsize;
 	zstd_dstream *stream;
 	int ret = 0;
-	size_t ret2;
-	unsigned long total_out = 0;
-	unsigned long pg_offset = 0;
+	unsigned long to_copy = 0;
 
 	stream = zstd_init_dstream(
 			ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
 	if (!stream) {
 		pr_warn("BTRFS: zstd_init_dstream failed\n");
 		ret = -EIO;
 		goto finish;
 	}
 
-	destlen = min_t(size_t, destlen, PAGE_SIZE);
-
 	workspace->in_buf.src = data_in;
 	workspace->in_buf.pos = 0;
 	workspace->in_buf.size = srclen;
 
 	workspace->out_buf.dst = workspace->buf;
 	workspace->out_buf.pos = 0;
-	workspace->out_buf.size = PAGE_SIZE;
-
-	ret2 = 1;
-	while (pg_offset < destlen
-	       && workspace->in_buf.pos < workspace->in_buf.size) {
-		unsigned long buf_start;
-		unsigned long buf_offset;
-		unsigned long bytes;
-
-		/* Check if the frame is over and we still need more input */
-		if (ret2 == 0) {
-			pr_debug("BTRFS: zstd_decompress_stream ended early\n");
-			ret = -EIO;
-			goto finish;
-		}
-		ret2 = zstd_decompress_stream(stream, &workspace->out_buf,
-				&workspace->in_buf);
-		if (zstd_is_error(ret2)) {
-			pr_debug("BTRFS: zstd_decompress_stream returned %d\n",
-					zstd_get_error_code(ret2));
-			ret = -EIO;
-			goto finish;
-		}
-
-		buf_start = total_out;
-		total_out += workspace->out_buf.pos;
-		workspace->out_buf.pos = 0;
-
-		if (total_out <= start_byte)
-			continue;
-
-		if (total_out > start_byte && buf_start < start_byte)
-			buf_offset = start_byte - buf_start;
-		else
-			buf_offset = 0;
-
-		bytes = min_t(unsigned long, destlen - pg_offset,
-			      workspace->out_buf.size - buf_offset);
-
-		memcpy_to_page(dest_page, pg_offset,
-			       workspace->out_buf.dst + buf_offset, bytes);
-
-		pg_offset += bytes;
-	}
-	ret = 0;
+	workspace->out_buf.size = sectorsize;
+
+	/*
+	 * Since both input and output buffers should not exceed one sector,
+	 * one call should end the decompression.
+	 */
+	ret = zstd_decompress_stream(stream, &workspace->out_buf, &workspace->in_buf);
+	if (zstd_is_error(ret)) {
+		pr_warn_ratelimited("BTRFS: zstd_decompress_stream return %d\n",
+				    zstd_get_error_code(ret));
+		goto finish;
+	}
+	to_copy = workspace->out_buf.pos;
+	memcpy_to_page(dest_page, dest_pgoff + to_copy, workspace->out_buf.dst, to_copy);
 finish:
-	if (pg_offset < destlen) {
-		memzero_page(dest_page, pg_offset, destlen - pg_offset);
+	/* Error or early end. */
+	if (unlikely(to_copy < destlen)) {
+		ret = -EIO;
+		memzero_page(dest_page, dest_pgoff + to_copy, destlen - to_copy);
 	}
 	return ret;
 }
 
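
As with zlib and lzo, the zstd path now assumes one streaming call finishes the inline extent's frame; a return of 0 from zstd_decompress_stream() means the frame is complete. The same one-call model in userspace libzstd (build with -lzstd):

#include <stdio.h>
#include <string.h>
#include <zstd.h>

int main(void)
{
	const char *msg = "hello inline extent";
	char comp[256], out[256];
	size_t csize = ZSTD_compress(comp, sizeof(comp), msg, strlen(msg) + 1, 1);

	if (ZSTD_isError(csize))
		return 1;

	ZSTD_DStream *ds = ZSTD_createDStream();
	if (!ds)
		return 1;
	ZSTD_initDStream(ds);

	ZSTD_inBuffer in = { comp, csize, 0 };
	ZSTD_outBuffer o = { out, sizeof(out), 0 };

	/* One call should finish the frame: 0 means "frame complete". */
	size_t ret = ZSTD_decompressStream(ds, &o, &in);

	if (ZSTD_isError(ret) || ret != 0) {
		ZSTD_freeDStream(ds);
		return 1;
	}
	printf("decompressed %zu bytes: %s\n", o.pos, out);
	ZSTD_freeDStream(ds);
	return 0;
}
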
include/uapi/linux/btrfs.h:

@@ -614,6 +614,9 @@ struct btrfs_ioctl_clone_range_args {
  */
 #define BTRFS_DEFRAG_RANGE_COMPRESS 1
 #define BTRFS_DEFRAG_RANGE_START_IO 2
+#define BTRFS_DEFRAG_RANGE_FLAGS_SUPP	(BTRFS_DEFRAG_RANGE_COMPRESS |	\
+					 BTRFS_DEFRAG_RANGE_START_IO)
 
 struct btrfs_ioctl_defrag_range_args {
 	/* start of the defrag operation */
 	__u64 start;