for-6.1-rc4-tag
-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEE8rQSAMVO+zA4DBdWxWXV+ddtWDsFAmNtDwoACgkQxWXV+ddt
WDtcBQ/9HA9lLySbgveEj8taIbe6hXZ3Ry+1dSB/r0btb9e/tlcE7Md1ir3ewcIH
ICfjWkbltE5Xqo50Ll+cdbEt0kgMwP+2jISPUG4bikTprLRPp1q4Gl8H9frYotJL
76xC8rgmITC4ZR/PkYisauC3UJTv8EBnB19GzU+5SFh82ZfxF+XHmHFc5Wzdl8Q8
OObFOiVy28dTYubJc0cId39XceVbqv/uj+F/y5tQSZvhPhDRPZfPWBdW3LHIAMSP
xB4E9Qhbk9NAhFUHjvMwBBRao0q2D6ZO4IViB7y5qAIQOIfk6RJK11hAkeybqO+1
E8ADPY6XBEfM6SA3Bf7X4kz1gjTm/eF8l4lnLZdGT1husbBY4O3Biey0qUjZs+oP
LJTUtS3MJMEnTVoW/saUG3iTTDFFxJA+fbn6hKdNLqpKM6jjDgRx2MavbCNoUcCw
nnEVbCh+Z44xXE9+N7SH4E+ygoiwJwvkLLgYQ+ZaAHd7Wmpzmwnf9yWEiy1t1iv2
dj5bTv9jlZTacK8u/NUl6F/nqAIg5lcbNKAs1bPJ2m34ye5FKD2RPANgdqshNYFC
il7TgQjcnyVw17y0qYpqtLZrDsvTreQgUXeCprTPiTenJ1f72zyF7kHxjk12lHWd
/x22sNoX+uWlpJSW1niutVRdupVPqbwED+Qp0E5UkNaC3GeV/Bw=
=1+3V
-----END PGP SIGNATURE-----

Merge tag 'for-6.1-rc4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs fixes from David Sterba:

 - revert memory optimization for scrub blocks, this misses errors in
   2nd and following blocks

 - add exception for ENOMEM as reason for transaction abort to not
   print stack trace, syzbot has reported many

 - zoned fixes:
     - fix locking imbalance during scrub
     - initialize zones for seeding device
     - initialize zones for cloned device structures

 - when looking up device, change assertion to a real check as some of
   the search parameters can be passed by ioctl, reported by syzbot

 - fix error pointer check in self tests

* tag 'for-6.1-rc4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: zoned: fix locking imbalance on scrub
  btrfs: zoned: initialize device's zone info for seeding
  btrfs: zoned: clone zoned device info when cloning a device
  Revert "btrfs: scrub: use larger block size for data extent scrub"
  btrfs: don't print stack trace when transaction is aborted due to ENOMEM
  btrfs: selftests: fix wrong error check in btrfs_free_dummy_root()
  btrfs: fix match incorrectly in dev_args_match_device
commit 1767a722a7
@@ -113,6 +113,22 @@ noinline void btrfs_release_path(struct btrfs_path *p)
 	}
 }
 
+/*
+ * We want the transaction abort to print stack trace only for errors where the
+ * cause could be a bug, eg. due to ENOSPC, and not for common errors that are
+ * caused by external factors.
+ */
+bool __cold abort_should_print_stack(int errno)
+{
+	switch (errno) {
+	case -EIO:
+	case -EROFS:
+	case -ENOMEM:
+		return false;
+	}
+	return true;
+}
+
 /*
  * safely gets a reference on the root node of a tree. A lock
  * is not taken, so a concurrent writer may put a different node

@@ -3796,9 +3796,11 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
 			       const char *function,
 			       unsigned int line, int errno, bool first_hit);
 
+bool __cold abort_should_print_stack(int errno);
+
 /*
  * Call btrfs_abort_transaction as early as possible when an error condition is
- * detected, that way the exact line number is reported.
+ * detected, that way the exact stack trace is reported for some errors.
  */
 #define btrfs_abort_transaction(trans, errno)		\
 do {							\

@@ -3807,10 +3809,11 @@ do { \
 	if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED,	\
 			&((trans)->fs_info->fs_state))) {	\
 		first = true;					\
-		if ((errno) != -EIO && (errno) != -EROFS) {	\
-			WARN(1, KERN_DEBUG			\
+		if (WARN(abort_should_print_stack(errno),	\
+			KERN_DEBUG				\
 			"BTRFS: Transaction aborted (error %d)\n",	\
-			(errno));				\
+			(errno))) {				\
+			/* Stack trace printed. */		\
 		} else {					\
 			btrfs_debug((trans)->fs_info,		\
 				    "Transaction aborted (error %d)", \

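Taken together, the three hunks above change when a transaction abort emits a WARN with a stack trace: -EIO, -EROFS and now -ENOMEM are treated as errors caused by external factors and only logged via btrfs_debug(), while anything else (for example -ENOSPC, where the cause could be a bug) still warns. A minimal userspace sketch of the same errno classification, for illustration only and not kernel code:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Userspace model of abort_should_print_stack(): same errno classification. */
static bool abort_should_print_stack(int errnum)
{
	switch (errnum) {
	case -EIO:
	case -EROFS:
	case -ENOMEM:
		return false;
	}
	return true;
}

int main(void)
{
	const int samples[] = { -ENOMEM, -EIO, -EROFS, -ENOSPC, -EINVAL };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("abort with %6d -> %s\n", samples[i],
		       abort_should_print_stack(samples[i]) ?
		       "WARN + stack trace" : "quiet debug message");
	return 0;
}
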
@@ -2551,7 +2551,9 @@ static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
 		fs_info->dev_root = root;
 	}
 	/* Initialize fs_info for all devices in any case */
-	btrfs_init_devices_late(fs_info);
+	ret = btrfs_init_devices_late(fs_info);
+	if (ret)
+		goto out;
 
 	/*
 	 * This tree can share blocks with some other fs tree during relocation

@@ -2672,17 +2672,11 @@ static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map,
 	u8 csum[BTRFS_CSUM_SIZE];
 	u32 blocksize;
 
-	/*
-	 * Block size determines how many scrub_block will be allocated. Here
-	 * we use BTRFS_STRIPE_LEN (64KiB) as default limit, so we won't
-	 * allocate too many scrub_block, while still won't cause too large
-	 * bios for large extents.
-	 */
 	if (flags & BTRFS_EXTENT_FLAG_DATA) {
 		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
 			blocksize = map->stripe_len;
 		else
-			blocksize = BTRFS_STRIPE_LEN;
+			blocksize = sctx->fs_info->sectorsize;
 		spin_lock(&sctx->stat_lock);
 		sctx->stat.data_extents_scrubbed++;
 		sctx->stat.data_bytes_scrubbed += len;

@@ -3917,7 +3911,6 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 
 		if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) {
 			if (!test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags)) {
-				spin_unlock(&cache->lock);
 				btrfs_put_block_group(cache);
 				goto skip;
 			}

@@ -200,7 +200,7 @@ void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info)
 
 void btrfs_free_dummy_root(struct btrfs_root *root)
 {
-	if (!root)
+	if (IS_ERR_OR_NULL(root))
 		return;
 	/* Will be freed by btrfs_free_fs_roots */
 	if (WARN_ON(test_bit(BTRFS_ROOT_IN_RADIX, &root->state)))

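The one-line selftest fix above matters because the dummy-root helpers can hand back an ERR_PTR()-encoded errno rather than NULL; the old "if (!root)" check let such a pointer through and the function went on to dereference it. A simplified userspace rendering of the kernel's error-pointer convention (see include/linux/err.h), illustrative only:

#include <stdbool.h>
#include <stdio.h>

/*
 * Errno values are encoded in the last page of the address space, so a
 * "pointer" can carry a real address, NULL, or a small negative error code.
 */
#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline bool IS_ERR_OR_NULL(const void *ptr)
{
	return !ptr || (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	int object = 42;
	void *ok = &object;
	void *err = ERR_PTR(-12);	/* e.g. -ENOMEM */

	/* The old "!root" style check only catches NULL, not error pointers. */
	printf("valid : !p=%d  IS_ERR_OR_NULL=%d\n", !ok, IS_ERR_OR_NULL(ok));
	printf("NULL  : !p=%d  IS_ERR_OR_NULL=%d\n", !(void *)0, IS_ERR_OR_NULL(NULL));
	printf("ERR   : !p=%d  IS_ERR_OR_NULL=%d\n", !err, IS_ERR_OR_NULL(err));
	return 0;
}
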
@@ -1011,6 +1011,18 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
 		rcu_assign_pointer(device->name, name);
 	}
 
+	if (orig_dev->zone_info) {
+		struct btrfs_zoned_device_info *zone_info;
+
+		zone_info = btrfs_clone_dev_zone_info(orig_dev);
+		if (!zone_info) {
+			btrfs_free_device(device);
+			ret = -ENOMEM;
+			goto error;
+		}
+		device->zone_info = zone_info;
+	}
+
 	list_add(&device->dev_list, &fs_devices->devices);
 	device->fs_devices = fs_devices;
 	fs_devices->num_devices++;

@@ -6918,18 +6930,18 @@ static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args,
 static bool dev_args_match_device(const struct btrfs_dev_lookup_args *args,
 				  const struct btrfs_device *device)
 {
-	ASSERT((args->devid != (u64)-1) || args->missing);
+	if (args->missing) {
+		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) &&
+		    !device->bdev)
+			return true;
+		return false;
+	}
 
-	if ((args->devid != (u64)-1) && device->devid != args->devid)
+	if (device->devid != args->devid)
 		return false;
 	if (args->uuid && memcmp(device->uuid, args->uuid, BTRFS_UUID_SIZE) != 0)
 		return false;
-	if (!args->missing)
-		return true;
-	if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) &&
-	    !device->bdev)
-		return true;
-	return false;
+	return true;
 }
 
 /*

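The rewrite above replaces an assertion with real argument validation: previously a wildcard devid of (u64)-1 was only guarded by ASSERT(), so with assertions compiled out a lookup built from ioctl-supplied parameters could skip the devid comparison entirely and match the wrong device. The new version handles the missing-device case first and otherwise always compares devid (and uuid when given). A reduced userspace model of the old and new matching, with illustrative struct names and the uuid check omitted (not the kernel API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins carrying only the fields the matching logic uses. */
struct lookup_args {
	uint64_t devid;
	bool missing;
};

struct device {
	uint64_t devid;
	bool in_fs_metadata;
	bool has_bdev;
};

/* Old logic: devid == (u64)-1 skipped the comparison entirely. */
static bool match_old(const struct lookup_args *args, const struct device *dev)
{
	if (args->devid != (uint64_t)-1 && dev->devid != args->devid)
		return false;
	if (!args->missing)
		return true;
	return dev->in_fs_metadata && !dev->has_bdev;
}

/* New logic: handle "missing" first, otherwise always compare devid. */
static bool match_new(const struct lookup_args *args, const struct device *dev)
{
	if (args->missing)
		return dev->in_fs_metadata && !dev->has_bdev;
	return dev->devid == args->devid;
}

int main(void)
{
	/* devid -1 without the missing flag, as an ioctl could submit it. */
	struct lookup_args args = { .devid = (uint64_t)-1, .missing = false };
	struct device dev = { .devid = 3, .in_fs_metadata = true, .has_bdev = true };

	printf("old match: %d, new match: %d\n",
	       match_old(&args, &dev), match_new(&args, &dev));
	return 0;
}
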
@@ -7744,10 +7756,11 @@ error:
 	return ret;
 }
 
-void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
+int btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
 {
 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
 	struct btrfs_device *device;
+	int ret = 0;
 
 	fs_devices->fs_info = fs_info;
 

@@ -7756,12 +7769,18 @@ void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
 		device->fs_info = fs_info;
 
 	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
-		list_for_each_entry(device, &seed_devs->devices, dev_list)
+		list_for_each_entry(device, &seed_devs->devices, dev_list) {
 			device->fs_info = fs_info;
+			ret = btrfs_get_dev_zone_info(device, false);
+			if (ret)
+				break;
+		}
 
 		seed_devs->fs_info = fs_info;
 	}
 	mutex_unlock(&fs_devices->device_list_mutex);
+
+	return ret;
 }
 
 static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,

@@ -671,7 +671,7 @@ int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index);
 int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
 			struct btrfs_ioctl_get_dev_stats *stats);
-void btrfs_init_devices_late(struct btrfs_fs_info *fs_info);
+int btrfs_init_devices_late(struct btrfs_fs_info *fs_info);
 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info);
 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans);
 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev);

@@ -639,6 +639,46 @@ void btrfs_destroy_dev_zone_info(struct btrfs_device *device)
 	device->zone_info = NULL;
 }
 
+struct btrfs_zoned_device_info *btrfs_clone_dev_zone_info(struct btrfs_device *orig_dev)
+{
+	struct btrfs_zoned_device_info *zone_info;
+
+	zone_info = kmemdup(orig_dev->zone_info, sizeof(*zone_info), GFP_KERNEL);
+	if (!zone_info)
+		return NULL;
+
+	zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
+	if (!zone_info->seq_zones)
+		goto out;
+
+	bitmap_copy(zone_info->seq_zones, orig_dev->zone_info->seq_zones,
+		    zone_info->nr_zones);
+
+	zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
+	if (!zone_info->empty_zones)
+		goto out;
+
+	bitmap_copy(zone_info->empty_zones, orig_dev->zone_info->empty_zones,
+		    zone_info->nr_zones);
+
+	zone_info->active_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
+	if (!zone_info->active_zones)
+		goto out;
+
+	bitmap_copy(zone_info->active_zones, orig_dev->zone_info->active_zones,
+		    zone_info->nr_zones);
+	zone_info->zone_cache = NULL;
+
+	return zone_info;
+
+out:
+	bitmap_free(zone_info->seq_zones);
+	bitmap_free(zone_info->empty_zones);
+	bitmap_free(zone_info->active_zones);
+	kfree(zone_info);
+	return NULL;
+}
+
 int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
 		       struct blk_zone *zone)
 {

@@ -36,6 +36,7 @@ int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
 int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info);
 int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache);
 void btrfs_destroy_dev_zone_info(struct btrfs_device *device);
+struct btrfs_zoned_device_info *btrfs_clone_dev_zone_info(struct btrfs_device *orig_dev);
 int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info);
 int btrfs_check_mountopts_zoned(struct btrfs_fs_info *info);
 int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,

@@ -103,6 +104,16 @@ static inline int btrfs_get_dev_zone_info(struct btrfs_device *device,
 
 static inline void btrfs_destroy_dev_zone_info(struct btrfs_device *device) { }
 
+/*
+ * In case the kernel is compiled without CONFIG_BLK_DEV_ZONED we'll never call
+ * into btrfs_clone_dev_zone_info() so it's safe to return NULL here.
+ */
+static inline struct btrfs_zoned_device_info *btrfs_clone_dev_zone_info(
+						struct btrfs_device *orig_dev)
+{
+	return NULL;
+}
+
 static inline int btrfs_check_zoned_mode(const struct btrfs_fs_info *fs_info)
 {
 	if (!btrfs_is_zoned(fs_info))