for-5.3-rc4-tag

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEE8rQSAMVO+zA4DBdWxWXV+ddtWDsFAl1ZOygACgkQxWXV+ddt
WDvokw/8CRjbN5Bjlk/JzmikH+mU28Cd7qQgahWw7Afkyh5Gzb4IJJbNXzapy988
dMMKYF2H6lxY46EZG8cF4MFjCv8L4L1eZ9CqwfZyf7MfzPL2pnLSN77QJYYebWp3
y9rc8xv+qUdIcumQP25yxXmtN0YxT5bIJiuCmpHJFNfyijHVRnXV2CxQ8nwe63/1
G25a03x6BNKtSrU3ZP0fW2VjARKzIF7i+bEy4Ew3n3dNKqAiROYecswcBedhCBkF
D9hL62uHcxQSeHi/6lAZYKpsp4g4pKEO4c92MxsI8PJtd4zgHQG7MttXsHC1F1FQ
lHcP3vxKICOOMa13W6QdytSX6uSpjeLyMTDfmvaahQmwG6I5dBN56pkGlWdDMKNn
NUCNBmV063D7Shed4W2uIafLfo3BLEwKr2pd6pOfOZHwOKPWblJ0v4KzVDLoKC7v
CA5HB9BBz4KfSyp3fkm9B5VGJW938vRHVfx55IoakL+tfs67cKDYo8QEFHDuz0P5
DrYNwKvp4a5llGQ6vdRKfeiN7vBea10795MI5vFJiGUHfY3pN99R4UUed7kaul58
n6gqYukcPtIBqm8auxK037nngA+V0N2y0ceM1/aKUGaZVlCtSmEKXseYjbaiH6fP
MEZSgZn4W5qnu2oQuKohfQsNHtY5WP9489GpByy+DS5QsbDaueg=
=ovM7
-----END PGP SIGNATURE-----

Merge tag 'for-5.3-rc4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs fixes from David Sterba:

 "Two fixes that popped up during testing:

  - fix for the sysfs-related code that adds/removes block groups;
    warnings appear during several fstests in connection with the sysfs
    updates in 5.3. The fix essentially replaces a workaround with a
    scoped NOFS context and also applies to the 5.2-based branch

  - add a sanity check of the trim range"

* tag 'for-5.3-rc4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: trim: Check the range passed into to prevent overflow
  Btrfs: fix sysfs warning and missing raid sysfs directories
commit 3039fadf2b
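For context, the sysfs fix relies on the kernel's scoped NOFS API from
<linux/sched/mm.h>: between memalloc_nofs_save() and memalloc_nofs_restore(),
any GFP_KERNEL allocation is implicitly treated as GFP_NOFS, so the allocator
will not recurse into the filesystem. A minimal sketch of the pattern (the
helper name alloc_in_nofs_scope is invented for illustration and is not part
of the patch):

	#include <linux/sched/mm.h>
	#include <linux/slab.h>

	static void *alloc_in_nofs_scope(size_t size)
	{
		unsigned int nofs_flag;
		void *obj;

		nofs_flag = memalloc_nofs_save();	/* enter NOFS scope */
		obj = kzalloc(size, GFP_KERNEL);	/* behaves as GFP_NOFS here */
		memalloc_nofs_restore(nofs_flag);	/* leave NOFS scope */
		return obj;
	}

This is why the patch below can drop GFP_NOFS from kzalloc() and call
kobject_add() (which allocates with GFP_KERNEL internally) without risking a
reclaim-induced deadlock.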
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
@@ -401,7 +401,6 @@ struct btrfs_dev_replace {
 struct raid_kobject {
 	u64 flags;
 	struct kobject kobj;
-	struct list_head list;
 };

 /*
@@ -915,8 +914,6 @@ struct btrfs_fs_info {
 	u32 thread_pool_size;

 	struct kobject *space_info_kobj;
-	struct list_head pending_raid_kobjs;
-	spinlock_t pending_raid_kobjs_lock; /* uncontended */

 	u64 total_pinned;

@@ -2698,7 +2695,6 @@ int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr);
 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 			   u64 bytes_used, u64 type, u64 chunk_offset,
 			   u64 size);
-void btrfs_add_raid_kobjects(struct btrfs_fs_info *fs_info);
 struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
 				struct btrfs_fs_info *fs_info,
 				const u64 chunk_offset);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
@@ -2683,8 +2683,6 @@ int open_ctree(struct super_block *sb,
 	INIT_LIST_HEAD(&fs_info->delayed_iputs);
 	INIT_LIST_HEAD(&fs_info->delalloc_roots);
 	INIT_LIST_HEAD(&fs_info->caching_block_groups);
-	INIT_LIST_HEAD(&fs_info->pending_raid_kobjs);
-	spin_lock_init(&fs_info->pending_raid_kobjs_lock);
 	spin_lock_init(&fs_info->delalloc_root_lock);
 	spin_lock_init(&fs_info->trans_lock);
 	spin_lock_init(&fs_info->fs_roots_radix_lock);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
@@ -4,6 +4,7 @@
  */

 #include <linux/sched.h>
+#include <linux/sched/mm.h>
 #include <linux/sched/signal.h>
 #include <linux/pagemap.h>
 #include <linux/writeback.h>
@@ -7888,33 +7889,6 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
 	return 0;
 }

-/* link_block_group will queue up kobjects to add when we're reclaim-safe */
-void btrfs_add_raid_kobjects(struct btrfs_fs_info *fs_info)
-{
-	struct btrfs_space_info *space_info;
-	struct raid_kobject *rkobj;
-	LIST_HEAD(list);
-	int ret = 0;
-
-	spin_lock(&fs_info->pending_raid_kobjs_lock);
-	list_splice_init(&fs_info->pending_raid_kobjs, &list);
-	spin_unlock(&fs_info->pending_raid_kobjs_lock);
-
-	list_for_each_entry(rkobj, &list, list) {
-		space_info = btrfs_find_space_info(fs_info, rkobj->flags);
-
-		ret = kobject_add(&rkobj->kobj, &space_info->kobj,
-				  "%s", btrfs_bg_type_to_raid_name(rkobj->flags));
-		if (ret) {
-			kobject_put(&rkobj->kobj);
-			break;
-		}
-	}
-	if (ret)
-		btrfs_warn(fs_info,
-			   "failed to add kobject for block cache, ignoring");
-}
-
 static void link_block_group(struct btrfs_block_group_cache *cache)
 {
 	struct btrfs_space_info *space_info = cache->space_info;
@@ -7929,18 +7903,36 @@ static void link_block_group(struct btrfs_block_group_cache *cache)
 	up_write(&space_info->groups_sem);

 	if (first) {
-		struct raid_kobject *rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
+		struct raid_kobject *rkobj;
+		unsigned int nofs_flag;
+		int ret;
+
+		/*
+		 * Setup a NOFS context because kobject_add(), deep in its call
+		 * chain, does GFP_KERNEL allocations, and we are often called
+		 * in a context where if reclaim is triggered we can deadlock
+		 * (we are either holding a transaction handle or some lock
+		 * required for a transaction commit).
+		 */
+		nofs_flag = memalloc_nofs_save();
+		rkobj = kzalloc(sizeof(*rkobj), GFP_KERNEL);
 		if (!rkobj) {
+			memalloc_nofs_restore(nofs_flag);
 			btrfs_warn(cache->fs_info,
 				"couldn't alloc memory for raid level kobject");
 			return;
 		}
 		rkobj->flags = cache->flags;
 		kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
-
-		spin_lock(&fs_info->pending_raid_kobjs_lock);
-		list_add_tail(&rkobj->list, &fs_info->pending_raid_kobjs);
-		spin_unlock(&fs_info->pending_raid_kobjs_lock);
+		ret = kobject_add(&rkobj->kobj, &space_info->kobj, "%s",
+				  btrfs_bg_type_to_raid_name(rkobj->flags));
+		memalloc_nofs_restore(nofs_flag);
+		if (ret) {
+			kobject_put(&rkobj->kobj);
+			btrfs_warn(fs_info,
+				   "failed to add kobject for block cache, ignoring");
+			return;
+		}
 		space_info->block_group_kobjs[index] = &rkobj->kobj;
 	}
 }
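A side note on the error path above: once kobject_init() has been called, the
object must be released through kobject_put() rather than kfree(), so that the
ktype's release handler runs. A minimal sketch of that lifecycle (my_ktype and
the helper name are hypothetical, assumed to be defined elsewhere with a
release() method that frees the object):

	#include <linux/kobject.h>
	#include <linux/slab.h>

	extern struct kobj_type my_ktype;	/* assumed: release() frees the kobject */

	static int add_child_kobject(struct kobject *parent, const char *name)
	{
		struct kobject *kobj;
		int ret;

		kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
		if (!kobj)
			return -ENOMEM;

		kobject_init(kobj, &my_ktype);
		ret = kobject_add(kobj, parent, "%s", name);
		if (ret)
			kobject_put(kobj);	/* drops the ref; release() frees it */
		return ret;
	}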
@@ -8206,7 +8198,6 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
 			inc_block_group_ro(cache, 1);
 	}

-	btrfs_add_raid_kobjects(info);
 	btrfs_init_global_block_rsv(info);
 	ret = check_chunk_block_group_mappings(info);
 error:
@@ -8975,6 +8966,7 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
 	struct btrfs_device *device;
 	struct list_head *devices;
 	u64 group_trimmed;
+	u64 range_end = U64_MAX;
 	u64 start;
 	u64 end;
 	u64 trimmed = 0;
@@ -8984,16 +8976,23 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
 	int dev_ret = 0;
 	int ret = 0;

+	/*
+	 * Check range overflow if range->len is set.
+	 * The default range->len is U64_MAX.
+	 */
+	if (range->len != U64_MAX &&
+	    check_add_overflow(range->start, range->len, &range_end))
+		return -EINVAL;
+
 	cache = btrfs_lookup_first_block_group(fs_info, range->start);
 	for (; cache; cache = next_block_group(cache)) {
-		if (cache->key.objectid >= (range->start + range->len)) {
+		if (cache->key.objectid >= range_end) {
 			btrfs_put_block_group(cache);
 			break;
 		}

 		start = max(range->start, cache->key.objectid);
-		end = min(range->start + range->len,
-			  cache->key.objectid + cache->key.offset);
+		end = min(range_end, cache->key.objectid + cache->key.offset);

 		if (end - start >= range->minlen) {
 			if (!block_group_cache_done(cache)) {
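The overflow check above uses check_add_overflow() from <linux/overflow.h>,
which stores the (possibly wrapped) sum in its third argument and returns true
when the addition overflows. A minimal sketch of the semantics (the helper
name clamp_range_end is invented for illustration):

	#include <linux/kernel.h>
	#include <linux/overflow.h>

	static int clamp_range_end(u64 start, u64 len, u64 *end)
	{
		/* An open-ended range (len == U64_MAX) simply runs to U64_MAX. */
		if (len == U64_MAX) {
			*end = U64_MAX;
			return 0;
		}
		/* e.g. start = 16, len = U64_MAX - 8 wraps and is rejected */
		if (check_add_overflow(start, len, end))
			return -EINVAL;
		return 0;
	}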
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
@@ -3087,16 +3087,6 @@ static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
 	if (ret)
 		return ret;

-	/*
-	 * We add the kobjects here (and after forcing data chunk creation)
-	 * since relocation is the only place we'll create chunks of a new
-	 * type at runtime. The only place where we'll remove the last
-	 * chunk of a type is the call immediately below this one. Even
-	 * so, we're protected against races with the cleaner thread since
-	 * we're covered by the delete_unused_bgs_mutex.
-	 */
-	btrfs_add_raid_kobjects(fs_info);
-
 	trans = btrfs_start_trans_remove_block_group(root->fs_info,
 						     chunk_offset);
 	if (IS_ERR(trans)) {
@@ -3223,9 +3213,6 @@ static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
 			btrfs_end_transaction(trans);
 			if (ret < 0)
 				return ret;
-
-			btrfs_add_raid_kobjects(fs_info);
-
 			return 1;
 		}
 	}