Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs:
  btrfs: rename the option to nospace_cache
  Btrfs: handle bio_add_page failure gracefully in scrub
  Btrfs: fix deadlock caused by the race between relocation
  Btrfs: only map pages if we know we need them when reading the space cache
  Btrfs: fix orphan backref nodes
  Btrfs: Abstract similar code for btrfs_block_rsv_add{, _noflush}
  Btrfs: fix unreleased path in btrfs_orphan_cleanup()
  Btrfs: fix no reserved space for writing out inode cache
  Btrfs: fix nocow when deleting the item
  Btrfs: tweak the delayed inode reservations again
  Btrfs: rework error handling in btrfs_mount()
  Btrfs: close devices on all error paths in open_ctree()
  Btrfs: avoid null dereference and leaks when bailing from open_ctree()
  Btrfs: fix subvol_name leak on error in btrfs_mount()
  Btrfs: fix memory leak in btrfs_parse_early_options()
  Btrfs: fix our reservations for updating an inode when completing io
  Btrfs: fix oops on NULL trans handle in btrfs_truncate
  btrfs: fix double-free 'tree_root' in 'btrfs_mount()'
commit c1f4246716
fs/btrfs/btrfs_inode.h
@@ -147,14 +147,12 @@ struct btrfs_inode {
	 * the btrfs file release call will add this inode to the
	 * ordered operations list so that we make sure to flush out any
	 * new data the application may have written before commit.
	 *
	 * yes, it's silly to have a single bitflag, but we might grow more
	 * of these.
	 */
	unsigned ordered_data_close:1;
	unsigned orphan_meta_reserved:1;
	unsigned dummy_inode:1;
	unsigned in_defrag:1;
	unsigned delalloc_meta_reserved:1;

	/*
	 * always compress this one file
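The flags in this hunk are C bitfields, so the new delalloc_meta_reserved bit packs into the same word as its neighbors. A small, self-contained sketch of the idiom (the names here are illustrative, not btrfs code):

	#include <stdio.h>

	struct inode_flags {
		unsigned ordered_data_close:1;
		unsigned orphan_meta_reserved:1;
		unsigned delalloc_meta_reserved:1;	/* new bit, same storage word */
	};

	int main(void)
	{
		struct inode_flags f = { .delalloc_meta_reserved = 1 };

		/* all three one-bit flags share a single unsigned int */
		printf("%zu bytes, delalloc_meta_reserved=%u\n",
		       sizeof(f), f.delalloc_meta_reserved);
		return 0;
	}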
fs/btrfs/delayed-inode.c
@@ -617,12 +617,14 @@ static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct inode *inode,
					struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;
	int release = false;

	src_rsv = trans->block_rsv;
	dst_rsv = &root->fs_info->delayed_block_rsv;
@@ -652,12 +654,65 @@ static int btrfs_delayed_inode_reserve_metadata(
		if (!ret)
			node->bytes_reserved = num_bytes;
		return ret;
	} else if (src_rsv == &root->fs_info->delalloc_block_rsv) {
		spin_lock(&BTRFS_I(inode)->lock);
		if (BTRFS_I(inode)->delalloc_meta_reserved) {
			BTRFS_I(inode)->delalloc_meta_reserved = 0;
			spin_unlock(&BTRFS_I(inode)->lock);
			release = true;
			goto migrate;
		}
		spin_unlock(&BTRFS_I(inode)->lock);

		/* Ok we didn't have space pre-reserved.  This shouldn't happen
		 * too often but it can happen if we do delalloc to an existing
		 * inode which gets dirtied because of the time update, and then
		 * isn't touched again until after the transaction commits and
		 * then we try to write out the data.  First try to be nice and
		 * reserve something strictly for us.  If not be a pain and try
		 * to steal from the delalloc block rsv.
		 */
		ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes);
		if (!ret)
			goto out;

		ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
		if (!ret)
			goto out;

		/*
		 * Ok this is a problem, let's just steal from the global rsv
		 * since this really shouldn't happen that often.
		 */
		WARN_ON(1);
		ret = btrfs_block_rsv_migrate(&root->fs_info->global_block_rsv,
					      dst_rsv, num_bytes);
		goto out;
	}

migrate:
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);

out:
	/*
	 * Migrate only takes a reservation, it doesn't touch the size of the
	 * block_rsv.  This is to simplify people who don't normally have things
	 * migrated from their block rsv.  If they go to release their
	 * reservation, that will decrease the size as well, so if migrate
	 * reduced size we'd end up with a negative size.  But for the
	 * delalloc_meta_reserved stuff we will only know to drop 1 reservation,
	 * but we could in fact do this reserve/migrate dance several times
	 * between the time we did the original reservation and we'd clean it
	 * up.  So to take care of this, release the space for the meta
	 * reservation here.  I think it may be time for a documentation page on
	 * how block rsvs work.
	 */
	if (!ret)
		node->bytes_reserved = num_bytes;

	if (release)
		btrfs_block_rsv_release(root, src_rsv, num_bytes);

	return ret;
}

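The hunk above is a three-step fallback: reserve fresh space without flushing, then migrate the caller's existing reservation, and only as a last resort steal from the global reserve. A self-contained userspace sketch of that shape (the struct and helpers are illustrative, not the btrfs block_rsv API):

	#include <stdint.h>
	#include <stdio.h>

	struct rsv { uint64_t reserved; uint64_t size; };	/* assumes size >= reserved */

	/* Try to take 'bytes' out of 'r' without flushing anything. */
	static int try_reserve(struct rsv *r, uint64_t bytes)
	{
		if (r->size - r->reserved < bytes)
			return -1;		/* -ENOSPC in the kernel */
		r->reserved += bytes;
		return 0;
	}

	/* Move an already-held reservation from 'src' to 'dst'. */
	static int migrate(struct rsv *src, struct rsv *dst, uint64_t bytes)
	{
		if (src->reserved < bytes)
			return -1;
		src->reserved -= bytes;
		dst->reserved += bytes;
		return 0;
	}

	/* The three-step fallback: reserve fresh, migrate, then steal globally. */
	static int reserve_with_fallback(struct rsv *dst, struct rsv *src,
					 struct rsv *global, uint64_t bytes)
	{
		if (try_reserve(dst, bytes) == 0)
			return 0;
		if (migrate(src, dst, bytes) == 0)
			return 0;
		fprintf(stderr, "warn: stealing from global reserve\n");	/* WARN_ON(1) */
		return migrate(global, dst, bytes);
	}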
@@ -1708,7 +1763,8 @@ int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
		goto release_node;
	}

	ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
	ret = btrfs_delayed_inode_reserve_metadata(trans, root, inode,
						   delayed_node);
	if (ret)
		goto release_node;

fs/btrfs/disk-io.c
@@ -1890,31 +1890,32 @@ struct btrfs_root *open_ctree(struct super_block *sb,
	u64 features;
	struct btrfs_key location;
	struct buffer_head *bh;
	struct btrfs_root *extent_root = kzalloc(sizeof(struct btrfs_root),
						 GFP_NOFS);
	struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root),
					       GFP_NOFS);
	struct btrfs_super_block *disk_super;
	struct btrfs_root *tree_root = btrfs_sb(sb);
	struct btrfs_fs_info *fs_info = NULL;
	struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root),
						GFP_NOFS);
	struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root),
					      GFP_NOFS);
	struct btrfs_fs_info *fs_info = tree_root->fs_info;
	struct btrfs_root *extent_root;
	struct btrfs_root *csum_root;
	struct btrfs_root *chunk_root;
	struct btrfs_root *dev_root;
	struct btrfs_root *log_tree_root;

	int ret;
	int err = -EINVAL;
	int num_backups_tried = 0;
	int backup_index = 0;

	struct btrfs_super_block *disk_super;
	extent_root = fs_info->extent_root =
		kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
	csum_root = fs_info->csum_root =
		kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
	chunk_root = fs_info->chunk_root =
		kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
	dev_root = fs_info->dev_root =
		kzalloc(sizeof(struct btrfs_root), GFP_NOFS);

	if (!extent_root || !tree_root || !tree_root->fs_info ||
	    !chunk_root || !dev_root || !csum_root) {
	if (!extent_root || !csum_root || !chunk_root || !dev_root) {
		err = -ENOMEM;
		goto fail;
	}
	fs_info = tree_root->fs_info;

	ret = init_srcu_struct(&fs_info->subvol_srcu);
	if (ret) {
@@ -1954,12 +1955,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
	mutex_init(&fs_info->reloc_mutex);

	init_completion(&fs_info->kobj_unregister);
	fs_info->tree_root = tree_root;
	fs_info->extent_root = extent_root;
	fs_info->csum_root = csum_root;
	fs_info->chunk_root = chunk_root;
	fs_info->dev_root = dev_root;
	fs_info->fs_devices = fs_devices;
	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
	INIT_LIST_HEAD(&fs_info->space_info);
	btrfs_mapping_init(&fs_info->mapping_tree);
@@ -2465,21 +2460,20 @@ fail_sb_buffer:
	btrfs_stop_workers(&fs_info->caching_workers);
fail_alloc:
fail_iput:
	btrfs_mapping_tree_free(&fs_info->mapping_tree);

	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
	iput(fs_info->btree_inode);

	btrfs_close_devices(fs_info->fs_devices);
	btrfs_mapping_tree_free(&fs_info->mapping_tree);
fail_bdi:
	bdi_destroy(&fs_info->bdi);
fail_srcu:
	cleanup_srcu_struct(&fs_info->subvol_srcu);
fail:
	btrfs_close_devices(fs_info->fs_devices);
	free_fs_info(fs_info);
	return ERR_PTR(err);

recovery_tree_root:

	if (!btrfs_test_opt(tree_root, RECOVERY))
		goto fail_tree_roots;

fs/btrfs/extent-tree.c
@@ -3797,16 +3797,16 @@ void btrfs_free_block_rsv(struct btrfs_root *root,
	kfree(rsv);
}

int btrfs_block_rsv_add(struct btrfs_root *root,
			struct btrfs_block_rsv *block_rsv,
			u64 num_bytes)
static inline int __block_rsv_add(struct btrfs_root *root,
				  struct btrfs_block_rsv *block_rsv,
				  u64 num_bytes, int flush)
{
	int ret;

	if (num_bytes == 0)
		return 0;

	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 1);
	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
	if (!ret) {
		block_rsv_add_bytes(block_rsv, num_bytes, 1);
		return 0;
@@ -3815,22 +3815,18 @@ int btrfs_block_rsv_add(struct btrfs_root *root,
	return ret;
}

int btrfs_block_rsv_add(struct btrfs_root *root,
			struct btrfs_block_rsv *block_rsv,
			u64 num_bytes)
{
	return __block_rsv_add(root, block_rsv, num_bytes, 1);
}

int btrfs_block_rsv_add_noflush(struct btrfs_root *root,
				struct btrfs_block_rsv *block_rsv,
				u64 num_bytes)
{
	int ret;

	if (num_bytes == 0)
		return 0;

	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 0);
	if (!ret) {
		block_rsv_add_bytes(block_rsv, num_bytes, 1);
		return 0;
	}

	return ret;
	return __block_rsv_add(root, block_rsv, num_bytes, 0);
}

int btrfs_block_rsv_check(struct btrfs_root *root,
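The abstraction above keeps one static helper that takes the flush flag and exposes two thin wrappers, so call sites say _noflush instead of passing a bare 0 or 1. The same shape in a self-contained sketch (all names here are made up):

	/* One static helper carries the behavior; thin public wrappers pin the
	 * flag value, so callers never pass a bare 0/1 at call sites. */
	static int do_reserve(unsigned long long *pool, unsigned long long bytes,
			      int flush)
	{
		/* 'flush' would trigger writeback before giving up; elided here */
		(void)flush;
		if (*pool < bytes)
			return -1;
		*pool -= bytes;
		return 0;
	}

	int reserve(unsigned long long *pool, unsigned long long bytes)
	{
		return do_reserve(pool, bytes, 1);
	}

	int reserve_noflush(unsigned long long *pool, unsigned long long bytes)
	{
		return do_reserve(pool, bytes, 0);
	}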
@@ -4064,23 +4060,30 @@ int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
 */
static unsigned drop_outstanding_extent(struct inode *inode)
{
	unsigned drop_inode_space = 0;
	unsigned dropped_extents = 0;

	BUG_ON(!BTRFS_I(inode)->outstanding_extents);
	BTRFS_I(inode)->outstanding_extents--;

	if (BTRFS_I(inode)->outstanding_extents == 0 &&
	    BTRFS_I(inode)->delalloc_meta_reserved) {
		drop_inode_space = 1;
		BTRFS_I(inode)->delalloc_meta_reserved = 0;
	}

	/*
	 * If we have more or the same amount of outstanding extents than we have
	 * reserved then we need to leave the reserved extents count alone.
	 */
	if (BTRFS_I(inode)->outstanding_extents >=
	    BTRFS_I(inode)->reserved_extents)
		return 0;
		return drop_inode_space;

	dropped_extents = BTRFS_I(inode)->reserved_extents -
		BTRFS_I(inode)->outstanding_extents;
	BTRFS_I(inode)->reserved_extents -= dropped_extents;
	return dropped_extents;
	return dropped_extents + drop_inode_space;
}

/**
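The accounting in drop_outstanding_extent() now folds in the one-item inode reservation. A toy model with the same arithmetic (field names shortened, not the real struct):

	#include <assert.h>

	/* 'outstanding' extents still in flight vs 'reserved' extents we
	 * hold metadata space for; 'inode_rsv' models delalloc_meta_reserved. */
	struct acct { unsigned outstanding, reserved, inode_rsv; };

	static unsigned drop_one(struct acct *a)
	{
		unsigned drop_inode_space = 0, dropped = 0;

		assert(a->outstanding);
		a->outstanding--;

		/* the last outstanding extent also releases the inode-update item */
		if (a->outstanding == 0 && a->inode_rsv) {
			a->inode_rsv = 0;
			drop_inode_space = 1;
		}

		if (a->outstanding >= a->reserved)
			return drop_inode_space;

		dropped = a->reserved - a->outstanding;
		a->reserved -= dropped;
		return dropped + drop_inode_space;
	}

For example, starting from outstanding=1, reserved=3 with the inode reservation held, one call returns 4: the three excess extent reservations plus the inode item.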
@@ -4166,9 +4169,18 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
		nr_extents = BTRFS_I(inode)->outstanding_extents -
			BTRFS_I(inode)->reserved_extents;
		BTRFS_I(inode)->reserved_extents += nr_extents;

		to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
	}

	/*
	 * Add an item to reserve for updating the inode when we complete the
	 * delalloc io.
	 */
	if (!BTRFS_I(inode)->delalloc_meta_reserved) {
		nr_extents++;
		BTRFS_I(inode)->delalloc_meta_reserved = 1;
	}

	to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
	to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
	spin_unlock(&BTRFS_I(inode)->lock);

fs/btrfs/free-space-cache.c
@@ -537,6 +537,13 @@ static int io_ctl_read_entry(struct io_ctl *io_ctl,
			    struct btrfs_free_space *entry, u8 *type)
{
	struct btrfs_free_space_entry *e;
	int ret;

	if (!io_ctl->cur) {
		ret = io_ctl_check_crc(io_ctl, io_ctl->index);
		if (ret)
			return ret;
	}

	e = io_ctl->cur;
	entry->offset = le64_to_cpu(e->offset);
@@ -550,10 +557,7 @@ static int io_ctl_read_entry(struct io_ctl *io_ctl,

	io_ctl_unmap_page(io_ctl);

	if (io_ctl->index >= io_ctl->num_pages)
		return 0;

	return io_ctl_check_crc(io_ctl, io_ctl->index);
	return 0;
}

static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
@@ -561,9 +565,6 @@ static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
{
	int ret;

	if (io_ctl->cur && io_ctl->cur != io_ctl->orig)
		io_ctl_unmap_page(io_ctl);

	ret = io_ctl_check_crc(io_ctl, io_ctl->index);
	if (ret)
		return ret;
@@ -699,6 +700,8 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
		num_entries--;
	}

	io_ctl_unmap_page(&io_ctl);

	/*
	 * We add the bitmaps at the end of the entries in order that
	 * the bitmap entries are added to the cache.
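The io_ctl hunks defer checksum verification until an entry is actually read from a page, so trailing pages that hold no needed entries are never mapped or checked. A self-contained toy with the same lazy shape (the one-byte XOR "crc" is only a stand-in):

	#include <stdint.h>
	#include <string.h>

	#define PAGE_SZ 64

	struct reader {
		const uint8_t (*pages)[PAGE_SZ];
		const uint8_t *cur;	/* NULL until the current page is verified */
		int index;
		int num_pages;
	};

	/* Stand-in for io_ctl_check_crc(): verify the page, then "map" it. */
	static int check_and_map(struct reader *r)
	{
		const uint8_t *p;
		uint8_t sum = 0;
		int i;

		if (r->index >= r->num_pages)
			return -1;
		p = r->pages[r->index];
		for (i = 1; i < PAGE_SZ; i++)
			sum ^= p[i];
		if (sum != p[0])
			return -1;	/* corrupt page */
		r->cur = p + 1;
		return 0;
	}

	/* Check a page only when an entry is actually read from it. */
	static int read_entry(struct reader *r, uint8_t *out, int len)
	{
		if (!r->cur && check_and_map(r))
			return -1;
		memcpy(out, r->cur, len);
		r->cur += len;
		if (r->cur - r->pages[r->index] >= PAGE_SZ) {
			r->cur = NULL;	/* next read re-checks the next page */
			r->index++;
		}
		return 0;
	}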
fs/btrfs/inode-map.c
@@ -398,6 +398,8 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_path *path;
	struct inode *inode;
	struct btrfs_block_rsv *rsv;
	u64 num_bytes;
	u64 alloc_hint = 0;
	int ret;
	int prealloc;
@@ -421,11 +423,26 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
	if (!path)
		return -ENOMEM;

	rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->trans_block_rsv;

	num_bytes = trans->bytes_reserved;
	/*
	 * 1 item for inode item insertion if needed
	 * 3 items for inode item update (in the worst case)
	 * 1 item for free space object
	 * 3 items for pre-allocation
	 */
	trans->bytes_reserved = btrfs_calc_trans_metadata_size(root, 8);
	ret = btrfs_block_rsv_add_noflush(root, trans->block_rsv,
					  trans->bytes_reserved);
	if (ret)
		goto out;
again:
	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
		ret = PTR_ERR(inode);
		goto out;
		goto out_release;
	}

	if (IS_ERR(inode)) {
@@ -434,7 +451,7 @@ again:

		ret = create_free_ino_inode(root, trans, path);
		if (ret)
			goto out;
			goto out_release;
		goto again;
	}

@@ -477,11 +494,14 @@ again:
	}
	btrfs_free_reserved_data_space(inode, prealloc);

	ret = btrfs_write_out_ino_cache(root, trans, path);
out_put:
	iput(inode);
out_release:
	btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
out:
	if (ret == 0)
		ret = btrfs_write_out_ino_cache(root, trans, path);
	trans->block_rsv = rsv;
	trans->bytes_reserved = num_bytes;

	btrfs_free_path(path);
	return ret;
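btrfs_save_ino_cache() now borrows trans->block_rsv and trans->bytes_reserved for its own reservation and restores both on every exit path. The save/operate/restore shape in a self-contained sketch (the handle struct and callback are hypothetical):

	/* Borrow-and-restore: temporarily point the handle at a private
	 * reservation, and restore the caller's on every exit path. */
	struct handle { void *block_rsv; unsigned long long bytes_reserved; };

	static int with_private_rsv(struct handle *h, void *private_rsv,
				    unsigned long long bytes,
				    int (*op)(struct handle *))
	{
		void *saved_rsv = h->block_rsv;
		unsigned long long saved_bytes = h->bytes_reserved;
		int ret;

		h->block_rsv = private_rsv;
		h->bytes_reserved = bytes;

		ret = op(h);

		/* restore unconditionally, mirroring the 'out:' label above */
		h->block_rsv = saved_rsv;
		h->bytes_reserved = saved_bytes;
		return ret;
	}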
fs/btrfs/inode.c
@@ -93,6 +93,8 @@ static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock);
static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root, struct inode *inode);

static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct inode *inode, struct inode *dir,
@@ -1741,7 +1743,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
			trans = btrfs_join_transaction(root);
			BUG_ON(IS_ERR(trans));
			trans->block_rsv = &root->fs_info->delalloc_block_rsv;
			ret = btrfs_update_inode(trans, root, inode);
			ret = btrfs_update_inode_fallback(trans, root, inode);
			BUG_ON(ret);
		}
		goto out;
@@ -1791,7 +1793,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)

	ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
	if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
		ret = btrfs_update_inode(trans, root, inode);
		ret = btrfs_update_inode_fallback(trans, root, inode);
		BUG_ON(ret);
	}
	ret = 0;
@@ -2199,6 +2201,9 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
		if (ret)
			goto out;
	}
	/* release the path since we're done with it */
	btrfs_release_path(path);

	root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;

	if (root->orphan_block_rsv)
@@ -2426,7 +2431,7 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
/*
 * copy everything in the in-memory inode into the btree.
 */
noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_inode_item *inode_item;
@@ -2434,21 +2439,6 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
	struct extent_buffer *leaf;
	int ret;

	/*
	 * If the inode is a free space inode, we can deadlock during commit
	 * if we put it into the delayed code.
	 *
	 * The data relocation inode should also be directly updated
	 * without delay
	 */
	if (!btrfs_is_free_space_inode(root, inode)
	    && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
		ret = btrfs_delayed_update_inode(trans, root, inode);
		if (!ret)
			btrfs_set_inode_last_trans(trans, inode);
		return ret;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
@@ -2476,6 +2466,43 @@ failed:
	return ret;
}

/*
 * copy everything in the in-memory inode into the btree.
 */
noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode)
{
	int ret;

	/*
	 * If the inode is a free space inode, we can deadlock during commit
	 * if we put it into the delayed code.
	 *
	 * The data relocation inode should also be directly updated
	 * without delay
	 */
	if (!btrfs_is_free_space_inode(root, inode)
	    && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
		ret = btrfs_delayed_update_inode(trans, root, inode);
		if (!ret)
			btrfs_set_inode_last_trans(trans, inode);
		return ret;
	}

	return btrfs_update_inode_item(trans, root, inode);
}

static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode)
{
	int ret;

	ret = btrfs_update_inode(trans, root, inode);
	if (ret == -ENOSPC)
		return btrfs_update_inode_item(trans, root, inode);
	return ret;
}

/*
 * unlink helper that gets used here in inode.c and in the tree logging
 * recovery code.  It removes a link in a directory with a given name, and
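btrfs_update_inode_fallback() is a plain try-then-fallback wrapper: the delayed path can fail with -ENOSPC when its reservation cannot be made, in which case the direct btree update is used instead. The pattern in isolation (the two stand-in functions below are fakes for illustration):

	#include <errno.h>

	static int update_delayed(void) { return -ENOSPC; }	/* fast path, may fail */
	static int update_direct(void)  { return 0; }		/* direct btree update */

	/* Try the reservation-hungry fast path first; only drop to the
	 * direct path when it fails for lack of space. */
	static int update_with_fallback(void)
	{
		int ret = update_delayed();

		if (ret == -ENOSPC)
			return update_direct();
		return ret;
	}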
@@ -5632,7 +5659,7 @@ again:
	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
		ret = btrfs_ordered_update_i_size(inode, 0, ordered);
		if (!ret)
			err = btrfs_update_inode(trans, root, inode);
			err = btrfs_update_inode_fallback(trans, root, inode);
		goto out;
	}

@@ -5670,7 +5697,7 @@ again:
	add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);
	ret = btrfs_ordered_update_i_size(inode, 0, ordered);
	if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags))
		btrfs_update_inode(trans, root, inode);
		btrfs_update_inode_fallback(trans, root, inode);
	ret = 0;
out_unlock:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset,
@@ -6529,14 +6556,16 @@ end_trans:
			ret = btrfs_orphan_del(NULL, inode);
	}

	trans->block_rsv = &root->fs_info->trans_block_rsv;
	ret = btrfs_update_inode(trans, root, inode);
	if (ret && !err)
		err = ret;
	if (trans) {
		trans->block_rsv = &root->fs_info->trans_block_rsv;
		ret = btrfs_update_inode(trans, root, inode);
		if (ret && !err)
			err = ret;

	nr = trans->blocks_used;
	ret = btrfs_end_transaction_throttle(trans, root);
	btrfs_btree_balance_dirty(root, nr);
		nr = trans->blocks_used;
		ret = btrfs_end_transaction_throttle(trans, root);
		btrfs_btree_balance_dirty(root, nr);
	}

out:
	btrfs_free_block_rsv(root, rsv);
@@ -6605,6 +6634,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
	ei->orphan_meta_reserved = 0;
	ei->dummy_inode = 0;
	ei->in_defrag = 0;
	ei->delalloc_meta_reserved = 0;
	ei->force_compress = BTRFS_COMPRESS_NONE;

	ei->delayed_node = NULL;
fs/btrfs/relocation.c
@@ -1174,6 +1174,8 @@ static int clone_backref_node(struct btrfs_trans_handle *trans,
			list_add_tail(&new_edge->list[UPPER],
				      &new_node->lower);
		}
	} else {
		list_add_tail(&new_node->lower, &cache->leaves);
	}

	rb_node = tree_insert(&cache->rb_root, new_node->bytenr,
fs/btrfs/scrub.c
@@ -944,50 +944,18 @@ static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer)
static int scrub_submit(struct scrub_dev *sdev)
{
	struct scrub_bio *sbio;
	struct bio *bio;
	int i;

	if (sdev->curr == -1)
		return 0;

	sbio = sdev->bios[sdev->curr];

	bio = bio_alloc(GFP_NOFS, sbio->count);
	if (!bio)
		goto nomem;

	bio->bi_private = sbio;
	bio->bi_end_io = scrub_bio_end_io;
	bio->bi_bdev = sdev->dev->bdev;
	bio->bi_sector = sbio->physical >> 9;

	for (i = 0; i < sbio->count; ++i) {
		struct page *page;
		int ret;

		page = alloc_page(GFP_NOFS);
		if (!page)
			goto nomem;

		ret = bio_add_page(bio, page, PAGE_SIZE, 0);
		if (!ret) {
			__free_page(page);
			goto nomem;
		}
	}

	sbio->err = 0;
	sdev->curr = -1;
	atomic_inc(&sdev->in_flight);

	submit_bio(READ, bio);
	submit_bio(READ, sbio->bio);

	return 0;

nomem:
	scrub_free_bio(bio);

	return -ENOMEM;
}

static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
@@ -995,6 +963,8 @@ static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
		      u8 *csum, int force)
{
	struct scrub_bio *sbio;
	struct page *page;
	int ret;

again:
	/*
@@ -1015,12 +985,22 @@ again:
	}
	sbio = sdev->bios[sdev->curr];
	if (sbio->count == 0) {
		struct bio *bio;

		sbio->physical = physical;
		sbio->logical = logical;
		bio = bio_alloc(GFP_NOFS, SCRUB_PAGES_PER_BIO);
		if (!bio)
			return -ENOMEM;

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_bio_end_io;
		bio->bi_bdev = sdev->dev->bdev;
		bio->bi_sector = sbio->physical >> 9;
		sbio->err = 0;
		sbio->bio = bio;
	} else if (sbio->physical + sbio->count * PAGE_SIZE != physical ||
		   sbio->logical + sbio->count * PAGE_SIZE != logical) {
		int ret;

		ret = scrub_submit(sdev);
		if (ret)
			return ret;
@@ -1030,6 +1010,20 @@ again:
	sbio->spag[sbio->count].generation = gen;
	sbio->spag[sbio->count].have_csum = 0;
	sbio->spag[sbio->count].mirror_num = mirror_num;

	page = alloc_page(GFP_NOFS);
	if (!page)
		return -ENOMEM;

	ret = bio_add_page(sbio->bio, page, PAGE_SIZE, 0);
	if (!ret) {
		__free_page(page);
		ret = scrub_submit(sdev);
		if (ret)
			return ret;
		goto again;
	}

	if (csum) {
		sbio->spag[sbio->count].have_csum = 1;
		memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size);
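The scrub change keeps a partially built bio in the scrub_bio and, when bio_add_page() refuses another page, submits what is already queued and retries rather than failing the whole scrub. The same submit-and-retry loop on a toy batch (printf stands in for submit_bio):

	#include <stdio.h>

	#define BATCH 4

	struct batch { int items[BATCH]; int count; };

	static int flush(struct batch *b)
	{
		for (int i = 0; i < b->count; i++)
			printf("submit %d\n", b->items[i]);
		b->count = 0;
		return 0;
	}

	/* If adding to the current batch fails (it is full), submit what we
	 * have and start over with a fresh batch instead of erroring out. */
	static int add_item(struct batch *b, int item)
	{
	again:
		if (b->count == BATCH) {	/* bio_add_page() returned 0 */
			int ret = flush(b);	/* scrub_submit() */
			if (ret)
				return ret;
			goto again;
		}
		b->items[b->count++] = item;
		return 0;
	}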
fs/btrfs/super.c
@@ -197,7 +197,7 @@ static match_table_t tokens = {
	{Opt_subvolrootid, "subvolrootid=%d"},
	{Opt_defrag, "autodefrag"},
	{Opt_inode_cache, "inode_cache"},
	{Opt_no_space_cache, "no_space_cache"},
	{Opt_no_space_cache, "nospace_cache"},
	{Opt_recovery, "recovery"},
	{Opt_err, NULL},
};
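The rename only touches the token table: the same Opt_no_space_cache token now matches "nospace_cache", consistent with the other single-word options. A userspace analog of such a token-table lookup (simplified; the kernel's match_token() also handles %s/%d patterns):

	#include <string.h>

	enum { Opt_space_cache, Opt_no_space_cache, Opt_err };

	static const struct { int token; const char *pattern; } tokens[] = {
		{ Opt_space_cache,    "space_cache" },
		{ Opt_no_space_cache, "nospace_cache" },	/* renamed option */
		{ Opt_err,            NULL },
	};

	/* First matching pattern in the table wins. */
	static int match(const char *opt)
	{
		for (int i = 0; tokens[i].pattern; i++)
			if (strcmp(opt, tokens[i].pattern) == 0)
				return tokens[i].token;
		return Opt_err;
	}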
@@ -448,6 +448,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_subvol:
			kfree(*subvol_name);
			*subvol_name = match_strdup(&args[0]);
			break;
		case Opt_subvolid:
@@ -710,7 +711,7 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
	if (btrfs_test_opt(root, SPACE_CACHE))
		seq_puts(seq, ",space_cache");
	else
		seq_puts(seq, ",no_space_cache");
		seq_puts(seq, ",nospace_cache");
	if (btrfs_test_opt(root, CLEAR_CACHE))
		seq_puts(seq, ",clear_cache");
	if (btrfs_test_opt(root, USER_SUBVOL_RM_ALLOWED))
@@ -890,7 +891,6 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
	struct super_block *s;
	struct dentry *root;
	struct btrfs_fs_devices *fs_devices = NULL;
	struct btrfs_root *tree_root = NULL;
	struct btrfs_fs_info *fs_info = NULL;
	fmode_t mode = FMODE_READ;
	char *subvol_name = NULL;
@@ -904,8 +904,10 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
	error = btrfs_parse_early_options(data, mode, fs_type,
					  &subvol_name, &subvol_objectid,
					  &subvol_rootid, &fs_devices);
	if (error)
	if (error) {
		kfree(subvol_name);
		return ERR_PTR(error);
	}

	if (subvol_name) {
		root = mount_subvol(subvol_name, flags, device_name, data);
@@ -917,15 +919,6 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
	if (error)
		return ERR_PTR(error);

	error = btrfs_open_devices(fs_devices, mode, fs_type);
	if (error)
		return ERR_PTR(error);

	if (!(flags & MS_RDONLY) && fs_devices->rw_devices == 0) {
		error = -EACCES;
		goto error_close_devices;
	}

	/*
	 * Setup a dummy root and fs_info for test/set super. This is because
	 * we don't actually fill this stuff out until open_ctree, but we need
@@ -933,24 +926,36 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
	 * then open_ctree will properly initialize everything later.
	 */
	fs_info = kzalloc(sizeof(struct btrfs_fs_info), GFP_NOFS);
	tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
	if (!fs_info || !tree_root) {
	if (!fs_info)
		return ERR_PTR(-ENOMEM);

	fs_info->tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
	if (!fs_info->tree_root) {
		error = -ENOMEM;
		goto error_close_devices;
		goto error_fs_info;
	}
	fs_info->tree_root = tree_root;
	fs_info->tree_root->fs_info = fs_info;
	fs_info->fs_devices = fs_devices;
	tree_root->fs_info = fs_info;

	fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_NOFS);
	fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_NOFS);
	if (!fs_info->super_copy || !fs_info->super_for_commit) {
		error = -ENOMEM;
		goto error_fs_info;
	}

	error = btrfs_open_devices(fs_devices, mode, fs_type);
	if (error)
		goto error_fs_info;

	if (!(flags & MS_RDONLY) && fs_devices->rw_devices == 0) {
		error = -EACCES;
		goto error_close_devices;
	}

	bdev = fs_devices->latest_bdev;
	s = sget(fs_type, btrfs_test_super, btrfs_set_super, tree_root);
	s = sget(fs_type, btrfs_test_super, btrfs_set_super,
		 fs_info->tree_root);
	if (IS_ERR(s)) {
		error = PTR_ERR(s);
		goto error_close_devices;
@@ -959,12 +964,12 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
	if (s->s_root) {
		if ((flags ^ s->s_flags) & MS_RDONLY) {
			deactivate_locked_super(s);
			return ERR_PTR(-EBUSY);
			error = -EBUSY;
			goto error_close_devices;
		}

		btrfs_close_devices(fs_devices);
		free_fs_info(fs_info);
		kfree(tree_root);
	} else {
		char b[BDEVNAME_SIZE];

@@ -991,8 +996,8 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,

error_close_devices:
	btrfs_close_devices(fs_devices);
error_fs_info:
	free_fs_info(fs_info);
	kfree(tree_root);
	return ERR_PTR(error);
}

fs/btrfs/transaction.c
@@ -882,8 +882,8 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
	btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);

	if (to_reserve > 0) {
		ret = btrfs_block_rsv_add(root, &pending->block_rsv,
					  to_reserve);
		ret = btrfs_block_rsv_add_noflush(root, &pending->block_rsv,
						  to_reserve);
		if (ret) {
			pending->error = ret;
			goto fail;
fs/btrfs/volumes.c
@@ -999,7 +999,7 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
@@ -1012,6 +1012,9 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
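The volumes.c fix restarts the search from the adjusted key after releasing the path, rather than reusing a path that no longer points at the right slot. A minimal model of that release-and-retry loop (a sorted-array scan stands in for the btree search):

	/* When the probe lands before 'want', adjust the key and restart the
	 * whole search (release_path + goto again) instead of continuing
	 * from a stale position. */
	static int find_from(const long *keys, int n, long want)
	{
		int i;

	again:
		for (i = 0; i < n; i++) {
			if (keys[i] == want)
				return i;	/* exact match */
			if (keys[i] > want) {
				want = keys[i];	/* key = found_key */
				goto again;	/* redo the search */
			}
		}
		return -1;
	}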