Merge git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable
* git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable:
  Btrfs: fix possible panic on unmount
  Btrfs: deal with NULL acl sent to btrfs_set_acl
  Btrfs: fix regression in orphan cleanup
  Btrfs: Fix race in btrfs_mark_extent_written
  Btrfs, fix memory leaks in error paths
  Btrfs: align offsets for btrfs_ordered_update_i_size
  btrfs: fix missing last-entry in readdir(3)
commit 30a0f5e1fb
fs/btrfs/acl.c
@@ -112,12 +112,14 @@ static int btrfs_set_acl(struct btrfs_trans_handle *trans,
        switch (type) {
        case ACL_TYPE_ACCESS:
                mode = inode->i_mode;
                ret = posix_acl_equiv_mode(acl, &mode);
                if (ret < 0)
                        return ret;
                ret = 0;
                inode->i_mode = mode;
                name = POSIX_ACL_XATTR_ACCESS;
                if (acl) {
                        ret = posix_acl_equiv_mode(acl, &mode);
                        if (ret < 0)
                                return ret;
                        inode->i_mode = mode;
                }
                ret = 0;
                break;
        case ACL_TYPE_DEFAULT:
                if (!S_ISDIR(inode->i_mode))
fs/btrfs/extent-tree.c
@@ -83,6 +83,17 @@ static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
        return (cache->flags & bits) == bits;
}

void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count))
                kfree(cache);
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
@@ -156,7 +167,7 @@ block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                }
        }
        if (ret)
                atomic_inc(&ret->count);
                btrfs_get_block_group(ret);
        spin_unlock(&info->block_group_cache_lock);

        return ret;
@@ -407,6 +418,8 @@ err:

        put_caching_control(caching_ctl);
        atomic_dec(&block_group->space_info->caching_threads);
        btrfs_put_block_group(block_group);

        return 0;
}

@@ -447,6 +460,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache)
        up_write(&fs_info->extent_commit_sem);

        atomic_inc(&cache->space_info->caching_threads);
        btrfs_get_block_group(cache);

        tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n",
                          cache->key.objectid);
@@ -486,12 +500,6 @@ struct btrfs_block_group_cache *btrfs_lookup_block_group(
        return cache;
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count))
                kfree(cache);
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
@@ -2582,7 +2590,7 @@ next_block_group(struct btrfs_root *root,
        if (node) {
                cache = rb_entry(node, struct btrfs_block_group_cache,
                                 cache_node);
                atomic_inc(&cache->count);
                btrfs_get_block_group(cache);
        } else
                cache = NULL;
        spin_unlock(&root->fs_info->block_group_cache_lock);
@@ -4227,7 +4235,7 @@ search:
                u64 offset;
                int cached;

                atomic_inc(&block_group->count);
                btrfs_get_block_group(block_group);
                search_start = block_group->key.objectid;

have_block_group:
@@ -4315,7 +4323,7 @@ have_block_group:

                        btrfs_put_block_group(block_group);
                        block_group = last_ptr->block_group;
                        atomic_inc(&block_group->count);
                        btrfs_get_block_group(block_group);
                        spin_unlock(&last_ptr->lock);
                        spin_unlock(&last_ptr->refill_lock);

@@ -7395,9 +7403,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
                wait_block_group_cache_done(block_group);

                btrfs_remove_free_space_cache(block_group);

                WARN_ON(atomic_read(&block_group->count) != 1);
                kfree(block_group);
                btrfs_put_block_group(block_group);

                spin_lock(&info->block_group_cache_lock);
        }
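The extent-tree.c hunks above, from "Btrfs: fix possible panic on unmount", replace the open-coded atomic_inc(&cache->count) and atomic_dec_and_test()/kfree() uses with the btrfs_get_block_group()/btrfs_put_block_group() helpers, and make unmount drop its reference instead of calling kfree() directly, so the cache is only freed once the last holder lets go. Below is a minimal userspace sketch of that refcounting pattern; the struct and helper names mirror the kernel ones for illustration only and are not the kernel implementation.

/*
 * Standalone illustration (not kernel code): a structure whose lifetime is
 * governed by a reference count and freed only when the last reference drops.
 */
#include <stdatomic.h>
#include <stdlib.h>

struct block_group_cache {
        atomic_int count;               /* starts at 1 for the creator */
        /* ... cached block-group state would live here ... */
};

static void get_block_group(struct block_group_cache *cache)
{
        atomic_fetch_add(&cache->count, 1);
}

static void put_block_group(struct block_group_cache *cache)
{
        /* free only when the last reference is dropped */
        if (atomic_fetch_sub(&cache->count, 1) == 1)
                free(cache);
}

int main(void)
{
        struct block_group_cache *cache = malloc(sizeof(*cache));

        if (!cache)
                return 1;
        atomic_init(&cache->count, 1);

        get_block_group(cache);         /* e.g. a lookup hands out a reference */
        put_block_group(cache);         /* the lookup's caller drops it */
        put_block_group(cache);         /* "unmount" drops the original reference */
        return 0;
}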
fs/btrfs/file.c
@@ -506,7 +506,8 @@ next_slot:
}

static int extent_mergeable(struct extent_buffer *leaf, int slot,
                            u64 objectid, u64 bytenr, u64 *start, u64 *end)
                            u64 objectid, u64 bytenr, u64 orig_offset,
                            u64 *start, u64 *end)
{
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
@@ -522,6 +523,7 @@ static int extent_mergeable(struct extent_buffer *leaf, int slot,
        fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
        if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
            btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
            btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
            btrfs_file_extent_compression(leaf, fi) ||
            btrfs_file_extent_encryption(leaf, fi) ||
            btrfs_file_extent_other_encoding(leaf, fi))
@@ -561,6 +563,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
        u64 split;
        int del_nr = 0;
        int del_slot = 0;
        int recow;
        int ret;

        btrfs_drop_extent_cache(inode, start, end - 1, 0);
@@ -568,6 +571,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
        path = btrfs_alloc_path();
        BUG_ON(!path);
again:
        recow = 0;
        split = start;
        key.objectid = inode->i_ino;
        key.type = BTRFS_EXTENT_DATA_KEY;
@@ -591,12 +595,60 @@ again:
        bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
        orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
        memcpy(&new_key, &key, sizeof(new_key));

        if (start == key.offset && end < extent_end) {
                other_start = 0;
                other_end = start;
                if (extent_mergeable(leaf, path->slots[0] - 1,
                                     inode->i_ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        new_key.offset = end;
                        btrfs_set_item_key_safe(trans, root, path, &new_key);
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - end);
                        btrfs_set_file_extent_offset(leaf, fi,
                                                     end - orig_offset);
                        fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        end - other_start);
                        btrfs_mark_buffer_dirty(leaf);
                        goto out;
                }
        }

        if (start > key.offset && end == extent_end) {
                other_start = end;
                other_end = 0;
                if (extent_mergeable(leaf, path->slots[0] + 1,
                                     inode->i_ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);
                        path->slots[0]++;
                        new_key.offset = start;
                        btrfs_set_item_key_safe(trans, root, path, &new_key);

                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        other_end - start);
                        btrfs_set_file_extent_offset(leaf, fi,
                                                     start - orig_offset);
                        btrfs_mark_buffer_dirty(leaf);
                        goto out;
                }
        }

        while (start > key.offset || end < extent_end) {
                if (key.offset == start)
                        split = end;

                memcpy(&new_key, &key, sizeof(new_key));
                new_key.offset = split;
                ret = btrfs_duplicate_item(trans, root, path, &new_key);
                if (ret == -EAGAIN) {
@@ -631,15 +683,18 @@ again:
                        path->slots[0]--;
                        extent_end = end;
                }
                recow = 1;
        }

        fi = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);

        other_start = end;
        other_end = 0;
        if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
                             bytenr, &other_start, &other_end)) {
        if (extent_mergeable(leaf, path->slots[0] + 1,
                             inode->i_ino, bytenr, orig_offset,
                             &other_start, &other_end)) {
                if (recow) {
                        btrfs_release_path(root, path);
                        goto again;
                }
                extent_end = other_end;
                del_slot = path->slots[0] + 1;
                del_nr++;
@@ -650,8 +705,13 @@ again:
        }
        other_start = 0;
        other_end = start;
        if (extent_mergeable(leaf, path->slots[0] - 1, inode->i_ino,
                             bytenr, &other_start, &other_end)) {
        if (extent_mergeable(leaf, path->slots[0] - 1,
                             inode->i_ino, bytenr, orig_offset,
                             &other_start, &other_end)) {
                if (recow) {
                        btrfs_release_path(root, path);
                        goto again;
                }
                key.offset = other_start;
                del_slot = path->slots[0];
                del_nr++;
@@ -660,22 +720,22 @@ again:
                                        inode->i_ino, orig_offset);
                BUG_ON(ret);
        }
        fi = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);
        if (del_nr == 0) {
                btrfs_set_file_extent_type(leaf, fi,
                                           BTRFS_FILE_EXTENT_REG);
                btrfs_mark_buffer_dirty(leaf);
                goto out;
        } else {
                btrfs_set_file_extent_type(leaf, fi,
                                           BTRFS_FILE_EXTENT_REG);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                extent_end - key.offset);
                btrfs_mark_buffer_dirty(leaf);

                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
                BUG_ON(ret);
        }

        fi = btrfs_item_ptr(leaf, del_slot - 1,
                            struct btrfs_file_extent_item);
        btrfs_set_file_extent_type(leaf, fi, BTRFS_FILE_EXTENT_REG);
        btrfs_set_file_extent_num_bytes(leaf, fi,
                                        extent_end - key.offset);
        btrfs_mark_buffer_dirty(leaf);

        ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
        BUG_ON(ret);
out:
        btrfs_free_path(path);
        return 0;
fs/btrfs/inode.c
@@ -3796,6 +3796,12 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)

        if (location.type == BTRFS_INODE_ITEM_KEY) {
                inode = btrfs_iget(dir->i_sb, &location, root);
                if (unlikely(root->clean_orphans) &&
                    !(inode->i_sb->s_flags & MS_RDONLY)) {
                        down_read(&root->fs_info->cleanup_work_sem);
                        btrfs_orphan_cleanup(root);
                        up_read(&root->fs_info->cleanup_work_sem);
                }
                return inode;
        }

@@ -3995,7 +4001,11 @@ skip:

        /* Reached end of directory/root. Bump pos past the last item. */
        if (key_type == BTRFS_DIR_INDEX_KEY)
                filp->f_pos = INT_LIMIT(off_t);
                /*
                 * 32-bit glibc will use getdents64, but then strtol -
                 * so the last number we can serve is this.
                 */
                filp->f_pos = 0x7fffffff;
        else
                filp->f_pos++;
nopos:
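The readdir hunk above stops advertising INT_LIMIT(off_t) as the end-of-directory position and caps it at 0x7fffffff: a 32-bit userspace squeezes the returned offset into a signed 32-bit value, so anything larger gets mangled and the last directory entry is lost. A small standalone sketch of that truncation follows; the variable names are made up for illustration and this is not kernel code.

/* Why the cap is 0x7fffffff: on a typical two's-complement target, converting
 * the 64-bit "end of directory" marker to a signed 32-bit value wraps it to -1,
 * which is roughly what 32-bit userspace saw with the old INT_LIMIT(off_t). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int64_t kernel_pos = INT64_MAX;             /* old end-of-directory marker */
        int32_t user_pos = (int32_t)kernel_pos;     /* what a 32-bit caller keeps  */

        printf("kernel offset: %lld\n", (long long)kernel_pos);
        printf("32-bit view:   %d\n", user_pos);    /* prints -1: marker corrupted */
        printf("safe maximum:  %d\n", INT32_MAX);   /* 0x7fffffff, the new cap     */
        return 0;
}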
fs/btrfs/ordered-data.c
@@ -626,6 +626,8 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,

        if (ordered)
                offset = entry_end(ordered);
        else
                offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);

        mutex_lock(&tree->mutex);
        disk_i_size = BTRFS_I(inode)->disk_i_size;
fs/btrfs/relocation.c
@@ -3281,8 +3281,10 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
                return -ENOMEM;

        path = btrfs_alloc_path();
        if (!path)
        if (!path) {
                kfree(cluster);
                return -ENOMEM;
        }

        rc->extents_found = 0;
        rc->extents_skipped = 0;
fs/btrfs/volumes.c
@@ -2649,8 +2649,10 @@ again:
        em = lookup_extent_mapping(em_tree, logical, *length);
        read_unlock(&em_tree->lock);

        if (!em && unplug_page)
        if (!em && unplug_page) {
                kfree(multi);
                return 0;
        }

        if (!em) {
                printk(KERN_CRIT "unable to find logical %llu len %llu\n",
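The relocation.c and volumes.c hunks above, from "Btrfs, fix memory leaks in error paths", fix the same class of bug: an early error return taken after an earlier allocation has already succeeded must release that allocation before bailing out. A minimal sketch of the leak and the fix follows; the function and type names are hypothetical and only mimic the shape of the kernel code.

/* Standalone illustration of the error-path leak and its fix: the second
 * allocation failing must not leak the first one. Names are made up. */
#include <errno.h>
#include <stdlib.h>

struct cluster { int data; };
struct path    { int data; };

static int do_work(void)
{
        struct cluster *cluster = calloc(1, sizeof(*cluster));
        struct path *path;

        if (!cluster)
                return -ENOMEM;

        path = calloc(1, sizeof(*path));
        if (!path) {
                free(cluster);          /* the fix: release the earlier allocation */
                return -ENOMEM;
        }

        /* ... real work would go here ... */

        free(path);
        free(cluster);
        return 0;
}

int main(void)
{
        return do_work() ? 1 : 0;
}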