/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mount.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/security.h>
#include <linux/xattr.h>
#include <linux/vmalloc.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "volumes.h"
#include "locking.h"

/* Mask out flags that are inappropriate for the given type of inode. */
static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags)
{
	if (S_ISDIR(mode))
		return flags;
	else if (S_ISREG(mode))
		return flags & ~FS_DIRSYNC_FL;
	else
		return flags & (FS_NODUMP_FL | FS_NOATIME_FL);
}

/*
 * Export inode flags to the format expected by the FS_IOC_GETFLAGS ioctl.
 */
static unsigned int btrfs_flags_to_ioctl(unsigned int flags)
{
	unsigned int iflags = 0;

	if (flags & BTRFS_INODE_SYNC)
		iflags |= FS_SYNC_FL;
	if (flags & BTRFS_INODE_IMMUTABLE)
		iflags |= FS_IMMUTABLE_FL;
	if (flags & BTRFS_INODE_APPEND)
		iflags |= FS_APPEND_FL;
	if (flags & BTRFS_INODE_NODUMP)
		iflags |= FS_NODUMP_FL;
	if (flags & BTRFS_INODE_NOATIME)
		iflags |= FS_NOATIME_FL;
	if (flags & BTRFS_INODE_DIRSYNC)
		iflags |= FS_DIRSYNC_FL;

	return iflags;
}

/*
 * Update inode->i_flags based on the btrfs internal flags.
 */
void btrfs_update_iflags(struct inode *inode)
{
	struct btrfs_inode *ip = BTRFS_I(inode);

	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);

	if (ip->flags & BTRFS_INODE_SYNC)
		inode->i_flags |= S_SYNC;
	if (ip->flags & BTRFS_INODE_IMMUTABLE)
		inode->i_flags |= S_IMMUTABLE;
	if (ip->flags & BTRFS_INODE_APPEND)
		inode->i_flags |= S_APPEND;
	if (ip->flags & BTRFS_INODE_NOATIME)
		inode->i_flags |= S_NOATIME;
	if (ip->flags & BTRFS_INODE_DIRSYNC)
		inode->i_flags |= S_DIRSYNC;
}

/*
 * Inherit flags from the parent inode.
 *
 * Unlike extN we don't have any flags we don't want to inherit currently.
 */
void btrfs_inherit_iflags(struct inode *inode, struct inode *dir)
{
	unsigned int flags;

	if (!dir)
		return;

	flags = BTRFS_I(dir)->flags;

	if (S_ISREG(inode->i_mode))
		flags &= ~BTRFS_INODE_DIRSYNC;
	else if (!S_ISDIR(inode->i_mode))
		flags &= (BTRFS_INODE_NODUMP | BTRFS_INODE_NOATIME);

	BTRFS_I(inode)->flags = flags;
	btrfs_update_iflags(inode);
}
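
/*
 * FS_IOC_GETFLAGS: translate the btrfs inode flags to the generic
 * FS_*_FL representation and copy them out to user space.
 */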
static int btrfs_ioctl_getflags(struct file *file, void __user *arg)
{
	struct btrfs_inode *ip = BTRFS_I(file->f_path.dentry->d_inode);
	unsigned int flags = btrfs_flags_to_ioctl(ip->flags);

	if (copy_to_user(arg, &flags, sizeof(flags)))
		return -EFAULT;
	return 0;
}
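
/*
 * FS_IOC_SETFLAGS: validate the requested FS_*_FL flags, apply them to the
 * in-memory btrfs inode flags and write the inode item back inside a
 * joined transaction.
 */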
static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct btrfs_inode *ip = BTRFS_I(inode);
	struct btrfs_root *root = ip->root;
	struct btrfs_trans_handle *trans;
	unsigned int flags, oldflags;
	int ret;

	if (copy_from_user(&flags, arg, sizeof(flags)))
		return -EFAULT;

	if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
		      FS_NOATIME_FL | FS_NODUMP_FL | \
		      FS_SYNC_FL | FS_DIRSYNC_FL))
		return -EOPNOTSUPP;

	if (!is_owner_or_cap(inode))
		return -EACCES;

	mutex_lock(&inode->i_mutex);

	flags = btrfs_mask_flags(inode->i_mode, flags);
	oldflags = btrfs_flags_to_ioctl(ip->flags);
	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
		if (!capable(CAP_LINUX_IMMUTABLE)) {
			ret = -EPERM;
			goto out_unlock;
		}
	}

	ret = mnt_want_write(file->f_path.mnt);
	if (ret)
		goto out_unlock;

	if (flags & FS_SYNC_FL)
		ip->flags |= BTRFS_INODE_SYNC;
	else
		ip->flags &= ~BTRFS_INODE_SYNC;
	if (flags & FS_IMMUTABLE_FL)
		ip->flags |= BTRFS_INODE_IMMUTABLE;
	else
		ip->flags &= ~BTRFS_INODE_IMMUTABLE;
	if (flags & FS_APPEND_FL)
		ip->flags |= BTRFS_INODE_APPEND;
	else
		ip->flags &= ~BTRFS_INODE_APPEND;
	if (flags & FS_NODUMP_FL)
		ip->flags |= BTRFS_INODE_NODUMP;
	else
		ip->flags &= ~BTRFS_INODE_NODUMP;
	if (flags & FS_NOATIME_FL)
		ip->flags |= BTRFS_INODE_NOATIME;
	else
		ip->flags &= ~BTRFS_INODE_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		ip->flags |= BTRFS_INODE_DIRSYNC;
	else
		ip->flags &= ~BTRFS_INODE_DIRSYNC;

	trans = btrfs_join_transaction(root, 1);
	BUG_ON(!trans);

	ret = btrfs_update_inode(trans, root, inode);
	BUG_ON(ret);

	btrfs_update_iflags(inode);
	inode->i_ctime = CURRENT_TIME;
	btrfs_end_transaction(trans, root);

	mnt_drop_write(file->f_path.mnt);
out_unlock:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
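
/*
 * FS_IOC_GETVERSION: report the inode generation number to user space.
 */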
static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
{
	struct inode *inode = file->f_path.dentry->d_inode;

	return put_user(inode->i_generation, arg);
}
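
/*
 * Create a new, empty subvolume: allocate a root block for the new tree,
 * insert its root item into the tree of tree roots and link the new root
 * into the parent directory that @dentry lives in.
 */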
static noinline int create_subvol(struct btrfs_root *root,
				  struct dentry *dentry,
				  char *name, int namelen)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
	struct btrfs_root_item root_item;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	struct btrfs_root *new_root;
	struct inode *dir = dentry->d_parent->d_inode;
	int ret;
	int err;
	u64 objectid;
	u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
	u64 index = 0;

	/*
	 * 1 - inode item
	 * 2 - refs
	 * 1 - root item
	 * 2 - dir items
	 */
	ret = btrfs_reserve_metadata_space(root, 6);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 1);
	BUG_ON(!trans);

	ret = btrfs_find_free_objectid(trans, root->fs_info->tree_root,
				       0, &objectid);
	if (ret)
		goto fail;

	leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
				      0, objectid, NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		goto fail;
	}

	memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(leaf, leaf->start);
	btrfs_set_header_generation(leaf, trans->transid);
	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(leaf, objectid);

	write_extent_buffer(leaf, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(leaf),
			    BTRFS_FSID_SIZE);
	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(leaf),
			    BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	inode_item = &root_item.inode;
	memset(inode_item, 0, sizeof(*inode_item));
	inode_item->generation = cpu_to_le64(1);
	inode_item->size = cpu_to_le64(3);
	inode_item->nlink = cpu_to_le32(1);
	inode_item->nbytes = cpu_to_le64(root->leafsize);
	inode_item->mode = cpu_to_le32(S_IFDIR | 0755);

	btrfs_set_root_bytenr(&root_item, leaf->start);
	btrfs_set_root_generation(&root_item, trans->transid);
	btrfs_set_root_level(&root_item, 0);
	btrfs_set_root_refs(&root_item, 1);
	btrfs_set_root_used(&root_item, leaf->len);
	btrfs_set_root_last_snapshot(&root_item, 0);

	memset(&root_item.drop_progress, 0, sizeof(root_item.drop_progress));
	root_item.drop_level = 0;

	btrfs_tree_unlock(leaf);
	free_extent_buffer(leaf);
	leaf = NULL;

	btrfs_set_root_dirid(&root_item, new_dirid);

	key.objectid = objectid;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
	ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
				&root_item);
	if (ret)
		goto fail;

	key.offset = (u64)-1;
	new_root = btrfs_read_fs_root_no_name(root->fs_info, &key);
	BUG_ON(IS_ERR(new_root));

	btrfs_record_root_in_trans(trans, new_root);

	ret = btrfs_create_subvol_root(trans, new_root, new_dirid,
				       BTRFS_I(dir)->block_group);
	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(dir, &index);
	BUG_ON(ret);

	ret = btrfs_insert_dir_item(trans, root,
				    name, namelen, dir->i_ino, &key,
				    BTRFS_FT_DIR, index);
	if (ret)
		goto fail;

	btrfs_i_size_write(dir, dir->i_size + namelen * 2);
	ret = btrfs_update_inode(trans, root, dir);
	BUG_ON(ret);

	ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
				 objectid, root->root_key.objectid,
				 dir->i_ino, index, name, namelen);

	BUG_ON(ret);

	d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry));
fail:
	err = btrfs_commit_transaction(trans, root);
	if (err && !ret)
		ret = err;

	btrfs_unreserve_metadata_space(root, 6);
	return ret;
}
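
/*
 * Queue a snapshot of @root under @dentry.  The snapshot itself is taken
 * at transaction commit time through the pending_snapshot list; here we
 * only set up the request, commit, and instantiate the new dentry.
 */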
static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
			   char *name, int namelen)
{
	struct inode *inode;
	struct btrfs_pending_snapshot *pending_snapshot;
	struct btrfs_trans_handle *trans;
	int ret;

	if (!root->ref_cows)
		return -EINVAL;

	/*
	 * 1 - inode item
	 * 2 - refs
	 * 1 - root item
	 * 2 - dir items
	 */
	ret = btrfs_reserve_metadata_space(root, 6);
	if (ret)
		goto fail;

	pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_NOFS);
	if (!pending_snapshot) {
		ret = -ENOMEM;
		btrfs_unreserve_metadata_space(root, 6);
		goto fail;
	}
	pending_snapshot->name = kmalloc(namelen + 1, GFP_NOFS);
	if (!pending_snapshot->name) {
		ret = -ENOMEM;
		kfree(pending_snapshot);
		btrfs_unreserve_metadata_space(root, 6);
		goto fail;
	}
	memcpy(pending_snapshot->name, name, namelen);
	pending_snapshot->name[namelen] = '\0';
	pending_snapshot->dentry = dentry;
	trans = btrfs_start_transaction(root, 1);
	BUG_ON(!trans);
	pending_snapshot->root = root;
	list_add(&pending_snapshot->list,
		 &trans->transaction->pending_snapshots);
	ret = btrfs_commit_transaction(trans, root);
	BUG_ON(ret);
	btrfs_unreserve_metadata_space(root, 6);

	inode = btrfs_lookup_dentry(dentry->d_parent->d_inode, dentry);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto fail;
	}
	BUG_ON(!inode);
	d_instantiate(dentry, inode);
	ret = 0;
fail:
	return ret;
}

/* copy of may_create in fs/namei.c */
static inline int btrfs_may_create(struct inode *dir, struct dentry *child)
{
	if (child->d_inode)
		return -EEXIST;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	return inode_permission(dir, MAY_WRITE | MAY_EXEC);
}

/*
 * Create a new subvolume below @parent. This is largely modeled after
 * sys_mkdirat and vfs_mkdir, but we only do a single component lookup
 * inside this filesystem so it's quite a bit simpler.
 */
static noinline int btrfs_mksubvol(struct path *parent,
				   char *name, int namelen,
				   struct btrfs_root *snap_src)
{
	struct inode *dir = parent->dentry->d_inode;
	struct dentry *dentry;
	int error;

	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);

	dentry = lookup_one_len(name, parent->dentry, namelen);
	error = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		goto out_unlock;

	error = -EEXIST;
	if (dentry->d_inode)
		goto out_dput;

	error = mnt_want_write(parent->mnt);
	if (error)
		goto out_dput;

	error = btrfs_may_create(dir, dentry);
	if (error)
		goto out_drop_write;

	down_read(&BTRFS_I(dir)->root->fs_info->subvol_sem);

	if (btrfs_root_refs(&BTRFS_I(dir)->root->root_item) == 0)
		goto out_up_read;

	if (snap_src) {
		error = create_snapshot(snap_src, dentry,
					name, namelen);
	} else {
		error = create_subvol(BTRFS_I(dir)->root, dentry,
				      name, namelen);
	}
	if (!error)
		fsnotify_mkdir(dir, dentry);
out_up_read:
	up_read(&BTRFS_I(dir)->root->fs_info->subvol_sem);
out_drop_write:
	mnt_drop_write(parent->mnt);
out_dput:
	dput(dentry);
out_unlock:
	mutex_unlock(&dir->i_mutex);
	return error;
}
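
/*
 * Decide whether the extent containing [start, start + len) is worth
 * defragmenting: returns 1 when the caller should defrag the range and
 * 0 for holes, inline extents and extents that are already large.
 * *skip and *defrag_end are updated so the caller knows how far to jump
 * ahead or how far to keep going.
 */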
static int should_defrag_range(struct inode *inode, u64 start, u64 len,
			       int thresh, u64 *last_len, u64 *skip,
			       u64 *defrag_end)
{
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map *em = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	int ret = 1;

	if (thresh == 0)
		thresh = 256 * 1024;

	/*
	 * make sure that once we start defragging an extent, we keep on
	 * defragging it
	 */
	if (start < *defrag_end)
		return 1;

	*skip = 0;

	/*
	 * hopefully we have this extent in the tree already, try without
	 * the full extent lock
	 */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	read_unlock(&em_tree->lock);

	if (!em) {
		/* get the big lock and read metadata off disk */
		lock_extent(io_tree, start, start + len - 1, GFP_NOFS);
		em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
		unlock_extent(io_tree, start, start + len - 1, GFP_NOFS);

		if (!em)
			return 0;
	}

	/* this will cover holes, and inline extents */
	if (em->block_start >= EXTENT_MAP_LAST_BYTE)
		ret = 0;

	/*
	 * we hit a real extent, if it is big don't bother defragging it again
	 */
	if ((*last_len == 0 || *last_len >= thresh) && em->len >= thresh)
		ret = 0;

	/*
	 * last_len ends up being a counter of how many bytes we've defragged.
	 * every time we choose not to defrag an extent, we reset *last_len
	 * so that the next tiny extent will force a defrag.
	 *
	 * The end result of this is that tiny extents before a single big
	 * extent will force at least part of that big extent to be defragged.
	 */
	if (ret) {
		*last_len += len;
		*defrag_end = extent_map_end(em);
	} else {
		*last_len = 0;
		*skip = extent_map_end(em);
		*defrag_end = 0;
	}

	free_extent_map(em);
	return ret;
}
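
/*
 * Walk the file page by page, marking candidate pages dirty and delalloc
 * so that writeback rewrites them; compression can be forced and IO
 * started depending on the flags in btrfs_ioctl_defrag_range_args.
 */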
static int btrfs_defrag_file(struct file *file,
			     struct btrfs_ioctl_defrag_range_args *range)
{
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_ordered_extent *ordered;
	struct page *page;
	unsigned long last_index;
	unsigned long ra_pages = root->fs_info->bdi.ra_pages;
	unsigned long total_read = 0;
	u64 page_start;
	u64 page_end;
	u64 last_len = 0;
	u64 skip = 0;
	u64 defrag_end = 0;
	unsigned long i;
	int ret;

	if (inode->i_size == 0)
		return 0;

	if (range->start + range->len > range->start) {
		last_index = min_t(u64, inode->i_size - 1,
			 range->start + range->len - 1) >> PAGE_CACHE_SHIFT;
	} else {
		last_index = (inode->i_size - 1) >> PAGE_CACHE_SHIFT;
	}

	i = range->start >> PAGE_CACHE_SHIFT;
	while (i <= last_index) {
		if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
					 PAGE_CACHE_SIZE,
					 range->extent_thresh,
					 &last_len, &skip,
					 &defrag_end)) {
			unsigned long next;
			/*
			 * the should_defrag function tells us how much to skip
			 * bump our counter by the suggested amount
			 */
			next = (skip + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
			i = max(i + 1, next);
			continue;
		}

		if (total_read % ra_pages == 0) {
			btrfs_force_ra(inode->i_mapping, &file->f_ra, file, i,
				       min(last_index, i + ra_pages - 1));
		}
		total_read++;
		mutex_lock(&inode->i_mutex);
		if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)
			BTRFS_I(inode)->force_compress = 1;

		ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE);
		if (ret) {
			ret = -ENOSPC;
			break;
		}

		ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
		if (ret) {
			btrfs_free_reserved_data_space(root, inode,
						       PAGE_CACHE_SIZE);
			ret = -ENOSPC;
			break;
		}
again:
		if (inode->i_size == 0 ||
		    i > ((inode->i_size - 1) >> PAGE_CACHE_SHIFT)) {
			ret = 0;
			goto err_reservations;
		}

		page = grab_cache_page(inode->i_mapping, i);
		if (!page)
			goto err_reservations;

		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
				page_cache_release(page);
				goto err_reservations;
			}
		}

		if (page->mapping != inode->i_mapping) {
			unlock_page(page);
			page_cache_release(page);
			goto again;
		}

		wait_on_page_writeback(page);

		if (PageDirty(page)) {
			btrfs_free_reserved_data_space(root, inode,
						       PAGE_CACHE_SIZE);
			goto loop_unlock;
		}

		page_start = (u64)page->index << PAGE_CACHE_SHIFT;
		page_end = page_start + PAGE_CACHE_SIZE - 1;
		lock_extent(io_tree, page_start, page_end, GFP_NOFS);

		ordered = btrfs_lookup_ordered_extent(inode, page_start);
		if (ordered) {
			unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
			unlock_page(page);
			page_cache_release(page);
			btrfs_start_ordered_extent(inode, ordered, 1);
			btrfs_put_ordered_extent(ordered);
			goto again;
		}
		set_page_extent_mapped(page);

		/*
		 * this makes sure page_mkwrite is called on the
		 * page if it is dirtied again later
		 */
		clear_page_dirty_for_io(page);
		clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start,
				  page_end, EXTENT_DIRTY | EXTENT_DELALLOC |
				  EXTENT_DO_ACCOUNTING, GFP_NOFS);

		btrfs_set_extent_delalloc(inode, page_start, page_end, NULL);
		ClearPageChecked(page);
		set_page_dirty(page);
		unlock_extent(io_tree, page_start, page_end, GFP_NOFS);

loop_unlock:
		unlock_page(page);
		page_cache_release(page);
		mutex_unlock(&inode->i_mutex);

		btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
		balance_dirty_pages_ratelimited_nr(inode->i_mapping, 1);
		i++;
	}

	if ((range->flags & BTRFS_DEFRAG_RANGE_START_IO))
		filemap_flush(inode->i_mapping);

	if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
		/* the filemap_flush will queue IO into the worker threads, but
		 * we have to make sure the IO is actually started and that
		 * ordered extents get created before we return
		 */
		atomic_inc(&root->fs_info->async_submit_draining);
		while (atomic_read(&root->fs_info->nr_async_submits) ||
		       atomic_read(&root->fs_info->async_delalloc_pages)) {
			wait_event(root->fs_info->async_submit_wait,
			   (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
			    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
		}
		atomic_dec(&root->fs_info->async_submit_draining);

		mutex_lock(&inode->i_mutex);
		BTRFS_I(inode)->force_compress = 0;
		mutex_unlock(&inode->i_mutex);
	}

	return 0;

err_reservations:
	mutex_unlock(&inode->i_mutex);
	btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
	btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
	return ret;
}
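
/*
 * BTRFS_IOC_RESIZE: grow or shrink one device of the filesystem.  The
 * argument string is "[<devid>:]<size>", where <size> may be "max" or a
 * +/- prefixed amount parsed by memparse().
 */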
static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
					void __user *arg)
{
	u64 new_size;
	u64 old_size;
	u64 devid = 1;
	struct btrfs_ioctl_vol_args *vol_args;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device = NULL;
	char *sizestr;
	char *devstr = NULL;
	int ret = 0;
	int namelen;
	int mod = 0;

	if (root->fs_info->sb->s_flags & MS_RDONLY)
		return -EROFS;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args))
		return PTR_ERR(vol_args);

	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
	namelen = strlen(vol_args->name);

	mutex_lock(&root->fs_info->volume_mutex);
	sizestr = vol_args->name;
	devstr = strchr(sizestr, ':');
	if (devstr) {
		char *end;
		sizestr = devstr + 1;
		*devstr = '\0';
		devstr = vol_args->name;
		devid = simple_strtoull(devstr, &end, 10);
		printk(KERN_INFO "resizing devid %llu\n",
		       (unsigned long long)devid);
	}
	device = btrfs_find_device(root, devid, NULL, NULL);
	if (!device) {
		printk(KERN_INFO "resizer unable to find device %llu\n",
		       (unsigned long long)devid);
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!strcmp(sizestr, "max"))
		new_size = device->bdev->bd_inode->i_size;
	else {
		if (sizestr[0] == '-') {
			mod = -1;
			sizestr++;
		} else if (sizestr[0] == '+') {
			mod = 1;
			sizestr++;
		}
		new_size = memparse(sizestr, NULL);
		if (new_size == 0) {
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	old_size = device->total_bytes;

	if (mod < 0) {
		if (new_size > old_size) {
			ret = -EINVAL;
			goto out_unlock;
		}
		new_size = old_size - new_size;
	} else if (mod > 0) {
		new_size = old_size + new_size;
	}

	if (new_size < 256 * 1024 * 1024) {
		ret = -EINVAL;
		goto out_unlock;
	}
	if (new_size > device->bdev->bd_inode->i_size) {
		ret = -EFBIG;
		goto out_unlock;
	}

	do_div(new_size, root->sectorsize);
	new_size *= root->sectorsize;

	printk(KERN_INFO "new size for %s is %llu\n",
	       device->name, (unsigned long long)new_size);

	if (new_size > old_size) {
		trans = btrfs_start_transaction(root, 1);
		ret = btrfs_grow_device(trans, device, new_size);
		btrfs_commit_transaction(trans, root);
	} else {
		ret = btrfs_shrink_device(device, new_size);
	}

out_unlock:
	mutex_unlock(&root->fs_info->volume_mutex);
	kfree(vol_args);
	return ret;
}
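
/*
 * Common entry point for subvolume and snapshot creation: with @subvol set
 * an empty subvolume is created, otherwise the fd in the ioctl arguments
 * names the source root to snapshot.
 */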
static noinline int btrfs_ioctl_snap_create(struct file *file,
					    void __user *arg, int subvol)
{
	struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
	struct btrfs_ioctl_vol_args *vol_args;
	struct file *src_file;
	int namelen;
	int ret = 0;

	if (root->fs_info->sb->s_flags & MS_RDONLY)
		return -EROFS;

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args))
		return PTR_ERR(vol_args);

	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
	namelen = strlen(vol_args->name);
	if (strchr(vol_args->name, '/')) {
		ret = -EINVAL;
		goto out;
	}

	if (subvol) {
		ret = btrfs_mksubvol(&file->f_path, vol_args->name, namelen,
				     NULL);
	} else {
		struct inode *src_inode;
		src_file = fget(vol_args->fd);
		if (!src_file) {
			ret = -EINVAL;
			goto out;
		}

		src_inode = src_file->f_path.dentry->d_inode;
		if (src_inode->i_sb != file->f_path.dentry->d_inode->i_sb) {
			printk(KERN_INFO "btrfs: Snapshot src from "
			       "another FS\n");
			ret = -EINVAL;
			fput(src_file);
			goto out;
		}
		ret = btrfs_mksubvol(&file->f_path, vol_args->name, namelen,
				     BTRFS_I(src_inode)->root);
		fput(src_file);
	}
out:
	kfree(vol_args);
	return ret;
}

/*
 * helper to check if the subvolume references other subvolumes
 */
static noinline int may_destroy_subvol(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = root->root_key.objectid;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root->fs_info->tree_root,
				&key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	ret = 0;
	if (path->slots[0] > 0) {
		path->slots[0]--;
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid == root->root_key.objectid &&
		    key.type == BTRFS_ROOT_REF_KEY)
			ret = -ENOTEMPTY;
	}
out:
	btrfs_free_path(path);
	return ret;
}
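
/*
 * Return 1 if @key falls inside the [min, max] key range described by the
 * search ioctl arguments, 0 otherwise.
 */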
static noinline int key_in_sk(struct btrfs_key *key,
			      struct btrfs_ioctl_search_key *sk)
{
	struct btrfs_key test;
	int ret;

	test.objectid = sk->min_objectid;
	test.type = sk->min_type;
	test.offset = sk->min_offset;

	ret = btrfs_comp_cpu_keys(key, &test);
	if (ret < 0)
		return 0;

	test.objectid = sk->max_objectid;
	test.type = sk->max_type;
	test.offset = sk->max_offset;

	ret = btrfs_comp_cpu_keys(key, &test);
	if (ret > 0)
		return 0;
	return 1;
}
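
/*
 * Copy the items from the current leaf that match the search key range
 * into the result buffer as (search header, item data) pairs, then advance
 * @key past the last position examined.  Returns nonzero when the caller
 * should stop searching.
 */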
static noinline int copy_to_sk(struct btrfs_root *root,
			       struct btrfs_path *path,
			       struct btrfs_key *key,
			       struct btrfs_ioctl_search_key *sk,
			       char *buf,
			       unsigned long *sk_offset,
			       int *num_found)
{
	u64 found_transid;
	struct extent_buffer *leaf;
	struct btrfs_ioctl_search_header sh;
	unsigned long item_off;
	unsigned long item_len;
	int nritems;
	int i;
	int slot;
	int found = 0;
	int ret = 0;

	leaf = path->nodes[0];
	slot = path->slots[0];
	nritems = btrfs_header_nritems(leaf);

	if (btrfs_header_generation(leaf) > sk->max_transid) {
		i = nritems;
		goto advance_key;
	}
	found_transid = btrfs_header_generation(leaf);

	for (i = slot; i < nritems; i++) {
		item_off = btrfs_item_ptr_offset(leaf, i);
		item_len = btrfs_item_size_nr(leaf, i);

		if (item_len > BTRFS_SEARCH_ARGS_BUFSIZE)
			item_len = 0;

		if (sizeof(sh) + item_len + *sk_offset >
		    BTRFS_SEARCH_ARGS_BUFSIZE) {
			ret = 1;
			goto overflow;
		}

		btrfs_item_key_to_cpu(leaf, key, i);
		if (!key_in_sk(key, sk))
			continue;

		sh.objectid = key->objectid;
		sh.offset = key->offset;
		sh.type = key->type;
		sh.len = item_len;
		sh.transid = found_transid;

		/* copy search result header */
		memcpy(buf + *sk_offset, &sh, sizeof(sh));
		*sk_offset += sizeof(sh);

		if (item_len) {
			char *p = buf + *sk_offset;
			/* copy the item */
			read_extent_buffer(leaf, p,
					   item_off, item_len);
			*sk_offset += item_len;
		}
		found++;

		if (*num_found >= sk->nr_items)
			break;
	}
advance_key:
	ret = 0;
	if (key->offset < (u64)-1 && key->offset < sk->max_offset)
		key->offset++;
	else if (key->type < (u8)-1 && key->type < sk->max_type) {
		key->offset = 0;
		key->type++;
	} else if (key->objectid < (u64)-1 && key->objectid < sk->max_objectid) {
		key->offset = 0;
		key->type = 0;
		key->objectid++;
	} else
		ret = 1;
overflow:
	*num_found += found;
	return ret;
}
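
/*
 * Core of the tree search ioctl: resolve the requested tree, then walk
 * forward from the minimum key, filling args->buf until the key range is
 * exhausted, nr_items results are collected or the buffer is full.
 */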
static noinline int search_ioctl(struct inode *inode,
				 struct btrfs_ioctl_search_args *args)
{
	struct btrfs_root *root;
	struct btrfs_key key;
	struct btrfs_key max_key;
	struct btrfs_path *path;
	struct btrfs_ioctl_search_key *sk = &args->key;
	struct btrfs_fs_info *info = BTRFS_I(inode)->root->fs_info;
	int ret;
	int num_found = 0;
	unsigned long sk_offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (sk->tree_id == 0) {
		/* search the root of the inode that was passed */
		root = BTRFS_I(inode)->root;
	} else {
		key.objectid = sk->tree_id;
		key.type = BTRFS_ROOT_ITEM_KEY;
		key.offset = (u64)-1;
		root = btrfs_read_fs_root_no_name(info, &key);
		if (IS_ERR(root)) {
			printk(KERN_ERR "could not find root %llu\n",
			       sk->tree_id);
			btrfs_free_path(path);
			return -ENOENT;
		}
	}

	key.objectid = sk->min_objectid;
	key.type = sk->min_type;
	key.offset = sk->min_offset;

	max_key.objectid = sk->max_objectid;
	max_key.type = sk->max_type;
	max_key.offset = sk->max_offset;

	path->keep_locks = 1;

	while (1) {
		ret = btrfs_search_forward(root, &key, &max_key, path, 0,
					   sk->min_transid);
		if (ret != 0) {
			if (ret > 0)
				ret = 0;
			goto err;
		}
		ret = copy_to_sk(root, path, &key, sk, args->buf,
				 &sk_offset, &num_found);
		btrfs_release_path(root, path);
		if (ret || num_found >= sk->nr_items)
			break;

	}
	ret = 0;
err:
	sk->nr_items = num_found;
	btrfs_free_path(path);
	return ret;
}
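
/*
 * BTRFS_IOC_TREE_SEARCH: copy the search arguments in from user space,
 * run search_ioctl() and copy the filled result buffer back out.
 * Requires CAP_SYS_ADMIN.
 */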
static noinline int btrfs_ioctl_tree_search(struct file *file,
					    void __user *argp)
{
	struct btrfs_ioctl_search_args *args;
	struct inode *inode;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	args = kmalloc(sizeof(*args), GFP_KERNEL);
	if (!args)
		return -ENOMEM;

	if (copy_from_user(args, argp, sizeof(*args))) {
		kfree(args);
		return -EFAULT;
	}
	inode = fdentry(file)->d_inode;
	ret = search_ioctl(inode, args);
	if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
		ret = -EFAULT;
	kfree(args);
	return ret;
}
|
|
|
|
|
2009-11-18 13:42:14 +08:00
|
|
|
/*
|
2010-03-01 04:39:26 +08:00
|
|
|
* Search INODE_REFs to identify path name of 'dirid' directory
|
|
|
|
* in a 'tree_id' tree. and sets path name to 'name'.
|
|
|
|
*/
|
2009-11-18 13:42:14 +08:00
|
|
|
static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
|
|
|
|
u64 tree_id, u64 dirid, char *name)
|
|
|
|
{
|
|
|
|
struct btrfs_root *root;
|
|
|
|
struct btrfs_key key;
|
2010-03-01 04:39:26 +08:00
|
|
|
char *ptr;
|
2009-11-18 13:42:14 +08:00
|
|
|
int ret = -1;
|
|
|
|
int slot;
|
|
|
|
int len;
|
|
|
|
int total_len = 0;
|
|
|
|
struct btrfs_inode_ref *iref;
|
|
|
|
struct extent_buffer *l;
|
|
|
|
struct btrfs_path *path;
|
|
|
|
|
|
|
|
if (dirid == BTRFS_FIRST_FREE_OBJECTID) {
|
|
|
|
name[0]='\0';
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
path = btrfs_alloc_path();
|
|
|
|
if (!path)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2010-03-01 04:39:26 +08:00
|
|
|
ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX];
|
2009-11-18 13:42:14 +08:00
|
|
|
|
|
|
|
key.objectid = tree_id;
|
|
|
|
key.type = BTRFS_ROOT_ITEM_KEY;
|
|
|
|
key.offset = (u64)-1;
|
|
|
|
root = btrfs_read_fs_root_no_name(info, &key);
|
|
|
|
if (IS_ERR(root)) {
|
|
|
|
printk(KERN_ERR "could not find root %llu\n", tree_id);
|
2010-03-19 00:23:10 +08:00
|
|
|
ret = -ENOENT;
|
|
|
|
goto out;
|
2009-11-18 13:42:14 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
key.objectid = dirid;
|
|
|
|
key.type = BTRFS_INODE_REF_KEY;
|
2010-03-19 00:23:10 +08:00
|
|
|
key.offset = (u64)-1;
|
2009-11-18 13:42:14 +08:00
|
|
|
|
|
|
|
while (1) {
|
|
|
|
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
|
|
|
|
if (ret < 0)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
l = path->nodes[0];
|
|
|
|
slot = path->slots[0];
|
2010-03-19 00:23:10 +08:00
|
|
|
if (ret > 0 && slot > 0)
|
|
|
|
slot--;
|
2009-11-18 13:42:14 +08:00
|
|
|
btrfs_item_key_to_cpu(l, &key, slot);
|
|
|
|
|
|
|
|
if (ret > 0 && (key.objectid != dirid ||
|
2010-03-01 04:39:26 +08:00
|
|
|
key.type != BTRFS_INODE_REF_KEY)) {
|
|
|
|
ret = -ENOENT;
|
2009-11-18 13:42:14 +08:00
|
|
|
goto out;
|
2010-03-01 04:39:26 +08:00
|
|
|
}
|
2009-11-18 13:42:14 +08:00
|
|
|
|
|
|
|
iref = btrfs_item_ptr(l, slot, struct btrfs_inode_ref);
|
|
|
|
len = btrfs_inode_ref_name_len(l, iref);
|
|
|
|
ptr -= len + 1;
|
|
|
|
total_len += len + 1;
|
2010-03-01 04:39:26 +08:00
|
|
|
if (ptr < name)
|
2009-11-18 13:42:14 +08:00
|
|
|
goto out;
|
|
|
|
|
|
|
|
*(ptr + len) = '/';
|
|
|
|
read_extent_buffer(l, ptr, (unsigned long)(iref + 1), len);
|
|
|
|
|
|
|
|
if (key.offset == BTRFS_FIRST_FREE_OBJECTID)
|
|
|
|
break;
|
|
|
|
|
|
|
|
btrfs_release_path(root, path);
|
|
|
|
key.objectid = key.offset;
|
2010-03-19 00:23:10 +08:00
|
|
|
key.offset = (u64)-1;
|
2009-11-18 13:42:14 +08:00
|
|
|
dirid = key.objectid;
|
|
|
|
|
|
|
|
}
|
2010-03-01 04:39:26 +08:00
|
|
|
if (ptr < name)
|
2009-11-18 13:42:14 +08:00
|
|
|
goto out;
|
2010-03-01 04:39:26 +08:00
|
|
|
memcpy(name, ptr, total_len);
|
2009-11-18 13:42:14 +08:00
|
|
|
name[total_len] = '\0';
|
|
|
|
ret = 0;
|
|
|
|
out:
|
|
|
|
btrfs_free_path(path);
|
2010-03-01 04:39:26 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static noinline int btrfs_ioctl_ino_lookup(struct file *file,
|
|
|
|
void __user *argp)
|
|
|
|
{
|
|
|
|
struct btrfs_ioctl_ino_lookup_args *args;
|
|
|
|
struct inode *inode;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!capable(CAP_SYS_ADMIN))
|
|
|
|
return -EPERM;
|
|
|
|
|
|
|
|
args = kmalloc(sizeof(*args), GFP_KERNEL);
if (!args)
return -ENOMEM;
if (copy_from_user(args, argp, sizeof(*args))) {
|
|
|
|
kfree(args);
|
|
|
|
return -EFAULT;
|
|
|
|
}
|
|
|
|
inode = fdentry(file)->d_inode;
|
|
|
|
|
2010-03-19 00:17:05 +08:00
|
|
|
if (args->treeid == 0)
|
|
|
|
args->treeid = BTRFS_I(inode)->root->root_key.objectid;
|
|
|
|
|
2010-03-01 04:39:26 +08:00
|
|
|
ret = btrfs_search_path_in_tree(BTRFS_I(inode)->root->fs_info,
|
|
|
|
args->treeid, args->objectid,
|
|
|
|
args->name);
|
|
|
|
|
|
|
|
if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
|
|
|
|
ret = -EFAULT;
|
|
|
|
|
|
|
|
kfree(args);
|
2009-11-18 13:42:14 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
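/*
 * Hedged user-space companion to btrfs_ioctl_ino_lookup() above: treeid
 * selects the subvolume tree (0 means "the tree owning the fd's inode",
 * exactly as the handler substitutes), objectid names the directory
 * inode, and on success args.name carries the path built by
 * btrfs_search_path_in_tree().  Struct layout assumed from btrfs-progs'
 * btrfs/ioctl.h.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

static int print_dir_path(int fd, __u64 dirid)
{
	struct btrfs_ioctl_ino_lookup_args args;

	memset(&args, 0, sizeof(args));
	args.treeid = 0;		/* resolve within fd's own subvolume */
	args.objectid = dirid;

	if (ioctl(fd, BTRFS_IOC_INO_LOOKUP, &args) < 0)
		return -1;
	printf("dir %llu lives at %s\n", (unsigned long long)dirid, args.name);
	return 0;
}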
2009-09-22 04:00:26 +08:00
|
|
|
static noinline int btrfs_ioctl_snap_destroy(struct file *file,
|
|
|
|
void __user *arg)
|
|
|
|
{
|
|
|
|
struct dentry *parent = fdentry(file);
|
|
|
|
struct dentry *dentry;
|
|
|
|
struct inode *dir = parent->d_inode;
|
|
|
|
struct inode *inode;
|
|
|
|
struct btrfs_root *root = BTRFS_I(dir)->root;
|
|
|
|
struct btrfs_root *dest = NULL;
|
|
|
|
struct btrfs_ioctl_vol_args *vol_args;
|
|
|
|
struct btrfs_trans_handle *trans;
|
|
|
|
int namelen;
|
|
|
|
int ret;
|
|
|
|
int err = 0;
|
|
|
|
|
|
|
|
if (!capable(CAP_SYS_ADMIN))
|
|
|
|
return -EPERM;
|
|
|
|
|
|
|
|
vol_args = memdup_user(arg, sizeof(*vol_args));
|
|
|
|
if (IS_ERR(vol_args))
|
|
|
|
return PTR_ERR(vol_args);
|
|
|
|
|
|
|
|
vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
|
|
|
|
namelen = strlen(vol_args->name);
|
|
|
|
if (strchr(vol_args->name, '/') ||
|
|
|
|
strncmp(vol_args->name, "..", namelen) == 0) {
|
|
|
|
err = -EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = mnt_want_write(file->f_path.mnt);
|
|
|
|
if (err)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
|
|
|
|
dentry = lookup_one_len(vol_args->name, parent, namelen);
|
|
|
|
if (IS_ERR(dentry)) {
|
|
|
|
err = PTR_ERR(dentry);
|
|
|
|
goto out_unlock_dir;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!dentry->d_inode) {
|
|
|
|
err = -ENOENT;
|
|
|
|
goto out_dput;
|
|
|
|
}
|
|
|
|
|
|
|
|
inode = dentry->d_inode;
|
|
|
|
if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) {
|
|
|
|
err = -EINVAL;
|
|
|
|
goto out_dput;
|
|
|
|
}
|
|
|
|
|
|
|
|
dest = BTRFS_I(inode)->root;
|
|
|
|
|
|
|
|
mutex_lock(&inode->i_mutex);
|
|
|
|
err = d_invalidate(dentry);
|
|
|
|
if (err)
|
|
|
|
goto out_unlock;
|
|
|
|
|
|
|
|
down_write(&root->fs_info->subvol_sem);
|
|
|
|
|
|
|
|
err = may_destroy_subvol(dest);
|
|
|
|
if (err)
|
|
|
|
goto out_up_write;
|
|
|
|
|
|
|
|
trans = btrfs_start_transaction(root, 1);
|
|
|
|
ret = btrfs_unlink_subvol(trans, root, dir,
|
|
|
|
dest->root_key.objectid,
|
|
|
|
dentry->d_name.name,
|
|
|
|
dentry->d_name.len);
|
|
|
|
BUG_ON(ret);
|
|
|
|
|
|
|
|
btrfs_record_root_in_trans(trans, dest);
|
|
|
|
|
|
|
|
memset(&dest->root_item.drop_progress, 0,
|
|
|
|
sizeof(dest->root_item.drop_progress));
|
|
|
|
dest->root_item.drop_level = 0;
|
|
|
|
btrfs_set_root_refs(&dest->root_item, 0);
|
|
|
|
|
|
|
|
ret = btrfs_insert_orphan_item(trans,
|
|
|
|
root->fs_info->tree_root,
|
|
|
|
dest->root_key.objectid);
|
|
|
|
BUG_ON(ret);
|
|
|
|
|
|
|
|
ret = btrfs_commit_transaction(trans, root);
|
|
|
|
BUG_ON(ret);
|
|
|
|
inode->i_flags |= S_DEAD;
|
|
|
|
out_up_write:
|
|
|
|
up_write(&root->fs_info->subvol_sem);
|
|
|
|
out_unlock:
|
|
|
|
mutex_unlock(&inode->i_mutex);
|
|
|
|
if (!err) {
|
2009-10-09 21:25:16 +08:00
|
|
|
shrink_dcache_sb(root->fs_info->sb);
|
2009-09-22 04:00:26 +08:00
|
|
|
btrfs_invalidate_inodes(dest);
|
|
|
|
d_delete(dentry);
|
|
|
|
}
|
|
|
|
out_dput:
|
|
|
|
dput(dentry);
|
|
|
|
out_unlock_dir:
|
|
|
|
mutex_unlock(&dir->i_mutex);
|
|
|
|
mnt_drop_write(file->f_path.mnt);
|
|
|
|
out:
|
|
|
|
kfree(vol_args);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
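/*
 * Hedged user-space sketch of BTRFS_IOC_SNAP_DESTROY as handled above:
 * the ioctl is issued on a file descriptor for the snapshot's parent
 * directory, and vol_args.name is the snapshot's name inside that
 * directory (the handler rejects '/' and "..").  Uses the
 * btrfs_ioctl_vol_args layout from btrfs-progs' btrfs/ioctl.h.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

static int destroy_snapshot(int parent_dir_fd, const char *name)
{
	struct btrfs_ioctl_vol_args args;

	memset(&args, 0, sizeof(args));
	snprintf(args.name, sizeof(args.name), "%s", name);

	return ioctl(parent_dir_fd, BTRFS_IOC_SNAP_DESTROY, &args);
}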
2010-03-11 22:42:04 +08:00
|
|
|
static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
|
2008-06-12 09:53:53 +08:00
|
|
|
{
|
|
|
|
struct inode *inode = fdentry(file)->d_inode;
|
|
|
|
struct btrfs_root *root = BTRFS_I(inode)->root;
|
2010-03-11 22:42:04 +08:00
|
|
|
struct btrfs_ioctl_defrag_range_args *range;
|
2008-11-13 03:34:12 +08:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = mnt_want_write(file->f_path.mnt);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2008-06-12 09:53:53 +08:00
|
|
|
|
|
|
|
switch (inode->i_mode & S_IFMT) {
|
|
|
|
case S_IFDIR:
|
2009-01-06 05:57:23 +08:00
|
|
|
if (!capable(CAP_SYS_ADMIN)) {
|
|
|
|
ret = -EPERM;
|
|
|
|
goto out;
|
|
|
|
}
|
2008-06-12 09:53:53 +08:00
|
|
|
btrfs_defrag_root(root, 0);
|
|
|
|
btrfs_defrag_root(root->fs_info->extent_root, 0);
|
|
|
|
break;
|
|
|
|
case S_IFREG:
|
2009-01-06 05:57:23 +08:00
|
|
|
if (!(file->f_mode & FMODE_WRITE)) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
2010-03-11 22:42:04 +08:00
|
|
|
|
|
|
|
range = kzalloc(sizeof(*range), GFP_KERNEL);
|
|
|
|
if (!range) {
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (argp) {
|
|
|
|
if (copy_from_user(range, argp,
|
|
|
|
sizeof(*range))) {
|
|
|
|
ret = -EFAULT;
|
|
|
|
kfree(range);
|
2010-03-20 19:24:48 +08:00
|
|
|
goto out;
|
2010-03-11 22:42:04 +08:00
|
|
|
}
|
|
|
|
/* compression requires us to start the IO */
|
|
|
|
if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
|
|
|
|
range->flags |= BTRFS_DEFRAG_RANGE_START_IO;
|
|
|
|
range->extent_thresh = (u32)-1;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
/* the rest are all set to zero by kzalloc */
|
|
|
|
range->len = (u64)-1;
|
|
|
|
}
|
|
|
|
btrfs_defrag_file(file, range);
|
|
|
|
kfree(range);
|
2008-06-12 09:53:53 +08:00
|
|
|
break;
|
|
|
|
}
|
2009-01-06 05:57:23 +08:00
|
|
|
out:
|
2008-12-19 23:58:39 +08:00
|
|
|
mnt_drop_write(file->f_path.mnt);
|
2009-01-06 05:57:23 +08:00
|
|
|
return ret;
|
2008-06-12 09:53:53 +08:00
|
|
|
}
|
|
|
|
|
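/*
 * Hedged user-space sketch of BTRFS_IOC_DEFRAG_RANGE against a regular
 * file, using only the btrfs_ioctl_defrag_range_args fields the handler
 * above reads (len, flags, extent_thresh).  Passing the COMPRESS flag is
 * enough; the kernel side adds START_IO and widens extent_thresh itself.
 */
#include <string.h>
#include <sys/ioctl.h>

static int defrag_whole_file(int fd, int compress)
{
	struct btrfs_ioctl_defrag_range_args range;

	memset(&range, 0, sizeof(range));
	range.len = (__u64)-1;			/* defragment the entire file */
	if (compress)
		range.flags |= BTRFS_DEFRAG_RANGE_COMPRESS;

	return ioctl(fd, BTRFS_IOC_DEFRAG_RANGE, &range);
}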
2008-12-02 22:54:17 +08:00
|
|
|
static long btrfs_ioctl_add_dev(struct btrfs_root *root, void __user *arg)
|
2008-06-12 09:53:53 +08:00
|
|
|
{
|
|
|
|
struct btrfs_ioctl_vol_args *vol_args;
|
|
|
|
int ret;
|
|
|
|
|
2009-01-06 05:57:23 +08:00
|
|
|
if (!capable(CAP_SYS_ADMIN))
|
|
|
|
return -EPERM;
|
|
|
|
|
2009-04-08 15:06:54 +08:00
|
|
|
vol_args = memdup_user(arg, sizeof(*vol_args));
|
|
|
|
if (IS_ERR(vol_args))
|
|
|
|
return PTR_ERR(vol_args);
|
2008-06-12 09:53:53 +08:00
|
|
|
|
2008-07-25 00:20:14 +08:00
|
|
|
vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
|
2008-06-12 09:53:53 +08:00
|
|
|
ret = btrfs_init_new_device(root, vol_args->name);
|
|
|
|
|
|
|
|
kfree(vol_args);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2008-12-02 22:54:17 +08:00
|
|
|
static long btrfs_ioctl_rm_dev(struct btrfs_root *root, void __user *arg)
|
2008-06-12 09:53:53 +08:00
|
|
|
{
|
|
|
|
struct btrfs_ioctl_vol_args *vol_args;
|
|
|
|
int ret;
|
|
|
|
|
2009-01-06 05:57:23 +08:00
|
|
|
if (!capable(CAP_SYS_ADMIN))
|
|
|
|
return -EPERM;
|
|
|
|
|
2008-11-13 03:34:12 +08:00
|
|
|
if (root->fs_info->sb->s_flags & MS_RDONLY)
|
|
|
|
return -EROFS;
|
|
|
|
|
2009-04-08 15:06:54 +08:00
|
|
|
vol_args = memdup_user(arg, sizeof(*vol_args));
|
|
|
|
if (IS_ERR(vol_args))
|
|
|
|
return PTR_ERR(vol_args);
|
2008-06-12 09:53:53 +08:00
|
|
|
|
2008-07-25 00:20:14 +08:00
|
|
|
vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
|
2008-06-12 09:53:53 +08:00
|
|
|
ret = btrfs_rm_device(root, vol_args->name);
|
|
|
|
|
|
|
|
kfree(vol_args);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
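/*
 * Hedged user-space sketch of the device add/remove ioctls above.  Both
 * take a btrfs_ioctl_vol_args whose name field is the block device path;
 * the fd should reference a file or directory on the mounted filesystem
 * (typically the mount point).
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

static int change_device(int mnt_fd, const char *dev_path, int add)
{
	struct btrfs_ioctl_vol_args args;

	memset(&args, 0, sizeof(args));
	snprintf(args.name, sizeof(args.name), "%s", dev_path);

	return ioctl(mnt_fd, add ? BTRFS_IOC_ADD_DEV : BTRFS_IOC_RM_DEV, &args);
}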
2009-09-22 04:00:26 +08:00
|
|
|
static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
|
|
|
|
u64 off, u64 olen, u64 destoff)
|
2008-06-12 09:53:53 +08:00
|
|
|
{
|
|
|
|
struct inode *inode = fdentry(file)->d_inode;
|
|
|
|
struct btrfs_root *root = BTRFS_I(inode)->root;
|
|
|
|
struct file *src_file;
|
|
|
|
struct inode *src;
|
|
|
|
struct btrfs_trans_handle *trans;
|
|
|
|
struct btrfs_path *path;
|
|
|
|
struct extent_buffer *leaf;
|
2008-08-05 11:23:47 +08:00
|
|
|
char *buf;
|
|
|
|
struct btrfs_key key;
|
2008-06-12 09:53:53 +08:00
|
|
|
u32 nritems;
|
|
|
|
int slot;
|
2008-08-05 11:23:47 +08:00
|
|
|
int ret;
|
2008-11-13 03:32:25 +08:00
|
|
|
u64 len = olen;
|
|
|
|
u64 bs = root->fs_info->sb->s_blocksize;
|
|
|
|
u64 hint_byte;
|
2008-12-09 05:58:54 +08:00
|
|
|
|
2008-11-13 03:32:25 +08:00
|
|
|
/*
|
|
|
|
* TODO:
|
|
|
|
* - split compressed inline extents. annoying: we need to
|
|
|
|
* decompress into destination's address_space (the file offset
|
|
|
|
* may change, so source mapping won't do), then recompress (or
|
|
|
|
* otherwise reinsert) a subrange.
|
|
|
|
* - allow ranges within the same file to be cloned (provided
|
|
|
|
* they don't overlap)?
|
|
|
|
*/
|
|
|
|
|
2009-01-06 05:57:23 +08:00
|
|
|
/* the destination must be opened for writing */
|
|
|
|
if (!(file->f_mode & FMODE_WRITE))
|
|
|
|
return -EINVAL;
|
|
|
|
|
2008-11-13 03:34:12 +08:00
|
|
|
ret = mnt_want_write(file->f_path.mnt);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2008-11-13 03:32:25 +08:00
|
|
|
src_file = fget(srcfd);
|
2008-12-19 23:58:39 +08:00
|
|
|
if (!src_file) {
|
|
|
|
ret = -EBADF;
|
|
|
|
goto out_drop_write;
|
|
|
|
}
|
2008-06-12 09:53:53 +08:00
|
|
|
src = src_file->f_dentry->d_inode;
|
|
|
|
|
2008-11-13 03:32:25 +08:00
|
|
|
ret = -EINVAL;
|
|
|
|
if (src == inode)
|
|
|
|
goto out_fput;
|
|
|
|
|
2008-08-05 11:23:47 +08:00
|
|
|
ret = -EISDIR;
|
|
|
|
if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode))
|
|
|
|
goto out_fput;
|
|
|
|
|
2008-06-12 09:53:53 +08:00
|
|
|
ret = -EXDEV;
|
2008-08-05 11:23:47 +08:00
|
|
|
if (src->i_sb != inode->i_sb || BTRFS_I(src)->root != root)
|
|
|
|
goto out_fput;
|
|
|
|
|
|
|
|
ret = -ENOMEM;
|
|
|
|
buf = vmalloc(btrfs_level_size(root, 0));
|
|
|
|
if (!buf)
|
|
|
|
goto out_fput;
|
|
|
|
|
|
|
|
path = btrfs_alloc_path();
|
|
|
|
if (!path) {
|
|
|
|
vfree(buf);
|
2008-06-12 09:53:53 +08:00
|
|
|
goto out_fput;
|
2008-08-05 11:23:47 +08:00
|
|
|
}
|
|
|
|
path->reada = 2;
|
2008-06-12 09:53:53 +08:00
|
|
|
|
|
|
|
if (inode < src) {
|
|
|
|
mutex_lock(&inode->i_mutex);
|
|
|
|
mutex_lock(&src->i_mutex);
|
|
|
|
} else {
|
|
|
|
mutex_lock(&src->i_mutex);
|
|
|
|
mutex_lock(&inode->i_mutex);
|
|
|
|
}
|
|
|
|
|
2008-11-13 03:32:25 +08:00
|
|
|
/* determine range to clone */
|
|
|
|
ret = -EINVAL;
|
|
|
|
if (off >= src->i_size || off + len > src->i_size)
|
2008-06-12 09:53:53 +08:00
|
|
|
goto out_unlock;
|
2008-11-13 03:32:25 +08:00
|
|
|
if (len == 0)
|
|
|
|
olen = len = src->i_size - off;
|
|
|
|
/* if we extend to eof, continue to block boundary */
|
|
|
|
if (off + len == src->i_size)
|
|
|
|
len = ((src->i_size + bs-1) & ~(bs-1))
|
|
|
|
- off;
|
|
|
|
|
|
|
|
/* verify the end result is block aligned */
|
|
|
|
if ((off & (bs-1)) ||
|
|
|
|
((off + len) & (bs-1)))
|
|
|
|
goto out_unlock;
|
|
|
|
|
2008-06-12 09:53:53 +08:00
|
|
|
/* do any pending delalloc/csum calc on src, one way or
|
|
|
|
another, and lock file content */
|
|
|
|
while (1) {
|
2008-09-24 01:14:14 +08:00
|
|
|
struct btrfs_ordered_extent *ordered;
|
2008-11-13 03:32:25 +08:00
|
|
|
lock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS);
|
|
|
|
ordered = btrfs_lookup_first_ordered_extent(inode, off+len);
|
2008-08-05 11:23:47 +08:00
|
|
|
if (BTRFS_I(src)->delalloc_bytes == 0 && !ordered)
|
2008-06-12 09:53:53 +08:00
|
|
|
break;
|
2008-11-13 03:32:25 +08:00
|
|
|
unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS);
|
2008-08-05 11:23:47 +08:00
|
|
|
if (ordered)
|
|
|
|
btrfs_put_ordered_extent(ordered);
|
2008-11-13 03:32:25 +08:00
|
|
|
btrfs_wait_ordered_range(src, off, off+len);
|
2008-06-12 09:53:53 +08:00
|
|
|
}
|
|
|
|
|
2008-08-05 11:23:47 +08:00
|
|
|
trans = btrfs_start_transaction(root, 1);
|
|
|
|
BUG_ON(!trans);
|
|
|
|
|
2008-11-13 03:32:25 +08:00
|
|
|
/* punch hole in destination first */
|
2009-11-12 17:34:08 +08:00
|
|
|
btrfs_drop_extents(trans, inode, off, off + len, &hint_byte, 1);
|
2008-11-13 03:32:25 +08:00
|
|
|
|
|
|
|
/* clone data */
|
2008-06-12 09:53:53 +08:00
|
|
|
key.objectid = src->i_ino;
|
2008-08-05 11:23:47 +08:00
|
|
|
key.type = BTRFS_EXTENT_DATA_KEY;
|
|
|
|
key.offset = 0;
|
2008-06-12 09:53:53 +08:00
|
|
|
|
|
|
|
while (1) {
|
|
|
|
/*
|
|
|
|
* note the key will change type as we walk through the
|
|
|
|
* tree.
|
|
|
|
*/
|
|
|
|
ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
|
|
|
|
if (ret < 0)
|
|
|
|
goto out;
|
|
|
|
|
2008-08-05 11:23:47 +08:00
|
|
|
nritems = btrfs_header_nritems(path->nodes[0]);
|
|
|
|
if (path->slots[0] >= nritems) {
|
2008-06-12 09:53:53 +08:00
|
|
|
ret = btrfs_next_leaf(root, path);
|
|
|
|
if (ret < 0)
|
|
|
|
goto out;
|
|
|
|
if (ret > 0)
|
|
|
|
break;
|
2008-08-05 11:23:47 +08:00
|
|
|
nritems = btrfs_header_nritems(path->nodes[0]);
|
2008-06-12 09:53:53 +08:00
|
|
|
}
|
|
|
|
leaf = path->nodes[0];
|
|
|
|
slot = path->slots[0];
|
|
|
|
|
2008-08-05 11:23:47 +08:00
|
|
|
btrfs_item_key_to_cpu(leaf, &key, slot);
|
2008-12-09 05:58:54 +08:00
|
|
|
if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY ||
|
2008-06-12 09:53:53 +08:00
|
|
|
key.objectid != src->i_ino)
|
|
|
|
break;
|
|
|
|
|
2008-11-13 03:32:25 +08:00
|
|
|
if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
|
|
|
|
struct btrfs_file_extent_item *extent;
|
|
|
|
int type;
|
2008-09-24 01:14:14 +08:00
|
|
|
u32 size;
|
|
|
|
struct btrfs_key new_key;
|
2008-11-13 03:32:25 +08:00
|
|
|
u64 disko = 0, diskl = 0;
|
|
|
|
u64 datao = 0, datal = 0;
|
|
|
|
u8 comp;
|
2008-09-24 01:14:14 +08:00
|
|
|
|
|
|
|
size = btrfs_item_size_nr(leaf, slot);
|
|
|
|
read_extent_buffer(leaf, buf,
|
|
|
|
btrfs_item_ptr_offset(leaf, slot),
|
|
|
|
size);
|
2008-11-13 03:32:25 +08:00
|
|
|
|
|
|
|
extent = btrfs_item_ptr(leaf, slot,
|
|
|
|
struct btrfs_file_extent_item);
|
|
|
|
comp = btrfs_file_extent_compression(leaf, extent);
|
|
|
|
type = btrfs_file_extent_type(leaf, extent);
|
2009-06-28 09:07:03 +08:00
|
|
|
if (type == BTRFS_FILE_EXTENT_REG ||
|
|
|
|
type == BTRFS_FILE_EXTENT_PREALLOC) {
|
2009-01-06 10:25:51 +08:00
|
|
|
disko = btrfs_file_extent_disk_bytenr(leaf,
|
|
|
|
extent);
|
|
|
|
diskl = btrfs_file_extent_disk_num_bytes(leaf,
|
|
|
|
extent);
|
2008-11-13 03:32:25 +08:00
|
|
|
datao = btrfs_file_extent_offset(leaf, extent);
|
2009-01-06 10:25:51 +08:00
|
|
|
datal = btrfs_file_extent_num_bytes(leaf,
|
|
|
|
extent);
|
2008-11-13 03:32:25 +08:00
|
|
|
} else if (type == BTRFS_FILE_EXTENT_INLINE) {
|
|
|
|
/* take upper bound, may be compressed */
|
|
|
|
datal = btrfs_file_extent_ram_bytes(leaf,
|
|
|
|
extent);
|
|
|
|
}
|
2008-09-24 01:14:14 +08:00
|
|
|
btrfs_release_path(root, path);
|
|
|
|
|
2008-11-13 03:32:25 +08:00
|
|
|
if (key.offset + datal < off ||
|
|
|
|
key.offset >= off+len)
|
|
|
|
goto next;
|
|
|
|
|
2008-09-24 01:14:14 +08:00
|
|
|
memcpy(&new_key, &key, sizeof(new_key));
|
|
|
|
new_key.objectid = inode->i_ino;
|
2008-11-13 03:32:25 +08:00
|
|
|
new_key.offset = key.offset + destoff - off;
|
2008-09-24 01:14:14 +08:00
|
|
|
|
2009-06-28 09:07:03 +08:00
|
|
|
if (type == BTRFS_FILE_EXTENT_REG ||
|
|
|
|
type == BTRFS_FILE_EXTENT_PREALLOC) {
|
2008-11-13 03:32:25 +08:00
|
|
|
ret = btrfs_insert_empty_item(trans, root, path,
|
|
|
|
&new_key, size);
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
leaf = path->nodes[0];
|
|
|
|
slot = path->slots[0];
|
|
|
|
write_extent_buffer(leaf, buf,
|
2008-09-24 01:14:14 +08:00
|
|
|
btrfs_item_ptr_offset(leaf, slot),
|
|
|
|
size);
|
2008-08-05 11:23:47 +08:00
|
|
|
|
2008-11-13 03:32:25 +08:00
|
|
|
extent = btrfs_item_ptr(leaf, slot,
|
2008-06-12 09:53:53 +08:00
|
|
|
struct btrfs_file_extent_item);
|
2008-11-13 03:32:25 +08:00
|
|
|
|
|
|
|
if (off > key.offset) {
|
|
|
|
datao += off - key.offset;
|
|
|
|
datal -= off - key.offset;
|
|
|
|
}
|
2009-10-09 23:29:53 +08:00
|
|
|
|
|
|
|
if (key.offset + datal > off + len)
|
|
|
|
datal = off + len - key.offset;
|
|
|
|
|
2008-11-13 03:32:25 +08:00
|
|
|
/* disko == 0 means it's a hole */
|
|
|
|
if (!disko)
|
|
|
|
datao = 0;
|
|
|
|
|
|
|
|
btrfs_set_file_extent_offset(leaf, extent,
|
|
|
|
datao);
|
|
|
|
btrfs_set_file_extent_num_bytes(leaf, extent,
|
|
|
|
datal);
|
|
|
|
if (disko) {
|
|
|
|
inode_add_bytes(inode, datal);
|
2008-08-05 11:23:47 +08:00
|
|
|
ret = btrfs_inc_extent_ref(trans, root,
|
2009-06-10 22:45:14 +08:00
|
|
|
disko, diskl, 0,
|
|
|
|
root->root_key.objectid,
|
|
|
|
inode->i_ino,
|
|
|
|
new_key.offset - datao);
|
2008-09-24 01:14:14 +08:00
|
|
|
BUG_ON(ret);
|
2008-06-12 09:53:53 +08:00
|
|
|
}
|
2008-11-13 03:32:25 +08:00
|
|
|
} else if (type == BTRFS_FILE_EXTENT_INLINE) {
|
|
|
|
u64 skip = 0;
|
|
|
|
u64 trim = 0;
|
|
|
|
if (off > key.offset) {
|
|
|
|
skip = off - key.offset;
|
|
|
|
new_key.offset += skip;
|
|
|
|
}
|
2009-01-06 10:25:51 +08:00
|
|
|
|
2008-11-13 03:32:25 +08:00
|
|
|
if (key.offset + datal > off+len)
|
|
|
|
trim = key.offset + datal - (off+len);
|
2009-01-06 10:25:51 +08:00
|
|
|
|
2008-11-13 03:32:25 +08:00
|
|
|
if (comp && (skip || trim)) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
size -= skip + trim;
|
|
|
|
datal -= skip + trim;
|
|
|
|
ret = btrfs_insert_empty_item(trans, root, path,
|
|
|
|
&new_key, size);
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (skip) {
|
2009-01-06 10:25:51 +08:00
|
|
|
u32 start =
|
|
|
|
btrfs_file_extent_calc_inline_size(0);
|
2008-11-13 03:32:25 +08:00
|
|
|
memmove(buf+start, buf+start+skip,
|
|
|
|
datal);
|
|
|
|
}
|
|
|
|
|
|
|
|
leaf = path->nodes[0];
|
|
|
|
slot = path->slots[0];
|
|
|
|
write_extent_buffer(leaf, buf,
|
|
|
|
btrfs_item_ptr_offset(leaf, slot),
|
|
|
|
size);
|
|
|
|
inode_add_bytes(inode, datal);
|
2008-06-12 09:53:53 +08:00
|
|
|
}
|
2008-11-13 03:32:25 +08:00
|
|
|
|
|
|
|
btrfs_mark_buffer_dirty(leaf);
|
2008-08-05 11:23:47 +08:00
|
|
|
}
|
2008-11-13 03:32:25 +08:00
|
|
|
|
2009-01-06 10:25:51 +08:00
|
|
|
next:
|
2008-09-24 01:14:14 +08:00
|
|
|
btrfs_release_path(root, path);
|
2008-06-12 09:53:53 +08:00
|
|
|
key.offset++;
|
|
|
|
}
|
|
|
|
ret = 0;
|
|
|
|
out:
|
2008-08-05 11:23:47 +08:00
|
|
|
btrfs_release_path(root, path);
|
|
|
|
if (ret == 0) {
|
|
|
|
inode->i_mtime = inode->i_ctime = CURRENT_TIME;
|
2008-11-13 03:32:25 +08:00
|
|
|
if (destoff + olen > inode->i_size)
|
|
|
|
btrfs_i_size_write(inode, destoff + olen);
|
2008-08-05 11:23:47 +08:00
|
|
|
BTRFS_I(inode)->flags = BTRFS_I(src)->flags;
|
|
|
|
ret = btrfs_update_inode(trans, root, inode);
|
|
|
|
}
|
2008-06-12 09:53:53 +08:00
|
|
|
btrfs_end_transaction(trans, root);
|
2008-11-13 03:32:25 +08:00
|
|
|
unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS);
|
2008-08-05 11:23:47 +08:00
|
|
|
if (ret)
|
|
|
|
vmtruncate(inode, 0);
|
2008-06-12 09:53:53 +08:00
|
|
|
out_unlock:
|
|
|
|
mutex_unlock(&src->i_mutex);
|
|
|
|
mutex_unlock(&inode->i_mutex);
|
2008-08-05 11:23:47 +08:00
|
|
|
vfree(buf);
|
|
|
|
btrfs_free_path(path);
|
2008-06-12 09:53:53 +08:00
|
|
|
out_fput:
|
|
|
|
fput(src_file);
|
2008-12-19 23:58:39 +08:00
|
|
|
out_drop_write:
|
|
|
|
mnt_drop_write(file->f_path.mnt);
|
2008-06-12 09:53:53 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2008-12-02 22:52:24 +08:00
|
|
|
static long btrfs_ioctl_clone_range(struct file *file, void __user *argp)
|
2008-11-13 03:32:25 +08:00
|
|
|
{
|
|
|
|
struct btrfs_ioctl_clone_range_args args;
|
|
|
|
|
2008-12-02 22:52:24 +08:00
|
|
|
if (copy_from_user(&args, argp, sizeof(args)))
|
2008-11-13 03:32:25 +08:00
|
|
|
return -EFAULT;
|
|
|
|
return btrfs_ioctl_clone(file, args.src_fd, args.src_offset,
|
|
|
|
args.src_length, args.dest_offset);
|
|
|
|
}
|
|
|
|
|
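/*
 * Hedged user-space sketch of the two clone entry points above.
 * BTRFS_IOC_CLONE passes the source fd directly as the ioctl argument,
 * while BTRFS_IOC_CLONE_RANGE goes through btrfs_ioctl_clone_range_args;
 * offsets and lengths must be block aligned, and src_length == 0 means
 * "clone through EOF", mirroring the checks in btrfs_ioctl_clone().
 */
#include <sys/ioctl.h>

static int clone_whole_file(int dst_fd, int src_fd)
{
	return ioctl(dst_fd, BTRFS_IOC_CLONE, src_fd);
}

static int clone_file_range(int dst_fd, int src_fd, __u64 off, __u64 len,
			    __u64 destoff)
{
	struct btrfs_ioctl_clone_range_args args = {
		.src_fd      = src_fd,
		.src_offset  = off,
		.src_length  = len,
		.dest_offset = destoff,
	};

	return ioctl(dst_fd, BTRFS_IOC_CLONE_RANGE, &args);
}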
2008-06-12 09:53:53 +08:00
|
|
|
/*
|
|
|
|
* there are many ways the trans_start and trans_end ioctls can lead
|
|
|
|
* to deadlocks. They should only be used by applications that
|
|
|
|
* basically own the machine, and have a very in depth understanding
|
|
|
|
* of all the possible deadlocks and enospc problems.
|
|
|
|
*/
|
2008-12-02 22:54:17 +08:00
|
|
|
static long btrfs_ioctl_trans_start(struct file *file)
|
2008-06-12 09:53:53 +08:00
|
|
|
{
|
|
|
|
struct inode *inode = fdentry(file)->d_inode;
|
|
|
|
struct btrfs_root *root = BTRFS_I(inode)->root;
|
|
|
|
struct btrfs_trans_handle *trans;
|
2009-09-30 06:38:44 +08:00
|
|
|
int ret;
|
2008-06-12 09:53:53 +08:00
|
|
|
|
2009-09-30 06:38:44 +08:00
|
|
|
ret = -EPERM;
|
2008-06-12 09:53:58 +08:00
|
|
|
if (!capable(CAP_SYS_ADMIN))
|
2009-09-30 06:38:44 +08:00
|
|
|
goto out;
|
2008-06-12 09:53:58 +08:00
|
|
|
|
2009-09-30 06:38:44 +08:00
|
|
|
ret = -EINPROGRESS;
|
|
|
|
if (file->private_data)
|
2008-06-12 09:53:53 +08:00
|
|
|
goto out;
|
2008-08-04 22:41:27 +08:00
|
|
|
|
2008-11-13 03:34:12 +08:00
|
|
|
ret = mnt_want_write(file->f_path.mnt);
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
|
2008-08-04 22:41:27 +08:00
|
|
|
mutex_lock(&root->fs_info->trans_mutex);
|
|
|
|
root->fs_info->open_ioctl_trans++;
|
|
|
|
mutex_unlock(&root->fs_info->trans_mutex);
|
|
|
|
|
2009-09-30 06:38:44 +08:00
|
|
|
ret = -ENOMEM;
|
2008-08-04 22:41:27 +08:00
|
|
|
trans = btrfs_start_ioctl_transaction(root, 0);
|
2009-09-30 06:38:44 +08:00
|
|
|
if (!trans)
|
|
|
|
goto out_drop;
|
|
|
|
|
|
|
|
file->private_data = trans;
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
out_drop:
|
|
|
|
mutex_lock(&root->fs_info->trans_mutex);
|
|
|
|
root->fs_info->open_ioctl_trans--;
|
|
|
|
mutex_unlock(&root->fs_info->trans_mutex);
|
|
|
|
mnt_drop_write(file->f_path.mnt);
|
2008-06-12 09:53:53 +08:00
|
|
|
out:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2009-12-12 05:11:29 +08:00
|
|
|
static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
|
|
|
|
{
|
|
|
|
struct inode *inode = fdentry(file)->d_inode;
|
|
|
|
struct btrfs_root *root = BTRFS_I(inode)->root;
|
|
|
|
struct btrfs_root *new_root;
|
|
|
|
struct btrfs_dir_item *di;
|
|
|
|
struct btrfs_trans_handle *trans;
|
|
|
|
struct btrfs_path *path;
|
|
|
|
struct btrfs_key location;
|
|
|
|
struct btrfs_disk_key disk_key;
|
|
|
|
struct btrfs_super_block *disk_super;
|
|
|
|
u64 features;
|
|
|
|
u64 objectid = 0;
|
|
|
|
u64 dir_id;
|
|
|
|
|
|
|
|
if (!capable(CAP_SYS_ADMIN))
|
|
|
|
return -EPERM;
|
|
|
|
|
|
|
|
if (copy_from_user(&objectid, argp, sizeof(objectid)))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
if (!objectid)
|
|
|
|
objectid = root->root_key.objectid;
|
|
|
|
|
|
|
|
location.objectid = objectid;
|
|
|
|
location.type = BTRFS_ROOT_ITEM_KEY;
|
|
|
|
location.offset = (u64)-1;
|
|
|
|
|
|
|
|
new_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
|
|
|
|
if (IS_ERR(new_root))
|
|
|
|
return PTR_ERR(new_root);
|
|
|
|
|
|
|
|
if (btrfs_root_refs(&new_root->root_item) == 0)
|
|
|
|
return -ENOENT;
|
|
|
|
|
|
|
|
path = btrfs_alloc_path();
|
|
|
|
if (!path)
|
|
|
|
return -ENOMEM;
|
|
|
|
path->leave_spinning = 1;
|
|
|
|
|
|
|
|
trans = btrfs_start_transaction(root, 1);
|
|
|
|
if (!trans) {
|
|
|
|
btrfs_free_path(path);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
dir_id = btrfs_super_root_dir(&root->fs_info->super_copy);
|
|
|
|
di = btrfs_lookup_dir_item(trans, root->fs_info->tree_root, path,
|
|
|
|
dir_id, "default", 7, 1);
|
|
|
|
if (!di) {
|
|
|
|
btrfs_free_path(path);
|
|
|
|
btrfs_end_transaction(trans, root);
|
|
|
|
printk(KERN_ERR "Umm, you don't have the default dir item, "
|
|
|
|
"this isn't going to work\n");
|
|
|
|
return -ENOENT;
|
|
|
|
}
|
|
|
|
|
|
|
|
btrfs_cpu_key_to_disk(&disk_key, &new_root->root_key);
|
|
|
|
btrfs_set_dir_item_key(path->nodes[0], di, &disk_key);
|
|
|
|
btrfs_mark_buffer_dirty(path->nodes[0]);
|
|
|
|
btrfs_free_path(path);
|
|
|
|
|
|
|
|
disk_super = &root->fs_info->super_copy;
|
|
|
|
features = btrfs_super_incompat_flags(disk_super);
|
|
|
|
if (!(features & BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL)) {
|
|
|
|
features |= BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL;
|
|
|
|
btrfs_set_super_incompat_flags(disk_super, features);
|
|
|
|
}
|
|
|
|
btrfs_end_transaction(trans, root);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
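/*
 * Hedged user-space sketch of BTRFS_IOC_DEFAULT_SUBVOL as handled above:
 * the argument is a single u64 subvolume objectid fetched with
 * copy_from_user(); passing 0 keeps the subvolume of the fd itself, as
 * the handler substitutes root->root_key.objectid in that case.
 */
#include <sys/ioctl.h>

static int set_default_subvolume(int fd, __u64 objectid)
{
	return ioctl(fd, BTRFS_IOC_DEFAULT_SUBVOL, &objectid);
}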
2010-01-14 02:19:06 +08:00
|
|
|
long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
|
|
|
|
{
|
|
|
|
struct btrfs_ioctl_space_args space_args;
|
|
|
|
struct btrfs_ioctl_space_info space;
|
|
|
|
struct btrfs_ioctl_space_info *dest;
|
2010-03-17 03:40:10 +08:00
|
|
|
struct btrfs_ioctl_space_info *dest_orig;
|
|
|
|
struct btrfs_ioctl_space_info *user_dest;
|
2010-01-14 02:19:06 +08:00
|
|
|
struct btrfs_space_info *info;
|
2010-03-17 03:40:10 +08:00
|
|
|
int alloc_size;
|
2010-01-14 02:19:06 +08:00
|
|
|
int ret = 0;
|
2010-03-17 03:40:10 +08:00
|
|
|
int slot_count = 0;
|
2010-01-14 02:19:06 +08:00
|
|
|
|
|
|
|
if (copy_from_user(&space_args,
|
|
|
|
(struct btrfs_ioctl_space_args __user *)arg,
|
|
|
|
sizeof(space_args)))
|
|
|
|
return -EFAULT;
|
|
|
|
|
2010-03-17 03:40:10 +08:00
|
|
|
/* first we count slots */
|
|
|
|
rcu_read_lock();
|
|
|
|
list_for_each_entry_rcu(info, &root->fs_info->space_info, list)
|
|
|
|
slot_count++;
|
|
|
|
rcu_read_unlock();
|
|
|
|
|
|
|
|
/* space_slots == 0 means they are asking for a count */
|
|
|
|
if (space_args.space_slots == 0) {
|
|
|
|
space_args.total_spaces = slot_count;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
alloc_size = sizeof(*dest) * slot_count;
|
|
|
|
/* we generally have at most 6 or so space infos, one for each raid
|
|
|
|
* level. So, a whole page should be more than enough for everyone
|
|
|
|
*/
|
|
|
|
if (alloc_size > PAGE_CACHE_SIZE)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2010-01-14 02:19:06 +08:00
|
|
|
space_args.total_spaces = 0;
|
2010-03-17 03:40:10 +08:00
|
|
|
dest = kmalloc(alloc_size, GFP_NOFS);
|
|
|
|
if (!dest)
|
|
|
|
return -ENOMEM;
|
|
|
|
dest_orig = dest;
|
2010-01-14 02:19:06 +08:00
|
|
|
|
2010-03-17 03:40:10 +08:00
|
|
|
/* now we have a buffer to copy into */
|
2010-01-14 02:19:06 +08:00
|
|
|
rcu_read_lock();
|
|
|
|
list_for_each_entry_rcu(info, &root->fs_info->space_info, list) {
|
2010-03-17 03:40:10 +08:00
|
|
|
/* make sure we don't copy more than we allocated
|
|
|
|
* in our buffer
|
|
|
|
*/
|
|
|
|
if (slot_count == 0)
|
|
|
|
break;
|
|
|
|
slot_count--;
|
|
|
|
|
|
|
|
/* make sure userland has enough room in their buffer */
|
2010-01-14 02:19:06 +08:00
|
|
|
if (space_args.total_spaces >= space_args.space_slots)
|
|
|
|
break;
|
2010-03-17 03:40:10 +08:00
|
|
|
|
2010-01-14 02:19:06 +08:00
|
|
|
space.flags = info->flags;
|
|
|
|
space.total_bytes = info->total_bytes;
|
|
|
|
space.used_bytes = info->bytes_used;
|
2010-03-17 03:40:10 +08:00
|
|
|
memcpy(dest, &space, sizeof(space));
|
2010-01-14 02:19:06 +08:00
|
|
|
dest++;
|
|
|
|
space_args.total_spaces++;
|
|
|
|
}
|
|
|
|
rcu_read_unlock();
|
|
|
|
|
2010-03-17 03:40:10 +08:00
|
|
|
user_dest = (struct btrfs_ioctl_space_info *)
|
|
|
|
(arg + sizeof(struct btrfs_ioctl_space_args));
|
|
|
|
|
|
|
|
if (copy_to_user(user_dest, dest_orig, alloc_size))
|
|
|
|
ret = -EFAULT;
|
|
|
|
|
|
|
|
kfree(dest_orig);
|
|
|
|
out:
|
|
|
|
if (ret == 0 && copy_to_user(arg, &space_args, sizeof(space_args)))
|
2010-01-14 02:19:06 +08:00
|
|
|
ret = -EFAULT;
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
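/*
 * Hedged user-space sketch of the two-step BTRFS_IOC_SPACE_INFO protocol
 * implemented above: a first call with space_slots == 0 only reports the
 * number of space infos in total_spaces; the caller then allocates room
 * for that many btrfs_ioctl_space_info records directly behind the
 * header and calls again to have them filled in.
 */
#include <stdlib.h>
#include <sys/ioctl.h>

static struct btrfs_ioctl_space_args *fetch_space_info(int fd)
{
	struct btrfs_ioctl_space_args count = { .space_slots = 0 };
	struct btrfs_ioctl_space_args *full;
	size_t sz;

	if (ioctl(fd, BTRFS_IOC_SPACE_INFO, &count) < 0)
		return NULL;

	sz = sizeof(*full) +
	     count.total_spaces * sizeof(struct btrfs_ioctl_space_info);
	full = calloc(1, sz);
	if (!full)
		return NULL;

	full->space_slots = count.total_spaces;
	if (ioctl(fd, BTRFS_IOC_SPACE_INFO, full) < 0) {
		free(full);
		return NULL;
	}
	return full;	/* caller frees; records follow the header in memory */
}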
2008-06-12 09:53:53 +08:00
|
|
|
/*
|
|
|
|
* there are many ways the trans_start and trans_end ioctls can lead
|
|
|
|
* to deadlocks. They should only be used by applications that
|
|
|
|
* basically own the machine, and have a very in depth understanding
|
|
|
|
* of all the possible deadlocks and enospc problems.
|
|
|
|
*/
|
|
|
|
long btrfs_ioctl_trans_end(struct file *file)
|
|
|
|
{
|
|
|
|
struct inode *inode = fdentry(file)->d_inode;
|
|
|
|
struct btrfs_root *root = BTRFS_I(inode)->root;
|
|
|
|
struct btrfs_trans_handle *trans;
|
|
|
|
|
|
|
|
trans = file->private_data;
|
2009-09-30 06:38:44 +08:00
|
|
|
if (!trans)
|
|
|
|
return -EINVAL;
|
2008-09-06 04:43:31 +08:00
|
|
|
file->private_data = NULL;
|
2008-08-04 22:41:27 +08:00
|
|
|
|
2009-09-30 06:38:44 +08:00
|
|
|
btrfs_end_transaction(trans, root);
|
|
|
|
|
2008-08-04 22:41:27 +08:00
|
|
|
mutex_lock(&root->fs_info->trans_mutex);
|
|
|
|
root->fs_info->open_ioctl_trans--;
|
|
|
|
mutex_unlock(&root->fs_info->trans_mutex);
|
|
|
|
|
2008-12-12 05:30:06 +08:00
|
|
|
mnt_drop_write(file->f_path.mnt);
|
2009-09-30 06:38:44 +08:00
|
|
|
return 0;
|
2008-06-12 09:53:53 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
long btrfs_ioctl(struct file *file, unsigned int
|
|
|
|
cmd, unsigned long arg)
|
|
|
|
{
|
|
|
|
struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
|
2008-12-02 19:36:08 +08:00
|
|
|
void __user *argp = (void __user *)arg;
|
2008-06-12 09:53:53 +08:00
|
|
|
|
|
|
|
switch (cmd) {
|
2009-04-17 16:37:41 +08:00
|
|
|
case FS_IOC_GETFLAGS:
|
|
|
|
return btrfs_ioctl_getflags(file, argp);
|
|
|
|
case FS_IOC_SETFLAGS:
|
|
|
|
return btrfs_ioctl_setflags(file, argp);
|
|
|
|
case FS_IOC_GETVERSION:
|
|
|
|
return btrfs_ioctl_getversion(file, argp);
|
2008-06-12 09:53:53 +08:00
|
|
|
case BTRFS_IOC_SNAP_CREATE:
|
2008-12-02 19:36:08 +08:00
|
|
|
return btrfs_ioctl_snap_create(file, argp, 0);
|
2008-11-18 10:02:50 +08:00
|
|
|
case BTRFS_IOC_SUBVOL_CREATE:
|
2008-12-02 19:36:08 +08:00
|
|
|
return btrfs_ioctl_snap_create(file, argp, 1);
|
2009-09-22 04:00:26 +08:00
|
|
|
case BTRFS_IOC_SNAP_DESTROY:
|
|
|
|
return btrfs_ioctl_snap_destroy(file, argp);
|
2009-12-12 05:11:29 +08:00
|
|
|
case BTRFS_IOC_DEFAULT_SUBVOL:
|
|
|
|
return btrfs_ioctl_default_subvol(file, argp);
|
2008-06-12 09:53:53 +08:00
|
|
|
case BTRFS_IOC_DEFRAG:
|
2010-03-11 22:42:04 +08:00
|
|
|
return btrfs_ioctl_defrag(file, NULL);
|
|
|
|
case BTRFS_IOC_DEFRAG_RANGE:
|
|
|
|
return btrfs_ioctl_defrag(file, argp);
|
2008-06-12 09:53:53 +08:00
|
|
|
case BTRFS_IOC_RESIZE:
|
2008-12-02 19:36:08 +08:00
|
|
|
return btrfs_ioctl_resize(root, argp);
|
2008-06-12 09:53:53 +08:00
|
|
|
case BTRFS_IOC_ADD_DEV:
|
2008-12-02 19:36:08 +08:00
|
|
|
return btrfs_ioctl_add_dev(root, argp);
|
2008-06-12 09:53:53 +08:00
|
|
|
case BTRFS_IOC_RM_DEV:
|
2008-12-02 19:36:08 +08:00
|
|
|
return btrfs_ioctl_rm_dev(root, argp);
|
2008-06-12 09:53:53 +08:00
|
|
|
case BTRFS_IOC_BALANCE:
|
|
|
|
return btrfs_balance(root->fs_info->dev_root);
|
|
|
|
case BTRFS_IOC_CLONE:
|
2008-11-13 03:32:25 +08:00
|
|
|
return btrfs_ioctl_clone(file, arg, 0, 0, 0);
|
|
|
|
case BTRFS_IOC_CLONE_RANGE:
|
2008-12-02 22:52:24 +08:00
|
|
|
return btrfs_ioctl_clone_range(file, argp);
|
2008-06-12 09:53:53 +08:00
|
|
|
case BTRFS_IOC_TRANS_START:
|
|
|
|
return btrfs_ioctl_trans_start(file);
|
|
|
|
case BTRFS_IOC_TRANS_END:
|
|
|
|
return btrfs_ioctl_trans_end(file);
|
2010-03-01 04:39:26 +08:00
|
|
|
case BTRFS_IOC_TREE_SEARCH:
|
|
|
|
return btrfs_ioctl_tree_search(file, argp);
|
|
|
|
case BTRFS_IOC_INO_LOOKUP:
|
|
|
|
return btrfs_ioctl_ino_lookup(file, argp);
|
2010-01-14 02:19:06 +08:00
|
|
|
case BTRFS_IOC_SPACE_INFO:
|
|
|
|
return btrfs_ioctl_space_info(root, argp);
|
2008-06-12 09:53:53 +08:00
|
|
|
case BTRFS_IOC_SYNC:
|
|
|
|
btrfs_sync_fs(file->f_dentry->d_sb, 1);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return -ENOTTY;
|
|
|
|
}
|