@@ -155,15 +155,15 @@ static void __touch_mnt_namespace(struct mnt_namespace *ns)
 	}
 }
 
-static void detach_mnt(struct vfsmount *mnt, struct nameidata *old_nd)
+static void detach_mnt(struct vfsmount *mnt, struct path *old_path)
 {
-	old_nd->path.dentry = mnt->mnt_mountpoint;
-	old_nd->path.mnt = mnt->mnt_parent;
+	old_path->dentry = mnt->mnt_mountpoint;
+	old_path->mnt = mnt->mnt_parent;
 	mnt->mnt_parent = mnt;
 	mnt->mnt_mountpoint = mnt->mnt_root;
 	list_del_init(&mnt->mnt_child);
 	list_del_init(&mnt->mnt_hash);
-	old_nd->path.dentry->d_mounted--;
+	old_path->dentry->d_mounted--;
 }
 
 void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
@@ -174,12 +174,12 @@ void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
 	dentry->d_mounted++;
 }
 
-static void attach_mnt(struct vfsmount *mnt, struct nameidata *nd)
+static void attach_mnt(struct vfsmount *mnt, struct path *path)
 {
-	mnt_set_mountpoint(nd->path.mnt, nd->path.dentry, mnt);
+	mnt_set_mountpoint(path->mnt, path->dentry, mnt);
 	list_add_tail(&mnt->mnt_hash, mount_hashtable +
-			hash(nd->path.mnt, nd->path.dentry));
-	list_add_tail(&mnt->mnt_child, &nd->path.mnt->mnt_mounts);
+			hash(path->mnt, path->dentry));
+	list_add_tail(&mnt->mnt_child, &path->mnt->mnt_mounts);
 }
 
 /*
@@ -262,10 +262,8 @@ static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
 		/* stick the duplicate mount on the same expiry list
 		 * as the original if that was on one */
 		if (flag & CL_EXPIRE) {
-			spin_lock(&vfsmount_lock);
 			if (!list_empty(&old->mnt_expire))
 				list_add(&mnt->mnt_expire, &old->mnt_expire);
-			spin_unlock(&vfsmount_lock);
 		}
 	}
 	return mnt;
@@ -548,6 +546,7 @@ void release_mounts(struct list_head *head)
 			m = mnt->mnt_parent;
 			mnt->mnt_mountpoint = mnt->mnt_root;
 			mnt->mnt_parent = mnt;
+			m->mnt_ghosts--;
 			spin_unlock(&vfsmount_lock);
 			dput(dentry);
 			mntput(m);
@@ -572,12 +571,16 @@ void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
 		__touch_mnt_namespace(p->mnt_ns);
 		p->mnt_ns = NULL;
 		list_del_init(&p->mnt_child);
-		if (p->mnt_parent != p)
+		if (p->mnt_parent != p) {
+			p->mnt_parent->mnt_ghosts++;
 			p->mnt_mountpoint->d_mounted--;
+		}
 		change_mnt_propagation(p, MS_PRIVATE);
 	}
 }
 
+static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts);
+
 static int do_umount(struct vfsmount *mnt, int flags)
 {
 	struct super_block *sb = mnt->mnt_sb;
@@ -650,6 +653,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
 	spin_lock(&vfsmount_lock);
 	event++;
 
+	if (!(flags & MNT_DETACH))
+		shrink_submounts(mnt, &umount_list);
+
 	retval = -EBUSY;
 	if (flags & MNT_DETACH || !propagate_mount_busy(mnt, 2)) {
 		if (!list_empty(&mnt->mnt_list))
@@ -744,7 +750,7 @@ struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
 					int flag)
 {
 	struct vfsmount *res, *p, *q, *r, *s;
-	struct nameidata nd;
+	struct path path;
 
 	if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(mnt))
 		return NULL;
@@ -769,14 +775,14 @@ struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
 				q = q->mnt_parent;
 			}
 			p = s;
-			nd.path.mnt = q;
-			nd.path.dentry = p->mnt_mountpoint;
+			path.mnt = q;
+			path.dentry = p->mnt_mountpoint;
 			q = clone_mnt(p, p->mnt_root, flag);
 			if (!q)
 				goto Enomem;
 			spin_lock(&vfsmount_lock);
 			list_add_tail(&q->mnt_list, &res->mnt_list);
-			attach_mnt(q, &nd);
+			attach_mnt(q, &path);
 			spin_unlock(&vfsmount_lock);
 		}
 	}
@@ -876,11 +882,11 @@ void drop_collected_mounts(struct vfsmount *mnt)
  * in allocations.
  */
 static int attach_recursive_mnt(struct vfsmount *source_mnt,
-			struct nameidata *nd, struct nameidata *parent_nd)
+			struct path *path, struct path *parent_path)
 {
 	LIST_HEAD(tree_list);
-	struct vfsmount *dest_mnt = nd->path.mnt;
-	struct dentry *dest_dentry = nd->path.dentry;
+	struct vfsmount *dest_mnt = path->mnt;
+	struct dentry *dest_dentry = path->dentry;
 	struct vfsmount *child, *p;
 
 	if (propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list))
@@ -892,9 +898,9 @@ static int attach_recursive_mnt(struct vfsmount *source_mnt,
 	}
 
 	spin_lock(&vfsmount_lock);
-	if (parent_nd) {
-		detach_mnt(source_mnt, parent_nd);
-		attach_mnt(source_mnt, nd);
+	if (parent_path) {
+		detach_mnt(source_mnt, parent_path);
+		attach_mnt(source_mnt, path);
 		touch_mnt_namespace(current->nsproxy->mnt_ns);
 	} else {
 		mnt_set_mountpoint(dest_mnt, dest_dentry, source_mnt);
@@ -930,7 +936,7 @@ static int graft_tree(struct vfsmount *mnt, struct nameidata *nd)
 
 	err = -ENOENT;
 	if (IS_ROOT(nd->path.dentry) || !d_unhashed(nd->path.dentry))
-		err = attach_recursive_mnt(mnt, nd, NULL);
+		err = attach_recursive_mnt(mnt, &nd->path, NULL);
 out_unlock:
 	mutex_unlock(&nd->path.dentry->d_inode->i_mutex);
 	if (!err)
@@ -1059,7 +1065,8 @@ static inline int tree_contains_unbindable(struct vfsmount *mnt)
  */
 static noinline int do_move_mount(struct nameidata *nd, char *old_name)
 {
-	struct nameidata old_nd, parent_nd;
+	struct nameidata old_nd;
+	struct path parent_path;
 	struct vfsmount *p;
 	int err = 0;
 	if (!capable(CAP_SYS_ADMIN))
@@ -1114,21 +1121,19 @@ static noinline int do_move_mount(struct nameidata *nd, char *old_name)
 		if (p == old_nd.path.mnt)
 			goto out1;
 
-	err = attach_recursive_mnt(old_nd.path.mnt, nd, &parent_nd);
+	err = attach_recursive_mnt(old_nd.path.mnt, &nd->path, &parent_path);
 	if (err)
 		goto out1;
 
-	spin_lock(&vfsmount_lock);
 	/* if the mount is moved, it should no longer be expire
 	 * automatically */
 	list_del_init(&old_nd.path.mnt->mnt_expire);
-	spin_unlock(&vfsmount_lock);
 out1:
 	mutex_unlock(&nd->path.dentry->d_inode->i_mutex);
 out:
 	up_write(&namespace_sem);
 	if (!err)
-		path_put(&parent_nd.path);
+		path_put(&parent_path);
 	path_put(&old_nd.path);
 	return err;
 }
@@ -1189,12 +1194,9 @@ int do_add_mount(struct vfsmount *newmnt, struct nameidata *nd,
 	if ((err = graft_tree(newmnt, nd)))
 		goto unlock;
 
-	if (fslist) {
-		/* add to the specified expiration list */
-		spin_lock(&vfsmount_lock);
+	if (fslist) /* add to the specified expiration list */
 		list_add_tail(&newmnt->mnt_expire, fslist);
-		spin_unlock(&vfsmount_lock);
-	}
 
 	up_write(&namespace_sem);
 	return 0;
@@ -1206,75 +1208,6 @@ unlock:
 
 EXPORT_SYMBOL_GPL(do_add_mount);
 
-static void expire_mount(struct vfsmount *mnt, struct list_head *mounts,
-				struct list_head *umounts)
-{
-	spin_lock(&vfsmount_lock);
-
-	/*
-	 * Check if mount is still attached, if not, let whoever holds it deal
-	 * with the sucker
-	 */
-	if (mnt->mnt_parent == mnt) {
-		spin_unlock(&vfsmount_lock);
-		return;
-	}
-
-	/*
-	 * Check that it is still dead: the count should now be 2 - as
-	 * contributed by the vfsmount parent and the mntget above
-	 */
-	if (!propagate_mount_busy(mnt, 2)) {
-		/* delete from the namespace */
-		touch_mnt_namespace(mnt->mnt_ns);
-		list_del_init(&mnt->mnt_list);
-		mnt->mnt_ns = NULL;
-		umount_tree(mnt, 1, umounts);
-		spin_unlock(&vfsmount_lock);
-	} else {
-		/*
-		 * Someone brought it back to life whilst we didn't have any
-		 * locks held so return it to the expiration list
-		 */
-		list_add_tail(&mnt->mnt_expire, mounts);
-		spin_unlock(&vfsmount_lock);
-	}
-}
-
-/*
- * go through the vfsmounts we've just consigned to the graveyard to
- * - check that they're still dead
- * - delete the vfsmount from the appropriate namespace under lock
- * - dispose of the corpse
- */
-static void expire_mount_list(struct list_head *graveyard, struct list_head *mounts)
-{
-	struct mnt_namespace *ns;
-	struct vfsmount *mnt;
-
-	while (!list_empty(graveyard)) {
-		LIST_HEAD(umounts);
-		mnt = list_first_entry(graveyard, struct vfsmount, mnt_expire);
-		list_del_init(&mnt->mnt_expire);
-
-		/* don't do anything if the namespace is dead - all the
-		 * vfsmounts from it are going away anyway */
-		ns = mnt->mnt_ns;
-		if (!ns || !ns->root)
-			continue;
-		get_mnt_ns(ns);
-
-		spin_unlock(&vfsmount_lock);
-		down_write(&namespace_sem);
-		expire_mount(mnt, mounts, &umounts);
-		up_write(&namespace_sem);
-		release_mounts(&umounts);
-		mntput(mnt);
-		put_mnt_ns(ns);
-		spin_lock(&vfsmount_lock);
-	}
-}
-
 /*
  * process a list of expirable mountpoints with the intent of discarding any
  * mountpoints that aren't in use and haven't been touched since last we came
@@ -1284,10 +1217,12 @@ void mark_mounts_for_expiry(struct list_head *mounts)
 {
 	struct vfsmount *mnt, *next;
 	LIST_HEAD(graveyard);
+	LIST_HEAD(umounts);
 
 	if (list_empty(mounts))
 		return;
 
+	down_write(&namespace_sem);
 	spin_lock(&vfsmount_lock);
 
 	/* extract from the expiration list every vfsmount that matches the
@@ -1298,16 +1233,19 @@ void mark_mounts_for_expiry(struct list_head *mounts)
 	 */
 	list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
 		if (!xchg(&mnt->mnt_expiry_mark, 1) ||
-			atomic_read(&mnt->mnt_count) != 1)
+			propagate_mount_busy(mnt, 1))
 			continue;
-
-		mntget(mnt);
 		list_move(&mnt->mnt_expire, &graveyard);
 	}
-
-	expire_mount_list(&graveyard, mounts);
-
+	while (!list_empty(&graveyard)) {
+		mnt = list_first_entry(&graveyard, struct vfsmount, mnt_expire);
+		touch_mnt_namespace(mnt->mnt_ns);
+		umount_tree(mnt, 1, &umounts);
+	}
 	spin_unlock(&vfsmount_lock);
+	up_write(&namespace_sem);
+
+	release_mounts(&umounts);
 }
 
 EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
@@ -1343,7 +1281,6 @@ resume:
 		}
 
 		if (!propagate_mount_busy(mnt, 1)) {
-			mntget(mnt);
 			list_move_tail(&mnt->mnt_expire, graveyard);
 			found++;
 		}
@@ -1363,22 +1300,22 @@ resume:
 * process a list of expirable mountpoints with the intent of discarding any
 * submounts of a specific parent mountpoint
 */
-void shrink_submounts(struct vfsmount *mountpoint, struct list_head *mounts)
+static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts)
 {
 	LIST_HEAD(graveyard);
-	int found;
-
-	spin_lock(&vfsmount_lock);
+	struct vfsmount *m;
 
 	/* extract submounts of 'mountpoint' from the expiration list */
-	while ((found = select_submounts(mountpoint, &graveyard)) != 0)
-		expire_mount_list(&graveyard, mounts);
-
-	spin_unlock(&vfsmount_lock);
+	while (select_submounts(mnt, &graveyard)) {
+		while (!list_empty(&graveyard)) {
+			m = list_first_entry(&graveyard, struct vfsmount,
+						mnt_expire);
+			touch_mnt_namespace(mnt->mnt_ns);
+			umount_tree(mnt, 1, umounts);
+		}
+	}
 }
 
-EXPORT_SYMBOL_GPL(shrink_submounts);
-
 /*
 * Some copy_from_user() implementations do not return the exact number of
 * bytes remaining to copy on a fault. But copy_mount_options() requires that.
@@ -1683,7 +1620,7 @@ void set_fs_pwd(struct fs_struct *fs, struct path *path)
 		path_put(&old_pwd);
 }
 
-static void chroot_fs_refs(struct nameidata *old_nd, struct nameidata *new_nd)
+static void chroot_fs_refs(struct path *old_root, struct path *new_root)
 {
 	struct task_struct *g, *p;
 	struct fs_struct *fs;
@@ -1695,12 +1632,12 @@ static void chroot_fs_refs(struct nameidata *old_nd, struct nameidata *new_nd)
 		if (fs) {
 			atomic_inc(&fs->count);
 			task_unlock(p);
-			if (fs->root.dentry == old_nd->path.dentry
-			    && fs->root.mnt == old_nd->path.mnt)
-				set_fs_root(fs, &new_nd->path);
-			if (fs->pwd.dentry == old_nd->path.dentry
-			    && fs->pwd.mnt == old_nd->path.mnt)
-				set_fs_pwd(fs, &new_nd->path);
+			if (fs->root.dentry == old_root->dentry
+			    && fs->root.mnt == old_root->mnt)
+				set_fs_root(fs, new_root);
+			if (fs->pwd.dentry == old_root->dentry
+			    && fs->pwd.mnt == old_root->mnt)
+				set_fs_pwd(fs, new_root);
 			put_fs_struct(fs);
 		} else
 			task_unlock(p);
@@ -1737,7 +1674,8 @@ asmlinkage long sys_pivot_root(const char __user * new_root,
 			     const char __user * put_old)
 {
 	struct vfsmount *tmp;
-	struct nameidata new_nd, old_nd, parent_nd, root_parent, user_nd;
+	struct nameidata new_nd, old_nd, user_nd;
+	struct path parent_path, root_parent;
 	int error;
 
 	if (!capable(CAP_SYS_ADMIN))
@@ -1811,19 +1749,19 @@ asmlinkage long sys_pivot_root(const char __user * new_root,
 			goto out3;
 	} else if (!is_subdir(old_nd.path.dentry, new_nd.path.dentry))
 		goto out3;
-	detach_mnt(new_nd.path.mnt, &parent_nd);
+	detach_mnt(new_nd.path.mnt, &parent_path);
 	detach_mnt(user_nd.path.mnt, &root_parent);
 	/* mount old root on put_old */
-	attach_mnt(user_nd.path.mnt, &old_nd);
+	attach_mnt(user_nd.path.mnt, &old_nd.path);
 	/* mount new_root on / */
 	attach_mnt(new_nd.path.mnt, &root_parent);
 	touch_mnt_namespace(current->nsproxy->mnt_ns);
 	spin_unlock(&vfsmount_lock);
-	chroot_fs_refs(&user_nd, &new_nd);
+	chroot_fs_refs(&user_nd.path, &new_nd.path);
 	security_sb_post_pivotroot(&user_nd, &new_nd);
 	error = 0;
-	path_put(&root_parent.path);
-	path_put(&parent_nd.path);
+	path_put(&root_parent);
+	path_put(&parent_path);
 out2:
 	mutex_unlock(&old_nd.path.dentry->d_inode->i_mutex);
 	up_write(&namespace_sem);