commit 1064f874ab
Ever since mount propagation was introduced, in cases where a mount is
propagated to a parent mount and mountpoint pair that is already in use,
the code has placed the new mount behind the old mount in the mount hash
table.
This implementation detail is problematic as it allows the creation of
arbitrary-length mount hash chains.
Furthermore it invalidates the constraint maintained elsewhere in the
mount code that a parent mount and mountpoint pair will have exactly
one mount upon them, which makes this special case hard to deal with
and to talk about in the mount code.
Modify mount propagation to notice when there is already a mount at the
parent mount and mountpoint that a new mount is propagating to, and to
place that preexisting mount on top of the new mount.
Modify unmount propagation to notice when a mount that is being
unmounted has another mount on top of it (and no other children), and
to replace the unmounted mount with the mount on top of it.
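As a concrete illustration of the "another mount on top of it (and no
other children)" test, the check can be phrased against the struct mount
fields from fs/mount.h (mnt_mounts, mnt_child, mnt_mountpoint and
mnt.mnt_root) together with the list helpers from <linux/list.h>.  The
helper below is only a sketch in the spirit of the find_topper() helper
referred to in the v5 note further down; it is not a quotation of the
patch, and the name find_covering_mount is invented for illustration.

static struct mount *find_covering_mount(struct mount *mnt)
{
	struct mount *child;

	/* The mount must have exactly one child mount ... */
	if (!list_is_singular(&mnt->mnt_mounts))
		return NULL;

	/* ... and that child must sit on mnt's root dentry, i.e. it
	 * completely covers mnt; otherwise there is nothing to tuck
	 * under or to promote in place of the unmounted mount. */
	child = list_first_entry(&mnt->mnt_mounts, struct mount, mnt_child);
	if (child->mnt_mountpoint != mnt->mnt.mnt_root)
		return NULL;

	return child;
}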
Move the MNT_UMOUNT test from __lookup_mnt_last into
__propagate_umount as that is the only call of __lookup_mnt_last where
MNT_UMOUNT may be set on any mount visible in the mount hash table.
These modifications allow:
- __lookup_mnt_last to be removed.
- attach_shadows to be renamed __attach_mnt and its shadow
handling to be removed.
- commit_tree to be simplified
- copy_tree to be simplified
The result is an easier to understand tree of mounts that does not
allow creation of arbitrary length hash chains in the mount hash table.
The result is also a very slight userspace visible difference in semantics.
The following two cases now behave identically, where before order
mattered:
case 1: (explicit user action)
	B is a slave of A
	mount something on A/a, it will propagate to B/a
	and then mount something on B/a
case 2: (tucked mount)
	B is a slave of A
	mount something on B/a
	and then mount something on A/a
Historically umount A/a would fail in case 1 and succeed in case 2.
Now umount A/a succeeds in both configurations.
This very small change in semantics appears, if anything, to be a bug
fix to me, and my survey of userspace leads me to believe that no
programs will notice or care about this subtle semantic change.
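For concreteness, here is a hypothetical userspace reproducer for case 1,
written against the mount(2)/umount(2) interfaces; it is not part of the
patch.  It assumes it runs as root in a fresh mount namespace (for
example under "unshare -m") and that the directories /A, /A/a and /B
already exist; the paths and the use of tmpfs are purely illustrative.

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Make /A a mount of its own, mark it shared, and create /B as
	 * a slave of /A so that mounts on A propagate to B. */
	if (mount("/A", "/A", NULL, MS_BIND, NULL) ||
	    mount("none", "/A", NULL, MS_SHARED, NULL) ||
	    mount("/A", "/B", NULL, MS_BIND, NULL) ||
	    mount("none", "/B", NULL, MS_SLAVE, NULL)) {
		perror("setup");
		return 1;
	}

	/* case 1: mount on A/a (it propagates to B/a), then mount
	 * something else on B/a, on top of the propagated mount. */
	if (mount("tmpfs", "/A/a", "tmpfs", 0, NULL) ||
	    mount("tmpfs", "/B/a", "tmpfs", 0, NULL)) {
		perror("mount");
		return 1;
	}

	/* Before this change the umount failed with EBUSY because the
	 * propagated mount at B/a was covered; now it succeeds. */
	if (umount("/A/a"))
		perror("umount /A/a");
	else
		printf("umount /A/a succeeded\n");
	return 0;
}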
v2: Updated mnt_change_mountpoint to not call dput or mntput and
    instead to decrement the counts directly.  It is guaranteed that
    there will be other references when mnt_change_mountpoint is
    called, so this is safe.
v3: Moved put_mountpoint under mount_lock in attach_recursive_mnt, as
    the locking in fs/namespace.c changed between v2 and v3.
v4: Reworked the logic in propagate_mount_busy and __propagate_umount
    that detects when a mount completely covers another mount.
v5: Removed unnecessary tests whose result is always true in
    find_topper and attach_recursive_mnt.
v6: Documented the user space visible semantic difference.
Cc: stable@vger.kernel.org
Fixes: b90fa9ae8f ("[PATCH] shared mount handling: bind and rbind")
Tested-by: Andrei Vagin <avagin@virtuozzo.com>
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
/*
 *  linux/fs/pnode.h
 *
 * (C) Copyright IBM Corporation 2005.
 *	Released under GPL v2.
 *
 */
#ifndef _LINUX_PNODE_H
#define _LINUX_PNODE_H

#include <linux/list.h>
#include "mount.h"

#define IS_MNT_SHARED(m) ((m)->mnt.mnt_flags & MNT_SHARED)
#define IS_MNT_SLAVE(m) ((m)->mnt_master)
#define IS_MNT_NEW(m) (!(m)->mnt_ns)
#define CLEAR_MNT_SHARED(m) ((m)->mnt.mnt_flags &= ~MNT_SHARED)
#define IS_MNT_UNBINDABLE(m) ((m)->mnt.mnt_flags & MNT_UNBINDABLE)
#define IS_MNT_MARKED(m) ((m)->mnt.mnt_flags & MNT_MARKED)
#define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED)
#define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED)
#define IS_MNT_LOCKED(m) ((m)->mnt.mnt_flags & MNT_LOCKED)

#define CL_EXPIRE		0x01
#define CL_SLAVE		0x02
#define CL_COPY_UNBINDABLE	0x04
#define CL_MAKE_SHARED		0x08
#define CL_PRIVATE		0x10
#define CL_SHARED_TO_SLAVE	0x20
#define CL_UNPRIVILEGED		0x40
#define CL_COPY_MNT_NS_FILE	0x80

#define CL_COPY_ALL		(CL_COPY_UNBINDABLE | CL_COPY_MNT_NS_FILE)

static inline void set_mnt_shared(struct mount *mnt)
{
	mnt->mnt.mnt_flags &= ~MNT_SHARED_MASK;
	mnt->mnt.mnt_flags |= MNT_SHARED;
}

void change_mnt_propagation(struct mount *, int);
int propagate_mnt(struct mount *, struct mountpoint *, struct mount *,
		struct hlist_head *);
int propagate_umount(struct list_head *);
int propagate_mount_busy(struct mount *, int);
void propagate_mount_unlock(struct mount *);
void mnt_release_group_id(struct mount *);
int get_dominating_id(struct mount *mnt, const struct path *root);
unsigned int mnt_get_count(struct mount *mnt);
void mnt_set_mountpoint(struct mount *, struct mountpoint *,
			struct mount *);
void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp,
			   struct mount *mnt);
struct mount *copy_tree(struct mount *, struct dentry *, int);
bool is_path_reachable(struct mount *, struct dentry *,
			 const struct path *root);
int count_mounts(struct mnt_namespace *ns, struct mount *mnt);
#endif /* _LINUX_PNODE_H */
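As a usage note (illustrative, not taken from the patch): the CL_* values
above form the flag word taken by copy_tree().  A hypothetical caller
that wants a fully private copy of a subtree, including unbindable
mounts, might combine them as in the sketch below; copy_whole_subtree is
an invented name and error handling is omitted.

static struct mount *copy_whole_subtree(struct mount *old)
{
	/* CL_COPY_ALL also copies unbindable and mount-namespace-file
	 * mounts; CL_PRIVATE severs shared/slave relationships in the
	 * resulting copy. */
	return copy_tree(old, old->mnt.mnt_root, CL_COPY_ALL | CL_PRIVATE);
}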