7d6beb71da
Merge tag 'idmapped-mounts-v5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/brauner/linux
Pull idmapped mounts from Christian Brauner:
"This introduces idmapped mounts which has been in the making for some
time. Simply put, different mounts can expose the same file or
directory with different ownership. This initial implementation comes
with ports for fat, ext4 and with Christoph's port for xfs with more
filesystems being actively worked on by independent people and
maintainers.
Idmapped mounts handle a wide range of long-standing use cases. Here
are just a few:
- Idmapped mounts make it possible to easily share files between
multiple users or multiple machines especially in complex
scenarios. For example, idmapped mounts will be used in the
implementation of portable home directories in
systemd-homed.service(8) where they allow users to move their home
directory to an external storage device and use it on multiple
computers where they are assigned different uids and gids. This
effectively makes it possible to assign random uids and gids at
login time.
- It is possible to share files from the host with unprivileged
containers without having to change ownership permanently through
chown(2).
- It is possible to idmap a container's rootfs without having to
mangle every file. For example, Chromebooks use it to share the
user's Download folder with their unprivileged containers in their
Linux subsystem.
- It is possible to share files between containers with
non-overlapping idmappings.
- Filesystems that lack a proper concept of ownership, such as fat, can
use idmapped mounts to implement discretionary access (DAC)
permission checking.
- They allow users to efficiently change ownership on a per-mount
basis without having to (recursively) chown(2) all files. In
contrast to chown(2), changing ownership of large sets of files is
instantaneous with idmapped mounts. This is especially useful when
ownership of a whole root filesystem of a virtual machine or
container is changed. With idmapped mounts a single mount_setattr()
syscall is sufficient to change the ownership of all files.
- Idmapped mounts always take the current ownership into account as
idmappings specify what a given uid or gid is supposed to be mapped
to. This contrasts with the chown(2) syscall which cannot by itself
take the current ownership of the files it changes into account. It
simply changes the ownership to the specified uid and gid. This is
especially problematic when recursively chown(2)ing a large set of
files, which is common in the aforementioned portable home
directory and container/VM scenarios.
- Idmapped mounts allow changing ownership locally, restricting it
to specific mounts, and temporarily, as the ownership changes only
apply as long as the mount exists.
Several userspace projects have either already put up patches and
pull-requests for this feature or will do so should you decide to pull
this:
- systemd: In a wide variety of scenarios but especially right away
in their implementation of portable home directories.
https://systemd.io/HOME_DIRECTORY/
- container runtimes: containerd, runC, LXD: To share data between
host and unprivileged containers, unprivileged and privileged
containers, etc. The pull request for idmapped mounts support in
containerd, the default Kubernetes runtime, has been up for quite a
while now: https://github.com/containerd/containerd/pull/4734
- The virtio-fs developers and several users have expressed interest
in using this feature with virtual machines once virtio-fs is
ported.
- ChromeOS: Sharing host-directories with unprivileged containers.
I've tightly synced with all those projects and all of those listed
here have also expressed their need/desire for this feature on the
mailing list. For more info on how people use this there are a bunch
of talks about it too. Here are just two recent ones:
https://www.cncf.io/wp-content/uploads/2020/12/Rootless-Containers-in-Gitpod.pdf
https://fosdem.org/2021/schedule/event/containers_idmap/
This comes with an extensive xfstests suite covering both ext4 and
xfs:
https://git.kernel.org/brauner/xfstests-dev/h/idmapped_mounts
It covers truncation, creation, opening, xattrs, vfscaps, setid
execution, setgid inheritance and more both with idmapped and
non-idmapped mounts. It already helped to discover an unrelated xfs
setgid inheritance bug which has since been fixed in mainline. It will
be sent for inclusion with the xfstests project should you decide to
merge this.
In order to support per-mount idmappings vfsmounts are marked with
user namespaces. The idmapping of the user namespace will be used to
map the ids of vfs objects when they are accessed through that mount.
By default all vfsmounts are marked with the initial user namespace.
The initial user namespace is used to indicate that a mount is not
idmapped. All operations behave as before and this is verified in the
testsuite.
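To illustrate what mapping the ids of vfs objects through a user
namespace means in practice, here is a small standalone sketch. It is
not code from this series and the type and helper names are invented:
an idmapping is a list of extents, and translating an ownership id is
a range lookup.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical extent type: "<from>:<to>:<count>" maps <count> consecutive
 * ids starting at <from> onto ids starting at <to>. */
struct idmap_extent {
        unsigned int from;
        unsigned int to;
        unsigned int count;
};

/* Translate an ownership id through the mapping; ids not covered by any
 * extent stay unmapped and would surface as the overflow id (nobody). */
static unsigned int shift_id(const struct idmap_extent *map, size_t nr,
                             unsigned int id)
{
        size_t i;

        for (i = 0; i < nr; i++)
                if (id >= map[i].from && id - map[i].from < map[i].count)
                        return map[i].to + (id - map[i].from);
        return 65534;
}

int main(void)
{
        /* The "both:1000:1001:1" mapping used in the example session below. */
        struct idmap_extent map[] = { { .from = 1000, .to = 1001, .count = 1 } };

        /* A file owned by uid 1000 on disk appears as uid 1001 through the mount. */
        printf("uid 1000 -> uid %u\n", shift_id(map, 1, 1000));
        return 0;
}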
Based on prior discussions we want to attach the whole user namespace
and not just a dedicated idmapping struct. This allows us to reuse all
the helpers that already exist for dealing with idmappings instead of
introducing a whole new range of helpers. In addition, if we decide in
the future that we are confident enough to enable unprivileged users
to set up idmapped mounts, the permission checking can take into account
whether the caller is privileged in the user namespace the mount is
currently marked with.
The user namespace the mount will be marked with can be specified by
passing a file descriptor referring to the user namespace as an
argument to the new mount_setattr() syscall together with the new
MOUNT_ATTR_IDMAP flag. The system call follows the openat2() pattern
of extensibility.
The following conditions must be met in order to create an idmapped
mount:
- The caller must currently have the CAP_SYS_ADMIN capability in the
user namespace the underlying filesystem has been mounted in.
- The underlying filesystem must support idmapped mounts.
- The mount must not already be idmapped. This also implies that the
idmapping of a mount cannot be altered once it has been idmapped.
- The mount must be a detached/anonymous mount, i.e. it must have
been created by calling open_tree() with the OPEN_TREE_CLONE flag
and it must not already have been visible in the filesystem.
The last two points guarantee easier semantics for userspace and the
kernel and make the implementation significantly simpler.
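To make that flow concrete, here is a minimal userspace sketch of
creating and attaching an idmapped mount. It is not code from this
series; the syscall numbers and flag values get local fallback
definitions in case the installed headers predate this work, and
struct mnt_attr simply mirrors the layout of the new uapi
struct mount_attr.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Fallback definitions for userspace headers that predate this series. */
#ifndef __NR_open_tree
#define __NR_open_tree 428
#endif
#ifndef __NR_move_mount
#define __NR_move_mount 429
#endif
#ifndef __NR_mount_setattr
#define __NR_mount_setattr 442
#endif
#ifndef OPEN_TREE_CLONE
#define OPEN_TREE_CLONE 1
#define OPEN_TREE_CLOEXEC O_CLOEXEC
#endif
#ifndef AT_EMPTY_PATH
#define AT_EMPTY_PATH 0x1000
#endif
#ifndef MOUNT_ATTR_IDMAP
#define MOUNT_ATTR_IDMAP 0x00100000
#endif
#ifndef MOVE_MOUNT_F_EMPTY_PATH
#define MOVE_MOUNT_F_EMPTY_PATH 0x00000004
#endif

/* Mirrors the layout of the new uapi struct mount_attr. */
struct mnt_attr {
        uint64_t attr_set;
        uint64_t attr_clr;
        uint64_t propagation;
        uint64_t userns_fd;
};

int main(int argc, char **argv)
{
        int fd_tree, fd_userns;
        struct mnt_attr attr = { .attr_set = MOUNT_ATTR_IDMAP };

        if (argc != 4) {
                fprintf(stderr, "usage: %s <source> <target> <userns, e.g. /proc/PID/ns/user>\n",
                        argv[0]);
                return 1;
        }

        /* Detached (anonymous) copy of the mount at <source>, as required above. */
        fd_tree = syscall(__NR_open_tree, AT_FDCWD, argv[1],
                          OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC);
        if (fd_tree < 0) {
                perror("open_tree");
                return 1;
        }

        /* The user namespace whose idmapping the mount should be marked with. */
        fd_userns = open(argv[3], O_RDONLY | O_CLOEXEC);
        if (fd_userns < 0) {
                perror("open userns");
                return 1;
        }
        attr.userns_fd = fd_userns;

        /* Mark the detached mount as idmapped (needs CAP_SYS_ADMIN as described). */
        if (syscall(__NR_mount_setattr, fd_tree, "", AT_EMPTY_PATH,
                    &attr, sizeof(attr)) < 0) {
                perror("mount_setattr");
                return 1;
        }

        /* Attach the now-idmapped mount at <target>. */
        if (syscall(__NR_move_mount, fd_tree, "", AT_FDCWD, argv[2],
                    MOVE_MOUNT_F_EMPTY_PATH) < 0) {
                perror("move_mount");
                return 1;
        }
        return 0;
}

Run with sufficient privilege, this corresponds to what the
./mount --idmap invocation in the example session further down does.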
By default vfsmounts are marked with the initial user namespace and no
behavioral or performance changes are observed.
The manpage with a detailed description can be found here:
1d7b902e28
In order to support idmapped mounts, filesystems need to be changed
and mark themselves with the FS_ALLOW_IDMAP flag in fs_flags. The
patches to convert individual filesystems are not very large or
complicated overall, as can be seen from the included fat, ext4, and
xfs ports. Patches for other filesystems are actively worked on and
will be sent out separately. The xfstests suite can be used to verify
that a port has been done correctly.
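As a hedged sketch of what marking a filesystem with FS_ALLOW_IDMAP
looks like (a made-up filesystem; everything except the FS_ALLOW_IDMAP
flag itself is illustrative, and real ports also thread the mount's
user namespace through their inode operations):

/*
 * Sketch only: a hypothetical filesystem opting in to idmapped mounts
 * by OR-ing FS_ALLOW_IDMAP into its file_system_type fs_flags.
 */
static struct file_system_type example_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "examplefs",
        .mount          = example_mount,
        .kill_sb        = kill_block_super,
        .fs_flags       = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};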
The mount_setattr() syscall is motivated independently of the idmapped
mounts patches and has been around since July 2019. One of the most
valuable features of the new mount api is the ability to perform
mounts based on file descriptors only.
Together with the lookup restrictions available in the openat2()
RESOLVE_* flag namespace, which we added in v5.6, this is the first
time we are close to hardened and race-free (e.g. symlinks) mounting
and path resolution.
While userspace has started porting to the new mount api to mount
proper filesystems and create new bind-mounts it is currently not
possible to change mount options of an already existing bind mount in
the new mount api since the mount_setattr() syscall is missing.
With the addition of the mount_setattr() syscall we remove this last
restriction and userspace can now fully port to the new mount api,
covering every use-case the old mount api could. We also add the
crucial ability to recursively change mount options for a whole mount
tree, both removing and adding mount options at the same time. This
syscall has been requested multiple times by various people and
projects.
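As a sketch of that last point, again not taken from the series and
with the same header-fallback caveats, adding and removing mount
options across a whole mount tree is a single mount_setattr() call:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_mount_setattr
#define __NR_mount_setattr 442
#endif
#ifndef AT_RECURSIVE
#define AT_RECURSIVE 0x8000
#endif
#ifndef MOUNT_ATTR_RDONLY
#define MOUNT_ATTR_RDONLY 0x00000001
#define MOUNT_ATTR_NOSUID 0x00000002
#define MOUNT_ATTR_NODEV  0x00000004
#endif

/* Mirrors the layout of the new uapi struct mount_attr. */
struct mnt_attr {
        uint64_t attr_set;
        uint64_t attr_clr;
        uint64_t propagation;
        uint64_t userns_fd;
};

int main(int argc, char **argv)
{
        /* Add read-only and nosuid, drop nodev, across the whole tree. */
        struct mnt_attr attr = {
                .attr_set = MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOSUID,
                .attr_clr = MOUNT_ATTR_NODEV,
        };

        if (argc != 2) {
                fprintf(stderr, "usage: %s <mountpoint>\n", argv[0]);
                return 1;
        }

        if (syscall(__NR_mount_setattr, AT_FDCWD, argv[1], AT_RECURSIVE,
                    &attr, sizeof(attr)) < 0) {
                perror("mount_setattr");
                return 1;
        }
        return 0;
}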
There is a simple tool available at
https://github.com/brauner/mount-idmapped
that allows users to create idmapped mounts so people can play with
this patch series. Should you decide to pull this, I'll add support
for the regular mount binary in the following weeks.
Here's an example of a simple idmapped mount of another user's home
directory:
u1001@f2-vm:/$ sudo ./mount --idmap both:1000:1001:1 /home/ubuntu/ /mnt
u1001@f2-vm:/$ ls -al /home/ubuntu/
total 28
drwxr-xr-x 2 ubuntu ubuntu 4096 Oct 28 22:07 .
drwxr-xr-x 4 root root 4096 Oct 28 04:00 ..
-rw------- 1 ubuntu ubuntu 3154 Oct 28 22:12 .bash_history
-rw-r--r-- 1 ubuntu ubuntu 220 Feb 25 2020 .bash_logout
-rw-r--r-- 1 ubuntu ubuntu 3771 Feb 25 2020 .bashrc
-rw-r--r-- 1 ubuntu ubuntu 807 Feb 25 2020 .profile
-rw-r--r-- 1 ubuntu ubuntu 0 Oct 16 16:11 .sudo_as_admin_successful
-rw------- 1 ubuntu ubuntu 1144 Oct 28 00:43 .viminfo
u1001@f2-vm:/$ ls -al /mnt/
total 28
drwxr-xr-x 2 u1001 u1001 4096 Oct 28 22:07 .
drwxr-xr-x 29 root root 4096 Oct 28 22:01 ..
-rw------- 1 u1001 u1001 3154 Oct 28 22:12 .bash_history
-rw-r--r-- 1 u1001 u1001 220 Feb 25 2020 .bash_logout
-rw-r--r-- 1 u1001 u1001 3771 Feb 25 2020 .bashrc
-rw-r--r-- 1 u1001 u1001 807 Feb 25 2020 .profile
-rw-r--r-- 1 u1001 u1001 0 Oct 16 16:11 .sudo_as_admin_successful
-rw------- 1 u1001 u1001 1144 Oct 28 00:43 .viminfo
u1001@f2-vm:/$ touch /mnt/my-file
u1001@f2-vm:/$ setfacl -m u:1001:rwx /mnt/my-file
u1001@f2-vm:/$ sudo setcap -n 1001 cap_net_raw+ep /mnt/my-file
u1001@f2-vm:/$ ls -al /mnt/my-file
-rw-rwxr--+ 1 u1001 u1001 0 Oct 28 22:14 /mnt/my-file
u1001@f2-vm:/$ ls -al /home/ubuntu/my-file
-rw-rwxr--+ 1 ubuntu ubuntu 0 Oct 28 22:14 /home/ubuntu/my-file
u1001@f2-vm:/$ getfacl /mnt/my-file
getfacl: Removing leading '/' from absolute path names
# file: mnt/my-file
# owner: u1001
# group: u1001
user::rw-
user:u1001:rwx
group::rw-
mask::rwx
other::r--
u1001@f2-vm:/$ getfacl /home/ubuntu/my-file
getfacl: Removing leading '/' from absolute path names
# file: home/ubuntu/my-file
# owner: ubuntu
# group: ubuntu
user::rw-
user:ubuntu:rwx
group::rw-
mask::rwx
other::r--"
* tag 'idmapped-mounts-v5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/brauner/linux: (41 commits)
xfs: remove the possibly unused mp variable in xfs_file_compat_ioctl
xfs: support idmapped mounts
ext4: support idmapped mounts
fat: handle idmapped mounts
tests: add mount_setattr() selftests
fs: introduce MOUNT_ATTR_IDMAP
fs: add mount_setattr()
fs: add attr_flags_to_mnt_flags helper
fs: split out functions to hold writers
namespace: only take read lock in do_reconfigure_mnt()
mount: make {lock,unlock}_mount_hash() static
namespace: take lock_mount_hash() directly when changing flags
nfs: do not export idmapped mounts
overlayfs: do not mount on top of idmapped mounts
ecryptfs: do not mount on top of idmapped mounts
ima: handle idmapped mounts
apparmor: handle idmapped mounts
fs: make helpers idmap mount aware
exec: handle idmapped mounts
would_dump: handle idmapped mounts
...
1014 lines
23 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2011 Novell Inc.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/splice.h>
#include <linux/xattr.h>
#include <linux/security.h>
#include <linux/uaccess.h>
#include <linux/sched/signal.h>
#include <linux/cred.h>
#include <linux/namei.h>
#include <linux/fdtable.h>
#include <linux/ratelimit.h>
#include <linux/exportfs.h>

#include "overlayfs.h"

#define OVL_COPY_UP_CHUNK_SIZE (1 << 20)

static int ovl_ccup_set(const char *buf, const struct kernel_param *param)
{
        pr_warn("\"check_copy_up\" module option is obsolete\n");
        return 0;
}

static int ovl_ccup_get(char *buf, const struct kernel_param *param)
{
        return sprintf(buf, "N\n");
}

module_param_call(check_copy_up, ovl_ccup_set, ovl_ccup_get, NULL, 0644);
MODULE_PARM_DESC(check_copy_up, "Obsolete; does nothing");
static bool ovl_must_copy_xattr(const char *name)
{
        return !strcmp(name, XATTR_POSIX_ACL_ACCESS) ||
               !strcmp(name, XATTR_POSIX_ACL_DEFAULT) ||
               !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN);
}

int ovl_copy_xattr(struct super_block *sb, struct dentry *old,
                   struct dentry *new)
{
        ssize_t list_size, size, value_size = 0;
        char *buf, *name, *value = NULL;
        int error = 0;
        size_t slen;

        if (!(old->d_inode->i_opflags & IOP_XATTR) ||
            !(new->d_inode->i_opflags & IOP_XATTR))
                return 0;

        list_size = vfs_listxattr(old, NULL, 0);
        if (list_size <= 0) {
                if (list_size == -EOPNOTSUPP)
                        return 0;
                return list_size;
        }

        buf = kzalloc(list_size, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        list_size = vfs_listxattr(old, buf, list_size);
        if (list_size <= 0) {
                error = list_size;
                goto out;
        }

        for (name = buf; list_size; name += slen) {
                slen = strnlen(name, list_size) + 1;

                /* underlying fs providing us with a broken xattr list? */
                if (WARN_ON(slen > list_size)) {
                        error = -EIO;
                        break;
                }
                list_size -= slen;

                if (ovl_is_private_xattr(sb, name))
                        continue;

                error = security_inode_copy_up_xattr(name);
                if (error < 0 && error != -EOPNOTSUPP)
                        break;
                if (error == 1) {
                        error = 0;
                        continue; /* Discard */
                }
retry:
                size = vfs_getxattr(&init_user_ns, old, name, value, value_size);
                if (size == -ERANGE)
                        size = vfs_getxattr(&init_user_ns, old, name, NULL, 0);

                if (size < 0) {
                        error = size;
                        break;
                }

                if (size > value_size) {
                        void *new;

                        new = krealloc(value, size, GFP_KERNEL);
                        if (!new) {
                                error = -ENOMEM;
                                break;
                        }
                        value = new;
                        value_size = size;
                        goto retry;
                }

                error = vfs_setxattr(&init_user_ns, new, name, value, size, 0);
                if (error) {
                        if (error != -EOPNOTSUPP || ovl_must_copy_xattr(name))
                                break;

                        /* Ignore failure to copy unknown xattrs */
                        error = 0;
                }
        }
        kfree(value);
out:
        kfree(buf);
        return error;
}
static int ovl_copy_up_data(struct ovl_fs *ofs, struct path *old,
                            struct path *new, loff_t len)
{
        struct file *old_file;
        struct file *new_file;
        loff_t old_pos = 0;
        loff_t new_pos = 0;
        loff_t cloned;
        loff_t data_pos = -1;
        loff_t hole_len;
        bool skip_hole = false;
        int error = 0;

        if (len == 0)
                return 0;

        old_file = ovl_path_open(old, O_LARGEFILE | O_RDONLY);
        if (IS_ERR(old_file))
                return PTR_ERR(old_file);

        new_file = ovl_path_open(new, O_LARGEFILE | O_WRONLY);
        if (IS_ERR(new_file)) {
                error = PTR_ERR(new_file);
                goto out_fput;
        }

        /* Try to use clone_file_range to clone up within the same fs */
        cloned = do_clone_file_range(old_file, 0, new_file, 0, len, 0);
        if (cloned == len)
                goto out;
        /* Couldn't clone, so now we try to copy the data */

        /* Check if lower fs supports seek operation */
        if (old_file->f_mode & FMODE_LSEEK &&
            old_file->f_op->llseek)
                skip_hole = true;

        while (len) {
                size_t this_len = OVL_COPY_UP_CHUNK_SIZE;
                long bytes;

                if (len < this_len)
                        this_len = len;

                if (signal_pending_state(TASK_KILLABLE, current)) {
                        error = -EINTR;
                        break;
                }

                /*
                 * Filling zeroes for a hole would cost unnecessary disk space
                 * and slow down the copy-up, so we optimize for holes during
                 * copy-up. This relies on the SEEK_DATA implementation in the
                 * lower fs; if the lower fs does not support it, copy-up
                 * behaves as before.
                 *
                 * Detailed hole detection logic: when the next data position
                 * is larger than the current position we skip that hole,
                 * otherwise we copy data in chunks of OVL_COPY_UP_CHUNK_SIZE.
                 * This may not recognize every kind of hole and sometimes
                 * only skips part of a hole, but it is enough for most of
                 * the use cases.
                 */

                if (skip_hole && data_pos < old_pos) {
                        data_pos = vfs_llseek(old_file, old_pos, SEEK_DATA);
                        if (data_pos > old_pos) {
                                hole_len = data_pos - old_pos;
                                len -= hole_len;
                                old_pos = new_pos = data_pos;
                                continue;
                        } else if (data_pos == -ENXIO) {
                                break;
                        } else if (data_pos < 0) {
                                skip_hole = false;
                        }
                }

                bytes = do_splice_direct(old_file, &old_pos,
                                         new_file, &new_pos,
                                         this_len, SPLICE_F_MOVE);
                if (bytes <= 0) {
                        error = bytes;
                        break;
                }
                WARN_ON(old_pos != new_pos);

                len -= bytes;
        }
out:
        if (!error && ovl_should_sync(ofs))
                error = vfs_fsync(new_file, 0);
        fput(new_file);
out_fput:
        fput(old_file);
        return error;
}
static int ovl_set_size(struct dentry *upperdentry, struct kstat *stat)
{
        struct iattr attr = {
                .ia_valid = ATTR_SIZE,
                .ia_size = stat->size,
        };

        return notify_change(&init_user_ns, upperdentry, &attr, NULL);
}

static int ovl_set_timestamps(struct dentry *upperdentry, struct kstat *stat)
{
        struct iattr attr = {
                .ia_valid =
                 ATTR_ATIME | ATTR_MTIME | ATTR_ATIME_SET | ATTR_MTIME_SET,
                .ia_atime = stat->atime,
                .ia_mtime = stat->mtime,
        };

        return notify_change(&init_user_ns, upperdentry, &attr, NULL);
}

int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat)
{
        int err = 0;

        if (!S_ISLNK(stat->mode)) {
                struct iattr attr = {
                        .ia_valid = ATTR_MODE,
                        .ia_mode = stat->mode,
                };
                err = notify_change(&init_user_ns, upperdentry, &attr, NULL);
        }
        if (!err) {
                struct iattr attr = {
                        .ia_valid = ATTR_UID | ATTR_GID,
                        .ia_uid = stat->uid,
                        .ia_gid = stat->gid,
                };
                err = notify_change(&init_user_ns, upperdentry, &attr, NULL);
        }
        if (!err)
                ovl_set_timestamps(upperdentry, stat);

        return err;
}
struct ovl_fh *ovl_encode_real_fh(struct ovl_fs *ofs, struct dentry *real,
                                  bool is_upper)
{
        struct ovl_fh *fh;
        int fh_type, dwords;
        int buflen = MAX_HANDLE_SZ;
        uuid_t *uuid = &real->d_sb->s_uuid;
        int err;

        /* Make sure the real fid stays 32bit aligned */
        BUILD_BUG_ON(OVL_FH_FID_OFFSET % 4);
        BUILD_BUG_ON(MAX_HANDLE_SZ + OVL_FH_FID_OFFSET > 255);

        fh = kzalloc(buflen + OVL_FH_FID_OFFSET, GFP_KERNEL);
        if (!fh)
                return ERR_PTR(-ENOMEM);

        /*
         * We encode a non-connectable file handle for non-dir, because we
         * only need to find the lower inode number and we don't want to pay
         * the price of reconnecting the dentry.
         */
        dwords = buflen >> 2;
        fh_type = exportfs_encode_fh(real, (void *)fh->fb.fid, &dwords, 0);
        buflen = (dwords << 2);

        err = -EIO;
        if (WARN_ON(fh_type < 0) ||
            WARN_ON(buflen > MAX_HANDLE_SZ) ||
            WARN_ON(fh_type == FILEID_INVALID))
                goto out_err;

        fh->fb.version = OVL_FH_VERSION;
        fh->fb.magic = OVL_FH_MAGIC;
        fh->fb.type = fh_type;
        fh->fb.flags = OVL_FH_FLAG_CPU_ENDIAN;
        /*
         * When we will want to decode an overlay dentry from this handle
         * and all layers are on the same fs, if we get a disconnected real
         * dentry when we decode fid, the only way to tell if we should assign
         * it to upperdentry or to lowerstack is by checking this flag.
         */
        if (is_upper)
                fh->fb.flags |= OVL_FH_FLAG_PATH_UPPER;
        fh->fb.len = sizeof(fh->fb) + buflen;
        if (ofs->config.uuid)
                fh->fb.uuid = *uuid;

        return fh;

out_err:
        kfree(fh);
        return ERR_PTR(err);
}
int ovl_set_origin(struct ovl_fs *ofs, struct dentry *dentry,
                   struct dentry *lower, struct dentry *upper)
{
        const struct ovl_fh *fh = NULL;
        int err;

        /*
         * When lower layer doesn't support export operations store a 'null' fh,
         * so we can use the overlay.origin xattr to distinguish between a copy
         * up and a pure upper inode.
         */
        if (ovl_can_decode_fh(lower->d_sb)) {
                fh = ovl_encode_real_fh(ofs, lower, false);
                if (IS_ERR(fh))
                        return PTR_ERR(fh);
        }

        /*
         * Do not fail when upper doesn't support xattrs.
         */
        err = ovl_check_setxattr(dentry, upper, OVL_XATTR_ORIGIN, fh->buf,
                                 fh ? fh->fb.len : 0, 0);
        kfree(fh);

        /* Ignore -EPERM from setting "user.*" on symlink/special */
        return err == -EPERM ? 0 : err;
}
/* Store file handle of @upper dir in @index dir entry */
static int ovl_set_upper_fh(struct ovl_fs *ofs, struct dentry *upper,
                            struct dentry *index)
{
        const struct ovl_fh *fh;
        int err;

        fh = ovl_encode_real_fh(ofs, upper, true);
        if (IS_ERR(fh))
                return PTR_ERR(fh);

        err = ovl_do_setxattr(ofs, index, OVL_XATTR_UPPER, fh->buf, fh->fb.len);

        kfree(fh);
        return err;
}

/*
 * Create and install index entry.
 *
 * Caller must hold i_mutex on indexdir.
 */
static int ovl_create_index(struct dentry *dentry, struct dentry *origin,
                            struct dentry *upper)
{
        struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
        struct dentry *indexdir = ovl_indexdir(dentry->d_sb);
        struct inode *dir = d_inode(indexdir);
        struct dentry *index = NULL;
        struct dentry *temp = NULL;
        struct qstr name = { };
        int err;

        /*
         * For now this is only used for creating index entry for directories,
         * because non-dir are copied up directly to index and then hardlinked
         * to upper dir.
         *
         * TODO: implement create index for non-dir, so we can call it when
         * encoding file handle for non-dir in case index does not exist.
         */
        if (WARN_ON(!d_is_dir(dentry)))
                return -EIO;

        /* Directory not expected to be indexed before copy up */
        if (WARN_ON(ovl_test_flag(OVL_INDEX, d_inode(dentry))))
                return -EIO;

        err = ovl_get_index_name(ofs, origin, &name);
        if (err)
                return err;

        temp = ovl_create_temp(indexdir, OVL_CATTR(S_IFDIR | 0));
        err = PTR_ERR(temp);
        if (IS_ERR(temp))
                goto free_name;

        err = ovl_set_upper_fh(ofs, upper, temp);
        if (err)
                goto out;

        index = lookup_one_len(name.name, indexdir, name.len);
        if (IS_ERR(index)) {
                err = PTR_ERR(index);
        } else {
                err = ovl_do_rename(dir, temp, dir, index, 0);
                dput(index);
        }
out:
        if (err)
                ovl_cleanup(dir, temp);
        dput(temp);
free_name:
        kfree(name.name);
        return err;
}

struct ovl_copy_up_ctx {
        struct dentry *parent;
        struct dentry *dentry;
        struct path lowerpath;
        struct kstat stat;
        struct kstat pstat;
        const char *link;
        struct dentry *destdir;
        struct qstr destname;
        struct dentry *workdir;
        bool origin;
        bool indexed;
        bool metacopy;
};

static int ovl_link_up(struct ovl_copy_up_ctx *c)
{
        int err;
        struct dentry *upper;
        struct dentry *upperdir = ovl_dentry_upper(c->parent);
        struct inode *udir = d_inode(upperdir);

        /* Mark parent "impure" because it may now contain non-pure upper */
        err = ovl_set_impure(c->parent, upperdir);
        if (err)
                return err;

        err = ovl_set_nlink_lower(c->dentry);
        if (err)
                return err;

        inode_lock_nested(udir, I_MUTEX_PARENT);
        upper = lookup_one_len(c->dentry->d_name.name, upperdir,
                               c->dentry->d_name.len);
        err = PTR_ERR(upper);
        if (!IS_ERR(upper)) {
                err = ovl_do_link(ovl_dentry_upper(c->dentry), udir, upper);
                dput(upper);

                if (!err) {
                        /* Restore timestamps on parent (best effort) */
                        ovl_set_timestamps(upperdir, &c->pstat);
                        ovl_dentry_set_upper_alias(c->dentry);
                }
        }
        inode_unlock(udir);
        if (err)
                return err;

        err = ovl_set_nlink_upper(c->dentry);

        return err;
}
static int ovl_copy_up_inode(struct ovl_copy_up_ctx *c, struct dentry *temp)
{
        struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb);
        int err;

        /*
         * Copy up data first and then xattrs. Writing data after
         * xattrs will remove security.capability xattr automatically.
         */
        if (S_ISREG(c->stat.mode) && !c->metacopy) {
                struct path upperpath, datapath;

                ovl_path_upper(c->dentry, &upperpath);
                if (WARN_ON(upperpath.dentry != NULL))
                        return -EIO;
                upperpath.dentry = temp;

                ovl_path_lowerdata(c->dentry, &datapath);
                err = ovl_copy_up_data(ofs, &datapath, &upperpath,
                                       c->stat.size);
                if (err)
                        return err;
        }

        err = ovl_copy_xattr(c->dentry->d_sb, c->lowerpath.dentry, temp);
        if (err)
                return err;

        /*
         * Store identifier of lower inode in upper inode xattr to
         * allow lookup of the copy up origin inode.
         *
         * Don't set origin when we are breaking the association with a lower
         * hard link.
         */
        if (c->origin) {
                err = ovl_set_origin(ofs, c->dentry, c->lowerpath.dentry, temp);
                if (err)
                        return err;
        }

        if (c->metacopy) {
                err = ovl_check_setxattr(c->dentry, temp, OVL_XATTR_METACOPY,
                                         NULL, 0, -EOPNOTSUPP);
                if (err)
                        return err;
        }

        inode_lock(temp->d_inode);
        if (S_ISREG(c->stat.mode))
                err = ovl_set_size(temp, &c->stat);
        if (!err)
                err = ovl_set_attr(temp, &c->stat);
        inode_unlock(temp->d_inode);

        return err;
}

struct ovl_cu_creds {
        const struct cred *old;
        struct cred *new;
};

static int ovl_prep_cu_creds(struct dentry *dentry, struct ovl_cu_creds *cc)
{
        int err;

        cc->old = cc->new = NULL;
        err = security_inode_copy_up(dentry, &cc->new);
        if (err < 0)
                return err;

        if (cc->new)
                cc->old = override_creds(cc->new);

        return 0;
}

static void ovl_revert_cu_creds(struct ovl_cu_creds *cc)
{
        if (cc->new) {
                revert_creds(cc->old);
                put_cred(cc->new);
        }
}

/*
 * Copyup using workdir to prepare temp file. Used when copying up directories,
 * special files or when upper fs doesn't support O_TMPFILE.
 */
static int ovl_copy_up_workdir(struct ovl_copy_up_ctx *c)
{
        struct inode *inode;
        struct inode *udir = d_inode(c->destdir), *wdir = d_inode(c->workdir);
        struct dentry *temp, *upper;
        struct ovl_cu_creds cc;
        int err;
        struct ovl_cattr cattr = {
                /* Can't properly set mode on creation because of the umask */
                .mode = c->stat.mode & S_IFMT,
                .rdev = c->stat.rdev,
                .link = c->link
        };

        /* workdir and destdir could be the same when copying up to indexdir */
        err = -EIO;
        if (lock_rename(c->workdir, c->destdir) != NULL)
                goto unlock;

        err = ovl_prep_cu_creds(c->dentry, &cc);
        if (err)
                goto unlock;

        temp = ovl_create_temp(c->workdir, &cattr);
        ovl_revert_cu_creds(&cc);

        err = PTR_ERR(temp);
        if (IS_ERR(temp))
                goto unlock;

        err = ovl_copy_up_inode(c, temp);
        if (err)
                goto cleanup;

        if (S_ISDIR(c->stat.mode) && c->indexed) {
                err = ovl_create_index(c->dentry, c->lowerpath.dentry, temp);
                if (err)
                        goto cleanup;
        }

        upper = lookup_one_len(c->destname.name, c->destdir, c->destname.len);
        err = PTR_ERR(upper);
        if (IS_ERR(upper))
                goto cleanup;

        err = ovl_do_rename(wdir, temp, udir, upper, 0);
        dput(upper);
        if (err)
                goto cleanup;

        if (!c->metacopy)
                ovl_set_upperdata(d_inode(c->dentry));
        inode = d_inode(c->dentry);
        ovl_inode_update(inode, temp);
        if (S_ISDIR(inode->i_mode))
                ovl_set_flag(OVL_WHITEOUTS, inode);
unlock:
        unlock_rename(c->workdir, c->destdir);

        return err;

cleanup:
        ovl_cleanup(wdir, temp);
        dput(temp);
        goto unlock;
}

/* Copyup using O_TMPFILE which does not require cross dir locking */
static int ovl_copy_up_tmpfile(struct ovl_copy_up_ctx *c)
{
        struct inode *udir = d_inode(c->destdir);
        struct dentry *temp, *upper;
        struct ovl_cu_creds cc;
        int err;

        err = ovl_prep_cu_creds(c->dentry, &cc);
        if (err)
                return err;

        temp = ovl_do_tmpfile(c->workdir, c->stat.mode);
        ovl_revert_cu_creds(&cc);

        if (IS_ERR(temp))
                return PTR_ERR(temp);

        err = ovl_copy_up_inode(c, temp);
        if (err)
                goto out_dput;

        inode_lock_nested(udir, I_MUTEX_PARENT);

        upper = lookup_one_len(c->destname.name, c->destdir, c->destname.len);
        err = PTR_ERR(upper);
        if (!IS_ERR(upper)) {
                err = ovl_do_link(temp, udir, upper);
                dput(upper);
        }
        inode_unlock(udir);

        if (err)
                goto out_dput;

        if (!c->metacopy)
                ovl_set_upperdata(d_inode(c->dentry));
        ovl_inode_update(d_inode(c->dentry), temp);

        return 0;

out_dput:
        dput(temp);
        return err;
}
/*
 * Copy up a single dentry
 *
 * All renames start with copy up of source if necessary. The actual
 * rename will only proceed once the copy up was successful. Copy up uses
 * upper parent i_mutex for exclusion. Since rename can change d_parent it
 * is possible that the copy up will lock the old parent. At that point
 * the file will have already been copied up anyway.
 */
static int ovl_do_copy_up(struct ovl_copy_up_ctx *c)
{
        int err;
        struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb);
        bool to_index = false;

        /*
         * Indexed non-dir is copied up directly to the index entry and then
         * hardlinked to upper dir. Indexed dir is copied up to indexdir,
         * then index entry is created and then copied up dir installed.
         * Copying dir up to indexdir instead of workdir simplifies locking.
         */
        if (ovl_need_index(c->dentry)) {
                c->indexed = true;
                if (S_ISDIR(c->stat.mode))
                        c->workdir = ovl_indexdir(c->dentry->d_sb);
                else
                        to_index = true;
        }

        if (S_ISDIR(c->stat.mode) || c->stat.nlink == 1 || to_index)
                c->origin = true;

        if (to_index) {
                c->destdir = ovl_indexdir(c->dentry->d_sb);
                err = ovl_get_index_name(ofs, c->lowerpath.dentry, &c->destname);
                if (err)
                        return err;
        } else if (WARN_ON(!c->parent)) {
                /* Disconnected dentry must be copied up to index dir */
                return -EIO;
        } else {
                /*
                 * Mark parent "impure" because it may now contain non-pure
                 * upper
                 */
                err = ovl_set_impure(c->parent, c->destdir);
                if (err)
                        return err;
        }

        /* Should we copyup with O_TMPFILE or with workdir? */
        if (S_ISREG(c->stat.mode) && ofs->tmpfile)
                err = ovl_copy_up_tmpfile(c);
        else
                err = ovl_copy_up_workdir(c);
        if (err)
                goto out;

        if (c->indexed)
                ovl_set_flag(OVL_INDEX, d_inode(c->dentry));

        if (to_index) {
                /* Initialize nlink for copy up of disconnected dentry */
                err = ovl_set_nlink_upper(c->dentry);
        } else {
                struct inode *udir = d_inode(c->destdir);

                /* Restore timestamps on parent (best effort) */
                inode_lock(udir);
                ovl_set_timestamps(c->destdir, &c->pstat);
                inode_unlock(udir);

                ovl_dentry_set_upper_alias(c->dentry);
        }

out:
        if (to_index)
                kfree(c->destname.name);
        return err;
}

static bool ovl_need_meta_copy_up(struct dentry *dentry, umode_t mode,
                                  int flags)
{
        struct ovl_fs *ofs = dentry->d_sb->s_fs_info;

        if (!ofs->config.metacopy)
                return false;

        if (!S_ISREG(mode))
                return false;

        if (flags && ((OPEN_FMODE(flags) & FMODE_WRITE) || (flags & O_TRUNC)))
                return false;

        return true;
}

static ssize_t ovl_getxattr(struct dentry *dentry, char *name, char **value)
{
        ssize_t res;
        char *buf;

        res = vfs_getxattr(&init_user_ns, dentry, name, NULL, 0);
        if (res == -ENODATA || res == -EOPNOTSUPP)
                res = 0;

        if (res > 0) {
                buf = kzalloc(res, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;

                res = vfs_getxattr(&init_user_ns, dentry, name, buf, res);
                if (res < 0)
                        kfree(buf);
                else
                        *value = buf;
        }
        return res;
}

/* Copy up data of an inode which was copied up metadata only in the past. */
static int ovl_copy_up_meta_inode_data(struct ovl_copy_up_ctx *c)
{
        struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb);
        struct path upperpath, datapath;
        int err;
        char *capability = NULL;
        ssize_t cap_size;

        ovl_path_upper(c->dentry, &upperpath);
        if (WARN_ON(upperpath.dentry == NULL))
                return -EIO;

        ovl_path_lowerdata(c->dentry, &datapath);
        if (WARN_ON(datapath.dentry == NULL))
                return -EIO;

        if (c->stat.size) {
                err = cap_size = ovl_getxattr(upperpath.dentry, XATTR_NAME_CAPS,
                                              &capability);
                if (cap_size < 0)
                        goto out;
        }

        err = ovl_copy_up_data(ofs, &datapath, &upperpath, c->stat.size);
        if (err)
                goto out_free;

        /*
         * Writing to upper file will clear security.capability xattr. We
         * don't want that to happen for normal copy-up operation.
         */
        if (capability) {
                err = vfs_setxattr(&init_user_ns, upperpath.dentry,
                                   XATTR_NAME_CAPS, capability, cap_size, 0);
                if (err)
                        goto out_free;
        }

        err = ovl_do_removexattr(ofs, upperpath.dentry, OVL_XATTR_METACOPY);
        if (err)
                goto out_free;

        ovl_set_upperdata(d_inode(c->dentry));
out_free:
        kfree(capability);
out:
        return err;
}
static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
                           int flags)
{
        int err;
        DEFINE_DELAYED_CALL(done);
        struct path parentpath;
        struct ovl_copy_up_ctx ctx = {
                .parent = parent,
                .dentry = dentry,
                .workdir = ovl_workdir(dentry),
        };

        if (WARN_ON(!ctx.workdir))
                return -EROFS;

        ovl_path_lower(dentry, &ctx.lowerpath);
        err = vfs_getattr(&ctx.lowerpath, &ctx.stat,
                          STATX_BASIC_STATS, AT_STATX_SYNC_AS_STAT);
        if (err)
                return err;

        ctx.metacopy = ovl_need_meta_copy_up(dentry, ctx.stat.mode, flags);

        if (parent) {
                ovl_path_upper(parent, &parentpath);
                ctx.destdir = parentpath.dentry;
                ctx.destname = dentry->d_name;

                err = vfs_getattr(&parentpath, &ctx.pstat,
                                  STATX_ATIME | STATX_MTIME,
                                  AT_STATX_SYNC_AS_STAT);
                if (err)
                        return err;
        }

        /* maybe truncate regular file. this has no effect on dirs */
        if (flags & O_TRUNC)
                ctx.stat.size = 0;

        if (S_ISLNK(ctx.stat.mode)) {
                ctx.link = vfs_get_link(ctx.lowerpath.dentry, &done);
                if (IS_ERR(ctx.link))
                        return PTR_ERR(ctx.link);
        }

        err = ovl_copy_up_start(dentry, flags);
        /* err < 0: interrupted, err > 0: raced with another copy-up */
        if (unlikely(err)) {
                if (err > 0)
                        err = 0;
        } else {
                if (!ovl_dentry_upper(dentry))
                        err = ovl_do_copy_up(&ctx);
                if (!err && parent && !ovl_dentry_has_upper_alias(dentry))
                        err = ovl_link_up(&ctx);
                if (!err && ovl_dentry_needs_data_copy_up_locked(dentry, flags))
                        err = ovl_copy_up_meta_inode_data(&ctx);
                ovl_copy_up_end(dentry);
        }
        do_delayed_call(&done);

        return err;
}

static int ovl_copy_up_flags(struct dentry *dentry, int flags)
{
        int err = 0;
        const struct cred *old_cred = ovl_override_creds(dentry->d_sb);
        bool disconnected = (dentry->d_flags & DCACHE_DISCONNECTED);

        /*
         * With NFS export, copy up can get called for a disconnected non-dir.
         * In this case, we will copy up lower inode to index dir without
         * linking it to upper dir.
         */
        if (WARN_ON(disconnected && d_is_dir(dentry)))
                return -EIO;

        while (!err) {
                struct dentry *next;
                struct dentry *parent = NULL;

                if (ovl_already_copied_up(dentry, flags))
                        break;

                next = dget(dentry);
                /* find the topmost dentry not yet copied up */
                for (; !disconnected;) {
                        parent = dget_parent(next);

                        if (ovl_dentry_upper(parent))
                                break;

                        dput(next);
                        next = parent;
                }

                err = ovl_copy_up_one(parent, next, flags);

                dput(parent);
                dput(next);
        }
        revert_creds(old_cred);

        return err;
}

static bool ovl_open_need_copy_up(struct dentry *dentry, int flags)
{
        /* Copy up of disconnected dentry does not set upper alias */
        if (ovl_already_copied_up(dentry, flags))
                return false;

        if (special_file(d_inode(dentry)->i_mode))
                return false;

        if (!ovl_open_flags_need_copy_up(flags))
                return false;

        return true;
}

int ovl_maybe_copy_up(struct dentry *dentry, int flags)
{
        int err = 0;

        if (ovl_open_need_copy_up(dentry, flags)) {
                err = ovl_want_write(dentry);
                if (!err) {
                        err = ovl_copy_up_flags(dentry, flags);
                        ovl_drop_write(dentry);
                }
        }

        return err;
}

int ovl_copy_up_with_data(struct dentry *dentry)
{
        return ovl_copy_up_flags(dentry, O_WRONLY);
}

int ovl_copy_up(struct dentry *dentry)
{
        return ovl_copy_up_flags(dentry, 0);
}