mirror of https://github.com/edk2-porting/linux-next.git
5ffd2c37cb
Eric has pointed out that we still have 3 users of do_each_thread(). Change them to use for_each_process_thread() and kill this helper.

There is a subtle change: after do_each_thread/while_each_thread, g == t == &init_task, while after for_each_process_thread() they both point to nowhere, but this doesn't matter.

> Why is for_each_process_thread() better than do_each_thread()?

Say, for_each_process_thread() is rcu safe; do_each_thread() is not.

And certainly

	for_each_process_thread(p, t) {
		do_something(p, t);
	}

looks better than

	do_each_thread(p, t) {
		do_something(p, t);
	} while_each_thread(p, t);

And again, there are only 3 users of this awkward helper left. It should have been killed years ago; in fact, I thought it had already been killed. It uses while_each_thread(), which needs some changes.

Link: https://lkml.kernel.org/r/20230817163708.GA8248@redhat.com
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
Cc: "Christian Brauner (Microsoft)" <brauner@kernel.org>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: Jiri Slaby <jirislaby@kernel.org> # tty/serial
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
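A minimal sketch (not part of this commit) of the RCU-safe walk the message describes; the helper name count_live_threads() is hypothetical:

	/* Illustrative only: walk every thread under RCU with
	 * for_each_process_thread(); the caller must hold either
	 * rcu_read_lock() or tasklist_lock, as the message notes.
	 */
	#include <linux/rcupdate.h>
	#include <linux/sched/signal.h>

	static int count_live_threads(void)
	{
		struct task_struct *g, *t;
		int nr = 0;

		rcu_read_lock();
		for_each_process_thread(g, t)
			nr++;
		rcu_read_unlock();

		return nr;
	}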
169 lines
3.4 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/fs.h>
#include <linux/path.h>
#include <linux/slab.h>
#include <linux/fs_struct.h>
#include "internal.h"

/*
 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
 * It can block.
 */
void set_fs_root(struct fs_struct *fs, const struct path *path)
{
	struct path old_root;

	path_get(path);
	spin_lock(&fs->lock);
	write_seqcount_begin(&fs->seq);
	old_root = fs->root;
	fs->root = *path;
	write_seqcount_end(&fs->seq);
	spin_unlock(&fs->lock);
	if (old_root.dentry)
		path_put(&old_root);
}

/*
 * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values.
 * It can block.
 */
void set_fs_pwd(struct fs_struct *fs, const struct path *path)
{
	struct path old_pwd;

	path_get(path);
	spin_lock(&fs->lock);
	write_seqcount_begin(&fs->seq);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	write_seqcount_end(&fs->seq);
	spin_unlock(&fs->lock);

	if (old_pwd.dentry)
		path_put(&old_pwd);
}

static inline int replace_path(struct path *p, const struct path *old, const struct path *new)
{
	if (likely(p->dentry != old->dentry || p->mnt != old->mnt))
		return 0;
	*p = *new;
	return 1;
}

void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
{
	struct task_struct *g, *p;
	struct fs_struct *fs;
	int count = 0;

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		task_lock(p);
		fs = p->fs;
		if (fs) {
			int hits = 0;
			spin_lock(&fs->lock);
			write_seqcount_begin(&fs->seq);
			hits += replace_path(&fs->root, old_root, new_root);
			hits += replace_path(&fs->pwd, old_root, new_root);
			write_seqcount_end(&fs->seq);
			while (hits--) {
				count++;
				path_get(new_root);
			}
			spin_unlock(&fs->lock);
		}
		task_unlock(p);
	}
	read_unlock(&tasklist_lock);
	while (count--)
		path_put(old_root);
}

void free_fs_struct(struct fs_struct *fs)
{
	path_put(&fs->root);
	path_put(&fs->pwd);
	kmem_cache_free(fs_cachep, fs);
}

void exit_fs(struct task_struct *tsk)
{
	struct fs_struct *fs = tsk->fs;

	if (fs) {
		int kill;
		task_lock(tsk);
		spin_lock(&fs->lock);
		tsk->fs = NULL;
		kill = !--fs->users;
		spin_unlock(&fs->lock);
		task_unlock(tsk);
		if (kill)
			free_fs_struct(fs);
	}
}

struct fs_struct *copy_fs_struct(struct fs_struct *old)
{
	struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
	/* We don't need to lock fs - think why ;-) */
	if (fs) {
		fs->users = 1;
		fs->in_exec = 0;
		spin_lock_init(&fs->lock);
		seqcount_spinlock_init(&fs->seq, &fs->lock);
		fs->umask = old->umask;

		spin_lock(&old->lock);
		fs->root = old->root;
		path_get(&fs->root);
		fs->pwd = old->pwd;
		path_get(&fs->pwd);
		spin_unlock(&old->lock);
	}
	return fs;
}

int unshare_fs_struct(void)
{
	struct fs_struct *fs = current->fs;
	struct fs_struct *new_fs = copy_fs_struct(fs);
	int kill;

	if (!new_fs)
		return -ENOMEM;

	task_lock(current);
	spin_lock(&fs->lock);
	kill = !--fs->users;
	current->fs = new_fs;
	spin_unlock(&fs->lock);
	task_unlock(current);

	if (kill)
		free_fs_struct(fs);

	return 0;
}
EXPORT_SYMBOL_GPL(unshare_fs_struct);

int current_umask(void)
{
	return current->fs->umask;
}
EXPORT_SYMBOL(current_umask);

/* to be mentioned only in INIT_TASK */
struct fs_struct init_fs = {
	.users		= 1,
	.lock		= __SPIN_LOCK_UNLOCKED(init_fs.lock),
	.seq		= SEQCNT_SPINLOCK_ZERO(init_fs.seq, &init_fs.lock),
	.umask		= 0022,
};
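The write_seqcount_begin()/write_seqcount_end() pairs in set_fs_root() and set_fs_pwd() are there so that readers can sample fs->root and fs->pwd without taking fs->lock. A minimal sketch of such a reader, modeled on the kernel's seqcount retry pattern; sample_fs_pwd() is a hypothetical name, and a real caller would also need rcu_read_lock() (or a held reference) to keep the sampled path valid:

	/* Hypothetical lockless reader pairing with the seqcount writes
	 * in set_fs_pwd() above; a torn copy is caught by the retry.
	 */
	static void sample_fs_pwd(struct fs_struct *fs, struct path *pwd)
	{
		unsigned seq;

		do {
			seq = read_seqcount_begin(&fs->seq);
			*pwd = fs->pwd;
		} while (read_seqcount_retry(&fs->seq, seq));
	}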