mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-27 22:24:11 +08:00
35ce8ae9ae
Pull signal/exit/ptrace updates from Eric Biederman:
 "This set of changes deletes some dead code, makes a lot of cleanups
  which hopefully make the code easier to follow, and fixes bugs found
  along the way.

  The end-game, which I have not yet reached, is for fatal signals that
  generate coredumps to be short-circuit deliverable from
  complete_signal, for force_siginfo_to_task not to require changing
  userspace configured signal delivery state, and for the ptrace stops
  to always happen in locations where we can guarantee on all
  architectures that all of the registers are saved and available on
  the stack.

  Removal of profile_task_exit, profile_munmap, and profile_handoff_task
  are the big successes for dead code removal this round.

  A bunch of small bug fixes are included, as most of the issues
  reported were small enough that they would not affect bisection, so I
  simply added the fixes and did not fold the fixes into the changes
  they were fixing.

  There was a bug that broke coredumps piped to systemd-coredump. I
  dropped the change that caused that bug and replaced it entirely with
  something much more restrained. Unfortunately that required some
  rebasing.

  Some successes after this set of changes:

   - There are few enough calls to do_exit to audit in a reasonable
     amount of time.

   - The lifetime of struct kthread now matches the lifetime of struct
     task, and the pointer to struct kthread is no longer stored in
     set_child_tid.

   - The flag SIGNAL_GROUP_COREDUMP is removed. The field
     group_exit_task is removed.

   - Issues where task->exit_code was examined when
     signal->group_exit_code should have been examined were fixed.

  There are several loosely related changes included because I am
  cleaning up and if I don't include them they will probably get lost.

  The original postings of these changes can be found at:

    https://lkml.kernel.org/r/87a6ha4zsd.fsf@email.froward.int.ebiederm.org
    https://lkml.kernel.org/r/87bl1kunjj.fsf@email.froward.int.ebiederm.org
    https://lkml.kernel.org/r/87r19opkx1.fsf_-_@email.froward.int.ebiederm.org

  I trimmed back the last set of changes to only the obviously correct
  ones, simply because there was less time for review than I had hoped"

* 'signal-for-v5.17' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm/user-namespace: (44 commits)
  ptrace/m68k: Stop open coding ptrace_report_syscall
  ptrace: Remove unused regs argument from ptrace_report_syscall
  ptrace: Remove second setting of PT_SEIZED in ptrace_attach
  taskstats: Cleanup the use of task->exit_code
  exit: Use the correct exit_code in /proc/<pid>/stat
  exit: Fix the exit_code for wait_task_zombie
  exit: Coredumps reach do_group_exit
  exit: Remove profile_handoff_task
  exit: Remove profile_task_exit & profile_munmap
  signal: clean up kernel-doc comments
  signal: Remove the helper signal_group_exit
  signal: Rename group_exit_task group_exec_task
  coredump: Stop setting signal->group_exit_task
  signal: Remove SIGNAL_GROUP_COREDUMP
  signal: During coredumps set SIGNAL_GROUP_EXIT in zap_process
  signal: Make coredump handling explicit in complete_signal
  signal: Have prepare_signal detect coredumps using signal->core_state
  signal: Have the oom killer detect coredumps using signal->core_state
  exit: Move force_uaccess back into do_exit
  exit: Guarantee make_task_dead leaks the tsk when calling do_task_exit
  ...
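For context, a minimal userspace sketch of the interface fs/signalfd.c implements is shown below. This is illustrative glibc-level usage only; the headers, signal choice, and omitted error handling are my own assumptions and are not part of the kernel source that follows.

#include <sys/signalfd.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	sigset_t mask;
	struct signalfd_siginfo ssi;
	int sfd;

	/* Block the signals first so they stay pending for the signalfd. */
	sigemptyset(&mask);
	sigaddset(&mask, SIGINT);
	sigaddset(&mask, SIGQUIT);
	sigprocmask(SIG_BLOCK, &mask, NULL);

	/* ufd == -1: create a new descriptor (see do_signalfd4() below). */
	sfd = signalfd(-1, &mask, SFD_CLOEXEC);

	/* Each read() returns one or more struct signalfd_siginfo records. */
	if (read(sfd, &ssi, sizeof(ssi)) == sizeof(ssi))
		printf("got signal %u from pid %u\n",
		       (unsigned)ssi.ssi_signo, (unsigned)ssi.ssi_pid);

	close(sfd);
	return 0;
}

The block-then-read pattern matters because signalfd only collects signals from the pending queues; signals that are not blocked are delivered the normal way instead.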
355 lines
8.7 KiB
C
// SPDX-License-Identifier: GPL-2.0
/*
 *  fs/signalfd.c
 *
 *  Copyright (C) 2003 Linus Torvalds
 *
 *  Mon Mar 5, 2007: Davide Libenzi <davidel@xmailserver.org>
 *      Changed ->read() to return a siginfo structure instead of signal number.
 *      Fixed locking in ->poll().
 *      Added sighand-detach notification.
 *      Added fd re-use in sys_signalfd() syscall.
 *      Now using anonymous inode source.
 *      Thanks to Oleg Nesterov for useful code review and suggestions.
 *      More comments and suggestions from Arnd Bergmann.
 *  Sat May 19, 2007: Davi E. M. Arnaut <davi@haxent.com.br>
 *      Retrieve multiple signals with one read() call
 *  Sun Jul 15, 2007: Davide Libenzi <davidel@xmailserver.org>
 *      Attach to the sighand only during read() and poll().
 */

#include <linux/file.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>
#include <linux/signalfd.h>
#include <linux/syscalls.h>
#include <linux/proc_fs.h>
#include <linux/compat.h>

void signalfd_cleanup(struct sighand_struct *sighand)
{
	wake_up_pollfree(&sighand->signalfd_wqh);
}

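/*
 * Note: ->sigmask is stored pre-inverted by do_signalfd4() (see the
 * signotset() call there), so it can be passed directly to next_signal()
 * and dequeue_signal(), which skip signals that are in the set.
 */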
struct signalfd_ctx {
	sigset_t sigmask;
};

static int signalfd_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static __poll_t signalfd_poll(struct file *file, poll_table *wait)
{
	struct signalfd_ctx *ctx = file->private_data;
	__poll_t events = 0;

	poll_wait(file, &current->sighand->signalfd_wqh, wait);

	spin_lock_irq(&current->sighand->siglock);
	if (next_signal(&current->pending, &ctx->sigmask) ||
	    next_signal(&current->signal->shared_pending,
			&ctx->sigmask))
		events |= EPOLLIN;
	spin_unlock_irq(&current->sighand->siglock);

	return events;
}

/*
 * Copied from copy_siginfo_to_user() in kernel/signal.c
 */
static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
			     kernel_siginfo_t const *kinfo)
{
	struct signalfd_siginfo new;

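	/* struct signalfd_siginfo is userspace ABI, fixed at exactly 128 bytes. */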
	BUILD_BUG_ON(sizeof(struct signalfd_siginfo) != 128);

	/*
	 * Unused members should be zero ...
	 */
	memset(&new, 0, sizeof(new));

	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 */
	new.ssi_signo = kinfo->si_signo;
	new.ssi_errno = kinfo->si_errno;
	new.ssi_code = kinfo->si_code;
	switch (siginfo_layout(kinfo->si_signo, kinfo->si_code)) {
	case SIL_KILL:
		new.ssi_pid = kinfo->si_pid;
		new.ssi_uid = kinfo->si_uid;
		break;
	case SIL_TIMER:
		new.ssi_tid = kinfo->si_tid;
		new.ssi_overrun = kinfo->si_overrun;
		new.ssi_ptr = (long) kinfo->si_ptr;
		new.ssi_int = kinfo->si_int;
		break;
	case SIL_POLL:
		new.ssi_band = kinfo->si_band;
		new.ssi_fd = kinfo->si_fd;
		break;
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_FAULT_PERF_EVENT:
		/*
		 * Fall through to the SIL_FAULT case. SIL_FAULT_BNDERR,
		 * SIL_FAULT_PKUERR, and SIL_FAULT_PERF_EVENT are only
		 * generated by faults that deliver them synchronously to
		 * userspace. In case someone injects one of these signals
		 * and signalfd catches it treat it as SIL_FAULT.
		 */
	case SIL_FAULT:
		new.ssi_addr = (long) kinfo->si_addr;
		break;
	case SIL_FAULT_TRAPNO:
		new.ssi_addr = (long) kinfo->si_addr;
		new.ssi_trapno = kinfo->si_trapno;
		break;
	case SIL_FAULT_MCEERR:
		new.ssi_addr = (long) kinfo->si_addr;
		new.ssi_addr_lsb = (short) kinfo->si_addr_lsb;
		break;
	case SIL_CHLD:
		new.ssi_pid = kinfo->si_pid;
		new.ssi_uid = kinfo->si_uid;
		new.ssi_status = kinfo->si_status;
		new.ssi_utime = kinfo->si_utime;
		new.ssi_stime = kinfo->si_stime;
		break;
	case SIL_RT:
		/*
		 * This case catches also the signals queued by sigqueue().
		 */
		new.ssi_pid = kinfo->si_pid;
		new.ssi_uid = kinfo->si_uid;
		new.ssi_ptr = (long) kinfo->si_ptr;
		new.ssi_int = kinfo->si_int;
		break;
	case SIL_SYS:
		new.ssi_call_addr = (long) kinfo->si_call_addr;
		new.ssi_syscall = kinfo->si_syscall;
		new.ssi_arch = kinfo->si_arch;
		break;
	}

	if (copy_to_user(uinfo, &new, sizeof(struct signalfd_siginfo)))
		return -EFAULT;

	return sizeof(*uinfo);
}

static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, kernel_siginfo_t *info,
				int nonblock)
{
	enum pid_type type;
	ssize_t ret;
	DECLARE_WAITQUEUE(wait, current);

	spin_lock_irq(&current->sighand->siglock);
	ret = dequeue_signal(current, &ctx->sigmask, info, &type);
	switch (ret) {
	case 0:
		if (!nonblock)
			break;
		ret = -EAGAIN;
		fallthrough;
	default:
		spin_unlock_irq(&current->sighand->siglock);
		return ret;
	}

	add_wait_queue(&current->sighand->signalfd_wqh, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		ret = dequeue_signal(current, &ctx->sigmask, info, &type);
		if (ret != 0)
			break;
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		spin_unlock_irq(&current->sighand->siglock);
		schedule();
		spin_lock_irq(&current->sighand->siglock);
	}
	spin_unlock_irq(&current->sighand->siglock);

	remove_wait_queue(&current->sighand->signalfd_wqh, &wait);
	__set_current_state(TASK_RUNNING);

	return ret;
}

/*
 * Returns a multiple of the size of a "struct signalfd_siginfo", or a negative
 * error code. The "count" parameter must be at least the size of a
 * "struct signalfd_siginfo".
 */
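/*
 * Only the first dequeue may block: after one siginfo has been copied out,
 * signalfd_dequeue() is called in non-blocking mode, so a single read()
 * drains whatever is already pending and then returns.
 */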
static ssize_t signalfd_read(struct file *file, char __user *buf, size_t count,
			     loff_t *ppos)
{
	struct signalfd_ctx *ctx = file->private_data;
	struct signalfd_siginfo __user *siginfo;
	int nonblock = file->f_flags & O_NONBLOCK;
	ssize_t ret, total = 0;
	kernel_siginfo_t info;

	count /= sizeof(struct signalfd_siginfo);
	if (!count)
		return -EINVAL;

	siginfo = (struct signalfd_siginfo __user *) buf;
	do {
		ret = signalfd_dequeue(ctx, &info, nonblock);
		if (unlikely(ret <= 0))
			break;
		ret = signalfd_copyinfo(siginfo, &info);
		if (ret < 0)
			break;
		siginfo++;
		total += ret;
		nonblock = 1;
	} while (--count);

	return total ? total : ret;
}

#ifdef CONFIG_PROC_FS
static void signalfd_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct signalfd_ctx *ctx = f->private_data;
	sigset_t sigmask;

	sigmask = ctx->sigmask;
	signotset(&sigmask);
	render_sigset_t(m, "sigmask:\t", &sigmask);
}
#endif

static const struct file_operations signalfd_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo = signalfd_show_fdinfo,
#endif
	.release = signalfd_release,
	.poll = signalfd_poll,
	.read = signalfd_read,
	.llseek = noop_llseek,
};

static int do_signalfd4(int ufd, sigset_t *mask, int flags)
{
	struct signalfd_ctx *ctx;

	/* Check the SFD_* constants for consistency. */
	BUILD_BUG_ON(SFD_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(SFD_NONBLOCK != O_NONBLOCK);

	if (flags & ~(SFD_CLOEXEC | SFD_NONBLOCK))
		return -EINVAL;

	sigdelsetmask(mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
	signotset(mask);

	if (ufd == -1) {
		ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
		if (!ctx)
			return -ENOMEM;

		ctx->sigmask = *mask;

		/*
		 * When we call this, the initialization must be complete, since
		 * anon_inode_getfd() will install the fd.
		 */
		ufd = anon_inode_getfd("[signalfd]", &signalfd_fops, ctx,
				       O_RDWR | (flags & (O_CLOEXEC | O_NONBLOCK)));
		if (ufd < 0)
			kfree(ctx);
	} else {
		struct fd f = fdget(ufd);
		if (!f.file)
			return -EBADF;
		ctx = f.file->private_data;
		if (f.file->f_op != &signalfd_fops) {
			fdput(f);
			return -EINVAL;
		}
		spin_lock_irq(&current->sighand->siglock);
		ctx->sigmask = *mask;
		spin_unlock_irq(&current->sighand->siglock);

		wake_up(&current->sighand->signalfd_wqh);
		fdput(f);
	}

	return ufd;
}

SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask,
		size_t, sizemask, int, flags)
{
	sigset_t mask;

	if (sizemask != sizeof(sigset_t))
		return -EINVAL;
	if (copy_from_user(&mask, user_mask, sizeof(mask)))
		return -EFAULT;
	return do_signalfd4(ufd, &mask, flags);
}

SYSCALL_DEFINE3(signalfd, int, ufd, sigset_t __user *, user_mask,
		size_t, sizemask)
{
	sigset_t mask;

	if (sizemask != sizeof(sigset_t))
		return -EINVAL;
	if (copy_from_user(&mask, user_mask, sizeof(mask)))
		return -EFAULT;
	return do_signalfd4(ufd, &mask, 0);
}

#ifdef CONFIG_COMPAT
static long do_compat_signalfd4(int ufd,
			const compat_sigset_t __user *user_mask,
			compat_size_t sigsetsize, int flags)
{
	sigset_t mask;

	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;
	if (get_compat_sigset(&mask, user_mask))
		return -EFAULT;
	return do_signalfd4(ufd, &mask, flags);
}

COMPAT_SYSCALL_DEFINE4(signalfd4, int, ufd,
		     const compat_sigset_t __user *, user_mask,
		     compat_size_t, sigsetsize,
		     int, flags)
{
	return do_compat_signalfd4(ufd, user_mask, sigsetsize, flags);
}

COMPAT_SYSCALL_DEFINE3(signalfd, int, ufd,
		     const compat_sigset_t __user *, user_mask,
		     compat_size_t, sigsetsize)
{
	return do_compat_signalfd4(ufd, user_mask, sigsetsize, 0);
}
#endif
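As a companion to the sketch near the top of this page, the ufd != -1 branch of do_signalfd4() above is what allows userspace to replace the mask of an existing signalfd. A minimal sketch follows; the helper name is hypothetical and error handling is omitted.

#include <signal.h>
#include <sys/signalfd.h>

/*
 * Replace the mask of an already-open signalfd; the same descriptor is
 * returned on success. SIGKILL and SIGSTOP are silently dropped from the
 * mask by do_signalfd4().
 */
int signalfd_update_mask(int sfd, const sigset_t *mask)
{
	return signalfd(sfd, mask, 0);
}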