mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-24 12:44:11 +08:00
67850b7bdc
of Peter Zijlstra was encountering with ptrace in his freezer rewrite I identified some cleanups to ptrace_stop that make sense on their own and move make resolving the other problems much simpler. The biggest issue is the habbit of the ptrace code to change task->__state from the tracer to suppress TASK_WAKEKILL from waking up the tracee. No other code in the kernel does that and it is straight forward to update signal_wake_up and friends to make that unnecessary. Peter's task freezer sets frozen tasks to a new state TASK_FROZEN and then it stores them by calling "wake_up_state(t, TASK_FROZEN)" relying on the fact that all stopped states except the special stop states can tolerate spurious wake up and recover their state. The state of stopped and traced tasked is changed to be stored in task->jobctl as well as in task->__state. This makes it possible for the freezer to recover tasks in these special states, as well as serving as a general cleanup. With a little more work in that direction I believe TASK_STOPPED can learn to tolerate spurious wake ups and become an ordinary stop state. The TASK_TRACED state has to remain a special state as the registers for a process are only reliably available when the process is stopped in the scheduler. Fundamentally ptrace needs acess to the saved register values of a task. There are bunch of semi-random ptrace related cleanups that were found while looking at these issues. One cleanup that deserves to be called out is from commit57b6de08b5
("ptrace: Admit ptrace_stop can generate spuriuos SIGTRAPs"). This makes a change that is technically user space visible, in the handling of what happens to a tracee when a tracer dies unexpectedly. According to our testing and our understanding of userspace nothing cares that spurious SIGTRAPs can be generated in that case. The entire discussion can be found at: https://lkml.kernel.org/r/87a6bv6dl6.fsf_-_@email.froward.int.ebiederm.org Eric W. Biederman (11): signal: Rename send_signal send_signal_locked signal: Replace __group_send_sig_info with send_signal_locked ptrace/um: Replace PT_DTRACE with TIF_SINGLESTEP ptrace/xtensa: Replace PT_SINGLESTEP with TIF_SINGLESTEP ptrace: Remove arch_ptrace_attach signal: Use lockdep_assert_held instead of assert_spin_locked ptrace: Reimplement PTRACE_KILL by always sending SIGKILL ptrace: Document that wait_task_inactive can't fail ptrace: Admit ptrace_stop can generate spuriuos SIGTRAPs ptrace: Don't change __state ptrace: Always take siglock in ptrace_resume Peter Zijlstra (1): sched,signal,ptrace: Rework TASK_TRACED, TASK_STOPPED state arch/ia64/include/asm/ptrace.h | 4 -- arch/ia64/kernel/ptrace.c | 57 ---------------- arch/um/include/asm/thread_info.h | 2 + arch/um/kernel/exec.c | 2 +- arch/um/kernel/process.c | 2 +- arch/um/kernel/ptrace.c | 8 +-- arch/um/kernel/signal.c | 4 +- arch/x86/kernel/step.c | 3 +- arch/xtensa/kernel/ptrace.c | 4 +- arch/xtensa/kernel/signal.c | 4 +- drivers/tty/tty_jobctrl.c | 4 +- include/linux/ptrace.h | 7 -- include/linux/sched.h | 10 ++- include/linux/sched/jobctl.h | 8 +++ include/linux/sched/signal.h | 20 ++++-- include/linux/signal.h | 3 +- kernel/ptrace.c | 87 ++++++++--------------- kernel/sched/core.c | 5 +- kernel/signal.c | 140 +++++++++++++++++--------------------- kernel/time/posix-cpu-timers.c | 6 +- 20 files changed, 140 insertions(+), 240 deletions(-) Signed-off-by: "Eric W. 
Biederman" <ebiederm@xmission.com> -----BEGIN PGP SIGNATURE----- iQIzBAABCgAdFiEEgjlraLDcwBA2B+6cC/v6Eiajj0AFAmKaXaYACgkQC/v6Eiaj j0CgoA/+JncSQ6PY2D5Jh1apvHzmnRsFXzr3DRvtv/CVx4oIebOXRQFyVDeD5tRn TmMgB29HpBlHRDLojlmlZRGAld1HR/aPEW9j8W1D3Sy/ZFO5L8lQitv9aDHO9Ntw 4lZvlhS1M0KhATudVVBqSPixiG6CnV5SsGmixqdOyg7xcXSY6G1l2nB7Zk9I3Tat ZlmhuZ6R5Z5qsm4MEq0vUSrnsHiGxYrpk6uQOaVz8Wkv8ZFmbutt6XgxF0tsyZNn mHSmWSiZzIgBjTlaibEmxi8urYJTPj3vGBeJQVYHblFwLFi6+Oy7bDxQbWjQvaZh DsgWPScfBF4Jm0+8hhCiSYpvPp8XnZuklb4LNCeok/VFr+KfSmpJTIhn00kagQ1u vxQDqLws8YLW4qsfGydfx9uUIFCbQE/V2VDYk5J3Re3gkUNDOOR1A56hPniKv6VB 2aqGO2Fl0RdBbUa3JF+XI5Pwq5y1WrqR93EUvj+5+u5W9rZL/8WLBHBMEz6gbmfD DhwFE0y8TG2WRlWJVEDRId+5zo3di/YvasH0vJZ5HbrxhS2RE/yIGAd+kKGx/lZO qWDJC7IHvFJ7Mw5KugacyF0SHeNdloyBM7KZW6HeXmgKn9IMJBpmwib92uUkRZJx D8j/bHHqD/zsgQ39nO+c4M0MmhO/DsPLG/dnGKrRCu7v1tmEnkY= =ZUuO -----END PGP SIGNATURE----- Merge tag 'ptrace_stop-cleanup-for-v5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm/user-namespace Pull ptrace_stop cleanups from Eric Biederman: "While looking at the ptrace problems with PREEMPT_RT and the problems Peter Zijlstra was encountering with ptrace in his freezer rewrite I identified some cleanups to ptrace_stop that make sense on their own and move make resolving the other problems much simpler. The biggest issue is the habit of the ptrace code to change task->__state from the tracer to suppress TASK_WAKEKILL from waking up the tracee. No other code in the kernel does that and it is straight forward to update signal_wake_up and friends to make that unnecessary. Peter's task freezer sets frozen tasks to a new state TASK_FROZEN and then it stores them by calling "wake_up_state(t, TASK_FROZEN)" relying on the fact that all stopped states except the special stop states can tolerate spurious wake up and recover their state. The state of stopped and traced tasked is changed to be stored in task->jobctl as well as in task->__state. 
This makes it possible for the freezer to recover tasks in these special states, as well as serving as a general cleanup. With a little more work in that direction I believe TASK_STOPPED can learn to tolerate spurious wake ups and become an ordinary stop state. The TASK_TRACED state has to remain a special state as the registers for a process are only reliably available when the process is stopped in the scheduler. Fundamentally ptrace needs acess to the saved register values of a task. There are bunch of semi-random ptrace related cleanups that were found while looking at these issues. One cleanup that deserves to be called out is from commit57b6de08b5
("ptrace: Admit ptrace_stop can generate spuriuos SIGTRAPs"). This makes a change that is technically user space visible, in the handling of what happens to a tracee when a tracer dies unexpectedly. According to our testing and our understanding of userspace nothing cares that spurious SIGTRAPs can be generated in that case" * tag 'ptrace_stop-cleanup-for-v5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm/user-namespace: sched,signal,ptrace: Rework TASK_TRACED, TASK_STOPPED state ptrace: Always take siglock in ptrace_resume ptrace: Don't change __state ptrace: Admit ptrace_stop can generate spuriuos SIGTRAPs ptrace: Document that wait_task_inactive can't fail ptrace: Reimplement PTRACE_KILL by always sending SIGKILL signal: Use lockdep_assert_held instead of assert_spin_locked ptrace: Remove arch_ptrace_attach ptrace/xtensa: Replace PT_SINGLESTEP with TIF_SINGLESTEP ptrace/um: Replace PT_DTRACE with TIF_SINGLESTEP signal: Replace __group_send_sig_info with send_signal_locked signal: Rename send_signal send_signal_locked
586 lines
14 KiB
C
586 lines
14 KiB
C
/*
|
|
* This file is subject to the terms and conditions of the GNU General Public
|
|
* License. See the file "COPYING" in the main directory of this archive
|
|
* for more details.
|
|
*
|
|
* Copyright (C) 2001 - 2007 Tensilica Inc.
|
|
*
|
|
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
|
|
* Chris Zankel <chris@zankel.net>
|
|
* Scott Foehner<sfoehner@yahoo.com>,
|
|
* Kevin Chea
|
|
* Marc Gauthier<marc@tensilica.com> <marc@alumni.uwaterloo.ca>
|
|
*/
|
|
|
|
#include <linux/audit.h>
|
|
#include <linux/errno.h>
|
|
#include <linux/hw_breakpoint.h>
|
|
#include <linux/kernel.h>
|
|
#include <linux/mm.h>
|
|
#include <linux/perf_event.h>
|
|
#include <linux/ptrace.h>
|
|
#include <linux/regset.h>
|
|
#include <linux/sched.h>
|
|
#include <linux/sched/task_stack.h>
|
|
#include <linux/seccomp.h>
|
|
#include <linux/security.h>
|
|
#include <linux/signal.h>
|
|
#include <linux/smp.h>
|
|
#include <linux/uaccess.h>
|
|
|
|
#define CREATE_TRACE_POINTS
|
|
#include <trace/events/syscalls.h>
|
|
|
|
#include <asm/coprocessor.h>
|
|
#include <asm/elf.h>
|
|
#include <asm/page.h>
|
|
#include <asm/ptrace.h>
|
|
|
|
/*
 * regset get() handler for NT_PRSTATUS: copy the tracee's
 * general-purpose registers out in struct user_pt_regs layout.
 * The live windowed a-registers are rotated so userspace sees them
 * starting at a0 regardless of the current windowbase.
 */
static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	struct pt_regs *regs = task_pt_regs(target);
	struct user_pt_regs newregs = {
		.pc = regs->pc,
		/* PS.EXCM is an exception-handling artifact; hide it */
		.ps = regs->ps & ~(1 << PS_EXCM_BIT),
		.lbeg = regs->lbeg,
		.lend = regs->lend,
		.lcount = regs->lcount,
		.sar = regs->sar,
		.threadptr = regs->threadptr,
		.windowbase = regs->windowbase,
		.windowstart = regs->windowstart,
		.syscall = regs->syscall,
	};

	/*
	 * Un-rotate the address registers: the frame at the current
	 * windowbase wraps around the end of areg[] (4 registers per
	 * window entry, 16 bytes per entry).
	 */
	memcpy(newregs.a,
	       regs->areg + XCHAL_NUM_AREGS - regs->windowbase * 4,
	       regs->windowbase * 16);
	memcpy(newregs.a + regs->windowbase * 4,
	       regs->areg,
	       (WSBITS - regs->windowbase) * 16);

	return membuf_write(&to, &newregs, sizeof(newregs));
}
|
|
|
|
/*
 * regset set() handler for NT_PRSTATUS: update the tracee's
 * general-purpose registers from a struct user_pt_regs image.
 * The a-registers supplied by userspace (starting at a0) are rotated
 * back into the live window layout.
 */
static int gpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs = {0};
	struct pt_regs *regs;
	/* only the CALLINC and OWB fields of PS may be changed this way */
	const u32 ps_mask = PS_CALLINC_MASK | PS_OWB_MASK;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	/* windowbase must select one of the 4-register window frames */
	if (newregs.windowbase >= XCHAL_NUM_AREGS / 4)
		return -EINVAL;

	regs = task_pt_regs(target);
	regs->pc = newregs.pc;
	regs->ps = (regs->ps & ~ps_mask) | (newregs.ps & ps_mask);
	regs->lbeg = newregs.lbeg;
	regs->lend = newregs.lend;
	regs->lcount = newregs.lcount;
	regs->sar = newregs.sar;
	regs->threadptr = newregs.threadptr;

	/* a syscall number of 0 leaves the current value untouched */
	if (newregs.syscall)
		regs->syscall = newregs.syscall;

	if (newregs.windowbase != regs->windowbase ||
	    newregs.windowstart != regs->windowstart) {
		u32 rotws, wmask;

		/*
		 * Recompute the window mask from the new windowstart,
		 * rotated so the current frame sits at bit 0 (bit 0 of
		 * the rotated value itself is cleared here).
		 */
		rotws = (((newregs.windowstart |
			   (newregs.windowstart << WSBITS)) >>
			  newregs.windowbase) &
			 ((1 << WSBITS) - 1)) & ~1;
		wmask = ((rotws ? WSBITS + 1 - ffs(rotws) : 0) << 4) |
			(rotws & 0xF) | 1;
		regs->windowbase = newregs.windowbase;
		regs->windowstart = newregs.windowstart;
		regs->wmask = wmask;
	}

	/* rotate the a-registers back into the live window layout */
	memcpy(regs->areg + XCHAL_NUM_AREGS - newregs.windowbase * 4,
	       newregs.a, newregs.windowbase * 16);
	memcpy(regs->areg, newregs.a + newregs.windowbase * 4,
	       (WSBITS - newregs.windowbase) * 16);

	return 0;
}
|
|
|
|
/*
 * regset get() handler for NT_PRFPREG: copy the TIE (coprocessor and
 * extra) register state out in elf_xtregs_t layout.  The temporary
 * buffer is heap-allocated because elf_xtregs_t can be large.
 */
static int tie_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	int ret;
	struct pt_regs *regs = task_pt_regs(target);
	struct thread_info *ti = task_thread_info(target);
	elf_xtregs_t *newregs = kzalloc(sizeof(elf_xtregs_t), GFP_KERNEL);

	if (!newregs)
		return -ENOMEM;

	newregs->opt = regs->xtregs_opt;
	newregs->user = ti->xtregs_user;

#if XTENSA_HAVE_COPROCESSORS
	/* Flush all coprocessor registers to memory. */
	coprocessor_flush_all(ti);
	newregs->cp0 = ti->xtregs_cp.cp0;
	newregs->cp1 = ti->xtregs_cp.cp1;
	newregs->cp2 = ti->xtregs_cp.cp2;
	newregs->cp3 = ti->xtregs_cp.cp3;
	newregs->cp4 = ti->xtregs_cp.cp4;
	newregs->cp5 = ti->xtregs_cp.cp5;
	newregs->cp6 = ti->xtregs_cp.cp6;
	newregs->cp7 = ti->xtregs_cp.cp7;
#endif
	ret = membuf_write(&to, newregs, sizeof(*newregs));
	kfree(newregs);
	return ret;
}
|
|
|
|
/*
 * regset set() handler for NT_PRFPREG: update the TIE (coprocessor and
 * extra) register state from an elf_xtregs_t image.  The temporary
 * buffer is heap-allocated because elf_xtregs_t can be large; it is
 * freed on all paths via the exit label.
 */
static int tie_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct pt_regs *regs = task_pt_regs(target);
	struct thread_info *ti = task_thread_info(target);
	elf_xtregs_t *newregs = kzalloc(sizeof(elf_xtregs_t), GFP_KERNEL);

	if (!newregs)
		return -ENOMEM;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 newregs, 0, -1);

	if (ret)
		goto exit;
	regs->xtregs_opt = newregs->opt;
	ti->xtregs_user = newregs->user;

#if XTENSA_HAVE_COPROCESSORS
	/* Flush all coprocessors before we overwrite them. */
	coprocessor_flush_release_all(ti);
	ti->xtregs_cp.cp0 = newregs->cp0;
	ti->xtregs_cp.cp1 = newregs->cp1;
	ti->xtregs_cp.cp2 = newregs->cp2;
	ti->xtregs_cp.cp3 = newregs->cp3;
	ti->xtregs_cp.cp4 = newregs->cp4;
	ti->xtregs_cp.cp5 = newregs->cp5;
	ti->xtregs_cp.cp6 = newregs->cp6;
	ti->xtregs_cp.cp7 = newregs->cp7;
#endif
exit:
	kfree(newregs);
	return ret;
}
|
|
|
|
/* Index of each entry in xtensa_regsets[]. */
enum xtensa_regset {
	REGSET_GPR,
	REGSET_TIE,
};
|
|
|
|
/*
 * Regsets exported to ptrace and core dumps: general-purpose registers
 * (NT_PRSTATUS) and TIE/coprocessor state (NT_PRFPREG), both accessed
 * as arrays of 32-bit words.
 */
static const struct user_regset xtensa_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = gpr_get,
		.set = gpr_set,
	},
	[REGSET_TIE] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(elf_xtregs_t) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = tie_get,
		.set = tie_set,
	},
};
|
|
|
|
/* The single regset view used for all xtensa tasks. */
static const struct user_regset_view user_xtensa_view = {
	.name = "xtensa",
	.e_machine = EM_XTENSA,
	.regsets = xtensa_regsets,
	.n = ARRAY_SIZE(xtensa_regsets)
};
|
|
|
|
/* Return the regset view for @task; identical for every xtensa task. */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_xtensa_view;
}
|
|
|
|
/*
 * Request single-stepping of @child: only sets TIF_SINGLESTEP here;
 * the flag is consumed elsewhere (e.g. at syscall exit reporting).
 */
void user_enable_single_step(struct task_struct *child)
{
	set_tsk_thread_flag(child, TIF_SINGLESTEP);
}
|
|
|
|
/* Cancel a single-step request by clearing TIF_SINGLESTEP on @child. */
void user_disable_single_step(struct task_struct *child)
{
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}
|
|
|
|
/*
 * Called by kernel/ptrace.c when detaching to disable single stepping.
 */

void ptrace_disable(struct task_struct *child)
{
	/* Nothing to do.. single-step state lives in TIF_SINGLESTEP and
	 * is cleared via user_disable_single_step().
	 */
}
|
|
|
|
/* PTRACE_GETREGS: copy the GPR regset to userspace as xtensa_gregset_t. */
static int ptrace_getregs(struct task_struct *child, void __user *uregs)
{
	return copy_regset_to_user(child, &user_xtensa_view, REGSET_GPR,
				   0, sizeof(xtensa_gregset_t), uregs);
}
|
|
|
|
/* PTRACE_SETREGS: load the GPR regset from an xtensa_gregset_t in userspace. */
static int ptrace_setregs(struct task_struct *child, void __user *uregs)
{
	return copy_regset_from_user(child, &user_xtensa_view, REGSET_GPR,
				     0, sizeof(xtensa_gregset_t), uregs);
}
|
|
|
|
/* PTRACE_GETXTREGS: copy the TIE regset to userspace as elf_xtregs_t. */
static int ptrace_getxregs(struct task_struct *child, void __user *uregs)
{
	return copy_regset_to_user(child, &user_xtensa_view, REGSET_TIE,
				   0, sizeof(elf_xtregs_t), uregs);
}
|
|
|
|
/* PTRACE_SETXTREGS: load the TIE regset from an elf_xtregs_t in userspace. */
static int ptrace_setxregs(struct task_struct *child, void __user *uregs)
{
	return copy_regset_from_user(child, &user_xtensa_view, REGSET_TIE,
				     0, sizeof(elf_xtregs_t), uregs);
}
|
|
|
|
static int ptrace_peekusr(struct task_struct *child, long regno,
|
|
long __user *ret)
|
|
{
|
|
struct pt_regs *regs;
|
|
unsigned long tmp;
|
|
|
|
regs = task_pt_regs(child);
|
|
tmp = 0; /* Default return value. */
|
|
|
|
switch(regno) {
|
|
case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
|
|
tmp = regs->areg[regno - REG_AR_BASE];
|
|
break;
|
|
|
|
case REG_A_BASE ... REG_A_BASE + 15:
|
|
tmp = regs->areg[regno - REG_A_BASE];
|
|
break;
|
|
|
|
case REG_PC:
|
|
tmp = regs->pc;
|
|
break;
|
|
|
|
case REG_PS:
|
|
/* Note: PS.EXCM is not set while user task is running;
|
|
* its being set in regs is for exception handling
|
|
* convenience.
|
|
*/
|
|
tmp = (regs->ps & ~(1 << PS_EXCM_BIT));
|
|
break;
|
|
|
|
case REG_WB:
|
|
break; /* tmp = 0 */
|
|
|
|
case REG_WS:
|
|
{
|
|
unsigned long wb = regs->windowbase;
|
|
unsigned long ws = regs->windowstart;
|
|
tmp = ((ws >> wb) | (ws << (WSBITS - wb))) &
|
|
((1 << WSBITS) - 1);
|
|
break;
|
|
}
|
|
case REG_LBEG:
|
|
tmp = regs->lbeg;
|
|
break;
|
|
|
|
case REG_LEND:
|
|
tmp = regs->lend;
|
|
break;
|
|
|
|
case REG_LCOUNT:
|
|
tmp = regs->lcount;
|
|
break;
|
|
|
|
case REG_SAR:
|
|
tmp = regs->sar;
|
|
break;
|
|
|
|
case SYSCALL_NR:
|
|
tmp = regs->syscall;
|
|
break;
|
|
|
|
default:
|
|
return -EIO;
|
|
}
|
|
return put_user(tmp, ret);
|
|
}
|
|
|
|
static int ptrace_pokeusr(struct task_struct *child, long regno, long val)
|
|
{
|
|
struct pt_regs *regs;
|
|
regs = task_pt_regs(child);
|
|
|
|
switch (regno) {
|
|
case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
|
|
regs->areg[regno - REG_AR_BASE] = val;
|
|
break;
|
|
|
|
case REG_A_BASE ... REG_A_BASE + 15:
|
|
regs->areg[regno - REG_A_BASE] = val;
|
|
break;
|
|
|
|
case REG_PC:
|
|
regs->pc = val;
|
|
break;
|
|
|
|
case SYSCALL_NR:
|
|
regs->syscall = val;
|
|
break;
|
|
|
|
default:
|
|
return -EIO;
|
|
}
|
|
return 0;
|
|
}
|
|
|
|
#ifdef CONFIG_HAVE_HW_BREAKPOINT
|
|
/*
 * perf overflow handler for ptrace-installed hardware break-/watchpoints.
 * Finds the slot whose event fired and reports it with
 * force_sig_ptrace_errno_trap(); the slot is encoded as (index << 1) for
 * instruction breakpoints and (index << 1) | 1 for data watchpoints,
 * matching the addr encoding of PTRACE_GETHBPREGS/PTRACE_SETHBPREGS.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	int i;
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);

	if (bp->attr.bp_type & HW_BREAKPOINT_X) {
		/* instruction breakpoint: locate the matching slot */
		for (i = 0; i < XCHAL_NUM_IBREAK; ++i)
			if (current->thread.ptrace_bp[i] == bp)
				break;
		i <<= 1;
	} else {
		/* data watchpoint: locate the matching slot */
		for (i = 0; i < XCHAL_NUM_DBREAK; ++i)
			if (current->thread.ptrace_wp[i] == bp)
				break;
		i = (i << 1) | 1;
	}

	force_sig_ptrace_errno_trap(i, (void __user *)bkpt->address);
}
|
|
|
|
/*
 * Allocate a disabled user hardware breakpoint of the given @type for
 * @tsk.  Address and length are placeholders; the caller configures the
 * real values later via modify_user_hw_breakpoint().
 */
static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
{
	struct perf_event_attr attr;

	ptrace_breakpoint_init(&attr);

	/* Initialise fields to sane defaults. */
	attr.bp_addr = 0;
	attr.bp_len = 1;
	attr.bp_type = type;
	attr.disabled = 1;

	return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL,
					   tsk);
}
|
|
|
|
/*
 * Address bit 0 chooses the instruction (0) or data (1) break register;
 * bits 31..1 are the register number.
 * Both PTRACE_GETHBPREGS and PTRACE_SETHBPREGS transfer two 32-bit words:
 * address (0) and control (1).
 * Instruction breakpoint control word is 0 to clear a breakpoint, 1 to set.
 * Data breakpoint control word bit 31 is 'trigger on store', bit 30 is
 * 'trigger on load', bits 29..0 are length.  Length 0 is used to clear a
 * breakpoint.  To set a breakpoint the length must be a power of 2 in the
 * range 1..64 and the address must be length-aligned.
 */
|
|
|
|
/*
 * PTRACE_GETHBPREGS: copy the address and control words of one
 * break-/watchpoint slot to userspace.  @addr encodes the slot as
 * described in the comment above; an empty slot reads back as zeros.
 */
static long ptrace_gethbpregs(struct task_struct *child, long addr,
			      long __user *datap)
{
	struct perf_event *bp;
	u32 user_data[2] = {0};
	bool dbreak = addr & 1;
	unsigned idx = addr >> 1;

	if ((!dbreak && idx >= XCHAL_NUM_IBREAK) ||
	    (dbreak && idx >= XCHAL_NUM_DBREAK))
		return -EINVAL;

	if (dbreak)
		bp = child->thread.ptrace_wp[idx];
	else
		bp = child->thread.ptrace_bp[idx];

	if (bp) {
		user_data[0] = bp->attr.bp_addr;
		/* a disabled breakpoint reports length 0 (= cleared) */
		user_data[1] = bp->attr.disabled ? 0 : bp->attr.bp_len;
		if (dbreak) {
			if (bp->attr.bp_type & HW_BREAKPOINT_R)
				user_data[1] |= DBREAKC_LOAD_MASK;
			if (bp->attr.bp_type & HW_BREAKPOINT_W)
				user_data[1] |= DBREAKC_STOR_MASK;
		}
	}

	if (copy_to_user(datap, user_data, sizeof(user_data)))
		return -EFAULT;

	return 0;
}
|
|
|
|
/*
 * PTRACE_SETHBPREGS: program one break-/watchpoint slot from the
 * address and control words at @datap.  @addr encodes the slot as
 * described in the comment above.  The perf event backing a slot is
 * created lazily on first use and modified in place thereafter.
 */
static long ptrace_sethbpregs(struct task_struct *child, long addr,
			      long __user *datap)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	u32 user_data[2];
	bool dbreak = addr & 1;
	unsigned idx = addr >> 1;
	int bp_type = 0;

	if ((!dbreak && idx >= XCHAL_NUM_IBREAK) ||
	    (dbreak && idx >= XCHAL_NUM_DBREAK))
		return -EINVAL;

	if (copy_from_user(user_data, datap, sizeof(user_data)))
		return -EFAULT;

	if (dbreak) {
		bp = child->thread.ptrace_wp[idx];
		/* translate DBREAKC load/store bits to perf bp types */
		if (user_data[1] & DBREAKC_LOAD_MASK)
			bp_type |= HW_BREAKPOINT_R;
		if (user_data[1] & DBREAKC_STOR_MASK)
			bp_type |= HW_BREAKPOINT_W;
	} else {
		bp = child->thread.ptrace_bp[idx];
		bp_type = HW_BREAKPOINT_X;
	}

	if (!bp) {
		/* no event for this slot yet: create a disabled one */
		bp = ptrace_hbp_create(child,
				       bp_type ? bp_type : HW_BREAKPOINT_RW);
		if (IS_ERR(bp))
			return PTR_ERR(bp);
		if (dbreak)
			child->thread.ptrace_wp[idx] = bp;
		else
			child->thread.ptrace_bp[idx] = bp;
	}

	attr = bp->attr;
	attr.bp_addr = user_data[0];
	/* length 0 (after stripping the type bits) clears the breakpoint */
	attr.bp_len = user_data[1] & ~(DBREAKC_LOAD_MASK | DBREAKC_STOR_MASK);
	attr.bp_type = bp_type;
	attr.disabled = !attr.bp_len;

	return modify_user_hw_breakpoint(bp, &attr);
}
|
|
#endif
|
|
|
|
/*
 * Architecture-specific ptrace dispatcher: handles the xtensa-specific
 * requests and defers everything else to the generic ptrace_request().
 */
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret = -EPERM;
	void __user *datap = (void __user *) data;

	switch (request) {
	case PTRACE_PEEKUSR:	/* read register specified by addr. */
		ret = ptrace_peekusr(child, addr, datap);
		break;

	case PTRACE_POKEUSR:	/* write register specified by addr. */
		ret = ptrace_pokeusr(child, addr, data);
		break;

	case PTRACE_GETREGS:
		ret = ptrace_getregs(child, datap);
		break;

	case PTRACE_SETREGS:
		ret = ptrace_setregs(child, datap);
		break;

	case PTRACE_GETXTREGS:
		ret = ptrace_getxregs(child, datap);
		break;

	case PTRACE_SETXTREGS:
		ret = ptrace_setxregs(child, datap);
		break;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case PTRACE_GETHBPREGS:
		ret = ptrace_gethbpregs(child, addr, datap);
		break;

	case PTRACE_SETHBPREGS:
		ret = ptrace_sethbpregs(child, addr, datap);
		break;
#endif
	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
|
|
|
|
void do_syscall_trace_leave(struct pt_regs *regs);

/*
 * Syscall-entry tracing hook.  Returns 1 if the syscall should proceed,
 * 0 if it must be skipped (cancelled by the tracer, no valid syscall
 * number, or denied by seccomp).
 */
int do_syscall_trace_enter(struct pt_regs *regs)
{
	/* preset the error return in case no syscall is executed */
	if (regs->syscall == NO_SYSCALL)
		regs->areg[2] = -ENOSYS;

	/*
	 * Let the tracer observe the entry; a nonzero report result
	 * cancels the syscall.
	 */
	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    ptrace_report_syscall_entry(regs)) {
		regs->areg[2] = -ENOSYS;
		regs->syscall = NO_SYSCALL;
		return 0;
	}

	/* skipped or seccomp-denied syscall: run the exit hooks and bail */
	if (regs->syscall == NO_SYSCALL ||
	    secure_computing() == -1) {
		do_syscall_trace_leave(regs);
		return 0;
	}

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, syscall_get_nr(current, regs));

	/* xtensa syscall args live in a6, a3, a4, a5 */
	audit_syscall_entry(regs->syscall, regs->areg[6],
			    regs->areg[3], regs->areg[4],
			    regs->areg[5]);
	return 1;
}
|
|
|
|
/*
 * Syscall-exit tracing hook: audit exit, sys_exit tracepoint, and
 * ptrace/single-step reporting.
 */
void do_syscall_trace_leave(struct pt_regs *regs)
{
	int step;

	audit_syscall_exit(regs);

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_exit(regs, regs_return_value(regs));

	step = test_thread_flag(TIF_SINGLESTEP);

	/* report to the tracer on syscall exit or a pending single-step */
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		ptrace_report_syscall_exit(regs, step);
}
|