diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 88cdeb9f72d9..d563884833e9 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -39,6 +39,7 @@ config SUPERH32
 	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_ARCH_KGDB
+	select HAVE_HW_BREAKPOINT if CPU_SH4A
 	select ARCH_HIBERNATION_POSSIBLE if MMU

 config SUPERH64
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index e121c30f797d..46cb93477bcb 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -1,6 +1,8 @@
 include include/asm-generic/Kbuild.asm

-header-y += cachectl.h cpu-features.h
+header-y += cachectl.h
+header-y += cpu-features.h
+header-y += hw_breakpoint.h

 unifdef-y += unistd_32.h
 unifdef-y += unistd_64.h
diff --git a/arch/sh/include/asm/hw_breakpoint.h b/arch/sh/include/asm/hw_breakpoint.h
new file mode 100644
index 000000000000..0f4a00f60058
--- /dev/null
+++ b/arch/sh/include/asm/hw_breakpoint.h
@@ -0,0 +1,53 @@
+#ifndef __ASM_SH_HW_BREAKPOINT_H
+#define __ASM_SH_HW_BREAKPOINT_H
+
+#include
+#include
+#include
+
+#ifdef __KERNEL__
+#define __ARCH_HW_BREAKPOINT_H
+
+struct arch_hw_breakpoint {
+	char		*name;	/* Contains name of the symbol to set bkpt */
+	unsigned long	address;
+	unsigned long	asid;
+	u16		len;
+	u16		type;
+};
+
+enum {
+	SH_BREAKPOINT_READ	= (1 << 1),
+	SH_BREAKPOINT_WRITE	= (1 << 2),
+	SH_BREAKPOINT_RW	= SH_BREAKPOINT_READ | SH_BREAKPOINT_WRITE,
+
+	SH_BREAKPOINT_LEN_1	= (1 << 12),
+	SH_BREAKPOINT_LEN_2	= (1 << 13),
+	SH_BREAKPOINT_LEN_4	= SH_BREAKPOINT_LEN_1 | SH_BREAKPOINT_LEN_2,
+	SH_BREAKPOINT_LEN_8	= (1 << 14),
+};
+
+/* Total number of available UBC channels */
+#define HBP_NUM		1	/* XXX */
+
+struct perf_event;
+struct task_struct;
+struct pmu;
+
+extern int arch_check_va_in_userspace(unsigned long va, u16 hbp_len);
+extern int arch_validate_hwbkpt_settings(struct perf_event *bp,
+					 struct task_struct *tsk);
+extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
+					   unsigned long val, void *data);
+
+int arch_install_hw_breakpoint(struct perf_event *bp);
+void arch_uninstall_hw_breakpoint(struct perf_event *bp);
+void hw_breakpoint_pmu_read(struct perf_event *bp);
+void hw_breakpoint_pmu_unthrottle(struct perf_event *bp);
+
+extern void arch_fill_perf_breakpoint(struct perf_event *bp);
+
+extern struct pmu perf_ops_bp;
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_SH_HW_BREAKPOINT_H */
diff --git a/arch/sh/include/asm/kdebug.h b/arch/sh/include/asm/kdebug.h
index 985219f9759e..5f6d2e9ccb7c 100644
--- a/arch/sh/include/asm/kdebug.h
+++ b/arch/sh/include/asm/kdebug.h
@@ -6,6 +6,8 @@ enum die_val {
 	DIE_TRAP,
 	DIE_NMI,
 	DIE_OOPS,
+	DIE_BREAKPOINT,
+	DIE_SSTEP,
 };

 #endif /* __ASM_SH_KDEBUG_H */
diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h
index 9a8714945dc9..f4b54040dbc3 100644
--- a/arch/sh/include/asm/processor_32.h
+++ b/arch/sh/include/asm/processor_32.h
@@ -14,6 +14,7 @@
 #include
 #include
 #include
+#include

 /*
  * Default implementation of macro that returns current
@@ -99,8 +100,8 @@ struct thread_struct {
 	unsigned long sp;
 	unsigned long pc;

-	/* Hardware debugging registers */
-	unsigned long ubc_pc;
+	/* Save middle states of ptrace breakpoints */
+	struct perf_event *ptrace_bps[NR_UBC_CHANNELS];

 	/* floating point info */
 	union sh_fpu_union fpu;
@@ -111,9 +112,6 @@ struct thread_struct {
 #endif
 };

-/* Count of active tasks with UBC settings */
-extern int ubc_usercnt;
-
 #define INIT_THREAD  {					\
 	.sp = sizeof(init_stack) + (long) &init_stack,	\
 }
diff --git a/arch/sh/include/asm/system.h b/arch/sh/include/asm/system.h
index b5c5acdc8c0e..1014da8b3ed3 100644
--- a/arch/sh/include/asm/system.h
+++ b/arch/sh/include/asm/system.h
@@ -144,8 +144,6 @@ void per_cpu_trap_init(void);
 void default_idle(void);
 void cpu_idle_wait(void);

-asmlinkage void break_point_trap(void);
-
 #ifdef CONFIG_SUPERH32
 #define BUILD_TRAP_HANDLER(name)					\
 asmlinkage void name##_trap_handler(unsigned long r4, unsigned long r5,	\
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index bdeb9d46d17d..8ab9145bf50b 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -117,6 +117,7 @@ extern void free_thread_info(struct thread_info *ti);
 #define TIF_SECCOMP		6	/* secure computing */
 #define TIF_NOTIFY_RESUME	7	/* callback before returning to user */
 #define TIF_SYSCALL_TRACEPOINT	8	/* for ftrace syscall instrumentation */
+#define TIF_DEBUG		9	/* uses UBC */
 #define TIF_USEDFPU		16	/* FPU was used by this task this quantum (SMP) */
 #define TIF_POLLING_NRFLAG	17	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE		18
@@ -131,6 +132,7 @@ extern void free_thread_info(struct thread_info *ti);
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
+#define _TIF_DEBUG		(1 << TIF_DEBUG)
 #define _TIF_USEDFPU		(1 << TIF_USEDFPU)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
 #define _TIF_FREEZE		(1 << TIF_FREEZE)
diff --git a/arch/sh/include/asm/ubc.h b/arch/sh/include/asm/ubc.h
index 4ca4b7717371..dd7878197b6e 100644
--- a/arch/sh/include/asm/ubc.h
+++ b/arch/sh/include/asm/ubc.h
@@ -10,8 +10,8 @@
  */
 #ifndef __ASM_SH_UBC_H
 #define __ASM_SH_UBC_H
-#ifdef __KERNEL__

+#ifdef __KERNEL__
 #include

 /* User Break Controller */
@@ -60,6 +60,12 @@
 #define BRCR_UBDE	(1 << 0)
 #endif

+/*
+ * All SH parts have 2 UBC channels. I defy any hardware designer to
+ * invalidate this assertion.
+ */
+#define NR_UBC_CHANNELS	2
+
 #ifndef __ASSEMBLY__
 /* arch/sh/kernel/cpu/ubc.S */
 extern void ubc_sleep(void);
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
index a2d0a40f3848..649daadd4519 100644
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_DUMP_CODE)		+= disassemble.o
 obj-$(CONFIG_HIBERNATION)		+= swsusp.o
 obj-$(CONFIG_DWARF_UNWINDER)		+= dwarf.o

+obj-$(CONFIG_HAVE_HW_BREAKPOINT)		+= hw_breakpoint.o
 obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)	+= localtimer.o

 EXTRA_CFLAGS += -Werror
diff --git a/arch/sh/kernel/cpu/sh3/ex.S b/arch/sh/kernel/cpu/sh3/ex.S
index 46610c35c232..99b4d020179a 100644
--- a/arch/sh/kernel/cpu/sh3/ex.S
+++ b/arch/sh/kernel/cpu/sh3/ex.S
@@ -49,7 +49,7 @@ ENTRY(exception_handling_table)
 	.long	exception_error	! reserved_instruction (filled by trap_init) /* 180 */
 	.long	exception_error	! illegal_slot_instruction (filled by trap_init) /*1A0*/
 	.long	nmi_trap_handler	/* 1C0 */	! Allow trap to debugger
-	.long	break_point_trap	/* 1E0 */
+	.long	breakpoint_trap_handler	/* 1E0 */

 /*
  * Pad the remainder of the table out, exceptions residing in far
diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c
new file mode 100644
index 000000000000..ff3cb3d7df8f
--- /dev/null
+++ b/arch/sh/kernel/hw_breakpoint.c
@@ -0,0 +1,416 @@
+/*
+ * arch/sh/kernel/hw_breakpoint.c
+ *
+ * Unified kernel/user-space hardware breakpoint facility for the on-chip UBC.
+ *
+ * Copyright (C) 2009  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+struct ubc_context {
+	unsigned long pc;
+	unsigned long state;
+};
+
+/* Per cpu ubc channel state */
+static DEFINE_PER_CPU(struct ubc_context, ubc_ctx[HBP_NUM]);
+
+/*
+ * Stores the breakpoints currently in use on each breakpoint address
+ * register, for each CPU.
+ */
+static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);
+
+static int __init ubc_init(void)
+{
+	__raw_writel(0, UBC_CAMR0);
+	__raw_writel(0, UBC_CBR0);
+	__raw_writel(0, UBC_CBCR);
+
+	__raw_writel(UBC_CRR_BIE | UBC_CRR_PCB, UBC_CRR0);
+
+	/* dummy read for write posting */
+	(void)__raw_readl(UBC_CRR0);
+
+	return 0;
+}
+arch_initcall(ubc_init);
+
+/*
+ * Install a perf counter breakpoint.
+ *
+ * We seek a free UBC channel and use it for this breakpoint.
+ *
+ * Atomic: we hold the counter->ctx->lock and we only handle variables
+ * and registers local to this cpu.
+ */
+int arch_install_hw_breakpoint(struct perf_event *bp)
+{
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+	struct ubc_context *ubc_ctx;
+	int i;
+
+	for (i = 0; i < HBP_NUM; i++) {
+		struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
+
+		if (!*slot) {
+			*slot = bp;
+			break;
+		}
+	}
+
+	if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
+		return -EBUSY;
+
+	ubc_ctx = &__get_cpu_var(ubc_ctx[i]);
+
+	ubc_ctx->pc = info->address;
+	ubc_ctx->state = info->len | info->type;
+
+	__raw_writel(UBC_CBR_CE | ubc_ctx->state, UBC_CBR0);
+	__raw_writel(ubc_ctx->pc, UBC_CAR0);
+
+	return 0;
+}
+
+/*
+ * Uninstall the breakpoint contained in the given counter.
+ *
+ * First we search the debug address register it uses and then we disable
+ * it.
+ *
+ * Atomic: we hold the counter->ctx->lock and we only handle variables
+ * and registers local to this cpu.
+ */
+void arch_uninstall_hw_breakpoint(struct perf_event *bp)
+{
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+	struct ubc_context *ubc_ctx;
+	int i;
+
+	for (i = 0; i < HBP_NUM; i++) {
+		struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
+
+		if (*slot == bp) {
+			*slot = NULL;
+			break;
+		}
+	}
+
+	if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
+		return;
+
+	ubc_ctx = &__get_cpu_var(ubc_ctx[i]);
+	ubc_ctx->pc = 0;
+	ubc_ctx->state &= ~(info->len | info->type);
+
+	__raw_writel(ubc_ctx->pc, UBC_CBR0);
+	__raw_writel(ubc_ctx->state, UBC_CAR0);
+}
+
+static int get_hbp_len(u16 hbp_len)
+{
+	unsigned int len_in_bytes = 0;
+
+	switch (hbp_len) {
+	case SH_BREAKPOINT_LEN_1:
+		len_in_bytes = 1;
+		break;
+	case SH_BREAKPOINT_LEN_2:
+		len_in_bytes = 2;
+		break;
+	case SH_BREAKPOINT_LEN_4:
+		len_in_bytes = 4;
+		break;
+	case SH_BREAKPOINT_LEN_8:
+		len_in_bytes = 8;
+		break;
+	}
+	return len_in_bytes;
+}
+
+/*
+ * Check for virtual address in user space.
+ */
+int arch_check_va_in_userspace(unsigned long va, u16 hbp_len)
+{
+	unsigned int len;
+
+	len = get_hbp_len(hbp_len);
+
+	return (va <= TASK_SIZE - len);
+}
+
+/*
+ * Check for virtual address in kernel space.
+ */
+static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len)
+{
+	unsigned int len;
+
+	len = get_hbp_len(hbp_len);
+
+	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
+}
+
+/*
+ * Store a breakpoint's encoded address, length, and type.
+ */
+static int arch_store_info(struct perf_event *bp)
+{
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+
+	/*
+	 * User-space requests will always have the address field populated.
+	 * For kernel-addresses, either the address or symbol name can be
+	 * specified.
+	 */
+	if (info->name)
+		info->address = (unsigned long)kallsyms_lookup_name(info->name);
+	if (info->address) {
+		info->asid = get_asid();
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+int arch_bp_generic_fields(int sh_len, int sh_type,
+			   int *gen_len, int *gen_type)
+{
+	/* Len */
+	switch (sh_len) {
+	case SH_BREAKPOINT_LEN_1:
+		*gen_len = HW_BREAKPOINT_LEN_1;
+		break;
+	case SH_BREAKPOINT_LEN_2:
+		*gen_len = HW_BREAKPOINT_LEN_2;
+		break;
+	case SH_BREAKPOINT_LEN_4:
+		*gen_len = HW_BREAKPOINT_LEN_4;
+		break;
+	case SH_BREAKPOINT_LEN_8:
+		*gen_len = HW_BREAKPOINT_LEN_8;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Type */
+	switch (sh_type) {
+	case SH_BREAKPOINT_READ:
+		*gen_type = HW_BREAKPOINT_R;
+		break;
+	case SH_BREAKPOINT_WRITE:
+		*gen_type = HW_BREAKPOINT_W;
+		break;
+	case SH_BREAKPOINT_RW:
+		*gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int arch_build_bp_info(struct perf_event *bp)
+{
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+
+	info->address = bp->attr.bp_addr;
+
+	/* Len */
+	switch (bp->attr.bp_len) {
+	case HW_BREAKPOINT_LEN_1:
+		info->len = SH_BREAKPOINT_LEN_1;
+		break;
+	case HW_BREAKPOINT_LEN_2:
+		info->len = SH_BREAKPOINT_LEN_2;
+		break;
+	case HW_BREAKPOINT_LEN_4:
+		info->len = SH_BREAKPOINT_LEN_4;
+		break;
+	case HW_BREAKPOINT_LEN_8:
+		info->len = SH_BREAKPOINT_LEN_8;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Type */
+	switch (bp->attr.bp_type) {
+	case HW_BREAKPOINT_R:
+		info->type = SH_BREAKPOINT_READ;
+		break;
+	case HW_BREAKPOINT_W:
+		info->type = SH_BREAKPOINT_WRITE;
+		break;
+	case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
+		info->type = SH_BREAKPOINT_RW;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Validate the arch-specific HW Breakpoint register settings
+ */
+int arch_validate_hwbkpt_settings(struct perf_event *bp,
+				  struct task_struct *tsk)
+{
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+	unsigned int align;
+	int ret;
+
+	ret = arch_build_bp_info(bp);
+	if (ret)
+		return ret;
+
+	ret = -EINVAL;
+
+	switch (info->len) {
+	case SH_BREAKPOINT_LEN_1:
+		align = 0;
+		break;
+	case SH_BREAKPOINT_LEN_2:
+		align = 1;
+		break;
+	case SH_BREAKPOINT_LEN_4:
+		align = 3;
+		break;
+	case SH_BREAKPOINT_LEN_8:
+		align = 7;
+		break;
+	default:
+		return ret;
+	}
+
+	if (bp->callback)
+		ret = arch_store_info(bp);
+
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * Check that the low-order bits of the address are appropriate
+	 * for the alignment implied by len.
+	 */
+	if (info->address & align)
+		return -EINVAL;
+
+	/* Check that the virtual address is in the proper range */
+	if (tsk) {
+		if (!arch_check_va_in_userspace(info->address, info->len))
+			return -EFAULT;
+	} else {
+		if (!arch_check_va_in_kernelspace(info->address, info->len))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+/*
+ * Release the user breakpoints used by ptrace
+ */
+void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
+{
+	int i;
+	struct thread_struct *t = &tsk->thread;
+
+	for (i = 0; i < HBP_NUM; i++) {
+		unregister_hw_breakpoint(t->ptrace_bps[i]);
+		t->ptrace_bps[i] = NULL;
+	}
+}
+
+static int __kprobes hw_breakpoint_handler(struct die_args *args)
+{
+	int cpu, i, rc = NOTIFY_STOP;
+	struct perf_event *bp;
+	unsigned long val;
+
+	val = __raw_readl(UBC_CBR0);
+	__raw_writel(val & ~UBC_CBR_CE, UBC_CBR0);
+
+	cpu = get_cpu();
+	for (i = 0; i < HBP_NUM; i++) {
+		/*
+		 * The counter may be concurrently released but that can only
+		 * occur from a call_rcu() path. We can then safely fetch
+		 * the breakpoint, use its callback, touch its counter
+		 * while we are in an rcu_read_lock() path.
+		 */
+		rcu_read_lock();
+
+		bp = per_cpu(bp_per_reg[i], cpu);
+		if (bp) {
+			rc = NOTIFY_DONE;
+		} else {
+			rcu_read_unlock();
+			break;
+		}
+
+		(bp->callback)(bp, args->regs);
+
+		rcu_read_unlock();
+	}
+
+	if (bp) {
+		struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+
+		__raw_writel(UBC_CBR_CE | info->len | info->type, UBC_CBR0);
+		__raw_writel(info->address, UBC_CAR0);
+	}
+
+	put_cpu();
+
+	return rc;
+}
+
+BUILD_TRAP_HANDLER(breakpoint)
+{
+	unsigned long ex = lookup_exception_vector();
+	TRAP_HANDLER_DECL;
+
+	notify_die(DIE_BREAKPOINT, "breakpoint", regs, 0, ex, SIGTRAP);
+}
+
+/*
+ * Handle debug exception notifications.
+ */
+int __kprobes hw_breakpoint_exceptions_notify(struct notifier_block *unused,
+					      unsigned long val, void *data)
+{
+	if (val != DIE_BREAKPOINT)
+		return NOTIFY_DONE;
+
+	return hw_breakpoint_handler(data);
+}
+
+void hw_breakpoint_pmu_read(struct perf_event *bp)
+{
+	/* TODO */
+}
+
+void hw_breakpoint_pmu_unthrottle(struct perf_event *bp)
+{
+	/* TODO */
+}
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index 0673c4746be3..4a2c866f9773 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -25,6 +25,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -34,8 +35,6 @@
 #include
 #include

-int ubc_usercnt = 0;
-
 #ifdef CONFIG_32BIT
 static void watchdog_trigger_immediate(void)
 {
@@ -148,16 +147,15 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
  */
 void exit_thread(void)
 {
-	if (current->thread.ubc_pc) {
-		current->thread.ubc_pc = 0;
-		ubc_usercnt -= 1;
-	}
 }

 void flush_thread(void)
 {
-#if defined(CONFIG_SH_FPU)
 	struct task_struct *tsk = current;
+
+	flush_ptrace_hw_breakpoint(tsk);
+
+#if defined(CONFIG_SH_FPU)
 	/* Forget lazy FPU state */
 	clear_fpu(tsk, task_pt_regs(tsk));
 	clear_used_math();
@@ -195,9 +193,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 {
 	struct thread_info *ti = task_thread_info(p);
 	struct pt_regs *childregs;
-#if defined(CONFIG_SH_FPU) || defined(CONFIG_SH_DSP)
 	struct task_struct *tsk = current;
-#endif

 #if defined(CONFIG_SH_FPU)
 	unlazy_fpu(tsk, regs);
@@ -234,53 +230,11 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	p->thread.sp = (unsigned long) childregs;
 	p->thread.pc = (unsigned long) ret_from_fork;

-	p->thread.ubc_pc = 0;
+	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

 	return 0;
 }

-/* Tracing by user break controller. */
-static void ubc_set_tracing(int asid, unsigned long pc)
-{
-#if defined(CONFIG_CPU_SH4A)
-	unsigned long val;
-
-	val = (UBC_CBR_ID_INST | UBC_CBR_RW_READ | UBC_CBR_CE);
-	val |= (UBC_CBR_AIE | UBC_CBR_AIV_SET(asid));
-
-	ctrl_outl(val, UBC_CBR0);
-	ctrl_outl(pc, UBC_CAR0);
-	ctrl_outl(0x0, UBC_CAMR0);
-	ctrl_outl(0x0, UBC_CBCR);
-
-	val = (UBC_CRR_RES | UBC_CRR_PCB | UBC_CRR_BIE);
-	ctrl_outl(val, UBC_CRR0);
-
-	/* Read UBC register that we wrote last, for checking update */
-	val = ctrl_inl(UBC_CRR0);
-
-#else	/* CONFIG_CPU_SH4A */
-	ctrl_outl(pc, UBC_BARA);
-
-#ifdef CONFIG_MMU
-	ctrl_outb(asid, UBC_BASRA);
-#endif
-
-	ctrl_outl(0, UBC_BAMRA);
-
-	if (current_cpu_data.type == CPU_SH7729 ||
-	    current_cpu_data.type == CPU_SH7710 ||
-	    current_cpu_data.type == CPU_SH7712 ||
-	    current_cpu_data.type == CPU_SH7203){
-		ctrl_outw(BBR_INST | BBR_READ | BBR_CPU, UBC_BBRA);
-		ctrl_outl(BRCR_PCBA | BRCR_PCTE, UBC_BRCR);
-	} else {
-		ctrl_outw(BBR_INST | BBR_READ, UBC_BBRA);
-		ctrl_outw(BRCR_PCBA, UBC_BRCR);
-	}
-#endif	/* CONFIG_CPU_SH4A */
-}
-
 /*
  * switch_to(x,y) should switch tasks from x to y.
  *
@@ -302,25 +256,6 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 		     : "r" (task_thread_info(next)));
 #endif

-	/* If no tasks are using the UBC, we're done */
-	if (ubc_usercnt == 0)
-		/* If no tasks are using the UBC, we're done */;
-	else if (next->thread.ubc_pc && next->mm) {
-		int asid = 0;
-#ifdef CONFIG_MMU
-		asid |= cpu_asid(smp_processor_id(), next->mm);
-#endif
-		ubc_set_tracing(asid, next->thread.ubc_pc);
-	} else {
-#if defined(CONFIG_CPU_SH4A)
-		ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
-		ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
-#else
-		ctrl_outw(0, UBC_BBRA);
-		ctrl_outw(0, UBC_BBRB);
-#endif
-	}
-
 	return prev;
 }

@@ -412,20 +347,3 @@ unsigned long get_wchan(struct task_struct *p)

 	return pc;
 }
-
-asmlinkage void break_point_trap(void)
-{
-	/* Clear tracing. */
-#if defined(CONFIG_CPU_SH4A)
-	ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
-	ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
-#else
-	ctrl_outw(0, UBC_BBRA);
-	ctrl_outw(0, UBC_BBRB);
-	ctrl_outl(0, UBC_BRCR);
-#endif
-	current->thread.ubc_pc = 0;
-	ubc_usercnt -= 1;
-
-	force_sig(SIGTRAP, current);
-}
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
index 9be35f348093..bdb10446cbac 100644
--- a/arch/sh/kernel/ptrace_32.c
+++ b/arch/sh/kernel/ptrace_32.c
@@ -65,31 +65,12 @@ static inline int put_stack_long(struct task_struct *task, int offset,

 void user_enable_single_step(struct task_struct *child)
 {
-	/* Next scheduling will set up UBC */
-	if (child->thread.ubc_pc == 0)
-		ubc_usercnt += 1;
-
-	child->thread.ubc_pc = get_stack_long(child,
-					      offsetof(struct pt_regs, pc));
-
 	set_tsk_thread_flag(child, TIF_SINGLESTEP);
 }

 void user_disable_single_step(struct task_struct *child)
 {
 	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
-
-	/*
-	 * Ensure the UBC is not programmed at the next context switch.
-	 *
-	 * Normally this is not needed but there are sequences such as
-	 * singlestep, signal delivery, and continue that leave the
-	 * ubc_pc non-zero leading to spurious SIGTRAPs.
-	 */
-	if (child->thread.ubc_pc != 0) {
-		ubc_usercnt -= 1;
-		child->thread.ubc_pc = 0;
-	}
 }

 /*
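
The UBC channel that arch_install_hw_breakpoint() programs above is normally reached through the generic hw-breakpoint layer rather than by writing CBR0/CAR0 directly. The module sketch below is illustrative only and is not part of the patch: it assumes the generic <linux/hw_breakpoint.h> helpers (hw_breakpoint_init(), register_wide_hw_breakpoint(), unregister_wide_hw_breakpoint()) with the handler signature used by later mainline kernels, which differs slightly from the bp->callback form this patch targets, and the watched symbol is a made-up example.

/*
 * Hypothetical consumer: watch writes to a kernel symbol on every CPU
 * through the generic hw-breakpoint API that this patch backs with the UBC.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

static struct perf_event * __percpu *wp;	/* one event per CPU */

/* Handler signature per later mainline; the patch above invokes bp->callback. */
static void wp_handler(struct perf_event *bp, struct perf_sample_data *data,
		       struct pt_regs *regs)
{
	pr_info("watched symbol was written to\n");
	dump_stack();
}

static int __init wp_init(void)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);			/* sane breakpoint defaults */
	attr.bp_addr = kallsyms_lookup_name("jiffies");	/* hypothetical target */
	attr.bp_len  = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W;

	wp = register_wide_hw_breakpoint(&attr, wp_handler, NULL);
	if (IS_ERR((void __force *)wp))
		return PTR_ERR((void __force *)wp);

	return 0;
}

static void __exit wp_exit(void)
{
	unregister_wide_hw_breakpoint(wp);
}

module_init(wp_init);
module_exit(wp_exit);
MODULE_LICENSE("GPL");

With HBP_NUM pinned at 1 for now, only a single such breakpoint can be live per CPU at a time; requests beyond that fail in arch_install_hw_breakpoint() with -EBUSY.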