2005-06-24 13:01:16 +08:00
|
|
|
/*
|
|
|
|
* arch/xtensa/kernel/traps.c
|
|
|
|
*
|
|
|
|
* Exception handling.
|
|
|
|
*
|
|
|
|
* Derived from code with the following copyrights:
|
|
|
|
* Copyright (C) 1994 - 1999 by Ralf Baechle
|
|
|
|
* Modified for R3000 by Paul M. Antoine, 1995, 1996
|
|
|
|
* Complete output from die() by Ulf Carlsson, 1998
|
|
|
|
* Copyright (C) 1999 Silicon Graphics, Inc.
|
|
|
|
*
|
|
|
|
* Essentially rewritten for the Xtensa architecture port.
|
|
|
|
*
|
2013-04-15 13:20:48 +08:00
|
|
|
* Copyright (C) 2001 - 2013 Tensilica Inc.
|
2005-06-24 13:01:16 +08:00
|
|
|
*
|
|
|
|
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
|
|
|
|
* Chris Zankel <chris@zankel.net>
|
|
|
|
* Marc Gauthier<marc@tensilica.com, marc@alumni.uwaterloo.ca>
|
|
|
|
* Kevin Chea
|
|
|
|
*
|
|
|
|
* This file is subject to the terms and conditions of the GNU General Public
|
|
|
|
* License. See the file "COPYING" in the main directory of this archive
|
|
|
|
* for more details.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/kernel.h>
|
2017-02-09 01:51:30 +08:00
|
|
|
#include <linux/sched/signal.h>
|
2017-02-09 01:51:35 +08:00
|
|
|
#include <linux/sched/debug.h>
|
2017-02-05 21:31:22 +08:00
|
|
|
#include <linux/sched/task_stack.h>
|
2005-06-24 13:01:16 +08:00
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/stringify.h>
|
|
|
|
#include <linux/kallsyms.h>
|
2005-07-13 04:58:26 +08:00
|
|
|
#include <linux/delay.h>
|
2009-03-11 03:55:49 +08:00
|
|
|
#include <linux/hardirq.h>
|
2017-12-16 04:00:30 +08:00
|
|
|
#include <linux/ratelimit.h>
|
2020-06-09 12:32:42 +08:00
|
|
|
#include <linux/pgtable.h>
|
2005-06-24 13:01:16 +08:00
|
|
|
|
2013-04-15 13:20:48 +08:00
|
|
|
#include <asm/stacktrace.h>
|
2005-06-24 13:01:16 +08:00
|
|
|
#include <asm/ptrace.h>
|
|
|
|
#include <asm/timex.h>
|
2016-12-25 03:46:01 +08:00
|
|
|
#include <linux/uaccess.h>
|
2005-06-24 13:01:16 +08:00
|
|
|
#include <asm/processor.h>
|
2013-02-03 09:39:22 +08:00
|
|
|
#include <asm/traps.h>
|
2016-01-24 15:32:10 +08:00
|
|
|
#include <asm/hw_breakpoint.h>
|
2005-06-24 13:01:16 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Machine specific interrupt handlers
|
|
|
|
*/
|
|
|
|
|
2022-04-15 17:47:04 +08:00
|
|
|
static void do_illegal_instruction(struct pt_regs *regs);
|
2022-05-13 18:40:54 +08:00
|
|
|
static void do_div0(struct pt_regs *regs);
|
2022-04-15 17:47:04 +08:00
|
|
|
static void do_interrupt(struct pt_regs *regs);
|
|
|
|
#if XTENSA_FAKE_NMI
|
|
|
|
static void do_nmi(struct pt_regs *regs);
|
|
|
|
#endif
|
2023-06-14 07:51:18 +08:00
|
|
|
#ifdef CONFIG_XTENSA_LOAD_STORE
|
|
|
|
static void do_load_store(struct pt_regs *regs);
|
|
|
|
#endif
|
2022-04-15 17:47:04 +08:00
|
|
|
static void do_unaligned_user(struct pt_regs *regs);
|
|
|
|
static void do_multihit(struct pt_regs *regs);
|
2022-04-15 18:05:31 +08:00
|
|
|
#if XTENSA_HAVE_COPROCESSORS
|
|
|
|
static void do_coprocessor(struct pt_regs *regs);
|
|
|
|
#endif
|
2022-04-15 17:47:04 +08:00
|
|
|
static void do_debug(struct pt_regs *regs);
|
2005-06-24 13:01:16 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* The vector table must be preceded by a save area (which
|
|
|
|
* implies it must be in RAM, unless one places RAM immediately
|
|
|
|
* before a ROM and puts the vector at the start of the ROM (!))
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* Flags selecting which dispatch table(s) an entry is installed into. */
#define KRNL		0x01	/* fast kernel-mode handler table */
#define USER		0x02	/* fast user-mode handler table */

/*
 * Expand to two table entries for coprocessor 'x': a fast handler for the
 * coprocessor-disabled exception in both modes, plus the default C handler
 * used when the fast path punts.
 */
#define COPROCESSOR(x)							\
{ EXCCAUSE_COPROCESSOR ## x ## _DISABLED, USER|KRNL, fast_coprocessor },\
{ EXCCAUSE_COPROCESSOR ## x ## _DISABLED, 0, do_coprocessor }

/* One compressed entry of the exception dispatch initialization table. */
typedef struct {
	int cause;	/* EXCCAUSE_* number; -1 terminates the table */
	int fast;	/* USER/KRNL fast-handler flags; 0 = default C handler */
	void* handler;	/* handler to install for this cause */
} dispatch_init_table_t;

/*
 * Compressed init table expanded by trap_init() into the per-CPU
 * fast-user, fast-kernel, and default-C dispatch tables.
 */
static dispatch_init_table_t __initdata dispatch_init_table[] = {

#ifdef CONFIG_USER_ABI_CALL0_PROBE
{ EXCCAUSE_ILLEGAL_INSTRUCTION,	USER,	   fast_illegal_instruction_user },
#endif
{ EXCCAUSE_ILLEGAL_INSTRUCTION,	0,	   do_illegal_instruction},
{ EXCCAUSE_SYSTEM_CALL,		USER,	   fast_syscall_user },
{ EXCCAUSE_SYSTEM_CALL,		0,	   system_call },
/* EXCCAUSE_INSTRUCTION_FETCH unhandled */
#ifdef CONFIG_XTENSA_LOAD_STORE
{ EXCCAUSE_LOAD_STORE_ERROR,	USER|KRNL, fast_load_store },
{ EXCCAUSE_LOAD_STORE_ERROR,	0,	   do_load_store },
#endif
{ EXCCAUSE_LEVEL1_INTERRUPT,	0,	   do_interrupt },
#ifdef SUPPORT_WINDOWED
{ EXCCAUSE_ALLOCA,		USER|KRNL, fast_alloca },
#endif
{ EXCCAUSE_INTEGER_DIVIDE_BY_ZERO, 0,	   do_div0 },
/* EXCCAUSE_PRIVILEGED unhandled */
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION || \
		IS_ENABLED(CONFIG_XTENSA_LOAD_STORE)
#ifdef CONFIG_XTENSA_UNALIGNED_USER
{ EXCCAUSE_UNALIGNED,		USER,	   fast_unaligned },
#endif
{ EXCCAUSE_UNALIGNED,		KRNL,	   fast_unaligned },
#endif
{ EXCCAUSE_UNALIGNED,		0,	   do_unaligned_user },
#ifdef CONFIG_MMU
{ EXCCAUSE_ITLB_MISS,			0,	   do_page_fault },
{ EXCCAUSE_ITLB_MISS,			USER|KRNL, fast_second_level_miss},
{ EXCCAUSE_DTLB_MISS,			USER|KRNL, fast_second_level_miss},
{ EXCCAUSE_DTLB_MISS,			0,	   do_page_fault },
{ EXCCAUSE_STORE_CACHE_ATTRIBUTE,	USER|KRNL, fast_store_prohibited },
#endif /* CONFIG_MMU */
#ifdef CONFIG_PFAULT
{ EXCCAUSE_ITLB_MULTIHIT,		0,	   do_multihit },
{ EXCCAUSE_ITLB_PRIVILEGE,		0,	   do_page_fault },
{ EXCCAUSE_FETCH_CACHE_ATTRIBUTE,	0,	   do_page_fault },
{ EXCCAUSE_DTLB_MULTIHIT,		0,	   do_multihit },
{ EXCCAUSE_DTLB_PRIVILEGE,		0,	   do_page_fault },
{ EXCCAUSE_STORE_CACHE_ATTRIBUTE,	0,	   do_page_fault },
{ EXCCAUSE_LOAD_CACHE_ATTRIBUTE,	0,	   do_page_fault },
#endif
/* XCCHAL_EXCCAUSE_FLOATING_POINT unhandled */
#if XTENSA_HAVE_COPROCESSOR(0)
COPROCESSOR(0),
#endif
#if XTENSA_HAVE_COPROCESSOR(1)
COPROCESSOR(1),
#endif
#if XTENSA_HAVE_COPROCESSOR(2)
COPROCESSOR(2),
#endif
#if XTENSA_HAVE_COPROCESSOR(3)
COPROCESSOR(3),
#endif
#if XTENSA_HAVE_COPROCESSOR(4)
COPROCESSOR(4),
#endif
#if XTENSA_HAVE_COPROCESSOR(5)
COPROCESSOR(5),
#endif
#if XTENSA_HAVE_COPROCESSOR(6)
COPROCESSOR(6),
#endif
#if XTENSA_HAVE_COPROCESSOR(7)
COPROCESSOR(7),
#endif
#if XTENSA_FAKE_NMI
{ EXCCAUSE_MAPPED_NMI,			0,		do_nmi },
#endif
{ EXCCAUSE_MAPPED_DEBUG,		0,		do_debug },
{ -1, -1, 0 }

};
|
|
|
|
|
|
|
|
/* The exception table <exc_table> serves two functions:
 * 1. it contains three dispatch tables (fast_user, fast_kernel, default-c)
 * 2. it is a temporary memory buffer for the exception handlers.
 */

/* Per-CPU exception dispatch tables; EXCSAVE1 points at this CPU's copy. */
DEFINE_PER_CPU(struct exc_table, exc_table);
/* Per-CPU debug-exception state; EXCSAVE<debuglevel> points at it. */
DEFINE_PER_CPU(struct debug_table, debug_table);

void die(const char*, struct pt_regs*, long);
|
|
|
|
|
|
|
|
/*
 * Invoke die() (which does not return) when the trapping context was
 * kernel mode; a user-mode trap falls through to the caller's signal
 * delivery path.
 */
static inline void
__die_if_kernel(const char *str, struct pt_regs *regs, long err)
{
	if (user_mode(regs))
		return;

	die(str, regs, err);
}
|
|
|
|
|
2023-05-08 07:18:17 +08:00
|
|
|
#ifdef CONFIG_PRINT_USER_CODE_ON_UNHANDLED_EXCEPTION
/*
 * Hex-dump 32 bytes of user text surrounding the faulting PC
 * (rounded down to a 16-byte boundary) to aid post-mortem debugging.
 * Silently does nothing if the user memory cannot be read.
 */
static inline void dump_user_code(struct pt_regs *regs)
{
	char buf[32];

	if (copy_from_user(buf, (void __user *)(regs->pc & -16), sizeof(buf)) == 0) {
		print_hex_dump(KERN_INFO, " ", DUMP_PREFIX_NONE,
			       32, 1, buf, sizeof(buf), false);

	}
}
#else
/* No-op when CONFIG_PRINT_USER_CODE_ON_UNHANDLED_EXCEPTION is disabled. */
static inline void dump_user_code(struct pt_regs *regs)
{
}
#endif
|
|
|
|
|
2005-06-24 13:01:16 +08:00
|
|
|
/*
 * Unhandled Exceptions. Kill user task or panic if in kernel space.
 */

void do_unhandled(struct pt_regs *regs)
{
	__die_if_kernel("Caught unhandled exception - should not happen",
			regs, SIGKILL);

	/* If in user mode, send SIGILL signal to current process */
	pr_info_ratelimited("Caught unhandled exception in '%s' "
			    "(pid = %d, pc = %#010lx) - should not happen\n"
			    "\tEXCCAUSE is %ld\n",
			    current->comm, task_pid_nr(current), regs->pc,
			    regs->exccause);
	/* Optionally dump the offending user code (config-dependent no-op). */
	dump_user_code(regs);
	force_sig(SIGILL);
}
|
|
|
|
|
|
|
|
/*
 * Multi-hit exception. This is fatal!
 */

static void do_multihit(struct pt_regs *regs)
{
	/* Multiple TLB entries matched one address; die() does not return. */
	die("Caught multihit exception", regs, SIGKILL);
}
|
|
|
|
|
|
|
|
/*
 * IRQ handler.
 */

#if XTENSA_FAKE_NMI

/* True iff v is zero or a power of two (at most one bit set). */
#define IS_POW2(v) (((v) & ((v) - 1)) == 0)

/*
 * Fake NMI for the profiling interrupt is only clean when the PMM IRQ is
 * alone at its level and that level is XCHAL_EXCM_LEVEL.  If not, warn at
 * build time and install a runtime bugcheck instead of the empty stub.
 */
#if !(PROFILING_INTLEVEL == XCHAL_EXCM_LEVEL && \
      IS_POW2(XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL)))
#warning "Fake NMI is requested for PMM, but there are other IRQs at or above its level."
#warning "Fake NMI will be used, but there will be a bugcheck if one of those IRQs fire."

static inline void check_valid_nmi(void)
{
	unsigned intread = xtensa_get_sr(interrupt);
	unsigned intenable = xtensa_get_sr(intenable);

	/*
	 * Bugcheck if any IRQ other than the profiling interrupt (or the
	 * levels strictly below it) is both pending and enabled — such an
	 * IRQ would have been swallowed by the fake-NMI path.
	 */
	BUG_ON(intread & intenable &
	       ~(XTENSA_INTLEVEL_ANDBELOW_MASK(PROFILING_INTLEVEL) ^
		 XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL) ^
		 BIT(XCHAL_PROFILING_INTERRUPT)));
}

#else

static inline void check_valid_nmi(void)
{
}

#endif

irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id);

/* Per-CPU count of fake-NMI (profiling) interrupts taken. */
DEFINE_PER_CPU(unsigned long, nmi_count);

/*
 * Fake-NMI entry point: runs the PMU interrupt handler inside
 * nmi_enter()/nmi_exit() so it may preempt normal IRQ handling.
 */
static void do_nmi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	nmi_enter();
	++*this_cpu_ptr(&nmi_count);
	check_valid_nmi();
	xtensa_pmu_irq_handler(0, NULL);
	nmi_exit();
	set_irq_regs(old_regs);
}
#endif
|
|
|
|
|
2022-04-15 17:47:04 +08:00
|
|
|
/*
 * Level-1 interrupt dispatcher: repeatedly picks the highest-priority
 * pending+enabled interrupt (up to LOCKLEVEL) and runs its handler,
 * using a rotating "unhandled" mask so same-level IRQs are serviced in
 * round-robin fashion rather than starving each other.
 */
static void do_interrupt(struct pt_regs *regs)
{
	/* Bitmask of interrupt sources at each interrupt level. */
	static const unsigned int_level_mask[] = {
		0,
		XCHAL_INTLEVEL1_MASK,
		XCHAL_INTLEVEL2_MASK,
		XCHAL_INTLEVEL3_MASK,
		XCHAL_INTLEVEL4_MASK,
		XCHAL_INTLEVEL5_MASK,
		XCHAL_INTLEVEL6_MASK,
		XCHAL_INTLEVEL7_MASK,
	};
	struct pt_regs *old_regs = set_irq_regs(regs);
	/* IRQs not yet serviced in the current round-robin pass. */
	unsigned unhandled = ~0u;

	irq_enter();

	for (;;) {
		/* Re-read pending/enabled state each iteration. */
		unsigned intread = xtensa_get_sr(interrupt);
		unsigned intenable = xtensa_get_sr(intenable);
		unsigned int_at_level = intread & intenable;
		unsigned level;

		/* Find the highest level with a pending, enabled IRQ. */
		for (level = LOCKLEVEL; level > 0; --level) {
			if (int_at_level & int_level_mask[level]) {
				int_at_level &= int_level_mask[level];
				/*
				 * Prefer IRQs that have not yet been served
				 * this pass; once all at this level have
				 * been served, start a new pass.
				 */
				if (int_at_level & unhandled)
					int_at_level &= unhandled;
				else
					unhandled |= int_level_mask[level];
				break;
			}
		}

		/* No pending interrupt at any level: done. */
		if (level == 0)
			break;

		/* clear lowest pending irq in the unhandled mask */
		unhandled ^= (int_at_level & -int_at_level);
		do_IRQ(__ffs(int_at_level), regs);
	}

	irq_exit();
	set_irq_regs(old_regs);
}
|
|
|
|
|
2022-05-13 23:11:14 +08:00
|
|
|
static bool check_div0(struct pt_regs *regs)
|
|
|
|
{
|
|
|
|
static const u8 pattern[] = {'D', 'I', 'V', '0'};
|
|
|
|
const u8 *p;
|
|
|
|
u8 buf[5];
|
|
|
|
|
|
|
|
if (user_mode(regs)) {
|
|
|
|
if (copy_from_user(buf, (void __user *)regs->pc + 2, 5))
|
2022-05-19 07:09:53 +08:00
|
|
|
return false;
|
2022-05-13 23:11:14 +08:00
|
|
|
p = buf;
|
|
|
|
} else {
|
|
|
|
p = (const u8 *)regs->pc + 2;
|
|
|
|
}
|
|
|
|
|
|
|
|
return memcmp(p, pattern, sizeof(pattern)) == 0 ||
|
|
|
|
memcmp(p + 1, pattern, sizeof(pattern)) == 0;
|
|
|
|
}
|
|
|
|
|
2005-06-24 13:01:16 +08:00
|
|
|
/*
 * Illegal instruction. Fatal if in kernel space.
 */

static void do_illegal_instruction(struct pt_regs *regs)
{
#ifdef CONFIG_USER_ABI_CALL0_PROBE
	/*
	 * When call0 application encounters an illegal instruction fast
	 * exception handler will attempt to set PS.WOE and retry failing
	 * instruction.
	 * If we get here we know that that instruction is also illegal
	 * with PS.WOE set, so it's not related to the windowed option
	 * hence PS.WOE may be cleared.
	 */
	if (regs->pc == current_thread_info()->ps_woe_fix_addr)
		regs->ps &= ~PS_WOE_MASK;
#endif
	/* A 'DIV0' marker after the opcode means this trap is really a
	 * software division by zero; route it to the dedicated handler. */
	if (check_div0(regs)) {
		do_div0(regs);
		return;
	}

	__die_if_kernel("Illegal instruction in kernel", regs, SIGKILL);

	/* If in user mode, send SIGILL signal to current process. */

	pr_info_ratelimited("Illegal Instruction in '%s' (pid = %d, pc = %#010lx)\n",
			    current->comm, task_pid_nr(current), regs->pc);
	force_sig(SIGILL);
}
|
|
|
|
|
2022-05-13 18:40:54 +08:00
|
|
|
/* Integer division by zero: fatal in kernel, SIGFPE/FPE_INTDIV in user. */
static void do_div0(struct pt_regs *regs)
{
	__die_if_kernel("Unhandled division by 0 in kernel", regs, SIGKILL);
	force_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)regs->pc);
}
|
2005-06-24 13:01:16 +08:00
|
|
|
|
2023-06-14 07:51:18 +08:00
|
|
|
#ifdef CONFIG_XTENSA_LOAD_STORE
|
|
|
|
static void do_load_store(struct pt_regs *regs)
|
|
|
|
{
|
|
|
|
__die_if_kernel("Unhandled load/store exception in kernel",
|
|
|
|
regs, SIGKILL);
|
|
|
|
|
|
|
|
pr_info_ratelimited("Load/store error to %08lx in '%s' (pid = %d, pc = %#010lx)\n",
|
|
|
|
regs->excvaddr, current->comm,
|
|
|
|
task_pid_nr(current), regs->pc);
|
|
|
|
force_sig_fault(SIGBUS, BUS_ADRERR, (void *)regs->excvaddr);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2005-06-24 13:01:16 +08:00
|
|
|
/*
 * Handle unaligned memory accesses from user space. Kill task.
 *
 * If CONFIG_UNALIGNED_USER is not set, we don't allow unaligned memory
 * accesses from user space.
 */

static void do_unaligned_user(struct pt_regs *regs)
{
	__die_if_kernel("Unhandled unaligned exception in kernel",
			regs, SIGKILL);

	pr_info_ratelimited("Unaligned memory access to %08lx in '%s' "
			    "(pid = %d, pc = %#010lx)\n",
			    regs->excvaddr, current->comm,
			    task_pid_nr(current), regs->pc);
	force_sig_fault(SIGBUS, BUS_ADRALN, (void *) regs->excvaddr);
}
|
|
|
|
|
2022-04-15 18:05:31 +08:00
|
|
|
#if XTENSA_HAVE_COPROCESSORS
|
|
|
|
static void do_coprocessor(struct pt_regs *regs)
|
|
|
|
{
|
|
|
|
coprocessor_flush_release_all(current_thread_info());
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2016-01-24 15:32:10 +08:00
|
|
|
/* Handle debug events.
 * When CONFIG_HAVE_HW_BREAKPOINT is on this handler is called with
 * preemption disabled to avoid rescheduling and keep mapping of hardware
 * breakpoint structures to debug registers intact, so that
 * DEBUGCAUSE.DBNUM could be used in case of data breakpoint hit.
 */
static void do_debug(struct pt_regs *regs)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	int ret = check_hw_breakpoint(regs);

	/* Re-enable preemption that the entry path disabled (see above). */
	preempt_enable();
	if (ret == 0)
		return;	/* handled as a hardware breakpoint */
#endif
	__die_if_kernel("Breakpoint in kernel", regs, SIGKILL);

	/* If in user mode, send SIGTRAP signal to current process */

	force_sig(SIGTRAP);
}
|
|
|
|
|
|
|
|
|
2017-12-16 08:08:16 +08:00
|
|
|
/*
 * Install 'handler' for exception 'cause' in the given dispatch table
 * ('type' is one of the exc_table member arrays) on every possible CPU.
 */
#define set_handler(type, cause, handler)				\
	do {								\
		unsigned int cpu;					\
									\
		for_each_possible_cpu(cpu)				\
			per_cpu(exc_table, cpu).type[cause] = (handler);\
	} while (0)
|
2013-10-17 06:42:26 +08:00
|
|
|
|
2012-11-19 12:30:15 +08:00
|
|
|
/* Set exception C handler - for temporary use when probing exceptions */

/*
 * Replace the default C handler for 'cause' on all CPUs and return the
 * previous handler (taken from CPU 0's table) so it can be restored.
 */
xtensa_exception_handler *
__init trap_set_handler(int cause, xtensa_exception_handler *handler)
{
	void *previous = per_cpu(exc_table, 0).default_handler[cause];

	set_handler(default_handler, cause, handler);
	return previous;
}
|
|
|
|
|
|
|
|
|
2013-10-17 06:42:28 +08:00
|
|
|
/* Point EXCSAVE1 at this CPU's exception table for the exception vectors. */
static void trap_init_excsave(void)
{
	xtensa_set_sr(this_cpu_ptr(&exc_table), excsave1);
}
|
|
|
|
|
2016-03-07 06:36:33 +08:00
|
|
|
/*
 * Register the debug exception handler and point the debug-level
 * EXCSAVE register at this CPU's debug_table.
 */
static void trap_init_debug(void)
{
	unsigned long debugsave = (unsigned long)this_cpu_ptr(&debug_table);

	this_cpu_ptr(&debug_table)->debug_exception = debug_exception;
	/* wsr into EXCSAVE<XCHAL_DEBUGLEVEL>; no xtensa_set_sr() helper here
	 * because the register name is built with __stringify(). */
	__asm__ __volatile__("wsr %0, excsave" __stringify(XCHAL_DEBUGLEVEL)
			     :: "a"(debugsave));
}
|
|
|
|
|
2005-06-24 13:01:16 +08:00
|
|
|
/*
 * Initialize dispatch tables.
 *
 * The exception vectors are stored compressed the __init section in the
 * dispatch_init_table. This function initializes the following three tables
 * from that compressed table:
 * - fast user    first dispatch table for user exceptions
 * - fast kernel  first dispatch table for kernel exceptions
 * - default C-handler   C-handler called by the default fast handler.
 *
 * See vectors.S for more details.
 */

void __init trap_init(void)
{
	int i;

	/* Setup default vectors. */

	for (i = 0; i < EXCCAUSE_N; i++) {
		set_handler(fast_user_handler, i, user_exception);
		set_handler(fast_kernel_handler, i, kernel_exception);
		set_handler(default_handler, i, do_unhandled);
	}

	/* Setup specific handlers. */

	for(i = 0; dispatch_init_table[i].cause >= 0; i++) {
		int fast = dispatch_init_table[i].fast;
		int cause = dispatch_init_table[i].cause;
		void *handler = dispatch_init_table[i].handler;

		/* fast == 0 installs a default C handler; USER/KRNL flags
		 * install fast first-dispatch handlers per mode. */
		if (fast == 0)
			set_handler(default_handler, cause, handler);
		if ((fast & USER) != 0)
			set_handler(fast_user_handler, cause, handler);
		if ((fast & KRNL) != 0)
			set_handler(fast_kernel_handler, cause, handler);
	}

	/* Initialize EXCSAVE_1 to hold the address of the exception table. */
	trap_init_excsave();
	trap_init_debug();
}
|
2005-06-24 13:01:16 +08:00
|
|
|
|
2013-10-17 06:42:26 +08:00
|
|
|
#ifdef CONFIG_SMP
|
2013-10-17 06:42:28 +08:00
|
|
|
void secondary_trap_init(void)
|
2013-10-17 06:42:26 +08:00
|
|
|
{
|
|
|
|
trap_init_excsave();
|
2016-03-07 06:36:33 +08:00
|
|
|
trap_init_debug();
|
2005-06-24 13:01:16 +08:00
|
|
|
}
|
2013-10-17 06:42:26 +08:00
|
|
|
#endif
|
2005-06-24 13:01:16 +08:00
|
|
|
|
|
|
|
/*
 * This function dumps the current valid window frame and other base registers.
 */

void show_regs(struct pt_regs * regs)
{
	int i;

	show_regs_print_info(KERN_DEFAULT);

	/* a0..a15 of the current window, 8 registers per line. */
	for (i = 0; i < 16; i++) {
		if ((i % 8) == 0)
			pr_info("a%02d:", i);
		pr_cont(" %08lx", regs->areg[i]);
	}
	pr_cont("\n");
	pr_info("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n",
		regs->pc, regs->ps, regs->depc, regs->excvaddr);
	pr_info("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n",
		regs->lbeg, regs->lend, regs->lcount, regs->sar);
	/* Window state is only meaningful for user-mode frames. */
	if (user_mode(regs))
		pr_cont("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n",
			regs->windowbase, regs->windowstart, regs->wmask,
			regs->syscall);
}
|
|
|
|
|
2013-04-15 13:20:48 +08:00
|
|
|
static int show_trace_cb(struct stackframe *frame, void *data)
|
2009-05-11 21:43:33 +08:00
|
|
|
{
|
2020-06-09 12:32:04 +08:00
|
|
|
const char *loglvl = data;
|
|
|
|
|
2017-04-01 06:58:40 +08:00
|
|
|
if (kernel_text_address(frame->pc))
|
2020-06-09 12:32:04 +08:00
|
|
|
printk("%s [<%08lx>] %pB\n",
|
|
|
|
loglvl, frame->pc, (void *)frame->pc);
|
2013-04-15 13:20:48 +08:00
|
|
|
return 0;
|
2009-05-11 21:43:33 +08:00
|
|
|
}
|
|
|
|
|
2020-06-09 12:32:04 +08:00
|
|
|
/*
 * Print a call trace by unwinding from 'sp' (or from the task's saved
 * stack pointer when sp is NULL), one line per kernel-text frame.
 */
static void show_trace(struct task_struct *task, unsigned long *sp,
		       const char *loglvl)
{
	if (!sp)
		sp = stack_pointer(task);

	printk("%sCall Trace:\n", loglvl);
	walk_stackframe(sp, show_trace_cb, (void *)loglvl);
}
|
|
|
|
|
2019-11-13 00:47:48 +08:00
|
|
|
/* Stack dump geometry: 4-byte entries, 16 bytes per printed line. */
#define STACK_DUMP_ENTRY_SIZE 4
#define STACK_DUMP_LINE_SIZE 16
/* Number of entries to dump, configurable at build time. */
static size_t kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

/* Dump state shared between show_stack() and show_stack_fragment_cb(). */
struct stack_fragment
{
	size_t len;		/* total bytes to dump */
	size_t off;		/* current offset into the region */
	u8 *sp;			/* base of the dumped stack region */
	const char *loglvl;	/* printk log-level prefix */
};
|
2005-06-24 13:01:16 +08:00
|
|
|
|
2023-03-18 07:18:07 +08:00
|
|
|
/*
 * Print stack-dump lines starting at sf->off.  Invoked once per unwound
 * frame from walk_stackframe() (and again with frame == NULL from
 * show_stack() to flush the remainder).  A line is prefixed with "> "
 * when it is the first line or contains the current frame's sp; after
 * printing such a line the callback returns 0 so the walker can advance
 * to the next frame.  Returns 1 once the whole region has been printed.
 */
static int show_stack_fragment_cb(struct stackframe *frame, void *data)
{
	struct stack_fragment *sf = data;

	while (sf->off < sf->len) {
		u8 line[STACK_DUMP_LINE_SIZE];
		size_t line_len = sf->len - sf->off > STACK_DUMP_LINE_SIZE ?
			STACK_DUMP_LINE_SIZE : sf->len - sf->off;
		bool arrow = sf->off == 0;

		if (frame && frame->sp == (unsigned long)(sf->sp + sf->off))
			arrow = true;

		__memcpy(line, sf->sp + sf->off, line_len);
		print_hex_dump(sf->loglvl, arrow ? "> " : "  ", DUMP_PREFIX_NONE,
			       STACK_DUMP_LINE_SIZE, STACK_DUMP_ENTRY_SIZE,
			       line, line_len, false);
		sf->off += STACK_DUMP_LINE_SIZE;
		if (arrow)
			return 0;
	}
	return 1;
}
|
|
|
|
|
|
|
|
/*
 * Dump the raw stack contents (bounded by THREAD_SIZE and
 * kstack_depth_to_print) followed by a symbolized call trace.
 */
void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
	struct stack_fragment sf;

	if (!sp)
		sp = stack_pointer(task);

	/* Clamp the dump so it never runs past the end of the stack page. */
	sf.len = min((-(size_t)sp) & (THREAD_SIZE - STACK_DUMP_ENTRY_SIZE),
		     kstack_depth_to_print * STACK_DUMP_ENTRY_SIZE);
	sf.off = 0;
	sf.sp = (u8 *)sp;
	sf.loglvl = loglvl;

	printk("%sStack:\n", loglvl);
	/* Annotate frame boundaries while dumping... */
	walk_stackframe(sp, show_stack_fragment_cb, &sf);
	/* ...then flush whatever the frame walk did not cover. */
	while (sf.off < sf.len)
		show_stack_fragment_cb(NULL, &sf);
	show_trace(task, sp, loglvl);
}
|
|
|
|
|
2006-06-27 17:53:55 +08:00
|
|
|
/* Serializes oops output from concurrent CPUs. */
DEFINE_SPINLOCK(die_lock);

/*
 * Terminal error path: print an oops banner, registers, and (for kernel
 * faults) the stack, then panic or kill the current task.  Never returns.
 */
void __noreturn die(const char * str, struct pt_regs * regs, long err)
{
	static int die_counter;	/* distinguishes successive oopses */
	const char *pr = "";

	if (IS_ENABLED(CONFIG_PREEMPTION))
		pr = IS_ENABLED(CONFIG_PREEMPT_RT) ? " PREEMPT_RT" : " PREEMPT";

	console_verbose();
	spin_lock_irq(&die_lock);

	pr_info("%s: sig: %ld [#%d]%s\n", str, err, ++die_counter, pr);
	show_regs(regs);
	if (!user_mode(regs))
		show_stack(NULL, (unsigned long *)regs->areg[1], KERN_INFO);

	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irq(&die_lock);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	make_task_dead(err);
}
|