// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/irq.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>

/*
 * AArch64 PCS assigns the frame pointer to x29.
 *
 * A simple function prologue looks like this:
 *	sub	sp, sp, #0x10
 *	stp	x29, x30, [sp]
 *	mov	x29, sp
 *
 * A simple function epilogue looks like this:
 *	mov	sp, x29
 *	ldp	x29, x30, [sp]
 *	add	sp, sp, #0x10
 */

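/*
 * A frame record is the pair stored by the prologue above: the caller's x29
 * at offset 0 and the saved x30 (return address) at offset 8. unwind_frame()
 * below follows this chain of records one link at a time.
 */
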
/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 */
int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
{
	unsigned long fp = frame->fp;
	struct stack_info info;

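	/* Frame records are 16-byte aligned, so reject a misaligned fp. */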
	if (fp & 0xf)
		return -EINVAL;

	if (!tsk)
		tsk = current;

	if (!on_accessible_stack(tsk, fp, &info))
		return -EINVAL;

	if (test_bit(info.type, frame->stacks_done))
		return -EINVAL;

	/*
	 * As stacks grow downward, any valid record on the same stack must be
	 * at a strictly higher address than the prior record.
	 *
	 * Stacks can nest in several valid orders, e.g.
	 *
	 * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
	 * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
	 *
	 * ... but the nesting itself is strict. Once we transition from one
	 * stack to another, it's never valid to unwind back to that first
	 * stack.
	 */
	if (info.type == frame->prev_type) {
		if (fp <= frame->prev_fp)
			return -EINVAL;
	} else {
		set_bit(frame->prev_type, frame->stacks_done);
	}

	/*
	 * Record this frame record's values and location. The prev_fp and
	 * prev_type are only meaningful to the next unwind_frame() invocation.
	 */
	frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
	frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
	frame->prev_fp = fp;
	frame->prev_type = info.type;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (tsk->ret_stack &&
		(frame->pc == (unsigned long)return_to_handler)) {
		struct ftrace_ret_stack *ret_stack;
		/*
		 * This is a case where the function graph tracer has
		 * modified a return address (LR) in a stack frame
		 * to hook a function return.
		 * So replace it with the original value.
		 */
		ret_stack = ftrace_graph_get_ret_stack(tsk, frame->graph++);
		if (WARN_ON_ONCE(!ret_stack))
			return -EINVAL;
		frame->pc = ret_stack->ret;
	}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	/*
	 * Frames created upon entry from EL0 have NULL FP and PC values, so
	 * don't bother reporting these. Frames created by __noreturn functions
	 * might have a valid FP even if PC is bogus, so only terminate where
	 * both are NULL.
	 */
	if (!frame->fp && !frame->pc)
		return -EINVAL;

	return 0;
}
NOKPROBE_SYMBOL(unwind_frame);

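/*
 * Walk the stack starting from the given frame, calling fn() on each frame.
 * The walk stops when fn() returns non-zero or when no further frame can be
 * unwound.
 */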
void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
		     int (*fn)(struct stackframe *, void *), void *data)
{
	while (1) {
		int ret;

		if (fn(frame, data))
			break;
		ret = unwind_frame(tsk, frame);
		if (ret < 0)
			break;
	}
}
NOKPROBE_SYMBOL(walk_stackframe);

#ifdef CONFIG_STACKTRACE
struct stack_trace_data {
	struct stack_trace *trace;
	unsigned int no_sched_functions;
	unsigned int skip;
};

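/*
 * walk_stackframe() callback: store one entry per frame, honouring the skip
 * count and the optional scheduler-function filter. Returning non-zero stops
 * the walk once the trace buffer is full.
 */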
static int save_trace(struct stackframe *frame, void *d)
{
	struct stack_trace_data *data = d;
	struct stack_trace *trace = data->trace;
	unsigned long addr = frame->pc;

	if (data->no_sched_functions && in_sched_functions(addr))
		return 0;
	if (data->skip) {
		data->skip--;
		return 0;
	}

	trace->entries[trace->nr_entries++] = addr;

	return trace->nr_entries >= trace->max_entries;
}

void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	struct stack_trace_data data;
	struct stackframe frame;

	data.trace = trace;
	data.skip = trace->skip;
	data.no_sched_functions = 0;

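	/* Unwind from the interrupted context; x29 holds its frame pointer. */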
	start_backtrace(&frame, regs->regs[29], regs->pc);
	walk_stackframe(current, &frame, save_trace, &data);
}
EXPORT_SYMBOL_GPL(save_stack_trace_regs);

static noinline void __save_stack_trace(struct task_struct *tsk,
	struct stack_trace *trace, unsigned int nosched)
{
	struct stack_trace_data data;
	struct stackframe frame;

	if (!try_get_task_stack(tsk))
		return;

	data.trace = trace;
	data.skip = trace->skip;
	data.no_sched_functions = nosched;

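	/*
	 * For another task, start from its last saved context; for current,
	 * start from this function itself.
	 */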
	if (tsk != current) {
		start_backtrace(&frame, thread_saved_fp(tsk),
				thread_saved_pc(tsk));
	} else {
		/* We don't want this function nor the caller */
		data.skip += 2;
		start_backtrace(&frame,
				(unsigned long)__builtin_frame_address(0),
				(unsigned long)__save_stack_trace);
	}

	walk_stackframe(tsk, &frame, save_trace, &data);

	put_task_stack(tsk);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	__save_stack_trace(tsk, trace, 1);
}

void save_stack_trace(struct stack_trace *trace)
{
	__save_stack_trace(current, trace, 0);
}

EXPORT_SYMBOL_GPL(save_stack_trace);
#endif