powerpc: make stack walking KASAN-safe
Make our stack-walking code KASAN-safe by using __no_sanitize_address.
Generic code, arm64, s390 and x86 all make accesses unchecked for similar
sorts of reasons: when unwinding a stack, we might touch memory that KASAN
has marked as being out-of-bounds. In ppc64 KASAN development, I hit this
sometimes when checking for an exception frame - because we're checking
an arbitrary offset into the stack frame.

See commit 2095574632 ("s390/kasan: avoid false positives during stack
unwind"), commit bcaf669b4b ("arm64: disable kasan when accessing
frame->fp in unwind_frame"), commit 91e08ab0c8 ("x86/dumpstack: Prevent
KASAN false positive warnings") and commit 6e22c83664 ("tracing, kasan:
Silence Kasan warning in check_stack of stack_tracer").

Signed-off-by: Daniel Axtens <dja@axtens.net>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210614120907.1952321-1-dja@axtens.net
parent d81090ed44
commit b112fb913b
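For context, __no_sanitize_address tells the compiler not to emit KASAN instrumentation for the annotated function, so its loads and stores are not checked against KASAN's shadow memory. Below is a minimal sketch of the pattern, assuming a kernel build with KASAN enabled; the helper name and the slot parameter are illustrative and not taken from this patch.

#include <linux/compiler.h>	/* provides __no_sanitize_address */

/*
 * Hypothetical helper: read an arbitrary slot of a stack frame while
 * unwinding. Without the attribute, KASAN may report this access as
 * out-of-bounds even though the unwinder is deliberately peeking at
 * another frame's memory.
 */
static unsigned long __no_sanitize_address
peek_stack_slot(const unsigned long *sp, unsigned int slot)
{
	return sp[slot];	/* unchecked read, by design */
}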
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
@@ -2133,8 +2133,9 @@ unsigned long get_wchan(struct task_struct *p)
 
 static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
 
-void show_stack(struct task_struct *tsk, unsigned long *stack,
-		const char *loglvl)
+void __no_sanitize_address show_stack(struct task_struct *tsk,
+				      unsigned long *stack,
+				      const char *loglvl)
 {
 	unsigned long sp, ip, lr, newsp;
 	int count = 0;
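The reason show_stack() needs the annotation is visible in its walk: it follows the saved back chain from frame to frame and peeks at a fixed offset into each frame to see whether an exception (pt_regs) block is embedded there. A simplified sketch of that shape follows; the slot constants and marker value are hypothetical placeholders, and the frame validation done by the real code is elided.

#include <linux/compiler.h>
#include <linux/printk.h>

#define LR_SAVE_SLOT		2		/* hypothetical: saved-LR offset      */
#define FRAME_MARKER_SLOT	12		/* hypothetical: "regs follow" marker */
#define FRAME_REGS_MARKER	0xdeadbeefUL	/* hypothetical marker value          */

static void __no_sanitize_address walk_frames(unsigned long sp)
{
	while (sp) {
		unsigned long *stack = (unsigned long *)sp;
		unsigned long newsp = stack[0];			/* saved back chain      */
		unsigned long ip = stack[LR_SAVE_SLOT];		/* saved return address  */

		pr_info("[%lx] %pS\n", sp, (void *)ip);

		/* The "arbitrary offset into the stack frame" the commit message refers to. */
		if (stack[FRAME_MARKER_SLOT] == FRAME_REGS_MARKER)
			pr_info("--- exception frame ---\n");

		sp = newsp;
	}
}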
diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
@@ -23,8 +23,8 @@
 
 #include <asm/paca.h>
 
-void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
-		     struct task_struct *task, struct pt_regs *regs)
+void __no_sanitize_address arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+					   struct task_struct *task, struct pt_regs *regs)
 {
 	unsigned long sp;
 
@@ -61,8 +61,8 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
  *
  * If the task is not 'current', the caller *must* ensure the task is inactive.
  */
-int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
-			     void *cookie, struct task_struct *task)
+int __no_sanitize_address arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
+						   void *cookie, struct task_struct *task)
 {
 	unsigned long sp;
 	unsigned long newsp;
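arch_stack_walk() is the arch hook for the generic stacktrace code: it hands each recovered return address to a stack_trace_consume_fn callback and stops as soon as that callback returns false. A minimal caller-side sketch, assuming the generic CONFIG_ARCH_STACKWALK interface; the struct and function names here are illustrative, not from this patch.

#include <linux/sched.h>
#include <linux/stacktrace.h>

struct trace_buf {
	unsigned long *entries;
	unsigned int nr, max;
};

/* Store each address; returning false ends the walk early. */
static bool store_entry(void *cookie, unsigned long addr)
{
	struct trace_buf *tb = cookie;

	if (tb->nr >= tb->max)
		return false;
	tb->entries[tb->nr++] = addr;
	return true;
}

/* Illustrative usage: capture the current task's kernel stack. */
static unsigned int capture_current_stack(unsigned long *buf, unsigned int max)
{
	struct trace_buf tb = { .entries = buf, .nr = 0, .max = max };

	arch_stack_walk(store_entry, &tb, current, NULL);
	return tb.nr;
}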
diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
@@ -40,7 +40,7 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
 	return 0;
 }
 
-void
+void __no_sanitize_address
 perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	unsigned long sp, next_sp;
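perf_callchain_kernel() performs the same kind of back-chain walk for perf, recording each address with perf_callchain_store(), which is why it picks up the same annotation. A rough sketch of that recording pattern under the same assumptions as above; the function name and frame-slot constant are again hypothetical placeholders, and per-frame validation is elided.

#include <linux/compiler.h>
#include <linux/perf_event.h>
#include <asm/ptrace.h>

#define LR_SAVE_SLOT	2	/* hypothetical saved-LR offset */

static void __no_sanitize_address
sketch_callchain_kernel(struct perf_callchain_entry_ctx *entry,
			struct pt_regs *regs)
{
	unsigned long sp = regs->gpr[1];	/* r1 is the powerpc stack pointer */

	perf_callchain_store(entry, regs->nip);	/* record the interrupted PC first */

	while (sp) {
		unsigned long *fp = (unsigned long *)sp;

		perf_callchain_store(entry, fp[LR_SAVE_SLOT]);	/* saved return address */
		sp = fp[0];					/* follow the back chain */
	}
}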