b01f6d368d
Terminating the last trace entry with ULONG_MAX is a completely pointless
exercise and none of the consumers can rely on it because it's
inconsistently implemented across architectures. In fact quite some of the
callers remove the entry and adjust stack_trace.nr_entries afterwards.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Alexander Potapenko <glider@google.com>
Cc: Rich Felker <dalias@libc.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
Cc: linux-sh@vger.kernel.org
Cc: Simon Horman <horms+renesas@verge.net.au>
Link: https://lkml.kernel.org/r/20190410103643.932464393@linutronix.de
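For context, the terminator being removed is the trailing ULONG_MAX entry that the save_stack_trace*() implementations appended to the trace buffer. A sketch of that pattern, and of the cleanup many callers then had to do (illustrative only, not the exact removed hunk):

	/* arch side: terminate the trace (pattern removed by this change) */
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;

	/* caller side: strip the terminator again and fix up nr_entries */
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries - 1] == ULONG_MAX)
		trace.nr_entries--;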
87 lines
1.8 KiB
C
// SPDX-License-Identifier: GPL-2.0
/*
 * arch/sh/kernel/stacktrace.c
 *
 * Stack trace management functions
 *
 * Copyright (C) 2006 - 2008 Paul Mundt
 */
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/stacktrace.h>
#include <linux/thread_info.h>
#include <linux/module.h>
#include <asm/unwinder.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>

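/*
 * Nothing to do for the unwinder's stack callback; only addresses are
 * recorded.
 */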
static int save_stack_stack(void *data, char *name)
{
	return 0;
}

/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
static void save_stack_address(void *data, unsigned long addr, int reliable)
{
	struct stack_trace *trace = data;

	if (!reliable)
		return;

	if (trace->skip > 0) {
		trace->skip--;
		return;
	}

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = addr;
}

static const struct stacktrace_ops save_stack_ops = {
	.stack = save_stack_stack,
	.address = save_stack_address,
};

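/*
 * Save a stack trace for the current context, starting at the current
 * stack pointer.
 */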
void save_stack_trace(struct stack_trace *trace)
{
	unsigned long *sp = (unsigned long *)current_stack_pointer;

	unwind_stack(current, NULL, sp, &save_stack_ops, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);

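/*
 * Like save_stack_address(), but also skip addresses inside scheduler
 * functions so that scheduler internals do not show up in the trace.
 */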
static void
save_stack_address_nosched(void *data, unsigned long addr, int reliable)
{
	struct stack_trace *trace = (struct stack_trace *)data;

	if (!reliable)
		return;

	if (in_sched_functions(addr))
		return;

	if (trace->skip > 0) {
		trace->skip--;
		return;
	}

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = addr;
}

static const struct stacktrace_ops save_stack_ops_nosched = {
	.stack = save_stack_stack,
	.address = save_stack_address_nosched,
};

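/*
 * Save a stack trace for the given task, starting from its saved stack
 * pointer.
 */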
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	unsigned long *sp = (unsigned long *)tsk->thread.sp;

	unwind_stack(current, NULL, sp, &save_stack_ops_nosched, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);