
ftrace: add stack tracing

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Ingo Molnar 2008-05-12 21:20:51 +02:00 committed by Thomas Gleixner
parent 57422797dc
commit 86387f7ee5
4 changed files with 99 additions and 18 deletions

include/linux/ftrace.h

@@ -93,6 +93,7 @@ static inline void tracer_disable(void)
# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
#else
# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
# define CALLER_ADDR1 0UL
@@ -100,6 +101,7 @@ static inline void tracer_disable(void)
# define CALLER_ADDR3 0UL
# define CALLER_ADDR4 0UL
# define CALLER_ADDR5 0UL
# define CALLER_ADDR6 0UL
#endif
#ifdef CONFIG_IRQSOFF_TRACER

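Note on the ftrace.h hunk above: the CALLER_ADDRn macros wrap GCC's __builtin_return_address(n), which reads the return address n frames up the call chain; on configurations where walking past frame 0 is unreliable, the #else branch pins the deeper levels to 0UL. A minimal userspace sketch of the same mechanism, not part of this change (function names are illustrative, and it assumes walkable frames, e.g. -O0 -fno-omit-frame-pointer):

/*
 * Illustration only: how __builtin_return_address() yields caller
 * addresses at increasing depth.
 */
#include <stdio.h>

#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
#define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))

static __attribute__((noinline)) void leaf(void)
{
	/* CALLER_ADDR0: where leaf() returns to (inside middle());
	 * CALLER_ADDR1: where middle() returns to (inside main()). */
	printf("return sites: %#lx <= %#lx\n", CALLER_ADDR0, CALLER_ADDR1);
}

static __attribute__((noinline)) void middle(void)
{
	leaf();
}

int main(void)
{
	middle();
	return 0;
}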
kernel/trace/Kconfig

@@ -10,6 +10,7 @@ config TRACER_MAX_TRACE
config TRACING
bool
select DEBUG_FS
select STACKTRACE
config FTRACE
bool "Kernel Function Tracer"

kernel/trace/trace.c

@@ -28,6 +28,8 @@
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/stacktrace.h>
#include "trace.h"
unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
@@ -88,6 +90,7 @@ enum trace_type {
TRACE_FN,
TRACE_CTX,
TRACE_WAKE,
TRACE_STACK,
TRACE_SPECIAL,
__TRACE_LAST_TYPE
@@ -109,6 +112,7 @@ enum trace_iterator_flags {
TRACE_ITER_HEX = 0x20,
TRACE_ITER_BIN = 0x40,
TRACE_ITER_BLOCK = 0x80,
TRACE_ITER_STACKTRACE = 0x100,
};
#define TRACE_ITER_SYM_MASK \
@@ -124,10 +128,11 @@ static const char *trace_options[] = {
"hex",
"bin",
"block",
"stacktrace",
NULL
};
static unsigned trace_flags;
static unsigned trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_STACKTRACE;
static DEFINE_SPINLOCK(ftrace_max_lock);
@@ -657,7 +662,7 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
spin_unlock_irqrestore(&data->lock, irq_flags);
if (!(trace_flags & TRACE_ITER_BLOCK))
wake_up (&trace_wait);
wake_up(&trace_wait);
}
void
@@ -685,13 +690,39 @@ trace_special(struct trace_array *tr, struct trace_array_cpu *data,
spin_unlock_irqrestore(&data->lock, irq_flags);
if (!(trace_flags & TRACE_ITER_BLOCK))
wake_up (&trace_wait);
wake_up(&trace_wait);
}
void __trace_stack(struct trace_array *tr,
struct trace_array_cpu *data,
unsigned long flags,
int skip)
{
struct trace_entry *entry;
struct stack_trace trace;
if (!(trace_flags & TRACE_ITER_STACKTRACE))
return;
entry = tracing_get_trace_entry(tr, data);
tracing_generic_entry_update(entry, flags);
entry->type = TRACE_STACK;
memset(&entry->stack, 0, sizeof(entry->stack));
trace.nr_entries = 0;
trace.max_entries = FTRACE_STACK_ENTRIES;
trace.skip = skip;
trace.entries = entry->stack.caller;
save_stack_trace(&trace);
}
void
tracing_sched_switch_trace(struct trace_array *tr,
struct trace_array_cpu *data,
struct task_struct *prev, struct task_struct *next,
struct task_struct *prev,
struct task_struct *next,
unsigned long flags)
{
struct trace_entry *entry;
@@ -706,16 +737,18 @@ tracing_sched_switch_trace(struct trace_array *tr,
entry->ctx.prev_state = prev->state;
entry->ctx.next_pid = next->pid;
entry->ctx.next_prio = next->prio;
__trace_stack(tr, data, flags, 4);
spin_unlock_irqrestore(&data->lock, irq_flags);
if (!(trace_flags & TRACE_ITER_BLOCK))
wake_up (&trace_wait);
wake_up(&trace_wait);
}
void
tracing_sched_wakeup_trace(struct trace_array *tr,
struct trace_array_cpu *data,
struct task_struct *wakee, struct task_struct *curr,
struct task_struct *wakee,
struct task_struct *curr,
unsigned long flags)
{
struct trace_entry *entry;
@@ -730,6 +763,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
entry->ctx.prev_state = curr->state;
entry->ctx.next_pid = wakee->pid;
entry->ctx.next_prio = wakee->prio;
__trace_stack(tr, data, flags, 5);
spin_unlock_irqrestore(&data->lock, irq_flags);
if (!(trace_flags & TRACE_ITER_BLOCK))
@@ -1179,6 +1213,7 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
unsigned long rel_usecs;
char *comm;
int S;
int i;
if (!next_entry)
next_entry = entry;
@@ -1197,9 +1232,11 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
abs_usecs % 1000, rel_usecs/1000,
rel_usecs % 1000);
} else {
if (entry->type != TRACE_STACK) {
lat_print_generic(s, entry, cpu);
lat_print_timestamp(s, abs_usecs, rel_usecs);
}
}
switch (entry->type) {
case TRACE_FN:
seq_print_ip_sym(s, entry->fn.ip, sym_flags);
@@ -1226,6 +1263,14 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
entry->special.arg2,
entry->special.arg3);
break;
case TRACE_STACK:
for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
if (i)
trace_seq_puts(s, " <= ");
seq_print_ip_sym(s, entry->stack.caller[i], sym_flags);
}
trace_seq_puts(s, "\n");
break;
default:
trace_seq_printf(s, "Unknown type %d\n", entry->type);
}
@@ -1241,8 +1286,9 @@ static int print_trace_fmt(struct trace_iterator *iter)
unsigned long long t;
unsigned long secs;
char *comm;
int S;
int ret;
int S;
int i;
entry = iter->ent;
@@ -1252,6 +1298,7 @@ static int print_trace_fmt(struct trace_iterator *iter)
usec_rem = do_div(t, 1000000ULL);
secs = (unsigned long)t;
if (entry->type != TRACE_STACK) {
ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
if (!ret)
return 0;
@@ -1261,6 +1308,7 @@ static int print_trace_fmt(struct trace_iterator *iter)
ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
if (!ret)
return 0;
}
switch (entry->type) {
case TRACE_FN:
@@ -1303,6 +1351,22 @@ static int print_trace_fmt(struct trace_iterator *iter)
if (!ret)
return 0;
break;
case TRACE_STACK:
for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
if (i) {
ret = trace_seq_puts(s, " <= ");
if (!ret)
return 0;
}
ret = seq_print_ip_sym(s, entry->stack.caller[i],
sym_flags);
if (!ret)
return 0;
}
ret = trace_seq_puts(s, "\n");
if (!ret)
return 0;
break;
}
return 1;
}
@@ -1344,6 +1408,7 @@ static int print_raw_fmt(struct trace_iterator *iter)
return 0;
break;
case TRACE_SPECIAL:
case TRACE_STACK:
ret = trace_seq_printf(s, " %lx %lx %lx\n",
entry->special.arg1,
entry->special.arg2,
@@ -1399,6 +1464,7 @@ static int print_hex_fmt(struct trace_iterator *iter)
SEQ_PUT_HEX_FIELD_RET(s, entry->fn.parent_ip);
break;
case TRACE_SPECIAL:
case TRACE_STACK:
SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg1);
SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg2);
SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg3);
@@ -1433,6 +1499,7 @@ static int print_bin_fmt(struct trace_iterator *iter)
SEQ_PUT_FIELD_RET(s, entry->ctx.next_prio);
break;
case TRACE_SPECIAL:
case TRACE_STACK:
SEQ_PUT_FIELD_RET(s, entry->special.arg1);
SEQ_PUT_FIELD_RET(s, entry->special.arg2);
SEQ_PUT_FIELD_RET(s, entry->special.arg3);

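Note on __trace_stack() above: it points a struct stack_trace at the caller[] array embedded in the trace entry, caps the capture at FTRACE_STACK_ENTRIES via max_entries, and uses skip to drop the innermost tracing frames so the recorded stack starts near the scheduler call site (skip = 4 from the context-switch path, 5 from the wakeup path, presumably matching their different call depths). A rough userspace analogue of that skip/max_entries bookkeeping, built on glibc's backtrace() rather than the kernel's save_stack_trace() (all names below are illustrative):

/*
 * Illustration only: capture a backtrace, drop 'skip' innermost frames,
 * keep at most STACK_ENTRIES addresses, and print them "a <= b <= c"
 * the way print_lat_fmt()/print_trace_fmt() do for TRACE_STACK entries.
 */
#include <execinfo.h>
#include <stdio.h>

#define STACK_ENTRIES 5			/* mirrors FTRACE_STACK_ENTRIES */

static void capture_stack(unsigned long *caller, int skip)
{
	void *raw[STACK_ENTRIES + 8];
	int depth, i;

	depth = backtrace(raw, STACK_ENTRIES + 8);

	/* Copy at most STACK_ENTRIES frames, dropping the 'skip' innermost ones. */
	for (i = 0; i < STACK_ENTRIES; i++)
		caller[i] = (skip + i < depth) ? (unsigned long)raw[skip + i] : 0UL;
}

static __attribute__((noinline)) void show(void)
{
	unsigned long caller[STACK_ENTRIES];
	int i;

	capture_stack(caller, 1);	/* skip capture_stack() itself */
	for (i = 0; i < STACK_ENTRIES && caller[i]; i++)
		printf("%s%#lx", i ? " <= " : "", caller[i]);
	printf("\n");
}

int main(void)
{
	show();
	return 0;
}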
kernel/trace/trace.h

@@ -34,6 +34,16 @@ struct special_entry {
unsigned long arg3;
};
/*
* Stack-trace entry:
*/
#define FTRACE_STACK_ENTRIES 5
struct stack_entry {
unsigned long caller[FTRACE_STACK_ENTRIES];
};
/*
* The trace entry - the most basic unit of tracing. This is what
* is printed in the end as a single line in the trace output, such as:
@@ -51,6 +61,7 @@ struct trace_entry {
struct ftrace_entry fn;
struct ctx_switch_entry ctx;
struct special_entry special;
struct stack_entry stack;
};
};
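Note on the trace.h hunk above: stack_entry joins the per-entry union, so every entry in the trace buffer carries room for the largest union member, which after this change is stack_entry (five unsigned longs); keeping FTRACE_STACK_ENTRIES at 5 keeps that footprint modest. A standalone sketch of the layout; the struct bodies are reconstructed from what this diff uses (fn.ip, fn.parent_ip, special.arg1..3, stack.caller[]), and the entry header and ctx_switch_entry are omitted:

/*
 * Illustration only: the union holding the per-type payloads is as large
 * as its biggest member, which here is stack_entry.
 */
#include <stdio.h>

#define FTRACE_STACK_ENTRIES 5

struct ftrace_entry {
	unsigned long ip;
	unsigned long parent_ip;
};

struct special_entry {
	unsigned long arg1;
	unsigned long arg2;
	unsigned long arg3;
};

struct stack_entry {
	unsigned long caller[FTRACE_STACK_ENTRIES];
};

union trace_payload {			/* stand-in for the union inside trace_entry */
	struct ftrace_entry fn;
	struct special_entry special;
	struct stack_entry stack;
};

int main(void)
{
	printf("fn=%zu special=%zu stack=%zu -> union=%zu bytes\n",
	       sizeof(struct ftrace_entry), sizeof(struct special_entry),
	       sizeof(struct stack_entry), sizeof(union trace_payload));
	return 0;
}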