Merge branch 'tracing/fastboot' into tracing/ftrace
commit 79c81d220c
include/linux/ftrace.h

@@ -234,6 +234,11 @@ ftrace_init_module(unsigned long *start, unsigned long *end) { }
 #endif
 
+/*
+ * Structure which defines the trace of an initcall.
+ * You don't have to fill the func field since it is
+ * only used internally by the tracer.
+ */
 struct boot_trace {
         pid_t           caller;
         char            func[KSYM_NAME_LEN];
@@ -244,13 +249,28 @@ struct boot_trace {
 };
 
 #ifdef CONFIG_BOOT_TRACER
+/* Append the trace on the ring-buffer */
 extern void trace_boot(struct boot_trace *it, initcall_t fn);
+
+/* Tells the tracer that smp_pre_initcall is finished.
+ * So we can start the tracing
+ */
 extern void start_boot_trace(void);
-extern void stop_boot_trace(void);
+
+/* Resume the tracing of other necessary events
+ * such as sched switches
+ */
+extern void enable_boot_trace(void);
+
+/* Suspend this tracing. Actually, only sched_switches tracing have
+ * to be suspended. Initcalls doesn't need it.)
+ */
+extern void disable_boot_trace(void);
 #else
 static inline void trace_boot(struct boot_trace *it, initcall_t fn) { }
 static inline void start_boot_trace(void) { }
-static inline void stop_boot_trace(void) { }
+static inline void enable_boot_trace(void) { }
+static inline void disable_boot_trace(void) { }
 #endif
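Because the #else branch supplies empty static-inline stubs, call sites never need their own #ifdef CONFIG_BOOT_TRACER. A minimal sketch of that property, with a hypothetical caller that is not part of this commit:

#include <linux/ftrace.h>
#include <linux/init.h>

/* Hypothetical caller: the hook compiles away to nothing when
 * CONFIG_BOOT_TRACER is not set, so no conditional compilation is needed. */
static int __init example_boot_step(void)
{
        start_boot_trace();     /* empty static inline when the tracer is off */
        return 0;
}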
init/main.c

@@ -711,6 +711,7 @@ int do_one_initcall(initcall_t fn)
                 it.caller = task_pid_nr(current);
                 printk("calling %pF @ %i\n", fn, it.caller);
                 it.calltime = ktime_get();
+                enable_boot_trace();
         }
 
         it.result = fn();
@@ -722,6 +723,7 @@ int do_one_initcall(initcall_t fn)
                 printk("initcall %pF returned %d after %Ld usecs\n", fn,
                         it.result, it.duration);
                 trace_boot(&it, fn);
+                disable_boot_trace();
         }
 
         msgbuf[0] = 0;
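Taken together, the two do_one_initcall() hunks bracket every initcall with the new hooks. A hedged reconstruction of the resulting pattern; the fields follow the struct boot_trace shown earlier in this diff, while the local rettime variable and the usec approximation are illustrative assumptions rather than lines from init/main.c:

#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/ktime.h>
#include <linux/sched.h>

/* Sketch of a traced initcall: stamp the caller and start time, let
 * sched_switch events be recorded while fn() runs, then hand the record
 * to the boot tracer and quiesce sched tracing again. */
static int __init run_one_initcall_traced(initcall_t fn)
{
        struct boot_trace it;
        ktime_t rettime;                /* assumed local, not shown above */

        it.caller = task_pid_nr(current);
        it.calltime = ktime_get();
        enable_boot_trace();            /* resume sched_switch recording */

        it.result = fn();

        rettime = ktime_get();
        /* illustrative: ns >> 10 as a cheap approximation of usecs */
        it.duration = (unsigned long long)
                        ktime_to_ns(ktime_sub(rettime, it.calltime)) >> 10;

        trace_boot(&it, fn);            /* append the record to the ring buffer */
        disable_boot_trace();           /* suspend sched_switch recording */

        return it.result;
}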
@@ -882,7 +884,7 @@ static int __init kernel_init(void * unused)
          * we're essentially up and running. Get rid of the
          * initmem segments and start the user-mode stuff..
          */
-        stop_boot_trace();
+
         init_post();
         return 0;
 }
kernel/trace/trace.c

@@ -3285,6 +3285,8 @@ __init static int tracer_alloc_buffers(void)
 
         register_tracer(&nop_trace);
 #ifdef CONFIG_BOOT_TRACER
+        /* We don't want to launch sched_switch tracer yet */
+        global_trace.ctrl = 0;
         register_tracer(&boot_tracer);
         current_trace = &boot_tracer;
         current_trace->init(&global_trace);
kernel/trace/trace.h

@@ -49,6 +49,7 @@ struct ftrace_entry {
         unsigned long           parent_ip;
 };
 extern struct tracer boot_tracer;
+extern struct tracer sched_switch_trace; /* Used by the boot tracer */
 
 /*
  * Context switch trace entry - which task (and prio) we switched from/to:
kernel/trace/trace_boot.c

@@ -13,23 +13,33 @@
 #include "trace.h"
 
 static struct trace_array *boot_trace;
-static int trace_boot_enabled;
+static bool pre_initcalls_finished;
 
-/* Should be started after do_pre_smp_initcalls() in init/main.c */
+/* Tells the boot tracer that the pre_smp_initcalls are finished.
+ * So we are ready .
+ * It doesn't enable sched events tracing however.
+ * You have to call enable_boot_trace to do so.
+ */
 void start_boot_trace(void)
 {
-        trace_boot_enabled = 1;
+        pre_initcalls_finished = true;
 }
 
-void stop_boot_trace(void)
+void enable_boot_trace(void)
 {
-        trace_boot_enabled = 0;
+        if (pre_initcalls_finished)
+                tracing_start_cmdline_record();
 }
 
-void reset_boot_trace(struct trace_array *tr)
+void disable_boot_trace(void)
 {
-        stop_boot_trace();
+        if (pre_initcalls_finished)
+                tracing_stop_cmdline_record();
+}
+
+static void reset_boot_trace(struct trace_array *tr)
+{
+        sched_switch_trace.reset(tr);
 }
 
 static void boot_trace_init(struct trace_array *tr)
@@ -37,18 +47,18 @@ static void boot_trace_init(struct trace_array *tr)
         int cpu;
         boot_trace = tr;
 
-        trace_boot_enabled = 0;
-
         for_each_cpu_mask(cpu, cpu_possible_map)
                 tracing_reset(tr, cpu);
+
+        sched_switch_trace.init(tr);
 }
 
 static void boot_trace_ctrl_update(struct trace_array *tr)
 {
         if (tr->ctrl)
-                start_boot_trace();
+                enable_boot_trace();
         else
-                stop_boot_trace();
+                disable_boot_trace();
 }
 
 static enum print_line_t initcall_print_line(struct trace_iterator *iter)
@@ -99,7 +109,7 @@ void trace_boot(struct boot_trace *it, initcall_t fn)
         unsigned long irq_flags;
         struct trace_array *tr = boot_trace;
 
-        if (!trace_boot_enabled)
+        if (!pre_initcalls_finished)
                 return;
 
         /* Get its name now since this function could
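With the rework above, start_boot_trace() only records that the pre-SMP initcalls have finished, enable_boot_trace()/disable_boot_trace() gate the sched_switch cmdline recording, and trace_boot() itself bails out until pre_initcalls_finished is set. A hedged sketch of the intended ordering, loosely modeled on the init/main.c call sites in this merge; the function and its comments are illustrative only:

#include <linux/ftrace.h>
#include <linux/init.h>

/* Illustrative ordering only; not a copy of kernel_init(). */
static void __init boot_trace_ordering_sketch(void)
{
        enable_boot_trace();    /* too early: pre_initcalls_finished is still
                                 * false, so this is a no-op */

        /* ... pre-SMP initcalls run here ... */

        start_boot_trace();     /* from now on enable/disable really start and
                                 * stop cmdline recording */

        enable_boot_trace();    /* bracket one initcall ... */
        /* ... run the initcall, then trace_boot() its boot_trace record ... */
        disable_boot_trace();
}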
kernel/trace/trace_sched_switch.c

@@ -16,7 +16,8 @@
 
 static struct trace_array       *ctx_trace;
 static int __read_mostly        tracer_enabled;
-static atomic_t                 sched_ref;
+static int                      sched_ref;
+static DEFINE_MUTEX(sched_register_mutex);
 
 static void
 probe_sched_switch(struct rq *__rq, struct task_struct *prev,
@@ -27,7 +28,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
         int cpu;
         int pc;
 
-        if (!atomic_read(&sched_ref))
+        if (!sched_ref)
                 return;
 
         tracing_record_cmdline(prev);
@@ -123,20 +124,22 @@ static void tracing_sched_unregister(void)
 
 static void tracing_start_sched_switch(void)
 {
-        long ref;
-
-        ref = atomic_inc_return(&sched_ref);
-        if (ref == 1)
+        mutex_lock(&sched_register_mutex);
+        if (!(sched_ref++)) {
+                tracer_enabled = 1;
                 tracing_sched_register();
+        }
+        mutex_unlock(&sched_register_mutex);
 }
 
 static void tracing_stop_sched_switch(void)
 {
-        long ref;
-
-        ref = atomic_dec_and_test(&sched_ref);
-        if (ref)
+        mutex_lock(&sched_register_mutex);
+        if (!(--sched_ref)) {
                 tracing_sched_unregister();
+                tracer_enabled = 0;
+        }
+        mutex_unlock(&sched_register_mutex);
 }
 
 void tracing_start_cmdline_record(void)
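The hunk above trades the atomic_t reference count for a plain int guarded by sched_register_mutex, so that bumping the count, toggling tracer_enabled and (un)registering the tracepoint probes happen as one critical section. A generic sketch of that pattern; the names (my_ref, my_lock, facility_*) are hypothetical and not from this commit:

#include <linux/mutex.h>

static int my_ref;                      /* users that currently want tracing on */
static DEFINE_MUTEX(my_lock);           /* serialises the count and what it guards */

static void facility_hw_start(void) { /* register probes, set flags, ... */ }
static void facility_hw_stop(void)  { /* unregister probes, clear flags, ... */ }

static void facility_get(void)
{
        mutex_lock(&my_lock);
        if (!(my_ref++))                /* 0 -> 1 transition: really switch on */
                facility_hw_start();
        mutex_unlock(&my_lock);
}

static void facility_put(void)
{
        mutex_lock(&my_lock);
        if (!(--my_ref))                /* 1 -> 0 transition: really switch off */
                facility_hw_stop();
        mutex_unlock(&my_lock);
}

Unlike the old atomic_inc_return()/atomic_dec_and_test() pair, the mutex keeps a concurrent start and stop from interleaving between the counter update and the probe (un)registration.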
@@ -153,12 +156,10 @@ static void start_sched_trace(struct trace_array *tr)
 {
         sched_switch_reset(tr);
         tracing_start_cmdline_record();
-        tracer_enabled = 1;
 }
 
 static void stop_sched_trace(struct trace_array *tr)
 {
-        tracer_enabled = 0;
         tracing_stop_cmdline_record();
 }
@@ -172,7 +173,7 @@ static void sched_switch_trace_init(struct trace_array *tr)
 
 static void sched_switch_trace_reset(struct trace_array *tr)
 {
-        if (tr->ctrl)
+        if (tr->ctrl && sched_ref)
                 stop_sched_trace(tr);
 }
@@ -185,7 +186,7 @@ static void sched_switch_trace_ctrl_update(struct trace_array *tr)
                 stop_sched_trace(tr);
 }
 
-static struct tracer sched_switch_trace __read_mostly =
+struct tracer sched_switch_trace __read_mostly =
 {
         .name           = "sched_switch",
         .init           = sched_switch_trace_init,
@@ -198,14 +199,6 @@ static struct tracer sched_switch_trace __read_mostly =
 
 __init static int init_sched_switch_trace(void)
 {
-        int ret = 0;
-
-        if (atomic_read(&sched_ref))
-                ret = tracing_sched_register();
-        if (ret) {
-                pr_info("error registering scheduler trace\n");
-                return ret;
-        }
         return register_tracer(&sched_switch_trace);
 }
 device_initcall(init_sched_switch_trace);