Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  tracing: Do not record user stack trace from NMI context
  tracing: Disable buffer switching when starting or stopping trace
  tracing: Use same local variable when resetting the ring buffer
  function-graph: Init curr_ret_stack with ret_stack
  ring-buffer: Move disabled check into preempt disable section
  function-graph: Add tracing_thresh support to function_graph tracer
  tracing: Update the comm field in the right variable in update_max_tr
  function-graph: Use comment notation for func names of dangling '}'
  function-graph: Fix unused reference to ftrace_set_func()
  tracing: Fix warning in s_next of trace file ops
  tracing: Include irqflags headers from trace clock
commit 8655e7e3dd
kernel/trace/ftrace.c

@@ -84,10 +84,6 @@ ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
-#endif
-
 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 {
         struct ftrace_ops *op = ftrace_list;
@@ -2276,6 +2272,8 @@ __setup("ftrace_filter=", set_ftrace_filter);
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
+static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
+
 static int __init set_graph_function(char *str)
 {
         strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
@@ -3351,6 +3349,7 @@ void ftrace_graph_init_task(struct task_struct *t)
 {
         /* Make sure we do not use the parent ret_stack */
         t->ret_stack = NULL;
+        t->curr_ret_stack = -1;
 
         if (ftrace_graph_active) {
                 struct ftrace_ret_stack *ret_stack;
@@ -3360,7 +3359,6 @@ void ftrace_graph_init_task(struct task_struct *t)
                                 GFP_KERNEL);
         if (!ret_stack)
                 return;
-        t->curr_ret_stack = -1;
         atomic_set(&t->tracing_graph_pause, 0);
         atomic_set(&t->trace_overrun, 0);
         t->ftrace_timestamp = 0;
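A gloss on the two curr_ret_stack hunks (my reading, not part of the commit): once t->ret_stack is non-NULL the graph tracer may begin pushing entries for this task, including from interrupt context, so the index has to be valid before the stack is published. Schematically:

        /*
         * Illustrative sketch, not the kernel source.  The old order was:
         *
         *     t->ret_stack = ret_stack;    <- an IRQ can begin tracing here
         *     t->curr_ret_stack = -1;      <- ...while the index is garbage
         *
         * Initializing the index first makes publishing the stack safe:
         */
        t->curr_ret_stack = -1;   /* empty stack: the first push uses slot 0 */
        t->ret_stack = ret_stack; /* only now may the tracer touch this task */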
kernel/trace/ring_buffer.c

@@ -2233,12 +2233,12 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
         if (ring_buffer_flags != RB_BUFFERS_ON)
                 return NULL;
 
-        if (atomic_read(&buffer->record_disabled))
-                return NULL;
-
         /* If we are tracing schedule, we don't want to recurse */
         resched = ftrace_preempt_disable();
 
+        if (atomic_read(&buffer->record_disabled))
+                goto out_nocheck;
+
         if (trace_recursive_lock())
                 goto out_nocheck;
 
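Why the check moves (my reading; the same reordering appears in ring_buffer_write() below): with the old order a writer could pass the record_disabled test, be preempted, and resume after recording had been disabled and the buffer reset underneath it. Once the check sits inside the preempt-disabled region it cannot go stale, because a reset waits for such regions to finish, as tracing_reset()'s synchronize_sched() call later in this merge shows. In kernel-flavoured pseudocode:

        /* Old order: the test can go stale before preemption is off.  */
        if (atomic_read(&buffer->record_disabled))  /* passes...       */
                return NULL;
        /* <- preempted here; buffer is disabled and reset meanwhile   */
        resched = ftrace_preempt_disable();         /* ...too late     */

        /* New order: test under preempt-off, so a reset (which uses   */
        /* synchronize_sched() to wait out such sections) cannot race: */
        resched = ftrace_preempt_disable();
        if (atomic_read(&buffer->record_disabled))
                goto out_nocheck;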
@@ -2470,11 +2470,11 @@ int ring_buffer_write(struct ring_buffer *buffer,
         if (ring_buffer_flags != RB_BUFFERS_ON)
                 return -EBUSY;
 
-        if (atomic_read(&buffer->record_disabled))
-                return -EBUSY;
-
         resched = ftrace_preempt_disable();
 
+        if (atomic_read(&buffer->record_disabled))
+                goto out;
+
         cpu = raw_smp_processor_id();
 
         if (!cpumask_test_cpu(cpu, buffer->cpumask))
kernel/trace/trace.c

@@ -374,6 +374,21 @@ static int __init set_buf_size(char *str)
 }
 __setup("trace_buf_size=", set_buf_size);
 
+static int __init set_tracing_thresh(char *str)
+{
+        unsigned long threshhold;
+        int ret;
+
+        if (!str)
+                return 0;
+        ret = strict_strtoul(str, 0, &threshhold);
+        if (ret < 0)
+                return 0;
+        tracing_thresh = threshhold * 1000;
+        return 1;
+}
+__setup("tracing_thresh=", set_tracing_thresh);
+
 unsigned long nsecs_to_usecs(unsigned long nsecs)
 {
         return nsecs / 1000;
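A worked example of the units (my arithmetic from the code above): booting with tracing_thresh=100 stores 100 * 1000 = 100000 in tracing_thresh, so the boot parameter is taken in microseconds while the variable holds nanoseconds, the inverse of the nsecs_to_usecs() helper directly below it.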
@@ -579,9 +594,10 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 static arch_spinlock_t ftrace_max_lock =
         (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
+unsigned long __read_mostly        tracing_thresh;
+
 #ifdef CONFIG_TRACER_MAX_TRACE
 unsigned long __read_mostly        tracing_max_latency;
-unsigned long __read_mostly        tracing_thresh;
 
 /*
  * Copy the new maximum trace into the separate maximum-trace
@@ -592,7 +608,7 @@ static void
 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
         struct trace_array_cpu *data = tr->data[cpu];
-        struct trace_array_cpu *max_data = tr->data[cpu];
+        struct trace_array_cpu *max_data;
 
         max_tr.cpu = cpu;
         max_tr.time_start = data->preempt_timestamp;
@@ -602,7 +618,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
         max_data->critical_start = data->critical_start;
         max_data->critical_end = data->critical_end;
 
-        memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
+        memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
         max_data->pid = tsk->pid;
         max_data->uid = task_uid(tsk);
         max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
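Two one-line bugs are fixed in this pair of hunks, per the summary item "Update the comm field in the right variable in update_max_tr": max_data was initialized from tr->data[cpu], the live array (it is presumably now assigned from max_tr.data[cpu] a few lines further down, outside this hunk), and the task's comm string was copied into data rather than max_data, so the saved maximum-latency trace kept a stale command name.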
@@ -824,10 +840,10 @@ out:
         mutex_unlock(&trace_types_lock);
 }
 
-static void __tracing_reset(struct trace_array *tr, int cpu)
+static void __tracing_reset(struct ring_buffer *buffer, int cpu)
 {
         ftrace_disable_cpu();
-        ring_buffer_reset_cpu(tr->buffer, cpu);
+        ring_buffer_reset_cpu(buffer, cpu);
         ftrace_enable_cpu();
 }
 
@@ -839,7 +855,7 @@ void tracing_reset(struct trace_array *tr, int cpu)
 
         /* Make sure all commits have finished */
         synchronize_sched();
-        __tracing_reset(tr, cpu);
+        __tracing_reset(buffer, cpu);
 
         ring_buffer_record_enable(buffer);
 }
@@ -857,7 +873,7 @@ void tracing_reset_online_cpus(struct trace_array *tr)
         tr->time_start = ftrace_now(tr->cpu);
 
         for_each_online_cpu(cpu)
-                __tracing_reset(tr, cpu);
+                __tracing_reset(buffer, cpu);
 
         ring_buffer_record_enable(buffer);
 }
@@ -934,6 +950,8 @@ void tracing_start(void)
                 goto out;
         }
 
+        /* Prevent the buffers from switching */
+        arch_spin_lock(&ftrace_max_lock);
 
         buffer = global_trace.buffer;
         if (buffer)
@@ -943,6 +961,8 @@ void tracing_start(void)
         if (buffer)
                 ring_buffer_record_enable(buffer);
 
+        arch_spin_unlock(&ftrace_max_lock);
+
         ftrace_start();
 out:
         spin_unlock_irqrestore(&tracing_start_lock, flags);
@@ -964,6 +984,9 @@ void tracing_stop(void)
         if (trace_stop_count++)
                 goto out;
 
+        /* Prevent the buffers from switching */
+        arch_spin_lock(&ftrace_max_lock);
+
         buffer = global_trace.buffer;
         if (buffer)
                 ring_buffer_record_disable(buffer);
@@ -972,6 +995,8 @@ void tracing_stop(void)
         if (buffer)
                 ring_buffer_record_disable(buffer);
 
+        arch_spin_unlock(&ftrace_max_lock);
+
 out:
         spin_unlock_irqrestore(&tracing_start_lock, flags);
 }
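Context for the new arch_spin_lock pairs (hedged: update_max_tr()'s body is not part of this diff): the max-latency code swaps the live and snapshot buffers under ftrace_max_lock, roughly:

        /*
         * Schematic of the swap that tracing_start()/tracing_stop()
         * must not interleave with; a sketch, not the kernel source.
         */
        arch_spin_lock(&ftrace_max_lock);
        buf = tr->buffer;             /* current live buffer              */
        tr->buffer = max_tr.buffer;   /* snapshot becomes the live one    */
        max_tr.buffer = buf;          /* old live buffer is the snapshot  */
        arch_spin_unlock(&ftrace_max_lock);

Taking the same lock around enabling or disabling recording ensures tracing_start() and tracing_stop() always act on a consistent live/snapshot pairing.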
@@ -1259,6 +1284,13 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
         if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
                 return;
 
+        /*
+         * NMIs can not handle page faults, even with fix ups.
+         * The save user stack can (and often does) fault.
+         */
+        if (unlikely(in_nmi()))
+                return;
+
         event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
                                           sizeof(*entry), flags, pc);
         if (!event)
@@ -1703,6 +1735,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 
                 ftrace_enable_cpu();
 
+                iter->leftover = 0;
                 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
                         ;
 
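This one-liner is the "Fix warning in s_next of trace file ops" item: as I read it, iter->leftover can still hold a partially consumed entry from an earlier read, and s_next() warns when it finds one set on a restarted iterator, so a fresh s_start() now clears it before walking forward to *pos.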
@@ -4248,10 +4281,10 @@ static __init int tracer_init_debugfs(void)
 #ifdef CONFIG_TRACER_MAX_TRACE
         trace_create_file("tracing_max_latency", 0644, d_tracer,
                         &tracing_max_latency, &tracing_max_lat_fops);
+#endif
 
         trace_create_file("tracing_thresh", 0644, d_tracer,
                         &tracing_thresh, &tracing_max_lat_fops);
-#endif
 
         trace_create_file("README", 0444, d_tracer,
                         NULL, &tracing_readme_fops);
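Net effect of moving the #endif: the tracing_thresh debugfs file is now created whether or not CONFIG_TRACER_MAX_TRACE is set, matching the relocation of the variable itself out of the same #ifdef earlier in this patch and in trace.h below. It still reuses tracing_max_lat_fops, which, like the boot parameter above, presents the value in microseconds.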
kernel/trace/trace.h

@@ -396,9 +396,10 @@ extern int process_new_ksym_entry(char *ksymname, int op, unsigned long addr);
 
 extern unsigned long nsecs_to_usecs(unsigned long nsecs);
 
+extern unsigned long tracing_thresh;
+
 #ifdef CONFIG_TRACER_MAX_TRACE
 extern unsigned long tracing_max_latency;
-extern unsigned long tracing_thresh;
 
 void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
 void update_max_tr_single(struct trace_array *tr,
kernel/trace/trace_clock.c

@@ -13,6 +13,7 @@
  * Tracer plugins will chose a default from these clocks.
  */
 #include <linux/spinlock.h>
+#include <linux/irqflags.h>
 #include <linux/hardirq.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
kernel/trace/trace_functions_graph.c

@@ -237,6 +237,14 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
         return ret;
 }
 
+int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
+{
+        if (tracing_thresh)
+                return 1;
+        else
+                return trace_graph_entry(trace);
+}
+
 static void __trace_graph_return(struct trace_array *tr,
                                 struct ftrace_graph_ret *trace,
                                 unsigned long flags,
@@ -290,13 +298,26 @@ void set_graph_array(struct trace_array *tr)
         smp_mb();
 }
 
+void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
+{
+        if (tracing_thresh &&
+            (trace->rettime - trace->calltime < tracing_thresh))
+                return;
+        else
+                trace_graph_return(trace);
+}
+
 static int graph_trace_init(struct trace_array *tr)
 {
         int ret;
 
         set_graph_array(tr);
-        ret = register_ftrace_graph(&trace_graph_return,
-                                    &trace_graph_entry);
+        if (tracing_thresh)
+                ret = register_ftrace_graph(&trace_graph_thresh_return,
+                                            &trace_graph_thresh_entry);
+        else
+                ret = register_ftrace_graph(&trace_graph_return,
+                                            &trace_graph_entry);
         if (ret)
                 return ret;
         tracing_start_cmdline_record();
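How the two callbacks compose (my summary): with tracing_thresh set, the entry handler records nothing, but returning 1 still tells the framework to hook the function's return so the duration can be measured; the return handler then emits an event only when the function ran for at least tracing_thresh nanoseconds. The result is a function_graph trace filtered down to slow calls, e.g. a threshold of 100000 ns keeps only functions that took 100 microseconds or longer.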
@@ -920,7 +941,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
                 if (!ret)
                         return TRACE_TYPE_PARTIAL_LINE;
         } else {
-                ret = trace_seq_printf(s, "} (%ps)\n", (void *)trace->func);
+                ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
                 if (!ret)
                         return TRACE_TYPE_PARTIAL_LINE;
         }
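This last hunk is the "comment notation" item: a closing brace whose matching entry fell outside the trace window used to be annotated "} (func_name)"; printing "} /* func_name */" instead makes the annotation a well-formed C comment, so the dangling brace still reads as valid C.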