mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-11 21:38:32 +08:00
ftrace: add trace_function api for other tracers to use
A new check was added in the ftrace function that won't trace if the CPU trace buffer is disabled. Unfortunately, other tracers used ftrace() to write to the buffer after they had disabled it. The new disable check turns these calls into a no-op. This patch converts the __ftrace function that is called without the check into a new API for the other tracers to use, called "trace_function". The other tracers use this interface instead when the per-CPU trace buffer is already disabled. Signed-off-by: Steven Rostedt <srostedt@redhat.com> Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
This commit is contained in:
parent
2a2cc8f7c4
commit
6fb44b717c
@ -641,8 +641,8 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
|
|||||||
}
|
}
|
||||||
|
|
||||||
notrace void
|
notrace void
|
||||||
__ftrace(struct trace_array *tr, struct trace_array_cpu *data,
|
trace_function(struct trace_array *tr, struct trace_array_cpu *data,
|
||||||
unsigned long ip, unsigned long parent_ip, unsigned long flags)
|
unsigned long ip, unsigned long parent_ip, unsigned long flags)
|
||||||
{
|
{
|
||||||
struct trace_entry *entry;
|
struct trace_entry *entry;
|
||||||
unsigned long irq_flags;
|
unsigned long irq_flags;
|
||||||
@ -664,7 +664,7 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
|
|||||||
unsigned long ip, unsigned long parent_ip, unsigned long flags)
|
unsigned long ip, unsigned long parent_ip, unsigned long flags)
|
||||||
{
|
{
|
||||||
if (likely(!atomic_read(&data->disabled)))
|
if (likely(!atomic_read(&data->disabled)))
|
||||||
__ftrace(tr, data, ip, parent_ip, flags);
|
trace_function(tr, data, ip, parent_ip, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
notrace void
|
notrace void
|
||||||
@ -730,7 +730,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
|
|||||||
disabled = atomic_inc_return(&data->disabled);
|
disabled = atomic_inc_return(&data->disabled);
|
||||||
|
|
||||||
if (likely(disabled == 1))
|
if (likely(disabled == 1))
|
||||||
__ftrace(tr, data, ip, parent_ip, flags);
|
trace_function(tr, data, ip, parent_ip, flags);
|
||||||
|
|
||||||
atomic_dec(&data->disabled);
|
atomic_dec(&data->disabled);
|
||||||
local_irq_restore(flags);
|
local_irq_restore(flags);
|
||||||
|
@ -169,6 +169,11 @@ void trace_special(struct trace_array *tr,
|
|||||||
unsigned long arg1,
|
unsigned long arg1,
|
||||||
unsigned long arg2,
|
unsigned long arg2,
|
||||||
unsigned long arg3);
|
unsigned long arg3);
|
||||||
|
void trace_function(struct trace_array *tr,
|
||||||
|
struct trace_array_cpu *data,
|
||||||
|
unsigned long ip,
|
||||||
|
unsigned long parent_ip,
|
||||||
|
unsigned long flags);
|
||||||
|
|
||||||
void tracing_start_function_trace(void);
|
void tracing_start_function_trace(void);
|
||||||
void tracing_stop_function_trace(void);
|
void tracing_stop_function_trace(void);
|
||||||
|
@ -95,7 +95,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
|
|||||||
disabled = atomic_inc_return(&data->disabled);
|
disabled = atomic_inc_return(&data->disabled);
|
||||||
|
|
||||||
if (likely(disabled == 1))
|
if (likely(disabled == 1))
|
||||||
ftrace(tr, data, ip, parent_ip, flags);
|
trace_function(tr, data, ip, parent_ip, flags);
|
||||||
|
|
||||||
atomic_dec(&data->disabled);
|
atomic_dec(&data->disabled);
|
||||||
}
|
}
|
||||||
@ -150,7 +150,7 @@ check_critical_timing(struct trace_array *tr,
|
|||||||
if (!report_latency(delta))
|
if (!report_latency(delta))
|
||||||
goto out_unlock;
|
goto out_unlock;
|
||||||
|
|
||||||
ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
|
trace_function(tr, data, CALLER_ADDR0, parent_ip, flags);
|
||||||
|
|
||||||
latency = nsecs_to_usecs(delta);
|
latency = nsecs_to_usecs(delta);
|
||||||
|
|
||||||
@ -188,7 +188,7 @@ out:
|
|||||||
data->critical_sequence = max_sequence;
|
data->critical_sequence = max_sequence;
|
||||||
data->preempt_timestamp = ftrace_now(cpu);
|
data->preempt_timestamp = ftrace_now(cpu);
|
||||||
tracing_reset(data);
|
tracing_reset(data);
|
||||||
ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
|
trace_function(tr, data, CALLER_ADDR0, parent_ip, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void notrace
|
static inline void notrace
|
||||||
@ -221,7 +221,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
|
|||||||
|
|
||||||
local_save_flags(flags);
|
local_save_flags(flags);
|
||||||
|
|
||||||
ftrace(tr, data, ip, parent_ip, flags);
|
trace_function(tr, data, ip, parent_ip, flags);
|
||||||
|
|
||||||
__get_cpu_var(tracing_cpu) = 1;
|
__get_cpu_var(tracing_cpu) = 1;
|
||||||
|
|
||||||
@ -254,7 +254,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
|
|||||||
|
|
||||||
atomic_inc(&data->disabled);
|
atomic_inc(&data->disabled);
|
||||||
local_save_flags(flags);
|
local_save_flags(flags);
|
||||||
ftrace(tr, data, ip, parent_ip, flags);
|
trace_function(tr, data, ip, parent_ip, flags);
|
||||||
check_critical_timing(tr, data, parent_ip ? : ip, cpu);
|
check_critical_timing(tr, data, parent_ip ? : ip, cpu);
|
||||||
data->critical_start = 0;
|
data->critical_start = 0;
|
||||||
atomic_dec(&data->disabled);
|
atomic_dec(&data->disabled);
|
||||||
|
@ -85,7 +85,7 @@ wakeup_sched_switch(struct task_struct *prev, struct task_struct *next)
|
|||||||
if (unlikely(!tracer_enabled || next != wakeup_task))
|
if (unlikely(!tracer_enabled || next != wakeup_task))
|
||||||
goto out_unlock;
|
goto out_unlock;
|
||||||
|
|
||||||
ftrace(tr, data, CALLER_ADDR1, CALLER_ADDR2, flags);
|
trace_function(tr, data, CALLER_ADDR1, CALLER_ADDR2, flags);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* usecs conversion is slow so we try to delay the conversion
|
* usecs conversion is slow so we try to delay the conversion
|
||||||
@ -192,7 +192,8 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p,
|
|||||||
local_save_flags(flags);
|
local_save_flags(flags);
|
||||||
|
|
||||||
tr->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
|
tr->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
|
||||||
ftrace(tr, tr->data[wakeup_cpu], CALLER_ADDR1, CALLER_ADDR2, flags);
|
trace_function(tr, tr->data[wakeup_cpu],
|
||||||
|
CALLER_ADDR1, CALLER_ADDR2, flags);
|
||||||
|
|
||||||
out_locked:
|
out_locked:
|
||||||
spin_unlock(&wakeup_lock);
|
spin_unlock(&wakeup_lock);
|
||||||
|
Loading…
Reference in New Issue
Block a user