Mirror of https://github.com/edk2-porting/linux-next.git (synced 2024-12-19 10:44:14 +08:00)
ftrace: make work with new ring buffer
This patch ports ftrace over to the new ring buffer.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent ed56829cb3
commit 3928a8a2d9
File diff suppressed because it is too large
@@ -5,6 +5,7 @@
 #include <asm/atomic.h>
 #include <linux/sched.h>
 #include <linux/clocksource.h>
+#include <linux/ring_buffer.h>
 #include <linux/mmiotrace.h>
 #include <linux/ftrace.h>
 
@@ -102,7 +103,6 @@ struct trace_field {
 	char			flags;
 	char			preempt_count;
 	int			pid;
-	cycle_t			t;
 	union {
 		struct ftrace_entry	fn;
 		struct ctx_switch_entry	ctx;
@@ -139,16 +139,9 @@ struct trace_entry {
  * the trace, etc.)
  */
 struct trace_array_cpu {
-	struct list_head	trace_pages;
 	atomic_t		disabled;
-	raw_spinlock_t		lock;
-	struct lock_class_key	lock_key;
 
 	/* these fields get copied into max-trace: */
-	unsigned		trace_head_idx;
-	unsigned		trace_tail_idx;
-	void			*trace_head; /* producer */
-	void			*trace_tail; /* consumer */
 	unsigned long		trace_idx;
 	unsigned long		overrun;
 	unsigned long		saved_latency;
@@ -172,6 +165,7 @@ struct trace_iterator;
  * They have on/off state as well:
  */
 struct trace_array {
+	struct ring_buffer	*buffer;
 	unsigned long		entries;
 	long			ctrl;
 	int			cpu;
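With the per-cpu page lists gone, each trace_array now owns a single struct ring_buffer. A minimal sketch of how such a buffer would be created and torn down with the new API; the example_* helper names are illustrative, and the real allocation lives in the file whose diff is suppressed above (likely kernel/trace/trace.c):

	/* Sketch: one ring buffer per trace_array, replacing the old page lists. */
	#include <linux/errno.h>
	#include <linux/ring_buffer.h>

	static int example_alloc_trace_buffer(struct trace_array *tr, unsigned long size)
	{
		/* RB_FL_OVERWRITE: oldest events are dropped when the buffer fills. */
		tr->buffer = ring_buffer_alloc(size, RB_FL_OVERWRITE);
		if (!tr->buffer)
			return -ENOMEM;
		return 0;
	}

	static void example_free_trace_buffer(struct trace_array *tr)
	{
		ring_buffer_free(tr->buffer);
		tr->buffer = NULL;
	}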
@@ -219,27 +213,21 @@ struct trace_iterator {
 	struct trace_array	*tr;
 	struct tracer		*trace;
 	void			*private;
-	long			last_overrun[NR_CPUS];
-	long			overrun[NR_CPUS];
+	struct ring_buffer_iter	*buffer_iter[NR_CPUS];
 
 	/* The below is zeroed out in pipe_read */
 	struct trace_seq	seq;
 	struct trace_entry	*ent;
 	int			cpu;
+	u64			ts;
-	struct trace_entry	*prev_ent;
-	int			prev_cpu;
 
 	unsigned long		iter_flags;
 	loff_t			pos;
-	unsigned long		next_idx[NR_CPUS];
-	struct list_head	*next_page[NR_CPUS];
-	unsigned		next_page_idx[NR_CPUS];
 	long			idx;
 };
 
 void trace_wake_up(void);
-void tracing_reset(struct trace_array_cpu *data);
+void tracing_reset(struct trace_array *tr, int cpu);
 int tracing_open_generic(struct inode *inode, struct file *filp);
 struct dentry *tracing_init_dentry(void);
 void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
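tracing_reset() now takes the trace_array and a CPU number instead of a trace_array_cpu, because the pages to discard live inside the ring buffer rather than in per-cpu lists. Its new body is in the suppressed file; a plausible minimal sketch, assuming it simply forwards to the ring buffer's per-cpu reset primitive:

	/* Sketch (assumption): reset one CPU's trace data by resetting its ring buffer. */
	void tracing_reset(struct trace_array *tr, int cpu)
	{
		ring_buffer_reset_cpu(tr->buffer, cpu);
	}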
@@ -34,7 +34,7 @@ static void boot_trace_init(struct trace_array *tr)
 	trace_boot_enabled = 0;
 
 	for_each_cpu_mask(cpu, cpu_possible_map)
-		tracing_reset(tr->data[cpu]);
+		tracing_reset(tr, cpu);
 }
 
 static void boot_trace_ctrl_update(struct trace_array *tr)
@@ -74,6 +74,7 @@ struct tracer boot_tracer __read_mostly =
 
 void trace_boot(struct boot_trace *it)
 {
+	struct ring_buffer_event *event;
 	struct trace_entry *entry;
 	struct trace_array_cpu *data;
 	unsigned long irq_flags;
@@ -85,17 +86,18 @@ void trace_boot(struct boot_trace *it)
 	preempt_disable();
 	data = tr->data[smp_processor_id()];
 
-	raw_local_irq_save(irq_flags);
-	__raw_spin_lock(&data->lock);
-	entry = tracing_get_trace_entry(tr, data);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
+					 &irq_flags);
+	if (!event)
+		goto out;
+	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(entry, 0);
 	entry->type = TRACE_BOOT;
 	entry->field.initcall = *it;
+	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
 
-	__raw_spin_unlock(&data->lock);
-	raw_local_irq_restore(irq_flags);
 	trace_wake_up();
 
+ out:
 	preempt_enable();
 }
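This hunk shows the new write path that replaces the raw_local_irq_save()/__raw_spin_lock() sequence throughout the tracers: reserve an event, fill it in, commit it. A stripped-down sketch of the same pattern for a hypothetical entry; example_trace_write() and TRACE_EXAMPLE are placeholders, not part of the patch:

	/* Sketch: the generic reserve/fill/commit sequence used by every tracer above. */
	static void example_trace_write(struct trace_array *tr)
	{
		struct ring_buffer_event *event;
		struct trace_entry *entry;
		unsigned long irq_flags;

		/* Reserve room for one entry; the ring buffer saves the irq state for us. */
		event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq_flags);
		if (!event)
			return;			/* reservation failed: silently drop the event */

		entry = ring_buffer_event_data(event);
		tracing_generic_entry_update(entry, 0);
		entry->type = TRACE_EXAMPLE;	/* placeholder entry type */
		/* ... fill in entry->field here ... */

		/* Publish the event and restore the saved irq state. */
		ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
	}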
@@ -23,7 +23,7 @@ static void function_reset(struct trace_array *tr)
 	tr->time_start = ftrace_now(tr->cpu);
 
 	for_each_online_cpu(cpu)
-		tracing_reset(tr->data[cpu]);
+		tracing_reset(tr, cpu);
 }
 
 static void start_function_trace(struct trace_array *tr)
@@ -173,7 +173,7 @@ out_unlock:
 out:
 	data->critical_sequence = max_sequence;
 	data->preempt_timestamp = ftrace_now(cpu);
-	tracing_reset(data);
+	tracing_reset(tr, cpu);
 	trace_function(tr, data, CALLER_ADDR0, parent_ip, flags);
 }
 
@@ -203,7 +203,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
 	data->critical_sequence = max_sequence;
 	data->preempt_timestamp = ftrace_now(cpu);
 	data->critical_start = parent_ip ? : ip;
-	tracing_reset(data);
+	tracing_reset(tr, cpu);
 
 	local_save_flags(flags);
 
@@ -234,7 +234,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
 
 	data = tr->data[cpu];
 
-	if (unlikely(!data) || unlikely(!head_page(data)) ||
+	if (unlikely(!data) ||
 	    !data->critical_start || atomic_read(&data->disabled))
 		return;
 
@@ -27,7 +27,7 @@ static void mmio_reset_data(struct trace_array *tr)
 	tr->time_start = ftrace_now(tr->cpu);
 
 	for_each_online_cpu(cpu)
-		tracing_reset(tr->data[cpu]);
+		tracing_reset(tr, cpu);
 }
 
 static void mmio_trace_init(struct trace_array *tr)
@@ -130,10 +130,14 @@ static unsigned long count_overruns(struct trace_iterator *iter)
 {
 	int cpu;
 	unsigned long cnt = 0;
+/* FIXME: */
+#if 0
 	for_each_online_cpu(cpu) {
 		cnt += iter->overrun[cpu];
 		iter->overrun[cpu] = 0;
 	}
+#endif
+	(void)cpu;
 	return cnt;
 }
 
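With the per-iterator overrun[] array gone, count_overruns() is stubbed out behind #if 0 for now. One way it could later be rewritten is to ask the ring buffer itself; ring_buffer_overruns() here is an assumed helper name, not something this patch introduces:

	/* Sketch (assumption): let the ring buffer report its own overrun count. */
	static unsigned long count_overruns(struct trace_iterator *iter)
	{
		return ring_buffer_overruns(iter->tr->buffer);
	}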
@@ -176,7 +180,7 @@ static int mmio_print_rw(struct trace_iterator *iter)
 	struct trace_entry *entry = iter->ent;
 	struct mmiotrace_rw *rw = &entry->field.mmiorw;
 	struct trace_seq *s = &iter->seq;
-	unsigned long long t = ns2usecs(entry->field.t);
+	unsigned long long t = ns2usecs(iter->ts);
 	unsigned long usec_rem = do_div(t, 1000000ULL);
 	unsigned secs = (unsigned long)t;
 	int ret = 1;
@@ -218,7 +222,7 @@ static int mmio_print_map(struct trace_iterator *iter)
 	struct trace_entry *entry = iter->ent;
 	struct mmiotrace_map *m = &entry->field.mmiomap;
 	struct trace_seq *s = &iter->seq;
-	unsigned long long t = ns2usecs(entry->field.t);
+	unsigned long long t = ns2usecs(iter->ts);
 	unsigned long usec_rem = do_div(t, 1000000ULL);
 	unsigned secs = (unsigned long)t;
 	int ret = 1;
@@ -250,7 +254,7 @@ static int mmio_print_mark(struct trace_iterator *iter)
 	struct trace_entry *entry = iter->ent;
 	const char *msg = entry->field.print.buf;
 	struct trace_seq *s = &iter->seq;
-	unsigned long long t = ns2usecs(entry->field.t);
+	unsigned long long t = ns2usecs(iter->ts);
 	unsigned long usec_rem = do_div(t, 1000000ULL);
 	unsigned secs = (unsigned long)t;
 	int ret;
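In the three print helpers above, the timestamp no longer comes from the entry (cycle_t t was removed from trace_field earlier in this diff); the ring buffer returns it next to each event and the iterator caches it in the new iter->ts. A sketch of where that value comes from during a consuming read; example_next_entry() is illustrative, and the real iterator plumbing is in the suppressed file:

	/* Sketch: the ring buffer hands back each event's timestamp by reference. */
	static struct trace_entry *example_next_entry(struct trace_iterator *iter, int cpu)
	{
		struct ring_buffer_event *event;
		u64 ts;

		event = ring_buffer_consume(iter->tr->buffer, cpu, &ts);
		if (!event)
			return NULL;

		iter->ts = ts;		/* this is what ns2usecs(iter->ts) prints above */
		return ring_buffer_event_data(event);
	}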
@@ -303,19 +307,19 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 				struct trace_array_cpu *data,
 				struct mmiotrace_rw *rw)
 {
+	struct ring_buffer_event *event;
 	struct trace_entry *entry;
 	unsigned long irq_flags;
 
-	raw_local_irq_save(irq_flags);
-	__raw_spin_lock(&data->lock);
-	entry = tracing_get_trace_entry(tr, data);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
+					 &irq_flags);
+	if (!event)
+		return;
+	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(entry, 0);
 	entry->type = TRACE_MMIO_RW;
 	entry->field.mmiorw = *rw;
+	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-	__raw_spin_unlock(&data->lock);
-	raw_local_irq_restore(irq_flags);
 
 	trace_wake_up();
 }
@@ -331,19 +335,19 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
 				struct trace_array_cpu *data,
 				struct mmiotrace_map *map)
 {
+	struct ring_buffer_event *event;
 	struct trace_entry *entry;
 	unsigned long irq_flags;
 
-	raw_local_irq_save(irq_flags);
-	__raw_spin_lock(&data->lock);
-	entry = tracing_get_trace_entry(tr, data);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
+					 &irq_flags);
+	if (!event)
+		return;
+	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(entry, 0);
 	entry->type = TRACE_MMIO_MAP;
 	entry->field.mmiomap = *map;
+	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-	__raw_spin_unlock(&data->lock);
-	raw_local_irq_restore(irq_flags);
 
 	trace_wake_up();
 }
@@ -30,7 +30,7 @@ static void nop_trace_init(struct trace_array *tr)
 	ctx_trace = tr;
 
 	for_each_online_cpu(cpu)
-		tracing_reset(tr->data[cpu]);
+		tracing_reset(tr, cpu);
 
 	if (tr->ctrl)
 		start_nop_trace(tr);
@@ -81,7 +81,7 @@ static void sched_switch_reset(struct trace_array *tr)
 	tr->time_start = ftrace_now(tr->cpu);
 
 	for_each_online_cpu(cpu)
-		tracing_reset(tr->data[cpu]);
+		tracing_reset(tr, cpu);
 }
 
 static int tracing_sched_register(void)
@@ -191,7 +191,7 @@ static void __wakeup_reset(struct trace_array *tr)
 
 	for_each_possible_cpu(cpu) {
 		data = tr->data[cpu];
-		tracing_reset(data);
+		tracing_reset(tr, cpu);
 	}
 
 	wakeup_cpu = -1;
@@ -18,58 +18,20 @@ static inline int trace_valid_entry(struct trace_entry *entry)
 	return 0;
 }
 
-static int
-trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
+static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
 {
-	struct trace_entry *entries;
-	struct page *page;
-	int idx = 0;
-	int i;
+	struct ring_buffer_event *event;
+	struct trace_entry *entry;
 
-	BUG_ON(list_empty(&data->trace_pages));
-	page = list_entry(data->trace_pages.next, struct page, lru);
-	entries = page_address(page);
+	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
+		entry = ring_buffer_event_data(event);
 
-	check_pages(data);
-	if (head_page(data) != entries)
-		goto failed;
-
-	/*
-	 * The starting trace buffer always has valid elements,
-	 * if any element exists.
-	 */
-	entries = head_page(data);
-
-	for (i = 0; i < tr->entries; i++) {
-
-		if (i < data->trace_idx && !trace_valid_entry(&entries[idx])) {
+		if (!trace_valid_entry(entry)) {
 			printk(KERN_CONT ".. invalid entry %d ",
-				entries[idx].type);
+				entry->type);
 			goto failed;
 		}
-
-		idx++;
-		if (idx >= ENTRIES_PER_PAGE) {
-			page = virt_to_page(entries);
-			if (page->lru.next == &data->trace_pages) {
-				if (i != tr->entries - 1) {
-					printk(KERN_CONT ".. entries buffer mismatch");
-					goto failed;
-				}
-			} else {
-				page = list_entry(page->lru.next, struct page, lru);
-				entries = page_address(page);
-			}
-			idx = 0;
-		}
 	}
-
-	page = virt_to_page(entries);
-	if (page->lru.next != &data->trace_pages) {
-		printk(KERN_CONT ".. too many entries");
-		goto failed;
-	}
-
 	return 0;
 
 failed:
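ring_buffer_consume() is a destructive read, which suits the selftest: each event it returns is removed from the buffer. The buffer_iter[NR_CPUS] array added to trace_iterator earlier in this diff exists for the other mode, non-destructive iteration. A sketch of that style of loop, assuming the read_start/read/read_finish iterator helpers of the new ring buffer API:

	/* Sketch (assumed helpers): walk one CPU's events without consuming them. */
	static void example_walk_cpu(struct trace_array *tr, int cpu)
	{
		struct ring_buffer_iter *iter;
		struct ring_buffer_event *event;
		struct trace_entry *entry;
		u64 ts;

		iter = ring_buffer_read_start(tr->buffer, cpu);
		if (!iter)
			return;

		while ((event = ring_buffer_read(iter, &ts))) {
			entry = ring_buffer_event_data(event);
			printk(KERN_DEBUG "type=%d ts=%llu\n",
			       entry->type, (unsigned long long)ts);
		}

		ring_buffer_read_finish(iter);
	}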
@@ -91,13 +53,11 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 	/* Don't allow flipping of max traces now */
 	raw_local_irq_save(flags);
 	__raw_spin_lock(&ftrace_max_lock);
 
+	cnt = ring_buffer_entries(tr->buffer);
+
 	for_each_possible_cpu(cpu) {
-		if (!head_page(tr->data[cpu]))
-			continue;
-
-		cnt += tr->data[cpu]->trace_idx;
-
-		ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
+		ret = trace_test_buffer_cpu(tr, cpu);
 		if (ret)
 			break;
 	}
@@ -241,7 +241,7 @@ static void stack_reset(struct trace_array *tr)
 	tr->time_start = ftrace_now(tr->cpu);
 
 	for_each_online_cpu(cpu)
-		tracing_reset(tr->data[cpu]);
+		tracing_reset(tr, cpu);
 }
 
 static void start_stack_trace(struct trace_array *tr)