linux/kernel/trace/trace_kdb.c
Douglas Anderson 03197fc02b tracing: kdb: Allow ftdump to skip all but the last few entries
The 'ftdump' command in kdb is currently a bit of a last resort, at
least if you have lots of traces turned on.  It's going to print a
whole boatload of data out of your serial port, which is probably
running at 115200.  This could easily take many, many minutes.

Usually you're most interested in what's at the _end_ of the ftrace
buffer, AKA what happened most recently.  That means you've got to
wait the full time for the dump.  The 'ftdump' command does attempt to
help you a little bit by allowing you to skip a fixed number of
entries.  Unfortunately it provides no way for you to know how many
entries you should skip.

Let's do something similar to Python and allow you to use a negative
number to indicate that you want to skip all entries except the last
few.  This lets you quickly see what you want.

Note that we also change the printout in ftdump to print the
(positive) number of entries actually skipped, since that is helpful
to know when you've specified a negative skip count.
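
To make the conversion concrete, here is a small standalone sketch of
the arithmetic (plain C; resolve_skip() and its parameters are
illustrative names rather than the kernel's, which does the same thing
in kdb_ftdump() with trace_total_entries()/trace_total_entries_cpu()
and max()):

#include <stdio.h>

/*
 * Illustrative sketch only (not a kernel helper): a negative skip
 * means "keep only the last -skip entries", so turn it into a
 * positive count of entries to throw away, clamped at zero in case
 * the caller asked for more entries than the buffer holds.
 */
static int resolve_skip(int skip, int total)
{
	if (skip < 0) {
		skip += total;
		if (skip < 0)
			skip = 0;
	}
	return skip;
}

int main(void)
{
	printf("%d\n", resolve_skip(-5, 1000));    /* 995: dump the last 5 entries   */
	printf("%d\n", resolve_skip(-2000, 1000)); /* 0: asked for more than we have */
	printf("%d\n", resolve_skip(100, 1000));   /* 100: positive skips unchanged  */
	return 0;
}

With that in place, 'ftdump -5' at the kdb prompt dumps only the last
5 entries of the trace buffer, and 'ftdump -5 2' only the last 5
entries recorded on CPU 2.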

Link: http://lkml.kernel.org/r/20190319171206.97107-3-dianders@chromium.org

Signed-off-by: Douglas Anderson <dianders@chromium.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
2019-05-02 21:32:55 -04:00

// SPDX-License-Identifier: GPL-2.0
/*
 * kdb helper for dumping the ftrace buffer
 *
 * Copyright (C) 2010 Jason Wessel <jason.wessel@windriver.com>
 *
 * ftrace_dump_buf based on ftrace_dump:
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 */
#include <linux/init.h>
#include <linux/kgdb.h>
#include <linux/kdb.h>
#include <linux/ftrace.h>

#include "trace.h"
#include "trace_output.h"

static struct trace_iterator iter;
static struct ring_buffer_iter *buffer_iter[CONFIG_NR_CPUS];

static void ftrace_dump_buf(int skip_entries, long cpu_file)
{
	struct trace_array *tr;
	unsigned int old_userobj;
	int cnt = 0, cpu;

	tr = iter.tr;
	old_userobj = tr->trace_flags;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	kdb_printf("Dumping ftrace buffer:\n");
	if (skip_entries)
		kdb_printf("(skipping %d entries)\n", skip_entries);

	/* reset all but tr, trace, and overruns */
	memset(&iter.seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
	iter.iter_flags |= TRACE_FILE_LAT_FMT;
	iter.pos = -1;

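	/* Set up a ring buffer read iterator for each CPU being dumped */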
	if (cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter.buffer_iter[cpu] =
			ring_buffer_read_prepare(iter.trace_buffer->buffer,
						 cpu, GFP_ATOMIC);
			ring_buffer_read_start(iter.buffer_iter[cpu]);
			tracing_iter_reset(&iter, cpu);
		}
	} else {
		iter.cpu_file = cpu_file;
		iter.buffer_iter[cpu_file] =
			ring_buffer_read_prepare(iter.trace_buffer->buffer,
						 cpu_file, GFP_ATOMIC);
		ring_buffer_read_start(iter.buffer_iter[cpu_file]);
		tracing_iter_reset(&iter, cpu_file);
	}

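	/* Consume and discard the first skip_entries entries, print the rest */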
	while (trace_find_next_entry_inc(&iter)) {
		if (!cnt)
			kdb_printf("---------------------------------\n");
		cnt++;

		if (!skip_entries) {
			print_trace_line(&iter);
			trace_printk_seq(&iter.seq);
		} else {
			skip_entries--;
		}

		if (KDB_FLAG(CMD_INTERRUPT))
			goto out;
	}

	if (!cnt)
		kdb_printf(" (ftrace buffer empty)\n");
	else
		kdb_printf("---------------------------------\n");

out:
	tr->trace_flags = old_userobj;

	for_each_tracing_cpu(cpu) {
		if (iter.buffer_iter[cpu]) {
			ring_buffer_read_finish(iter.buffer_iter[cpu]);
			iter.buffer_iter[cpu] = NULL;
		}
	}
}

/*
 * kdb_ftdump - Dump the ftrace log buffer
 */
static int kdb_ftdump(int argc, const char **argv)
{
	int skip_entries = 0;
	long cpu_file;
	char *cp;
	int cnt;
	int cpu;

	if (argc > 2)
		return KDB_ARGCOUNT;

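	/* First argument (optional): entries to skip, negative to keep only the last ones */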
	if (argc) {
		skip_entries = simple_strtol(argv[1], &cp, 0);
		if (*cp)
			skip_entries = 0;
	}

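	/* Second argument (optional): restrict the dump to a single online CPU */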
	if (argc == 2) {
		cpu_file = simple_strtol(argv[2], &cp, 0);
		if (*cp || cpu_file >= NR_CPUS || cpu_file < 0 ||
		    !cpu_online(cpu_file))
			return KDB_BADINT;
	} else {
		cpu_file = RING_BUFFER_ALL_CPUS;
	}

	kdb_trap_printk++;

	trace_init_global_iter(&iter);
	iter.buffer_iter = buffer_iter;

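	/* Keep the per-CPU buffers from recording new entries while we dump */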
	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	/* A negative skip_entries means skip all but the last entries */
	if (skip_entries < 0) {
		if (cpu_file == RING_BUFFER_ALL_CPUS)
			cnt = trace_total_entries(NULL);
		else
			cnt = trace_total_entries_cpu(NULL, cpu_file);
		skip_entries = max(cnt + skip_entries, 0);
	}

	ftrace_dump_buf(skip_entries, cpu_file);

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	kdb_trap_printk--;

	return 0;
}

static __init int kdb_ftrace_register(void)
{
	kdb_register_flags("ftdump", kdb_ftdump, "[skip_#entries] [cpu]",
			   "Dump ftrace log; -skip dumps last #entries", 0,
			   KDB_ENABLE_ALWAYS_SAFE);
	return 0;
}

late_initcall(kdb_ftrace_register);