mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-11 04:18:39 +08:00
tracing: Always use canonical ftrace path
The canonical location for the tracefs filesystem is at /sys/kernel/tracing. But, from Documentation/trace/ftrace.rst: Before 4.1, all ftrace tracing control files were within the debugfs file system, which is typically located at /sys/kernel/debug/tracing. For backward compatibility, when mounting the debugfs file system, the tracefs file system will be automatically mounted at: /sys/kernel/debug/tracing Many comments and Kconfig help messages in the tracing code still refer to this older debugfs path, so let's update them to avoid confusion. Link: https://lore.kernel.org/linux-trace-kernel/20230215223350.2658616-2-zwisler@google.com Acked-by: Masami Hiramatsu (Google) <mhiramat@kernel.org> Reviewed-by: Mukesh Ojha <quic_mojha@quicinc.com> Signed-off-by: Ross Zwisler <zwisler@google.com> Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
This commit is contained in:
parent
d8f0ae3ebe
commit
2455f0e124
@@ -297,7 +297,7 @@ bool mac_pton(const char *s, u8 *mac);
 *
 * Use tracing_on/tracing_off when you want to quickly turn on or off
 * tracing. It simply enables or disables the recording of the trace events.
-* This also corresponds to the user space /sys/kernel/debug/tracing/tracing_on
+* This also corresponds to the user space /sys/kernel/tracing/tracing_on
 * file, which gives a means for the kernel and userspace to interact.
 * Place a tracing_off() in the kernel where you want tracing to end.
 * From user space, examine the trace, and then echo 1 > tracing_on
@@ -471,7 +471,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
 * *	This is how the trace record is structured and will
 * *	be saved into the ring buffer. These are the fields
 * *	that will be exposed to user-space in
-* *	/sys/kernel/debug/tracing/events/<*>/format.
+* *	/sys/kernel/tracing/events/<*>/format.
 * *
 * *	The declared 'local variable' is called '__entry'
 * *
@@ -531,7 +531,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
 * tracepoint callback (this is used by programmatic plugins and
 * can also by used by generic instrumentation like SystemTap), and
 * it is also used to expose a structured trace record in
-* /sys/kernel/debug/tracing/events/.
+* /sys/kernel/tracing/events/.
 *
 * A set of (un)registration functions can be passed to the variant
 * TRACE_EVENT_FN to perform any (un)registration work.
@@ -239,7 +239,7 @@ config DYNAMIC_FTRACE
	  enabled, and the functions not enabled will not affect
	  performance of the system.

-	  See the files in /sys/kernel/debug/tracing:
+	  See the files in /sys/kernel/tracing:
	    available_filter_functions
	    set_ftrace_filter
	    set_ftrace_notrace
@@ -299,7 +299,7 @@ config STACK_TRACER
	select KALLSYMS
	help
	  This special tracer records the maximum stack footprint of the
-	  kernel and displays it in /sys/kernel/debug/tracing/stack_trace.
+	  kernel and displays it in /sys/kernel/tracing/stack_trace.

	  This tracer works by hooking into every function call that the
	  kernel executes, and keeping a maximum stack depth value and
@@ -339,7 +339,7 @@ config IRQSOFF_TRACER
	  disabled by default and can be runtime (re-)started
	  via:

-	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
+	      echo 0 > /sys/kernel/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the preempt-off timing option can be
@@ -363,7 +363,7 @@ config PREEMPT_TRACER
	  disabled by default and can be runtime (re-)started
	  via:

-	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
+	      echo 0 > /sys/kernel/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the irqs-off timing option can be
@@ -515,7 +515,7 @@ config TRACER_SNAPSHOT
	  Allow tracing users to take snapshot of the current buffer using the
	  ftrace interface, e.g.:

-	      echo 1 > /sys/kernel/debug/tracing/snapshot
+	      echo 1 > /sys/kernel/tracing/snapshot
	      cat snapshot

 config TRACER_SNAPSHOT_PER_CPU_SWAP
@@ -527,7 +527,7 @@ config TRACER_SNAPSHOT_PER_CPU_SWAP
	  full swap (all buffers). If this is set, then the following is
	  allowed:

-	      echo 1 > /sys/kernel/debug/tracing/per_cpu/cpu2/snapshot
+	      echo 1 > /sys/kernel/tracing/per_cpu/cpu2/snapshot

	  After which, only the tracing buffer for CPU 2 was swapped with
	  the main tracing buffer, and the other CPU buffers remain the same.
@@ -574,7 +574,7 @@ config PROFILE_ANNOTATED_BRANCHES
	  This tracer profiles all likely and unlikely macros
	  in the kernel. It will display the results in:

-	  /sys/kernel/debug/tracing/trace_stat/branch_annotated
+	  /sys/kernel/tracing/trace_stat/branch_annotated

	  Note: this will add a significant overhead; only turn this
	  on if you need to profile the system's use of these macros.
@@ -587,7 +587,7 @@ config PROFILE_ALL_BRANCHES
	  taken in the kernel is recorded whether it hit or miss.
	  The results will be displayed in:

-	  /sys/kernel/debug/tracing/trace_stat/branch_all
+	  /sys/kernel/tracing/trace_stat/branch_all

	  This option also enables the likely/unlikely profiler.

@@ -638,8 +638,8 @@ config BLK_DEV_IO_TRACE
	  Tracing also is possible using the ftrace interface, e.g.:

	    echo 1 > /sys/block/sda/sda1/trace/enable
-	    echo blk > /sys/kernel/debug/tracing/current_tracer
-	    cat /sys/kernel/debug/tracing/trace_pipe
+	    echo blk > /sys/kernel/tracing/current_tracer
+	    cat /sys/kernel/tracing/trace_pipe

	  If unsure, say N.

@@ -21,7 +21,7 @@
 * Then:
 *
 * # insmod kernel/trace/kprobe_event_gen_test.ko
- * # cat /sys/kernel/debug/tracing/trace
+ * # cat /sys/kernel/tracing/trace
 *
 * You should see many instances of the "gen_kprobe_test" and
 * "gen_kretprobe_test" events in the trace buffer.
@@ -2886,7 +2886,7 @@ rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
		  sched_clock_stable() ? "" :
		  "If you just came from a suspend/resume,\n"
		  "please switch to the trace global clock:\n"
-		  "  echo global > /sys/kernel/debug/tracing/trace_clock\n"
+		  "  echo global > /sys/kernel/tracing/trace_clock\n"
		  "or add trace_clock=global to the kernel command line\n");
 }

@@ -22,7 +22,7 @@
 * Then:
 *
 * # insmod kernel/trace/synth_event_gen_test.ko
- * # cat /sys/kernel/debug/tracing/trace
+ * # cat /sys/kernel/tracing/trace
 *
 * You should see several events in the trace buffer -
 * "create_synth_test", "empty_synth_test", and several instances of
@@ -1187,7 +1187,7 @@ void tracing_snapshot_instance(struct trace_array *tr)
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
- * with: echo 1 > /sys/kernel/debug/tracing/snapshot
+ * with: echo 1 > /sys/kernel/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
@@ -23,8 +23,8 @@
 #endif

 /* Assumes debugfs is mounted */
-const char *data_file = "/sys/kernel/debug/tracing/user_events_data";
-const char *status_file = "/sys/kernel/debug/tracing/user_events_status";
+const char *data_file = "/sys/kernel/tracing/user_events_data";
+const char *status_file = "/sys/kernel/tracing/user_events_status";

 static int event_status(long **status)
 {
@@ -12,9 +12,9 @@ calls. Only the functions's names and the call time are provided.

 Usage:
	Be sure that you have CONFIG_FUNCTION_TRACER
-	# mount -t debugfs nodev /sys/kernel/debug
-	# echo function > /sys/kernel/debug/tracing/current_tracer
-	$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
+	# mount -t tracefs nodev /sys/kernel/tracing
+	# echo function > /sys/kernel/tracing/current_tracer
+	$ cat /sys/kernel/tracing/trace_pipe > ~/raw_trace_func
	Wait some times but not too much, the script is a bit slow.
	Break the pipe (Ctrl + Z)
	$ scripts/tracing/draw_functrace.py < ~/raw_trace_func > draw_functrace
@@ -14,8 +14,8 @@
 #include "tracing_path.h"

 static char tracing_mnt[PATH_MAX] = "/sys/kernel/debug";
-static char tracing_path[PATH_MAX] = "/sys/kernel/debug/tracing";
-static char tracing_events_path[PATH_MAX] = "/sys/kernel/debug/tracing/events";
+static char tracing_path[PATH_MAX] = "/sys/kernel/tracing";
+static char tracing_events_path[PATH_MAX] = "/sys/kernel/tracing/events";

 static void __tracing_path_set(const char *tracing, const char *mountpoint)
 {
@@ -1584,7 +1584,7 @@ static void *do_printloop(void *arg)
	/*
	 * Toss a coin to decide if we want to sleep before printing
	 * out the backtrace. The reason for this is that opening
-	 * /sys/kernel/debug/tracing/trace will cause a blackout of
+	 * /sys/kernel/tracing/trace will cause a blackout of
	 * hundreds of ms, where no latencies will be noted by the
	 * latency tracer. Thus by randomly sleeping we try to avoid
	 * missing traces systematically due to this. With this option
Loading…
Reference in New Issue
Block a user