ftrace: add basic support for gcc profiler instrumentation
If CONFIG_FTRACE is selected and /proc/sys/kernel/ftrace_enabled is set
to a non-zero value, the ftrace routine will be called every time we
enter a kernel function that is not marked with the "notrace" attribute.
The ftrace routine will then call a registered function if a function
happens to be registered.

[ This code has been highly hacked by Steven Rostedt and Ingo Molnar,
  so don't blame Arnaldo for all of this ;-) ]

Update:
It is now possible to register more than one ftrace function. If only
one ftrace function is registered, that will be the function that
ftrace calls directly. If more than one function is registered, then
ftrace will call a function that will loop through the functions to
call.

Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit 16444a8a40
parent 6e766410c4
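For illustration (a minimal sketch, not part of this patch; the module and
callback names are made up), a tracer built on this interface fills in a
struct ftrace_ops and registers it, after which its callback runs on every
traced function entry:

/* Hypothetical user of the new interface; not part of this patch.
 * The callback and anything it calls must be marked notrace, or the
 * mcount hook would recurse into the tracer itself. */
#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/module.h>

static unsigned long nr_hits;

static notrace void count_calls(unsigned long ip, unsigned long parent_ip)
{
        /* ip is the traced function, parent_ip is its call site */
        nr_hits++;
}

static struct ftrace_ops count_ops __read_mostly = {
        .func = count_calls,
};

static int __init count_init(void)
{
        return register_ftrace_function(&count_ops);
}

static void __exit count_exit(void)
{
        unregister_ftrace_function(&count_ops);
}

module_init(count_init);
module_exit(count_exit);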
Makefile (4 changes)

@@ -528,6 +528,10 @@ KBUILD_CFLAGS += -g
KBUILD_AFLAGS += -gdwarf-2
endif

ifdef CONFIG_FTRACE
KBUILD_CFLAGS += -pg
endif

# We trigger additional mismatches with less inlining
ifdef CONFIG_DEBUG_SECTION_MISMATCH
KBUILD_CFLAGS += $(call cc-option, -fno-inline-functions-called-once)
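For background: -pg makes gcc emit a call to mcount() at the entry of every
function it compiles, and the "notrace" attribute mentioned in the changelog
suppresses that call for code that must never be traced. A rough sketch of
what that marker amounts to (the kernel's actual definition lives outside
this patch):

/* Sketch only; the kernel defines this elsewhere.  gcc's
 * no_instrument_function attribute tells the compiler not to emit the
 * profiling call that -pg would otherwise insert at function entry. */
#define notrace __attribute__((no_instrument_function))

void traced(void)             /* built with -pg: entry starts with "call mcount" */
{
}

notrace void not_traced(void) /* no mcount call is emitted for this one */
{
}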
arch/x86/Kconfig

@@ -23,6 +23,7 @@ config X86
        select HAVE_OPROFILE
        select HAVE_KPROBES
        select HAVE_KRETPROBES
        select HAVE_FTRACE
        select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
        select HAVE_ARCH_KGDB if !X86_VOYAGER
arch/x86/kernel/entry_32.S

@@ -1109,6 +1109,33 @@ ENDPROC(xen_failsafe_callback)

#endif /* CONFIG_XEN */

#ifdef CONFIG_FTRACE
ENTRY(mcount)
        cmpl $ftrace_stub, ftrace_trace_function
        jnz trace

.globl ftrace_stub
ftrace_stub:
        ret

        /* taken from glibc */
trace:
        pushl %eax
        pushl %ecx
        pushl %edx
        movl 0xc(%esp), %eax
        movl 0x4(%ebp), %edx

        call *ftrace_trace_function

        popl %edx
        popl %ecx
        popl %eax

        jmp ftrace_stub
END(mcount)
#endif

.section .rodata,"a"
#include "syscall_table_32.S"
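In C terms, the 32-bit stub above roughly does the following (illustration
only, assuming <linux/ftrace.h> from this patch): after the three pushes,
0xc(%esp) is mcount's own return address, an address inside the traced
function, and 0x4(%ebp) is that function's return address, i.e. its caller.

/* Rough C equivalent of the mcount stub above (sketch, not in the patch). */
#include <linux/ftrace.h>

void mcount_equivalent(unsigned long self_addr, unsigned long caller_addr)
{
        if (ftrace_trace_function != ftrace_stub)
                ftrace_trace_function(self_addr, caller_addr);
        /* the real stub then returns straight into the traced function */
}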
arch/x86/kernel/entry_64.S

@@ -54,6 +54,43 @@

        .code64

#ifdef CONFIG_FTRACE
ENTRY(mcount)
        cmpq $ftrace_stub, ftrace_trace_function
        jnz trace
.globl ftrace_stub
ftrace_stub:
        retq

trace:
        /* taken from glibc */
        subq $0x38, %rsp
        movq %rax, (%rsp)
        movq %rcx, 8(%rsp)
        movq %rdx, 16(%rsp)
        movq %rsi, 24(%rsp)
        movq %rdi, 32(%rsp)
        movq %r8, 40(%rsp)
        movq %r9, 48(%rsp)

        movq 0x38(%rsp), %rdi
        movq 8(%rbp), %rsi

        call *ftrace_trace_function

        movq 48(%rsp), %r9
        movq 40(%rsp), %r8
        movq 32(%rsp), %rdi
        movq 24(%rsp), %rsi
        movq 16(%rsp), %rdx
        movq 8(%rsp), %rcx
        movq (%rsp), %rax
        addq $0x38, %rsp

        jmp ftrace_stub
END(mcount)
#endif

#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif
include/linux/ftrace.h (new file, 38 lines)

@@ -0,0 +1,38 @@
#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#ifdef CONFIG_FTRACE

#include <linux/linkage.h>

#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
#define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
#define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);

struct ftrace_ops {
        ftrace_func_t func;
        struct ftrace_ops *next;
};

/*
 * The ftrace_ops must be static and should also be read_mostly.
 * These functions do modify read_mostly variables, so use them
 * sparingly.  Never free an ftrace_ops or modify the next pointer
 * after it has been registered; even after unregistering it, the
 * next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
void clear_ftrace_function(void);

extern void ftrace_stub(unsigned long a0, unsigned long a1);
extern void mcount(void);

#else /* !CONFIG_FTRACE */
# define register_ftrace_function(ops) do { } while (0)
# define unregister_ftrace_function(ops) do { } while (0)
# define clear_ftrace_function(ops) do { } while (0)
#endif /* CONFIG_FTRACE */
#endif /* _LINUX_FTRACE_H */
kernel/Makefile

@@ -69,6 +69,7 @@ obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o
obj-$(CONFIG_MARKERS) += marker.o
obj-$(CONFIG_LATENCYTOP) += latencytop.o
obj-$(CONFIG_FTRACE) += trace/

ifneq ($(CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER),y)
# According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
kernel/trace/Kconfig (new file, 5 lines)

@@ -0,0 +1,5 @@
#
# Architectures that offer an FTRACE implementation should select HAVE_FTRACE:
#
config HAVE_FTRACE
        bool
kernel/trace/Makefile (new file, 3 lines)

@@ -0,0 +1,3 @@
obj-$(CONFIG_FTRACE) += libftrace.o

libftrace-y := ftrace.o
kernel/trace/ftrace.c (new file, 138 lines)

@@ -0,0 +1,138 @@
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/module.h>
#include <linux/ftrace.h>

static DEFINE_SPINLOCK(ftrace_func_lock);
static struct ftrace_ops ftrace_list_end __read_mostly =
{
        .func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

/* mcount is defined per arch in assembly */
EXPORT_SYMBOL(mcount);

notrace void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
        struct ftrace_ops *op = ftrace_list;

        /* in case someone actually ports this to alpha! */
        read_barrier_depends();

        while (op != &ftrace_list_end) {
                /* silly alpha */
                read_barrier_depends();
                op->func(ip, parent_ip);
                op = op->next;
        }
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
        unsigned long flags;

        spin_lock_irqsave(&ftrace_func_lock, flags);
        ops->next = ftrace_list;
        /*
         * We are entering ops into the ftrace_list but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer included into the ftrace_list.
         */
        smp_wmb();
        ftrace_list = ops;
        /*
         * For one func, simply call it directly.
         * For more than one func, call the chain.
         */
        if (ops->next == &ftrace_list_end)
                ftrace_trace_function = ops->func;
        else
                ftrace_trace_function = ftrace_list_func;
        spin_unlock_irqrestore(&ftrace_func_lock, flags);

        return 0;
}

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
        unsigned long flags;
        struct ftrace_ops **p;
        int ret = 0;

        spin_lock_irqsave(&ftrace_func_lock, flags);

        /*
         * If we are the only function, then the ftrace pointer is
         * pointing directly to that function.
         */
        if (ftrace_list == ops && ops->next == &ftrace_list_end) {
                ftrace_trace_function = ftrace_stub;
                ftrace_list = &ftrace_list_end;
                goto out;
        }

        for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;

        if (*p != ops) {
                ret = -1;
                goto out;
        }

        *p = (*p)->next;

        /* If we only have one func left, then call that directly */
        if (ftrace_list->next == &ftrace_list_end)
                ftrace_trace_function = ftrace_list->func;

 out:
        spin_unlock_irqrestore(&ftrace_func_lock, flags);

        return ret;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops tracing.
 * There may be a lag before all CPUs stop calling the previously
 * registered functions.
 */
void clear_ftrace_function(void)
{
        ftrace_trace_function = ftrace_stub;
}
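To make the dispatch policy above concrete, here is a small stand-alone
user-space sketch (plain C, not kernel code; every name is invented) of the
same idea: with one callback registered, the global function pointer calls
it directly, and only with two or more does it point at a list-walking
dispatcher.

/* Stand-alone user-space sketch (not kernel code, names made up) of the
 * dispatch policy implemented above: one callback is called through the
 * global pointer directly, two or more go through a list walker. */
#include <stdio.h>

typedef void (*trace_fn)(unsigned long ip, unsigned long parent_ip);

struct ops { trace_fn func; struct ops *next; };

static void stub(unsigned long ip, unsigned long parent_ip) { }

static struct ops list_end = { .func = stub };
static struct ops *list = &list_end;
static trace_fn trace = stub;            /* plays the role of ftrace_trace_function */

static void list_func(unsigned long ip, unsigned long parent_ip)
{
        for (struct ops *op = list; op != &list_end; op = op->next)
                op->func(ip, parent_ip); /* walk the chain, newest first */
}

static void add(struct ops *ops)         /* like register_ftrace_function() */
{
        ops->next = list;
        list = ops;
        trace = (ops->next == &list_end) ? ops->func : list_func;
}

static void hook_a(unsigned long ip, unsigned long parent_ip) { printf("A: %lu\n", ip); }
static void hook_b(unsigned long ip, unsigned long parent_ip) { printf("B: %lu\n", ip); }

static struct ops a = { .func = hook_a }, b = { .func = hook_b };

int main(void)
{
        trace(1, 2);   /* nothing registered: hits the stub */
        add(&a);
        trace(1, 2);   /* one callback: hook_a is called directly */
        add(&b);
        trace(1, 2);   /* two callbacks: list_func runs hook_b, then hook_a */
        return 0;
}

The kernel version additionally needs the smp_wmb()/read_barrier_depends()
pairing seen above because the list may be walked from any CPU while it is
being modified.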
lib/Kconfig.debug

@@ -634,6 +634,8 @@ config LATENCYTOP
          Enable this option if you want to use the LatencyTOP tool
          to find out which userspace is blocking on what kernel operations.

source kernel/trace/Kconfig

config PROVIDE_OHCI1394_DMA_INIT
        bool "Remote debugging over FireWire early on boot"
        depends on PCI && X86