Merge tag 'trace-v4.12' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing updates from Steven Rostedt:
 "New features for this release:

  - Pretty much a full rewrite of the processing of function plugins,
    e.g. echo do_IRQ:stacktrace > set_ftrace_filter

  - The rewrite was needed to allow plugins to be unique to tracing
    instances, e.g. mkdir instances/foo; cd instances/foo;
    echo do_IRQ:stacktrace > set_ftrace_filter

    The old way was written in a very hacky manner; this removes a lot
    of those hacks.

  - New "function-fork" tracing option. When set, pids in
    set_ftrace_pid will have their children added when the processes
    listed in the set_ftrace_pid file fork.

  - Exposure of "maxactive" for kretprobes in kprobe_events

  - Allow built-in init functions to be traced by the function tracer
    (via the kernel command line). Module init function tracing will
    come in the next release.

  - Added more selftests, and have selftests also test in an instance"

* tag 'trace-v4.12' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (60 commits)
  ring-buffer: Return reader page back into existing ring buffer
  selftests: ftrace: Allow some event trigger tests to run in an instance
  selftests: ftrace: Have some basic tests run in a tracing instance too
  selftests: ftrace: Have event tests also run in an tracing instance
  selftests: ftrace: Make func_event_triggers and func_traceonoff_triggers tests do instances
  selftests: ftrace: Allow some tests to be run in a tracing instance
  tracing/ftrace: Allow for instances to trigger their own stacktrace probes
  tracing/ftrace: Allow for the traceonoff probe be unique to instances
  tracing/ftrace: Enable snapshot function trigger to work with instances
  tracing/ftrace: Allow instances to have their own function probes
  tracing/ftrace: Add a better way to pass data via the probe functions
  ftrace: Dynamically create the probe ftrace_ops for the trace_array
  tracing: Pass the trace_array into ftrace_probe_ops functions
  tracing: Have the trace_array hold the list of registered func probes
  ftrace: If the hash for a probe fails to update then free what was initialized
  ftrace: Have the function probes call their own function
  ftrace: Have each function probe use its own ftrace_ops
  ftrace: Have unregister_ftrace_function_probe_func() return a value
  ftrace: Add helper function ftrace_hash_move_and_update_ops()
  ftrace: Remove data field from ftrace_func_probe structure
  ...
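As a usage sketch (the commands below are illustrative, not part of the commit message), the new function-fork option pairs with set_ftrace_pid roughly as follows:

  # trace this shell and, with function-fork set, any children it forks
  echo $$ > /sys/kernel/debug/tracing/set_ftrace_pid
  echo 1 > /sys/kernel/debug/tracing/options/function-fork
  echo function > /sys/kernel/debug/tracing/current_tracer

Tracing of built-in init functions is instead requested at boot, e.g. by adding ftrace=function to the kernel command line.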
commit 4c174688ee
@@ -24,7 +24,7 @@ current_tracer. Instead of that, add probe points via
 Synopsis of kprobe_events
 -------------------------
   p[:[GRP/]EVENT] [MOD:]SYM[+offs]|MEMADDR [FETCHARGS]  : Set a probe
-  r[:[GRP/]EVENT] [MOD:]SYM[+0] [FETCHARGS]             : Set a return probe
+  r[MAXACTIVE][:[GRP/]EVENT] [MOD:]SYM[+0] [FETCHARGS]  : Set a return probe
   -:[GRP/]EVENT                                         : Clear a probe
 
  GRP            : Group name. If omitted, use "kprobes" for it.
@@ -33,6 +33,9 @@ Synopsis of kprobe_events
  MOD            : Module name which has given SYM.
  SYM[+offs]     : Symbol+offset where the probe is inserted.
  MEMADDR        : Address where the probe is inserted.
+ MAXACTIVE      : Maximum number of instances of the specified function that
+                  can be probed simultaneously, or 0 for the default value
+                  as defined in Documentation/kprobes.txt section 1.3.1.
 
  FETCHARGS      : Arguments. Each probe can have up to 128 args.
   %REG          : Fetch register REG
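As a usage sketch of the new MAXACTIVE field (the event and symbol names below are made up for illustration), a return probe allowing up to 16 concurrent instances of the probed function could be defined with:

  echo 'r16:myretprobe do_sys_open $retval' >> /sys/kernel/debug/tracing/kprobe_events

A plain r:EVENT definition keeps the default described above.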
@@ -533,7 +533,13 @@ static void do_sync_core(void *data)
 
 static void run_sync(void)
 {
-        int enable_irqs = irqs_disabled();
+        int enable_irqs;
+
+        /* No need to sync if there's only one CPU */
+        if (num_online_cpus() == 1)
+                return;
+
+        enable_irqs = irqs_disabled();
 
         /* We may be called with interrupts disabled (on bootup). */
         if (enable_irqs)
@@ -42,8 +42,10 @@
 /* Main tracing buffer and events set up */
 #ifdef CONFIG_TRACING
 void trace_init(void);
+void early_trace_init(void);
 #else
 static inline void trace_init(void) { }
+static inline void early_trace_init(void) { }
 #endif
 
 struct module;
@@ -144,6 +146,10 @@ struct ftrace_ops_hash {
         struct ftrace_hash              *filter_hash;
         struct mutex                    regex_lock;
 };
+
+void ftrace_free_init_mem(void);
+#else
+static inline void ftrace_free_init_mem(void) { }
 #endif
 
 /*
@@ -260,6 +266,7 @@ static inline int ftrace_nr_registered_ops(void)
 }
 static inline void clear_ftrace_function(void) { }
 static inline void ftrace_kill(void) { }
+static inline void ftrace_free_init_mem(void) { }
 #endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_STACK_TRACER
@@ -279,15 +286,45 @@ int
 stack_trace_sysctl(struct ctl_table *table, int write,
                    void __user *buffer, size_t *lenp,
                    loff_t *ppos);
-#endif
 
-struct ftrace_func_command {
-        struct list_head        list;
-        char                    *name;
-        int                     (*func)(struct ftrace_hash *hash,
-                                        char *func, char *cmd,
-                                        char *params, int enable);
-};
+/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
+DECLARE_PER_CPU(int, disable_stack_tracer);
+
+/**
+ * stack_tracer_disable - temporarily disable the stack tracer
+ *
+ * There's a few locations (namely in RCU) where stack tracing
+ * cannot be executed. This function is used to disable stack
+ * tracing during those critical sections.
+ *
+ * This function must be called with preemption or interrupts
+ * disabled and stack_tracer_enable() must be called shortly after
+ * while preemption or interrupts are still disabled.
+ */
+static inline void stack_tracer_disable(void)
+{
+        /* Preemption or interupts must be disabled */
+        if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
+                WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
+        this_cpu_inc(disable_stack_tracer);
+}
+
+/**
+ * stack_tracer_enable - re-enable the stack tracer
+ *
+ * After stack_tracer_disable() is called, stack_tracer_enable()
+ * must be called shortly afterward.
+ */
+static inline void stack_tracer_enable(void)
+{
+        if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
+                WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
+        this_cpu_dec(disable_stack_tracer);
+}
+#else
+static inline void stack_tracer_disable(void) { }
+static inline void stack_tracer_enable(void) { }
+#endif
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
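A minimal sketch of the calling convention the kernel-doc above describes (the surrounding function is hypothetical, not part of this merge):

        /* Hypothetical caller: stack tracing must be paused in this window. */
        static void example_critical_section(void)
        {
                preempt_disable();
                stack_tracer_disable();

                /* ... work that must not be stack-traced, e.g. RCU eqs entry ... */

                stack_tracer_enable();          /* still with preemption disabled */
                preempt_enable();
        }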
@@ -315,30 +352,6 @@ void ftrace_bug(int err, struct dyn_ftrace *rec);
 
 struct seq_file;
 
-struct ftrace_probe_ops {
-        void            (*func)(unsigned long ip,
-                                unsigned long parent_ip,
-                                void **data);
-        int             (*init)(struct ftrace_probe_ops *ops,
-                                unsigned long ip, void **data);
-        void            (*free)(struct ftrace_probe_ops *ops,
-                                unsigned long ip, void **data);
-        int             (*print)(struct seq_file *m,
-                                 unsigned long ip,
-                                 struct ftrace_probe_ops *ops,
-                                 void *data);
-};
-
-extern int
-register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
-                               void *data);
-extern void
-unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
-                                 void *data);
-extern void
-unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
-extern void unregister_ftrace_function_probe_all(char *glob);
-
 extern int ftrace_text_reserved(const void *start, const void *end);
 
 extern int ftrace_nr_registered_ops(void);
@@ -400,9 +413,6 @@ void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
 void ftrace_free_filter(struct ftrace_ops *ops);
 void ftrace_ops_set_global_filter(struct ftrace_ops *ops);
 
-int register_ftrace_command(struct ftrace_func_command *cmd);
-int unregister_ftrace_command(struct ftrace_func_command *cmd);
-
 enum {
         FTRACE_UPDATE_CALLS             = (1 << 0),
         FTRACE_DISABLE_CALLS            = (1 << 1),
@@ -433,8 +443,8 @@ enum {
         FTRACE_ITER_FILTER      = (1 << 0),
         FTRACE_ITER_NOTRACE     = (1 << 1),
         FTRACE_ITER_PRINTALL    = (1 << 2),
-        FTRACE_ITER_DO_HASH     = (1 << 3),
-        FTRACE_ITER_HASH        = (1 << 4),
+        FTRACE_ITER_DO_PROBES   = (1 << 3),
+        FTRACE_ITER_PROBE       = (1 << 4),
         FTRACE_ITER_ENABLED     = (1 << 5),
 };
 
@@ -618,14 +628,6 @@ static inline void ftrace_enable_daemon(void) { }
 static inline void ftrace_module_init(struct module *mod) { }
 static inline void ftrace_module_enable(struct module *mod) { }
 static inline void ftrace_release_mod(struct module *mod) { }
-static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
-{
-        return -EINVAL;
-}
-static inline __init int unregister_ftrace_command(char *cmd_name)
-{
-        return -EINVAL;
-}
 static inline int ftrace_text_reserved(const void *start, const void *end)
 {
         return 0;
@@ -39,7 +39,7 @@
 
 /* These are for everybody (although not all archs will actually
    discard it in modules) */
-#define __init          __section(.init.text) __cold notrace __latent_entropy
+#define __init          __section(.init.text) __cold __inittrace __latent_entropy
 #define __initdata      __section(.init.data)
 #define __initconst     __section(.init.rodata)
 #define __exitdata      __section(.exit.data)
@@ -68,8 +68,10 @@
 
 #ifdef MODULE
 #define __exitused
+#define __inittrace notrace
 #else
 #define __exitused  __used
+#define __inittrace
 #endif
 
 #define __exit          __section(.exit.text) __exitused __cold notrace
@ -97,6 +97,7 @@ void do_trace_rcu_torture_read(const char *rcutorturename,
|
||||
unsigned long secs,
|
||||
unsigned long c_old,
|
||||
unsigned long c);
|
||||
bool rcu_irq_enter_disabled(void);
|
||||
#else
|
||||
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
|
||||
int *flags,
|
||||
@ -113,6 +114,10 @@ static inline void rcutorture_record_test_transition(void)
|
||||
static inline void rcutorture_record_progress(unsigned long vernum)
|
||||
{
|
||||
}
|
||||
static inline bool rcu_irq_enter_disabled(void)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
#ifdef CONFIG_RCU_TRACE
|
||||
void do_trace_rcu_torture_read(const char *rcutorturename,
|
||||
struct rcu_head *rhp,
|
||||
|
@ -185,7 +185,7 @@ size_t ring_buffer_page_len(void *page);
|
||||
|
||||
|
||||
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu);
|
||||
void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data);
|
||||
void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data);
|
||||
int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page,
|
||||
size_t len, int cpu, int full);
|
||||
|
||||
|
@ -138,16 +138,7 @@ enum print_line_t {
|
||||
TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */
|
||||
};
|
||||
|
||||
/*
|
||||
* Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
|
||||
* overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
|
||||
* simplifies those functions and keeps them in sync.
|
||||
*/
|
||||
static inline enum print_line_t trace_handle_return(struct trace_seq *s)
|
||||
{
|
||||
return trace_seq_has_overflowed(s) ?
|
||||
TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
|
||||
}
|
||||
enum print_line_t trace_handle_return(struct trace_seq *s);
|
||||
|
||||
void tracing_generic_entry_update(struct trace_entry *entry,
|
||||
unsigned long flags,
|
||||
|
@ -128,7 +128,7 @@ extern void syscall_unregfunc(void);
|
||||
* as "(void *, void)". The DECLARE_TRACE_NOARGS() will pass in just
|
||||
* "void *data", where as the DECLARE_TRACE() will pass in "void *data, proto".
|
||||
*/
|
||||
#define __DO_TRACE(tp, proto, args, cond, prercu, postrcu) \
|
||||
#define __DO_TRACE(tp, proto, args, cond, rcucheck) \
|
||||
do { \
|
||||
struct tracepoint_func *it_func_ptr; \
|
||||
void *it_func; \
|
||||
@ -136,7 +136,11 @@ extern void syscall_unregfunc(void);
|
||||
\
|
||||
if (!(cond)) \
|
||||
return; \
|
||||
prercu; \
|
||||
if (rcucheck) { \
|
||||
if (WARN_ON_ONCE(rcu_irq_enter_disabled())) \
|
||||
return; \
|
||||
rcu_irq_enter_irqson(); \
|
||||
} \
|
||||
rcu_read_lock_sched_notrace(); \
|
||||
it_func_ptr = rcu_dereference_sched((tp)->funcs); \
|
||||
if (it_func_ptr) { \
|
||||
@ -147,20 +151,19 @@ extern void syscall_unregfunc(void);
|
||||
} while ((++it_func_ptr)->func); \
|
||||
} \
|
||||
rcu_read_unlock_sched_notrace(); \
|
||||
postrcu; \
|
||||
if (rcucheck) \
|
||||
rcu_irq_exit_irqson(); \
|
||||
} while (0)
|
||||
|
||||
#ifndef MODULE
|
||||
#define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args) \
|
||||
#define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args) \
|
||||
static inline void trace_##name##_rcuidle(proto) \
|
||||
{ \
|
||||
if (static_key_false(&__tracepoint_##name.key)) \
|
||||
__DO_TRACE(&__tracepoint_##name, \
|
||||
TP_PROTO(data_proto), \
|
||||
TP_ARGS(data_args), \
|
||||
TP_CONDITION(cond), \
|
||||
rcu_irq_enter_irqson(), \
|
||||
rcu_irq_exit_irqson()); \
|
||||
TP_CONDITION(cond), 1); \
|
||||
}
|
||||
#else
|
||||
#define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args)
|
||||
@ -186,7 +189,7 @@ extern void syscall_unregfunc(void);
|
||||
__DO_TRACE(&__tracepoint_##name, \
|
||||
TP_PROTO(data_proto), \
|
||||
TP_ARGS(data_args), \
|
||||
TP_CONDITION(cond),,); \
|
||||
TP_CONDITION(cond), 0); \
|
||||
if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) { \
|
||||
rcu_read_lock_sched_notrace(); \
|
||||
rcu_dereference_sched(__tracepoint_##name.funcs);\
|
||||
|
init/main.c
@ -545,6 +545,11 @@ asmlinkage __visible void __init start_kernel(void)
|
||||
trap_init();
|
||||
mm_init();
|
||||
|
||||
ftrace_init();
|
||||
|
||||
/* trace_printk can be enabled here */
|
||||
early_trace_init();
|
||||
|
||||
/*
|
||||
* Set up the scheduler prior starting any interrupts (such as the
|
||||
* timer interrupt). Full topology setup happens at smp_init()
|
||||
@ -570,7 +575,7 @@ asmlinkage __visible void __init start_kernel(void)
|
||||
|
||||
rcu_init();
|
||||
|
||||
/* trace_printk() and trace points may be used after this */
|
||||
/* Trace events are available after this */
|
||||
trace_init();
|
||||
|
||||
context_tracking_init();
|
||||
@ -670,8 +675,6 @@ asmlinkage __visible void __init start_kernel(void)
|
||||
efi_free_boot_services();
|
||||
}
|
||||
|
||||
ftrace_init();
|
||||
|
||||
/* Do the rest non-__init'ed, we're now alive */
|
||||
rest_init();
|
||||
}
|
||||
@ -959,6 +962,7 @@ static int __ref kernel_init(void *unused)
|
||||
kernel_init_freeable();
|
||||
/* need to finish all async __init code before freeing the memory */
|
||||
async_synchronize_full();
|
||||
ftrace_free_init_mem();
|
||||
free_initmem();
|
||||
mark_readonly();
|
||||
system_state = SYSTEM_RUNNING;
|
||||
|
@ -57,6 +57,7 @@
|
||||
#include <linux/random.h>
|
||||
#include <linux/trace_events.h>
|
||||
#include <linux/suspend.h>
|
||||
#include <linux/ftrace.h>
|
||||
|
||||
#include "tree.h"
|
||||
#include "rcu.h"
|
||||
@ -283,6 +284,20 @@ static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
|
||||
#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
|
||||
};
|
||||
|
||||
/*
|
||||
* There's a few places, currently just in the tracing infrastructure,
|
||||
* that uses rcu_irq_enter() to make sure RCU is watching. But there's
|
||||
* a small location where that will not even work. In those cases
|
||||
* rcu_irq_enter_disabled() needs to be checked to make sure rcu_irq_enter()
|
||||
* can be called.
|
||||
*/
|
||||
static DEFINE_PER_CPU(bool, disable_rcu_irq_enter);
|
||||
|
||||
bool rcu_irq_enter_disabled(void)
|
||||
{
|
||||
return this_cpu_read(disable_rcu_irq_enter);
|
||||
}
|
||||
|
||||
/*
|
||||
* Record entry into an extended quiescent state. This is only to be
|
||||
* called when not already in an extended quiescent state.
|
||||
@ -771,25 +786,24 @@ cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
|
||||
}
|
||||
|
||||
/*
|
||||
* rcu_eqs_enter_common - current CPU is moving towards extended quiescent state
|
||||
* rcu_eqs_enter_common - current CPU is entering an extended quiescent state
|
||||
*
|
||||
* If the new value of the ->dynticks_nesting counter now is zero,
|
||||
* we really have entered idle, and must do the appropriate accounting.
|
||||
* The caller must have disabled interrupts.
|
||||
* Enter idle, doing appropriate accounting. The caller must have
|
||||
* disabled interrupts.
|
||||
*/
|
||||
static void rcu_eqs_enter_common(long long oldval, bool user)
|
||||
static void rcu_eqs_enter_common(bool user)
|
||||
{
|
||||
struct rcu_state *rsp;
|
||||
struct rcu_data *rdp;
|
||||
RCU_TRACE(struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);)
|
||||
struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
|
||||
|
||||
trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
|
||||
trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0);
|
||||
if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
|
||||
!user && !is_idle_task(current)) {
|
||||
struct task_struct *idle __maybe_unused =
|
||||
idle_task(smp_processor_id());
|
||||
|
||||
trace_rcu_dyntick(TPS("Error on entry: not idle task"), oldval, 0);
|
||||
trace_rcu_dyntick(TPS("Error on entry: not idle task"), rdtp->dynticks_nesting, 0);
|
||||
rcu_ftrace_dump(DUMP_ORIG);
|
||||
WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
|
||||
current->pid, current->comm,
|
||||
@ -800,7 +814,10 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
|
||||
do_nocb_deferred_wakeup(rdp);
|
||||
}
|
||||
rcu_prepare_for_idle();
|
||||
rcu_dynticks_eqs_enter();
|
||||
__this_cpu_inc(disable_rcu_irq_enter);
|
||||
rdtp->dynticks_nesting = 0; /* Breaks tracing momentarily. */
|
||||
rcu_dynticks_eqs_enter(); /* After this, tracing works again. */
|
||||
__this_cpu_dec(disable_rcu_irq_enter);
|
||||
rcu_dynticks_task_enter();
|
||||
|
||||
/*
|
||||
@ -821,19 +838,15 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
|
||||
*/
|
||||
static void rcu_eqs_enter(bool user)
|
||||
{
|
||||
long long oldval;
|
||||
struct rcu_dynticks *rdtp;
|
||||
|
||||
rdtp = this_cpu_ptr(&rcu_dynticks);
|
||||
oldval = rdtp->dynticks_nesting;
|
||||
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
|
||||
(oldval & DYNTICK_TASK_NEST_MASK) == 0);
|
||||
if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) {
|
||||
rdtp->dynticks_nesting = 0;
|
||||
rcu_eqs_enter_common(oldval, user);
|
||||
} else {
|
||||
(rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
|
||||
if ((rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
|
||||
rcu_eqs_enter_common(user);
|
||||
else
|
||||
rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@ -892,19 +905,18 @@ void rcu_user_enter(void)
|
||||
*/
|
||||
void rcu_irq_exit(void)
|
||||
{
|
||||
long long oldval;
|
||||
struct rcu_dynticks *rdtp;
|
||||
|
||||
RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_exit() invoked with irqs enabled!!!");
|
||||
rdtp = this_cpu_ptr(&rcu_dynticks);
|
||||
oldval = rdtp->dynticks_nesting;
|
||||
rdtp->dynticks_nesting--;
|
||||
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
|
||||
rdtp->dynticks_nesting < 0);
|
||||
if (rdtp->dynticks_nesting)
|
||||
trace_rcu_dyntick(TPS("--="), oldval, rdtp->dynticks_nesting);
|
||||
else
|
||||
rcu_eqs_enter_common(oldval, true);
|
||||
rdtp->dynticks_nesting < 1);
|
||||
if (rdtp->dynticks_nesting <= 1) {
|
||||
rcu_eqs_enter_common(true);
|
||||
} else {
|
||||
trace_rcu_dyntick(TPS("--="), rdtp->dynticks_nesting, rdtp->dynticks_nesting - 1);
|
||||
rdtp->dynticks_nesting--;
|
||||
}
|
||||
rcu_sysidle_enter(1);
|
||||
}
|
||||
|
||||
|
@@ -134,7 +134,8 @@ config FUNCTION_TRACER
         select KALLSYMS
         select GENERIC_TRACER
         select CONTEXT_SWITCH_TRACER
         select GLOB
+        select TASKS_RCU if PREEMPT
         help
           Enable the kernel to trace every kernel function. This is done
           by using a compiler feature to insert a small, 5-byte No-Operation
File diff suppressed because it is too large
@ -438,6 +438,7 @@ struct ring_buffer_per_cpu {
|
||||
raw_spinlock_t reader_lock; /* serialize readers */
|
||||
arch_spinlock_t lock;
|
||||
struct lock_class_key lock_key;
|
||||
struct buffer_data_page *free_page;
|
||||
unsigned long nr_pages;
|
||||
unsigned int current_context;
|
||||
struct list_head *pages;
|
||||
@ -4389,9 +4390,25 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
|
||||
*/
|
||||
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
|
||||
{
|
||||
struct buffer_data_page *bpage;
|
||||
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
|
||||
struct buffer_data_page *bpage = NULL;
|
||||
unsigned long flags;
|
||||
struct page *page;
|
||||
|
||||
local_irq_save(flags);
|
||||
arch_spin_lock(&cpu_buffer->lock);
|
||||
|
||||
if (cpu_buffer->free_page) {
|
||||
bpage = cpu_buffer->free_page;
|
||||
cpu_buffer->free_page = NULL;
|
||||
}
|
||||
|
||||
arch_spin_unlock(&cpu_buffer->lock);
|
||||
local_irq_restore(flags);
|
||||
|
||||
if (bpage)
|
||||
goto out;
|
||||
|
||||
page = alloc_pages_node(cpu_to_node(cpu),
|
||||
GFP_KERNEL | __GFP_NORETRY, 0);
|
||||
if (!page)
|
||||
@ -4399,6 +4416,7 @@ void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
|
||||
|
||||
bpage = page_address(page);
|
||||
|
||||
out:
|
||||
rb_init_page(bpage);
|
||||
|
||||
return bpage;
|
||||
@ -4408,13 +4426,29 @@ EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
|
||||
/**
|
||||
* ring_buffer_free_read_page - free an allocated read page
|
||||
* @buffer: the buffer the page was allocate for
|
||||
* @cpu: the cpu buffer the page came from
|
||||
* @data: the page to free
|
||||
*
|
||||
* Free a page allocated from ring_buffer_alloc_read_page.
|
||||
*/
|
||||
void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
|
||||
void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data)
|
||||
{
|
||||
free_page((unsigned long)data);
|
||||
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
|
||||
struct buffer_data_page *bpage = data;
|
||||
unsigned long flags;
|
||||
|
||||
local_irq_save(flags);
|
||||
arch_spin_lock(&cpu_buffer->lock);
|
||||
|
||||
if (!cpu_buffer->free_page) {
|
||||
cpu_buffer->free_page = bpage;
|
||||
bpage = NULL;
|
||||
}
|
||||
|
||||
arch_spin_unlock(&cpu_buffer->lock);
|
||||
local_irq_restore(flags);
|
||||
|
||||
free_page((unsigned long)bpage);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
|
||||
|
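A rough sketch of the reader-page cycle with the new cpu argument (the helper below is hypothetical and error handling is trimmed); the page handed back here may now be cached by the per-CPU buffer rather than freed:

        static void example_read_one_page(struct ring_buffer *buffer, int cpu)
        {
                void *page = ring_buffer_alloc_read_page(buffer, cpu);

                if (!page)
                        return;
                if (ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 0) >= 0) {
                        /* parse the events copied into "page" here */
                }
                ring_buffer_free_read_page(buffer, cpu, page);
        }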
||||
|
@ -171,7 +171,7 @@ static enum event_status read_page(int cpu)
|
||||
}
|
||||
}
|
||||
}
|
||||
ring_buffer_free_read_page(buffer, bpage);
|
||||
ring_buffer_free_read_page(buffer, cpu, bpage);
|
||||
|
||||
if (ret < 0)
|
||||
return EVENT_DROPPED;
|
||||
|
@ -257,7 +257,7 @@ unsigned long long ns2usecs(u64 nsec)
|
||||
|
||||
/* trace_flags that are default zero for instances */
|
||||
#define ZEROED_TRACE_FLAGS \
|
||||
TRACE_ITER_EVENT_FORK
|
||||
(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
|
||||
|
||||
/*
|
||||
* The global_trace is the descriptor that holds the top-level tracing
|
||||
@ -757,7 +757,7 @@ __trace_buffer_lock_reserve(struct ring_buffer *buffer,
|
||||
return event;
|
||||
}
|
||||
|
||||
static void tracer_tracing_on(struct trace_array *tr)
|
||||
void tracer_tracing_on(struct trace_array *tr)
|
||||
{
|
||||
if (tr->trace_buffer.buffer)
|
||||
ring_buffer_record_on(tr->trace_buffer.buffer);
|
||||
@ -894,23 +894,8 @@ int __trace_bputs(unsigned long ip, const char *str)
|
||||
EXPORT_SYMBOL_GPL(__trace_bputs);
|
||||
|
||||
#ifdef CONFIG_TRACER_SNAPSHOT
|
||||
/**
|
||||
* trace_snapshot - take a snapshot of the current buffer.
|
||||
*
|
||||
* This causes a swap between the snapshot buffer and the current live
|
||||
* tracing buffer. You can use this to take snapshots of the live
|
||||
* trace when some condition is triggered, but continue to trace.
|
||||
*
|
||||
* Note, make sure to allocate the snapshot with either
|
||||
* a tracing_snapshot_alloc(), or by doing it manually
|
||||
* with: echo 1 > /sys/kernel/debug/tracing/snapshot
|
||||
*
|
||||
* If the snapshot buffer is not allocated, it will stop tracing.
|
||||
* Basically making a permanent snapshot.
|
||||
*/
|
||||
void tracing_snapshot(void)
|
||||
static void tracing_snapshot_instance(struct trace_array *tr)
|
||||
{
|
||||
struct trace_array *tr = &global_trace;
|
||||
struct tracer *tracer = tr->current_trace;
|
||||
unsigned long flags;
|
||||
|
||||
@ -938,6 +923,27 @@ void tracing_snapshot(void)
|
||||
update_max_tr(tr, current, smp_processor_id());
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
/**
|
||||
* trace_snapshot - take a snapshot of the current buffer.
|
||||
*
|
||||
* This causes a swap between the snapshot buffer and the current live
|
||||
* tracing buffer. You can use this to take snapshots of the live
|
||||
* trace when some condition is triggered, but continue to trace.
|
||||
*
|
||||
* Note, make sure to allocate the snapshot with either
|
||||
* a tracing_snapshot_alloc(), or by doing it manually
|
||||
* with: echo 1 > /sys/kernel/debug/tracing/snapshot
|
||||
*
|
||||
* If the snapshot buffer is not allocated, it will stop tracing.
|
||||
* Basically making a permanent snapshot.
|
||||
*/
|
||||
void tracing_snapshot(void)
|
||||
{
|
||||
struct trace_array *tr = &global_trace;
|
||||
|
||||
tracing_snapshot_instance(tr);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(tracing_snapshot);
|
||||
|
||||
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
|
||||
@ -1039,7 +1045,7 @@ void tracing_snapshot_alloc(void)
|
||||
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
|
||||
#endif /* CONFIG_TRACER_SNAPSHOT */
|
||||
|
||||
static void tracer_tracing_off(struct trace_array *tr)
|
||||
void tracer_tracing_off(struct trace_array *tr)
|
||||
{
|
||||
if (tr->trace_buffer.buffer)
|
||||
ring_buffer_record_off(tr->trace_buffer.buffer);
|
||||
@ -1424,6 +1430,28 @@ static int wait_on_pipe(struct trace_iterator *iter, bool full)
|
||||
}
|
||||
|
||||
#ifdef CONFIG_FTRACE_STARTUP_TEST
|
||||
static bool selftests_can_run;
|
||||
|
||||
struct trace_selftests {
|
||||
struct list_head list;
|
||||
struct tracer *type;
|
||||
};
|
||||
|
||||
static LIST_HEAD(postponed_selftests);
|
||||
|
||||
static int save_selftest(struct tracer *type)
|
||||
{
|
||||
struct trace_selftests *selftest;
|
||||
|
||||
selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
|
||||
if (!selftest)
|
||||
return -ENOMEM;
|
||||
|
||||
selftest->type = type;
|
||||
list_add(&selftest->list, &postponed_selftests);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int run_tracer_selftest(struct tracer *type)
|
||||
{
|
||||
struct trace_array *tr = &global_trace;
|
||||
@ -1433,6 +1461,14 @@ static int run_tracer_selftest(struct tracer *type)
|
||||
if (!type->selftest || tracing_selftest_disabled)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* If a tracer registers early in boot up (before scheduling is
|
||||
* initialized and such), then do not run its selftests yet.
|
||||
* Instead, run it a little later in the boot process.
|
||||
*/
|
||||
if (!selftests_can_run)
|
||||
return save_selftest(type);
|
||||
|
||||
/*
|
||||
* Run a selftest on this tracer.
|
||||
* Here we reset the trace buffer, and set the current
|
||||
@ -1482,6 +1518,47 @@ static int run_tracer_selftest(struct tracer *type)
|
||||
printk(KERN_CONT "PASSED\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
static __init int init_trace_selftests(void)
|
||||
{
|
||||
struct trace_selftests *p, *n;
|
||||
struct tracer *t, **last;
|
||||
int ret;
|
||||
|
||||
selftests_can_run = true;
|
||||
|
||||
mutex_lock(&trace_types_lock);
|
||||
|
||||
if (list_empty(&postponed_selftests))
|
||||
goto out;
|
||||
|
||||
pr_info("Running postponed tracer tests:\n");
|
||||
|
||||
list_for_each_entry_safe(p, n, &postponed_selftests, list) {
|
||||
ret = run_tracer_selftest(p->type);
|
||||
/* If the test fails, then warn and remove from available_tracers */
|
||||
if (ret < 0) {
|
||||
WARN(1, "tracer: %s failed selftest, disabling\n",
|
||||
p->type->name);
|
||||
last = &trace_types;
|
||||
for (t = trace_types; t; t = t->next) {
|
||||
if (t == p->type) {
|
||||
*last = t->next;
|
||||
break;
|
||||
}
|
||||
last = &t->next;
|
||||
}
|
||||
}
|
||||
list_del(&p->list);
|
||||
kfree(p);
|
||||
}
|
||||
|
||||
out:
|
||||
mutex_unlock(&trace_types_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
early_initcall(init_trace_selftests);
|
||||
#else
|
||||
static inline int run_tracer_selftest(struct tracer *type)
|
||||
{
|
||||
@@ -1927,6 +2004,18 @@ void tracing_record_cmdline(struct task_struct *tsk)
                 __this_cpu_write(trace_cmdline_save, false);
 }
 
+/*
+ * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
+ * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
+ * simplifies those functions and keeps them in sync.
+ */
+enum print_line_t trace_handle_return(struct trace_seq *s)
+{
+        return trace_seq_has_overflowed(s) ?
+                TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
+}
+EXPORT_SYMBOL_GPL(trace_handle_return);
+
 void
 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
                              int pc)
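For context, the typical caller is a trace event output callback; the callback below is a hypothetical example of the pattern this helper keeps in sync, not code from this merge:

        static enum print_line_t example_trace_output(struct trace_iterator *iter,
                                                      int flags, struct trace_event *event)
        {
                trace_seq_printf(&iter->seq, "example: hello\n");

                /* TRACE_TYPE_PARTIAL_LINE if the seq overflowed, HANDLED otherwise */
                return trace_handle_return(&iter->seq);
        }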
@ -4122,6 +4211,9 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
|
||||
if (mask == TRACE_ITER_EVENT_FORK)
|
||||
trace_event_follow_fork(tr, enabled);
|
||||
|
||||
if (mask == TRACE_ITER_FUNC_FORK)
|
||||
ftrace_pid_follow_fork(tr, enabled);
|
||||
|
||||
if (mask == TRACE_ITER_OVERWRITE) {
|
||||
ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
|
||||
#ifdef CONFIG_TRACER_MAX_TRACE
|
||||
@ -5962,6 +6054,7 @@ static int tracing_clock_open(struct inode *inode, struct file *file)
|
||||
struct ftrace_buffer_info {
|
||||
struct trace_iterator iter;
|
||||
void *spare;
|
||||
unsigned int spare_cpu;
|
||||
unsigned int read;
|
||||
};
|
||||
|
||||
@ -6291,9 +6384,11 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
|
||||
return -EBUSY;
|
||||
#endif
|
||||
|
||||
if (!info->spare)
|
||||
if (!info->spare) {
|
||||
info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
|
||||
iter->cpu_file);
|
||||
info->spare_cpu = iter->cpu_file;
|
||||
}
|
||||
if (!info->spare)
|
||||
return -ENOMEM;
|
||||
|
||||
@ -6353,7 +6448,8 @@ static int tracing_buffers_release(struct inode *inode, struct file *file)
|
||||
__trace_array_put(iter->tr);
|
||||
|
||||
if (info->spare)
|
||||
ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
|
||||
ring_buffer_free_read_page(iter->trace_buffer->buffer,
|
||||
info->spare_cpu, info->spare);
|
||||
kfree(info);
|
||||
|
||||
mutex_unlock(&trace_types_lock);
|
||||
@ -6364,6 +6460,7 @@ static int tracing_buffers_release(struct inode *inode, struct file *file)
|
||||
struct buffer_ref {
|
||||
struct ring_buffer *buffer;
|
||||
void *page;
|
||||
int cpu;
|
||||
int ref;
|
||||
};
|
||||
|
||||
@ -6375,7 +6472,7 @@ static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
|
||||
if (--ref->ref)
|
||||
return;
|
||||
|
||||
ring_buffer_free_read_page(ref->buffer, ref->page);
|
||||
ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
|
||||
kfree(ref);
|
||||
buf->private = 0;
|
||||
}
|
||||
@ -6409,7 +6506,7 @@ static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
|
||||
if (--ref->ref)
|
||||
return;
|
||||
|
||||
ring_buffer_free_read_page(ref->buffer, ref->page);
|
||||
ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
|
||||
kfree(ref);
|
||||
spd->partial[i].private = 0;
|
||||
}
|
||||
@ -6473,11 +6570,13 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
|
||||
kfree(ref);
|
||||
break;
|
||||
}
|
||||
ref->cpu = iter->cpu_file;
|
||||
|
||||
r = ring_buffer_read_page(ref->buffer, &ref->page,
|
||||
len, iter->cpu_file, 1);
|
||||
if (r < 0) {
|
||||
ring_buffer_free_read_page(ref->buffer, ref->page);
|
||||
ring_buffer_free_read_page(ref->buffer, ref->cpu,
|
||||
ref->page);
|
||||
kfree(ref);
|
||||
break;
|
||||
}
|
||||
@ -6648,43 +6747,89 @@ static const struct file_operations tracing_dyn_info_fops = {
|
||||
|
||||
#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
|
||||
static void
|
||||
ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
|
||||
ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
|
||||
struct trace_array *tr, struct ftrace_probe_ops *ops,
|
||||
void *data)
|
||||
{
|
||||
tracing_snapshot();
|
||||
tracing_snapshot_instance(tr);
|
||||
}
|
||||
|
||||
static void
|
||||
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
|
||||
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
|
||||
struct trace_array *tr, struct ftrace_probe_ops *ops,
|
||||
void *data)
|
||||
{
|
||||
unsigned long *count = (long *)data;
|
||||
struct ftrace_func_mapper *mapper = data;
|
||||
long *count = NULL;
|
||||
|
||||
if (!*count)
|
||||
return;
|
||||
if (mapper)
|
||||
count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
|
||||
|
||||
if (count) {
|
||||
|
||||
if (*count <= 0)
|
||||
return;
|
||||
|
||||
if (*count != -1)
|
||||
(*count)--;
|
||||
}
|
||||
|
||||
tracing_snapshot();
|
||||
tracing_snapshot_instance(tr);
|
||||
}
|
||||
|
||||
static int
|
||||
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
|
||||
struct ftrace_probe_ops *ops, void *data)
|
||||
{
|
||||
long count = (long)data;
|
||||
struct ftrace_func_mapper *mapper = data;
|
||||
long *count = NULL;
|
||||
|
||||
seq_printf(m, "%ps:", (void *)ip);
|
||||
|
||||
seq_puts(m, "snapshot");
|
||||
|
||||
if (count == -1)
|
||||
seq_puts(m, ":unlimited\n");
|
||||
if (mapper)
|
||||
count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
|
||||
|
||||
if (count)
|
||||
seq_printf(m, ":count=%ld\n", *count);
|
||||
else
|
||||
seq_printf(m, ":count=%ld\n", count);
|
||||
seq_puts(m, ":unlimited\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
|
||||
unsigned long ip, void *init_data, void **data)
|
||||
{
|
||||
struct ftrace_func_mapper *mapper = *data;
|
||||
|
||||
if (!mapper) {
|
||||
mapper = allocate_ftrace_func_mapper();
|
||||
if (!mapper)
|
||||
return -ENOMEM;
|
||||
*data = mapper;
|
||||
}
|
||||
|
||||
return ftrace_func_mapper_add_ip(mapper, ip, init_data);
|
||||
}
|
||||
|
||||
static void
|
||||
ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
|
||||
unsigned long ip, void *data)
|
||||
{
|
||||
struct ftrace_func_mapper *mapper = data;
|
||||
|
||||
if (!ip) {
|
||||
if (!mapper)
|
||||
return;
|
||||
free_ftrace_func_mapper(mapper, NULL);
|
||||
return;
|
||||
}
|
||||
|
||||
ftrace_func_mapper_remove_ip(mapper, ip);
|
||||
}
|
||||
|
||||
static struct ftrace_probe_ops snapshot_probe_ops = {
|
||||
.func = ftrace_snapshot,
|
||||
.print = ftrace_snapshot_print,
|
||||
@ -6693,10 +6838,12 @@ static struct ftrace_probe_ops snapshot_probe_ops = {
|
||||
static struct ftrace_probe_ops snapshot_count_probe_ops = {
|
||||
.func = ftrace_count_snapshot,
|
||||
.print = ftrace_snapshot_print,
|
||||
.init = ftrace_snapshot_init,
|
||||
.free = ftrace_snapshot_free,
|
||||
};
|
||||
|
||||
static int
|
||||
ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
|
||||
ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
|
||||
char *glob, char *cmd, char *param, int enable)
|
||||
{
|
||||
struct ftrace_probe_ops *ops;
|
||||
@ -6710,10 +6857,8 @@ ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
|
||||
|
||||
ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
|
||||
|
||||
if (glob[0] == '!') {
|
||||
unregister_ftrace_function_probe_func(glob+1, ops);
|
||||
return 0;
|
||||
}
|
||||
if (glob[0] == '!')
|
||||
return unregister_ftrace_function_probe_func(glob+1, tr, ops);
|
||||
|
||||
if (!param)
|
||||
goto out_reg;
|
||||
@ -6732,11 +6877,11 @@ ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
|
||||
return ret;
|
||||
|
||||
out_reg:
|
||||
ret = alloc_snapshot(&global_trace);
|
||||
ret = alloc_snapshot(tr);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
ret = register_ftrace_function_probe(glob, ops, count);
|
||||
ret = register_ftrace_function_probe(glob, tr, ops, count);
|
||||
|
||||
out:
|
||||
return ret < 0 ? ret : 0;
|
||||
@ -7347,6 +7492,8 @@ static int instance_mkdir(const char *name)
|
||||
goto out_free_tr;
|
||||
}
|
||||
|
||||
ftrace_init_trace_array(tr);
|
||||
|
||||
init_tracer_tracefs(tr, tr->dir);
|
||||
init_trace_flags_index(tr);
|
||||
__update_tracer_options(tr);
|
||||
@ -7967,6 +8114,9 @@ __init static int tracer_alloc_buffers(void)
|
||||
|
||||
register_tracer(&nop_trace);
|
||||
|
||||
/* Function tracing may start here (via kernel command line) */
|
||||
init_function_trace();
|
||||
|
||||
/* All seems OK, enable tracing */
|
||||
tracing_disabled = 0;
|
||||
|
||||
@ -8001,7 +8151,7 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
void __init trace_init(void)
|
||||
void __init early_trace_init(void)
|
||||
{
|
||||
if (tracepoint_printk) {
|
||||
tracepoint_print_iter =
|
||||
@ -8012,6 +8162,10 @@ void __init trace_init(void)
|
||||
static_key_enable(&tracepoint_printk_key.key);
|
||||
}
|
||||
tracer_alloc_buffers();
|
||||
}
|
||||
|
||||
void __init trace_init(void)
|
||||
{
|
||||
trace_event_init();
|
||||
}
|
||||
|
||||
|
@ -262,6 +262,9 @@ struct trace_array {
|
||||
#ifdef CONFIG_FUNCTION_TRACER
|
||||
struct ftrace_ops *ops;
|
||||
struct trace_pid_list __rcu *function_pids;
|
||||
#ifdef CONFIG_DYNAMIC_FTRACE
|
||||
struct list_head func_probes;
|
||||
#endif
|
||||
/* function tracing enabled */
|
||||
int function_enabled;
|
||||
#endif
|
||||
@ -579,6 +582,8 @@ void tracing_reset_all_online_cpus(void);
|
||||
int tracing_open_generic(struct inode *inode, struct file *filp);
|
||||
bool tracing_is_disabled(void);
|
||||
int tracer_tracing_is_on(struct trace_array *tr);
|
||||
void tracer_tracing_on(struct trace_array *tr);
|
||||
void tracer_tracing_off(struct trace_array *tr);
|
||||
struct dentry *trace_create_file(const char *name,
|
||||
umode_t mode,
|
||||
struct dentry *parent,
|
||||
@ -696,6 +701,9 @@ extern void trace_event_follow_fork(struct trace_array *tr, bool enable);
|
||||
|
||||
#ifdef CONFIG_DYNAMIC_FTRACE
|
||||
extern unsigned long ftrace_update_tot_cnt;
|
||||
void ftrace_init_trace_array(struct trace_array *tr);
|
||||
#else
|
||||
static inline void ftrace_init_trace_array(struct trace_array *tr) { }
|
||||
#endif
|
||||
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
|
||||
extern int DYN_FTRACE_TEST_NAME(void);
|
||||
@ -880,6 +888,14 @@ print_graph_function_flags(struct trace_iterator *iter, u32 flags)
|
||||
extern struct list_head ftrace_pids;
|
||||
|
||||
#ifdef CONFIG_FUNCTION_TRACER
|
||||
struct ftrace_func_command {
|
||||
struct list_head list;
|
||||
char *name;
|
||||
int (*func)(struct trace_array *tr,
|
||||
struct ftrace_hash *hash,
|
||||
char *func, char *cmd,
|
||||
char *params, int enable);
|
||||
};
|
||||
extern bool ftrace_filter_param __initdata;
|
||||
static inline int ftrace_trace_task(struct trace_array *tr)
|
||||
{
|
||||
@ -897,6 +913,8 @@ void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
|
||||
void ftrace_init_tracefs_toplevel(struct trace_array *tr,
|
||||
struct dentry *d_tracer);
|
||||
void ftrace_clear_pids(struct trace_array *tr);
|
||||
int init_function_trace(void);
|
||||
void ftrace_pid_follow_fork(struct trace_array *tr, bool enable);
|
||||
#else
|
||||
static inline int ftrace_trace_task(struct trace_array *tr)
|
||||
{
|
||||
@ -916,15 +934,70 @@ static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
|
||||
static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
|
||||
static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
|
||||
static inline void ftrace_clear_pids(struct trace_array *tr) { }
|
||||
static inline int init_function_trace(void) { return 0; }
|
||||
static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) { }
|
||||
/* ftace_func_t type is not defined, use macro instead of static inline */
|
||||
#define ftrace_init_array_ops(tr, func) do { } while (0)
|
||||
#endif /* CONFIG_FUNCTION_TRACER */
|
||||
|
||||
#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
|
||||
|
||||
struct ftrace_probe_ops {
|
||||
void (*func)(unsigned long ip,
|
||||
unsigned long parent_ip,
|
||||
struct trace_array *tr,
|
||||
struct ftrace_probe_ops *ops,
|
||||
void *data);
|
||||
int (*init)(struct ftrace_probe_ops *ops,
|
||||
struct trace_array *tr,
|
||||
unsigned long ip, void *init_data,
|
||||
void **data);
|
||||
void (*free)(struct ftrace_probe_ops *ops,
|
||||
struct trace_array *tr,
|
||||
unsigned long ip, void *data);
|
||||
int (*print)(struct seq_file *m,
|
||||
unsigned long ip,
|
||||
struct ftrace_probe_ops *ops,
|
||||
void *data);
|
||||
};
|
||||
|
||||
struct ftrace_func_mapper;
|
||||
typedef int (*ftrace_mapper_func)(void *data);
|
||||
|
||||
struct ftrace_func_mapper *allocate_ftrace_func_mapper(void);
|
||||
void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
|
||||
unsigned long ip);
|
||||
int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
|
||||
unsigned long ip, void *data);
|
||||
void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
|
||||
unsigned long ip);
|
||||
void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
|
||||
ftrace_mapper_func free_func);
|
||||
|

extern int
register_ftrace_function_probe(char *glob, struct trace_array *tr,
                               struct ftrace_probe_ops *ops, void *data);
extern int
unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
                                      struct ftrace_probe_ops *ops);
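To show how the reworked probe interface and the func_mapper helpers fit together, here is a hedged sketch of a per-instance probe; everything prefixed example_ is made up for this note, and only calls declared in this merge are used:

        static void
        example_probe(unsigned long ip, unsigned long parent_ip,
                      struct trace_array *tr, struct ftrace_probe_ops *ops,
                      void *data)
        {
                struct ftrace_func_mapper *mapper = data;
                long *count;

                /* per-ip data stored by the init callback below */
                count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
                if (count && *count != -1)
                        (*count)--;
                /* act on "tr", the tracing instance that owns this probe */
        }

        static int
        example_probe_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
                           unsigned long ip, void *init_data, void **data)
        {
                struct ftrace_func_mapper *mapper = *data;

                if (!mapper) {
                        mapper = allocate_ftrace_func_mapper();
                        if (!mapper)
                                return -ENOMEM;
                        *data = mapper;
                }
                return ftrace_func_mapper_add_ip(mapper, ip, init_data);
        }

        static struct ftrace_probe_ops example_probe_ops = {
                .func   = example_probe,
                .init   = example_probe_init,
        };

Registration for a given instance then goes through register_ftrace_function_probe(glob, tr, &example_probe_ops, init_data) and removal through unregister_ftrace_function_probe_func(glob, tr, &example_probe_ops).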
||||
int register_ftrace_command(struct ftrace_func_command *cmd);
|
||||
int unregister_ftrace_command(struct ftrace_func_command *cmd);
|
||||
|
||||
void ftrace_create_filter_files(struct ftrace_ops *ops,
|
||||
struct dentry *parent);
|
||||
void ftrace_destroy_filter_files(struct ftrace_ops *ops);
|
||||
#else
|
||||
struct ftrace_func_command;
|
||||
|
||||
static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
static inline __init int unregister_ftrace_command(char *cmd_name)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
/*
|
||||
* The ops parameter passed in is usually undefined.
|
||||
* This must be a macro.
|
||||
@ -989,11 +1062,13 @@ extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
|
||||
|
||||
#ifdef CONFIG_FUNCTION_TRACER
|
||||
# define FUNCTION_FLAGS \
|
||||
C(FUNCTION, "function-trace"),
|
||||
C(FUNCTION, "function-trace"), \
|
||||
C(FUNC_FORK, "function-fork"),
|
||||
# define FUNCTION_DEFAULT_FLAGS TRACE_ITER_FUNCTION
|
||||
#else
|
||||
# define FUNCTION_FLAGS
|
||||
# define FUNCTION_DEFAULT_FLAGS 0UL
|
||||
# define TRACE_ITER_FUNC_FORK 0UL
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_STACKTRACE
|
||||
|
@ -153,10 +153,18 @@ static int benchmark_event_kthread(void *arg)
|
||||
trace_do_benchmark();
|
||||
|
||||
/*
|
||||
* We don't go to sleep, but let others
|
||||
* run as well.
|
||||
* We don't go to sleep, but let others run as well.
|
||||
* This is bascially a "yield()" to let any task that
|
||||
* wants to run, schedule in, but if the CPU is idle,
|
||||
* we'll keep burning cycles.
|
||||
*
|
||||
* Note the _rcu_qs() version of cond_resched() will
|
||||
* notify synchronize_rcu_tasks() that this thread has
|
||||
* passed a quiescent state for rcu_tasks. Otherwise
|
||||
* this thread will never voluntarily schedule which would
|
||||
* block synchronize_rcu_tasks() indefinitely.
|
||||
*/
|
||||
cond_resched();
|
||||
cond_resched_rcu_qs();
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -2460,15 +2460,8 @@ struct event_probe_data {
|
||||
bool enable;
|
||||
};
|
||||
|
||||
static void
|
||||
event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
|
||||
static void update_event_probe(struct event_probe_data *data)
|
||||
{
|
||||
struct event_probe_data **pdata = (struct event_probe_data **)_data;
|
||||
struct event_probe_data *data = *pdata;
|
||||
|
||||
if (!data)
|
||||
return;
|
||||
|
||||
if (data->enable)
|
||||
clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
|
||||
else
|
||||
@ -2476,77 +2469,141 @@ event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
|
||||
}
|
||||
|
||||
static void
|
||||
event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data)
|
||||
event_enable_probe(unsigned long ip, unsigned long parent_ip,
|
||||
struct trace_array *tr, struct ftrace_probe_ops *ops,
|
||||
void *data)
|
||||
{
|
||||
struct event_probe_data **pdata = (struct event_probe_data **)_data;
|
||||
struct event_probe_data *data = *pdata;
|
||||
struct ftrace_func_mapper *mapper = data;
|
||||
struct event_probe_data *edata;
|
||||
void **pdata;
|
||||
|
||||
if (!data)
|
||||
pdata = ftrace_func_mapper_find_ip(mapper, ip);
|
||||
if (!pdata || !*pdata)
|
||||
return;
|
||||
|
||||
if (!data->count)
|
||||
edata = *pdata;
|
||||
update_event_probe(edata);
|
||||
}
|
||||
|
||||
static void
|
||||
event_enable_count_probe(unsigned long ip, unsigned long parent_ip,
|
||||
struct trace_array *tr, struct ftrace_probe_ops *ops,
|
||||
void *data)
|
||||
{
|
||||
struct ftrace_func_mapper *mapper = data;
|
||||
struct event_probe_data *edata;
|
||||
void **pdata;
|
||||
|
||||
pdata = ftrace_func_mapper_find_ip(mapper, ip);
|
||||
if (!pdata || !*pdata)
|
||||
return;
|
||||
|
||||
edata = *pdata;
|
||||
|
||||
if (!edata->count)
|
||||
return;
|
||||
|
||||
/* Skip if the event is in a state we want to switch to */
|
||||
if (data->enable == !(data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
|
||||
if (edata->enable == !(edata->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
|
||||
return;
|
||||
|
||||
if (data->count != -1)
|
||||
(data->count)--;
|
||||
if (edata->count != -1)
|
||||
(edata->count)--;
|
||||
|
||||
event_enable_probe(ip, parent_ip, _data);
|
||||
update_event_probe(edata);
|
||||
}
|
||||
|
||||
static int
|
||||
event_enable_print(struct seq_file *m, unsigned long ip,
|
||||
struct ftrace_probe_ops *ops, void *_data)
|
||||
struct ftrace_probe_ops *ops, void *data)
|
||||
{
|
||||
struct event_probe_data *data = _data;
|
||||
struct ftrace_func_mapper *mapper = data;
|
||||
struct event_probe_data *edata;
|
||||
void **pdata;
|
||||
|
||||
pdata = ftrace_func_mapper_find_ip(mapper, ip);
|
||||
|
||||
if (WARN_ON_ONCE(!pdata || !*pdata))
|
||||
return 0;
|
||||
|
||||
edata = *pdata;
|
||||
|
||||
seq_printf(m, "%ps:", (void *)ip);
|
||||
|
||||
seq_printf(m, "%s:%s:%s",
|
||||
data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
|
||||
data->file->event_call->class->system,
|
||||
trace_event_name(data->file->event_call));
|
||||
edata->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
|
||||
edata->file->event_call->class->system,
|
||||
trace_event_name(edata->file->event_call));
|
||||
|
||||
if (data->count == -1)
|
||||
if (edata->count == -1)
|
||||
seq_puts(m, ":unlimited\n");
|
||||
else
|
||||
seq_printf(m, ":count=%ld\n", data->count);
|
||||
seq_printf(m, ":count=%ld\n", edata->count);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip,
|
||||
void **_data)
|
||||
event_enable_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
|
||||
unsigned long ip, void *init_data, void **data)
|
||||
{
|
||||
struct event_probe_data **pdata = (struct event_probe_data **)_data;
|
||||
struct event_probe_data *data = *pdata;
|
||||
struct ftrace_func_mapper *mapper = *data;
|
||||
struct event_probe_data *edata = init_data;
|
||||
int ret;
|
||||
|
||||
data->ref++;
|
||||
if (!mapper) {
|
||||
mapper = allocate_ftrace_func_mapper();
|
||||
if (!mapper)
|
||||
return -ENODEV;
|
||||
*data = mapper;
|
||||
}
|
||||
|
||||
ret = ftrace_func_mapper_add_ip(mapper, ip, edata);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
edata->ref++;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int free_probe_data(void *data)
|
||||
{
|
||||
struct event_probe_data *edata = data;
|
||||
|
||||
edata->ref--;
|
||||
if (!edata->ref) {
|
||||
/* Remove the SOFT_MODE flag */
|
||||
__ftrace_event_enable_disable(edata->file, 0, 1);
|
||||
module_put(edata->file->event_call->mod);
|
||||
kfree(edata);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip,
|
||||
void **_data)
|
||||
event_enable_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
|
||||
unsigned long ip, void *data)
|
||||
{
|
||||
struct event_probe_data **pdata = (struct event_probe_data **)_data;
|
||||
struct event_probe_data *data = *pdata;
|
||||
struct ftrace_func_mapper *mapper = data;
|
||||
struct event_probe_data *edata;
|
||||
|
||||
if (WARN_ON_ONCE(data->ref <= 0))
|
||||
if (!ip) {
|
||||
if (!mapper)
|
||||
return;
|
||||
free_ftrace_func_mapper(mapper, free_probe_data);
|
||||
return;
|
||||
}
|
||||
|
||||
edata = ftrace_func_mapper_remove_ip(mapper, ip);
|
||||
|
||||
if (WARN_ON_ONCE(!edata))
|
||||
return;
|
||||
|
||||
data->ref--;
|
||||
if (!data->ref) {
|
||||
/* Remove the SOFT_MODE flag */
|
||||
__ftrace_event_enable_disable(data->file, 0, 1);
|
||||
module_put(data->file->event_call->mod);
|
||||
kfree(data);
|
||||
}
|
||||
*pdata = NULL;
|
||||
if (WARN_ON_ONCE(edata->ref <= 0))
|
||||
return;
|
||||
|
||||
free_probe_data(edata);
|
||||
}
|
||||
|
||||
static struct ftrace_probe_ops event_enable_probe_ops = {
|
||||
@ -2578,10 +2635,9 @@ static struct ftrace_probe_ops event_disable_count_probe_ops = {
|
||||
};
|
||||
|
||||
static int
|
||||
event_enable_func(struct ftrace_hash *hash,
|
||||
event_enable_func(struct trace_array *tr, struct ftrace_hash *hash,
|
||||
char *glob, char *cmd, char *param, int enabled)
|
||||
{
|
||||
struct trace_array *tr = top_trace_array();
|
||||
struct trace_event_file *file;
|
||||
struct ftrace_probe_ops *ops;
|
||||
struct event_probe_data *data;
|
||||
@ -2619,12 +2675,12 @@ event_enable_func(struct ftrace_hash *hash,
|
||||
ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
|
||||
|
||||
if (glob[0] == '!') {
|
||||
unregister_ftrace_function_probe_func(glob+1, ops);
|
||||
ret = 0;
|
||||
ret = unregister_ftrace_function_probe_func(glob+1, tr, ops);
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = -ENOMEM;
|
||||
|
||||
data = kzalloc(sizeof(*data), GFP_KERNEL);
|
||||
if (!data)
|
||||
goto out;
|
||||
@ -2661,7 +2717,8 @@ event_enable_func(struct ftrace_hash *hash,
|
||||
ret = __ftrace_event_enable_disable(file, 1, 1);
|
||||
if (ret < 0)
|
||||
		goto out_put;

	ret = register_ftrace_function_probe(glob, ops, data);

	ret = register_ftrace_function_probe(glob, tr, ops, data);

	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.

@@ -267,10 +267,14 @@ static struct tracer function_trace __tracer_data =
};

#ifdef CONFIG_DYNAMIC_FTRACE
static void update_traceon_count(void **data, bool on)
static void update_traceon_count(struct ftrace_probe_ops *ops,
				 unsigned long ip,
				 struct trace_array *tr, bool on,
				 void *data)
{
	long *count = (long *)data;
	long old_count = *count;
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
@@ -301,23 +305,22 @@ static void update_traceon_count(void **data, bool on)
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	if (!old_count)
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;

	if (old_count <= 0)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracing_is_on())
	if (on == !!tracer_tracing_is_on(tr))
		return;

	if (on)
		tracing_on();
		tracer_tracing_on(tr);
	else
		tracing_off();

	/* unlimited? */
	if (old_count == -1)
		return;
		tracer_tracing_off(tr);

	/* Make sure tracing state is visible before updating count */
	smp_wmb();
@@ -326,33 +329,41 @@ static void update_traceon_count(void **data, bool on)
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data)
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(data, 1);
	update_traceon_count(ops, ip, tr, 1, data);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data)
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(data, 0);
	update_traceon_count(ops, ip, tr, 0, data);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracing_is_on())
	if (tracer_tracing_is_on(tr))
		return;

	tracing_on();
	tracer_tracing_on(tr);
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracing_is_on())
	if (!tracer_tracing_is_on(tr))
		return;

	tracing_off();
	tracer_tracing_off(tr);
}

/*
@@ -364,144 +375,218 @@ ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
 */
#define STACK_SKIP 4

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data)
static __always_inline void trace_stack(struct trace_array *tr)
{
	trace_dump_stack(STACK_SKIP);
	unsigned long flags;
	int pc;

	local_save_flags(flags);
	pc = preempt_count();

	__trace_stack(tr, flags, STACK_SKIP, pc);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data)
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	long *count = (long *)data;
	trace_stack(tr);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct trace_array *tr, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* unlimited? */
	if (!mapper) {
		trace_stack(tr);
		return;
	}

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {

		if (!tracing_is_on())
			return;

		old_count = *count;

		if (!old_count)
			return;

		/* unlimited? */
		if (old_count == -1) {
			trace_dump_stack(STACK_SKIP);
			return;
		}

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_dump_stack(STACK_SKIP);
			trace_stack(tr);

		if (!tracing_is_on())
			return;

	} while (new_count != old_count);
}

static int update_count(void **data)
static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
			void *data)
{
	unsigned long *count = (long *)data;
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (!*count)
		return 0;
	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (*count != -1)
	if (count) {
		if (*count <= 0)
			return 0;
		(*count)--;
	}

	return 1;
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip, void **data)
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	if (update_count(data))
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip, void **data)
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	if (update_count(data))
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ORIG);
}

static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, void *data)
		   unsigned long ip, struct ftrace_probe_ops *ops,
		   void *data)
{
	long count = (long)data;
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (count == -1)
		seq_puts(m, ":unlimited\n");
	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_printf(m, ":count=%ld\n", count);
		seq_puts(m, ":unlimited\n");

	return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, data);
	return ftrace_probe_print("traceon", m, ip, ops, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, data);
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, data);
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, data);
	return ftrace_probe_print("dump", m, ip, ops, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, data);
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}

static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func = ftrace_traceon_count,
	.print = ftrace_traceon_print,
	.init = ftrace_count_init,
	.free = ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func = ftrace_traceoff_count,
	.print = ftrace_traceoff_print,
	.init = ftrace_count_init,
	.free = ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func = ftrace_stacktrace_count,
	.print = ftrace_stacktrace_print,
	.init = ftrace_count_init,
	.free = ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func = ftrace_dump_probe,
	.print = ftrace_dump_print,
	.init = ftrace_count_init,
	.free = ftrace_count_free,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
@@ -525,7 +610,8 @@ static struct ftrace_probe_ops stacktrace_probe_ops = {
};

static int
ftrace_trace_probe_callback(struct ftrace_probe_ops *ops,
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
@@ -537,10 +623,8 @@ ftrace_trace_probe_callback(struct ftrace_probe_ops *ops,
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}
	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;
@@ -559,13 +643,13 @@ ftrace_trace_probe_callback(struct ftrace_probe_ops *ops,
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);
	ret = register_ftrace_function_probe(glob, tr, ops, count);

	return ret < 0 ? ret : 0;
}

static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
@@ -576,24 +660,24 @@ ftrace_trace_onoff_callback(struct ftrace_hash *hash,
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_stacktrace_callback(struct ftrace_hash *hash,
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_dump_callback(struct ftrace_hash *hash,
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
		     char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
@@ -601,12 +685,12 @@ ftrace_dump_callback(struct ftrace_hash *hash,
	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct ftrace_hash *hash,
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
@@ -614,7 +698,7 @@ ftrace_cpudump_callback(struct ftrace_hash *hash,
	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

@@ -687,9 +771,8 @@ static inline int init_func_cmd_traceon(void)
}
#endif /* CONFIG_DYNAMIC_FTRACE */

static __init int init_function_trace(void)
__init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}
core_initcall(init_function_trace);
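Note: with the rework above, the traceon/traceoff/stacktrace probe callbacks receive the trace_array of the instance they were registered in and flip that instance's buffer via tracer_tracing_on()/tracer_tracing_off() instead of the global tracing_on. A rough editorial sketch of exercising this from the shell (paths are an assumption; tracefs is usually mounted at /sys/kernel/debug/tracing, and the function name and trigger syntax mirror the selftests added later in this series):

    cd /sys/kernel/debug/tracing
    mkdir instances/foo
    cd instances/foo
    # fire at most 3 times, toggling only this instance's tracing_on
    echo 'schedule:traceoff:3' > set_ftrace_filter
    cat tracing_on          # drops to 0 once schedule() is hit
    cat ../../tracing_on    # the top-level instance keeps tracing
    echo '!schedule:traceoff:0' > set_ftrace_filter   # remove the trigger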
@@ -25,6 +25,7 @@
#include "trace_probe.h"

#define KPROBE_EVENT_SYSTEM "kprobes"
#define KRETPROBE_MAXACTIVE_MAX 4096

/**
 * Kprobe event core functions
@@ -282,6 +283,7 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group,
					     void *addr,
					     const char *symbol,
					     unsigned long offs,
					     int maxactive,
					     int nargs, bool is_return)
{
	struct trace_kprobe *tk;
@@ -309,6 +311,8 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group,
	else
		tk->rp.kp.pre_handler = kprobe_dispatcher;

	tk->rp.maxactive = maxactive;

	if (!event || !is_good_name(event)) {
		ret = -EINVAL;
		goto error;
@@ -598,8 +602,10 @@ static int create_trace_kprobe(int argc, char **argv)
{
	/*
	 * Argument syntax:
	 *  - Add kprobe: p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe: r[:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
	 *  - Add kprobe:
	 *      p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe:
	 *      r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
	 * Fetch args:
	 *  $retval : fetch return value
	 *  $stack : fetch stack address
@@ -619,6 +625,7 @@ static int create_trace_kprobe(int argc, char **argv)
	int i, ret = 0;
	bool is_return = false, is_delete = false;
	char *symbol = NULL, *event = NULL, *group = NULL;
	int maxactive = 0;
	char *arg;
	unsigned long offset = 0;
	void *addr = NULL;
@@ -637,8 +644,28 @@ static int create_trace_kprobe(int argc, char **argv)
		return -EINVAL;
	}

	if (argv[0][1] == ':') {
		event = &argv[0][2];
	event = strchr(&argv[0][1], ':');
	if (event) {
		event[0] = '\0';
		event++;
	}
	if (is_return && isdigit(argv[0][1])) {
		ret = kstrtouint(&argv[0][1], 0, &maxactive);
		if (ret) {
			pr_info("Failed to parse maxactive.\n");
			return ret;
		}
		/* kretprobes instances are iterated over via a list. The
		 * maximum should stay reasonable.
		 */
		if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
			pr_info("Maxactive is too big (%d > %d).\n",
				maxactive, KRETPROBE_MAXACTIVE_MAX);
			return -E2BIG;
		}
	}

	if (event) {
		if (strchr(event, '/')) {
			group = event;
			event = strchr(group, '/') + 1;
@@ -715,8 +742,8 @@ static int create_trace_kprobe(int argc, char **argv)
			 is_return ? 'r' : 'p', addr);
		event = buf;
	}
	tk = alloc_trace_kprobe(group, event, addr, symbol, offset, argc,
				is_return);
	tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
				argc, is_return);
	if (IS_ERR(tk)) {
		pr_info("Failed to allocate trace_probe.(%d)\n",
			(int)PTR_ERR(tk));
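The parsing change above lets a kretprobe definition carry an optional maxactive count directly after the leading 'r'. As a short illustration, a sketch assuming the tracefs directory and reusing the probe and event names from the kretprobe_maxactive.tc selftest added later in this series:

    cd /sys/kernel/debug/tracing
    # kretprobe on inet_csk_accept with up to 10 concurrent instances
    echo 'r10:myprobeaccept inet_csk_accept' > kprobe_events
    grep myprobeaccept kprobe_events
    # remove the probe again
    echo '-:myprobeaccept' >> kprobe_events

Values above KRETPROBE_MAXACTIVE_MAX (4096) are rejected with -E2BIG, per the check added above.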
@@ -35,7 +35,7 @@ unsigned long stack_trace_max_size;
arch_spinlock_t stack_trace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static DEFINE_PER_CPU(int, trace_active);
DEFINE_PER_CPU(int, disable_stack_tracer);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
@@ -96,6 +96,14 @@ check_stack(unsigned long ip, unsigned long *stack)
	if (in_nmi())
		return;

	/*
	 * There's a slight chance that we are tracing inside the
	 * RCU infrastructure, and rcu_irq_enter() will not work
	 * as expected.
	 */
	if (unlikely(rcu_irq_enter_disabled()))
		return;

	local_irq_save(flags);
	arch_spin_lock(&stack_trace_max_lock);

@@ -207,13 +215,12 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;
	int cpu;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
	__this_cpu_inc(disable_stack_tracer);
	if (__this_cpu_read(disable_stack_tracer) != 1)
		goto out;

	ip += MCOUNT_INSN_SIZE;
@@ -221,7 +228,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
	check_stack(ip, &stack);

 out:
	per_cpu(trace_active, cpu)--;
	__this_cpu_dec(disable_stack_tracer);
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}
@@ -253,7 +260,6 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
@@ -264,16 +270,15 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu trace_active here.
	 * the percpu disable_stack_tracer here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;
	__this_cpu_inc(disable_stack_tracer);

	arch_spin_lock(&stack_trace_max_lock);
	*ptr = val;
	arch_spin_unlock(&stack_trace_max_lock);

	per_cpu(trace_active, cpu)--;
	__this_cpu_dec(disable_stack_tracer);
	local_irq_restore(flags);

	return count;
@@ -307,12 +312,9 @@ t_next(struct seq_file *m, void *v, loff_t *pos)

static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;
	__this_cpu_inc(disable_stack_tracer);

	arch_spin_lock(&stack_trace_max_lock);

@@ -324,12 +326,9 @@ static void *t_start(struct seq_file *m, loff_t *pos)

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&stack_trace_max_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;
	__this_cpu_dec(disable_stack_tracer);

	local_irq_enable();
}
@@ -65,6 +65,7 @@
#include <linux/page_owner.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>

#include <asm/sections.h>
#include <asm/tlbflush.h>

@@ -412,6 +412,7 @@ static int
is_mcounted_section_name(char const *const txtname)
{
	return strcmp(".text", txtname) == 0 ||
		strcmp(".init.text", txtname) == 0 ||
		strcmp(".ref.text", txtname) == 0 ||
		strcmp(".sched.text", txtname) == 0 ||
		strcmp(".spinlock.text", txtname) == 0 ||

@@ -130,6 +130,7 @@ if ($inputfile =~ m,kernel/trace/ftrace\.o$,) {
# Acceptable sections to record.
my %text_sections = (
     ".text" => 1,
     ".init.text" => 1,
     ".ref.text" => 1,
     ".sched.text" => 1,
     ".spinlock.text" => 1,
@@ -16,6 +16,7 @@ echo " -k|--keep	Keep passed test logs"
echo " -v|--verbose	Increase verbosity of test messages"
echo " -vv		Alias of -v -v (Show all results in stdout)"
echo " -d|--debug	Debug mode (trace all shell commands)"
echo " -l|--logdir <dir>	Save logs on the <dir>"
exit $1
}

@@ -64,6 +65,10 @@ parse_opts() { # opts
	DEBUG=1
	shift 1
	;;
	--logdir|-l)
	LOG_DIR=$2
	shift 2
	;;
	*.tc)
	if [ -f "$1" ]; then
		OPT_TEST_CASES="$OPT_TEST_CASES `abspath $1`"
@@ -145,11 +150,16 @@ XFAILED_CASES=
UNDEFINED_CASES=
TOTAL_RESULT=0

INSTANCE=
CASENO=0
testcase() { # testfile
	CASENO=$((CASENO+1))
	desc=`grep "^#[ \t]*description:" $1 | cut -f2 -d:`
	prlog -n "[$CASENO]$desc"
	prlog -n "[$CASENO]$INSTANCE$desc"
}

test_on_instance() { # testfile
	grep -q "^#[ \t]*flags:.*instance" $1
}

eval_result() { # sigval
@@ -266,6 +276,17 @@ for t in $TEST_CASES; do
	run_test $t
done

# Test on instance loop
INSTANCE=" (instance) "
for t in $TEST_CASES; do
	test_on_instance $t || continue
	SAVED_TRACING_DIR=$TRACING_DIR
	export TRACING_DIR=`mktemp -d $TRACING_DIR/instances/ftracetest.XXXXXX`
	run_test $t
	rmdir $TRACING_DIR
	TRACING_DIR=$SAVED_TRACING_DIR
done

prlog ""
prlog "# of passed: " `echo $PASSED_CASES | wc -w`
prlog "# of failed: " `echo $FAILED_CASES | wc -w`
@@ -1,5 +1,6 @@
#!/bin/sh
# description: Basic test for tracers
# flags: instance
test -f available_tracers
for t in `cat available_tracers`; do
	echo $t > current_tracer

@@ -1,5 +1,6 @@
#!/bin/sh
# description: Basic trace clock test
# flags: instance
test -f trace_clock
for c in `cat trace_clock | tr -d \[\]`; do
	echo $c > trace_clock

@@ -1,5 +1,6 @@
#!/bin/sh
# description: event tracing - enable/disable with event level files
# flags: instance

do_reset() {
	echo > set_event

@@ -1,5 +1,6 @@
#!/bin/sh
# description: event tracing - restricts events based on pid
# flags: instance

do_reset() {
	echo > set_event

@@ -1,5 +1,6 @@
#!/bin/sh
# description: event tracing - enable/disable with subsystem level files
# flags: instance

do_reset() {
	echo > set_event
@@ -0,0 +1,114 @@
#!/bin/sh
# description: ftrace - test for function event triggers
# flags: instance
#
# Ftrace allows adding triggers to functions, such as enabling or disabling
# tracing, enabling or disabling trace events, or recording a stack trace
# within the ring buffer.
#
# This test is designed to test event triggers
#

# The triggers are set within the set_ftrace_filter file
if [ ! -f set_ftrace_filter ]; then
    echo "set_ftrace_filter not found? Is dynamic ftrace not set?"
    exit_unsupported
fi

do_reset() {
    reset_ftrace_filter
    reset_tracer
    disable_events
    clear_trace
    enable_tracing
}

fail() { # mesg
    do_reset
    echo $1
    exit $FAIL
}

SLEEP_TIME=".1"

do_reset

echo "Testing function probes with events:"

EVENT="sched:sched_switch"
EVENT_ENABLE="events/sched/sched_switch/enable"

cnt_trace() {
    grep -v '^#' trace | wc -l
}

test_event_enabled() {
    val=$1

    e=`cat $EVENT_ENABLE`
    if [ "$e" != $val ]; then
	echo "Expected $val but found $e"
	exit -1
    fi
}

run_enable_disable() {
    enable=$1			# enable
    Enable=$2			# Enable
    check_disable=$3		# 0
    check_enable_star=$4	# 1*
    check_disable_star=$5	# 0*

    cnt=`cnt_trace`
    if [ $cnt -ne 0 ]; then
	fail "Found junk in trace file"
    fi

    echo "$Enable event all the time"

    echo $check_disable > $EVENT_ENABLE
    sleep $SLEEP_TIME

    test_event_enabled $check_disable

    echo "schedule:${enable}_event:$EVENT" > set_ftrace_filter

    echo " make sure it works 5 times"

    for i in `seq 5`; do
	sleep $SLEEP_TIME
	echo " test $i"
	test_event_enabled $check_enable_star

	echo $check_disable > $EVENT_ENABLE
    done
    sleep $SLEEP_TIME
    echo " make sure it still works"
    test_event_enabled $check_enable_star

    reset_ftrace_filter

    echo " make sure it only works 3 times"

    echo $check_disable > $EVENT_ENABLE
    sleep $SLEEP_TIME

    echo "schedule:${enable}_event:$EVENT:3" > set_ftrace_filter

    for i in `seq 3`; do
	sleep $SLEEP_TIME
	echo " test $i"
	test_event_enabled $check_enable_star

	echo $check_disable > $EVENT_ENABLE
    done

    sleep $SLEEP_TIME
    echo " make sure it stops working"
    test_event_enabled $check_disable_star

    do_reset
}

run_enable_disable enable Enable 0 "1*" "0*"
run_enable_disable disable Disable 1 "0*" "1*"
@@ -0,0 +1,132 @@
#!/bin/sh
# description: ftrace - test reading of set_ftrace_filter
#
# The set_ftrace_filter file of ftrace is used to list functions as well as
# triggers (probes) attached to functions. The code to read this file is not
# straightforward and has had various bugs in the past. This test is designed
# to add functions and triggers to that file in various ways and read that
# file in various ways (cat vs dd).
#

# The triggers are set within the set_ftrace_filter file
if [ ! -f set_ftrace_filter ]; then
    echo "set_ftrace_filter not found? Is dynamic ftrace not set?"
    exit_unsupported
fi

do_reset() {
    reset_tracer
    reset_ftrace_filter
    disable_events
    clear_trace
    enable_tracing
}

fail() { # mesg
    do_reset
    echo $1
    exit $FAIL
}

do_reset

FILTER=set_ftrace_filter
FUNC1="schedule"
FUNC2="do_IRQ"

ALL_FUNCS="#### all functions enabled ####"

test_func() {
    if ! echo "$1" | grep -q "^$2\$"; then
	return 0
    fi
    echo "$1" | grep -v "^$2\$"
    return 1
}

check_set_ftrace_filter() {
    cat=`cat $FILTER`
    dd1=`dd if=$FILTER bs=1 | grep -v -e 'records in' -e 'records out' -e 'bytes copied'`
    dd100=`dd if=$FILTER bs=100 | grep -v -e 'records in' -e 'records out' -e 'bytes copied'`

    echo "Testing '$@'"

    while [ $# -gt 0 ]; do
	echo "test $1"
	if cat=`test_func "$cat" "$1"`; then
	    return 0
	fi
	if dd1=`test_func "$dd1" "$1"`; then
	    return 0
	fi
	if dd100=`test_func "$dd100" "$1"`; then
	    return 0
	fi
	shift
    done

    if [ -n "$cat" ]; then
	return 0
    fi
    if [ -n "$dd1" ]; then
	return 0
    fi
    if [ -n "$dd100" ]; then
	return 0
    fi
    return 1;
}

if check_set_ftrace_filter "$ALL_FUNCS"; then
    fail "Expected only $ALL_FUNCS"
fi

echo "$FUNC1:traceoff" > set_ftrace_filter
if check_set_ftrace_filter "$ALL_FUNCS" "$FUNC1:traceoff:unlimited"; then
    fail "Expected $ALL_FUNCS and $FUNC1:traceoff:unlimited"
fi

echo "$FUNC1" > set_ftrace_filter
if check_set_ftrace_filter "$FUNC1" "$FUNC1:traceoff:unlimited"; then
    fail "Expected $FUNC1 and $FUNC1:traceoff:unlimited"
fi

echo "$FUNC2" >> set_ftrace_filter
if check_set_ftrace_filter "$FUNC1" "$FUNC2" "$FUNC1:traceoff:unlimited"; then
    fail "Expected $FUNC1 $FUNC2 and $FUNC1:traceoff:unlimited"
fi

echo "$FUNC2:traceoff" >> set_ftrace_filter
if check_set_ftrace_filter "$FUNC1" "$FUNC2" "$FUNC1:traceoff:unlimited" "$FUNC2:traceoff:unlimited"; then
    fail "Expected $FUNC1 $FUNC2 $FUNC1:traceoff:unlimited and $FUNC2:traceoff:unlimited"
fi

echo "$FUNC1" > set_ftrace_filter
if check_set_ftrace_filter "$FUNC1" "$FUNC1:traceoff:unlimited" "$FUNC2:traceoff:unlimited"; then
    fail "Expected $FUNC1 $FUNC1:traceoff:unlimited and $FUNC2:traceoff:unlimited"
fi

echo > set_ftrace_filter
if check_set_ftrace_filter "$ALL_FUNCS" "$FUNC1:traceoff:unlimited" "$FUNC2:traceoff:unlimited"; then
    fail "Expected $ALL_FUNCS $FUNC1:traceoff:unlimited and $FUNC2:traceoff:unlimited"
fi

reset_ftrace_filter

if check_set_ftrace_filter "$ALL_FUNCS"; then
    fail "Expected $ALL_FUNCS"
fi

echo "$FUNC1" > set_ftrace_filter
if check_set_ftrace_filter "$FUNC1" ; then
    fail "Expected $FUNC1"
fi

echo "$FUNC2" >> set_ftrace_filter
if check_set_ftrace_filter "$FUNC1" "$FUNC2" ; then
    fail "Expected $FUNC1 and $FUNC2"
fi

do_reset

exit 0
@@ -0,0 +1,172 @@
#!/bin/sh
# description: ftrace - test for function traceon/off triggers
# flags: instance
#
# Ftrace allows adding triggers to functions, such as enabling or disabling
# tracing, enabling or disabling trace events, or recording a stack trace
# within the ring buffer.
#
# This test is designed to test enabling and disabling tracing triggers
#

# The triggers are set within the set_ftrace_filter file
if [ ! -f set_ftrace_filter ]; then
    echo "set_ftrace_filter not found? Is dynamic ftrace not set?"
    exit_unsupported
fi

do_reset() {
    reset_ftrace_filter
    reset_tracer
    disable_events
    clear_trace
    enable_tracing
}

fail() { # mesg
    do_reset
    echo $1
    exit $FAIL
}

SLEEP_TIME=".1"

do_reset

echo "Testing function probes with enabling disabling tracing:"

cnt_trace() {
    grep -v '^#' trace | wc -l
}

echo '** DISABLE TRACING'
disable_tracing
clear_trace

cnt=`cnt_trace`
if [ $cnt -ne 0 ]; then
    fail "Found junk in trace"
fi


echo '** ENABLE EVENTS'

echo 1 > events/enable

echo '** ENABLE TRACING'
enable_tracing

cnt=`cnt_trace`
if [ $cnt -eq 0 ]; then
    fail "Nothing found in trace"
fi

# powerpc uses .schedule
func="schedule"
x=`grep '^\.schedule$' available_filter_functions | wc -l`
if [ "$x" -eq 1 ]; then
    func=".schedule"
fi

echo '** SET TRACEOFF'

echo "$func:traceoff" > set_ftrace_filter

cnt=`grep schedule set_ftrace_filter | wc -l`
if [ $cnt -ne 1 ]; then
    fail "Did not find traceoff trigger"
fi

cnt=`cnt_trace`
sleep $SLEEP_TIME
cnt2=`cnt_trace`

if [ $cnt -ne $cnt2 ]; then
    fail "Tracing is not stopped"
fi

on=`cat tracing_on`
if [ $on != "0" ]; then
    fail "Tracing is not off"
fi

line1=`cat trace | tail -1`
sleep $SLEEP_TIME
line2=`cat trace | tail -1`

if [ "$line1" != "$line2" ]; then
    fail "Tracing file is still changing"
fi

clear_trace

cnt=`cnt_trace`
if [ $cnt -ne 0 ]; then
    fail "Tracing is still happening"
fi

echo "!$func:traceoff" >> set_ftrace_filter

cnt=`grep schedule set_ftrace_filter | wc -l`
if [ $cnt -ne 0 ]; then
    fail "traceoff trigger still exists"
fi

on=`cat tracing_on`
if [ $on != "0" ]; then
    fail "Tracing is started again"
fi

echo "$func:traceon" > set_ftrace_filter

cnt=`grep schedule set_ftrace_filter | wc -l`
if [ $cnt -ne 1 ]; then
    fail "traceon trigger not found"
fi

cnt=`cnt_trace`
if [ $cnt -eq 0 ]; then
    fail "Tracing did not start"
fi

on=`cat tracing_on`
if [ $on != "1" ]; then
    fail "Tracing was not enabled"
fi


echo "!$func:traceon" >> set_ftrace_filter

cnt=`grep schedule set_ftrace_filter | wc -l`
if [ $cnt -ne 0 ]; then
    fail "traceon trigger still exists"
fi

check_sleep() {
    val=$1
    sleep $SLEEP_TIME
    cat set_ftrace_filter
    on=`cat tracing_on`
    if [ $on != "$val" ]; then
	fail "Expected tracing_on to be $val, but it was $on"
    fi
}


echo "$func:traceoff:3" > set_ftrace_filter
check_sleep "0"
echo 1 > tracing_on
check_sleep "0"
echo 1 > tracing_on
check_sleep "0"
echo 1 > tracing_on
check_sleep "1"
echo "!$func:traceoff:0" > set_ftrace_filter

if grep -e traceon -e traceoff set_ftrace_filter; then
    fail "Tracing on and off triggers still exist"
fi

disable_events

exit 0
@@ -30,6 +30,27 @@ reset_events_filter() { # reset all current setting filters
    done
}

reset_ftrace_filter() { # reset all triggers in set_ftrace_filter
    echo > set_ftrace_filter
    grep -v '^#' set_ftrace_filter | while read t; do
	tr=`echo $t | cut -d: -f2`
	if [ "$tr" == "" ]; then
	    continue
	fi
	if [ $tr == "enable_event" -o $tr == "disable_event" ]; then
	    tr=`echo $t | cut -d: -f1-4`
	    limit=`echo $t | cut -d: -f5`
	else
	    tr=`echo $t | cut -d: -f1-2`
	    limit=`echo $t | cut -d: -f3`
	fi
	if [ "$limit" != "unlimited" ]; then
	    tr="$tr:$limit"
	fi
	echo "!$tr" > set_ftrace_filter
    done
}

disable_events() {
    echo 0 > events/enable
}
@@ -0,0 +1,39 @@
#!/bin/sh
# description: Kretprobe dynamic event with maxactive

[ -f kprobe_events ] || exit_unsupported # this is configurable

echo > kprobe_events

# Test if we successfully reject unknown messages
if echo 'a:myprobeaccept inet_csk_accept' > kprobe_events; then false; else true; fi

# Test if we successfully reject a too-big maxactive
if echo 'r1000000:myprobeaccept inet_csk_accept' > kprobe_events; then false; else true; fi

# Test if we successfully reject unparsable numbers for maxactive
if echo 'r10fuzz:myprobeaccept inet_csk_accept' > kprobe_events; then false; else true; fi

# Test for kretprobe with event name without maxactive
echo 'r:myprobeaccept inet_csk_accept' > kprobe_events
grep myprobeaccept kprobe_events
test -d events/kprobes/myprobeaccept
echo '-:myprobeaccept' >> kprobe_events

# Test for kretprobe with event name with a small maxactive
echo 'r10:myprobeaccept inet_csk_accept' > kprobe_events
grep myprobeaccept kprobe_events
test -d events/kprobes/myprobeaccept
echo '-:myprobeaccept' >> kprobe_events

# Test for kretprobe without event name without maxactive
echo 'r inet_csk_accept' > kprobe_events
grep inet_csk_accept kprobe_events
echo > kprobe_events

# Test for kretprobe without event name with a small maxactive
echo 'r10 inet_csk_accept' > kprobe_events
grep inet_csk_accept kprobe_events
echo > kprobe_events

clear_trace
@@ -1,5 +1,6 @@
#!/bin/sh
# description: event trigger - test event enable/disable trigger
# flags: instance

do_reset() {
    reset_trigger

@@ -1,5 +1,6 @@
#!/bin/sh
# description: event trigger - test trigger filter
# flags: instance

do_reset() {
    reset_trigger

@@ -1,5 +1,6 @@
#!/bin/sh
# description: event trigger - test histogram modifiers
# flags: instance

do_reset() {
    reset_trigger

@@ -1,5 +1,6 @@
#!/bin/sh
# description: event trigger - test histogram trigger
# flags: instance

do_reset() {
    reset_trigger

@@ -1,5 +1,6 @@
#!/bin/sh
# description: event trigger - test multiple histogram triggers
# flags: instance

do_reset() {
    reset_trigger