ftrace: Allow for function tracing instance to filter functions
Create a "set_ftrace_filter" and "set_ftrace_notrace" files in the instance directories to let users filter of functions to trace for the given instance. Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent e3b3e2e847
commit 591dffdade
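For context, a minimal user-space sketch of how the new per-instance files are meant to be used. It assumes tracefs is mounted at the usual /sys/kernel/debug/tracing and that an instance named "foo" has already been created under instances/; the instance name and the "schedule" filter are only illustrative and not part of this commit.

/* Illustrative only: restrict one instance's function tracer to a single function. */
#include <stdio.h>

int main(void)
{
        /* Path assumes the usual tracefs mount point and an existing "foo" instance. */
        const char *path =
                "/sys/kernel/debug/tracing/instances/foo/set_ftrace_filter";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror("fopen");
                return 1;
        }

        /* Same effect as: echo schedule > set_ftrace_filter */
        fputs("schedule\n", f);
        fclose(f);
        return 0;
}

Selecting the function tracer in that instance's current_tracer would then record only the listed functions in that instance's buffer, independently of the top-level set_ftrace_filter.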
include/linux/ftrace.h
@@ -92,6 +92,7 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
  * STUB   - The ftrace_ops is just a place holder.
  * INITIALIZED - The ftrace_ops has already been initialized (first use time
  *            register_ftrace_function() is called, it will initialized the ops)
+ * DELETED - The ops are being deleted, do not let them be registered again.
  */
 enum {
         FTRACE_OPS_FL_ENABLED           = 1 << 0,
@@ -103,6 +104,7 @@ enum {
         FTRACE_OPS_FL_RECURSION_SAFE    = 1 << 6,
         FTRACE_OPS_FL_STUB              = 1 << 7,
         FTRACE_OPS_FL_INITIALIZED       = 1 << 8,
+        FTRACE_OPS_FL_DELETED           = 1 << 9,
 };
 
 /*
kernel/trace/ftrace.c
@@ -436,6 +436,9 @@ static int remove_ftrace_list_ops(struct ftrace_ops **list,
 
 static int __register_ftrace_function(struct ftrace_ops *ops)
 {
+        if (ops->flags & FTRACE_OPS_FL_DELETED)
+                return -EINVAL;
+
         if (FTRACE_WARN_ON(ops == &global_ops))
                 return -EINVAL;
 
@@ -4112,6 +4115,36 @@ static const struct file_operations ftrace_graph_notrace_fops = {
 };
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
+void ftrace_create_filter_files(struct ftrace_ops *ops,
+                                struct dentry *parent)
+{
+
+        trace_create_file("set_ftrace_filter", 0644, parent,
+                          ops, &ftrace_filter_fops);
+
+        trace_create_file("set_ftrace_notrace", 0644, parent,
+                          ops, &ftrace_notrace_fops);
+}
+
+/*
+ * The name "destroy_filter_files" is really a misnomer. Although
+ * in the future, it may actualy delete the files, but this is
+ * really intended to make sure the ops passed in are disabled
+ * and that when this function returns, the caller is free to
+ * free the ops.
+ *
+ * The "destroy" name is only to match the "create" name that this
+ * should be paired with.
+ */
+void ftrace_destroy_filter_files(struct ftrace_ops *ops)
+{
+        mutex_lock(&ftrace_lock);
+        if (ops->flags & FTRACE_OPS_FL_ENABLED)
+                ftrace_shutdown(ops, 0);
+        ops->flags |= FTRACE_OPS_FL_DELETED;
+        mutex_unlock(&ftrace_lock);
+}
+
 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
 {
 
@@ -4121,11 +4154,7 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
         trace_create_file("enabled_functions", 0444,
                         d_tracer, NULL, &ftrace_enabled_fops);
 
-        trace_create_file("set_ftrace_filter", 0644, d_tracer,
-                        &global_ops, &ftrace_filter_fops);
-
-        trace_create_file("set_ftrace_notrace", 0644, d_tracer,
-                        &global_ops, &ftrace_notrace_fops);
+        ftrace_create_filter_files(&global_ops, d_tracer);
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
         trace_create_file("set_graph_function", 0444, d_tracer,
kernel/trace/trace.c
@@ -6161,6 +6161,7 @@ static int instance_delete(const char *name)
 
         tracing_set_nop(tr);
         event_trace_del_tracer(tr);
+        ftrace_destroy_function_files(tr);
         debugfs_remove_recursive(tr->dir);
         free_percpu(tr->trace_buffer.data);
         ring_buffer_free(tr->trace_buffer.buffer);
@@ -6291,6 +6292,9 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
         trace_create_file("tracing_on", 0644, d_tracer,
                           tr, &rb_simple_fops);
 
+        if (ftrace_create_function_files(tr, d_tracer))
+                WARN(1, "Could not allocate function filter files");
+
 #ifdef CONFIG_TRACER_SNAPSHOT
         trace_create_file("snapshot", 0644, d_tracer,
                           tr, &snapshot_fops);
kernel/trace/trace.h
@@ -819,13 +819,36 @@ static inline int ftrace_trace_task(struct task_struct *task)
         return test_tsk_trace_trace(task);
 }
 extern int ftrace_is_dead(void);
+int ftrace_create_function_files(struct trace_array *tr,
+                                 struct dentry *parent);
+void ftrace_destroy_function_files(struct trace_array *tr);
 #else
 static inline int ftrace_trace_task(struct task_struct *task)
 {
         return 1;
 }
 static inline int ftrace_is_dead(void) { return 0; }
-#endif
+static inline int
+ftrace_create_function_files(struct trace_array *tr,
+                             struct dentry *parent)
+{
+        return 0;
+}
+static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
+#endif /* CONFIG_FUNCTION_TRACER */
+
+#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
+void ftrace_create_filter_files(struct ftrace_ops *ops,
+                                struct dentry *parent);
+void ftrace_destroy_filter_files(struct ftrace_ops *ops);
+#else
+/*
+ * The ops parameter passed in is usually undefined.
+ * This must be a macro.
+ */
+#define ftrace_create_filter_files(ops, parent) do { } while (0)
+#define ftrace_destroy_filter_files(ops) do { } while (0)
+#endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */
 
 int ftrace_event_is_function(struct ftrace_event_call *call);
 
kernel/trace/trace_functions.c
@@ -52,10 +52,34 @@ static int allocate_ftrace_ops(struct trace_array *tr)
         return 0;
 }
 
+
+int ftrace_create_function_files(struct trace_array *tr,
+                                 struct dentry *parent)
+{
+        int ret;
+
+        /* The top level array uses the "global_ops". */
+        if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL)) {
+                ret = allocate_ftrace_ops(tr);
+                if (ret)
+                        return ret;
+        }
+
+        ftrace_create_filter_files(tr->ops, parent);
+
+        return 0;
+}
+
+void ftrace_destroy_function_files(struct trace_array *tr)
+{
+        ftrace_destroy_filter_files(tr->ops);
+        kfree(tr->ops);
+        tr->ops = NULL;
+}
+
 static int function_trace_init(struct trace_array *tr)
 {
         struct ftrace_ops *ops;
-        int ret;
 
         if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
                 /* There's only one global tr */
@@ -69,10 +93,13 @@ static int function_trace_init(struct trace_array *tr)
                 else
                         ops = &trace_ops;
                 tr->ops = ops;
-        } else {
-                ret = allocate_ftrace_ops(tr);
-                if (ret)
-                        return ret;
+        } else if (!tr->ops) {
+                /*
+                 * Instance trace_arrays get their ops allocated
+                 * at instance creation. Unless it failed
+                 * the allocation.
+                 */
+                return -ENOMEM;
         }
 
         tr->trace_buffer.cpu = get_cpu();
@@ -87,9 +114,6 @@ static void function_trace_reset(struct trace_array *tr)
 {
         tracing_stop_function_trace(tr);
         tracing_stop_cmdline_record();
-        if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL))
-                kfree(tr->ops);
-        tr->ops = NULL;
 }
 
 static void function_trace_start(struct trace_array *tr)