
tracing: Add flag to control different traces

More traces like the event trace or trace marker will be supported.
Add flags for the different traces so that they can be controlled
separately. Move the current function trace to its own flag
instead of the global ftrace enable flag.

Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Reviewed-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Signed-off-by: Tingwei Zhang <tingwei@codeaurora.org>
Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Link: https://lore.kernel.org/r/20201005071319.78508-3-alexander.shishkin@linux.intel.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Author: Tingwei Zhang, 2020-10-05 10:13:13 +03:00, committed by Greg Kroah-Hartman
parent 7b9749bd83
commit 8438f52114
2 changed files with 24 additions and 17 deletions

include/linux/trace.h

@@ -3,6 +3,9 @@
 #define _LINUX_TRACE_H
 
 #ifdef CONFIG_TRACING
+
+#define TRACE_EXPORT_FUNCTION	BIT(0)
+
 /*
  * The trace export - an export of Ftrace output. The trace_export
  * can process traces and export them to a registered destination as
@@ -15,10 +18,12 @@
  * next		- pointer to the next trace_export
  * write	- copy traces which have been delt with ->commit() to
  *		  the destination
+ * flags	- which ftrace to be exported
  */
 struct trace_export {
 	struct trace_export __rcu	*next;
 	void (*write)(struct trace_export *, const void *, unsigned int);
+	int flags;
 };
 
 int register_ftrace_export(struct trace_export *export);
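Given the flags field and the TRACE_EXPORT_FUNCTION bit added above, a consumer opts in to a trace type when it registers its export. The following is a minimal sketch only; the my_export name, the empty write callback, and the init hook are hypothetical illustrations, not part of this patch:

#include <linux/init.h>
#include <linux/trace.h>

/* Hypothetical write callback: a real exporter (an STM driver, for
 * example) would copy the committed trace entry to its destination. */
static void my_export_write(struct trace_export *export,
			    const void *entry, unsigned int size)
{
	/* consume 'size' bytes of trace data here */
}

static struct trace_export my_export = {
	.write	= my_export_write,
	.flags	= TRACE_EXPORT_FUNCTION,	/* export function traces only */
};

static int __init my_export_init(void)
{
	return register_ftrace_export(&my_export);
}

The commit message notes that more trace types (event trace, trace marker) are planned; those would be selected through additional bits in the same flags field.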

kernel/trace/trace.c

@@ -2744,33 +2744,37 @@ trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
 
 static void
 trace_process_export(struct trace_export *export,
-	       struct ring_buffer_event *event)
+	       struct ring_buffer_event *event, int flag)
 {
 	struct trace_entry *entry;
 	unsigned int size = 0;
 
-	entry = ring_buffer_event_data(event);
-	size = ring_buffer_event_length(event);
-	export->write(export, entry, size);
+	if (export->flags & flag) {
+		entry = ring_buffer_event_data(event);
+		size = ring_buffer_event_length(event);
+		export->write(export, entry, size);
+	}
 }
 
 static DEFINE_MUTEX(ftrace_export_lock);
 
 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
 
-static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
+static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
 
-static inline void ftrace_exports_enable(void)
+static inline void ftrace_exports_enable(struct trace_export *export)
 {
-	static_branch_enable(&ftrace_exports_enabled);
+	if (export->flags & TRACE_EXPORT_FUNCTION)
+		static_branch_inc(&trace_function_exports_enabled);
 }
 
-static inline void ftrace_exports_disable(void)
+static inline void ftrace_exports_disable(struct trace_export *export)
 {
-	static_branch_disable(&ftrace_exports_enabled);
+	if (export->flags & TRACE_EXPORT_FUNCTION)
+		static_branch_dec(&trace_function_exports_enabled);
 }
 
-static void ftrace_exports(struct ring_buffer_event *event)
+static void ftrace_exports(struct ring_buffer_event *event, int flag)
 {
 	struct trace_export *export;
 
@@ -2778,7 +2782,7 @@ static void ftrace_exports(struct ring_buffer_event *event)
 
 	export = rcu_dereference_raw_check(ftrace_exports_list);
 	while (export) {
-		trace_process_export(export, event);
+		trace_process_export(export, event, flag);
 
 		export = rcu_dereference_raw_check(export->next);
 	}
@@ -2818,8 +2822,7 @@ rm_trace_export(struct trace_export **list, struct trace_export *export)
 static inline void
 add_ftrace_export(struct trace_export **list, struct trace_export *export)
 {
-	if (*list == NULL)
-		ftrace_exports_enable();
+	ftrace_exports_enable(export);
 
 	add_trace_export(list, export);
 }
@@ -2830,8 +2833,7 @@ rm_ftrace_export(struct trace_export **list, struct trace_export *export)
 	int ret;
 
 	ret = rm_trace_export(list, export);
-	if (*list == NULL)
-		ftrace_exports_disable();
+	ftrace_exports_disable(export);
 
 	return ret;
 }
@@ -2884,8 +2886,8 @@ trace_function(struct trace_array *tr,
 	entry->parent_ip		= parent_ip;
 
 	if (!call_filter_check_discard(call, entry, buffer, event)) {
-		if (static_branch_unlikely(&ftrace_exports_enabled))
-			ftrace_exports(event);
+		if (static_branch_unlikely(&trace_function_exports_enabled))
+			ftrace_exports(event, TRACE_EXPORT_FUNCTION);
 		__buffer_unlock_commit(buffer, event);
 	}
 }
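Note on the static key change: ftrace_exports_enable()/ftrace_exports_disable() now take the export being added or removed and use static_branch_inc()/static_branch_dec() rather than static_branch_enable()/static_branch_disable(). The static key therefore acts as a reference count, keeping the export branch in trace_function() enabled as long as at least one registered export has TRACE_EXPORT_FUNCTION set, which is why the "only toggle when the list becomes empty or non-empty" checks in add_ftrace_export()/rm_ftrace_export() could be dropped.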