Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-11-16 00:34:20 +08:00)
f413cdb80c
This patch implements the kernel-side support for ftrace event record sampling.

A new counter sampling attribute is added: PERF_SAMPLE_TP_RECORD, which
requests ftrace event record sampling. In this case, if a PERF_TYPE_TRACEPOINT
counter is active and a tracepoint fires, we emit the tracepoint binary record
to the perf counter event buffer as a sample.

Result, after setting the PERF_SAMPLE_TP_RECORD attribute from perf record:

	perf record -f -F 1 -a -e workqueue:workqueue_execution
	perf report -D

	0x21e18 [0x48]: event: 9
	.
	. ... raw event: size 72 bytes
	.  0000:  09 00 00 00 01 00 48 00 d0 c7 00 81 ff ff ff ff  ......H........
	.  0010:  0a 00 00 00 0a 00 00 00 21 00 00 00 00 00 00 00  ........!......
	.  0020:  2b 00 01 02 0a 00 00 00 0a 00 00 00 65 76 65 6e  +...........eve
	.  0030:  74 73 2f 31 00 00 00 00 00 00 00 00 0a 00 00 00  ts/1...........
	.  0040:  e0 b1 31 81 ff ff ff ff                          .......
	.
	0x21e18 [0x48]: PERF_EVENT_SAMPLE (IP, 1): 10: 0xffffffff8100c7d0 period: 33

The raw ftrace binary record starts at offset 0020. Translation:

	struct trace_entry {
		type		= 0x2b = 43;
		flags		= 1;
		preempt_count	= 2;
		pid		= 0xa = 10;
		tgid		= 0xa = 10;
	}

	thread_comm = "events/1"
	thread_pid  = 0xa = 10;
	func        = 0xffffffff8131b1e0 = flush_to_ldisc()

What will come next?

- Userspace support ('perf trace'), 'flight data recorder' mode for
  perf trace, etc.

- The unconditional copy from the profiling callback has a cost even
  when nobody wants such sampling to occur; this needs to be fixed in
  the future. For that we need instant access to the perf counter
  attribute, which is a matter of a flag to add in struct ftrace_event.

- Take care of event recursion! Don't ever try to record a lock event,
  for example: some locking is used in the profiling fast path and
  leads to tracing recursion. That will be fixed using a raw spinlock
  or recursion protection.

- [...]

- Profit! :-)

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Tom Zanussi <tzanussi@gmail.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Gabriel Munteanu <eduard.munteanu@linux360.ro>
Cc: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
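As an illustrative sketch (not part of this patch): a userspace tracepoint counter could request record sampling roughly as below, assuming the 2009-era struct perf_counter_attr layout and a tracepoint id (tp_id, hypothetical here) read from the event's debugfs "id" file:

	struct perf_counter_attr attr = {};

	attr.type		= PERF_TYPE_TRACEPOINT;
	attr.config		= tp_id;	/* hypothetical: tracepoint event id */
	attr.sample_period	= 1;		/* sample every occurrence */
	attr.sample_type	= PERF_SAMPLE_IP | PERF_SAMPLE_TP_RECORD;
	/* attr is then handed to the perf counter open syscall as usual */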
714 lines
18 KiB
C
/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *	struct trace_entry	ent;
 *	<type>			<item>;
 *	<type2>			<item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */

#include <linux/ftrace_event.h>

#undef __field
#define __field(type, item)		type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) unsigned short __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
	struct ftrace_raw_##name {				\
		struct trace_entry	ent;			\
		tstruct						\
		char			__data[0];		\
	};							\
	static struct ftrace_event_call event_##name

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

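/*
 * Illustrative sketch (not part of the generated code): for a hypothetical
 *
 *	TRACE_EVENT(foo_bar,
 *		TP_PROTO(int cpu, const char *name),
 *		TP_ARGS(cpu, name),
 *		TP_STRUCT__entry(
 *			__field(	int,	cpu	)
 *			__string(	name,	name	)
 *		),
 *		...)
 *
 * stage 1 would roughly produce:
 *
 *	struct ftrace_raw_foo_bar {
 *		struct trace_entry	ent;
 *		int			cpu;
 *		unsigned short		__data_loc_name;
 *		char			__data[0];
 *	};
 *	static struct ftrace_event_call event_foo_bar;
 *
 * (foo_bar and its fields are made up for this sketch.)
 */
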
/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *	int	<item1>;
 *	int	<item2>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro will create each int <item>; this is
 * to keep the offset of each array from the beginning of the event.
 */

#undef __field
#define __field(type, item);

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	int item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)	\
	struct ftrace_data_offsets_##call {			\
		tstruct;					\
	};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

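/*
 * Continuing the hypothetical foo_bar sketch from stage 1, this pass
 * would roughly produce:
 *
 *	struct ftrace_data_offsets_foo_bar {
 *		int name;
 *	};
 *
 * i.e. one int per __string()/__dynamic_array() field and nothing for
 * plain __field()/__array() members.
 */
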
/*
 * Set up the display format of the trace point.
 *
 * int
 * ftrace_format_##call(struct trace_seq *s)
 * {
 *	struct ftrace_raw_##call field;
 *	int ret;
 *
 *	ret = trace_seq_printf(s, #type " " #item ";"
 *			       " offset:%u; size:%u;\n",
 *			       offsetof(struct ftrace_raw_##call, item),
 *			       sizeof(field.type));
 *
 * }
 */

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef __field
#define __field(type, item)						\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"	\
			       "offset:%u;\tsize:%u;\n",		\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item));	\
	if (!ret)							\
		return 0;

#undef __array
#define __array(type, item, len)					\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
			       "offset:%u;\tsize:%u;\n",		\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item));	\
	if (!ret)							\
		return 0;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	ret = trace_seq_printf(s, "\tfield:__data_loc " #item ";\t"	\
			       "offset:%u;\tsize:%u;\n",		\
			       (unsigned int)offsetof(typeof(field),	\
					__data_loc_##item),		\
			       (unsigned int)sizeof(field.__data_loc_##item)); \
	if (!ret)							\
		return 0;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __entry
#define __entry REC

#undef __print_symbolic
#undef __get_dynamic_array
#undef __get_str

#undef TP_printk
#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
static int								\
ftrace_format_##call(struct trace_seq *s)				\
{									\
	struct ftrace_raw_##call field __attribute__((unused));	\
	int ret = 0;							\
									\
	tstruct;							\
									\
	trace_seq_printf(s, "\nprint fmt: " print);			\
									\
	return ret;							\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

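/*
 * Sketch: for the hypothetical foo_bar event, ftrace_format_foo_bar()
 * would emit text along these lines (offsets and sizes made up here):
 *
 *	field:int cpu;	offset:12;	size:4;
 *	field:__data_loc name;	offset:16;	size:2;
 *
 *	print fmt: ...
 *
 * which is what the event's "format" file shows in the tracing debugfs
 * directory.
 */
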
/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field; <-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>.id) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	p = &get_cpu_var(ftrace_event_seq);
 *	trace_seq_init(p);
 *	ret = trace_seq_printf(s, <TP_printk> "\n");
 *	put_cpu();
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + __entry->__data_loc_##field)

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags flags[] =		\
			{ flag_array, { -1, NULL }};			\
		ftrace_print_flags_seq(p, delim, flag, flags);		\
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		ftrace_print_symbols_seq(p, value, symbols);		\
	})

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
enum print_line_t							\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags)	\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##call *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p;						\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_##call.id) {				\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	p = &get_cpu_var(ftrace_event_seq);				\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, #call ": " print);			\
	put_cpu();							\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

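/*
 * Worked sketch: for the hypothetical foo_bar event, a TP_printk() such as
 *
 *	TP_printk("cpu=%d name=%s", __entry->cpu, __get_str(name))
 *
 * expands here into a trace_seq_printf() argument list in which
 * __get_str(name) becomes
 *
 *	(char *)((void *)field + field->__data_loc_name)
 *
 * i.e. the string is found by adding the stored offset to the start of
 * the binary record.
 */
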
#undef __field
#define __field(type, item)						\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item), is_signed_type(type)); \
	if (ret)							\
		return ret;

#undef __array
#define __array(type, item, len)					\
	BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);				\
	ret = trace_define_field(event_call, #type "[" #len "]", #item, \
				 offsetof(typeof(field), item),		\
				 sizeof(field.item), 0);		\
	if (ret)							\
		return ret;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	ret = trace_define_field(event_call, "__data_loc" "[" #type "]", #item,\
				 offsetof(typeof(field), __data_loc_##item), \
				 sizeof(field.__data_loc_##item), 0);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
int									\
ftrace_define_fields_##call(void)					\
{									\
	struct ftrace_raw_##call field;					\
	struct ftrace_event_call *event_call = &event_##call;		\
	int ret;							\
									\
	__common_field(int, type, 1);					\
	__common_field(unsigned char, flags, 0);			\
	__common_field(unsigned char, preempt_count, 0);		\
	__common_field(int, pid, 1);					\
	__common_field(int, tgid, 1);					\
									\
	tstruct;							\
									\
	return ret;							\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

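/*
 * Sketch: for the hypothetical foo_bar event, after the five
 * __common_field() entries, ftrace_define_fields_foo_bar() would
 * roughly do
 *
 *	ret = trace_define_field(event_call, "int", "cpu",
 *				 offsetof(struct ftrace_raw_foo_bar, cpu),
 *				 sizeof(field.cpu), is_signed_type(int));
 *	ret = trace_define_field(event_call, "__data_loc" "[" "char" "]", "name",
 *				 offsetof(struct ftrace_raw_foo_bar, __data_loc_name),
 *				 sizeof(field.__data_loc_name), 0);
 *
 * so the filter code knows each field's name, offset, size and signedness.
 */
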
/*
 * remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_size += (len) * sizeof(type);

#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)	\

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
static inline int ftrace_get_offsets_##call(				\
	struct ftrace_data_offsets_##call *__data_offsets, proto)	\
{									\
	int __data_size = 0;						\
	struct ftrace_raw_##call __maybe_unused *entry;			\
									\
	tstruct;							\
									\
	return __data_size;						\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

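/*
 * Worked sketch: for foo_bar with name = "events/1" (8 chars + NUL, so
 * strlen(src) + 1 == 9), ftrace_get_offsets_foo_bar() would do
 *
 *	__data_offsets->name = 0 + offsetof(struct ftrace_raw_foo_bar, __data);
 *	__data_size	     = 9 * sizeof(char);
 *
 * and return 9: the extra space to reserve after the fixed-size part of
 * the record.
 */
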
#ifdef CONFIG_EVENT_PROFILE

/*
 * Generate the functions needed for tracepoint perf_counter support.
 *
 * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
 *
 * static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
 * {
 *	int ret = 0;
 *
 *	if (!atomic_inc_return(&event_call->profile_count))
 *		ret = register_trace_<call>(ftrace_profile_<call>);
 *
 *	return ret;
 * }
 *
 * static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
 * {
 *	if (atomic_add_negative(-1, &event_call->profile_count))
 *		unregister_trace_<call>(ftrace_profile_<call>);
 * }
 *
 */

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
									\
static void ftrace_profile_##call(proto);				\
									\
static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
{									\
	int ret = 0;							\
									\
	if (!atomic_inc_return(&event_call->profile_count))		\
		ret = register_trace_##call(ftrace_profile_##call);	\
									\
	return ret;							\
}									\
									\
static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
{									\
	if (atomic_add_negative(-1, &event_call->profile_count))	\
		unregister_trace_##call(ftrace_profile_##call);		\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#endif

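/*
 * Note on the counting above: profile_count starts at ATOMIC_INIT(-1)
 * (see _TRACE_PROFILE_INIT() below), so the first enable sees
 * atomic_inc_return() return 0 and registers the probe exactly once;
 * later enables return a positive value and skip registration.
 * Symmetrically, atomic_add_negative(-1, ...) is only true when the
 * last user goes away (0 -> -1), which unregisters the probe.
 */
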
/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * static void ftrace_event_<call>(proto)
 * {
 *	event_trace_printk(_RET_IP_, "<call>: " <fmt>);
 * }
 *
 * static int ftrace_reg_event_<call>(void)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_event_<call>);
 * }
 *
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(proto)
 * {
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	unsigned long irq_flags;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	event = trace_current_buffer_lock_reserve(event_<call>.id,
 *				  sizeof(struct ftrace_raw_<call>),
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *
 *	<assign>;  <-- Here we assign the entries by the __field and
 *		       __array macros.
 *
 *	trace_current_buffer_unlock_commit(event, irq_flags, pc);
 * }
 *
 * static int ftrace_raw_reg_event_<call>(void)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_raw_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_raw_event_<call>);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= ftrace_raw_output_<call>, <-- stage 3
 * };
 *
 * static int ftrace_raw_init_event_<call>(void)
 * {
 *	int id;
 *
 *	id = register_ftrace_event(&ftrace_event_type_<call>);
 *	if (!id)
 *		return -ENODEV;
 *	event_<call>.id = id;
 *	return 0;
 * }
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *	.name			= "<call>",
 *	.system			= "<system>",
 *	.raw_init		= ftrace_raw_init_event_<call>,
 *	.regfunc		= ftrace_reg_event_<call>,
 *	.unregfunc		= ftrace_unreg_event_<call>,
 *	.show_format		= ftrace_format_<call>,
 * }
 *
 */

#undef TP_FMT
#define TP_FMT(fmt, args...)	fmt "\n", ##args

#ifdef CONFIG_EVENT_PROFILE

#define _TRACE_PROFILE_INIT(call)					\
	.profile_count = ATOMIC_INIT(-1),				\
	.profile_enable = ftrace_profile_enable_##call,			\
	.profile_disable = ftrace_profile_disable_##call,

#else
#define _TRACE_PROFILE_INIT(call)
#endif

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)		\

#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), src);

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
									\
static struct ftrace_event_call event_##call;				\
									\
static void ftrace_raw_event_##call(proto)				\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_event_call *event_call = &event_##call;		\
	struct ring_buffer_event *event;				\
	struct ftrace_raw_##call *entry;				\
	unsigned long irq_flags;					\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	event = trace_current_buffer_lock_reserve(event_##call.id,	\
				 sizeof(*entry) + __data_size,		\
				 irq_flags, pc);			\
	if (!event)							\
		return;							\
	entry = ring_buffer_event_data(event);				\
									\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	if (!filter_current_check_discard(event_call, entry, event))	\
		trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \
}									\
									\
static int ftrace_raw_reg_event_##call(void)				\
{									\
	int ret;							\
									\
	ret = register_trace_##call(ftrace_raw_event_##call);		\
	if (ret)							\
		pr_info("event trace: Could not activate trace point "	\
			"probe to " #call "\n");			\
	return ret;							\
}									\
									\
static void ftrace_raw_unreg_event_##call(void)				\
{									\
	unregister_trace_##call(ftrace_raw_event_##call);		\
}									\
									\
static struct trace_event ftrace_event_type_##call = {			\
	.trace			= ftrace_raw_output_##call,		\
};									\
									\
static int ftrace_raw_init_event_##call(void)				\
{									\
	int id;								\
									\
	id = register_ftrace_event(&ftrace_event_type_##call);		\
	if (!id)							\
		return -ENODEV;						\
	event_##call.id = id;						\
	INIT_LIST_HEAD(&event_##call.fields);				\
	init_preds(&event_##call);					\
	return 0;							\
}									\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name			= #call,				\
	.system			= __stringify(TRACE_SYSTEM),		\
	.event			= &ftrace_event_type_##call,		\
	.raw_init		= ftrace_raw_init_event_##call,		\
	.regfunc		= ftrace_raw_reg_event_##call,		\
	.unregfunc		= ftrace_raw_unreg_event_##call,	\
	.show_format		= ftrace_format_##call,		\
	.define_fields		= ftrace_define_fields_##call,		\
	_TRACE_PROFILE_INIT(call)					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

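/*
 * Sketch: for the hypothetical foo_bar event in a TRACE_SYSTEM named foo,
 * the macro above would end up emitting roughly
 *
 *	static struct ftrace_event_call __used
 *	__attribute__((__aligned__(4)))
 *	__attribute__((section("_ftrace_events"))) event_foo_bar = {
 *		.name		= "foo_bar",
 *		.system		= "foo",
 *		.event		= &ftrace_event_type_foo_bar,
 *		.raw_init	= ftrace_raw_init_event_foo_bar,
 *		.regfunc	= ftrace_raw_reg_event_foo_bar,
 *		.unregfunc	= ftrace_raw_unreg_event_foo_bar,
 *		.show_format	= ftrace_format_foo_bar,
 *		.define_fields	= ftrace_define_fields_foo_bar,
 *		.profile_count	 = ATOMIC_INIT(-1),
 *		.profile_enable	 = ftrace_profile_enable_foo_bar,
 *		.profile_disable = ftrace_profile_disable_foo_bar,
 *	};
 *
 * i.e. one descriptor per event, collected in the _ftrace_events section.
 */
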
/*
 * Define the insertion callback to profile events
 *
 * The job is very similar to ftrace_raw_event_<call> except that we don't
 * insert in the ring buffer but in a perf counter.
 *
 * static void ftrace_profile_<call>(proto)
 * {
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ftrace_event_call *event_call = &event_<call>;
 *	extern void perf_tpcounter_event(int, u64, u64, void *, int);
 *	struct ftrace_raw_<call> *entry;
 *	u64 __addr = 0, __count = 1;
 *	unsigned long irq_flags;
 *	int __entry_size;
 *	int __data_size;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *	__entry_size = __data_size + sizeof(*entry);
 *
 *	do {
 *		char raw_data[__entry_size]; <- allocate our sample on the stack
 *		struct trace_entry *ent;
 *
 *		entry = (struct ftrace_raw_<call> *)raw_data;
 *		ent = &entry->ent;
 *		tracing_generic_entry_update(ent, irq_flags, pc);
 *		ent->type = event_call->id;
 *
 *		<tstruct> <- store the dynamic array offsets
 *
 *		<assign>  <- assign our values
 *
 *		perf_tpcounter_event(event_call->id, __addr, __count, entry,
 *			     __entry_size);  <- submit them to the perf counter
 *	} while (0);
 *
 * }
 */

#ifdef CONFIG_EVENT_PROFILE

#undef __perf_addr
#define __perf_addr(a) __addr = (a)

#undef __perf_count
#define __perf_count(c) __count = (c)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
static void ftrace_profile_##call(proto)				\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_event_call *event_call = &event_##call;		\
	extern void perf_tpcounter_event(int, u64, u64, void *, int);	\
	struct ftrace_raw_##call *entry;				\
	u64 __addr = 0, __count = 1;					\
	unsigned long irq_flags;					\
	int __entry_size;						\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	__entry_size = ALIGN(__data_size + sizeof(*entry), sizeof(u64));\
									\
	do {								\
		char raw_data[__entry_size];				\
		struct trace_entry *ent;				\
									\
		entry = (struct ftrace_raw_##call *)raw_data;		\
		ent = &entry->ent;					\
		tracing_generic_entry_update(ent, irq_flags, pc);	\
		ent->type = event_call->id;				\
									\
		tstruct							\
									\
		{ assign; }						\
									\
		perf_tpcounter_event(event_call->id, __addr, __count, entry,\
			     __entry_size);				\
	} while (0);							\
									\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

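/*
 * Worked sketch: assuming sizeof(*entry) == 16 for foo_bar and the 9 bytes
 * of string data computed by ftrace_get_offsets_foo_bar() above,
 *
 *	__entry_size = ALIGN(9 + 16, sizeof(u64)) = ALIGN(25, 8) = 32
 *
 * so the on-stack raw_data sample handed to perf_tpcounter_event() keeps
 * the u64 alignment the perf output buffer expects. (The struct size is
 * made up for illustration.)
 */
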
#endif /* CONFIG_EVENT_PROFILE */

#undef _TRACE_PROFILE_INIT