#ifndef __PERF_CALLCHAIN_H
#define __PERF_CALLCHAIN_H

#include "../perf.h"
#include <linux/list.h>
#include <linux/rbtree.h>
#include "event.h"
#include "symbol.h"
enum perf_call_graph_mode {
        CALLCHAIN_NONE,
        CALLCHAIN_FP,
        CALLCHAIN_DWARF,
        CALLCHAIN_LBR,
        CALLCHAIN_MAX
};

enum chain_mode {
        CHAIN_NONE,
        CHAIN_FLAT,
        CHAIN_GRAPH_ABS,
        CHAIN_GRAPH_REL
};

enum chain_order {
        ORDER_CALLER,
        ORDER_CALLEE
};

struct callchain_node {
        struct callchain_node   *parent;
        struct list_head        val;
        struct rb_node          rb_node_in; /* to insert nodes in an rbtree */
        struct rb_node          rb_node;    /* to sort nodes in an output tree */
        struct rb_root          rb_root_in; /* input tree of children */
        struct rb_root          rb_root;    /* sorted output tree of children */
        unsigned int            val_nr;
        u64                     hit;
        u64                     children_hit;
};

struct callchain_root {
        u64                     max_depth;
        struct callchain_node   node;
};

struct callchain_param;

typedef void (*sort_chain_func_t)(struct rb_root *, struct callchain_root *,
                                  u64, struct callchain_param *);

enum chain_key {
        CCKEY_FUNCTION,
        CCKEY_ADDRESS
};

struct callchain_param {
        bool                    enabled;
        enum perf_call_graph_mode record_mode;
        u32                     dump_size;
        enum chain_mode         mode;
        u32                     print_limit;
        double                  min_percent;
        sort_chain_func_t       sort;
        enum chain_order        order;
        enum chain_key          key;
        bool                    branch_callstack;
};
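
/*
 * Illustrative initialization only (hypothetical values, not the tool's
 * built-in defaults): roughly the configuration that
 * "perf record --call-graph dwarf,8192" plus
 * "perf report -g graph,0.5,caller" would ask for.
 */
#if 0
static struct callchain_param example_param = {
        .enabled        = true,
        .record_mode    = CALLCHAIN_DWARF,
        .dump_size      = 8192,
        .mode           = CHAIN_GRAPH_ABS,
        .min_percent    = 0.5,
        .order          = ORDER_CALLER,
        .key            = CCKEY_FUNCTION,
};
#endif
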
extern struct callchain_param callchain_param;

struct callchain_list {
        u64                     ip;
        struct map_symbol       ms;
        struct /* for TUI */ {
                bool            unfolded;
                bool            has_children;
        };
        char                    *srcline;
        struct list_head        list;
};

/*
 * A callchain cursor is a singly linked list that
 * lets one feed a callchain progressively.
 * It keeps persistent allocated entries to minimize
 * allocations.
 */
struct callchain_cursor_node {
        u64                             ip;
        struct map                      *map;
        struct symbol                   *sym;
        struct callchain_cursor_node    *next;
};

struct callchain_cursor {
        u64                             nr;
        struct callchain_cursor_node    *first;
        struct callchain_cursor_node    **last;
        u64                             pos;
        struct callchain_cursor_node    *curr;
};

extern __thread struct callchain_cursor callchain_cursor;

static inline void callchain_init(struct callchain_root *root)
{
        INIT_LIST_HEAD(&root->node.val);

        root->node.parent = NULL;
        root->node.hit = 0;
        root->node.children_hit = 0;
        root->node.rb_root_in = RB_ROOT;
        root->max_depth = 0;
}

static inline u64 callchain_cumul_hits(struct callchain_node *node)
{
        return node->hit + node->children_hit;
}

int callchain_register_param(struct callchain_param *param);
int callchain_append(struct callchain_root *root,
                     struct callchain_cursor *cursor,
                     u64 period);

int callchain_merge(struct callchain_cursor *cursor,
                    struct callchain_root *dst, struct callchain_root *src);

/*
 * Initialize a cursor before adding entries inside, but keep
 * the previously allocated entries as a cache.
 */
static inline void callchain_cursor_reset(struct callchain_cursor *cursor)
{
        cursor->nr = 0;
        cursor->last = &cursor->first;
}

int callchain_cursor_append(struct callchain_cursor *cursor, u64 ip,
                            struct map *map, struct symbol *sym);

/* Close a cursor writing session. Initialize for the reader */
static inline void callchain_cursor_commit(struct callchain_cursor *cursor)
{
        cursor->curr = cursor->first;
        cursor->pos = 0;
}

/* Cursor reading iteration helpers */
static inline struct callchain_cursor_node *
callchain_cursor_current(struct callchain_cursor *cursor)
{
        if (cursor->pos == cursor->nr)
                return NULL;

        return cursor->curr;
}

static inline void callchain_cursor_advance(struct callchain_cursor *cursor)
{
        cursor->curr = cursor->curr->next;
        cursor->pos++;
}
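
/*
 * Illustrative sketch only (not part of the perf API): how the cursor
 * helpers above and callchain_append() fit together.  The ips[] array,
 * its length and the period are hypothetical inputs, and map/symbol
 * resolution is skipped (NULL) for brevity.
 */
#if 0
static int example_feed_callchain(struct callchain_root *root,
                                  struct callchain_cursor *cursor,
                                  u64 *ips, int nr, u64 period)
{
        struct callchain_cursor_node *node;
        int i, err;

        /* Writer side: start a new session, reusing cached entries. */
        callchain_cursor_reset(cursor);
        for (i = 0; i < nr; i++) {
                err = callchain_cursor_append(cursor, ips[i], NULL, NULL);
                if (err)
                        return err;
        }

        /* Reader side: commit, then walk the entries just fed. */
        callchain_cursor_commit(cursor);
        while ((node = callchain_cursor_current(cursor)) != NULL) {
                /* node->ip, node->map and node->sym are usable here. */
                callchain_cursor_advance(cursor);
        }

        /* Accumulate the whole chain into an output tree. */
        callchain_cursor_commit(cursor);
        return callchain_append(root, cursor, period);
}
#endif
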
struct option;
struct hist_entry;

int record_parse_callchain_opt(const struct option *opt, const char *arg, int unset);
int record_callchain_opt(const struct option *opt, const char *arg, int unset);

int sample__resolve_callchain(struct perf_sample *sample, struct symbol **parent,
                              struct perf_evsel *evsel, struct addr_location *al,
                              int max_stack);
int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *sample);
int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node,
                        bool hide_unresolved);

extern const char record_callchain_help[];
extern int parse_callchain_record(const char *arg, struct callchain_param *param);
int parse_callchain_record_opt(const char *arg, struct callchain_param *param);
int parse_callchain_report_opt(const char *arg);
int perf_callchain_config(const char *var, const char *value);
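
/*
 * Take a snapshot of a cursor at its current read position: @dest starts
 * at @src's current node and its nr is reduced to the entries not yet
 * consumed, while @src itself is left untouched.
 */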
static inline void callchain_cursor_snapshot(struct callchain_cursor *dest,
                                             struct callchain_cursor *src)
{
        *dest = *src;

        dest->first = src->curr;
        dest->nr -= src->pos;
}

#ifdef HAVE_SKIP_CALLCHAIN_IDX
extern int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain);
#else
static inline int arch_skip_callchain_idx(struct thread *thread __maybe_unused,
                                          struct ip_callchain *chain __maybe_unused)
{
        return -1;
}
#endif

char *callchain_list__sym_name(struct callchain_list *cl,
                               char *bf, size_t bfsize, bool show_dso);

void free_callchain(struct callchain_root *root);

#endif /* __PERF_CALLCHAIN_H */