perf bpf: Save bpf_prog_info in a rbtree in perf_env
bpf_prog_info contains information necessary to annotate BPF programs. This patch saves bpf_prog_info for the BPF programs loaded in the system.

The big picture for the next few patches: to fully annotate BPF programs with source code mapping, four pieces of information are needed:

 1) PERF_RECORD_KSYMBOL
 2) PERF_RECORD_BPF_EVENT
 3) bpf_prog_info
 4) btf

Before this set, 1) and 2) are already saved to the perf.data file. For BPF programs that were loaded before the perf run, 1) and 2) are synthesized by perf_event__synthesize_bpf_events(). For short-lived BPF programs, 1) and 2) are generated by the kernel.

This set handles 3) and 4). Again, existing BPF programs and short-lived programs need to be handled separately. This patch handles 3) for existing BPF programs while synthesizing 1) and 2) in perf_event__synthesize_bpf_events(); the data are stored in perf_env. The next patch saves these data from perf_env into perf.data as headers. Similarly, the two patches after that save 4) for existing BPF programs to perf_env and perf.data. A later patch will handle 3) and 4) for short-lived BPF programs by monitoring 1) and 2) in a dedicated thread.

Signed-off-by: Song Liu <songliubraving@fb.com>
Reviewed-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stanislav Fomichev <sdf@google.com>
Cc: kernel-team@fb.com
Link: http://lkml.kernel.org/r/20190312053051.2690567-7-songliubraving@fb.com
[ set env->bpf_progs.infos_cnt to zero in perf_env__purge_bpf() as noted by jolsa ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
This commit is contained in:
parent e541695045
commit e4378f0cb9
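Taken together, the patch adds a small API around perf_env: perf_env__insert_bpf_prog_info() files a bpf_prog_info_node into an rbtree keyed by BPF program id, and perf_env__find_bpf_prog_info() retrieves it later, for example from an annotation path. The rw_semaphore is there because perf-top touches the tree from more than one thread. Below is a minimal, hypothetical consumer sketch; bpf_prog_name() and its include paths are illustrative only, while the helpers and struct bpf_prog_info_node are the ones added in the diff that follows.

/*
 * Hypothetical consumer sketch: look up the bpf_prog_info that
 * perf_event__synthesize_one_bpf_prog() saved into a perf_env and
 * return the program name. Only the helpers added by this patch
 * are assumed; the function itself is illustrative.
 */
#include "util/env.h"		/* perf_env__find_bpf_prog_info() */
#include "util/bpf-event.h"	/* struct bpf_prog_info_node */

static const char *bpf_prog_name(struct perf_env *env, __u32 prog_id)
{
	struct bpf_prog_info_node *node;

	/* rbtree lookup; the helper takes bpf_progs.lock for reading */
	node = perf_env__find_bpf_prog_info(env, prog_id);
	if (!node)
		return NULL;

	return node->info_linear->info.name;
}

Note how ownership of info_linear moves into the tree on insert (the synthesizer sets its local pointer to NULL afterwards), so the saved data stays valid for later lookups.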
@@ -298,6 +298,7 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
		use_pager = 1;
	commit_pager_choice();

	perf_env__init(&perf_env);
	perf_env__set_cmdline(&perf_env, argc, argv);
	status = p->fn(argc, argv);
	perf_config__exit();
@@ -10,6 +10,7 @@
#include "debug.h"
#include "symbol.h"
#include "machine.h"
#include "env.h"
#include "session.h"

#define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr))
@@ -54,17 +55,28 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
	struct bpf_event *bpf_event = &event->bpf_event;
	struct bpf_prog_info_linear *info_linear;
	struct perf_tool *tool = session->tool;
	struct bpf_prog_info_node *info_node;
	struct bpf_prog_info *info;
	struct btf *btf = NULL;
	bool has_btf = false;
	struct perf_env *env;
	u32 sub_prog_cnt, i;
	int err = 0;
	u64 arrays;

	/*
	 * for perf-record and perf-report use header.env;
	 * otherwise, use global perf_env.
	 */
	env = session->data ? &session->header.env : &perf_env;

	arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS;
	arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
	arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
	arrays |= 1UL << BPF_PROG_INFO_PROG_TAGS;
	arrays |= 1UL << BPF_PROG_INFO_JITED_INSNS;
	arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
	arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;

	info_linear = bpf_program__get_prog_info_linear(fd, arrays);
	if (IS_ERR_OR_NULL(info_linear)) {
@@ -153,8 +165,8 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
					     machine, process);
	}

	/* Synthesize PERF_RECORD_BPF_EVENT */
	if (!opts->no_bpf_event) {
		/* Synthesize PERF_RECORD_BPF_EVENT */
		*bpf_event = (struct bpf_event){
			.header = {
				.type = PERF_RECORD_BPF_EVENT,
@@ -167,6 +179,22 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
		memcpy(bpf_event->tag, info->tag, BPF_TAG_SIZE);
		memset((void *)event + event->header.size, 0, machine->id_hdr_size);
		event->header.size += machine->id_hdr_size;

		/* save bpf_prog_info to env */
		info_node = malloc(sizeof(struct bpf_prog_info_node));
		if (!info_node) {
			err = -1;
			goto out;
		}

		info_node->info_linear = info_linear;
		perf_env__insert_bpf_prog_info(env, info_node);
		info_linear = NULL;

		/*
		 * process after saving bpf_prog_info to env, so that
		 * required information is ready for look up
		 */
		err = perf_tool__process_synth_event(tool, event,
						     machine, process);
	}
@@ -3,14 +3,19 @@
#define __PERF_BPF_EVENT_H

#include <linux/compiler.h>
#include <linux/rbtree.h>
#include "event.h"

struct machine;
union perf_event;
struct perf_sample;
struct perf_tool;
struct record_opts;

struct bpf_prog_info_node {
	struct bpf_prog_info_linear	*info_linear;
	struct rb_node			rb_node;
};

#ifdef HAVE_LIBBPF_SUPPORT
int machine__process_bpf_event(struct machine *machine, union perf_event *event,
			       struct perf_sample *sample);
@@ -3,15 +3,97 @@
#include "env.h"
#include "sane_ctype.h"
#include "util.h"
#include "bpf-event.h"
#include <errno.h>
#include <sys/utsname.h>
#include <bpf/libbpf.h>

struct perf_env perf_env;

void perf_env__insert_bpf_prog_info(struct perf_env *env,
				    struct bpf_prog_info_node *info_node)
{
	__u32 prog_id = info_node->info_linear->info.id;
	struct bpf_prog_info_node *node;
	struct rb_node *parent = NULL;
	struct rb_node **p;

	down_write(&env->bpf_progs.lock);
	p = &env->bpf_progs.infos.rb_node;

	while (*p != NULL) {
		parent = *p;
		node = rb_entry(parent, struct bpf_prog_info_node, rb_node);
		if (prog_id < node->info_linear->info.id) {
			p = &(*p)->rb_left;
		} else if (prog_id > node->info_linear->info.id) {
			p = &(*p)->rb_right;
		} else {
			pr_debug("duplicated bpf prog info %u\n", prog_id);
			goto out;
		}
	}

	rb_link_node(&info_node->rb_node, parent, p);
	rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
	env->bpf_progs.infos_cnt++;
out:
	up_write(&env->bpf_progs.lock);
}

struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
							__u32 prog_id)
{
	struct bpf_prog_info_node *node = NULL;
	struct rb_node *n;

	down_read(&env->bpf_progs.lock);
	n = env->bpf_progs.infos.rb_node;

	while (n) {
		node = rb_entry(n, struct bpf_prog_info_node, rb_node);
		if (prog_id < node->info_linear->info.id)
			n = n->rb_left;
		else if (prog_id > node->info_linear->info.id)
			n = n->rb_right;
		else
			break;
	}

	up_read(&env->bpf_progs.lock);
	return node;
}

/* purge data in bpf_progs.infos tree */
static void perf_env__purge_bpf(struct perf_env *env)
{
	struct rb_root *root;
	struct rb_node *next;

	down_write(&env->bpf_progs.lock);

	root = &env->bpf_progs.infos;
	next = rb_first(root);

	while (next) {
		struct bpf_prog_info_node *node;

		node = rb_entry(next, struct bpf_prog_info_node, rb_node);
		next = rb_next(&node->rb_node);
		rb_erase(&node->rb_node, root);
		free(node);
	}

	env->bpf_progs.infos_cnt = 0;

	up_write(&env->bpf_progs.lock);
}

void perf_env__exit(struct perf_env *env)
{
	int i;

	perf_env__purge_bpf(env);
	zfree(&env->hostname);
	zfree(&env->os_release);
	zfree(&env->version);
@@ -38,6 +120,12 @@ void perf_env__exit(struct perf_env *env)
	zfree(&env->memory_nodes);
}

void perf_env__init(struct perf_env *env)
{
	env->bpf_progs.infos = RB_ROOT;
	init_rwsem(&env->bpf_progs.lock);
}

int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
{
	int i;
@@ -3,7 +3,9 @@
#define __PERF_ENV_H

#include <linux/types.h>
#include <linux/rbtree.h>
#include "cpumap.h"
#include "rwsem.h"

struct cpu_topology_map {
	int socket_id;
@@ -64,8 +66,20 @@ struct perf_env {
	struct memory_node	*memory_nodes;
	unsigned long long	memory_bsize;
	u64			clockid_res_ns;

	/*
	 * bpf_info_lock protects bpf rbtrees. This is needed because the
	 * trees are accessed by different threads in perf-top
	 */
	struct {
		struct rw_semaphore	lock;
		struct rb_root		infos;
		u32			infos_cnt;
	} bpf_progs;
};

struct bpf_prog_info_node;

extern struct perf_env perf_env;

void perf_env__exit(struct perf_env *env);
@@ -80,4 +94,9 @@ const char *perf_env__arch(struct perf_env *env);
const char *perf_env__raw_arch(struct perf_env *env);
int perf_env__nr_cpus_avail(struct perf_env *env);

void perf_env__init(struct perf_env *env);
void perf_env__insert_bpf_prog_info(struct perf_env *env,
				    struct bpf_prog_info_node *info_node);
struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
							 __u32 prog_id);
#endif /* __PERF_ENV_H */
@@ -132,6 +132,7 @@ struct perf_session *perf_session__new(struct perf_data *data,
	ordered_events__init(&session->ordered_events,
			     ordered_events__deliver_event, NULL);

	perf_env__init(&session->header.env);
	if (data) {
		if (perf_data__open(data))
			goto out_delete;