perf env: Avoid recursively taking env->bpf_progs.lock
Add variants of perf_env__insert_bpf_prog_info(), perf_env__insert_btf()
and perf_env__find_btf() prefixed with "__" to indicate that
env->bpf_progs.lock is assumed to be held by the caller. Call these
variants when the lock is already held, to avoid taking it recursively
and potentially having a thread deadlock with itself.
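
[Editor's note] The patch follows the common locked-wrapper / "__"-prefixed
unlocked-helper convention: the plain function is the only one that touches the
lock, and the "__" variant assumes its caller already holds it. With a
non-recursive reader-writer lock, a thread that already holds the lock and then
calls a helper that takes it again can block behind a queued writer and deadlock
with itself, which is what the "__" variants avoid. Below is a minimal,
self-contained userspace sketch of the pattern, not the perf code itself:
pthread_rwlock_t stands in for the perf tools rwsem, and registry, entry,
find_entry() and __find_entry() are made-up names used only for illustration.

/*
 * Sketch of the locked-wrapper / __unlocked-helper pattern.
 * Assumptions: pthread_rwlock_t stands in for the perf rwsem;
 * all type and function names here are hypothetical.
 */
#include <pthread.h>
#include <stdio.h>

struct entry {
        unsigned int id;
        const char *name;
};

struct registry {
        pthread_rwlock_t lock;  /* stand-in for env->bpf_progs.lock */
        struct entry items[4];
        int nr;
};

/* "__" variant: the caller must already hold reg->lock (read or write). */
static struct entry *__find_entry(struct registry *reg, unsigned int id)
{
        for (int i = 0; i < reg->nr; i++) {
                if (reg->items[i].id == id)
                        return &reg->items[i];
        }
        return NULL;
}

/* Plain variant: takes the lock itself, for callers that do not hold it. */
static struct entry *find_entry(struct registry *reg, unsigned int id)
{
        struct entry *res;

        pthread_rwlock_rdlock(&reg->lock);
        res = __find_entry(reg, id);
        pthread_rwlock_unlock(&reg->lock);
        return res;
}

/*
 * A walker that holds the lock across a whole iteration (the role
 * print_bpf_prog_info() plays in the diff below) must call the "__"
 * variant inside the critical section; calling find_entry() here would
 * re-acquire a lock this thread already holds.
 */
static void dump_entries(struct registry *reg, FILE *fp)
{
        pthread_rwlock_rdlock(&reg->lock);
        for (int i = 0; i < reg->nr; i++) {
                struct entry *e = __find_entry(reg, reg->items[i].id);

                if (e)
                        fprintf(fp, "%u: %s\n", e->id, e->name);
        }
        pthread_rwlock_unlock(&reg->lock);
}

int main(void)
{
        struct registry reg = {
                .items = { { 1, "prog_a" }, { 2, "prog_b" } },
                .nr = 2,
        };

        pthread_rwlock_init(&reg.lock, NULL);
        dump_entries(&reg, stdout);
        printf("find 2 -> %s\n", find_entry(&reg, 2)->name);
        pthread_rwlock_destroy(&reg.lock);
        return 0;
}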
Fixes: f8dfeae009 ("perf bpf: Show more BPF program info in print_bpf_prog_info()")
Signed-off-by: Ian Rogers <irogers@google.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Acked-by: Song Liu <song@kernel.org>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: K Prateek Nayak <kprateek.nayak@amd.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Ming Wang <wangming01@loongson.cn>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@amd.com>
Link: https://lore.kernel.org/r/20231207014655.1252484-1-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
commit 9c51f8788b
parent 58824fa008
@@ -545,9 +545,9 @@ int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env)
 	return evlist__add_sb_event(evlist, &attr, bpf_event__sb_cb, env);
 }
 
-void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
-                                    struct perf_env *env,
-                                    FILE *fp)
+void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+                                      struct perf_env *env,
+                                      FILE *fp)
 {
 	__u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
 	__u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
@@ -563,7 +563,7 @@ void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
 	if (info->btf_id) {
 		struct btf_node *node;
 
-		node = perf_env__find_btf(env, info->btf_id);
+		node = __perf_env__find_btf(env, info->btf_id);
 		if (node)
 			btf = btf__new((__u8 *)(node->data),
 				       node->data_size);
@@ -33,9 +33,9 @@ struct btf_node {
 int machine__process_bpf(struct machine *machine, union perf_event *event,
                          struct perf_sample *sample);
 int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env);
-void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
-                                    struct perf_env *env,
-                                    FILE *fp);
+void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+                                      struct perf_env *env,
+                                      FILE *fp);
 #else
 static inline int machine__process_bpf(struct machine *machine __maybe_unused,
                                        union perf_event *event __maybe_unused,
@@ -50,9 +50,9 @@ static inline int evlist__add_bpf_sb_event(struct evlist *evlist __maybe_unused,
 	return 0;
 }
 
-static inline void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info __maybe_unused,
-                                                  struct perf_env *env __maybe_unused,
-                                                  FILE *fp __maybe_unused)
+static inline void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info __maybe_unused,
+                                                    struct perf_env *env __maybe_unused,
+                                                    FILE *fp __maybe_unused)
 {
 
 }
@@ -24,13 +24,19 @@ struct perf_env perf_env;
 
 void perf_env__insert_bpf_prog_info(struct perf_env *env,
                                     struct bpf_prog_info_node *info_node)
 {
+	down_write(&env->bpf_progs.lock);
+	__perf_env__insert_bpf_prog_info(env, info_node);
+	up_write(&env->bpf_progs.lock);
+}
+
+void __perf_env__insert_bpf_prog_info(struct perf_env *env, struct bpf_prog_info_node *info_node)
+{
 	__u32 prog_id = info_node->info_linear->info.id;
 	struct bpf_prog_info_node *node;
 	struct rb_node *parent = NULL;
 	struct rb_node **p;
 
-	down_write(&env->bpf_progs.lock);
 	p = &env->bpf_progs.infos.rb_node;
 
 	while (*p != NULL) {
@@ -42,15 +48,13 @@ void perf_env__insert_bpf_prog_info(struct perf_env *env,
 			p = &(*p)->rb_right;
 		} else {
 			pr_debug("duplicated bpf prog info %u\n", prog_id);
-			goto out;
+			return;
 		}
 	}
 
 	rb_link_node(&info_node->rb_node, parent, p);
 	rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
 	env->bpf_progs.infos_cnt++;
-out:
-	up_write(&env->bpf_progs.lock);
 }
 
 struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
@@ -79,14 +83,22 @@ out:
 }
 
 bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
+{
+	bool ret;
+
+	down_write(&env->bpf_progs.lock);
+	ret = __perf_env__insert_btf(env, btf_node);
+	up_write(&env->bpf_progs.lock);
+	return ret;
+}
+
+bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
 {
 	struct rb_node *parent = NULL;
 	__u32 btf_id = btf_node->id;
 	struct btf_node *node;
 	struct rb_node **p;
-	bool ret = true;
 
-	down_write(&env->bpf_progs.lock);
 	p = &env->bpf_progs.btfs.rb_node;
 
 	while (*p != NULL) {
@@ -98,25 +110,31 @@ bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
 			p = &(*p)->rb_right;
 		} else {
 			pr_debug("duplicated btf %u\n", btf_id);
-			ret = false;
-			goto out;
+			return false;
 		}
 	}
 
 	rb_link_node(&btf_node->rb_node, parent, p);
 	rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
 	env->bpf_progs.btfs_cnt++;
-out:
-	up_write(&env->bpf_progs.lock);
-	return ret;
+	return true;
 }
 
 struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
 {
+	struct btf_node *res;
+
+	down_read(&env->bpf_progs.lock);
+	res = __perf_env__find_btf(env, btf_id);
+	up_read(&env->bpf_progs.lock);
+	return res;
+}
+
+struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id)
+{
 	struct btf_node *node = NULL;
 	struct rb_node *n;
 
-	down_read(&env->bpf_progs.lock);
 	n = env->bpf_progs.btfs.rb_node;
 
 	while (n) {
@@ -126,13 +144,9 @@ struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
 		else if (btf_id > node->id)
 			n = n->rb_right;
 		else
-			goto out;
+			return node;
 	}
-	node = NULL;
-
-out:
-	up_read(&env->bpf_progs.lock);
-	return node;
+	return NULL;
 }
 
 /* purge data in bpf_progs.infos tree */
@@ -175,12 +175,16 @@ const char *perf_env__raw_arch(struct perf_env *env);
 int perf_env__nr_cpus_avail(struct perf_env *env);
 
 void perf_env__init(struct perf_env *env);
+void __perf_env__insert_bpf_prog_info(struct perf_env *env,
+                                      struct bpf_prog_info_node *info_node);
 void perf_env__insert_bpf_prog_info(struct perf_env *env,
                                     struct bpf_prog_info_node *info_node);
 struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
                                                          __u32 prog_id);
 bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
+bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
 struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
+struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id);
 
 int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu);
 char *perf_env__find_pmu_cap(struct perf_env *env, const char *pmu_name,
@@ -1849,8 +1849,8 @@ static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
 		node = rb_entry(next, struct bpf_prog_info_node, rb_node);
 		next = rb_next(&node->rb_node);
 
-		bpf_event__print_bpf_prog_info(&node->info_linear->info,
-		                               env, fp);
+		__bpf_event__print_bpf_prog_info(&node->info_linear->info,
+		                                 env, fp);
 	}
 
 	up_read(&env->bpf_progs.lock);
@@ -3188,7 +3188,7 @@ static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
 		/* after reading from file, translate offset to address */
 		bpil_offs_to_addr(info_linear);
 		info_node->info_linear = info_linear;
-		perf_env__insert_bpf_prog_info(env, info_node);
+		__perf_env__insert_bpf_prog_info(env, info_node);
 	}
 
 	up_write(&env->bpf_progs.lock);
@@ -3235,7 +3235,7 @@ static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
 		if (__do_read(ff, node->data, data_size))
 			goto out;
 
-		perf_env__insert_btf(env, node);
+		__perf_env__insert_btf(env, node);
 		node = NULL;
 	}
 
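
[Editor's note] With these changes each path acquires env->bpf_progs.lock
exactly once: external callers keep using the unprefixed functions, while code
that already runs under the lock (the feature printing and reading paths in the
hunks above) switches to the "__" variants. The double-underscore prefix for
the "lock already held" variant follows common kernel naming practice.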