mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-05 10:04:12 +08:00
7263f3498b
Synchronize the caller in evsel with the called function. Shorten 3 lines of code in bperf_read by using perf_cpu_map__for_each_cpu(). This code is frequently using variables named cpu as cpu map indices, which doesn't matter as all CPUs are in the CPU map. It is strange in some cases the cpumap is used at all. Committer notes: Found when building with BUILD_BPF_SKEL=1: Remove unused 'num_cpu' variable in bperf__read(). Make 'j' an 'int' as it is used in perf_cpu_map__for_each_cpu() to compare against an 'int' Signed-off-by: Ian Rogers <irogers@google.com> Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com> Cc: Andi Kleen <ak@linux.intel.com> Cc: Ingo Molnar <mingo@redhat.com> Cc: James Clark <james.clark@arm.com> Cc: Jiri Olsa <jolsa@redhat.com> Cc: John Garry <john.garry@huawei.com> Cc: Kajol Jain <kjain@linux.ibm.com> Cc: Kan Liang <kan.liang@linux.intel.com> Cc: Leo Yan <leo.yan@linaro.org> Cc: Mark Rutland <mark.rutland@arm.com> Cc: Mathieu Poirier <mathieu.poirier@linaro.org> Cc: Mike Leach <mike.leach@linaro.org> Cc: Namhyung Kim <namhyung@kernel.org> Cc: Paul Clarke <pc@us.ibm.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Riccardo Mancini <rickyman7@gmail.com> Cc: Stephane Eranian <eranian@google.com> Cc: Suzuki Poulouse <suzuki.poulose@arm.com> Cc: Vineet Singh <vineet.singh@intel.com> Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-45-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
132 lines
3.0 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
#ifndef __PERF_BPF_COUNTER_H
|
|
#define __PERF_BPF_COUNTER_H 1
|
|
|
|
#include <linux/list.h>
|
|
#include <sys/resource.h>
|
|
#include <bpf/bpf.h>
|
|
#include <bpf/btf.h>
|
|
#include <bpf/libbpf.h>
|
|
|
|
struct evsel;
|
|
struct target;
|
|
struct bpf_counter;
|
|
|
|
/*
 * Callback types used by struct bpf_counter_ops below.  All operate on
 * the evsel being counted; the load callback additionally takes the
 * measurement target, and install_pe takes a CPU map index plus the
 * perf event file descriptor.  All return 0 on success or a negative
 * error code (kernel convention).
 */
typedef int (*bpf_counter_evsel_op)(struct evsel *evsel);
typedef int (*bpf_counter_evsel_target_op)(struct evsel *evsel,
					   struct target *target);
typedef int (*bpf_counter_evsel_install_pe_op)(struct evsel *evsel,
					       int cpu_map_idx,
					       int fd);
|
|
|
|
/*
 * Table of per-backend operations for BPF based counters.  Each
 * callback mirrors one of the bpf_counter__*() entry points declared
 * below.
 */
struct bpf_counter_ops {
	bpf_counter_evsel_target_op load;	/* set up counting for a target */
	bpf_counter_evsel_op enable;
	bpf_counter_evsel_op disable;
	bpf_counter_evsel_op read;
	bpf_counter_evsel_op destroy;
	bpf_counter_evsel_install_pe_op install_pe;	/* wire up a perf event fd */
};
|
|
|
|
/* One BPF counter instance, kept on a linked list. */
struct bpf_counter {
	void *skel;		/* backend's BPF skeleton (opaque here) */
	struct list_head list;	/* list membership node */
};
|
|
|
|
#ifdef HAVE_BPF_SKEL
|
|
|
|
/* Real implementations, available when built with BPF skeleton support. */
int bpf_counter__load(struct evsel *evsel, struct target *target);
int bpf_counter__enable(struct evsel *evsel);
int bpf_counter__disable(struct evsel *evsel);
int bpf_counter__read(struct evsel *evsel);
void bpf_counter__destroy(struct evsel *evsel);
int bpf_counter__install_pe(struct evsel *evsel, int cpu_map_idx, int fd);
|
|
|
|
#else /* HAVE_BPF_SKEL */
|
|
|
|
#include <linux/err.h>
|
|
|
|
/* Stub when built without BPF skeletons: nothing to load, report success. */
static inline int bpf_counter__load(struct evsel *evsel __maybe_unused,
				    struct target *target __maybe_unused)
{
	return 0;
}
|
|
|
|
/* Stub when built without BPF skeletons: no-op, report success. */
static inline int bpf_counter__enable(struct evsel *evsel __maybe_unused)
{
	return 0;
}
|
|
|
|
/* Stub when built without BPF skeletons: no-op, report success. */
static inline int bpf_counter__disable(struct evsel *evsel __maybe_unused)
{
	return 0;
}
|
|
|
|
/*
 * Stub when built without BPF skeletons: there is never a BPF counter
 * value to read, so fail with -EAGAIN (unlike the other stubs, which
 * succeed silently).
 */
static inline int bpf_counter__read(struct evsel *evsel __maybe_unused)
{
	return -EAGAIN;
}
|
|
|
|
/* Stub when built without BPF skeletons: nothing to tear down. */
static inline void bpf_counter__destroy(struct evsel *evsel __maybe_unused)
{
}
|
|
|
|
/*
 * Stub when built without BPF skeletons: nothing to install, report
 * success.
 *
 * Parameter renamed from 'cpu' to 'cpu_map_idx' to match the real
 * prototype above: the value is an index into the evsel's CPU map,
 * not a logical CPU number.
 */
static inline int bpf_counter__install_pe(struct evsel *evsel __maybe_unused,
					  int cpu_map_idx __maybe_unused,
					  int fd __maybe_unused)
{
	return 0;
}
|
|
|
|
#endif /* HAVE_BPF_SKEL */
|
|
|
|
/*
 * Raise the locked-memory limit as far as the kernel allows; needed so
 * BPF maps/programs can be created.  Best effort: the setrlimit()
 * return value is deliberately ignored.
 */
static inline void set_max_rlimit(void)
{
	const struct rlimit unlimited = {
		.rlim_cur = RLIM_INFINITY,
		.rlim_max = RLIM_INFINITY,
	};

	setrlimit(RLIMIT_MEMLOCK, &unlimited);
}
|
|
|
|
static inline __u32 bpf_link_get_id(int fd)
|
|
{
|
|
struct bpf_link_info link_info = { .id = 0, };
|
|
__u32 link_info_len = sizeof(link_info);
|
|
|
|
bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len);
|
|
return link_info.id;
|
|
}
|
|
|
|
static inline __u32 bpf_link_get_prog_id(int fd)
|
|
{
|
|
struct bpf_link_info link_info = { .id = 0, };
|
|
__u32 link_info_len = sizeof(link_info);
|
|
|
|
bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len);
|
|
return link_info.prog_id;
|
|
}
|
|
|
|
static inline __u32 bpf_map_get_id(int fd)
|
|
{
|
|
struct bpf_map_info map_info = { .id = 0, };
|
|
__u32 map_info_len = sizeof(map_info);
|
|
|
|
bpf_obj_get_info_by_fd(fd, &map_info, &map_info_len);
|
|
return map_info.id;
|
|
}
|
|
|
|
/* trigger the leader program on a cpu */
|
|
static inline int bperf_trigger_reading(int prog_fd, int cpu)
|
|
{
|
|
DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
|
|
.ctx_in = NULL,
|
|
.ctx_size_in = 0,
|
|
.flags = BPF_F_TEST_RUN_ON_CPU,
|
|
.cpu = cpu,
|
|
.retval = 0,
|
|
);
|
|
|
|
return bpf_prog_test_run_opts(prog_fd, &opts);
|
|
}
|
|
|
|
#endif /* __PERF_BPF_COUNTER_H */
|