mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-02 00:24:12 +08:00
cdb204ad42
Provide a new solution to replace the reverted commit ac2dc29edd
("perf stat: Add default hybrid events")
For the default software attrs, nothing is changed.
For the default hardware attrs, create a new evsel for each hybrid pmu.
With the new solution, adding a new default attr will not require the
special support for the hybrid platform anymore.
Also, the "--detailed" option is supported on the hybrid platform.
With the patch,
$ perf stat -a -ddd sleep 1
Performance counter stats for 'system wide':
32,231.06 msec cpu-clock # 32.056 CPUs utilized
529 context-switches # 16.413 /sec
32 cpu-migrations # 0.993 /sec
69 page-faults # 2.141 /sec
176,754,151 cpu_core/cycles/ # 5.484 M/sec (41.65%)
161,695,280 cpu_atom/cycles/ # 5.017 M/sec (49.92%)
48,595,992 cpu_core/instructions/ # 1.508 M/sec (49.98%)
32,363,337 cpu_atom/instructions/ # 1.004 M/sec (58.26%)
10,088,639 cpu_core/branches/ # 313.010 K/sec (58.31%)
6,390,582 cpu_atom/branches/ # 198.274 K/sec (58.26%)
846,201 cpu_core/branch-misses/ # 26.254 K/sec (66.65%)
676,477 cpu_atom/branch-misses/ # 20.988 K/sec (58.27%)
14,290,070 cpu_core/L1-dcache-loads/ # 443.363 K/sec (66.66%)
9,983,532 cpu_atom/L1-dcache-loads/ # 309.749 K/sec (58.27%)
740,725 cpu_core/L1-dcache-load-misses/ # 22.982 K/sec (66.66%)
<not supported> cpu_atom/L1-dcache-load-misses/
480,441 cpu_core/LLC-loads/ # 14.906 K/sec (66.67%)
326,570 cpu_atom/LLC-loads/ # 10.132 K/sec (58.27%)
329 cpu_core/LLC-load-misses/ # 10.208 /sec (66.68%)
0 cpu_atom/LLC-load-misses/ # 0.000 /sec (58.32%)
<not supported> cpu_core/L1-icache-loads/
21,982,491 cpu_atom/L1-icache-loads/ # 682.028 K/sec (58.43%)
4,493,189 cpu_core/L1-icache-load-misses/ # 139.406 K/sec (33.34%)
4,711,404 cpu_atom/L1-icache-load-misses/ # 146.176 K/sec (50.08%)
13,713,090 cpu_core/dTLB-loads/ # 425.462 K/sec (33.34%)
9,384,727 cpu_atom/dTLB-loads/ # 291.170 K/sec (50.08%)
157,387 cpu_core/dTLB-load-misses/ # 4.883 K/sec (33.33%)
108,328 cpu_atom/dTLB-load-misses/ # 3.361 K/sec (50.08%)
<not supported> cpu_core/iTLB-loads/
<not supported> cpu_atom/iTLB-loads/
37,655 cpu_core/iTLB-load-misses/ # 1.168 K/sec (33.32%)
61,661 cpu_atom/iTLB-load-misses/ # 1.913 K/sec (50.03%)
<not supported> cpu_core/L1-dcache-prefetches/
<not supported> cpu_atom/L1-dcache-prefetches/
<not supported> cpu_core/L1-dcache-prefetch-misses/
<not supported> cpu_atom/L1-dcache-prefetch-misses/
1.005466919 seconds time elapsed
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Acked-by: Ian Rogers <irogers@google.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220721065706.2886112-5-zhengjun.xing@linux.intel.com
Signed-off-by: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
433 lines
15 KiB
C
433 lines
15 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
#ifndef __PERF_EVLIST_H
|
|
#define __PERF_EVLIST_H 1
|
|
|
|
#include <linux/compiler.h>
|
|
#include <linux/kernel.h>
|
|
#include <linux/refcount.h>
|
|
#include <linux/list.h>
|
|
#include <api/fd/array.h>
|
|
#include <internal/evlist.h>
|
|
#include <internal/evsel.h>
|
|
#include "events_stats.h"
|
|
#include "evsel.h"
|
|
#include <pthread.h>
|
|
#include <signal.h>
|
|
#include <unistd.h>
|
|
|
|
struct pollfd;
|
|
struct thread_map;
|
|
struct perf_cpu_map;
|
|
struct record_opts;
|
|
|
|
/*
 * State machine of bkw_mmap_state:
 *
 *  .________________(forbid)_____________.
 *  |                                     V
 * NOTREADY --(0)--> RUNNING --(1)--> DATA_PENDING --(2)--> EMPTY
 *                     ^  ^              |   ^               |
 *                     |  |__(forbid)____/   |___(forbid)___/|
 *                     |                                     |
 *                      \_________________(3)_______________/
 *
 * NOTREADY     : Backward ring buffers are not ready
 * RUNNING      : Backward ring buffers are recording
 * DATA_PENDING : We are required to collect data from backward ring buffers
 * EMPTY        : We have collected data from backward ring buffers.
 *
 * (0): Setup backward ring buffer
 * (1): Pause ring buffers for reading
 * (2): Read from ring buffers
 * (3): Resume ring buffers for recording
 */
enum bkw_mmap_state {
	BKW_MMAP_NOTREADY,
	BKW_MMAP_RUNNING,
	BKW_MMAP_DATA_PENDING,
	BKW_MMAP_EMPTY,
};
|
|
|
|
/*
 * perf-tool view of a list of events: wraps the libperf perf_evlist
 * ("core") and adds mmap, workload, stats and control-fd state.
 */
struct evlist {
	struct perf_evlist core;
	bool enabled;
	int id_pos;	/* NOTE(review): presumably set by evlist__set_id_pos() below — confirm in evlist.c */
	int is_pos;
	u64 combined_sample_type;	/* see evlist__combined_sample_type() */
	enum bkw_mmap_state bkw_mmap_state;	/* see state machine comment above */
	struct {
		int cork_fd;	/* presumably holds the forked workload until start — confirm in evlist__prepare_workload() */
		pid_t pid;	/* pid of the forked workload */
	} workload;
	struct mmap *mmap;
	struct mmap *overwrite_mmap;	/* backward/overwrite ring buffers, toggled via evlist__toggle_bkw_mmap() */
	struct evsel *selected;		/* set by evlist__set_selected() */
	struct events_stats stats;
	struct perf_env *env;
	const char *hybrid_pmu_name;
	/* Optional per-tool hook to dump a raw tracepoint sample. */
	void (*trace_event_sample_raw)(struct evlist *evlist,
				       union perf_event *event,
				       struct perf_sample *sample);
	u64 first_sample_time;
	u64 last_sample_time;
	struct {
		pthread_t th;		/* side-band thread, see evlist__start_sb_thread() */
		volatile int done;	/* flag polled by the thread — NOTE(review): likely a stop request, confirm */
	} thread;
	struct {
		int fd;		/* control file descriptor */
		int ack;	/* ack file descriptor for control commands */
		int pos;	/* index at evlist core object to check signals */
	} ctl_fd;
};
|
|
|
|
/*
 * Associates an event name with a handler callback; consumed as an array
 * by __evlist__set_tracepoints_handlers() below.
 */
struct evsel_str_handler {
	const char *name;
	void *handler;
};
|
|
|
|
/* Allocation and lifetime. */
struct evlist *evlist__new(void);
struct evlist *evlist__new_default(void);
struct evlist *evlist__new_dummy(void);
void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
		  struct perf_thread_map *threads);
void evlist__exit(struct evlist *evlist);
void evlist__delete(struct evlist *evlist);

/* Add/remove a single evsel to/from the list. */
void evlist__add(struct evlist *evlist, struct evsel *entry);
void evlist__remove(struct evlist *evlist, struct evsel *evsel);

int __evlist__add_default(struct evlist *evlist, bool precise);
|
|
|
|
static inline int evlist__add_default(struct evlist *evlist)
|
|
{
|
|
return __evlist__add_default(evlist, true);
|
|
}
|
|
|
|
/* Add a batch of raw perf_event_attr as new evsels. */
int evlist__add_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs);

int __evlist__add_default_attrs(struct evlist *evlist,
				struct perf_event_attr *attrs, size_t nr_attrs);

/*
 * Arch hook for adding the default attrs; on hybrid platforms this lets
 * the arch create one evsel per hybrid PMU (see the commit adding it).
 */
int arch_evlist__add_default_attrs(struct evlist *evlist,
				   struct perf_event_attr *attrs,
				   size_t nr_attrs);

#define evlist__add_default_attrs(evlist, array) \
	arch_evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array))

struct evsel *arch_evlist__leader(struct list_head *list);

int evlist__add_dummy(struct evlist *evlist);
struct evsel *evlist__add_aux_dummy(struct evlist *evlist, bool system_wide);
|
|
static inline struct evsel *evlist__add_dummy_on_all_cpus(struct evlist *evlist)
|
|
{
|
|
return evlist__add_aux_dummy(evlist, true);
|
|
}
|
|
|
|
/* Side-band event thread. */
int evlist__add_sb_event(struct evlist *evlist, struct perf_event_attr *attr,
			 evsel__sb_cb_t cb, void *data);
void evlist__set_cb(struct evlist *evlist, evsel__sb_cb_t cb, void *data);
int evlist__start_sb_thread(struct evlist *evlist, struct target *target);
void evlist__stop_sb_thread(struct evlist *evlist);

/* Add tracepoint "sys:name" and associate @handler with it. */
int evlist__add_newtp(struct evlist *evlist, const char *sys, const char *name, void *handler);

int __evlist__set_tracepoints_handlers(struct evlist *evlist,
				       const struct evsel_str_handler *assocs,
				       size_t nr_assocs);

#define evlist__set_tracepoints_handlers(evlist, array) \
	__evlist__set_tracepoints_handlers(evlist, array, ARRAY_SIZE(array))

/* Tracepoint filters: "set" vs "append" variants. */
int evlist__set_tp_filter(struct evlist *evlist, const char *filter);
int evlist__set_tp_filter_pid(struct evlist *evlist, pid_t pid);
int evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids);

int evlist__append_tp_filter(struct evlist *evlist, const char *filter);

int evlist__append_tp_filter_pid(struct evlist *evlist, pid_t pid);
int evlist__append_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids);

struct evsel *evlist__find_tracepoint_by_id(struct evlist *evlist, int id);
struct evsel *evlist__find_tracepoint_by_name(struct evlist *evlist, const char *name);

/* Poll-fd management for the evlist's file descriptors. */
int evlist__add_pollfd(struct evlist *evlist, int fd);
int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask);

#ifdef HAVE_EVENTFD_SUPPORT
int evlist__add_wakeup_eventfd(struct evlist *evlist, int fd);
#endif

int evlist__poll(struct evlist *evlist, int timeout);

/* Map a sample id back to the evsel / sample-id that produced it. */
struct evsel *evlist__id2evsel(struct evlist *evlist, u64 id);
struct evsel *evlist__id2evsel_strict(struct evlist *evlist, u64 id);

struct perf_sample_id *evlist__id2sid(struct evlist *evlist, u64 id);

/* Drive the bkw_mmap_state machine documented above. */
void evlist__toggle_bkw_mmap(struct evlist *evlist, enum bkw_mmap_state state);

void evlist__mmap_consume(struct evlist *evlist, int idx);

int evlist__open(struct evlist *evlist);
void evlist__close(struct evlist *evlist);

struct callchain_param;

void evlist__set_id_pos(struct evlist *evlist);
void evlist__config(struct evlist *evlist, struct record_opts *opts, struct callchain_param *callchain);
int record_opts__config(struct record_opts *opts);

/*
 * Fork the workload described by @argv; @exec_error is invoked on exec
 * failure. NOTE(review): the child is presumably held until
 * evlist__start_workload() (via workload.cork_fd) — confirm in evlist.c.
 */
int evlist__prepare_workload(struct evlist *evlist, struct target *target,
			     const char *argv[], bool pipe_output,
			     void (*exec_error)(int signo, siginfo_t *info, void *ucontext));
int evlist__start_workload(struct evlist *evlist);

struct option;

/* Parse --mmap-pages style strings. */
int __evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str);
int evlist__parse_mmap_pages(const struct option *opt, const char *str, int unset);

unsigned long perf_event_mlock_kb_in_pages(void);

/* Ring-buffer mapping. */
int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
		    unsigned int auxtrace_pages,
		    bool auxtrace_overwrite, int nr_cblocks,
		    int affinity, int flush, int comp_level);
int evlist__mmap(struct evlist *evlist, unsigned int pages);
void evlist__munmap(struct evlist *evlist);

size_t evlist__mmap_size(unsigned long pages);

/* Enable/disable all events, or a single named evsel. */
void evlist__disable(struct evlist *evlist);
void evlist__enable(struct evlist *evlist);
void evlist__toggle_enable(struct evlist *evlist);
void evlist__disable_evsel(struct evlist *evlist, char *evsel_name);
void evlist__enable_evsel(struct evlist *evlist, char *evsel_name);

void evlist__set_selected(struct evlist *evlist, struct evsel *evsel);

int evlist__create_maps(struct evlist *evlist, struct target *target);
int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel);

void evlist__set_leader(struct evlist *evlist);

/* Combined/aggregate properties across all evsels. */
u64 __evlist__combined_sample_type(struct evlist *evlist);
u64 evlist__combined_sample_type(struct evlist *evlist);
u64 evlist__combined_branch_type(struct evlist *evlist);
bool evlist__sample_id_all(struct evlist *evlist);
u16 evlist__id_hdr_size(struct evlist *evlist);

int evlist__parse_sample(struct evlist *evlist, union perf_event *event, struct perf_sample *sample);
int evlist__parse_sample_timestamp(struct evlist *evlist, union perf_event *event, u64 *timestamp);

/* Sanity checks that all evsels agree on sample layout. */
bool evlist__valid_sample_type(struct evlist *evlist);
bool evlist__valid_sample_id_all(struct evlist *evlist);
bool evlist__valid_read_format(struct evlist *evlist);

void evlist__splice_list_tail(struct evlist *evlist, struct list_head *list);
|
|
|
|
static inline bool evlist__empty(struct evlist *evlist)
|
|
{
|
|
return list_empty(&evlist->core.entries);
|
|
}
|
|
|
|
static inline struct evsel *evlist__first(struct evlist *evlist)
|
|
{
|
|
struct perf_evsel *evsel = perf_evlist__first(&evlist->core);
|
|
|
|
return container_of(evsel, struct evsel, core);
|
|
}
|
|
|
|
static inline struct evsel *evlist__last(struct evlist *evlist)
|
|
{
|
|
struct perf_evsel *evsel = perf_evlist__last(&evlist->core);
|
|
|
|
return container_of(evsel, struct evsel, core);
|
|
}
|
|
|
|
/* Render human-readable error strings for open/mmap failures into @buf. */
int evlist__strerror_open(struct evlist *evlist, int err, char *buf, size_t size);
int evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size);

bool evlist__can_select_event(struct evlist *evlist, const char *str);
void evlist__to_front(struct evlist *evlist, struct evsel *move_evsel);
|
|
|
|
/**
 * __evlist__for_each_entry - iterate through all the evsels
 * @list: list_head instance to iterate
 * @evsel: struct evsel iterator
 */
#define __evlist__for_each_entry(list, evsel) \
	list_for_each_entry(evsel, list, core.node)

/**
 * evlist__for_each_entry - iterate through all the evsels
 * @evlist: evlist instance to iterate
 * @evsel: struct evsel iterator
 */
#define evlist__for_each_entry(evlist, evsel) \
	__evlist__for_each_entry(&(evlist)->core.entries, evsel)

/**
 * __evlist__for_each_entry_continue - continue iteration through all the evsels
 * @list: list_head instance to iterate
 * @evsel: struct evsel iterator
 */
#define __evlist__for_each_entry_continue(list, evsel) \
	list_for_each_entry_continue(evsel, list, core.node)

/**
 * evlist__for_each_entry_continue - continue iteration through all the evsels
 * @evlist: evlist instance to iterate
 * @evsel: struct evsel iterator
 */
#define evlist__for_each_entry_continue(evlist, evsel) \
	__evlist__for_each_entry_continue(&(evlist)->core.entries, evsel)

/**
 * __evlist__for_each_entry_from - continue iteration from @evsel (included)
 * @list: list_head instance to iterate
 * @evsel: struct evsel iterator
 */
#define __evlist__for_each_entry_from(list, evsel) \
	list_for_each_entry_from(evsel, list, core.node)

/**
 * evlist__for_each_entry_from - continue iteration from @evsel (included)
 * @evlist: evlist instance to iterate
 * @evsel: struct evsel iterator
 */
#define evlist__for_each_entry_from(evlist, evsel) \
	__evlist__for_each_entry_from(&(evlist)->core.entries, evsel)

/**
 * __evlist__for_each_entry_reverse - iterate through all the evsels in reverse order
 * @list: list_head instance to iterate
 * @evsel: struct evsel iterator
 */
#define __evlist__for_each_entry_reverse(list, evsel) \
	list_for_each_entry_reverse(evsel, list, core.node)

/**
 * evlist__for_each_entry_reverse - iterate through all the evsels in reverse order
 * @evlist: evlist instance to iterate
 * @evsel: struct evsel iterator
 */
#define evlist__for_each_entry_reverse(evlist, evsel) \
	__evlist__for_each_entry_reverse(&(evlist)->core.entries, evsel)

/**
 * __evlist__for_each_entry_safe - safely iterate through all the evsels
 * @list: list_head instance to iterate
 * @tmp: struct evsel temp iterator
 * @evsel: struct evsel iterator
 */
#define __evlist__for_each_entry_safe(list, tmp, evsel) \
	list_for_each_entry_safe(evsel, tmp, list, core.node)

/**
 * evlist__for_each_entry_safe - safely iterate through all the evsels
 * @evlist: evlist instance to iterate
 * @evsel: struct evsel iterator
 * @tmp: struct evsel temp iterator
 */
#define evlist__for_each_entry_safe(evlist, tmp, evsel) \
	__evlist__for_each_entry_safe(&(evlist)->core.entries, tmp, evsel)
|
|
|
|
/** Iterator state for evlist__for_each_cpu */
struct evlist_cpu_iterator {
	/** The list being iterated through. */
	struct evlist *container;
	/** The current evsel of the iterator. */
	struct evsel *evsel;
	/** The CPU map index corresponding to the evsel->core.cpus for the current CPU. */
	int cpu_map_idx;
	/**
	 * The CPU map index corresponding to evlist->core.all_cpus for the
	 * current CPU. Distinct from cpu_map_idx as the evsel's cpu map may
	 * contain fewer entries.
	 */
	int evlist_cpu_map_idx;
	/** The number of CPU map entries in evlist->core.all_cpus. */
	int evlist_cpu_map_nr;
	/** The current CPU of the iterator. */
	struct perf_cpu cpu;
	/** If present, used to set the affinity when switching between CPUs. */
	struct affinity *affinity;
};
|
|
|
|
/**
 * evlist__for_each_cpu - without affinity, iterate over the evlist. With
 *                        affinity, iterate over all CPUs and then the evlist
 *                        for each evsel on that CPU. When switching between
 *                        CPUs the affinity is set to the CPU to avoid IPIs
 *                        during syscalls.
 * @evlist_cpu_itr: the iterator instance.
 * @evlist: evlist instance to iterate.
 * @affinity: NULL or used to set the affinity to the current CPU.
 */
#define evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity)		\
	for ((evlist_cpu_itr) = evlist__cpu_begin(evlist, affinity);	\
	     !evlist_cpu_iterator__end(&evlist_cpu_itr);		\
	     evlist_cpu_iterator__next(&evlist_cpu_itr))

/** Returns an iterator set to the first CPU/evsel of evlist. */
struct evlist_cpu_iterator evlist__cpu_begin(struct evlist *evlist, struct affinity *affinity);
/** Move to next element in iterator, updating CPU, evsel and the affinity. */
void evlist_cpu_iterator__next(struct evlist_cpu_iterator *evlist_cpu_itr);
/** Returns true when iterator is at the end of the CPUs and evlist. */
bool evlist_cpu_iterator__end(const struct evlist_cpu_iterator *evlist_cpu_itr);
|
|
|
|
struct evsel *evlist__get_tracking_event(struct evlist *evlist);
void evlist__set_tracking_event(struct evlist *evlist, struct evsel *tracking_evsel);

struct evsel *evlist__find_evsel_by_str(struct evlist *evlist, const char *str);

struct evsel *evlist__event2evsel(struct evlist *evlist, union perf_event *event);

bool evlist__exclude_kernel(struct evlist *evlist);

void evlist__force_leader(struct evlist *evlist);

struct evsel *evlist__reset_weak_group(struct evlist *evlist, struct evsel *evsel, bool close);

/* Textual commands accepted on the control file descriptor (ctl_fd). */
#define EVLIST_CTL_CMD_ENABLE_TAG   "enable"
#define EVLIST_CTL_CMD_DISABLE_TAG  "disable"
#define EVLIST_CTL_CMD_ACK_TAG      "ack\n"
#define EVLIST_CTL_CMD_SNAPSHOT_TAG "snapshot"
#define EVLIST_CTL_CMD_EVLIST_TAG   "evlist"
#define EVLIST_CTL_CMD_STOP_TAG     "stop"
#define EVLIST_CTL_CMD_PING_TAG     "ping"

#define EVLIST_CTL_CMD_MAX_LEN 64

/* Parsed form of the command tags above. */
enum evlist_ctl_cmd {
	EVLIST_CTL_CMD_UNSUPPORTED = 0,
	EVLIST_CTL_CMD_ENABLE,
	EVLIST_CTL_CMD_DISABLE,
	EVLIST_CTL_CMD_ACK,
	EVLIST_CTL_CMD_SNAPSHOT,
	EVLIST_CTL_CMD_EVLIST,
	EVLIST_CTL_CMD_STOP,
	EVLIST_CTL_CMD_PING,
};

/* Control-fd lifecycle and command processing. */
int evlist__parse_control(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close);
void evlist__close_control(int ctl_fd, int ctl_fd_ack, bool *ctl_fd_close);
int evlist__initialize_ctlfd(struct evlist *evlist, int ctl_fd, int ctl_fd_ack);
int evlist__finalize_ctlfd(struct evlist *evlist);
bool evlist__ctlfd_initialized(struct evlist *evlist);
int evlist__ctlfd_update(struct evlist *evlist, struct pollfd *update);
int evlist__ctlfd_process(struct evlist *evlist, enum evlist_ctl_cmd *cmd);
int evlist__ctlfd_ack(struct evlist *evlist);

#define EVLIST_ENABLED_MSG "Events enabled\n"
#define EVLIST_DISABLED_MSG "Events disabled\n"

struct evsel *evlist__find_evsel(struct evlist *evlist, int idx);

int evlist__scnprintf_evsels(struct evlist *evlist, size_t size, char *bf);
void evlist__check_mem_load_aux(struct evlist *evlist);
#endif /* __PERF_EVLIST_H */
|