#ifndef __PERF_EVSEL_H
#define __PERF_EVSEL_H 1

#include <linux/list.h>
#include <stdbool.h>
#include <stddef.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include "xyarray.h"
#include "cgroup.h"
#include "hist.h"
#include "symbol.h"

struct perf_counts_values {
	union {
		struct {
			u64 val;
			u64 ena;
			u64 run;
		};
		u64 values[3];
	};
};
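
/*
 * Field semantics (a descriptive note, not from the original header): @val is
 * the raw counter value; @ena and @run are the times the event was enabled
 * and actually running, as returned when PERF_FORMAT_TOTAL_TIME_ENABLED and
 * PERF_FORMAT_TOTAL_TIME_RUNNING are requested. The scaled read helpers
 * below estimate a multiplexed count as val * ena / run.
 */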

struct perf_counts {
	s8			  scaled;
	struct perf_counts_values aggr;
	struct perf_counts_values cpu[];
};

struct perf_evsel;

/*
 * Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there is
 * more than one entry in the evlist.
 */
struct perf_sample_id {
	struct hlist_node	node;
	u64			id;
	struct perf_evsel	*evsel;

	/* Holds total ID period value for PERF_SAMPLE_READ processing. */
	u64			period;
};

/** struct perf_evsel - event selector
 *
 * @name - Can be set to retain the original event name passed by the user,
 *         so that when showing results in tools such as 'perf stat', we
 *         show the name used, not some alias.
 * @id_pos: the position of the event id (PERF_SAMPLE_ID or
 *          PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of
 *          struct sample_event
 * @is_pos: the position (counting backwards) of the event id (PERF_SAMPLE_ID or
 *          PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if sample_id_all
 *          is used there is an id sample appended to non-sample events
 */
struct perf_evsel {
	struct list_head	node;
	struct perf_event_attr	attr;
	char			*filter;
	struct xyarray		*fd;
	struct xyarray		*sample_id;
	u64			*id;
	struct perf_counts	*counts;
	struct perf_counts	*prev_raw_counts;
	int			idx;
	u32			ids;
	struct hists		hists;
	char			*name;
	double			scale;
	const char		*unit;
	struct event_format	*tp_format;
	union {
		void		*priv;
		off_t		id_offset;
	};
	struct cgroup_sel	*cgrp;
	void			*handler;
	struct cpu_map		*cpus;
	unsigned int		sample_size;
	int			id_pos;
	int			is_pos;
	bool			supported;
	bool			needs_swap;
	bool			no_aux_samples;
	bool			immediate;
	bool			system_wide;
	/* parse modifier helper */
	int			exclude_GH;
	int			nr_members;
	int			sample_read;
	struct perf_evsel	*leader;
	char			*group_name;
};

union u64_swap {
	u64 val64;
	u32 val32[2];
};

#define hists_to_evsel(h) container_of(h, struct perf_evsel, hists)

struct cpu_map;
struct thread_map;
struct perf_evlist;
struct record_opts;

struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx);

static inline struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
{
	return perf_evsel__new_idx(attr, 0);
}

struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx);

static inline struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name)
{
	return perf_evsel__newtp_idx(sys, name, 0);
}
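
/*
 * Illustrative sketch (editorial note, not part of the original header):
 * creating an event selector for a hardware event; the attribute values and
 * tracepoint names are examples only.
 *
 *	struct perf_event_attr attr = {
 *		.type   = PERF_TYPE_HARDWARE,
 *		.config = PERF_COUNT_HW_CPU_CYCLES,
 *	};
 *	struct perf_evsel *evsel = perf_evsel__new(&attr);
 *
 * For tracepoints, perf_evsel__newtp(sys, name) builds an evsel for the given
 * subsystem/event pair (e.g. "sched", "sched_switch").
 */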

struct event_format *event_format__new(const char *sys, const char *name);

void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx);
void perf_evsel__exit(struct perf_evsel *evsel);
void perf_evsel__delete(struct perf_evsel *evsel);

void perf_evsel__config(struct perf_evsel *evsel,
			struct record_opts *opts);

int __perf_evsel__sample_size(u64 sample_type);
void perf_evsel__calc_id_pos(struct perf_evsel *evsel);

bool perf_evsel__is_cache_op_valid(u8 type, u8 op);

#define PERF_EVSEL__MAX_ALIASES 8

extern const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				       [PERF_EVSEL__MAX_ALIASES];
extern const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_EVSEL__MAX_ALIASES];
extern const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
					      [PERF_EVSEL__MAX_ALIASES];
extern const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX];
extern const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX];
int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size);
const char *perf_evsel__name(struct perf_evsel *evsel);

const char *perf_evsel__group_name(struct perf_evsel *evsel);
int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size);

int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus);
void perf_evsel__free_fd(struct perf_evsel *evsel);
void perf_evsel__free_id(struct perf_evsel *evsel);
void perf_evsel__free_counts(struct perf_evsel *evsel);
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);

void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
				  enum perf_event_sample_format bit);
void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
				    enum perf_event_sample_format bit);

#define perf_evsel__set_sample_bit(evsel, bit) \
	__perf_evsel__set_sample_bit(evsel, PERF_SAMPLE_##bit)

#define perf_evsel__reset_sample_bit(evsel, bit) \
	__perf_evsel__reset_sample_bit(evsel, PERF_SAMPLE_##bit)
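
/*
 * Usage note (editorial, not from the original header): the sample-bit macros
 * take the PERF_SAMPLE_* suffix, so perf_evsel__set_sample_bit(evsel, CPU)
 * expands to __perf_evsel__set_sample_bit(evsel, PERF_SAMPLE_CPU).
 */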

void perf_evsel__set_sample_id(struct perf_evsel *evsel,
			       bool use_sample_identifier);

int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
			   const char *filter);
int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads);

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus);
int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads);
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads);
void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads);
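
/*
 * Typical lifecycle (a sketch for orientation, not from the original header):
 *
 *	struct perf_evsel *evsel = perf_evsel__new(&attr);
 *
 *	if (perf_evsel__open(evsel, cpus, threads) < 0)
 *		goto out_delete;
 *	...
 *	perf_evsel__close(evsel, ncpus, nthreads);
 * out_delete:
 *	perf_evsel__delete(evsel);
 *
 * where cpus/threads are the struct cpu_map / struct thread_map describing
 * what to monitor, and ncpus/nthreads match what was used at open time.
 */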

struct perf_sample;

void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
			 const char *name);
u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
		       const char *name);

static inline char *perf_evsel__strval(struct perf_evsel *evsel,
				       struct perf_sample *sample,
				       const char *name)
{
	return perf_evsel__rawptr(evsel, sample, name);
}

struct format_field;

struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name);
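
/*
 * Example (editorial note; the field name is illustrative): for a tracepoint
 * evsel these accessors look up fields of the current sample by name, e.g.
 *
 *	u64 pid = perf_evsel__intval(evsel, sample, "pid");
 *
 * perf_evsel__rawptr()/perf_evsel__strval() return a pointer into the raw
 * sample data instead of an integer value.
 */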

#define perf_evsel__match(evsel, t, c)		\
	(evsel->attr.type == PERF_TYPE_##t &&	\
	 evsel->attr.config == PERF_COUNT_##c)

static inline bool perf_evsel__match2(struct perf_evsel *e1,
				      struct perf_evsel *e2)
{
	return (e1->attr.type == e2->attr.type) &&
	       (e1->attr.config == e2->attr.config);
}

#define perf_evsel__cmp(a, b)			\
	((a) &&					\
	 (b) &&					\
	 (a)->attr.type == (b)->attr.type &&	\
	 (a)->attr.config == (b)->attr.config)
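
/*
 * Usage note (editorial): perf_evsel__match() pastes the PERF_TYPE_ and
 * PERF_COUNT_ prefixes, so perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)
 * checks attr.type == PERF_TYPE_HARDWARE && attr.config == PERF_COUNT_HW_CPU_CYCLES,
 * while perf_evsel__cmp()/perf_evsel__match2() compare two evsels directly.
 */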

int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale);

/**
 * perf_evsel__read_on_cpu - Read out the results on a CPU and thread
 *
 * @evsel - event selector to read value
 * @cpu - CPU of interest
 * @thread - thread of interest
 */
static inline int perf_evsel__read_on_cpu(struct perf_evsel *evsel,
					  int cpu, int thread)
{
	return __perf_evsel__read_on_cpu(evsel, cpu, thread, false);
}

/**
 * perf_evsel__read_on_cpu_scaled - Read out the results on a CPU and thread, scaled
 *
 * @evsel - event selector to read value
 * @cpu - CPU of interest
 * @thread - thread of interest
 */
static inline int perf_evsel__read_on_cpu_scaled(struct perf_evsel *evsel,
						 int cpu, int thread)
{
	return __perf_evsel__read_on_cpu(evsel, cpu, thread, true);
}

int __perf_evsel__read(struct perf_evsel *evsel, int ncpus, int nthreads,
		       bool scale);

/**
 * perf_evsel__read - Read the aggregate results on all CPUs
 *
 * @evsel - event selector to read value
 * @ncpus - Number of cpus affected, from zero
 * @nthreads - Number of threads affected, from zero
 */
static inline int perf_evsel__read(struct perf_evsel *evsel,
				   int ncpus, int nthreads)
{
	return __perf_evsel__read(evsel, ncpus, nthreads, false);
}

/**
 * perf_evsel__read_scaled - Read the aggregate results on all CPUs, scaled
 *
 * @evsel - event selector to read value
 * @ncpus - Number of cpus affected, from zero
 * @nthreads - Number of threads affected, from zero
 */
static inline int perf_evsel__read_scaled(struct perf_evsel *evsel,
					  int ncpus, int nthreads)
{
	return __perf_evsel__read(evsel, ncpus, nthreads, true);
}

void hists__init(struct hists *hists);

int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
			     struct perf_sample *sample);

static inline struct perf_evsel *perf_evsel__next(struct perf_evsel *evsel)
{
	return list_entry(evsel->node.next, struct perf_evsel, node);
}

static inline struct perf_evsel *perf_evsel__prev(struct perf_evsel *evsel)
{
	return list_entry(evsel->node.prev, struct perf_evsel, node);
}

/**
 * perf_evsel__is_group_leader - Return whether given evsel is a leader event
 *
 * @evsel - evsel selector to be tested
 *
 * Return %true if @evsel is a group leader or a stand-alone event
 */
static inline bool perf_evsel__is_group_leader(const struct perf_evsel *evsel)
{
	return evsel->leader == evsel;
}

/**
 * perf_evsel__is_group_event - Return whether given evsel is a group event
 *
 * @evsel - evsel selector to be tested
 *
 * Return %true iff event group view is enabled and @evsel is an actual group
 * leader which has other members in the group
 */
static inline bool perf_evsel__is_group_event(struct perf_evsel *evsel)
{
	if (!symbol_conf.event_group)
		return false;

	return perf_evsel__is_group_leader(evsel) && evsel->nr_members > 1;
}

/**
 * perf_evsel__is_function_event - Return whether given evsel is a function
 * trace event
 *
 * @evsel - evsel selector to be tested
 *
 * Return %true if the event is a function trace event
 */
static inline bool perf_evsel__is_function_event(struct perf_evsel *evsel)
{
#define FUNCTION_EVENT "ftrace:function"

	return evsel->name &&
	       !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT));

#undef FUNCTION_EVENT
}

struct perf_attr_details {
	bool freq;
	bool verbose;
	bool event_group;
};

int perf_evsel__fprintf(struct perf_evsel *evsel,
			struct perf_attr_details *details, FILE *fp);

bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
			  char *msg, size_t msgsize);
int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
			      int err, char *msg, size_t size);

static inline int perf_evsel__group_idx(struct perf_evsel *evsel)
{
	return evsel->idx - evsel->leader->idx;
}

#define for_each_group_member(_evsel, _leader)					\
for ((_evsel) = list_entry((_leader)->node.next, struct perf_evsel, node);	\
     (_evsel) && (_evsel)->leader == (_leader);					\
     (_evsel) = list_entry((_evsel)->node.next, struct perf_evsel, node))
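
/*
 * Example (editorial note): iterating the non-leader members of a group:
 *
 *	struct perf_evsel *pos;
 *
 *	for_each_group_member(pos, leader) {
 *		// pos visits every evsel whose ->leader is @leader,
 *		// in evlist order, excluding the leader itself
 *	}
 */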

#endif /* __PERF_EVSEL_H */