linux-next/tools/perf/util/event.c
Arnaldo Carvalho de Melo 401b8e1317 perf tools: Fix thread_map event synthesizing in top and record
Jeff Moyer reported these messages:

  Warning:  ... trying to fall back to cpu-clock-ticks

couldn't open /proc/-1/status
couldn't open /proc/-1/maps
[ls output]
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.008 MB perf.data (~363 samples) ]

That led me and David Ahern to see that something was fishy in the thread
synthesizing routines, at least for the case where the workload is started
by 'perf record': -1 is the default for target_tid when no 'perf record --tid'
parameter is given, so we ended up trying to synthesize the PERF_RECORD_MMAP
and PERF_RECORD_COMM events for thread -1, which is a bug.
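
For reference, those messages come straight from how the synthesizing
routines below build their /proc paths from whatever pid they are handed,
so a leftover -1 turns into exactly those paths. A tiny standalone sketch,
for illustration only (not part of the fix):

  #include <limits.h>
  #include <stdio.h>
  #include <sys/types.h>

  int main(void)
  {
          char filename[PATH_MAX];
          pid_t pid = -1; /* default target_tid when no --tid/--pid is given */

          /* same pattern as event__synthesize_comm()/_mmap_events() below */
          snprintf(filename, sizeof(filename), "/proc/%d/status", pid);
          puts(filename); /* prints "/proc/-1/status" */
          return 0;
  }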

Investigating this, I noticed that some bugs were introduced when we added
support for recording a process and its threads using --pid, and that the way
to fix them is to stop passing target_tid to the event synthesizing routines
and instead pass the thread_map, which holds the list of threads for --pid or
just the single thread for --tid (see the sketch below).
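
Roughly, the idea looks like this (a sketch only, not the actual
builtin-record/builtin-top change; synthesize_single_tid() is a hypothetical
helper and the thread_map layout is inferred from how
event__synthesize_thread_map() uses threads->nr and threads->map[] below):

  struct thread_map {     /* inferred: see event__synthesize_thread_map() */
          int     nr;
          pid_t   map[];
  };

  /* hypothetical: synthesize for a single --tid instead of passing -1 around */
  static int synthesize_single_tid(pid_t tid, event__handler_t process,
                                   struct perf_session *session)
  {
          struct thread_map *threads = malloc(sizeof(*threads) + sizeof(pid_t));
          int err;

          if (threads == NULL)
                  return -1;

          threads->nr     = 1;
          threads->map[0] = tid;
          err = event__synthesize_thread_map(threads, process, session);
          free(threads);
          return err;
  }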

Checked in the following ways:

On an 8-way machine, run cyclictest:

[root@emilia ~]# perf record cyclictest -a -t -n -p99 -i100 -d50
policy: fifo: loadavg: 0.00 0.13 0.31 2/139 28798

T: 0 (28791) P:99 I:100 C:  25072 Min:      4 Act:    5 Avg:    6 Max:     122
T: 1 (28792) P:98 I:150 C:  16715 Min:      4 Act:    6 Avg:    5 Max:      27
T: 2 (28793) P:97 I:200 C:  12534 Min:      4 Act:    5 Avg:    4 Max:       8
T: 3 (28794) P:96 I:250 C:  10028 Min:      4 Act:    5 Avg:    5 Max:      96
T: 4 (28795) P:95 I:300 C:   8357 Min:      5 Act:    6 Avg:    5 Max:      12
T: 5 (28796) P:94 I:350 C:   7163 Min:      5 Act:    6 Avg:    5 Max:      12
T: 6 (28797) P:93 I:400 C:   6267 Min:      4 Act:    5 Avg:    5 Max:       9
T: 7 (28798) P:92 I:450 C:   5571 Min:      4 Act:    5 Avg:    5 Max:       9
^C[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.108 MB perf.data (~4719 samples) ]

[root@emilia ~]#

This will create one extra thread per CPU:

[root@emilia ~]# tuna -t cyclictest -CP
                      thread       ctxt_switches
    pid SCHED_ rtpri affinity voluntary nonvoluntary             cmd
 28825   OTHER     0     0xff      2169          671      cyclictest
  28832   FIFO    93        6     52338            1      cyclictest
  28833   FIFO    92        7     46524            1      cyclictest
  28826   FIFO    99        0    209360            1      cyclictest
  28827   FIFO    98        1    139577            1      cyclictest
  28828   FIFO    97        2    104686            0      cyclictest
  28829   FIFO    96        3     83751            1      cyclictest
  28830   FIFO    95        4     69794            1      cyclictest
  28831   FIFO    94        5     59825            1      cyclictest
[root@emilia ~]#

So we should expect samples only for the above 9 threads when using the
--dump-raw-trace|-D 'perf report' switch to look at the tid column:

[root@emilia ~]# perf report -D | grep RECORD_SAMPLE | cut -d/ -f2 | cut -d: -f1 | sort | uniq -c
    629 28825
    110 28826
    491 28827
    308 28828
    198 28829
    621 28830
    225 28831
    203 28832
     89 28833
[root@emilia ~]#

So for workloads started by 'perf record' it seems to work. Now, for existing
workloads, just run cyclictest first, without 'perf record':

[root@emilia ~]# tuna -t cyclictest -CP
                      thread       ctxt_switches
    pid SCHED_ rtpri affinity voluntary nonvoluntary             cmd
 28859   OTHER     0     0xff       594          200      cyclictest
  28864   FIFO    95        4     16587            1      cyclictest
  28865   FIFO    94        5     14219            1      cyclictest
  28866   FIFO    93        6     12443            0      cyclictest
  28867   FIFO    92        7     11062            1      cyclictest
  28860   FIFO    99        0     49779            1      cyclictest
  28861   FIFO    98        1     33190            1      cyclictest
  28862   FIFO    97        2     24895            1      cyclictest
  28863   FIFO    96        3     19918            1      cyclictest
[root@emilia ~]#

and then later did:

[root@emilia ~]# perf record --pid 28859 sleep 3
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.027 MB perf.data (~1195 samples) ]
[root@emilia ~]#

This collects 3 seconds' worth of samples for pid 28859 and its threads; checking
the per-tid counts again:

[root@emilia ~]# perf report -D | grep RECORD_SAMPLE | cut -d/ -f2 | cut -d: -f1 | sort | uniq -c
     15 28859
     33 28860
     19 28861
     13 28862
     13 28863
     10 28864
     11 28865
      9 28866
    255 28867
[root@emilia ~]#

Works; the last thing is to check whether looking at just one of those threads
also works:

[root@emilia ~]# perf record --tid 28866 sleep 3
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.006 MB perf.data (~242 samples) ]
[root@emilia ~]# perf report -D | grep RECORD_SAMPLE | cut -d/ -f2 | cut -d: -f1 | sort | uniq -c
      3 28866
[root@emilia ~]#

Works too.

Reported-by: Jeff Moyer <jmoyer@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Tom Zanussi <tzanussi@gmail.com>
LKML-Reference: <new-submission>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2011-02-10 12:52:47 -02:00


#include <linux/types.h>
#include "event.h"
#include "debug.h"
#include "session.h"
#include "sort.h"
#include "string.h"
#include "strlist.h"
#include "thread.h"
static const char *event__name[] = {
[0] = "TOTAL",
[PERF_RECORD_MMAP] = "MMAP",
[PERF_RECORD_LOST] = "LOST",
[PERF_RECORD_COMM] = "COMM",
[PERF_RECORD_EXIT] = "EXIT",
[PERF_RECORD_THROTTLE] = "THROTTLE",
[PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
[PERF_RECORD_FORK] = "FORK",
[PERF_RECORD_READ] = "READ",
[PERF_RECORD_SAMPLE] = "SAMPLE",
[PERF_RECORD_HEADER_ATTR] = "ATTR",
[PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE",
[PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
[PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID",
[PERF_RECORD_FINISHED_ROUND] = "FINISHED_ROUND",
};
const char *event__get_event_name(unsigned int id)
{
if (id >= ARRAY_SIZE(event__name))
return "INVALID";
if (!event__name[id])
return "UNKNOWN";
return event__name[id];
}
static struct sample_data synth_sample = {
.pid = -1,
.tid = -1,
.time = -1,
.stream_id = -1,
.cpu = -1,
.period = 1,
};
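/*
 * Synthesize PERF_RECORD_COMM event(s) for 'pid': read the comm and tgid
 * from /proc/<pid>/status and, when 'full' is set, emit one COMM event per
 * task listed in /proc/<pid>/task.  Returns the tgid, or 0 if we raced with
 * the task exiting.
 */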
static pid_t event__synthesize_comm(event_t *event, pid_t pid, int full,
event__handler_t process,
struct perf_session *session)
{
char filename[PATH_MAX];
char bf[BUFSIZ];
FILE *fp;
size_t size = 0;
DIR *tasks;
struct dirent dirent, *next;
pid_t tgid = 0;
snprintf(filename, sizeof(filename), "/proc/%d/status", pid);
fp = fopen(filename, "r");
if (fp == NULL) {
out_race:
/*
* We raced with a task exiting - just return:
*/
pr_debug("couldn't open %s\n", filename);
return 0;
}
memset(&event->comm, 0, sizeof(event->comm));
while (!event->comm.comm[0] || !event->comm.pid) {
if (fgets(bf, sizeof(bf), fp) == NULL) {
pr_warning("couldn't get COMM and pgid, malformed %s\n", filename);
goto out;
}
if (memcmp(bf, "Name:", 5) == 0) {
char *name = bf + 5;
while (*name && isspace(*name))
++name;
size = strlen(name) - 1;
memcpy(event->comm.comm, name, size++);
} else if (memcmp(bf, "Tgid:", 5) == 0) {
char *tgids = bf + 5;
while (*tgids && isspace(*tgids))
++tgids;
tgid = event->comm.pid = atoi(tgids);
}
}
event->comm.header.type = PERF_RECORD_COMM;
size = ALIGN(size, sizeof(u64));
memset(event->comm.comm + size, 0, session->id_hdr_size);
event->comm.header.size = (sizeof(event->comm) -
(sizeof(event->comm.comm) - size) +
session->id_hdr_size);
if (!full) {
event->comm.tid = pid;
process(event, &synth_sample, session);
goto out;
}
snprintf(filename, sizeof(filename), "/proc/%d/task", pid);
tasks = opendir(filename);
if (tasks == NULL)
goto out_race;
while (!readdir_r(tasks, &dirent, &next) && next) {
char *end;
pid = strtol(dirent.d_name, &end, 10);
if (*end)
continue;
event->comm.tid = pid;
process(event, &synth_sample, session);
}
closedir(tasks);
out:
fclose(fp);
return tgid;
}
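/*
 * Synthesize one PERF_RECORD_MMAP event per executable mapping found in
 * /proc/<pid>/maps, attributed to (tgid, pid).  Returns -1 if the maps file
 * couldn't be opened (task exited), 0 otherwise.
 */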
static int event__synthesize_mmap_events(event_t *event, pid_t pid, pid_t tgid,
event__handler_t process,
struct perf_session *session)
{
char filename[PATH_MAX];
FILE *fp;
snprintf(filename, sizeof(filename), "/proc/%d/maps", pid);
fp = fopen(filename, "r");
if (fp == NULL) {
/*
* We raced with a task exiting - just return:
*/
pr_debug("couldn't open %s\n", filename);
return -1;
}
event->header.type = PERF_RECORD_MMAP;
/*
* Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
*/
event->header.misc = PERF_RECORD_MISC_USER;
while (1) {
char bf[BUFSIZ], *pbf = bf;
int n;
size_t size;
if (fgets(bf, sizeof(bf), fp) == NULL)
break;
/* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
n = hex2u64(pbf, &event->mmap.start);
if (n < 0)
continue;
pbf += n + 1;
n = hex2u64(pbf, &event->mmap.len);
if (n < 0)
continue;
pbf += n + 3;
if (*pbf == 'x') { /* vm_exec */
char *execname = strchr(bf, '/');
/* Catch VDSO */
if (execname == NULL)
execname = strstr(bf, "[vdso]");
if (execname == NULL)
continue;
pbf += 3;
n = hex2u64(pbf, &event->mmap.pgoff);
size = strlen(execname);
execname[size - 1] = '\0'; /* Remove \n */
memcpy(event->mmap.filename, execname, size);
size = ALIGN(size, sizeof(u64));
event->mmap.len -= event->mmap.start;
event->mmap.header.size = (sizeof(event->mmap) -
(sizeof(event->mmap.filename) - size));
memset(event->mmap.filename + size, 0, session->id_hdr_size);
event->mmap.header.size += session->id_hdr_size;
event->mmap.pid = tgid;
event->mmap.tid = pid;
process(event, &synth_sample, session);
}
}
fclose(fp);
return 0;
}
int event__synthesize_modules(event__handler_t process,
struct perf_session *session,
struct machine *machine)
{
struct rb_node *nd;
struct map_groups *kmaps = &machine->kmaps;
event_t *event = zalloc(sizeof(event->mmap) + session->id_hdr_size);
if (event == NULL) {
pr_debug("Not enough memory synthesizing mmap event "
"for kernel modules\n");
return -1;
}
event->header.type = PERF_RECORD_MMAP;
/*
* kernel uses 0 for user space maps, see kernel/perf_event.c
* __perf_event_mmap
*/
if (machine__is_host(machine))
event->header.misc = PERF_RECORD_MISC_KERNEL;
else
event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
nd; nd = rb_next(nd)) {
size_t size;
struct map *pos = rb_entry(nd, struct map, rb_node);
if (pos->dso->kernel)
continue;
size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
event->mmap.header.type = PERF_RECORD_MMAP;
event->mmap.header.size = (sizeof(event->mmap) -
(sizeof(event->mmap.filename) - size));
memset(event->mmap.filename + size, 0, session->id_hdr_size);
event->mmap.header.size += session->id_hdr_size;
event->mmap.start = pos->start;
event->mmap.len = pos->end - pos->start;
event->mmap.pid = machine->pid;
memcpy(event->mmap.filename, pos->dso->long_name,
pos->dso->long_name_len + 1);
process(event, &synth_sample, session);
}
free(event);
return 0;
}
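/*
 * Synthesize COMM events for 'pid' and all of its threads, then MMAP events
 * for its address space.
 */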
static int __event__synthesize_thread(event_t *comm_event, event_t *mmap_event,
pid_t pid, event__handler_t process,
struct perf_session *session)
{
pid_t tgid = event__synthesize_comm(comm_event, pid, 1, process,
session);
if (tgid == -1)
return -1;
return event__synthesize_mmap_events(mmap_event, pid, tgid,
process, session);
}
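/*
 * Synthesize COMM and MMAP events for every thread in 'threads'.  This is
 * what 'perf record' and 'perf top' now use, instead of passing around a
 * single target_tid that may well be -1.
 */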
int event__synthesize_thread_map(struct thread_map *threads,
event__handler_t process,
struct perf_session *session)
{
event_t *comm_event, *mmap_event;
int err = -1, thread;
comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size);
if (comm_event == NULL)
goto out;
mmap_event = malloc(sizeof(mmap_event->mmap) + session->id_hdr_size);
if (mmap_event == NULL)
goto out_free_comm;
err = 0;
for (thread = 0; thread < threads->nr; ++thread) {
if (__event__synthesize_thread(comm_event, mmap_event,
threads->map[thread],
process, session)) {
err = -1;
break;
}
}
free(mmap_event);
out_free_comm:
free(comm_event);
out:
return err;
}
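/*
 * Walk /proc and synthesize COMM and MMAP events for every process in the
 * system, for system wide sessions.
 */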
int event__synthesize_threads(event__handler_t process,
struct perf_session *session)
{
DIR *proc;
struct dirent dirent, *next;
event_t *comm_event, *mmap_event;
int err = -1;
comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size);
if (comm_event == NULL)
goto out;
mmap_event = malloc(sizeof(mmap_event->mmap) + session->id_hdr_size);
if (mmap_event == NULL)
goto out_free_comm;
proc = opendir("/proc");
if (proc == NULL)
goto out_free_mmap;
while (!readdir_r(proc, &dirent, &next) && next) {
char *end;
pid_t pid = strtol(dirent.d_name, &end, 10);
if (*end) /* only interested in proper numerical dirents */
continue;
__event__synthesize_thread(comm_event, mmap_event, pid,
process, session);
}
closedir(proc);
err = 0;
out_free_mmap:
free(mmap_event);
out_free_comm:
free(comm_event);
out:
return err;
}
struct process_symbol_args {
const char *name;
u64 start;
};
static int find_symbol_cb(void *arg, const char *name, char type,
u64 start, u64 end __used)
{
struct process_symbol_args *args = arg;
/*
* Must be a function or at least an alias, as in PARISC64, where "_text" is
* an 'A' to the same address as "_stext".
*/
if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
type == 'A') || strcmp(name, args->name))
return 0;
args->start = start;
return 1;
}
int event__synthesize_kernel_mmap(event__handler_t process,
struct perf_session *session,
struct machine *machine,
const char *symbol_name)
{
size_t size;
const char *filename, *mmap_name;
char path[PATH_MAX];
char name_buff[PATH_MAX];
struct map *map;
int err;
/*
* We should get this from /sys/kernel/sections/.text, but till that is
* available use this, and after it is use this as a fallback for older
* kernels.
*/
struct process_symbol_args args = { .name = symbol_name, };
event_t *event = zalloc(sizeof(event->mmap) + session->id_hdr_size);
if (event == NULL) {
pr_debug("Not enough memory synthesizing mmap event "
"for kernel modules\n");
return -1;
}
mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
if (machine__is_host(machine)) {
/*
* kernel uses PERF_RECORD_MISC_USER for user space maps,
* see kernel/perf_event.c __perf_event_mmap
*/
event->header.misc = PERF_RECORD_MISC_KERNEL;
filename = "/proc/kallsyms";
} else {
event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
if (machine__is_default_guest(machine))
filename = (char *) symbol_conf.default_guest_kallsyms;
else {
sprintf(path, "%s/proc/kallsyms", machine->root_dir);
filename = path;
}
}
if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0)
return -ENOENT;
map = machine->vmlinux_maps[MAP__FUNCTION];
size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
"%s%s", mmap_name, symbol_name) + 1;
size = ALIGN(size, sizeof(u64));
event->mmap.header.type = PERF_RECORD_MMAP;
event->mmap.header.size = (sizeof(event->mmap) -
(sizeof(event->mmap.filename) - size) + session->id_hdr_size);
event->mmap.pgoff = args.start;
event->mmap.start = map->start;
event->mmap.len = map->end - event->mmap.start;
event->mmap.pid = machine->pid;
err = process(event, &synth_sample, session);
free(event);
return err;
}
static void thread__comm_adjust(struct thread *self, struct hists *hists)
{
char *comm = self->comm;
if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
(!symbol_conf.comm_list ||
strlist__has_entry(symbol_conf.comm_list, comm))) {
u16 slen = strlen(comm);
if (hists__new_col_len(hists, HISTC_COMM, slen))
hists__set_col_len(hists, HISTC_THREAD, slen + 6);
}
}
static int thread__set_comm_adjust(struct thread *self, const char *comm,
struct hists *hists)
{
int ret = thread__set_comm(self, comm);
if (ret)
return ret;
thread__comm_adjust(self, hists);
return 0;
}
int event__process_comm(event_t *self, struct sample_data *sample __used,
struct perf_session *session)
{
struct thread *thread = perf_session__findnew(session, self->comm.tid);
dump_printf(": %s:%d\n", self->comm.comm, self->comm.tid);
if (thread == NULL || thread__set_comm_adjust(thread, self->comm.comm,
&session->hists)) {
dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
return -1;
}
return 0;
}
int event__process_lost(event_t *self, struct sample_data *sample __used,
struct perf_session *session)
{
dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
self->lost.id, self->lost.lost);
session->hists.stats.total_lost += self->lost.lost;
return 0;
}
static void event_set_kernel_mmap_len(struct map **maps, event_t *self)
{
maps[MAP__FUNCTION]->start = self->mmap.start;
maps[MAP__FUNCTION]->end = self->mmap.start + self->mmap.len;
/*
* Be a bit paranoid here, some perf.data file came with
* a zero sized synthesized MMAP event for the kernel.
*/
if (maps[MAP__FUNCTION]->end == 0)
maps[MAP__FUNCTION]->end = ~0ULL;
}
static int event__process_kernel_mmap(event_t *self,
struct perf_session *session)
{
struct map *map;
char kmmap_prefix[PATH_MAX];
struct machine *machine;
enum dso_kernel_type kernel_type;
bool is_kernel_mmap;
machine = perf_session__findnew_machine(session, self->mmap.pid);
if (!machine) {
pr_err("Can't find id %d's machine\n", self->mmap.pid);
goto out_problem;
}
machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
if (machine__is_host(machine))
kernel_type = DSO_TYPE_KERNEL;
else
kernel_type = DSO_TYPE_GUEST_KERNEL;
is_kernel_mmap = memcmp(self->mmap.filename,
kmmap_prefix,
strlen(kmmap_prefix)) == 0;
if (self->mmap.filename[0] == '/' ||
(!is_kernel_mmap && self->mmap.filename[0] == '[')) {
char short_module_name[1024];
char *name, *dot;
if (self->mmap.filename[0] == '/') {
name = strrchr(self->mmap.filename, '/');
if (name == NULL)
goto out_problem;
++name; /* skip / */
dot = strrchr(name, '.');
if (dot == NULL)
goto out_problem;
snprintf(short_module_name, sizeof(short_module_name),
"[%.*s]", (int)(dot - name), name);
strxfrchar(short_module_name, '-', '_');
} else
strcpy(short_module_name, self->mmap.filename);
map = machine__new_module(machine, self->mmap.start,
self->mmap.filename);
if (map == NULL)
goto out_problem;
name = strdup(short_module_name);
if (name == NULL)
goto out_problem;
map->dso->short_name = name;
map->dso->sname_alloc = 1;
map->end = map->start + self->mmap.len;
} else if (is_kernel_mmap) {
const char *symbol_name = (self->mmap.filename +
strlen(kmmap_prefix));
/*
* Should be there already, from the build-id table in
* the header.
*/
struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,
kmmap_prefix);
if (kernel == NULL)
goto out_problem;
kernel->kernel = kernel_type;
if (__machine__create_kernel_maps(machine, kernel) < 0)
goto out_problem;
event_set_kernel_mmap_len(machine->vmlinux_maps, self);
perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
symbol_name,
self->mmap.pgoff);
if (machine__is_default_guest(machine)) {
/*
* preload dso of guest kernel and modules
*/
dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
NULL);
}
}
return 0;
out_problem:
return -1;
}
int event__process_mmap(event_t *self, struct sample_data *sample __used,
struct perf_session *session)
{
struct machine *machine;
struct thread *thread;
struct map *map;
u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
int ret = 0;
dump_printf(" %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n",
self->mmap.pid, self->mmap.tid, self->mmap.start,
self->mmap.len, self->mmap.pgoff, self->mmap.filename);
if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
cpumode == PERF_RECORD_MISC_KERNEL) {
ret = event__process_kernel_mmap(self, session);
if (ret < 0)
goto out_problem;
return 0;
}
machine = perf_session__find_host_machine(session);
if (machine == NULL)
goto out_problem;
thread = perf_session__findnew(session, self->mmap.pid);
if (thread == NULL)
goto out_problem;
map = map__new(&machine->user_dsos, self->mmap.start,
self->mmap.len, self->mmap.pgoff,
self->mmap.pid, self->mmap.filename,
MAP__FUNCTION);
if (map == NULL)
goto out_problem;
thread__insert_map(thread, map);
return 0;
out_problem:
dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
return 0;
}
int event__process_task(event_t *self, struct sample_data *sample __used,
struct perf_session *session)
{
struct thread *thread = perf_session__findnew(session, self->fork.tid);
struct thread *parent = perf_session__findnew(session, self->fork.ptid);
dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid,
self->fork.ppid, self->fork.ptid);
if (self->header.type == PERF_RECORD_EXIT) {
perf_session__remove_thread(session, thread);
return 0;
}
if (thread == NULL || parent == NULL ||
thread__fork(thread, parent) < 0) {
dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
return -1;
}
return 0;
}
int event__process(event_t *event, struct sample_data *sample,
struct perf_session *session)
{
switch (event->header.type) {
case PERF_RECORD_COMM:
event__process_comm(event, sample, session);
break;
case PERF_RECORD_MMAP:
event__process_mmap(event, sample, session);
break;
case PERF_RECORD_FORK:
case PERF_RECORD_EXIT:
event__process_task(event, sample, session);
break;
default:
break;
}
return 0;
}
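/*
 * Resolve 'addr' to a map: pick the machine and map_groups to search based
 * on cpumode (host/guest, kernel/user), falling back to the kernel maps for
 * negative addresses that may really be vsyscall/vdso hits.
 */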
void thread__find_addr_map(struct thread *self,
struct perf_session *session, u8 cpumode,
enum map_type type, pid_t pid, u64 addr,
struct addr_location *al)
{
struct map_groups *mg = &self->mg;
struct machine *machine = NULL;
al->thread = self;
al->addr = addr;
al->cpumode = cpumode;
al->filtered = false;
if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
al->level = 'k';
machine = perf_session__find_host_machine(session);
if (machine == NULL) {
al->map = NULL;
return;
}
mg = &machine->kmaps;
} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
al->level = '.';
machine = perf_session__find_host_machine(session);
} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
al->level = 'g';
machine = perf_session__find_machine(session, pid);
if (machine == NULL) {
al->map = NULL;
return;
}
mg = &machine->kmaps;
} else {
/*
* 'u' means guest os user space.
* TODO: We don't support guest user space. Might support late.
*/
if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest)
al->level = 'u';
else
al->level = 'H';
al->map = NULL;
if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
!perf_guest)
al->filtered = true;
if ((cpumode == PERF_RECORD_MISC_USER ||
cpumode == PERF_RECORD_MISC_KERNEL) &&
!perf_host)
al->filtered = true;
return;
}
try_again:
al->map = map_groups__find(mg, type, al->addr);
if (al->map == NULL) {
/*
* If this is outside of all known maps, and is a negative
* address, try to look it up in the kernel dso, as it might be
* a vsyscall or vdso (which executes in user-mode).
*
* XXX This is nasty, we should have a symbol list in the
* "[vdso]" dso, but for now lets use the old trick of looking
* in the whole kernel symbol list.
*/
if ((long long)al->addr < 0 &&
cpumode == PERF_RECORD_MISC_KERNEL &&
machine && mg != &machine->kmaps) {
mg = &machine->kmaps;
goto try_again;
}
} else
al->addr = al->map->map_ip(al->map, al->addr);
}
void thread__find_addr_location(struct thread *self,
struct perf_session *session, u8 cpumode,
enum map_type type, pid_t pid, u64 addr,
struct addr_location *al,
symbol_filter_t filter)
{
thread__find_addr_map(self, session, cpumode, type, pid, addr, al);
if (al->map != NULL)
al->sym = map__find_symbol(al->map, al->addr, filter);
else
al->sym = NULL;
}
static void dso__calc_col_width(struct dso *self, struct hists *hists)
{
if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
(!symbol_conf.dso_list ||
strlist__has_entry(symbol_conf.dso_list, self->name))) {
u16 slen = dso__name_len(self);
hists__new_col_len(hists, HISTC_DSO, slen);
}
self->slen_calculated = 1;
}
int event__preprocess_sample(const event_t *self, struct perf_session *session,
struct addr_location *al, struct sample_data *data,
symbol_filter_t filter)
{
u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
struct thread *thread = perf_session__findnew(session, self->ip.pid);
if (thread == NULL)
return -1;
if (symbol_conf.comm_list &&
!strlist__has_entry(symbol_conf.comm_list, thread->comm))
goto out_filtered;
dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
/*
* Have we already created the kernel maps for the host machine?
*
* This should have happened earlier, when we processed the kernel MMAP
* events, but for older perf.data files there was no such thing, so do
* it now.
*/
if (cpumode == PERF_RECORD_MISC_KERNEL &&
session->host_machine.vmlinux_maps[MAP__FUNCTION] == NULL)
machine__create_kernel_maps(&session->host_machine);
thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
self->ip.pid, self->ip.ip, al);
dump_printf(" ...... dso: %s\n",
al->map ? al->map->dso->long_name :
al->level == 'H' ? "[hypervisor]" : "<not found>");
al->sym = NULL;
al->cpu = data->cpu;
if (al->map) {
if (symbol_conf.dso_list &&
(!al->map || !al->map->dso ||
!(strlist__has_entry(symbol_conf.dso_list,
al->map->dso->short_name) ||
(al->map->dso->short_name != al->map->dso->long_name &&
strlist__has_entry(symbol_conf.dso_list,
al->map->dso->long_name)))))
goto out_filtered;
/*
* We have to do this here as we may have a dso with no symbol
* hit that has a name longer than the ones with symbols
* sampled.
*/
if (!sort_dso.elide && !al->map->dso->slen_calculated)
dso__calc_col_width(al->map->dso, &session->hists);
al->sym = map__find_symbol(al->map, al->addr, filter);
} else {
const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
if (hists__col_len(&session->hists, HISTC_DSO) < unresolved_col_width &&
!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
!symbol_conf.dso_list)
hists__set_col_len(&session->hists, HISTC_DSO,
unresolved_col_width);
}
if (symbol_conf.sym_list && al->sym &&
!strlist__has_entry(symbol_conf.sym_list, al->sym->name))
goto out_filtered;
return 0;
out_filtered:
al->filtered = true;
return 0;
}
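/*
 * Non-SAMPLE events carry the sample_id_all fields appended at the end of
 * the record; parse them back to front according to the session sample_type.
 */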
static int event__parse_id_sample(const event_t *event,
struct perf_session *session,
struct sample_data *sample)
{
const u64 *array;
u64 type;
sample->cpu = sample->pid = sample->tid = -1;
sample->stream_id = sample->id = sample->time = -1ULL;
if (!session->sample_id_all)
return 0;
array = event->sample.array;
array += ((event->header.size -
sizeof(event->header)) / sizeof(u64)) - 1;
type = session->sample_type;
if (type & PERF_SAMPLE_CPU) {
u32 *p = (u32 *)array;
sample->cpu = *p;
array--;
}
if (type & PERF_SAMPLE_STREAM_ID) {
sample->stream_id = *array;
array--;
}
if (type & PERF_SAMPLE_ID) {
sample->id = *array;
array--;
}
if (type & PERF_SAMPLE_TIME) {
sample->time = *array;
array--;
}
if (type & PERF_SAMPLE_TID) {
u32 *p = (u32 *)array;
sample->pid = p[0];
sample->tid = p[1];
}
return 0;
}
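/*
 * Parse a PERF_RECORD_SAMPLE according to the session sample_type; any other
 * record type falls back to the trailing id fields parsed above.
 */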
int event__parse_sample(const event_t *event, struct perf_session *session,
struct sample_data *data)
{
const u64 *array;
u64 type;
if (event->header.type != PERF_RECORD_SAMPLE)
return event__parse_id_sample(event, session, data);
array = event->sample.array;
type = session->sample_type;
if (type & PERF_SAMPLE_IP) {
data->ip = event->ip.ip;
array++;
}
if (type & PERF_SAMPLE_TID) {
u32 *p = (u32 *)array;
data->pid = p[0];
data->tid = p[1];
array++;
}
if (type & PERF_SAMPLE_TIME) {
data->time = *array;
array++;
}
if (type & PERF_SAMPLE_ADDR) {
data->addr = *array;
array++;
}
data->id = -1ULL;
if (type & PERF_SAMPLE_ID) {
data->id = *array;
array++;
}
if (type & PERF_SAMPLE_STREAM_ID) {
data->stream_id = *array;
array++;
}
if (type & PERF_SAMPLE_CPU) {
u32 *p = (u32 *)array;
data->cpu = *p;
array++;
} else
data->cpu = -1;
if (type & PERF_SAMPLE_PERIOD) {
data->period = *array;
array++;
}
if (type & PERF_SAMPLE_READ) {
pr_debug("PERF_SAMPLE_READ is unsuported for now\n");
return -1;
}
if (type & PERF_SAMPLE_CALLCHAIN) {
data->callchain = (struct ip_callchain *)array;
array += 1 + data->callchain->nr;
}
if (type & PERF_SAMPLE_RAW) {
u32 *p = (u32 *)array;
data->raw_size = *p;
p++;
data->raw_data = p;
}
return 0;
}