Mirror of https://github.com/edk2-porting/linux-next.git (synced 2024-12-21 11:44:01 +08:00)
a4fb581b15
Currently, the callchains are displayed using a constant left margin. So
depending on the current sort dimension configuration, callchains may
appear to be well attached to the first sort dimension column field,
which is mostly the case, except when the first dimension of sorting is
done by comm, because these are right aligned.

This patch binds the callchain to the first letter in the first column,
whatever type of column it is (dso, comm, symbol).

Before:

     0.80%             perf  [k] __lock_acquire
                __lock_acquire
                lock_acquire
                |
                |--58.33%-- _spin_lock
                |          |
                |          |--28.57%-- inotify_should_send_event
                |          |          fsnotify
                |          |          __fsnotify_parent

After:

     0.80%             perf  [k] __lock_acquire
           __lock_acquire
           lock_acquire
           |
           |--58.33%-- _spin_lock
           |          |
           |          |--28.57%-- inotify_should_send_event
           |          |          fsnotify
           |          |          __fsnotify_parent

Also, for clarity, we no longer print the callchain as-is; instead:

 - If we have a top level ancestor in the callchain, start it with a
   first ascii hook.

   Before:

     0.80%             perf  [kernel]  [k] __lock_acquire
               __lock_acquire
               lock_acquire
               |
               |--58.33%-- _spin_lock
               |          |
               |          |--28.57%-- inotify_should_send_event
               |          |          fsnotify
               [..]       [..]

   After:

     0.80%             perf  [kernel]  [k] __lock_acquire
               |
               --- __lock_acquire
                   lock_acquire
                   |
                   |--58.33%-- _spin_lock
                   |          |
                   |          |--28.57%-- inotify_should_send_event
                   |          |          fsnotify
                   [..]       [..]

 - Otherwise, if we have several top level ancestors, then display
   these like we did before:

     1.69%           Xorg
               |
               |--21.21%-- vread_hpet
               |          0x7fffd85b46fc
               |          0x7fffd85b494d
               |          0x7f4fafb4e54d
               |
               |--15.15%-- exaOffscreenAlloc
               |
               |--9.09%-- I830WaitLpRing

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Anton Blanchard <anton@samba.org>
LKML-Reference: <1256246604-17156-2-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
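The rule above reduces to a small branch in the callchain printer: a single
top level ancestor gets the ascii "---" hook, while several top level
ancestors keep the "|--NN.NN%--" branch layout. Below is a minimal,
self-contained sketch of that rule only; the chain_node type, helper names,
margins and sample data are illustrative assumptions, not the perf
implementation.

#include <stdio.h>

struct chain_node {
	const char	*sym;
	double		percent;
};

/* Single root: hang the whole chain off one "---" hook. */
static void print_single_root(const struct chain_node *chain, int len, int left_margin)
{
	int i;

	printf("%*s|\n", left_margin, "");
	printf("%*s--- %s\n", left_margin, "", chain[0].sym);
	for (i = 1; i < len; i++)
		printf("%*s    %s\n", left_margin, "", chain[i].sym);
}

/* Several roots: keep the pre-existing "|--NN.NN%--" branch layout. */
static void print_multiple_roots(const struct chain_node *roots, int nr, int left_margin)
{
	int i;

	for (i = 0; i < nr; i++) {
		printf("%*s|\n", left_margin, "");
		printf("%*s|--%.2f%%-- %s\n", left_margin, "", roots[i].percent, roots[i].sym);
	}
}

int main(void)
{
	const struct chain_node one_root[] = {
		{ "__lock_acquire", 100.0 },
		{ "lock_acquire",   100.0 },
	};
	const struct chain_node many_roots[] = {
		{ "vread_hpet",        21.21 },
		{ "exaOffscreenAlloc", 15.15 },
		{ "I830WaitLpRing",     9.09 },
	};

	print_single_root(one_root, 2, 11);
	print_multiple_roots(many_roots, 3, 11);
	return 0;
}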
225 lines
4.4 KiB
C
#include "../perf.h"
|
|
#include <stdlib.h>
|
|
#include <stdio.h>
|
|
#include <string.h>
|
|
#include "thread.h"
|
|
#include "util.h"
|
|
#include "debug.h"
|
|
|
|
static struct rb_root threads;
|
|
static struct thread *last_match;
|
|
|
|
static struct thread *thread__new(pid_t pid)
{
	struct thread *self = calloc(1, sizeof(*self));

	if (self != NULL) {
		self->pid = pid;
		self->comm = malloc(32);
		if (self->comm)
			snprintf(self->comm, 32, ":%d", self->pid);
		self->maps = RB_ROOT;
		INIT_LIST_HEAD(&self->removed_maps);
	}

	return self;
}

int thread__set_comm(struct thread *self, const char *comm)
{
	if (self->comm)
		free(self->comm);
	self->comm = strdup(comm);
	return self->comm ? 0 : -ENOMEM;
}

int thread__comm_len(struct thread *self)
{
	if (!self->comm_len) {
		if (!self->comm)
			return 0;
		self->comm_len = strlen(self->comm);
	}

	return self->comm_len;
}

static size_t thread__fprintf(struct thread *self, FILE *fp)
{
	struct rb_node *nd;
	struct map *pos;
	size_t ret = fprintf(fp, "Thread %d %s\nCurrent maps:\n",
			     self->pid, self->comm);

	for (nd = rb_first(&self->maps); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct map, rb_node);
		ret += map__fprintf(pos, fp);
	}

	/* Keep accumulating into ret instead of overwriting it. */
	ret += fprintf(fp, "Removed maps:\n");

	list_for_each_entry(pos, &self->removed_maps, node)
		ret += map__fprintf(pos, fp);

	return ret;
}

struct thread *threads__findnew(pid_t pid)
{
	struct rb_node **p = &threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - PID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	if (last_match && last_match->pid == pid)
		return last_match;

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->pid == pid) {
			last_match = th;
			return th;
		}

		if (pid < th->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	th = thread__new(pid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &threads);
		last_match = th;
	}

	return th;
}

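/*
 * Not part of thread.c: a minimal usage sketch, assuming a hypothetical
 * comm-event handler in the caller. It shows the intended pairing of
 * threads__findnew() (lookup-or-create by pid, with the last_match
 * front-end cache) and thread__set_comm().
 */
static int example__process_comm_event(pid_t pid, const char *comm)
{
	struct thread *thread = threads__findnew(pid);

	if (thread == NULL || thread__set_comm(thread, comm) != 0)
		return -1;

	return 0;
}
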
struct thread *register_idle_thread(void)
{
	struct thread *thread = threads__findnew(0);

	if (!thread || thread__set_comm(thread, "swapper")) {
		fprintf(stderr, "problem inserting idle task.\n");
		exit(-1);
	}

	return thread;
}

static void thread__remove_overlappings(struct thread *self, struct map *map)
{
	struct rb_node *next = rb_first(&self->maps);

	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);
		next = rb_next(&pos->rb_node);

		if (!map__overlap(pos, map))
			continue;

		if (verbose >= 2) {
			printf("overlapping maps:\n");
			map__fprintf(map, stdout);
			map__fprintf(pos, stdout);
		}

		rb_erase(&pos->rb_node, &self->maps);
		/*
		 * We may have references to this map, for instance in some
		 * hist_entry instances, so just move them to a separate
		 * list.
		 */
		list_add_tail(&pos->node, &self->removed_maps);
	}
}

void maps__insert(struct rb_root *maps, struct map *map)
{
	struct rb_node **p = &maps->rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = map->start;
	struct map *m;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node);
		if (ip < m->start)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&map->rb_node, parent, p);
	rb_insert_color(&map->rb_node, maps);
}

struct map *maps__find(struct rb_root *maps, u64 ip)
{
	struct rb_node **p = &maps->rb_node;
	struct rb_node *parent = NULL;
	struct map *m;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node);
		if (ip < m->start)
			p = &(*p)->rb_left;
		else if (ip > m->end)
			p = &(*p)->rb_right;
		else
			return m;
	}

	return NULL;
}

void thread__insert_map(struct thread *self, struct map *map)
{
	thread__remove_overlappings(self, map);
	maps__insert(&self->maps, map);
}

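/*
 * Not part of thread.c: a hedged sketch of how the map helpers compose,
 * assuming a hypothetical mmap-event handler. thread__insert_map() first
 * parks any overlapping maps on ->removed_maps, then hangs the new map
 * off the thread; maps__find() later resolves a sample ip to the map
 * whose [start, end] range contains it, or NULL if the ip is unmapped.
 */
static struct map *example__insert_and_resolve(pid_t pid, struct map *map, u64 ip)
{
	struct thread *thread = threads__findnew(pid);

	if (thread == NULL)
		return NULL;

	thread__insert_map(thread, map);
	return maps__find(&thread->maps, ip);
}
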
int thread__fork(struct thread *self, struct thread *parent)
{
	struct rb_node *nd;

	if (self->comm)
		free(self->comm);
	self->comm = strdup(parent->comm);
	if (!self->comm)
		return -ENOMEM;

	for (nd = rb_first(&parent->maps); nd; nd = rb_next(nd)) {
		struct map *map = rb_entry(nd, struct map, rb_node);
		struct map *new = map__clone(map);
		if (!new)
			return -ENOMEM;
		thread__insert_map(self, new);
	}

	return 0;
}

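/*
 * Not part of thread.c: a hedged sketch of fork-event handling in the
 * caller. thread__fork() copies the parent's comm and clones its maps
 * into the child, so the child resolves addresses correctly until it
 * installs maps of its own.
 */
static int example__process_fork_event(pid_t parent_pid, pid_t child_pid)
{
	struct thread *parent = threads__findnew(parent_pid);
	struct thread *child  = threads__findnew(child_pid);

	if (parent == NULL || child == NULL)
		return -1;

	return thread__fork(child, parent);
}
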
size_t threads__fprintf(FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;

	for (nd = rb_first(&threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	return ret;
}
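
/*
 * Not part of thread.c: a hedged end-to-end sketch of the lifecycle this
 * API implies: register the idle thread up front, let event processing
 * populate the rbtree via threads__findnew() and friends, then dump every
 * known thread and its maps for debugging.
 */
static void example__dump_all_threads(void)
{
	register_idle_thread();
	/* ... event processing would populate the tree here ... */
	threads__fprintf(stdout);
}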