linux-next/tools/perf/util/unwind-libdw.c

perf report: Fix off-by-one for non-activation frames

Author: Milian Wolff <milian.wolff@kdab.com>
Commit: 1982ad48fc
Date:   2017-05-24 08:41:48 +02:00

As the documentation for dwfl_frame_pc() says, frames that are
not activation frames need to have their program counter
decremented by one to properly find the function of the caller.
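
To illustrate the rule, here is a minimal frame-callback sketch; the name
frame_cb is only illustrative, and the actual change lives in
frame_callback() in the file further below:

~~~~~~~~~~~~~~~
  #include <stdbool.h>
  #include <elfutils/libdwfl.h>

  static int frame_cb(Dwfl_Frame *state, void *arg __attribute__((unused)))
  {
    Dwarf_Addr pc;
    bool isactivation;

    if (!dwfl_frame_pc(state, &pc, &isactivation))
      return DWARF_CB_ABORT;

    /* Return addresses point past the call; step back into it so the
     * address resolves to the caller's source line. */
    if (!isactivation)
      --pc;

    /* ... resolve pc to map/symbol/srcline here ... */
    return DWARF_CB_OK;
  }
~~~~~~~~~~~~~~~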

This fixes many cases where perf report currently attributes
the cost to the next line. For example, take this code:

~~~~~~~~~~~~~~~
  #include <thread>
  #include <chrono>

  using namespace std;

  int main()
  {
    this_thread::sleep_for(chrono::milliseconds(1000));
    this_thread::sleep_for(chrono::milliseconds(100));
    this_thread::sleep_for(chrono::milliseconds(10));

    return 0;
  }
~~~~~~~~~~~~~~~

Now compile and record it:

~~~~~~~~~~~~~~~
  g++ -std=c++11 -g -O2 test.cpp
  echo 1 | sudo tee /proc/sys/kernel/sched_schedstats
  perf record \
    --event sched:sched_stat_sleep \
    --event sched:sched_process_exit \
    --event sched:sched_switch --call-graph=dwarf \
    --output perf.data.raw \
    ./a.out
  echo 0 | sudo tee /proc/sys/kernel/sched_schedstats
  perf inject --sched-stat --input perf.data.raw --output perf.data
~~~~~~~~~~~~~~~
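
The resulting perf.data can then be inspected with an invocation along the
lines of the one used for the signal example further down (the exact options
here are an assumption, not part of the original recipe):

~~~~~~~~~~~~~~~
  perf report --stdio --no-children -g srcline -s srcline --input perf.data
~~~~~~~~~~~~~~~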

Before this patch, the report clearly shows the off-by-one issue.
Most notably, the last sleep invocation is incorrectly attributed
to the "return 0;" line:

~~~~~~~~~~~~~~~
  Overhead  Source:Line
  ........  ...........

   100.00%  core.c:0
            |
            ---__schedule core.c:0
               schedule
               do_nanosleep hrtimer.c:0
               hrtimer_nanosleep
               sys_nanosleep
               entry_SYSCALL_64_fastpath .tmp_entry_64.o:0
               __nanosleep_nocancel .:0
               std::this_thread::sleep_for<long, std::ratio<1l, 1000l> > thread:323
               |
               |--90.08%--main test.cpp:9
               |          __libc_start_main
               |          _start
               |
               |--9.01%--main test.cpp:10
               |          __libc_start_main
               |          _start
               |
                --0.91%--main test.cpp:13
                          __libc_start_main
                          _start
~~~~~~~~~~~~~~~

With this patch applied, the issue is fixed and the report becomes
much more usable:

~~~~~~~~~~~~~~~
  Overhead  Source:Line
  ........  ...........

   100.00%  core.c:0
            |
            ---__schedule core.c:0
               schedule
               do_nanosleep hrtimer.c:0
               hrtimer_nanosleep
               sys_nanosleep
               entry_SYSCALL_64_fastpath .tmp_entry_64.o:0
               __nanosleep_nocancel .:0
               std::this_thread::sleep_for<long, std::ratio<1l, 1000l> > thread:323
               |
               |--90.08%--main test.cpp:8
               |          __libc_start_main
               |          _start
               |
               |--9.01%--main test.cpp:9
               |          __libc_start_main
               |          _start
               |
                --0.91%--main test.cpp:10
                          __libc_start_main
                          _start
~~~~~~~~~~~~~~~

Similarly it works for signal frames:

~~~~~~~~~~~~~~~
  #include <signal.h>

  #define __noinline __attribute__((noinline))

  __noinline void bar(void)
  {
    volatile long cnt = 0;

    for (cnt = 0; cnt < 100000000; cnt++);
  }

  __noinline void foo(void)
  {
    bar();
  }

  void sig_handler(int sig)
  {
    foo();
  }

  int main(void)
  {
    signal(SIGUSR1, sig_handler);
    raise(SIGUSR1);

    foo();
    return 0;
  }
~~~~~~~~~~~~~~~~

Before, the report wrongly points to `signal.c:29` after raise():

~~~~~~~~~~~~~~~~
  $ perf report --stdio --no-children -g srcline -s srcline
  ...
   100.00%  signal.c:11
            |
            ---bar signal.c:11
               |
               |--50.49%--main signal.c:29
               |          __libc_start_main
               |          _start
               |
                --49.51%--0x33a8f
                          raise .:0
                          main signal.c:29
                          __libc_start_main
                          _start
~~~~~~~~~~~~~~~~

With this patch applied, the issue is fixed and we instead get:

~~~~~~~~~~~~~~~~
   100.00%  signal   signal            [.] bar
            |
            ---bar signal.c:11
               |
               |--50.49%--main signal.c:29
               |          __libc_start_main
               |          _start
               |
                --49.51%--0x33a8f
                          raise .:0
                          main signal.c:27
                          __libc_start_main
                          _start
~~~~~~~~~~~~~~~~

Note how this patch fixes this issue for both unwinding methods, i.e.
both dwfl and libunwind. The former case is straightforward thanks
to dwfl_frame_pc(). For libunwind, we replicate that functionality via
unw_is_signal_frame() for any but the very first frame.
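
For reference, a rough sketch of that libunwind-side logic; the helper name
collect_return_addrs and the ips/max bookkeeping are illustrative only, not
the exact perf code:

~~~~~~~~~~~~~~~
  #include <libunwind.h>

  /* The sampled (first) frame keeps its IP as-is; it is taken from the
   * register dump before this loop runs. */
  static int collect_return_addrs(unw_cursor_t *c, unw_word_t *ips, int max)
  {
    int i = 0;

    while (i < max && unw_step(c) > 0) {
      unw_word_t ip;

      unw_get_reg(c, UNW_REG_IP, &ip);
      /* Non-signal frames hold return addresses; step back into the
       * call instruction so the address maps to the call site's line. */
      if (unw_is_signal_frame(c) <= 0)
        --ip;
      ips[i++] = ip;
    }
    return i;
  }
~~~~~~~~~~~~~~~

Decrementing by a single byte is enough: any address inside the call
instruction resolves to the call site's source line.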

Signed-off-by: Milian Wolff <milian.wolff@kdab.com>
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Yao Jin <yao.jin@linux.intel.com>
Cc: kernel-team@lge.com
Link: http://lkml.kernel.org/r/20170524062129.32529-4-namhyung@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>

tools/perf/util/unwind-libdw.c:

#include <linux/compiler.h>
#include <elfutils/libdw.h>
#include <elfutils/libdwfl.h>
#include <inttypes.h>
#include <errno.h>
#include "debug.h"
#include "unwind.h"
#include "unwind-libdw.h"
#include "machine.h"
#include "thread.h"
#include <linux/types.h>
#include "event.h"
#include "perf_regs.h"
#include "callchain.h"
#include "util.h"
static char *debuginfo_path;
static const Dwfl_Callbacks offline_callbacks = {
.find_debuginfo = dwfl_standard_find_debuginfo,
.debuginfo_path = &debuginfo_path,
.section_address = dwfl_offline_section_address,
};
static int __report_module(struct addr_location *al, u64 ip,
			   struct unwind_info *ui)
{
	Dwfl_Module *mod;
	struct dso *dso = NULL;

	thread__find_addr_location(ui->thread,
				   PERF_RECORD_MISC_USER,
				   MAP__FUNCTION, ip, al);

	if (al->map)
		dso = al->map->dso;

	if (!dso)
		return 0;

	mod = dwfl_addrmodule(ui->dwfl, ip);
	if (!mod)
		mod = dwfl_report_elf(ui->dwfl, dso->short_name,
				      dso->long_name, -1, al->map->start,
				      false);

	return mod && dwfl_addrmodule(ui->dwfl, ip) == mod ? 0 : -1;
}

static int report_module(u64 ip, struct unwind_info *ui)
{
	struct addr_location al;

	return __report_module(&al, ip, ui);
}

/*
 * Store all entries within entries array,
 * we will process it after we finish unwind.
 */
static int entry(u64 ip, struct unwind_info *ui)
{
	struct unwind_entry *e = &ui->entries[ui->idx++];
	struct addr_location al;

	if (__report_module(&al, ip, ui))
		return -1;

	e->ip  = al.addr;
	e->map = al.map;
	e->sym = al.sym;

	pr_debug("unwind: %s:ip = 0x%" PRIx64 " (0x%" PRIx64 ")\n",
		 al.sym ? al.sym->name : "''",
		 ip,
		 al.map ? al.map->map_ip(al.map, ip) : (u64) 0);
	return 0;
}

static pid_t next_thread(Dwfl *dwfl, void *arg, void **thread_argp)
{
	/* We want only single thread to be processed. */
	if (*thread_argp != NULL)
		return 0;

	*thread_argp = arg;
	return dwfl_pid(dwfl);
}

static int access_dso_mem(struct unwind_info *ui, Dwarf_Addr addr,
			  Dwarf_Word *data)
{
	struct addr_location al;
	ssize_t size;

	thread__find_addr_map(ui->thread, PERF_RECORD_MISC_USER,
			      MAP__FUNCTION, addr, &al);
	if (!al.map) {
		/*
		 * We've seen cases (softice) where DWARF unwinder went
		 * through non executable mmaps, which we need to lookup
		 * in MAP__VARIABLE tree.
		 */
		thread__find_addr_map(ui->thread, PERF_RECORD_MISC_USER,
				      MAP__VARIABLE, addr, &al);
	}

	if (!al.map) {
		pr_debug("unwind: no map for %lx\n", (unsigned long)addr);
		return -1;
	}

	if (!al.map->dso)
		return -1;

	size = dso__data_read_addr(al.map->dso, al.map, ui->machine,
				   addr, (u8 *) data, sizeof(*data));

	return !(size == sizeof(*data));
}

static bool memory_read(Dwfl *dwfl __maybe_unused, Dwarf_Addr addr, Dwarf_Word *result,
			void *arg)
{
	struct unwind_info *ui = arg;
	struct stack_dump *stack = &ui->sample->user_stack;
	u64 start, end;
	int offset;
	int ret;

	ret = perf_reg_value(&start, &ui->sample->user_regs, PERF_REG_SP);
	if (ret)
		return false;

	end = start + stack->size;

	/* Check overflow. */
	if (addr + sizeof(Dwarf_Word) < addr)
		return false;

	if (addr < start || addr + sizeof(Dwarf_Word) > end) {
		ret = access_dso_mem(ui, addr, result);
		if (ret) {
			pr_debug("unwind: access_mem 0x%" PRIx64 " not inside range"
				 " 0x%" PRIx64 "-0x%" PRIx64 "\n",
				 addr, start, end);
			return false;
		}
		return true;
	}

	offset = addr - start;
	*result = *(Dwarf_Word *)&stack->data[offset];
	pr_debug("unwind: access_mem addr 0x%" PRIx64 ", val %lx, offset %d\n",
		 addr, (unsigned long)*result, offset);
	return true;
}

static const Dwfl_Thread_Callbacks callbacks = {
	.next_thread		= next_thread,
	.memory_read		= memory_read,
	.set_initial_registers	= libdw__arch_set_initial_registers,
};

static int
frame_callback(Dwfl_Frame *state, void *arg)
{
	struct unwind_info *ui = arg;
	Dwarf_Addr pc;
	bool isactivation;

	if (!dwfl_frame_pc(state, &pc, &isactivation)) {
		pr_err("%s", dwfl_errmsg(-1));
		return DWARF_CB_ABORT;
	}

	/*
	 * For non-activation frames the PC is a return address; decrement
	 * it so the address points into the call instruction and resolves
	 * to the caller's source line (see the commit message above).
	 */
	if (!isactivation)
		--pc;

	return entry(pc, ui) || !(--ui->max_stack) ?
	       DWARF_CB_ABORT : DWARF_CB_OK;
}

int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
			struct thread *thread,
			struct perf_sample *data,
			int max_stack)
{
	struct unwind_info *ui, ui_buf = {
		.sample		= data,
		.thread		= thread,
		.machine	= thread->mg->machine,
		.cb		= cb,
		.arg		= arg,
		.max_stack	= max_stack,
	};
	Dwarf_Word ip;
	int err = -EINVAL, i;

	if (!data->user_regs.regs)
		return -EINVAL;

	ui = zalloc(sizeof(ui_buf) + sizeof(ui_buf.entries[0]) * max_stack);
	if (!ui)
		return -ENOMEM;

	*ui = ui_buf;

	ui->dwfl = dwfl_begin(&offline_callbacks);
	if (!ui->dwfl)
		goto out;

	err = perf_reg_value(&ip, &data->user_regs, PERF_REG_IP);
	if (err)
		goto out;

	err = report_module(ip, ui);
	if (err)
		goto out;

	if (!dwfl_attach_state(ui->dwfl, EM_NONE, thread->tid, &callbacks, ui))
		goto out;

	err = dwfl_getthread_frames(ui->dwfl, thread->tid, frame_callback, ui);

	if (err && !ui->max_stack)
		err = 0;

	/*
	 * Display what we got based on the order setup.
	 */
	for (i = 0; i < ui->idx && !err; i++) {
		int j = i;

		if (callchain_param.order == ORDER_CALLER)
			j = ui->idx - i - 1;

		err = ui->entries[j].ip ? ui->cb(&ui->entries[j], ui->arg) : 0;
	}

out:
	if (err)
		pr_debug("unwind: failed with '%s'\n", dwfl_errmsg(-1));

	dwfl_end(ui->dwfl);
	free(ui);
	return 0;
}