// SPDX-License-Identifier: GPL-2.0-only
/*
 * db-export.c: Support for exporting data suitable for import to a database
 * Copyright (c) 2014, Intel Corporation.
 */
|
#include <errno.h>
|
2019-07-04 22:21:24 +08:00
|
|
|
#include <stdlib.h>
|
2014-10-23 18:45:13 +08:00
|
|
|
|
2019-08-30 22:11:01 +08:00
|
|
|
#include "dso.h"
|
2014-10-23 18:45:13 +08:00
|
|
|
#include "evsel.h"
|
|
|
|
#include "machine.h"
|
|
|
|
#include "thread.h"
|
|
|
|
#include "comm.h"
|
|
|
|
#include "symbol.h"
|
2019-01-27 20:42:37 +08:00
|
|
|
#include "map.h"
|
2014-10-23 18:45:13 +08:00
|
|
|
#include "event.h"
|
2014-10-30 22:09:46 +08:00
|
|
|
#include "thread-stack.h"
|
2016-04-28 16:19:08 +08:00
|
|
|
#include "callchain.h"
|
2016-04-28 16:19:07 +08:00
|
|
|
#include "call-path.h"
|
2014-10-23 18:45:13 +08:00
|
|
|
#include "db-export.h"
|
2019-07-04 22:32:27 +08:00
|
|
|
#include <linux/zalloc.h>
|
2014-10-23 18:45:13 +08:00
|
|
|
|
|
|
|
int db_export__init(struct db_export *dbe)
|
|
|
|
{
|
|
|
|
memset(dbe, 0, sizeof(struct db_export));
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-10-30 22:09:46 +08:00
|
|
|
/* Release resources held by a db_export; safe if dbe->crp is NULL. */
void db_export__exit(struct db_export *dbe)
{
	/* Free the call/return processor attached for call-graph export. */
	call_return_processor__free(dbe->crp);
	dbe->crp = NULL;
}
|
|
|
|
|
2019-07-21 19:23:51 +08:00
|
|
|
int db_export__evsel(struct db_export *dbe, struct evsel *evsel)
|
2014-10-23 18:45:13 +08:00
|
|
|
{
|
|
|
|
if (evsel->db_id)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
evsel->db_id = ++dbe->evsel_last_db_id;
|
|
|
|
|
|
|
|
if (dbe->export_evsel)
|
|
|
|
return dbe->export_evsel(dbe, evsel);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int db_export__machine(struct db_export *dbe, struct machine *machine)
|
|
|
|
{
|
|
|
|
if (machine->db_id)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
machine->db_id = ++dbe->machine_last_db_id;
|
|
|
|
|
|
|
|
if (dbe->export_machine)
|
|
|
|
return dbe->export_machine(dbe, machine);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Export a thread once, recording (when available) the database ID of its
 * main thread so the exporter can relate the two.
 */
int db_export__thread(struct db_export *dbe, struct thread *thread,
		      struct machine *machine, struct thread *main_thread)
{
	u64 main_thread_db_id = 0;

	/* Already exported - nothing to do. */
	if (thread__db_id(thread))
		return 0;

	thread__set_db_id(thread, ++dbe->thread_last_db_id);

	/*
	 * Callers are expected to have exported main_thread already (see
	 * db_export__threads()), so its db_id is valid here.
	 */
	if (main_thread)
		main_thread_db_id = thread__db_id(main_thread);

	if (dbe->export_thread)
		return dbe->export_thread(dbe, thread, main_thread_db_id,
					  machine);

	return 0;
}
|
|
|
|
|
2019-07-10 16:58:00 +08:00
|
|
|
static int __db_export__comm(struct db_export *dbe, struct comm *comm,
|
|
|
|
struct thread *thread)
|
|
|
|
{
|
|
|
|
comm->db_id = ++dbe->comm_last_db_id;
|
|
|
|
|
|
|
|
if (dbe->export_comm)
|
|
|
|
return dbe->export_comm(dbe, comm, thread);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int db_export__comm(struct db_export *dbe, struct comm *comm,
|
|
|
|
struct thread *thread)
|
|
|
|
{
|
|
|
|
if (comm->db_id)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return __db_export__comm(dbe, comm, thread);
|
|
|
|
}
|
|
|
|
|
2019-07-10 16:57:51 +08:00
|
|
|
/*
 * Export the "exec" comm. The "exec" comm is the program / application command
 * name at the time it first executes. It is used to group threads for the same
 * program. Note that the main thread pid (or thread group id tgid) cannot be
 * used because it does not change when a new program is exec'ed.
 */
int db_export__exec_comm(struct db_export *dbe, struct comm *comm,
			 struct thread *main_thread)
{
	int err;

	/* Already exported - nothing to do. */
	if (comm->db_id)
		return 0;

	err = __db_export__comm(dbe, comm, main_thread);
	if (err)
		return err;

	/*
	 * Record the main thread for this comm. Note that the main thread can
	 * have many "exec" comms because there will be a new one every time it
	 * exec's. An "exec" comm however will only ever have 1 main thread.
	 * That is different to any other threads for that same program because
	 * exec() will effectively kill them, so the relationship between the
	 * "exec" comm and non-main threads is 1-to-1. That is why
	 * db_export__comm_thread() is called here for the main thread, but it
	 * is called for non-main threads when they are exported.
	 */
	return db_export__comm_thread(dbe, comm, main_thread);
}
|
|
|
|
|
|
|
|
int db_export__comm_thread(struct db_export *dbe, struct comm *comm,
|
|
|
|
struct thread *thread)
|
|
|
|
{
|
|
|
|
u64 db_id;
|
|
|
|
|
|
|
|
db_id = ++dbe->comm_thread_last_db_id;
|
|
|
|
|
|
|
|
if (dbe->export_comm_thread)
|
|
|
|
return dbe->export_comm_thread(dbe, db_id, comm, thread);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int db_export__dso(struct db_export *dbe, struct dso *dso,
|
|
|
|
struct machine *machine)
|
|
|
|
{
|
2024-05-05 05:38:01 +08:00
|
|
|
if (dso__db_id(dso))
|
2014-10-23 18:45:13 +08:00
|
|
|
return 0;
|
|
|
|
|
2024-05-05 05:38:01 +08:00
|
|
|
dso__set_db_id(dso, ++dbe->dso_last_db_id);
|
2014-10-23 18:45:13 +08:00
|
|
|
|
|
|
|
if (dbe->export_dso)
|
|
|
|
return dbe->export_dso(dbe, dso, machine);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int db_export__symbol(struct db_export *dbe, struct symbol *sym,
|
|
|
|
struct dso *dso)
|
|
|
|
{
|
|
|
|
u64 *sym_db_id = symbol__priv(sym);
|
|
|
|
|
|
|
|
if (*sym_db_id)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
*sym_db_id = ++dbe->symbol_last_db_id;
|
|
|
|
|
|
|
|
if (dbe->export_symbol)
|
|
|
|
return dbe->export_symbol(dbe, sym, dso);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Export the dso and symbol for an addr_location and return their database
 * IDs plus the address offset within the symbol. Outputs that cannot be
 * resolved are left untouched (callers pre-initialize them to 0).
 *
 * NOTE: if @al has a map but no symbol, a placeholder "unknown" symbol is
 * created and inserted into the dso, mutating @al->sym as a side effect.
 */
static int db_ids_from_al(struct db_export *dbe, struct addr_location *al,
			  u64 *dso_db_id, u64 *sym_db_id, u64 *offset)
{
	int err;

	if (al->map) {
		struct dso *dso = map__dso(al->map);

		err = db_export__dso(dbe, dso, maps__machine(al->maps));
		if (err)
			return err;
		*dso_db_id = dso__db_id(dso);

		if (!al->sym) {
			/* Synthesize a zero-length "unknown" symbol at al->addr. */
			al->sym = symbol__new(al->addr, 0, 0, 0, "unknown");
			if (al->sym)
				dso__insert_symbol(dso, al->sym);
		}

		if (al->sym) {
			/* Symbol db_id is stored in the symbol's private area. */
			u64 *db_id = symbol__priv(al->sym);

			err = db_export__symbol(dbe, al->sym, dso);
			if (err)
				return err;
			*sym_db_id = *db_id;
			*offset = al->addr - al->sym->start;
		}
	}

	return 0;
}
|
|
|
|
|
2016-04-28 16:19:08 +08:00
|
|
|
/*
 * Resolve a sample's callchain and fold it into the call path tree, exporting
 * each node's dso/symbol along the way. Returns the leaf call path, or NULL
 * if callchains are disabled, resolution fails, or the chain is empty.
 */
static struct call_path *call_path_from_sample(struct db_export *dbe,
					       struct machine *machine,
					       struct thread *thread,
					       struct perf_sample *sample,
					       struct evsel *evsel)
{
	u64 kernel_start = machine__kernel_start(machine);
	struct call_path *current = &dbe->cpr->call_path;
	enum chain_order saved_order = callchain_param.order;
	struct callchain_cursor *cursor;
	int err;

	if (!symbol_conf.use_callchain || !sample->callchain)
		return NULL;

	/*
	 * Since the call path tree must be built starting with the root, we
	 * must use ORDER_CALLER for call chain resolution, in order to
	 * process the callchain starting with the root node and ending with
	 * the leaf.
	 */
	callchain_param.order = ORDER_CALLER;
	cursor = get_tls_callchain_cursor();
	err = thread__resolve_callchain(thread, cursor, evsel,
					sample, NULL, NULL, PERF_MAX_STACK_DEPTH);
	if (err) {
		/* Restore the global ordering before bailing out. */
		callchain_param.order = saved_order;
		return NULL;
	}
	callchain_cursor_commit(cursor);

	/* Walk the resolved chain root-to-leaf. */
	while (1) {
		struct callchain_cursor_node *node;
		struct addr_location al;
		u64 dso_db_id = 0, sym_db_id = 0, offset = 0;

		node = callchain_cursor_current(cursor);
		if (!node)
			break;

		/*
		 * Handle export of symbol and dso for this node by
		 * constructing an addr_location struct and then passing it to
		 * db_ids_from_al() to perform the export.
		 */
		addr_location__init(&al);
		al.sym = node->ms.sym;
		al.map = map__get(node->ms.map);
		al.maps = maps__get(thread__maps(thread));
		al.addr = node->ip;

		if (al.map && !al.sym)
			al.sym = dso__find_symbol(map__dso(al.map), al.addr);

		db_ids_from_al(dbe, &al, &dso_db_id, &sym_db_id, &offset);

		/* add node to the call path tree if it doesn't exist */
		current = call_path__findnew(dbe->cpr, current,
					     al.sym, node->ip,
					     kernel_start);

		callchain_cursor_advance(cursor);
		/* Drops the map/maps references taken above. */
		addr_location__exit(&al);
	}

	/* Reset the callchain order to its prior value. */
	callchain_param.order = saved_order;

	if (current == &dbe->cpr->call_path) {
		/* Bail because the callchain was empty. */
		return NULL;
	}

	return current;
}
|
|
|
|
|
2014-10-30 22:09:43 +08:00
|
|
|
/* Export one (branch_type, name) pair via the callback, if one is set. */
int db_export__branch_type(struct db_export *dbe, u32 branch_type,
			   const char *name)
{
	return dbe->export_branch_type ?
	       dbe->export_branch_type(dbe, branch_type, name) : 0;
}
|
|
|
|
|
2019-07-10 16:58:07 +08:00
|
|
|
/*
 * Export a thread together with everything it depends on: its main thread,
 * the main thread's "exec" comm (returned via @comm_ptr), the comm/thread
 * association for a newly-seen non-main thread, and finally the thread's
 * current comm. Ordering matters - dependencies are exported first.
 */
static int db_export__threads(struct db_export *dbe, struct thread *thread,
			      struct thread *main_thread,
			      struct machine *machine, struct comm **comm_ptr)
{
	struct comm *comm = NULL;
	struct comm *curr_comm;
	int err;

	if (main_thread) {
		/*
		 * A thread has a reference to the main thread, so export the
		 * main thread first.
		 */
		err = db_export__thread(dbe, main_thread, machine, main_thread);
		if (err)
			return err;
		/*
		 * Export comm before exporting the non-main thread because
		 * db_export__comm_thread() can be called further below.
		 */
		comm = machine__thread_exec_comm(machine, main_thread);
		if (comm) {
			err = db_export__exec_comm(dbe, comm, main_thread);
			if (err)
				return err;
			*comm_ptr = comm;
		}
	}

	if (thread != main_thread) {
		/*
		 * For a non-main thread, db_export__comm_thread() must be
		 * called only if thread has not previously been exported.
		 */
		bool export_comm_thread = comm && !thread__db_id(thread);

		err = db_export__thread(dbe, thread, machine, main_thread);
		if (err)
			return err;

		if (export_comm_thread) {
			err = db_export__comm_thread(dbe, comm, thread);
			if (err)
				return err;
		}
	}

	/* Finally export the thread's current comm, if it has one. */
	curr_comm = thread__comm(thread);
	if (curr_comm)
		return db_export__comm(dbe, curr_comm, thread);

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Export a sample and all the entities it references (evsel, machine,
 * threads, comms, dso/symbol IDs, call path), then invoke the export_sample
 * callback with the assembled export_sample record.
 */
int db_export__sample(struct db_export *dbe, union perf_event *event,
		      struct perf_sample *sample, struct evsel *evsel,
		      struct addr_location *al, struct addr_location *addr_al)
{
	struct thread *thread = al->thread;
	struct export_sample es = {
		.event = event,
		.sample = sample,
		.evsel = evsel,
		.al = al,
	};
	struct thread *main_thread;
	struct comm *comm = NULL;
	struct machine *machine;
	int err;

	/* Export dependencies first so their db_ids are valid below. */
	err = db_export__evsel(dbe, evsel);
	if (err)
		return err;

	machine = maps__machine(al->maps);
	err = db_export__machine(dbe, machine);
	if (err)
		return err;

	/* Takes a reference; released at out_put. */
	main_thread = thread__main_thread(machine, thread);

	err = db_export__threads(dbe, thread, main_thread, machine, &comm);
	if (err)
		goto out_put;

	if (comm)
		es.comm_db_id = comm->db_id;

	es.db_id = ++dbe->sample_last_db_id;

	err = db_ids_from_al(dbe, al, &es.dso_db_id, &es.sym_db_id, &es.offset);
	if (err)
		goto out_put;

	/* With a call path root, export this sample's callchain too. */
	if (dbe->cpr) {
		struct call_path *cp = call_path_from_sample(dbe, machine,
							     thread, sample,
							     evsel);
		if (cp) {
			db_export__call_path(dbe, cp);
			es.call_path_id = cp->db_id;
		}
	}

	/* Branch samples carry a second (target) address location. */
	if (addr_al) {
		err = db_ids_from_al(dbe, addr_al, &es.addr_dso_db_id,
				     &es.addr_sym_db_id, &es.addr_offset);
		if (err)
			goto out_put;
		if (dbe->crp) {
			/* Feed the call/return processor for call-graph export. */
			err = thread_stack__process(thread, comm, sample, al,
						    addr_al, es.db_id,
						    dbe->crp);
			if (err)
				goto out_put;
		}
	}

	if (dbe->export_sample)
		err = dbe->export_sample(dbe, &es);

out_put:
	thread__put(main_thread);
	return err;
}
|
2014-10-30 22:09:43 +08:00
|
|
|
|
|
|
|
/*
 * Human-readable names for branch type flag combinations, used by
 * db_export__branch_types(). The table is terminated by a NULL name.
 */
static struct {
	u32 branch_type;
	const char *name;
} branch_types[] = {
	{0, "no branch"},
	{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL, "call"},
	{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN, "return"},
	{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CONDITIONAL, "conditional jump"},
	{PERF_IP_FLAG_BRANCH, "unconditional jump"},
	{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_INTERRUPT,
	 "software interrupt"},
	{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN | PERF_IP_FLAG_INTERRUPT,
	 "return from interrupt"},
	{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_SYSCALLRET,
	 "system call"},
	{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN | PERF_IP_FLAG_SYSCALLRET,
	 "return from system call"},
	{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_ASYNC, "asynchronous branch"},
	{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
	 PERF_IP_FLAG_INTERRUPT, "hardware interrupt"},
	{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT, "transaction abort"},
	{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TRACE_BEGIN, "trace begin"},
	{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TRACE_END, "trace end"},
	{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_VMENTRY, "vm entry"},
	{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_VMEXIT, "vm exit"},
	{0, NULL}
};
|
|
|
|
|
|
|
|
/*
 * Export the full branch-type name table, including synthesized
 * "trace begin / X" and "X / trace end" variants. Stops at the first error.
 */
int db_export__branch_types(struct db_export *dbe)
{
	int i, err = 0;

	/* Export the base table entries. */
	for (i = 0; branch_types[i].name ; i++) {
		err = db_export__branch_type(dbe, branch_types[i].branch_type,
					     branch_types[i].name);
		if (err)
			break;
	}

	/* Add trace begin / end variants */
	for (i = 0; branch_types[i].name ; i++) {
		const char *name = branch_types[i].name;
		u32 type = branch_types[i].branch_type;
		char buf[64];

		/* Skip plain jumps and entries already carrying trace flags. */
		if (type == PERF_IP_FLAG_BRANCH ||
		    (type & (PERF_IP_FLAG_TRACE_BEGIN | PERF_IP_FLAG_TRACE_END)))
			continue;

		snprintf(buf, sizeof(buf), "trace begin / %s", name);
		err = db_export__branch_type(dbe, type | PERF_IP_FLAG_TRACE_BEGIN, buf);
		if (err)
			break;

		snprintf(buf, sizeof(buf), "%s / trace end", name);
		err = db_export__branch_type(dbe, type | PERF_IP_FLAG_TRACE_END, buf);
		if (err)
			break;
	}

	return err;
}
|
2014-10-30 22:09:46 +08:00
|
|
|
|
|
|
|
int db_export__call_path(struct db_export *dbe, struct call_path *cp)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
|
|
|
|
if (cp->db_id)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (cp->parent) {
|
|
|
|
err = db_export__call_path(dbe, cp->parent);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
cp->db_id = ++dbe->call_path_last_db_id;
|
|
|
|
|
|
|
|
if (dbe->export_call_path)
|
|
|
|
return dbe->export_call_path(dbe, cp);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-02-28 21:00:24 +08:00
|
|
|
/*
 * Export a call/return pair, ensuring its call path is exported first.
 * @parent_db_id, when supplied, links this record to its parent call; a
 * zero *parent_db_id is pre-allocated here so the parent can adopt it later.
 */
int db_export__call_return(struct db_export *dbe, struct call_return *cr,
			   u64 *parent_db_id)
{
	int err;

	/* The call path must exist in the database before this record. */
	err = db_export__call_path(dbe, cr->cp);
	if (err)
		return err;

	if (!cr->db_id)
		cr->db_id = ++dbe->call_return_last_db_id;

	if (parent_db_id) {
		/* Reserve an ID for a parent that has not been exported yet. */
		if (!*parent_db_id)
			*parent_db_id = ++dbe->call_return_last_db_id;
		cr->parent_db_id = *parent_db_id;
	}

	if (dbe->export_call_return)
		return dbe->export_call_return(dbe, cr);

	return 0;
}
|
2019-07-10 16:58:08 +08:00
|
|
|
|
|
|
|
/*
 * Look up a thread by pid/tid, export it (and its dependencies) and return
 * its database ID, exec comm and whether it is the idle task. Outputs are
 * left untouched for unknown threads (*db_id stays 0 in that case, since
 * callers pre-initialize it).
 */
static int db_export__pid_tid(struct db_export *dbe, struct machine *machine,
			      pid_t pid, pid_t tid, u64 *db_id,
			      struct comm **comm_ptr, bool *is_idle)
{
	/* Takes a reference; released at out_put. */
	struct thread *thread = machine__find_thread(machine, pid, tid);
	struct thread *main_thread;
	int err = 0;

	/* Unknown or comm-less threads are not exported (not an error). */
	if (!thread || !thread__comm_set(thread))
		goto out_put;

	/* pid 0 / tid 0 is the idle task. */
	*is_idle = !thread__pid(thread) && !thread__tid(thread);

	main_thread = thread__main_thread(machine, thread);

	err = db_export__threads(dbe, thread, main_thread, machine, comm_ptr);

	*db_id = thread__db_id(thread);

	thread__put(main_thread);
out_put:
	thread__put(thread);

	return err;
}
|
|
|
|
|
|
|
|
/*
 * Export a context-switch event. Thread "a" is the thread the sample belongs
 * to; for CPU-wide switch events thread "b" is the other side of the switch.
 * Depending on switch direction (out vs in), a/b map to the outgoing and
 * incoming thread/comm IDs passed to the export_context_switch callback.
 */
int db_export__switch(struct db_export *dbe, union perf_event *event,
		      struct perf_sample *sample, struct machine *machine)
{
	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
	bool out_preempt = out &&
		(event->header.misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT);
	/* bit 0: switch-out, bit 1: switch-out due to preemption */
	int flags = out | (out_preempt << 1);
	bool is_idle_a = false, is_idle_b = false;
	u64 th_a_id = 0, th_b_id = 0;
	u64 comm_out_id, comm_in_id;
	struct comm *comm_a = NULL;
	struct comm *comm_b = NULL;
	u64 th_out_id, th_in_id;
	u64 db_id;
	int err;

	err = db_export__machine(dbe, machine);
	if (err)
		return err;

	/* Thread "a": the thread this sample is attributed to. */
	err = db_export__pid_tid(dbe, machine, sample->pid, sample->tid,
				 &th_a_id, &comm_a, &is_idle_a);
	if (err)
		return err;

	/* CPU-wide events also identify the other thread in the switch. */
	if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
		pid_t pid = event->context_switch.next_prev_pid;
		pid_t tid = event->context_switch.next_prev_tid;

		err = db_export__pid_tid(dbe, machine, pid, tid, &th_b_id,
					 &comm_b, &is_idle_b);
		if (err)
			return err;
	}

	/*
	 * Do not export if both threads are unknown (i.e. not being traced),
	 * or one is unknown and the other is the idle task.
	 */
	if ((!th_a_id || is_idle_a) && (!th_b_id || is_idle_b))
		return 0;

	db_id = ++dbe->context_switch_last_db_id;

	/* Map a/b onto out/in according to the switch direction. */
	if (out) {
		th_out_id = th_a_id;
		th_in_id = th_b_id;
		comm_out_id = comm_a ? comm_a->db_id : 0;
		comm_in_id = comm_b ? comm_b->db_id : 0;
	} else {
		th_out_id = th_b_id;
		th_in_id = th_a_id;
		comm_out_id = comm_b ? comm_b->db_id : 0;
		comm_in_id = comm_a ? comm_a->db_id : 0;
	}

	if (dbe->export_context_switch)
		return dbe->export_context_switch(dbe, db_id, machine, sample,
						  th_out_id, comm_out_id,
						  th_in_id, comm_in_id, flags);
	return 0;
}
|