perf thread: Add accessor functions for thread

Using accessors will make it easier to add reference count checking in
later patches.
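
For example, direct accesses such as thread->tid are replaced by
thread__tid(thread), with the accessors added to thread.h as trivial
inline wrappers (see the thread.h hunk at the end of this patch):

    static inline pid_t thread__tid(const struct thread *thread)
    {
            return thread->tid;
    }

    static inline void thread__set_tid(struct thread *thread, pid_t tid)
    {
            thread->tid = tid;
    }

Once 'struct thread' gains reference count checking, these wrappers are
the natural place to route accesses through the checked pointer, rather
than touching every call site.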

Committer notes:

thread->nsinfo wasn't wrapped because it is used together with
nsinfo__zput(), which does a trick: it drops the refcount on the field
and then sets the field to NULL. That doesn't work well with
thread__nsinfo(thread), which loses the &thread->nsinfo pointer.

When refcount checking is added to 'struct thread' later in this
series, nsinfo__zput(RC_CHK_ACCESS(thread)->nsinfo) will be used so
that the thread pointer is still checked.
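
To illustrate the problem, the zput helpers in perf typically look
roughly like this (a sketch based on the common __zput pattern; the
exact helper in namespaces.h may differ in detail):

    static inline void __nsinfo__zput(struct nsinfo **nsip)
    {
            if (nsip) {
                    nsinfo__put(*nsip);
                    *nsip = NULL;
            }
    }

    #define nsinfo__zput(nsi) __nsinfo__zput(&nsi)

So nsinfo__zput(thread->nsinfo) takes the address of the field and
clears it after dropping the reference, which a by-value
thread__nsinfo(thread) accessor cannot do.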

Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Ali Saidi <alisaidi@amazon.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Cc: Brian Robbins <brianrob@linux.microsoft.com>
Cc: Changbin Du <changbin.du@huawei.com>
Cc: Dmitrii Dolgov <9erthalion6@gmail.com>
Cc: Fangrui Song <maskray@google.com>
Cc: German Gomez <german.gomez@arm.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Ivan Babrou <ivan@cloudflare.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jing Zhang <renyu.zj@linux.alibaba.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Garry <john.g.garry@oracle.com>
Cc: K Prateek Nayak <kprateek.nayak@amd.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Miguel Ojeda <ojeda@kernel.org>
Cc: Mike Leach <mike.leach@linaro.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@amd.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Steinar H. Gunderson <sesse@google.com>
Cc: Suzuki Poulouse <suzuki.poulose@arm.com>
Cc: Wenyu Liu <liuwenyu7@huawei.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yang Jihong <yangjihong1@huawei.com>
Cc: Ye Xingchen <ye.xingchen@zte.com.cn>
Cc: Yuan Can <yuancan@huawei.com>
Cc: coresight@lists.linaro.org
Cc: linux-arm-kernel@lists.infradead.org
Link: https://lore.kernel.org/r/20230608232823.4027869-4-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Ian Rogers 2023-06-08 16:28:00 -07:00 committed by Arnaldo Carvalho de Melo
parent 7ee227f674
commit ee84a3032b
45 changed files with 485 additions and 279 deletions


@ -26,7 +26,7 @@ static int sample_ustack(struct perf_sample *sample,
sp = (unsigned long) regs[PERF_REG_ARM_SP];
map = maps__find(thread->maps, (u64)sp);
map = maps__find(thread__maps(thread), (u64)sp);
if (!map) {
pr_debug("failed to get stack map\n");
free(buf);


@ -26,7 +26,7 @@ static int sample_ustack(struct perf_sample *sample,
sp = (unsigned long) regs[PERF_REG_ARM64_SP];
map = maps__find(thread->maps, (u64)sp);
map = maps__find(thread__maps(thread), (u64)sp);
if (!map) {
pr_debug("failed to get stack map\n");
free(buf);


@ -26,7 +26,7 @@ static int sample_ustack(struct perf_sample *sample,
sp = (unsigned long) regs[PERF_REG_POWERPC_R1];
map = maps__find(thread->maps, (u64)sp);
map = maps__find(thread__maps(thread), (u64)sp);
if (!map) {
pr_debug("failed to get stack map\n");
free(buf);


@ -26,7 +26,7 @@ static int sample_ustack(struct perf_sample *sample,
sp = (unsigned long) regs[PERF_REG_X86_SP];
map = maps__find(thread->maps, (u64)sp);
map = maps__find(thread__maps(thread), (u64)sp);
if (!map) {
pr_debug("failed to get stack map\n");
free(buf);


@ -293,7 +293,7 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
}
if (c2c.stitch_lbr)
al.thread->lbr_stitch_enable = true;
thread__set_lbr_stitch_enable(al.thread, true);
ret = sample__resolve_callchain(sample, &callchain_cursor, NULL,
evsel, &al, sysctl_perf_event_max_stack);
@ -1149,14 +1149,14 @@ pid_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
{
int width = c2c_width(fmt, hpp, he->hists);
return scnprintf(hpp->buf, hpp->size, "%*d", width, he->thread->pid_);
return scnprintf(hpp->buf, hpp->size, "%*d", width, thread__pid(he->thread));
}
static int64_t
pid_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *left, struct hist_entry *right)
{
return left->thread->pid_ - right->thread->pid_;
return thread__pid(left->thread) - thread__pid(right->thread);
}
static int64_t


@ -417,7 +417,7 @@ static struct dso *findnew_dso(int pid, int tid, const char *filename,
}
vdso = is_vdso_map(filename);
nsi = nsinfo__get(thread->nsinfo);
nsi = nsinfo__get(thread__nsinfo(thread));
if (vdso) {
/* The vdso maps are always on the host and not the


@ -964,7 +964,7 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
if (perf_kmem__skip_sample(sample))
return 0;
dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread__tid(thread));
if (evsel->handler != NULL) {
tracepoint_handler f = evsel->handler;


@ -292,7 +292,7 @@ static int process_sample_event(struct perf_tool *tool,
}
if (rep->stitch_lbr)
al.thread->lbr_stitch_enable = true;
thread__set_lbr_stitch_enable(al.thread, true);
if (symbol_conf.hide_unresolved && al.sym == NULL)
goto out_put;
@ -829,10 +829,10 @@ static struct task *tasks_list(struct task *task, struct machine *machine)
return NULL;
/* Last one in the chain. */
if (thread->ppid == -1)
if (thread__ppid(thread) == -1)
return task;
parent_thread = machine__find_thread(machine, -1, thread->ppid);
parent_thread = machine__find_thread(machine, -1, thread__ppid(thread));
if (!parent_thread)
return ERR_PTR(-ENOENT);
@ -869,12 +869,12 @@ static void task__print_level(struct task *task, FILE *fp, int level)
struct thread *thread = task->thread;
struct task *child;
int comm_indent = fprintf(fp, " %8d %8d %8d |%*s",
thread->pid_, thread->tid, thread->ppid,
level, "");
thread__pid(thread), thread__tid(thread),
thread__ppid(thread), level, "");
fprintf(fp, "%s\n", thread__comm_str(thread));
maps__fprintf_task(thread->maps, comm_indent, fp);
maps__fprintf_task(thread__maps(thread), comm_indent, fp);
if (!list_empty(&task->children)) {
list_for_each_entry(child, &task->children, list)


@ -916,12 +916,12 @@ static int replay_fork_event(struct perf_sched *sched,
if (verbose > 0) {
printf("fork event\n");
printf("... parent: %s/%d\n", thread__comm_str(parent), parent->tid);
printf("... child: %s/%d\n", thread__comm_str(child), child->tid);
printf("... parent: %s/%d\n", thread__comm_str(parent), thread__tid(parent));
printf("... child: %s/%d\n", thread__comm_str(child), thread__tid(child));
}
register_pid(sched, parent->tid, thread__comm_str(parent));
register_pid(sched, child->tid, thread__comm_str(child));
register_pid(sched, thread__tid(parent), thread__comm_str(parent));
register_pid(sched, thread__tid(child), thread__comm_str(child));
out_put:
thread__put(child);
thread__put(parent);
@ -1316,7 +1316,7 @@ static int latency_migrate_task_event(struct perf_sched *sched,
if (!atoms) {
if (thread_atoms_insert(sched, migrant))
goto out_put;
register_pid(sched, migrant->tid, thread__comm_str(migrant));
register_pid(sched, thread__tid(migrant), thread__comm_str(migrant));
atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
if (!atoms) {
pr_err("migration-event: Internal tree error");
@ -1359,10 +1359,13 @@ static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_
sched->all_runtime += work_list->total_runtime;
sched->all_count += work_list->nb_atoms;
if (work_list->num_merged > 1)
ret = printf(" %s:(%d) ", thread__comm_str(work_list->thread), work_list->num_merged);
else
ret = printf(" %s:%d ", thread__comm_str(work_list->thread), work_list->thread->tid);
if (work_list->num_merged > 1) {
ret = printf(" %s:(%d) ", thread__comm_str(work_list->thread),
work_list->num_merged);
} else {
ret = printf(" %s:%d ", thread__comm_str(work_list->thread),
thread__tid(work_list->thread));
}
for (i = 0; i < 24 - ret; i++)
printf(" ");
@ -1380,11 +1383,15 @@ static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_
static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
{
pid_t l_tid, r_tid;
if (l->thread == r->thread)
return 0;
if (l->thread->tid < r->thread->tid)
l_tid = thread__tid(l->thread);
r_tid = thread__tid(r->thread);
if (l_tid < r_tid)
return -1;
if (l->thread->tid > r->thread->tid)
if (l_tid > r_tid)
return 1;
return (int)(l->thread - r->thread);
}
@ -1679,14 +1686,14 @@ static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp));
color_fprintf(stdout, color, " %12s secs ", stimestamp);
if (new_shortname || tr->comm_changed || (verbose > 0 && sched_in->tid)) {
if (new_shortname || tr->comm_changed || (verbose > 0 && thread__tid(sched_in))) {
const char *pid_color = color;
if (thread__has_color(sched_in))
pid_color = COLOR_PIDS;
color_fprintf(stdout, pid_color, "%s => %s:%d",
tr->shortname, thread__comm_str(sched_in), sched_in->tid);
tr->shortname, thread__comm_str(sched_in), thread__tid(sched_in));
tr->comm_changed = false;
}
@ -1948,8 +1955,8 @@ static char *timehist_get_commstr(struct thread *thread)
{
static char str[32];
const char *comm = thread__comm_str(thread);
pid_t tid = thread->tid;
pid_t pid = thread->pid_;
pid_t tid = thread__tid(thread);
pid_t pid = thread__pid(thread);
int n;
if (pid == 0)
@ -2032,7 +2039,7 @@ static char task_state_char(struct thread *thread, int state)
unsigned bit = state ? ffs(state) : 0;
/* 'I' for idle */
if (thread->tid == 0)
if (thread__tid(thread) == 0)
return 'I';
return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
@ -2067,7 +2074,7 @@ static void timehist_print_sample(struct perf_sched *sched,
for (i = 0; i < max_cpus; ++i) {
/* flag idle times with 'i'; others are sched events */
if (i == sample->cpu)
c = (thread->tid == 0) ? 'i' : 's';
c = (thread__tid(thread) == 0) ? 'i' : 's';
else
c = ' ';
printf("%c", c);
@ -2094,7 +2101,7 @@ static void timehist_print_sample(struct perf_sched *sched,
if (sched->show_wakeups && !sched->show_next)
printf(" %-*s", comm_width, "");
if (thread->tid == 0)
if (thread__tid(thread) == 0)
goto out;
if (sched->show_callchain)
@ -2626,7 +2633,7 @@ static int timehist_sched_change_event(struct perf_tool *tool,
t = ptime->end;
}
if (!sched->idle_hist || thread->tid == 0) {
if (!sched->idle_hist || thread__tid(thread) == 0) {
if (!cpu_list || test_bit(sample->cpu, cpu_bitmap))
timehist_update_runtime_stats(tr, t, tprev);
@ -2634,7 +2641,7 @@ static int timehist_sched_change_event(struct perf_tool *tool,
struct idle_thread_runtime *itr = (void *)tr;
struct thread_runtime *last_tr;
BUG_ON(thread->tid != 0);
BUG_ON(thread__tid(thread) != 0);
if (itr->last_thread == NULL)
goto out;
@ -2719,7 +2726,7 @@ static void print_thread_runtime(struct thread *t,
float stddev;
printf("%*s %5d %9" PRIu64 " ",
comm_width, timehist_get_commstr(t), t->ppid,
comm_width, timehist_get_commstr(t), thread__ppid(t),
(u64) r->run_stats.n);
print_sched_time(r->total_run_time, 8);
@ -2739,7 +2746,7 @@ static void print_thread_waittime(struct thread *t,
struct thread_runtime *r)
{
printf("%*s %5d %9" PRIu64 " ",
comm_width, timehist_get_commstr(t), t->ppid,
comm_width, timehist_get_commstr(t), thread__ppid(t),
(u64) r->run_stats.n);
print_sched_time(r->total_run_time, 8);


@ -1142,7 +1142,7 @@ static int print_srccode(struct thread *thread, u8 cpumode, uint64_t addr)
if (!al.map)
return 0;
ret = map__fprintf_srccode(al.map, al.addr, stdout,
&thread->srccode_state);
thread__srccode_state(thread));
if (ret)
ret += printf("\n");
return ret;
@ -1439,7 +1439,7 @@ static int perf_sample__fprintf_callindent(struct perf_sample *sample,
* The 'return' has already been popped off the stack so the depth has
* to be adjusted to match the 'call'.
*/
if (thread->ts && sample->flags & PERF_IP_FLAG_RETURN)
if (thread__ts(thread) && sample->flags & PERF_IP_FLAG_RETURN)
depth += 1;
name = resolve_branch_sym(sample, evsel, thread, al, addr_al, &ip);
@ -1577,7 +1577,7 @@ static int perf_sample__fprintf_bts(struct perf_sample *sample,
printed += fprintf(fp, "\n");
if (PRINT_FIELD(SRCCODE)) {
int ret = map__fprintf_srccode(al->map, al->addr, stdout,
&thread->srccode_state);
thread__srccode_state(thread));
if (ret) {
printed += ret;
printed += printf("\n");
@ -2086,9 +2086,9 @@ static bool show_event(struct perf_sample *sample,
if (!symbol_conf.graph_function)
return true;
if (thread->filter) {
if (depth <= thread->filter_entry_depth) {
thread->filter = false;
if (thread__filter(thread)) {
if (depth <= thread__filter_entry_depth(thread)) {
thread__set_filter(thread, false);
return false;
}
return true;
@ -2105,8 +2105,8 @@ static bool show_event(struct perf_sample *sample,
while (*s) {
unsigned len = strcspn(s, ",");
if (nlen == len && !strncmp(name, s, len)) {
thread->filter = true;
thread->filter_entry_depth = depth;
thread__set_filter(thread, true);
thread__set_filter_entry_depth(thread, depth);
return true;
}
s += len;
@ -2186,7 +2186,7 @@ static void process_event(struct perf_script *script,
struct callchain_cursor *cursor = NULL;
if (script->stitch_lbr)
al->thread->lbr_stitch_enable = true;
thread__set_lbr_stitch_enable(al->thread, true);
if (symbol_conf.use_callchain && sample->callchain &&
thread__resolve_callchain(al->thread, &callchain_cursor, evsel,
@ -2241,7 +2241,7 @@ static void process_event(struct perf_script *script,
if (PRINT_FIELD(SRCCODE)) {
if (map__fprintf_srccode(al->map, al->addr, stdout,
&thread->srccode_state))
thread__srccode_state(thread)))
printf("\n");
}


@ -777,7 +777,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
return;
if (top->stitch_lbr)
al.thread->lbr_stitch_enable = true;
thread__set_lbr_stitch_enable(al.thread, true);
if (!machine->kptr_restrict_warned &&
symbol_conf.kptr_restrict &&


@ -1386,12 +1386,13 @@ static int thread__read_fd_path(struct thread *thread, int fd)
struct stat st;
int ret;
if (thread->pid_ == thread->tid) {
if (thread__pid(thread) == thread__tid(thread)) {
scnprintf(linkname, sizeof(linkname),
"/proc/%d/fd/%d", thread->pid_, fd);
"/proc/%d/fd/%d", thread__pid(thread), fd);
} else {
scnprintf(linkname, sizeof(linkname),
"/proc/%d/task/%d/fd/%d", thread->pid_, thread->tid, fd);
"/proc/%d/task/%d/fd/%d",
thread__pid(thread), thread__tid(thread), fd);
}
if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname))
@ -1559,7 +1560,7 @@ static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread
if (trace->multiple_threads) {
if (trace->show_comm)
printed += fprintf(fp, "%.14s/", thread__comm_str(thread));
printed += fprintf(fp, "%d ", thread->tid);
printed += fprintf(fp, "%d ", thread__tid(thread));
}
return printed;
@ -2205,7 +2206,8 @@ static void thread__update_stats(struct thread *thread, struct thread_trace *ttr
memset(new_errnos + stats->max_errno, 0, (err - stats->max_errno) * sizeof(u32));
} else {
pr_debug("Not enough memory for errno stats for thread \"%s\"(%d/%d), results will be incomplete\n",
thread__comm_str(thread), thread->pid_, thread->tid);
thread__comm_str(thread), thread__pid(thread),
thread__tid(thread));
return;
}
@ -2550,7 +2552,7 @@ errno_print: {
if (child != NULL) {
fprintf(trace->output, "%ld", ret);
if (child->comm_set)
if (thread__comm_set(child))
fprintf(trace->output, " (%s)", thread__comm_str(child));
thread__put(child);
}
@ -3616,14 +3618,16 @@ static int trace__set_filter_loop_pids(struct trace *trace)
struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]);
while (thread && nr < ARRAY_SIZE(pids)) {
struct thread *parent = machine__find_thread(trace->host, thread->ppid, thread->ppid);
struct thread *parent = machine__find_thread(trace->host,
thread__ppid(thread),
thread__ppid(thread));
if (parent == NULL)
break;
if (!strcmp(thread__comm_str(parent), "sshd") ||
strstarts(thread__comm_str(parent), "gnome-terminal")) {
pids[nr++] = parent->tid;
pids[nr++] = thread__tid(parent);
break;
}
thread = parent;
@ -4322,7 +4326,7 @@ static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trac
ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread->tid);
printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread__tid(thread));
printed += fprintf(fp, "%lu events, ", ttrace->nr_events);
printed += fprintf(fp, "%.1f%%", ratio);
if (ttrace->pfmaj)
@ -4344,7 +4348,9 @@ static unsigned long thread__nr_events(struct thread_trace *ttrace)
return ttrace ? ttrace->nr_events : 0;
}
DEFINE_RESORT_RB(threads, (thread__nr_events(a->thread->priv) < thread__nr_events(b->thread->priv)),
DEFINE_RESORT_RB(threads,
(thread__nr_events(thread__priv(a->thread)) <
thread__nr_events(thread__priv(b->thread))),
struct thread *thread;
)
{


@ -100,8 +100,8 @@ static PyObject *perf_sample_insn(PyObject *obj, PyObject *args)
if (!c)
return NULL;
if (c->sample->ip && !c->sample->insn_len && c->al->thread->maps) {
struct machine *machine = maps__machine(c->al->thread->maps);
if (c->sample->ip && !c->sample->insn_len && thread__maps(c->al->thread)) {
struct machine *machine = maps__machine(thread__maps(c->al->thread));
script_fetch_insn(c->sample, c->al->thread, machine);
}


@ -269,7 +269,7 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
len = map__end(al.map) - addr;
/* Read the object code using perf */
ret_len = dso__data_read_offset(dso, maps__machine(thread->maps),
ret_len = dso__data_read_offset(dso, maps__machine(thread__maps(thread)),
al.addr, buf1, len);
if (ret_len != len) {
pr_debug("dso__data_read_offset failed\n");


@ -211,7 +211,7 @@ void print_hists_out(struct hists *hists)
struct dso *dso = map__dso(he->ms.map);
pr_info("%2d: entry: %8s:%5d [%-8s] %20s: period = %"PRIu64"/%"PRIu64"\n",
i, thread__comm_str(he->thread), he->thread->tid,
i, thread__comm_str(he->thread), thread__tid(he->thread),
dso->short_name,
he->ms.sym->name, he->stat.period,
he->stat_acc ? he->stat_acc->period : 0);


@ -162,7 +162,6 @@ typedef int (*test_fn_t)(struct evsel *, struct machine *);
#define DSO(he) (map__dso(he->ms.map)->short_name)
#define SYM(he) (he->ms.sym->name)
#define CPU(he) (he->cpu)
#define PID(he) (he->thread->tid)
#define DEPTH(he) (he->callchain->max_depth)
#define CDSO(cl) (map__dso(cl->ms.map)->short_name)
#define CSYM(cl) (cl->ms.sym->name)


@ -128,7 +128,7 @@ typedef int (*test_fn_t)(struct evsel *, struct machine *);
#define DSO(he) (map__dso(he->ms.map)->short_name)
#define SYM(he) (he->ms.sym->name)
#define CPU(he) (he->cpu)
#define PID(he) (he->thread->tid)
#define PID(he) (thread__tid(he->thread))
/* default sort keys (no field) */
static int test1(struct evsel *evsel, struct machine *machine)


@ -7,16 +7,17 @@
# be in such tarball, which sometimes gets broken when we move files around,
# like when we made some files that were in tools/perf/ available to other tools/
# codebases by moving it to tools/include/, etc.
set -e
PERF=$1
cd ${PERF}/../..
make perf-targz-src-pkg > /dev/null
make perf-targz-src-pkg
TARBALL=$(ls -rt perf-*.tar.gz)
TMP_DEST=$(mktemp -d)
tar xf ${TARBALL} -C $TMP_DEST
rm -f ${TARBALL}
cd - > /dev/null
make -C $TMP_DEST/perf*/tools/perf > /dev/null
make -C $TMP_DEST/perf*/tools/perf
RC=$?
rm -rf ${TMP_DEST}
exit $RC


@ -42,13 +42,13 @@ static int test__thread_maps_share(struct test_suite *test __maybe_unused, int s
TEST_ASSERT_VAL("failed to create threads",
leader && t1 && t2 && t3 && other);
maps = leader->maps;
maps = thread__maps(leader);
TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(maps__refcnt(maps)), 4);
/* test the maps pointer is shared */
TEST_ASSERT_VAL("maps don't match", RC_CHK_ACCESS(maps) == RC_CHK_ACCESS(t1->maps));
TEST_ASSERT_VAL("maps don't match", RC_CHK_ACCESS(maps) == RC_CHK_ACCESS(t2->maps));
TEST_ASSERT_VAL("maps don't match", RC_CHK_ACCESS(maps) == RC_CHK_ACCESS(t3->maps));
TEST_ASSERT_VAL("maps don't match", RC_CHK_ACCESS(maps) == RC_CHK_ACCESS(thread__maps(t1)));
TEST_ASSERT_VAL("maps don't match", RC_CHK_ACCESS(maps) == RC_CHK_ACCESS(thread__maps(t2)));
TEST_ASSERT_VAL("maps don't match", RC_CHK_ACCESS(maps) == RC_CHK_ACCESS(thread__maps(t3)));
/*
* Verify the other leader was created by previous call.
@ -70,10 +70,11 @@ static int test__thread_maps_share(struct test_suite *test __maybe_unused, int s
machine__remove_thread(machine, other);
machine__remove_thread(machine, other_leader);
other_maps = other->maps;
other_maps = thread__maps(other);
TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(maps__refcnt(other_maps)), 2);
TEST_ASSERT_VAL("maps don't match", RC_CHK_ACCESS(other_maps) == RC_CHK_ACCESS(other_leader->maps));
TEST_ASSERT_VAL("maps don't match", RC_CHK_ACCESS(other_maps) ==
RC_CHK_ACCESS(thread__maps(other_leader)));
/* release thread group */
thread__put(t3);


@ -8,10 +8,10 @@ size_t syscall_arg__scnprintf_pid(char *bf, size_t size, struct syscall_arg *arg
struct thread *thread = machine__findnew_thread(trace->host, pid, pid);
if (thread != NULL) {
if (!thread->comm_set)
if (!thread__comm_set(thread))
thread__set_comm_from_proc(thread);
if (thread->comm_set)
if (thread__comm_set(thread))
printed += scnprintf(bf + printed, size - printed,
" (%s)", thread__comm_str(thread));
thread__put(thread);


@ -2533,13 +2533,15 @@ do_zoom_thread(struct hist_browser *browser, struct popup_action *act)
thread__zput(browser->hists->thread_filter);
ui_helpline__pop();
} else {
const char *comm_set_str =
thread__comm_set(thread) ? thread__comm_str(thread) : "";
if (hists__has(browser->hists, thread)) {
ui_helpline__fpush("To zoom out press ESC or ENTER + \"Zoom out of %s(%d) thread\"",
thread->comm_set ? thread__comm_str(thread) : "",
thread->tid);
comm_set_str, thread__tid(thread));
} else {
ui_helpline__fpush("To zoom out press ESC or ENTER + \"Zoom out of %s thread\"",
thread->comm_set ? thread__comm_str(thread) : "");
comm_set_str);
}
browser->hists->thread_filter = thread__get(thread);
@ -2557,20 +2559,19 @@ add_thread_opt(struct hist_browser *browser, struct popup_action *act,
char **optstr, struct thread *thread)
{
int ret;
const char *comm_set_str, *in_out;
if ((!hists__has(browser->hists, thread) &&
!hists__has(browser->hists, comm)) || thread == NULL)
return 0;
in_out = browser->hists->thread_filter ? "out of" : "into";
comm_set_str = thread__comm_set(thread) ? thread__comm_str(thread) : "";
if (hists__has(browser->hists, thread)) {
ret = asprintf(optstr, "Zoom %s %s(%d) thread",
browser->hists->thread_filter ? "out of" : "into",
thread->comm_set ? thread__comm_str(thread) : "",
thread->tid);
in_out, comm_set_str, thread__tid(thread));
} else {
ret = asprintf(optstr, "Zoom %s %s thread",
browser->hists->thread_filter ? "out of" : "into",
thread->comm_set ? thread__comm_str(thread) : "");
ret = asprintf(optstr, "Zoom %s %s thread", in_out, comm_set_str);
}
if (ret < 0)
return 0;


@ -885,7 +885,7 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
}
if (h->ms.map == NULL && verbose > 1) {
maps__fprintf(h->thread->maps, fp);
maps__fprintf(thread__maps(h->thread), fp);
fprintf(fp, "%.10s end\n", graph_dotted_line);
}
}


@ -254,9 +254,9 @@ static void arm_spe_set_pid_tid_cpu(struct arm_spe *spe,
}
if (speq->thread) {
speq->pid = speq->thread->pid_;
speq->pid = thread__pid(speq->thread);
if (queue->cpu == -1)
speq->cpu = speq->thread->cpu;
speq->cpu = thread__cpu(speq->thread);
}
}


@ -1311,7 +1311,7 @@ static void cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm,
tidq->tid);
if (tidq->thread)
tidq->pid = tidq->thread->pid_;
tidq->pid = thread__pid(tidq->thread);
}
int cs_etm__etmq_set_tid(struct cs_etm_queue *etmq,


@ -172,13 +172,13 @@ static int process_sample_event(struct perf_tool *tool,
output_json_format(out, false, 2, "{");
output_json_key_format(out, false, 3, "timestamp", "%" PRIi64, sample->time);
output_json_key_format(out, true, 3, "pid", "%i", al.thread->pid_);
output_json_key_format(out, true, 3, "tid", "%i", al.thread->tid);
output_json_key_format(out, true, 3, "pid", "%i", thread__pid(al.thread));
output_json_key_format(out, true, 3, "tid", "%i", thread__tid(al.thread));
if ((sample_type & PERF_SAMPLE_CPU))
output_json_key_format(out, true, 3, "cpu", "%i", sample->cpu);
else if (al.thread->cpu >= 0)
output_json_key_format(out, true, 3, "cpu", "%i", al.thread->cpu);
else if (thread__cpu(al.thread) >= 0)
output_json_key_format(out, true, 3, "cpu", "%i", thread__cpu(al.thread));
output_json_key_string(out, true, 3, "comm", thread__comm_str(al.thread));


@ -64,13 +64,13 @@ int db_export__thread(struct db_export *dbe, struct thread *thread,
{
u64 main_thread_db_id = 0;
if (thread->db_id)
if (thread__db_id(thread))
return 0;
thread->db_id = ++dbe->thread_last_db_id;
thread__set_db_id(thread, ++dbe->thread_last_db_id);
if (main_thread)
main_thread_db_id = main_thread->db_id;
main_thread_db_id = thread__db_id(main_thread);
if (dbe->export_thread)
return dbe->export_thread(dbe, thread, main_thread_db_id,
@ -251,7 +251,7 @@ static struct call_path *call_path_from_sample(struct db_export *dbe,
*/
al.sym = node->ms.sym;
al.map = node->ms.map;
al.maps = thread->maps;
al.maps = thread__maps(thread);
al.addr = node->ip;
if (al.map && !al.sym)
@ -321,7 +321,7 @@ static int db_export__threads(struct db_export *dbe, struct thread *thread,
* For a non-main thread, db_export__comm_thread() must be
* called only if thread has not previously been exported.
*/
bool export_comm_thread = comm && !thread->db_id;
bool export_comm_thread = comm && !thread__db_id(thread);
err = db_export__thread(dbe, thread, machine, main_thread);
if (err)
@ -529,16 +529,16 @@ static int db_export__pid_tid(struct db_export *dbe, struct machine *machine,
struct thread *main_thread;
int err = 0;
if (!thread || !thread->comm_set)
if (!thread || !thread__comm_set(thread))
goto out_put;
*is_idle = !thread->pid_ && !thread->tid;
*is_idle = !thread__pid(thread) && !thread__tid(thread);
main_thread = thread__main_thread(machine, thread);
err = db_export__threads(dbe, thread, main_thread, machine, comm_ptr);
*db_id = thread->db_id;
*db_id = thread__db_id(thread);
thread__put(main_thread);
out_put:


@ -197,8 +197,8 @@ static const __u8 *dlfilter__insn(void *ctx, __u32 *len)
if (!al->thread && machine__resolve(d->machine, al, d->sample) < 0)
return NULL;
if (al->thread->maps) {
struct machine *machine = maps__machine(al->thread->maps);
if (thread__maps(al->thread)) {
struct machine *machine = maps__machine(thread__maps(al->thread));
if (machine)
script_fetch_insn(d->sample, al->thread, machine);


@ -573,7 +573,7 @@ int perf_event__process(struct perf_tool *tool __maybe_unused,
struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
struct addr_location *al)
{
struct maps *maps = thread->maps;
struct maps *maps = thread__maps(thread);
struct machine *machine = maps__machine(maps);
bool load_map = false;
@ -639,7 +639,7 @@ struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
struct addr_location *al)
{
struct map *map = thread__find_map(thread, cpumode, addr, al);
struct machine *machine = maps__machine(thread->maps);
struct machine *machine = maps__machine(thread__maps(thread));
u8 addr_cpumode = machine__addr_cpumode(machine, cpumode, addr);
if (map || addr_cpumode == cpumode)
@ -696,7 +696,7 @@ int machine__resolve(struct machine *machine, struct addr_location *al,
if (thread == NULL)
return -1;
dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread__tid(thread));
thread__find_map(thread, sample->cpumode, sample->ip, al);
dso = al->map ? map__dso(al->map) : NULL;
dump_printf(" ...... dso: %s\n",


@ -2778,12 +2778,12 @@ int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool sh
if (hists__has(hists, thread)) {
printed += scnprintf(bf + printed, size - printed,
", Thread: %s(%d)",
(thread->comm_set ? thread__comm_str(thread) : ""),
thread->tid);
(thread__comm_set(thread) ? thread__comm_str(thread) : ""),
thread__tid(thread));
} else {
printed += scnprintf(bf + printed, size - printed,
", Thread: %s",
(thread->comm_set ? thread__comm_str(thread) : ""));
(thread__comm_set(thread) ? thread__comm_str(thread) : ""));
}
}
if (dso)


@ -456,7 +456,7 @@ static int intel_bts_process_queue(struct intel_bts_queue *btsq, u64 *timestamp)
thread = machine__find_thread(btsq->bts->machine, -1,
btsq->tid);
if (thread)
btsq->pid = thread->pid_;
btsq->pid = thread__pid(thread);
} else {
thread = machine__findnew_thread(btsq->bts->machine, btsq->pid,
btsq->tid);


@ -1428,13 +1428,13 @@ static int intel_pt_get_guest_from_sideband(struct intel_pt_queue *ptq)
ptq->guest_machine = machine;
}
vcpu = ptq->thread ? ptq->thread->guest_cpu : -1;
vcpu = ptq->thread ? thread__guest_cpu(ptq->thread) : -1;
if (vcpu < 0)
return -1;
tid = machine__get_current_tid(machine, vcpu);
if (ptq->guest_thread && ptq->guest_thread->tid != tid)
if (ptq->guest_thread && thread__tid(ptq->guest_thread) != tid)
thread__zput(ptq->guest_thread);
if (!ptq->guest_thread) {
@ -1444,7 +1444,7 @@ static int intel_pt_get_guest_from_sideband(struct intel_pt_queue *ptq)
}
ptq->guest_machine_pid = machine_pid;
ptq->guest_pid = ptq->guest_thread->pid_;
ptq->guest_pid = thread__pid(ptq->guest_thread);
ptq->guest_tid = tid;
ptq->vcpu = vcpu;
@ -1467,9 +1467,9 @@ static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid);
if (ptq->thread) {
ptq->pid = ptq->thread->pid_;
ptq->pid = thread__pid(ptq->thread);
if (queue->cpu == -1)
ptq->cpu = ptq->thread->cpu;
ptq->cpu = thread__cpu(ptq->thread);
}
if (pt->have_guest_sideband && intel_pt_get_guest_from_sideband(ptq)) {
@ -3074,7 +3074,7 @@ static void intel_pt_sample_set_pid_tid_cpu(struct intel_pt_queue *ptq,
if (ptq->pid == -1) {
ptq->thread = machine__find_thread(m, -1, ptq->tid);
if (ptq->thread)
ptq->pid = ptq->thread->pid_;
ptq->pid = thread__pid(ptq->thread);
return;
}


@ -799,17 +799,19 @@ static void jit_add_pid(struct machine *machine, pid_t pid)
return;
}
thread->priv = (void *)1;
thread__set_priv(thread, (void *)true);
}
static bool jit_has_pid(struct machine *machine, pid_t pid)
{
struct thread *thread = machine__find_thread(machine, pid, pid);
void *priv;
if (!thread)
return 0;
return false;
return (bool)thread->priv;
priv = thread__priv(thread);
return (bool)priv;
}
int
@ -833,7 +835,7 @@ jit_process(struct perf_session *session,
return 0;
}
nsi = nsinfo__get(thread->nsinfo);
nsi = nsinfo__get(thread__nsinfo(thread));
thread__put(thread);
/*


@ -77,13 +77,14 @@ static int thread_rb_node__cmp_tid(const void *key, const struct rb_node *nd)
{
int to_find = (int) *((pid_t *)key);
return to_find - (int)rb_entry(nd, struct thread_rb_node, rb_node)->thread->tid;
return to_find - (int)thread__tid(rb_entry(nd, struct thread_rb_node, rb_node)->thread);
}
static struct thread_rb_node *thread_rb_node__find(const struct thread *th,
struct rb_root *tree)
{
struct rb_node *nd = rb_find(&th->tid, tree, thread_rb_node__cmp_tid);
pid_t to_find = thread__tid(th);
struct rb_node *nd = rb_find(&to_find, tree, thread_rb_node__cmp_tid);
return rb_entry(nd, struct thread_rb_node, rb_node);
}
@ -440,7 +441,7 @@ static struct thread *findnew_guest_code(struct machine *machine,
return NULL;
/* Assume maps are set up if there are any */
if (maps__nr_maps(thread->maps))
if (maps__nr_maps(thread__maps(thread)))
return thread;
host_thread = machine__find_thread(host_machine, -1, pid);
@ -453,7 +454,7 @@ static struct thread *findnew_guest_code(struct machine *machine,
* Guest code can be found in hypervisor process at the same address
* so copy host maps.
*/
err = maps__clone(thread, host_thread->maps);
err = maps__clone(thread, thread__maps(host_thread));
thread__put(host_thread);
if (err)
goto out_err;
@ -518,45 +519,45 @@ static void machine__update_thread_pid(struct machine *machine,
{
struct thread *leader;
if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
if (pid == thread__pid(th) || pid == -1 || thread__pid(th) != -1)
return;
th->pid_ = pid;
thread__set_pid(th, pid);
if (th->pid_ == th->tid)
if (thread__pid(th) == thread__tid(th))
return;
leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
leader = __machine__findnew_thread(machine, thread__pid(th), thread__pid(th));
if (!leader)
goto out_err;
if (!leader->maps)
leader->maps = maps__new(machine);
if (!thread__maps(leader))
thread__set_maps(leader, maps__new(machine));
if (!leader->maps)
if (!thread__maps(leader))
goto out_err;
if (th->maps == leader->maps)
if (thread__maps(th) == thread__maps(leader))
return;
if (th->maps) {
if (thread__maps(th)) {
/*
* Maps are created from MMAP events which provide the pid and
* tid. Consequently there never should be any maps on a thread
* with an unknown pid. Just print an error if there are.
*/
if (!maps__empty(th->maps))
if (!maps__empty(thread__maps(th)))
pr_err("Discarding thread maps for %d:%d\n",
th->pid_, th->tid);
maps__put(th->maps);
thread__pid(th), thread__tid(th));
maps__put(thread__maps(th));
}
th->maps = maps__get(leader->maps);
thread__set_maps(th, maps__get(thread__maps(leader)));
out_put:
thread__put(leader);
return;
out_err:
pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
pr_err("Failed to join map groups for %d:%d\n", thread__pid(th), thread__tid(th));
goto out_put;
}
@ -573,7 +574,7 @@ __threads__get_last_match(struct threads *threads, struct machine *machine,
th = threads->last_match;
if (th != NULL) {
if (th->tid == tid) {
if (thread__tid(th) == tid) {
machine__update_thread_pid(machine, th, pid);
return thread__get(th);
}
@ -632,13 +633,13 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
parent = *p;
th = rb_entry(parent, struct thread_rb_node, rb_node)->thread;
if (th->tid == tid) {
if (thread__tid(th) == tid) {
threads__set_last_match(threads, th);
machine__update_thread_pid(machine, th, pid);
return thread__get(th);
}
if (tid < th->tid)
if (tid < thread__tid(th))
p = &(*p)->rb_left;
else {
p = &(*p)->rb_right;
@ -2049,7 +2050,7 @@ out_problem:
static void __machine__remove_thread(struct machine *machine, struct thread_rb_node *nd,
struct thread *th, bool lock)
{
struct threads *threads = machine__threads(machine, th->tid);
struct threads *threads = machine__threads(machine, thread__tid(th));
if (!nd)
nd = thread_rb_node__find(th, &threads->entries.rb_root);
@ -2060,7 +2061,7 @@ static void __machine__remove_thread(struct machine *machine, struct thread_rb_n
if (lock)
down_write(&threads->lock);
BUG_ON(refcount_read(&th->refcnt) == 0);
BUG_ON(refcount_read(thread__refcnt(th)) == 0);
thread__put(nd->thread);
rb_erase_cached(&nd->rb_node, &threads->entries);
@ -2099,9 +2100,9 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
* (fork) event that would have removed the thread was lost. Assume the
* latter case and continue on as best we can.
*/
if (parent->pid_ != (pid_t)event->fork.ppid) {
if (thread__pid(parent) != (pid_t)event->fork.ppid) {
dump_printf("removing erroneous parent thread %d/%d\n",
parent->pid_, parent->tid);
thread__pid(parent), thread__tid(parent));
machine__remove_thread(machine, parent);
thread__put(parent);
parent = machine__findnew_thread(machine, event->fork.ppid,
@ -2511,7 +2512,7 @@ static void save_lbr_cursor_node(struct thread *thread,
struct callchain_cursor *cursor,
int idx)
{
struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
if (!lbr_stitch)
return;
@ -2553,7 +2554,7 @@ static int lbr_callchain_add_lbr_ip(struct thread *thread,
* in callchain_cursor_commit() when the writing session is closed.
* Using curr and pos to track the current cursor node.
*/
if (thread->lbr_stitch) {
if (thread__lbr_stitch(thread)) {
cursor->curr = NULL;
cursor->pos = cursor->nr;
if (cursor->nr) {
@ -2581,7 +2582,7 @@ static int lbr_callchain_add_lbr_ip(struct thread *thread,
* But does not need to save current cursor node for entry 0.
* It's impossible to stitch the whole LBRs of previous sample.
*/
if (thread->lbr_stitch && (cursor->pos != cursor->nr)) {
if (thread__lbr_stitch(thread) && (cursor->pos != cursor->nr)) {
if (!cursor->curr)
cursor->curr = cursor->first;
else
@ -2634,7 +2635,7 @@ static int lbr_callchain_add_lbr_ip(struct thread *thread,
static int lbr_callchain_add_stitched_lbr_ip(struct thread *thread,
struct callchain_cursor *cursor)
{
struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
struct callchain_cursor_node *cnode;
struct stitch_list *stitch_node;
int err;
@ -2658,7 +2659,7 @@ static int lbr_callchain_add_stitched_lbr_ip(struct thread *thread,
static struct stitch_list *get_stitch_node(struct thread *thread)
{
struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
struct stitch_list *stitch_node;
if (!list_empty(&lbr_stitch->free_lists)) {
@ -2682,7 +2683,7 @@ static bool has_stitched_lbr(struct thread *thread,
struct branch_entry *cur_entries = perf_sample__branch_entries(cur);
struct branch_stack *prev_stack = prev->branch_stack;
struct branch_entry *prev_entries = perf_sample__branch_entries(prev);
struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
int i, j, nr_identical_branches = 0;
struct stitch_list *stitch_node;
u64 cur_base, distance;
@ -2746,27 +2747,29 @@ static bool has_stitched_lbr(struct thread *thread,
static bool alloc_lbr_stitch(struct thread *thread, unsigned int max_lbr)
{
if (thread->lbr_stitch)
if (thread__lbr_stitch(thread))
return true;
thread->lbr_stitch = zalloc(sizeof(*thread->lbr_stitch));
if (!thread->lbr_stitch)
thread__set_lbr_stitch(thread, zalloc(sizeof(struct lbr_stitch)));
if (!thread__lbr_stitch(thread))
goto err;
thread->lbr_stitch->prev_lbr_cursor = calloc(max_lbr + 1, sizeof(struct callchain_cursor_node));
if (!thread->lbr_stitch->prev_lbr_cursor)
thread__lbr_stitch(thread)->prev_lbr_cursor =
calloc(max_lbr + 1, sizeof(struct callchain_cursor_node));
if (!thread__lbr_stitch(thread)->prev_lbr_cursor)
goto free_lbr_stitch;
INIT_LIST_HEAD(&thread->lbr_stitch->lists);
INIT_LIST_HEAD(&thread->lbr_stitch->free_lists);
INIT_LIST_HEAD(&thread__lbr_stitch(thread)->lists);
INIT_LIST_HEAD(&thread__lbr_stitch(thread)->free_lists);
return true;
free_lbr_stitch:
zfree(&thread->lbr_stitch);
free(thread__lbr_stitch(thread));
thread__set_lbr_stitch(thread, NULL);
err:
pr_warning("Failed to allocate space for stitched LBRs. Disable LBR stitch\n");
thread->lbr_stitch_enable = false;
thread__set_lbr_stitch_enable(thread, false);
return false;
}
@ -2802,9 +2805,9 @@ static int resolve_lbr_callchain_sample(struct thread *thread,
if (i == chain_nr)
return 0;
if (thread->lbr_stitch_enable && !sample->no_hw_idx &&
if (thread__lbr_stitch_enable(thread) && !sample->no_hw_idx &&
(max_lbr > 0) && alloc_lbr_stitch(thread, max_lbr)) {
lbr_stitch = thread->lbr_stitch;
lbr_stitch = thread__lbr_stitch(thread);
stitched_lbr = has_stitched_lbr(thread, sample,
&lbr_stitch->prev_sample,
@ -2884,7 +2887,7 @@ static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
static u64 get_leaf_frame_caller(struct perf_sample *sample,
struct thread *thread, int usr_idx)
{
if (machine__normalized_is(maps__machine(thread->maps), "arm64"))
if (machine__normalized_is(maps__machine(thread__maps(thread)), "arm64"))
return get_leaf_frame_caller_aarch64(sample, thread, usr_idx);
else
return 0;
@ -3265,7 +3268,7 @@ int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
if (!thread)
return -ENOMEM;
thread->cpu = cpu;
thread__set_cpu(thread, cpu);
thread__put(thread);
return 0;


@ -137,7 +137,7 @@ struct map *map__new(struct machine *machine, u64 start, u64 len,
no_dso = is_no_dso_memory(filename);
map->prot = prot;
map->flags = flags;
nsi = nsinfo__get(thread->nsinfo);
nsi = nsinfo__get(thread__nsinfo(thread));
if ((anon || no_dso) && nsi && (prot & PROT_EXEC)) {
snprintf(newfilename, sizeof(newfilename),


@ -384,7 +384,7 @@ put_map:
*/
int maps__clone(struct thread *thread, struct maps *parent)
{
struct maps *maps = thread->maps;
struct maps *maps = thread__maps(thread);
int err;
struct map_rb_node *rb_node;


@ -1163,11 +1163,11 @@ static int python_export_thread(struct db_export *dbe, struct thread *thread,
t = tuple_new(5);
tuple_set_d64(t, 0, thread->db_id);
tuple_set_d64(t, 0, thread__db_id(thread));
tuple_set_d64(t, 1, machine->db_id);
tuple_set_d64(t, 2, main_thread_db_id);
tuple_set_s32(t, 3, thread->pid_);
tuple_set_s32(t, 4, thread->tid);
tuple_set_s32(t, 3, thread__pid(thread));
tuple_set_s32(t, 4, thread__tid(thread));
call_object(tables->thread_handler, t, "thread_table");
@ -1186,7 +1186,7 @@ static int python_export_comm(struct db_export *dbe, struct comm *comm,
tuple_set_d64(t, 0, comm->db_id);
tuple_set_string(t, 1, comm__str(comm));
tuple_set_d64(t, 2, thread->db_id);
tuple_set_d64(t, 2, thread__db_id(thread));
tuple_set_d64(t, 3, comm->start);
tuple_set_s32(t, 4, comm->exec);
@ -1207,7 +1207,7 @@ static int python_export_comm_thread(struct db_export *dbe, u64 db_id,
tuple_set_d64(t, 0, db_id);
tuple_set_d64(t, 1, comm->db_id);
tuple_set_d64(t, 2, thread->db_id);
tuple_set_d64(t, 2, thread__db_id(thread));
call_object(tables->comm_thread_handler, t, "comm_thread_table");
@ -1292,7 +1292,7 @@ static void python_export_sample_table(struct db_export *dbe,
tuple_set_d64(t, 0, es->db_id);
tuple_set_d64(t, 1, es->evsel->db_id);
tuple_set_d64(t, 2, maps__machine(es->al->maps)->db_id);
tuple_set_d64(t, 3, es->al->thread->db_id);
tuple_set_d64(t, 3, thread__db_id(es->al->thread));
tuple_set_d64(t, 4, es->comm_db_id);
tuple_set_d64(t, 5, es->dso_db_id);
tuple_set_d64(t, 6, es->sym_db_id);
@ -1382,7 +1382,7 @@ static int python_export_call_return(struct db_export *dbe,
t = tuple_new(14);
tuple_set_d64(t, 0, cr->db_id);
tuple_set_d64(t, 1, cr->thread->db_id);
tuple_set_d64(t, 1, thread__db_id(cr->thread));
tuple_set_d64(t, 2, comm_db_id);
tuple_set_d64(t, 3, cr->cp->db_id);
tuple_set_d64(t, 4, cr->call_time);


@ -2807,7 +2807,7 @@ static int perf_session__set_guest_cpu(struct perf_session *session, pid_t pid,
if (!thread)
return -ENOMEM;
thread->guest_cpu = guest_cpu;
thread__set_guest_cpu(thread, guest_cpu);
thread__put(thread);
return 0;


@ -108,7 +108,7 @@ static int64_t cmp_null(const void *l, const void *r)
static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
return right->thread->tid - left->thread->tid;
return thread__tid(right->thread) - thread__tid(left->thread);
}
static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
@ -117,7 +117,7 @@ static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
const char *comm = thread__comm_str(he->thread);
width = max(7U, width) - 8;
return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid,
return repsep_snprintf(bf, size, "%7d:%-*.*s", thread__tid(he->thread),
width, width, comm ?: "");
}
@ -1543,8 +1543,10 @@ sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
!l_dso->id.ino && !l_dso->id.ino_generation) {
/* userspace anonymous */
if (left->thread->pid_ > right->thread->pid_) return -1;
if (left->thread->pid_ < right->thread->pid_) return 1;
if (thread__pid(left->thread) > thread__pid(right->thread))
return -1;
if (thread__pid(left->thread) < thread__pid(right->thread))
return 1;
}
addr:


@ -112,7 +112,7 @@ struct thread_stack {
*/
static inline bool thread_stack__per_cpu(struct thread *thread)
{
return !(thread->tid || thread->pid_);
return !(thread__tid(thread) || thread__pid(thread));
}
static int thread_stack__grow(struct thread_stack *ts)
@ -155,8 +155,8 @@ static int thread_stack__init(struct thread_stack *ts, struct thread *thread,
ts->br_stack_sz = br_stack_sz;
}
if (thread->maps && maps__machine(thread->maps)) {
struct machine *machine = maps__machine(thread->maps);
if (thread__maps(thread) && maps__machine(thread__maps(thread))) {
struct machine *machine = maps__machine(thread__maps(thread));
const char *arch = perf_env__arch(machine->env);
ts->kernel_start = machine__kernel_start(machine);
@ -175,7 +175,7 @@ static struct thread_stack *thread_stack__new(struct thread *thread, int cpu,
bool callstack,
unsigned int br_stack_sz)
{
struct thread_stack *ts = thread->ts, *new_ts;
struct thread_stack *ts = thread__ts(thread), *new_ts;
unsigned int old_sz = ts ? ts->arr_sz : 0;
unsigned int new_sz = 1;
@ -189,8 +189,8 @@ static struct thread_stack *thread_stack__new(struct thread *thread, int cpu,
if (ts)
memcpy(new_ts, ts, old_sz * sizeof(*ts));
new_ts->arr_sz = new_sz;
zfree(&thread->ts);
thread->ts = new_ts;
free(thread__ts(thread));
thread__set_ts(thread, new_ts);
ts = new_ts;
}
@ -207,7 +207,7 @@ static struct thread_stack *thread_stack__new(struct thread *thread, int cpu,
static struct thread_stack *thread__cpu_stack(struct thread *thread, int cpu)
{
struct thread_stack *ts = thread->ts;
struct thread_stack *ts = thread__ts(thread);
if (cpu < 0)
cpu = 0;
@ -232,7 +232,7 @@ static inline struct thread_stack *thread__stack(struct thread *thread,
if (thread_stack__per_cpu(thread))
return thread__cpu_stack(thread, cpu);
return thread->ts;
return thread__ts(thread);
}
static int thread_stack__push(struct thread_stack *ts, u64 ret_addr,
@ -363,7 +363,7 @@ static int __thread_stack__flush(struct thread *thread, struct thread_stack *ts)
int thread_stack__flush(struct thread *thread)
{
struct thread_stack *ts = thread->ts;
struct thread_stack *ts = thread__ts(thread);
unsigned int pos;
int err = 0;
@ -502,13 +502,14 @@ static void thread_stack__reset(struct thread *thread, struct thread_stack *ts)
void thread_stack__free(struct thread *thread)
{
struct thread_stack *ts = thread->ts;
struct thread_stack *ts = thread__ts(thread);
unsigned int pos;
if (ts) {
for (pos = 0; pos < ts->arr_sz; pos++)
__thread_stack__free(thread, ts + pos);
zfree(&thread->ts);
free(thread__ts(thread));
thread__set_ts(thread, NULL);
}
}
@ -1127,7 +1128,7 @@ int thread_stack__process(struct thread *thread, struct comm *comm,
ts->rstate = X86_RETPOLINE_POSSIBLE;
/* Flush stack on exec */
if (ts->comm != comm && thread->pid_ == thread->tid) {
if (ts->comm != comm && thread__pid(thread) == thread__tid(thread)) {
err = __thread_stack__flush(thread, ts);
if (err)
return err;


@ -21,19 +21,20 @@
int thread__init_maps(struct thread *thread, struct machine *machine)
{
pid_t pid = thread->pid_;
pid_t pid = thread__pid(thread);
if (pid == thread->tid || pid == -1) {
thread->maps = maps__new(machine);
if (pid == thread__tid(thread) || pid == -1) {
thread__set_maps(thread, maps__new(machine));
} else {
struct thread *leader = __machine__findnew_thread(machine, pid, pid);
if (leader) {
thread->maps = maps__get(leader->maps);
thread__set_maps(thread, maps__get(thread__maps(leader)));
thread__put(leader);
}
}
return thread->maps ? 0 : -1;
return thread__maps(thread) ? 0 : -1;
}
struct thread *thread__new(pid_t pid, pid_t tid)
@ -43,16 +44,16 @@ struct thread *thread__new(pid_t pid, pid_t tid)
struct thread *thread = zalloc(sizeof(*thread));
if (thread != NULL) {
thread->pid_ = pid;
thread->tid = tid;
thread->ppid = -1;
thread->cpu = -1;
thread->guest_cpu = -1;
thread->lbr_stitch_enable = false;
INIT_LIST_HEAD(&thread->namespaces_list);
INIT_LIST_HEAD(&thread->comm_list);
init_rwsem(&thread->namespaces_lock);
init_rwsem(&thread->comm_lock);
thread__set_pid(thread, pid);
thread__set_tid(thread, tid);
thread__set_ppid(thread, -1);
thread__set_cpu(thread, -1);
thread__set_guest_cpu(thread, -1);
thread__set_lbr_stitch_enable(thread, false);
INIT_LIST_HEAD(thread__namespaces_list(thread));
INIT_LIST_HEAD(thread__comm_list(thread));
init_rwsem(thread__namespaces_lock(thread));
init_rwsem(thread__comm_lock(thread));
comm_str = malloc(32);
if (!comm_str)
@ -64,11 +65,11 @@ struct thread *thread__new(pid_t pid, pid_t tid)
if (!comm)
goto err_thread;
list_add(&comm->list, &thread->comm_list);
refcount_set(&thread->refcnt, 1);
list_add(&comm->list, thread__comm_list(thread));
refcount_set(thread__refcnt(thread), 1);
/* Thread holds first ref to nsdata. */
thread->nsinfo = nsinfo__new(pid);
srccode_state_init(&thread->srccode_state);
srccode_state_init(thread__srccode_state(thread));
}
return thread;
@ -85,30 +86,30 @@ void thread__delete(struct thread *thread)
thread_stack__free(thread);
if (thread->maps) {
maps__put(thread->maps);
thread->maps = NULL;
if (thread__maps(thread)) {
maps__put(thread__maps(thread));
thread__set_maps(thread, NULL);
}
down_write(&thread->namespaces_lock);
down_write(thread__namespaces_lock(thread));
list_for_each_entry_safe(namespaces, tmp_namespaces,
&thread->namespaces_list, list) {
thread__namespaces_list(thread), list) {
list_del_init(&namespaces->list);
namespaces__free(namespaces);
}
up_write(&thread->namespaces_lock);
up_write(thread__namespaces_lock(thread));
down_write(&thread->comm_lock);
list_for_each_entry_safe(comm, tmp_comm, &thread->comm_list, list) {
down_write(thread__comm_lock(thread));
list_for_each_entry_safe(comm, tmp_comm, thread__comm_list(thread), list) {
list_del_init(&comm->list);
comm__free(comm);
}
up_write(&thread->comm_lock);
up_write(thread__comm_lock(thread));
nsinfo__zput(thread->nsinfo);
srccode_state_free(&thread->srccode_state);
srccode_state_free(thread__srccode_state(thread));
exit_rwsem(&thread->namespaces_lock);
exit_rwsem(&thread->comm_lock);
exit_rwsem(thread__namespaces_lock(thread));
exit_rwsem(thread__comm_lock(thread));
thread__free_stitch_list(thread);
free(thread);
}
@ -116,31 +117,31 @@ void thread__delete(struct thread *thread)
struct thread *thread__get(struct thread *thread)
{
if (thread)
refcount_inc(&thread->refcnt);
refcount_inc(thread__refcnt(thread));
return thread;
}
void thread__put(struct thread *thread)
{
if (thread && refcount_dec_and_test(&thread->refcnt))
if (thread && refcount_dec_and_test(thread__refcnt(thread)))
thread__delete(thread);
}
static struct namespaces *__thread__namespaces(const struct thread *thread)
static struct namespaces *__thread__namespaces(struct thread *thread)
{
if (list_empty(&thread->namespaces_list))
if (list_empty(thread__namespaces_list(thread)))
return NULL;
return list_first_entry(&thread->namespaces_list, struct namespaces, list);
return list_first_entry(thread__namespaces_list(thread), struct namespaces, list);
}
struct namespaces *thread__namespaces(struct thread *thread)
{
struct namespaces *ns;
down_read(&thread->namespaces_lock);
down_read(thread__namespaces_lock(thread));
ns = __thread__namespaces(thread);
up_read(&thread->namespaces_lock);
up_read(thread__namespaces_lock(thread));
return ns;
}
@ -154,7 +155,7 @@ static int __thread__set_namespaces(struct thread *thread, u64 timestamp,
if (!new)
return -ENOMEM;
list_add(&new->list, &thread->namespaces_list);
list_add(&new->list, thread__namespaces_list(thread));
if (timestamp && curr) {
/*
@ -174,25 +175,25 @@ int thread__set_namespaces(struct thread *thread, u64 timestamp,
{
int ret;
down_write(&thread->namespaces_lock);
down_write(thread__namespaces_lock(thread));
ret = __thread__set_namespaces(thread, timestamp, event);
up_write(&thread->namespaces_lock);
up_write(thread__namespaces_lock(thread));
return ret;
}
struct comm *thread__comm(const struct thread *thread)
struct comm *thread__comm(struct thread *thread)
{
if (list_empty(&thread->comm_list))
if (list_empty(thread__comm_list(thread)))
return NULL;
return list_first_entry(&thread->comm_list, struct comm, list);
return list_first_entry(thread__comm_list(thread), struct comm, list);
}
struct comm *thread__exec_comm(const struct thread *thread)
struct comm *thread__exec_comm(struct thread *thread)
{
struct comm *comm, *last = NULL, *second_last = NULL;
list_for_each_entry(comm, &thread->comm_list, list) {
list_for_each_entry(comm, thread__comm_list(thread), list) {
if (comm->exec)
return comm;
second_last = last;
@ -205,7 +206,7 @@ struct comm *thread__exec_comm(const struct thread *thread)
* thread, that is very probably wrong. Prefer a later comm to avoid
* that case.
*/
if (second_last && !last->start && thread->pid_ == thread->tid)
if (second_last && !last->start && thread__pid(thread) == thread__tid(thread))
return second_last;
return last;
@ -217,7 +218,7 @@ static int ____thread__set_comm(struct thread *thread, const char *str,
struct comm *new, *curr = thread__comm(thread);
/* Override the default :tid entry */
if (!thread->comm_set) {
if (!thread__comm_set(thread)) {
int err = comm__override(curr, str, timestamp, exec);
if (err)
return err;
@ -225,13 +226,13 @@ static int ____thread__set_comm(struct thread *thread, const char *str,
new = comm__new(str, timestamp, exec);
if (!new)
return -ENOMEM;
list_add(&new->list, &thread->comm_list);
list_add(&new->list, thread__comm_list(thread));
if (exec)
unwind__flush_access(thread->maps);
unwind__flush_access(thread__maps(thread));
}
thread->comm_set = true;
thread__set_comm_set(thread, true);
return 0;
}
@ -241,9 +242,9 @@ int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp,
{
int ret;
down_write(&thread->comm_lock);
down_write(thread__comm_lock(thread));
ret = ____thread__set_comm(thread, str, timestamp, exec);
up_write(&thread->comm_lock);
up_write(thread__comm_lock(thread));
return ret;
}
@ -255,7 +256,7 @@ int thread__set_comm_from_proc(struct thread *thread)
int err = -1;
if (!(snprintf(path, sizeof(path), "%d/task/%d/comm",
thread->pid_, thread->tid) >= (int)sizeof(path)) &&
thread__pid(thread), thread__tid(thread)) >= (int)sizeof(path)) &&
procfs__read_str(path, &comm, &sz) == 0) {
comm[sz - 1] = '\0';
err = thread__set_comm(thread, comm, 0);
@ -264,7 +265,7 @@ int thread__set_comm_from_proc(struct thread *thread)
return err;
}
static const char *__thread__comm_str(const struct thread *thread)
static const char *__thread__comm_str(struct thread *thread)
{
const struct comm *comm = thread__comm(thread);
@ -278,9 +279,9 @@ const char *thread__comm_str(struct thread *thread)
{
const char *str;
down_read(&thread->comm_lock);
down_read(thread__comm_lock(thread));
str = __thread__comm_str(thread);
up_read(&thread->comm_lock);
up_read(thread__comm_lock(thread));
return str;
}
@ -289,23 +290,23 @@ static int __thread__comm_len(struct thread *thread, const char *comm)
{
if (!comm)
return 0;
thread->comm_len = strlen(comm);
thread__set_comm_len(thread, strlen(comm));
return thread->comm_len;
return thread__var_comm_len(thread);
}
/* CHECKME: it should probably better return the max comm len from its comm list */
int thread__comm_len(struct thread *thread)
{
int comm_len = thread->comm_len;
int comm_len = thread__var_comm_len(thread);
if (!comm_len) {
const char *comm;
down_read(&thread->comm_lock);
down_read(thread__comm_lock(thread));
comm = __thread__comm_str(thread);
comm_len = __thread__comm_len(thread, comm);
up_read(&thread->comm_lock);
up_read(thread__comm_lock(thread));
}
return comm_len;
@ -313,33 +314,33 @@ int thread__comm_len(struct thread *thread)
size_t thread__fprintf(struct thread *thread, FILE *fp)
{
return fprintf(fp, "Thread %d %s\n", thread->tid, thread__comm_str(thread)) +
maps__fprintf(thread->maps, fp);
return fprintf(fp, "Thread %d %s\n", thread__tid(thread), thread__comm_str(thread)) +
maps__fprintf(thread__maps(thread), fp);
}
int thread__insert_map(struct thread *thread, struct map *map)
{
int ret;
ret = unwind__prepare_access(thread->maps, map, NULL);
ret = unwind__prepare_access(thread__maps(thread), map, NULL);
if (ret)
return ret;
maps__fixup_overlappings(thread->maps, map, stderr);
return maps__insert(thread->maps, map);
maps__fixup_overlappings(thread__maps(thread), map, stderr);
return maps__insert(thread__maps(thread), map);
}
static int __thread__prepare_access(struct thread *thread)
{
bool initialized = false;
int err = 0;
struct maps *maps = thread->maps;
struct maps *maps = thread__maps(thread);
struct map_rb_node *rb_node;
down_read(maps__lock(maps));
maps__for_each_entry(maps, rb_node) {
err = unwind__prepare_access(thread->maps, rb_node->map, &initialized);
err = unwind__prepare_access(thread__maps(thread), rb_node->map, &initialized);
if (err || initialized)
break;
}
@ -362,21 +363,22 @@ static int thread__prepare_access(struct thread *thread)
static int thread__clone_maps(struct thread *thread, struct thread *parent, bool do_maps_clone)
{
/* This is new thread, we share map groups for process. */
if (thread->pid_ == parent->pid_)
if (thread__pid(thread) == thread__pid(parent))
return thread__prepare_access(thread);
if (thread->maps == parent->maps) {
if (thread__maps(thread) == thread__maps(parent)) {
pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
thread->pid_, thread->tid, parent->pid_, parent->tid);
thread__pid(thread), thread__tid(thread),
thread__pid(parent), thread__tid(parent));
return 0;
}
/* But this one is new process, copy maps. */
return do_maps_clone ? maps__clone(thread, parent->maps) : 0;
return do_maps_clone ? maps__clone(thread, thread__maps(parent)) : 0;
}
int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp, bool do_maps_clone)
{
if (parent->comm_set) {
if (thread__comm_set(parent)) {
const char *comm = thread__comm_str(parent);
int err;
if (!comm)
@@ -386,7 +388,7 @@ int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp, bo
return err;
}
thread->ppid = parent->tid;
thread__set_ppid(thread, thread__tid(parent));
return thread__clone_maps(thread, parent, do_maps_clone);
}
@@ -410,13 +412,13 @@ void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
struct thread *thread__main_thread(struct machine *machine, struct thread *thread)
{
if (thread->pid_ == thread->tid)
if (thread__pid(thread) == thread__tid(thread))
return thread__get(thread);
if (thread->pid_ == -1)
if (thread__pid(thread) == -1)
return NULL;
return machine__find_thread(machine, thread->pid_, thread->pid_);
return machine__find_thread(machine, thread__pid(thread), thread__pid(thread));
}
int thread__memcpy(struct thread *thread, struct machine *machine,
@@ -447,7 +449,7 @@ int thread__memcpy(struct thread *thread, struct machine *machine,
void thread__free_stitch_list(struct thread *thread)
{
struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
struct stitch_list *pos, *tmp;
if (!lbr_stitch)
@@ -464,5 +466,6 @@ void thread__free_stitch_list(struct thread *thread)
}
zfree(&lbr_stitch->prev_lbr_cursor);
zfree(&thread->lbr_stitch);
free(thread__lbr_stitch(thread));
thread__set_lbr_stitch(thread, NULL);
}
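The thread__free_stitch_list() hunk above is the one place where zfree() cannot simply take an accessor's return value: zfree() needs the address of the pointer so it can free the buffer and clear the field in one step, while a getter only hands back the pointer's value, so the change splits it into free() plus a setter. A minimal standalone sketch of that pattern, using a hypothetical struct obj rather than the perf types:

#include <stdlib.h>

struct obj {
	void *data;
};

static inline void *obj__data(struct obj *o)
{
	return o->data;
}

static inline void obj__set_data(struct obj *o, void *data)
{
	o->data = data;
}

static void obj__release_data(struct obj *o)
{
	/* was: zfree(&o->data); free the buffer, then clear the field */
	free(obj__data(o));
	obj__set_data(o, NULL);
}

int main(void)
{
	struct obj o = { .data = malloc(16) };

	obj__release_data(&o);
	return o.data != NULL;	/* 0 on success: the field was cleared */
}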


@@ -96,8 +96,8 @@ static inline int thread__set_comm(struct thread *thread, const char *comm,
int thread__set_comm_from_proc(struct thread *thread);
int thread__comm_len(struct thread *thread);
struct comm *thread__comm(const struct thread *thread);
struct comm *thread__exec_comm(const struct thread *thread);
struct comm *thread__comm(struct thread *thread);
struct comm *thread__exec_comm(struct thread *thread);
const char *thread__comm_str(struct thread *thread);
int thread__insert_map(struct thread *thread, struct map *map);
int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp, bool do_maps_clone);
@@ -121,6 +121,126 @@ void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
int thread__memcpy(struct thread *thread, struct machine *machine,
void *buf, u64 ip, int len, bool *is64bit);
static inline struct maps *thread__maps(struct thread *thread)
{
return thread->maps;
}
static inline void thread__set_maps(struct thread *thread, struct maps *maps)
{
thread->maps = maps;
}
static inline pid_t thread__pid(const struct thread *thread)
{
return thread->pid_;
}
static inline void thread__set_pid(struct thread *thread, pid_t pid_)
{
thread->pid_ = pid_;
}
static inline pid_t thread__tid(const struct thread *thread)
{
return thread->tid;
}
static inline void thread__set_tid(struct thread *thread, pid_t tid)
{
thread->tid = tid;
}
static inline pid_t thread__ppid(const struct thread *thread)
{
return thread->ppid;
}
static inline void thread__set_ppid(struct thread *thread, pid_t ppid)
{
thread->ppid = ppid;
}
static inline int thread__cpu(const struct thread *thread)
{
return thread->cpu;
}
static inline void thread__set_cpu(struct thread *thread, int cpu)
{
thread->cpu = cpu;
}
static inline int thread__guest_cpu(const struct thread *thread)
{
return thread->guest_cpu;
}
static inline void thread__set_guest_cpu(struct thread *thread, int guest_cpu)
{
thread->guest_cpu = guest_cpu;
}
static inline refcount_t *thread__refcnt(struct thread *thread)
{
return &thread->refcnt;
}
static inline bool thread__comm_set(const struct thread *thread)
{
return thread->comm_set;
}
static inline void thread__set_comm_set(struct thread *thread, bool set)
{
thread->comm_set = set;
}
static inline int thread__var_comm_len(const struct thread *thread)
{
return thread->comm_len;
}
static inline void thread__set_comm_len(struct thread *thread, int len)
{
thread->comm_len = len;
}
static inline struct list_head *thread__namespaces_list(struct thread *thread)
{
return &thread->namespaces_list;
}
static inline int thread__namespaces_list_empty(const struct thread *thread)
{
return list_empty(&thread->namespaces_list);
}
static inline struct rw_semaphore *thread__namespaces_lock(struct thread *thread)
{
return &thread->namespaces_lock;
}
static inline struct list_head *thread__comm_list(struct thread *thread)
{
return &thread->comm_list;
}
static inline struct rw_semaphore *thread__comm_lock(struct thread *thread)
{
return &thread->comm_lock;
}
static inline u64 thread__db_id(const struct thread *thread)
{
return thread->db_id;
}
static inline void thread__set_db_id(struct thread *thread, u64 db_id)
{
thread->db_id = db_id;
}
static inline void *thread__priv(struct thread *thread)
{
return thread->priv;
@@ -131,6 +251,66 @@ static inline void thread__set_priv(struct thread *thread, void *p)
thread->priv = p;
}
static inline struct thread_stack *thread__ts(struct thread *thread)
{
return thread->ts;
}
static inline void thread__set_ts(struct thread *thread, struct thread_stack *ts)
{
thread->ts = ts;
}
static inline struct nsinfo *thread__nsinfo(struct thread *thread)
{
return thread->nsinfo;
}
static inline struct srccode_state *thread__srccode_state(struct thread *thread)
{
return &thread->srccode_state;
}
static inline bool thread__filter(const struct thread *thread)
{
return thread->filter;
}
static inline void thread__set_filter(struct thread *thread, bool filter)
{
thread->filter = filter;
}
static inline int thread__filter_entry_depth(const struct thread *thread)
{
return thread->filter_entry_depth;
}
static inline void thread__set_filter_entry_depth(struct thread *thread, int depth)
{
thread->filter_entry_depth = depth;
}
static inline bool thread__lbr_stitch_enable(const struct thread *thread)
{
return thread->lbr_stitch_enable;
}
static inline void thread__set_lbr_stitch_enable(struct thread *thread, bool en)
{
thread->lbr_stitch_enable = en;
}
static inline struct lbr_stitch *thread__lbr_stitch(struct thread *thread)
{
return thread->lbr_stitch;
}
static inline void thread__set_lbr_stitch(struct thread *thread, struct lbr_stitch *lbrs)
{
thread->lbr_stitch = lbrs;
}
static inline bool thread__is_filtered(struct thread *thread)
{
if (symbol_conf.comm_list &&
@@ -139,12 +319,12 @@ static inline bool thread__is_filtered(struct thread *thread)
}
if (symbol_conf.pid_list &&
!intlist__has_entry(symbol_conf.pid_list, thread->pid_)) {
!intlist__has_entry(symbol_conf.pid_list, thread__pid(thread))) {
return true;
}
if (symbol_conf.tid_list &&
!intlist__has_entry(symbol_conf.tid_list, thread->tid)) {
!intlist__has_entry(symbol_conf.tid_list, thread__tid(thread))) {
return true;
}
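The accessors added above follow one simple shape: scalar members get a getter/setter pair, and embedded members such as locks and list heads get a getter that returns a pointer, so call sites like down_read(&thread->comm_lock) become down_read(thread__comm_lock(thread)) with no other change. A self-contained sketch of that shape, assuming a hypothetical struct task and a pthread rwlock in place of the perf types:

#include <stdio.h>
#include <pthread.h>

struct task {
	int pid;
	pthread_rwlock_t lock;
};

/* scalar member: getter/setter pair */
static inline int task__pid(const struct task *t)
{
	return t->pid;
}

static inline void task__set_pid(struct task *t, int pid)
{
	t->pid = pid;
}

/* embedded member: getter returns a pointer to it */
static inline pthread_rwlock_t *task__lock(struct task *t)
{
	return &t->lock;
}

int main(void)
{
	struct task t = { .lock = PTHREAD_RWLOCK_INITIALIZER };

	task__set_pid(&t, 42);			/* was: t.pid = 42;   */
	pthread_rwlock_rdlock(task__lock(&t));	/* was: ...(&t.lock); */
	printf("pid=%d\n", task__pid(&t));	/* was: t.pid         */
	pthread_rwlock_unlock(task__lock(&t));
	return 0;
}

Build with -pthread; the before/after comments mark what a converted call site looks like.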


@@ -230,7 +230,7 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
struct unwind_info *ui, ui_buf = {
.sample = data,
.thread = thread,
.machine = RC_CHK_ACCESS(thread->maps)->machine,
.machine = RC_CHK_ACCESS(thread__maps(thread))->machine,
.cb = cb,
.arg = arg,
.max_stack = max_stack,
@@ -260,11 +260,11 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
if (err)
goto out;
err = !dwfl_attach_state(ui->dwfl, EM_NONE, thread->tid, &callbacks, ui);
err = !dwfl_attach_state(ui->dwfl, EM_NONE, thread__tid(thread), &callbacks, ui);
if (err)
goto out;
err = dwfl_getthread_frames(ui->dwfl, thread->tid, frame_callback, ui);
err = dwfl_getthread_frames(ui->dwfl, thread__tid(thread), frame_callback, ui);
if (err && ui->max_stack != max_stack)
err = 0;


@@ -325,7 +325,7 @@ static int read_unwind_spec_eh_frame(struct dso *dso, struct unwind_info *ui,
return -EINVAL;
}
maps__for_each_entry(ui->thread->maps, map_node) {
maps__for_each_entry(thread__maps(ui->thread), map_node) {
struct map *map = map_node->map;
u64 start = map__start(map);
@@ -719,7 +719,7 @@ static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
*/
if (max_stack - 1 > 0) {
WARN_ONCE(!ui->thread, "WARNING: ui->thread is NULL");
addr_space = maps__addr_space(ui->thread->maps);
addr_space = maps__addr_space(thread__maps(ui->thread));
if (addr_space == NULL)
return -1;
@@ -769,7 +769,7 @@ static int _unwind__get_entries(unwind_entry_cb_t cb, void *arg,
struct unwind_info ui = {
.sample = data,
.thread = thread,
.machine = maps__machine(thread->maps),
.machine = maps__machine(thread__maps(thread)),
.best_effort = best_effort
};


@@ -89,7 +89,7 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
struct perf_sample *data, int max_stack,
bool best_effort)
{
const struct unwind_libunwind_ops *ops = maps__unwind_libunwind_ops(thread->maps);
const struct unwind_libunwind_ops *ops = maps__unwind_libunwind_ops(thread__maps(thread));
if (ops)
return ops->get_entries(cb, arg, thread, data, max_stack, best_effort);


@@ -146,7 +146,7 @@ static enum dso_type machine__thread_dso_type(struct machine *machine,
enum dso_type dso_type = DSO__TYPE_UNKNOWN;
struct map_rb_node *rb_node;
maps__for_each_entry(thread->maps, rb_node) {
maps__for_each_entry(thread__maps(thread), rb_node) {
struct dso *dso = map__dso(rb_node->map);
if (!dso || dso->long_name[0] != '/')