perf evlist: Refactor evlist__for_each_cpu()

Previously evlist__for_each_cpu() needed to iterate over the evlist in
an inner loop and call "skip" routines. Refactor this so that the
iterator is smarter and the next function can update both the current CPU
and evsel.

By using a cpu map index, fix apparent off-by-1 in __run_perf_stat's
call to perf_evsel__close_cpu().

Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: John Garry <john.garry@huawei.com>
Cc: Kajol Jain <kjain@linux.ibm.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mathieu Poirier <mathieu.poirier@linaro.org>
Cc: Mike Leach <mike.leach@linaro.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Paul Clarke <pc@us.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Riccardo Mancini <rickyman7@gmail.com>
Cc: Stephane Eranian <eranian@google.com>
Cc: Suzuki Poulouse <suzuki.poulose@arm.com>
Cc: Vineet Singh <vineet.singh@intel.com>
Cc: coresight@lists.linaro.org
Cc: linux-arm-kernel@lists.infradead.org
Cc: zhengjun.xing@intel.com
Link: https://lore.kernel.org/r/20220105061351.120843-35-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
This commit is contained in:
Ian Rogers 2022-01-04 22:13:37 -08:00 committed by Arnaldo Carvalho de Melo
parent 80b82f3b65
commit 472832d2c0
4 changed files with 213 additions and 169 deletions

View File

@ -405,36 +405,33 @@ static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu)
static int read_affinity_counters(struct timespec *rs) static int read_affinity_counters(struct timespec *rs)
{ {
struct evsel *counter; struct evlist_cpu_iterator evlist_cpu_itr;
struct affinity affinity; struct affinity saved_affinity, *affinity;
int i, ncpus, cpu;
if (all_counters_use_bpf) if (all_counters_use_bpf)
return 0; return 0;
if (affinity__setup(&affinity) < 0)
return -1;
ncpus = perf_cpu_map__nr(evsel_list->core.all_cpus);
if (!target__has_cpu(&target) || target__has_per_thread(&target)) if (!target__has_cpu(&target) || target__has_per_thread(&target))
ncpus = 1; affinity = NULL;
evlist__for_each_cpu(evsel_list, i, cpu) { else if (affinity__setup(&saved_affinity) < 0)
if (i >= ncpus) return -1;
break; else
affinity__set(&affinity, cpu); affinity = &saved_affinity;
evlist__for_each_entry(evsel_list, counter) { evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
if (evsel__cpu_iter_skip(counter, cpu)) struct evsel *counter = evlist_cpu_itr.evsel;
continue;
if (evsel__is_bpf(counter)) if (evsel__is_bpf(counter))
continue; continue;
if (!counter->err) {
counter->err = read_counter_cpu(counter, rs, if (!counter->err) {
counter->cpu_iter - 1); counter->err = read_counter_cpu(counter, rs,
} evlist_cpu_itr.cpu_map_idx);
} }
} }
affinity__cleanup(&affinity); if (affinity)
affinity__cleanup(&saved_affinity);
return 0; return 0;
} }
@ -788,8 +785,9 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
int status = 0; int status = 0;
const bool forks = (argc > 0); const bool forks = (argc > 0);
bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false; bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false;
struct evlist_cpu_iterator evlist_cpu_itr;
struct affinity affinity; struct affinity affinity;
int i, cpu, err; int err;
bool second_pass = false; bool second_pass = false;
if (forks) { if (forks) {
@ -813,56 +811,53 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
all_counters_use_bpf = false; all_counters_use_bpf = false;
} }
evlist__for_each_cpu (evsel_list, i, cpu) { evlist__for_each_cpu(evlist_cpu_itr, evsel_list, &affinity) {
counter = evlist_cpu_itr.evsel;
/* /*
* bperf calls evsel__open_per_cpu() in bperf__load(), so * bperf calls evsel__open_per_cpu() in bperf__load(), so
* no need to call it again here. * no need to call it again here.
*/ */
if (target.use_bpf) if (target.use_bpf)
break; break;
affinity__set(&affinity, cpu);
evlist__for_each_entry(evsel_list, counter) { if (counter->reset_group || counter->errored)
if (evsel__cpu_iter_skip(counter, cpu)) continue;
continue; if (evsel__is_bpf(counter))
if (counter->reset_group || counter->errored) continue;
continue;
if (evsel__is_bpf(counter))
continue;
try_again: try_again:
if (create_perf_stat_counter(counter, &stat_config, &target, if (create_perf_stat_counter(counter, &stat_config, &target,
counter->cpu_iter - 1) < 0) { evlist_cpu_itr.cpu_map_idx) < 0) {
/*
* Weak group failed. We cannot just undo this here
* because earlier CPUs might be in group mode, and the kernel
* doesn't support mixing group and non group reads. Defer
* it to later.
* Don't close here because we're in the wrong affinity.
*/
if ((errno == EINVAL || errno == EBADF) &&
evsel__leader(counter) != counter &&
counter->weak_group) {
evlist__reset_weak_group(evsel_list, counter, false);
assert(counter->reset_group);
second_pass = true;
continue;
}
switch (stat_handle_error(counter)) {
case COUNTER_FATAL:
return -1;
case COUNTER_RETRY:
goto try_again;
case COUNTER_SKIP:
continue;
default:
break;
}
/*
* Weak group failed. We cannot just undo this here
* because earlier CPUs might be in group mode, and the kernel
* doesn't support mixing group and non group reads. Defer
* it to later.
* Don't close here because we're in the wrong affinity.
*/
if ((errno == EINVAL || errno == EBADF) &&
evsel__leader(counter) != counter &&
counter->weak_group) {
evlist__reset_weak_group(evsel_list, counter, false);
assert(counter->reset_group);
second_pass = true;
continue;
} }
counter->supported = true;
switch (stat_handle_error(counter)) {
case COUNTER_FATAL:
return -1;
case COUNTER_RETRY:
goto try_again;
case COUNTER_SKIP:
continue;
default:
break;
}
} }
counter->supported = true;
} }
if (second_pass) { if (second_pass) {
@ -871,42 +866,40 @@ try_again:
* and also close errored counters. * and also close errored counters.
*/ */
evlist__for_each_cpu(evsel_list, i, cpu) { /* First close errored or weak retry */
affinity__set(&affinity, cpu); evlist__for_each_cpu(evlist_cpu_itr, evsel_list, &affinity) {
/* First close errored or weak retry */ counter = evlist_cpu_itr.evsel;
evlist__for_each_entry(evsel_list, counter) {
if (!counter->reset_group && !counter->errored)
continue;
if (evsel__cpu_iter_skip_no_inc(counter, cpu))
continue;
perf_evsel__close_cpu(&counter->core, counter->cpu_iter);
}
/* Now reopen weak */
evlist__for_each_entry(evsel_list, counter) {
if (!counter->reset_group && !counter->errored)
continue;
if (evsel__cpu_iter_skip(counter, cpu))
continue;
if (!counter->reset_group)
continue;
try_again_reset:
pr_debug2("reopening weak %s\n", evsel__name(counter));
if (create_perf_stat_counter(counter, &stat_config, &target,
counter->cpu_iter - 1) < 0) {
switch (stat_handle_error(counter)) { if (!counter->reset_group && !counter->errored)
case COUNTER_FATAL: continue;
return -1;
case COUNTER_RETRY: perf_evsel__close_cpu(&counter->core, evlist_cpu_itr.cpu_map_idx);
goto try_again_reset; }
case COUNTER_SKIP: /* Now reopen weak */
continue; evlist__for_each_cpu(evlist_cpu_itr, evsel_list, &affinity) {
default: counter = evlist_cpu_itr.evsel;
break;
} if (!counter->reset_group && !counter->errored)
continue;
if (!counter->reset_group)
continue;
try_again_reset:
pr_debug2("reopening weak %s\n", evsel__name(counter));
if (create_perf_stat_counter(counter, &stat_config, &target,
evlist_cpu_itr.cpu_map_idx) < 0) {
switch (stat_handle_error(counter)) {
case COUNTER_FATAL:
return -1;
case COUNTER_RETRY:
goto try_again_reset;
case COUNTER_SKIP:
continue;
default:
break;
} }
counter->supported = true;
} }
counter->supported = true;
} }
} }
affinity__cleanup(&affinity); affinity__cleanup(&affinity);

View File

@ -342,36 +342,65 @@ static int evlist__nr_threads(struct evlist *evlist, struct evsel *evsel)
return perf_thread_map__nr(evlist->core.threads); return perf_thread_map__nr(evlist->core.threads);
} }
void evlist__cpu_iter_start(struct evlist *evlist) struct evlist_cpu_iterator evlist__cpu_begin(struct evlist *evlist, struct affinity *affinity)
{ {
struct evsel *pos; struct evlist_cpu_iterator itr = {
.container = evlist,
.evsel = evlist__first(evlist),
.cpu_map_idx = 0,
.evlist_cpu_map_idx = 0,
.evlist_cpu_map_nr = perf_cpu_map__nr(evlist->core.all_cpus),
.cpu = -1,
.affinity = affinity,
};
/* if (itr.affinity) {
* Reset the per evsel cpu_iter. This is needed because itr.cpu = perf_cpu_map__cpu(evlist->core.all_cpus, 0);
* each evsel's cpumap may have a different index space, affinity__set(itr.affinity, itr.cpu);
* and some operations need the index to modify itr.cpu_map_idx = perf_cpu_map__idx(itr.evsel->core.cpus, itr.cpu);
* the FD xyarray (e.g. open, close) /*
*/ * If this CPU isn't in the evsel's cpu map then advance through
evlist__for_each_entry(evlist, pos) * the list.
pos->cpu_iter = 0; */
} if (itr.cpu_map_idx == -1)
evlist_cpu_iterator__next(&itr);
bool evsel__cpu_iter_skip_no_inc(struct evsel *ev, int cpu)
{
if (ev->cpu_iter >= ev->core.cpus->nr)
return true;
if (cpu >= 0 && ev->core.cpus->map[ev->cpu_iter] != cpu)
return true;
return false;
}
bool evsel__cpu_iter_skip(struct evsel *ev, int cpu)
{
if (!evsel__cpu_iter_skip_no_inc(ev, cpu)) {
ev->cpu_iter++;
return false;
} }
return true; return itr;
}
void evlist_cpu_iterator__next(struct evlist_cpu_iterator *evlist_cpu_itr)
{
while (evlist_cpu_itr->evsel != evlist__last(evlist_cpu_itr->container)) {
evlist_cpu_itr->evsel = evsel__next(evlist_cpu_itr->evsel);
evlist_cpu_itr->cpu_map_idx =
perf_cpu_map__idx(evlist_cpu_itr->evsel->core.cpus,
evlist_cpu_itr->cpu);
if (evlist_cpu_itr->cpu_map_idx != -1)
return;
}
evlist_cpu_itr->evlist_cpu_map_idx++;
if (evlist_cpu_itr->evlist_cpu_map_idx < evlist_cpu_itr->evlist_cpu_map_nr) {
evlist_cpu_itr->evsel = evlist__first(evlist_cpu_itr->container);
evlist_cpu_itr->cpu =
perf_cpu_map__cpu(evlist_cpu_itr->container->core.all_cpus,
evlist_cpu_itr->evlist_cpu_map_idx);
if (evlist_cpu_itr->affinity)
affinity__set(evlist_cpu_itr->affinity, evlist_cpu_itr->cpu);
evlist_cpu_itr->cpu_map_idx =
perf_cpu_map__idx(evlist_cpu_itr->evsel->core.cpus,
evlist_cpu_itr->cpu);
/*
* If this CPU isn't in the evsel's cpu map then advance through
* the list.
*/
if (evlist_cpu_itr->cpu_map_idx == -1)
evlist_cpu_iterator__next(evlist_cpu_itr);
}
}
bool evlist_cpu_iterator__end(const struct evlist_cpu_iterator *evlist_cpu_itr)
{
return evlist_cpu_itr->evlist_cpu_map_idx >= evlist_cpu_itr->evlist_cpu_map_nr;
} }
static int evsel__strcmp(struct evsel *pos, char *evsel_name) static int evsel__strcmp(struct evsel *pos, char *evsel_name)
@ -400,31 +429,26 @@ static int evlist__is_enabled(struct evlist *evlist)
static void __evlist__disable(struct evlist *evlist, char *evsel_name) static void __evlist__disable(struct evlist *evlist, char *evsel_name)
{ {
struct evsel *pos; struct evsel *pos;
struct evlist_cpu_iterator evlist_cpu_itr;
struct affinity affinity; struct affinity affinity;
int cpu, i, imm = 0;
bool has_imm = false; bool has_imm = false;
if (affinity__setup(&affinity) < 0) if (affinity__setup(&affinity) < 0)
return; return;
/* Disable 'immediate' events last */ /* Disable 'immediate' events last */
for (imm = 0; imm <= 1; imm++) { for (int imm = 0; imm <= 1; imm++) {
evlist__for_each_cpu(evlist, i, cpu) { evlist__for_each_cpu(evlist_cpu_itr, evlist, &affinity) {
affinity__set(&affinity, cpu); pos = evlist_cpu_itr.evsel;
if (evsel__strcmp(pos, evsel_name))
evlist__for_each_entry(evlist, pos) { continue;
if (evsel__strcmp(pos, evsel_name)) if (pos->disabled || !evsel__is_group_leader(pos) || !pos->core.fd)
continue; continue;
if (evsel__cpu_iter_skip(pos, cpu)) if (pos->immediate)
continue; has_imm = true;
if (pos->disabled || !evsel__is_group_leader(pos) || !pos->core.fd) if (pos->immediate != imm)
continue; continue;
if (pos->immediate) evsel__disable_cpu(pos, evlist_cpu_itr.cpu_map_idx);
has_imm = true;
if (pos->immediate != imm)
continue;
evsel__disable_cpu(pos, pos->cpu_iter - 1);
}
} }
if (!has_imm) if (!has_imm)
break; break;
@ -462,24 +486,19 @@ void evlist__disable_evsel(struct evlist *evlist, char *evsel_name)
static void __evlist__enable(struct evlist *evlist, char *evsel_name) static void __evlist__enable(struct evlist *evlist, char *evsel_name)
{ {
struct evsel *pos; struct evsel *pos;
struct evlist_cpu_iterator evlist_cpu_itr;
struct affinity affinity; struct affinity affinity;
int cpu, i;
if (affinity__setup(&affinity) < 0) if (affinity__setup(&affinity) < 0)
return; return;
evlist__for_each_cpu(evlist, i, cpu) { evlist__for_each_cpu(evlist_cpu_itr, evlist, &affinity) {
affinity__set(&affinity, cpu); pos = evlist_cpu_itr.evsel;
if (evsel__strcmp(pos, evsel_name))
evlist__for_each_entry(evlist, pos) { continue;
if (evsel__strcmp(pos, evsel_name)) if (!evsel__is_group_leader(pos) || !pos->core.fd)
continue; continue;
if (evsel__cpu_iter_skip(pos, cpu)) evsel__enable_cpu(pos, evlist_cpu_itr.cpu_map_idx);
continue;
if (!evsel__is_group_leader(pos) || !pos->core.fd)
continue;
evsel__enable_cpu(pos, pos->cpu_iter - 1);
}
} }
affinity__cleanup(&affinity); affinity__cleanup(&affinity);
evlist__for_each_entry(evlist, pos) { evlist__for_each_entry(evlist, pos) {
@ -1264,8 +1283,8 @@ void evlist__set_selected(struct evlist *evlist, struct evsel *evsel)
void evlist__close(struct evlist *evlist) void evlist__close(struct evlist *evlist)
{ {
struct evsel *evsel; struct evsel *evsel;
struct evlist_cpu_iterator evlist_cpu_itr;
struct affinity affinity; struct affinity affinity;
int cpu, i;
/* /*
* With perf record core.cpus is usually NULL. * With perf record core.cpus is usually NULL.
@ -1279,15 +1298,12 @@ void evlist__close(struct evlist *evlist)
if (affinity__setup(&affinity) < 0) if (affinity__setup(&affinity) < 0)
return; return;
evlist__for_each_cpu(evlist, i, cpu) {
affinity__set(&affinity, cpu);
evlist__for_each_entry_reverse(evlist, evsel) { evlist__for_each_cpu(evlist_cpu_itr, evlist, &affinity) {
if (evsel__cpu_iter_skip(evsel, cpu)) perf_evsel__close_cpu(&evlist_cpu_itr.evsel->core,
continue; evlist_cpu_itr.cpu_map_idx);
perf_evsel__close_cpu(&evsel->core, evsel->cpu_iter - 1);
}
} }
affinity__cleanup(&affinity); affinity__cleanup(&affinity);
evlist__for_each_entry_reverse(evlist, evsel) { evlist__for_each_entry_reverse(evlist, evsel) {
perf_evsel__free_fd(&evsel->core); perf_evsel__free_fd(&evsel->core);

View File

@ -327,17 +327,53 @@ void evlist__to_front(struct evlist *evlist, struct evsel *move_evsel);
#define evlist__for_each_entry_safe(evlist, tmp, evsel) \ #define evlist__for_each_entry_safe(evlist, tmp, evsel) \
__evlist__for_each_entry_safe(&(evlist)->core.entries, tmp, evsel) __evlist__for_each_entry_safe(&(evlist)->core.entries, tmp, evsel)
#define evlist__for_each_cpu(evlist, index, cpu) \ /** Iterator state for evlist__for_each_cpu */
evlist__cpu_iter_start(evlist); \ struct evlist_cpu_iterator {
perf_cpu_map__for_each_cpu (cpu, index, (evlist)->core.all_cpus) /** The list being iterated through. */
struct evlist *container;
/** The current evsel of the iterator. */
struct evsel *evsel;
/** The CPU map index corresponding to the evsel->core.cpus for the current CPU. */
int cpu_map_idx;
/**
* The CPU map index corresponding to evlist->core.all_cpus for the
* current CPU. Distinct from cpu_map_idx as the evsel's cpu map may
* contain fewer entries.
*/
int evlist_cpu_map_idx;
/** The number of CPU map entries in evlist->core.all_cpus. */
int evlist_cpu_map_nr;
/** The current CPU of the iterator. */
int cpu;
/** If present, used to set the affinity when switching between CPUs. */
struct affinity *affinity;
};
/**
* evlist__for_each_cpu - without affinity, iterate over the evlist. With
* affinity, iterate over all CPUs and then the evlist
* for each evsel on that CPU. When switching between
* CPUs the affinity is set to the CPU to avoid IPIs
* during syscalls.
* @evlist_cpu_itr: the iterator instance.
* @evlist: evlist instance to iterate.
* @affinity: NULL or used to set the affinity to the current CPU.
*/
#define evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) \
for ((evlist_cpu_itr) = evlist__cpu_begin(evlist, affinity); \
!evlist_cpu_iterator__end(&evlist_cpu_itr); \
evlist_cpu_iterator__next(&evlist_cpu_itr))
/** Returns an iterator set to the first CPU/evsel of evlist. */
struct evlist_cpu_iterator evlist__cpu_begin(struct evlist *evlist, struct affinity *affinity);
/** Move to next element in iterator, updating CPU, evsel and the affinity. */
void evlist_cpu_iterator__next(struct evlist_cpu_iterator *evlist_cpu_itr);
/** Returns true when iterator is at the end of the CPUs and evlist. */
bool evlist_cpu_iterator__end(const struct evlist_cpu_iterator *evlist_cpu_itr);
struct evsel *evlist__get_tracking_event(struct evlist *evlist); struct evsel *evlist__get_tracking_event(struct evlist *evlist);
void evlist__set_tracking_event(struct evlist *evlist, struct evsel *tracking_evsel); void evlist__set_tracking_event(struct evlist *evlist, struct evsel *tracking_evsel);
void evlist__cpu_iter_start(struct evlist *evlist);
bool evsel__cpu_iter_skip(struct evsel *ev, int cpu);
bool evsel__cpu_iter_skip_no_inc(struct evsel *ev, int cpu);
struct evsel *evlist__find_evsel_by_str(struct evlist *evlist, const char *str); struct evsel *evlist__find_evsel_by_str(struct evlist *evlist, const char *str);
struct evsel *evlist__event2evsel(struct evlist *evlist, union perf_event *event); struct evsel *evlist__event2evsel(struct evlist *evlist, union perf_event *event);

View File

@ -121,7 +121,6 @@ struct evsel {
bool errored; bool errored;
struct hashmap *per_pkg_mask; struct hashmap *per_pkg_mask;
int err; int err;
int cpu_iter;
struct { struct {
evsel__sb_cb_t *cb; evsel__sb_cb_t *cb;
void *data; void *data;