commit d7e3c39708
cpu_core and cpu_atom have different topdown event groups.

For cpu_core, --topdown is equivalent to:

  "{slots,cpu_core/topdown-retiring/,cpu_core/topdown-bad-spec/,
    cpu_core/topdown-fe-bound/,cpu_core/topdown-be-bound/,
    cpu_core/topdown-heavy-ops/,cpu_core/topdown-br-mispredict/,
    cpu_core/topdown-fetch-lat/,cpu_core/topdown-mem-bound/}"

For cpu_atom, --topdown is equivalent to:

  "{cpu_atom/topdown-retiring/,cpu_atom/topdown-bad-spec/,
    cpu_atom/topdown-fe-bound/,cpu_atom/topdown-be-bound/}"

To simplify the implementation, on hybrid platforms --topdown is used together
with --cputype. If --cputype is not specified, the cpu_core topdown events are
used by default.

  # ./perf stat --topdown -a sleep 1
  WARNING: default to use cpu_core topdown events

   Performance counter stats for 'system wide':

          retiring  bad speculation  frontend bound  backend bound  heavy operations  light operations  branch mispredict  machine clears  fetch latency  fetch bandwidth  memory bound  Core bound
              4.1%             0.0%            5.1%          90.8%              2.3%              1.8%               0.0%            0.0%           4.2%             0.9%          9.9%       81.0%

        1.002624229 seconds time elapsed

  # ./perf stat --topdown -a --cputype atom sleep 1

   Performance counter stats for 'system wide':

          retiring  bad speculation  frontend bound  backend bound
             13.5%             0.1%           31.2%          55.2%

        1.002366987 seconds time elapsed

Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220422065635.767648-3-zhengjun.xing@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
tools/perf/util/topdown.c (68 lines, 1.1 KiB, C)
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include "pmu.h"
#include "pmu-hybrid.h"
#include "topdown.h"

/*
 * Build a topdown event string from the NULL-terminated @attr list: events
 * that @pmu_name does not provide are dropped (the list is compacted in
 * place), the rest are joined with ',' and wrapped in '{...}' when
 * @use_group is set. On a hybrid PMU each event is written as
 * "<pmu_name>/<event>/". The result is malloc()ed into *@str.
 */
int topdown_filter_events(const char **attr, char **str, bool use_group,
			  const char *pmu_name)
{
	int off = 0;
	int i;
	int len = 0;
	char *s;
	bool is_hybrid = perf_pmu__is_hybrid(pmu_name);

	/* Keep only the events this PMU has and size the output string. */
	for (i = 0; attr[i]; i++) {
		if (pmu_have_event(pmu_name, attr[i])) {
			if (is_hybrid)
				/* "<pmu>/<event>/" plus the ',' separator */
				len += strlen(attr[i]) + strlen(pmu_name) + 3;
			else
				len += strlen(attr[i]) + 1;
			attr[i - off] = attr[i];
		} else
			off++;
	}
	attr[i - off] = NULL;

	/* Room for the events, the optional '{'/'}' and the NUL terminator. */
	*str = malloc(len + 1 + 2);
	if (!*str)
		return -1;
	s = *str;
	if (i - off == 0) {
		*s = 0;
		return 0;
	}
	if (use_group)
		*s++ = '{';
	for (i = 0; attr[i]; i++) {
		if (!is_hybrid)
			strcpy(s, attr[i]);
		else
			sprintf(s, "%s/%s/", pmu_name, attr[i]);
		s += strlen(s);
		*s++ = ',';
	}
	/* Replace the trailing ',' with '}' or the NUL terminator. */
	if (use_group) {
		s[-1] = '}';
		*s = 0;
	} else
		s[-1] = 0;
	return 0;
}
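
/*
 * Illustrative usage sketch (not part of the original file): shows how a
 * caller such as the perf stat setup code might use topdown_filter_events()
 * to build the cpu_atom group string quoted in the commit message above.
 * The event names and the "cpu_atom" PMU name come from that message; the
 * guard macro and function name below are made up for illustration only,
 * so this block is never built.
 */
#ifdef TOPDOWN_USAGE_EXAMPLE
#include <stdlib.h>

static void topdown_usage_example(void)
{
	const char *attrs[] = {
		"topdown-retiring",
		"topdown-bad-spec",
		"topdown-fe-bound",
		"topdown-be-bound",
		NULL,
	};
	char *group = NULL;

	/*
	 * use_group=true wraps the surviving events in '{...}' so they are
	 * scheduled together; a hybrid PMU name makes each event be emitted
	 * as "cpu_atom/<event>/".
	 */
	if (topdown_filter_events(attrs, &group, true, "cpu_atom"))
		return;

	/*
	 * On a hybrid machine whose atom PMU advertises all four events,
	 * group now holds:
	 * "{cpu_atom/topdown-retiring/,cpu_atom/topdown-bad-spec/,
	 *   cpu_atom/topdown-fe-bound/,cpu_atom/topdown-be-bound/}"
	 */
	if (*group)
		printf("%s\n", group);
	free(group);
}
#endif /* TOPDOWN_USAGE_EXAMPLE */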

__weak bool arch_topdown_check_group(bool *warn)
{
	*warn = false;
	return false;
}

__weak void arch_topdown_group_warn(void)
{
}

__weak bool arch_topdown_sample_read(struct evsel *leader __maybe_unused)
{
	return false;
}
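
/*
 * Illustrative sketch (not part of the original file): because the hooks
 * above are declared __weak, an architecture overrides them simply by
 * providing ordinary strong definitions in its own object file, typically
 * somewhere under tools/perf/arch/<arch>/util/. The stub below only
 * demonstrates that mechanism; it is a placeholder, not the real x86
 * implementation, and the guard macro is made up so it is never built.
 */
#ifdef TOPDOWN_ARCH_OVERRIDE_EXAMPLE
bool arch_topdown_check_group(bool *warn)
{
	/*
	 * A real implementation would decide whether the topdown events can
	 * be scheduled as one group on this CPU and set *warn when the user
	 * should be told about an adjustment, in which case the caller can
	 * follow up with arch_topdown_group_warn().
	 */
	*warn = false;
	return true;
}
#endif /* TOPDOWN_ARCH_OVERRIDE_EXAMPLE */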