perf record ibs: Warn about sampling period skew
Samples without an L3 miss are discarded and the counter is reset with
a random value (between 1-15 for the fetch PMU and 1-127 for the op
PMU) when IBS L3 miss filtering is enabled. This causes a sampling
period skew, but there is no way to reconstruct the aggregated sampling
period. So print a warning at perf record time if the user sets
l3missonly=1.

Ex:
  # perf record -c 10000 -C 0 -e ibs_op/l3missonly=1/
  WARNING: Hw internally resets sampling period when L3 Miss Filtering is enabled
  and tagged operation does not cause L3 Miss. This causes sampling period skew.

Signed-off-by: Ravi Bangoria <ravi.bangoria@amd.com>
Acked-by: Ian Rogers <irogers@google.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Cc: Ananth Narayan <ananth.narayan@amd.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: James Clark <james.clark@arm.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Kim Phillips <kim.phillips@amd.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Robert Richter <rrichter@amd.com>
Cc: Sandipan Das <sandipan.das@amd.com>
Cc: Santosh Shukla <santosh.shukla@amd.com>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: like.xu.linux@gmail.com
Cc: x86@kernel.org
Link: http://lore.kernel.org/lkml/20220604044519.594-2-ravi.bangoria@amd.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 52f28b7bac
commit 9ab95b0b15
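For context on what l3missonly=1 actually sets: the event term maps to a single bit of the raw config (bit 16 for ibs_op, bit 59 for ibs_fetch, matching the defines added in the diff below), and the PMU advertises that mapping through its sysfs format directory. A minimal standalone sketch of reading that mapping, not part of this patch; the sysfs path is an assumption and only exists on AMD systems exposing the ibs_op PMU:

/*
 * Sketch: resolve a single-bit PMU format spec such as "config:16" and
 * build the raw config value that ibs_op/l3missonly=1/ would set.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>

static int format_bit(const char *path)
{
	char buf[64];
	FILE *f = fopen(path, "r");

	if (!f)
		return -1;
	if (!fgets(buf, sizeof(buf), f)) {
		fclose(f);
		return -1;
	}
	fclose(f);
	/* Expect a single-bit spec of the form "config:<bit>" */
	if (strncmp(buf, "config:", 7))
		return -1;
	return atoi(buf + 7);
}

int main(void)
{
	/* Assumed path; present only where the AMD IBS op PMU is registered. */
	int bit = format_bit("/sys/bus/event_source/devices/ibs_op/format/l3missonly");
	uint64_t config;

	if (bit < 0) {
		fprintf(stderr, "ibs_op l3missonly format not found (non-AMD system?)\n");
		return 1;
	}
	config = 1ULL << bit;
	printf("l3missonly is config bit %d; raw config = 0x%" PRIx64 "\n", bit, config);
	return 0;
}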
--- a/tools/perf/arch/x86/util/evsel.c
+++ b/tools/perf/arch/x86/util/evsel.c
@@ -6,6 +6,10 @@
 #include "util/pmu.h"
 #include "linux/string.h"
 #include "evsel.h"
+#include "util/debug.h"
+
+#define IBS_FETCH_L3MISSONLY   (1ULL << 59)
+#define IBS_OP_L3MISSONLY      (1ULL << 16)
 
 void arch_evsel__set_sample_weight(struct evsel *evsel)
 {
@@ -61,3 +65,51 @@ bool arch_evsel__must_be_in_group(const struct evsel *evsel)
 	       (strcasestr(evsel->name, "slots") ||
 		strcasestr(evsel->name, "topdown"));
 }
+
+static void ibs_l3miss_warn(void)
+{
+	pr_warning(
+"WARNING: Hw internally resets sampling period when L3 Miss Filtering is enabled\n"
+"and tagged operation does not cause L3 Miss. This causes sampling period skew.\n");
+}
+
+void arch__post_evsel_config(struct evsel *evsel, struct perf_event_attr *attr)
+{
+	struct perf_pmu *evsel_pmu, *ibs_fetch_pmu, *ibs_op_pmu;
+	static int warned_once;
+	/* 0: Uninitialized, 1: Yes, -1: No */
+	static int is_amd;
+
+	if (warned_once || is_amd == -1)
+		return;
+
+	if (!is_amd) {
+		struct perf_env *env = evsel__env(evsel);
+
+		if (!perf_env__cpuid(env) || !env->cpuid ||
+		    !strstarts(env->cpuid, "AuthenticAMD")) {
+			is_amd = -1;
+			return;
+		}
+		is_amd = 1;
+	}
+
+	evsel_pmu = evsel__find_pmu(evsel);
+	if (!evsel_pmu)
+		return;
+
+	ibs_fetch_pmu = perf_pmu__find("ibs_fetch");
+	ibs_op_pmu = perf_pmu__find("ibs_op");
+
+	if (ibs_fetch_pmu && ibs_fetch_pmu->type == evsel_pmu->type) {
+		if (attr->config & IBS_FETCH_L3MISSONLY) {
+			ibs_l3miss_warn();
+			warned_once = 1;
+		}
+	} else if (ibs_op_pmu && ibs_op_pmu->type == evsel_pmu->type) {
+		if (attr->config & IBS_OP_L3MISSONLY) {
+			ibs_l3miss_warn();
+			warned_once = 1;
+		}
+	}
+}
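The is_amd check above keys off perf's cached cpuid string, which on x86 starts with the CPUID vendor identifier, so matching the "AuthenticAMD" prefix identifies AMD CPUs. A standalone sketch of where that vendor string comes from, not part of this patch, using GCC's <cpuid.h> helper:

/* Sketch: read the CPUID leaf 0 vendor string (EBX, EDX, ECX order). */
#include <stdio.h>
#include <string.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	char vendor[13];

	if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
		return 1;

	memcpy(vendor + 0, &ebx, 4);
	memcpy(vendor + 4, &edx, 4);
	memcpy(vendor + 8, &ecx, 4);
	vendor[12] = '\0';

	printf("vendor: %s -> %s\n", vendor,
	       strcmp(vendor, "AuthenticAMD") == 0 ?
	       "AMD (IBS PMUs possible)" : "not AMD");
	return 0;
}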
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -1091,6 +1091,11 @@ void __weak arch_evsel__fixup_new_cycles(struct perf_event_attr *attr __maybe_unused)
 {
 }
 
+void __weak arch__post_evsel_config(struct evsel *evsel __maybe_unused,
+				    struct perf_event_attr *attr __maybe_unused)
+{
+}
+
 static void evsel__set_default_freq_period(struct record_opts *opts,
 					   struct perf_event_attr *attr)
 {
@@ -1366,6 +1371,8 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
 	 */
 	if (evsel__is_dummy_event(evsel))
 		evsel__reset_sample_bit(evsel, BRANCH_STACK);
+
+	arch__post_evsel_config(evsel, attr);
 }
 
 int evsel__set_filter(struct evsel *evsel, const char *filter)
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -297,6 +297,7 @@ void evsel__set_sample_id(struct evsel *evsel, bool use_sample_identifier);
 
 void arch_evsel__set_sample_weight(struct evsel *evsel);
 void arch_evsel__fixup_new_cycles(struct perf_event_attr *attr);
+void arch__post_evsel_config(struct evsel *evsel, struct perf_event_attr *attr);
 
 int evsel__set_filter(struct evsel *evsel, const char *filter);
 int evsel__append_tp_filter(struct evsel *evsel, const char *filter);
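The generic arch__post_evsel_config() added in util/evsel.c is an empty __weak default that the x86 definition above replaces at link time. A minimal two-file sketch of that weak-symbol hook pattern, with hypothetical names (post_config_hook, configure) not taken from perf itself:

/* generic.c: empty weak default plus the call site. */
#include <stdio.h>

__attribute__((weak)) void post_config_hook(void)
{
	/* default: do nothing */
}

void configure(void)
{
	puts("generic configuration");
	post_config_hook();	/* arch override runs here, if linked in */
}

int main(void)
{
	configure();
	return 0;
}

/* arch.c (optional): when compiled and linked in, the linker prefers
 * this strong definition over the weak stub above:
 *
 * #include <stdio.h>
 *
 * void post_config_hook(void)
 * {
 *	puts("arch-specific post-config");
 * }
 */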