perf/x86/intel: Support CPUID 10.ECX to disable fixed counters
With Architectural Performance Monitoring Version 5, the CPUID 10.ECX leaf enumerates the fixed counters. This extends the previous count to a bitmap, which allows even lower-numbered fixed counters to be disabled, for example by a hypervisor.

The existing intel_ctrl variable is used to remember the bitmask of the counters. All code that reads all counters is fixed up to check this extra bitmask.

Suggested-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Originally-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/1611873611-156687-6-git-send-email-kan.liang@linux.intel.com
commit 32451614da
parent 61b985e3e7
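As a rough stand-alone illustration of the enumeration change described in the changelog (not part of the patch, and purely a user-space sketch): it reads CPUID leaf 0xA with GCC/clang's <cpuid.h> and, on architectural perfmon version 5 or later, treats ECX as the fixed-counter availability bitmap rather than relying only on the EDX count. The field layout follows the Intel SDM; everything else here is illustrative.

/*
 * Hypothetical user-space sketch, not kernel code: decode CPUID.0AH the
 * way the patch does (ECX = fixed-counter bitmap on v5+).
 * Assumes an x86 CPU and GCC/clang's <cpuid.h>.
 */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0xa, &eax, &ebx, &ecx, &edx))
		return 1;

	unsigned int version   = eax & 0xff;	/* EAX[7:0]: arch perfmon version */
	unsigned int num_fixed = edx & 0x1f;	/* EDX[4:0]: fixed counter count  */

	printf("arch perfmon version: %u\n", version);
	if (version >= 5) {
		/* ECX is a bitmap: bit i set => fixed counter i available */
		printf("fixed counter bitmap: 0x%x\n", ecx);
		for (unsigned int i = 0; i < 32; i++)
			if (ecx & (1u << i))
				printf("  fixed counter %u available\n", i);
	} else {
		/* pre-v5: only a contiguous count of fixed counters is enumerated */
		printf("fixed counters: %u (contiguous)\n", num_fixed);
	}
	return 0;
}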
arch/x86/events/core.c

@@ -255,6 +255,8 @@ static bool check_hw_exists(void)
 		if (ret)
 			goto msr_fail;
 		for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
+			if (fixed_counter_disabled(i))
+				continue;
 			if (val & (0x03 << i*4)) {
 				bios_fail = 1;
 				val_fail = val;
@@ -1531,6 +1533,8 @@ void perf_event_print_debug(void)
 			cpu, idx, prev_left);
 	}
 	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
+		if (fixed_counter_disabled(idx))
+			continue;
 		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
 
 		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
@@ -2012,7 +2016,9 @@ static int __init init_hw_perf_events(void)
 	pr_info("... generic registers:      %d\n",     x86_pmu.num_counters);
 	pr_info("... value mask:             %016Lx\n", x86_pmu.cntval_mask);
 	pr_info("... max period:             %016Lx\n", x86_pmu.max_period);
-	pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_counters_fixed);
+	pr_info("... fixed-purpose events:   %lu\n",
+			hweight64((((1ULL << x86_pmu.num_counters_fixed) - 1)
+					<< INTEL_PMC_IDX_FIXED) & x86_pmu.intel_ctrl));
 	pr_info("... event mask:             %016Lx\n", x86_pmu.intel_ctrl);
 
 	if (!x86_pmu.read)
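The reworked pr_info() above no longer prints num_counters_fixed directly; it counts only the fixed counters whose bits survive in intel_ctrl. A stand-alone sketch of that arithmetic, with hweight64() stood in by a compiler builtin and the values invented for illustration:

/*
 * Stand-alone sketch of the counting logic in the pr_info() change above.
 * INTEL_PMC_IDX_FIXED is 32 in the kernel headers; __builtin_popcountll
 * stands in for hweight64(). Values are made up for illustration.
 */
#include <stdio.h>

#define INTEL_PMC_IDX_FIXED 32

int main(void)
{
	unsigned long long intel_ctrl = 0x5ULL << INTEL_PMC_IDX_FIXED; /* fixed 0 and 2 enabled */
	int num_counters_fixed = 3;	/* highest enumerated counter is 2 */

	unsigned long long mask =
		(((1ULL << num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED) & intel_ctrl;

	/* prints 2: counter 1 is masked off by intel_ctrl */
	printf("usable fixed-purpose events: %d\n", __builtin_popcountll(mask));
	return 0;
}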
arch/x86/events/intel/core.c

@@ -2723,8 +2723,11 @@ static void intel_pmu_reset(void)
 		wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
 		wrmsrl_safe(x86_pmu_event_addr(idx),  0ull);
 	}
-	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
+	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
+		if (fixed_counter_disabled(idx))
+			continue;
 		wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
+	}
 
 	if (ds)
 		ds->bts_index = ds->bts_buffer_base;
@@ -5042,7 +5045,7 @@ __init int intel_pmu_init(void)
 	union cpuid10_eax eax;
 	union cpuid10_ebx ebx;
 	struct event_constraint *c;
-	unsigned int unused;
+	unsigned int fixed_mask;
 	struct extra_reg *er;
 	bool pmem = false;
 	int version, i;
@@ -5064,7 +5067,7 @@ __init int intel_pmu_init(void)
 	 * Check whether the Architectural PerfMon supports
 	 * Branch Misses Retired hw_event or not.
 	 */
-	cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
+	cpuid(10, &eax.full, &ebx.full, &fixed_mask, &edx.full);
 	if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
 		return -ENODEV;
 
@@ -5088,12 +5091,15 @@ __init int intel_pmu_init(void)
 	 * Quirk: v2 perfmon does not report fixed-purpose events, so
 	 * assume at least 3 events, when not running in a hypervisor:
 	 */
-	if (version > 1) {
+	if (version > 1 && version < 5) {
 		int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);
 
 		x86_pmu.num_counters_fixed =
 			max((int)edx.split.num_counters_fixed, assume);
-	}
+
+		fixed_mask = (1L << x86_pmu.num_counters_fixed) - 1;
+	} else if (version >= 5)
+		x86_pmu.num_counters_fixed = fls(fixed_mask);
 
 	if (boot_cpu_has(X86_FEATURE_PDCM)) {
 		u64 capabilities;
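On the v5 path above, num_counters_fixed is derived from the bitmap with fls(), which returns the (1-based) position of the highest set bit, so a sparse mask still sizes the iteration range to cover the highest available counter. A small sketch, with fls() emulated by a compiler builtin and the mask value assumed:

/*
 * Sketch of the v5 path above: num_counters_fixed = fls(fixed_mask).
 * fls() is emulated with __builtin_clz; the mask value is hypothetical
 * (e.g. a hypervisor exposing fixed counters 0 and 2 but hiding 1).
 */
#include <stdio.h>

static int fls_emul(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	unsigned int fixed_mask = 0x5;	/* CPUID.0AH:ECX = 0b101 */
	int num_counters_fixed = fls_emul(fixed_mask);

	/* prints 3: loops must still iterate up to counter 2, skipping 1 */
	printf("num_counters_fixed = %d\n", num_counters_fixed);
	return 0;
}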
@@ -5680,8 +5686,7 @@ __init int intel_pmu_init(void)
 		x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
 	}
 
-	x86_pmu.intel_ctrl |=
-		((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
+	x86_pmu.intel_ctrl |= (u64)fixed_mask << INTEL_PMC_IDX_FIXED;
 
 	/* AnyThread may be deprecated on arch perfmon v5 or later */
 	if (x86_pmu.intel_cap.anythread_deprecated)
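The intel_ctrl layout relied on here mirrors IA32_PERF_GLOBAL_CTRL: general-purpose counter enable bits start at bit 0 and fixed-counter bits start at bit 32 (INTEL_PMC_IDX_FIXED), so shifting fixed_mask left by 32 drops it straight into the fixed-counter region. A toy illustration with assumed counter counts:

/*
 * Illustration only: how fixed_mask lands in the fixed-counter region
 * of intel_ctrl. Counter counts are assumed, not taken from real CPUID.
 */
#include <stdio.h>

#define INTEL_PMC_IDX_FIXED 32

int main(void)
{
	int num_counters = 8;			/* general-purpose counters   */
	unsigned long long fixed_mask = 0x7;	/* fixed counters 0-2 exposed */

	unsigned long long intel_ctrl = (1ULL << num_counters) - 1;	/* bits 0-7   */
	intel_ctrl |= fixed_mask << INTEL_PMC_IDX_FIXED;		/* bits 32-34 */

	printf("intel_ctrl = %#llx\n", intel_ctrl);	/* 0x7000000ff */
	return 0;
}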
@@ -5698,13 +5703,22 @@ __init int intel_pmu_init(void)
 		 * events to the generic counters.
 		 */
 		if (c->idxmsk64 & INTEL_PMC_MSK_TOPDOWN) {
+			/*
+			 * Disable topdown slots and metrics events,
+			 * if slots event is not in CPUID.
+			 */
+			if (!(INTEL_PMC_MSK_FIXED_SLOTS & x86_pmu.intel_ctrl))
+				c->idxmsk64 = 0;
 			c->weight = hweight64(c->idxmsk64);
 			continue;
 		}
 
-		if (c->cmask == FIXED_EVENT_FLAGS
-		    && c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES) {
-			c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
+		if (c->cmask == FIXED_EVENT_FLAGS) {
+			/* Disabled fixed counters which are not in CPUID */
+			c->idxmsk64 &= x86_pmu.intel_ctrl;
+
+			if (c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES)
+				c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
 		}
 		c->idxmsk64 &=
 			~(~0ULL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
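A worked example of the FIXED_EVENT_FLAGS branch above (all values assumed): masking idxmsk64 with intel_ctrl drops any fixed counter the CPUID bitmap withheld, a non-ref-cycles constraint then regains the general-purpose counters as a fallback, and the final mask is clipped to the enumerated counter range:

/*
 * Toy walk-through of the FIXED_EVENT_FLAGS branch above; the constants
 * (counter counts, which fixed counter is hidden) are assumed.
 */
#include <stdio.h>

#define INTEL_PMC_IDX_FIXED	32
#define NUM_COUNTERS		4	/* general-purpose counters, assumed */
#define NUM_COUNTERS_FIXED	3	/* fixed counters enumerated, assumed */

int main(void)
{
	/* hypervisor hides fixed counter 1: intel_ctrl fixed bits = 0b101 */
	unsigned long long intel_ctrl = 0x5ULL << INTEL_PMC_IDX_FIXED;

	/* constraint initially allows fixed counters 0-2 */
	unsigned long long idxmsk64 = 0x7ULL << INTEL_PMC_IDX_FIXED;

	idxmsk64 &= intel_ctrl;				/* drop hidden fixed counter 1 */
	idxmsk64 |= (1ULL << NUM_COUNTERS) - 1;		/* allow general counters too  */
	idxmsk64 &= ~(~0ULL << (INTEL_PMC_IDX_FIXED + NUM_COUNTERS_FIXED));

	printf("constraint mask = %#llx\n", idxmsk64);	/* 0x50000000f */
	return 0;
}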
arch/x86/events/perf_event.h

@@ -1068,6 +1068,11 @@ ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
 ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
 			  char *page);
 
+static inline bool fixed_counter_disabled(int i)
+{
+	return !(x86_pmu.intel_ctrl >> (i + INTEL_PMC_IDX_FIXED));
+}
+
 #ifdef CONFIG_CPU_SUP_AMD
 
 int amd_pmu_init(void);
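One reading of the new helper (an observation, not from the changelog): it does not test bit i in isolation; it reports a counter as disabled only when intel_ctrl has no fixed-counter bit set at index i or above. A stand-alone re-implementation with an assumed mask shows the effect:

/*
 * Stand-alone illustration of the helper's arithmetic (values assumed).
 * It returns true only when no fixed counter at index i or above is
 * enabled in intel_ctrl, i.e. it keys off the top of the mask rather
 * than testing bit i on its own.
 */
#include <stdio.h>
#include <stdbool.h>

#define INTEL_PMC_IDX_FIXED 32

static unsigned long long intel_ctrl = 0x7ULL << INTEL_PMC_IDX_FIXED; /* fixed 0-2 */

static bool fixed_counter_disabled(int i)
{
	return !(intel_ctrl >> (i + INTEL_PMC_IDX_FIXED));
}

int main(void)
{
	for (int i = 0; i < 5; i++)	/* prints 0 0 0 1 1 */
		printf("fixed counter %d disabled: %d\n", i, fixed_counter_disabled(i));
	return 0;
}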