// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for Intel CPUs
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "nested.h"
#include "pmu.h"

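/*
 * MSR_IA32_PMC0 and MSR_IA32_PERFCTR0 address the same counters; the PMCx
 * alias additionally accepts full-width writes. Their fixed offset is used
 * below as a flag bit to tell which alias the guest accessed.
 */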
#define MSR_PMC_FULL_WIDTH_BIT      (MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)

static struct kvm_event_hw_type_mapping intel_arch_events[] = {
	/* Index must match CPUID 0x0A.EBX bit vector */
	[0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES },
	[3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
	[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
	[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES },
};

/* mapping between fixed pmc index and intel_arch_events array */
static int fixed_pmc_events[] = {1, 0, 7};

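/*
 * Reprogram only the fixed counters whose control field in
 * IA32_FIXED_CTR_CTRL actually changed, then cache the new MSR value.
 */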
static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
	int i;

	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
		u8 new_ctrl = fixed_ctrl_field(data, i);
		u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
		struct kvm_pmc *pmc;

		pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);

		if (old_ctrl == new_ctrl)
			continue;

		__set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use);
		reprogram_fixed_counter(pmc, new_ctrl, i);
	}

	pmu->fixed_ctr_ctrl = data;
}

/* Called when the global control register has been updated. */
static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
	int bit;
	u64 diff = pmu->global_ctrl ^ data;

	pmu->global_ctrl = data;

	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		reprogram_counter(pmu, bit);
}

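/*
 * Map a guest event_select/unit_mask pair to a generic perf event type,
 * but only if the corresponding architectural event is advertised as
 * available in the guest's CPUID 0x0A.EBX bit vector.
 */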
static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
				      u8 event_select,
				      u8 unit_mask)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
		if (intel_arch_events[i].eventsel == event_select
		    && intel_arch_events[i].unit_mask == unit_mask
		    && (pmu->available_event_types & (1 << i)))
			break;

	if (i == ARRAY_SIZE(intel_arch_events))
		return PERF_COUNT_HW_MAX;

	return intel_arch_events[i].event_type;
}

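/*
 * Map a fixed counter index to its generic perf event type via the
 * fixed_pmc_events table; out-of-range indices return PERF_COUNT_HW_MAX.
 */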
static unsigned intel_find_fixed_event(int idx)
{
	u32 event;
	size_t size = ARRAY_SIZE(fixed_pmc_events);

	if (idx >= size)
		return PERF_COUNT_HW_MAX;

	event = fixed_pmc_events[array_index_nospec(idx, size)];
	return intel_arch_events[event].event_type;
}

/* Check if a PMC is enabled by comparing it with global_ctrl bits. */
static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}

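/*
 * Translate a global PMC index (GP counters first, fixed counters
 * starting at INTEL_PMC_IDX_FIXED) into the backing kvm_pmc.
 */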
static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	if (pmc_idx < INTEL_PMC_IDX_FIXED)
		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
				  MSR_P6_EVNTSEL0);
	else {
		u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;

		return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
	}
}

/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
static int intel_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);

	idx &= ~(3u << 30);

	return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
		(fixed && idx >= pmu->nr_arch_fixed_counters);
}

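/*
 * Decode the ECX operand of a guest RDPMC: bit 30 selects fixed vs. GP
 * counters and the low bits index the respective array, clamped with
 * array_index_nospec() to avoid speculative out-of-bounds access.
 */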
static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
					      unsigned int idx, u64 *mask)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);
	struct kvm_pmc *counters;
	unsigned int num_counters;

	idx &= ~(3u << 30);
	if (fixed) {
		counters = pmu->fixed_counters;
		num_counters = pmu->nr_arch_fixed_counters;
	} else {
		counters = pmu->gp_counters;
		num_counters = pmu->nr_arch_gp_counters;
	}
	if (idx >= num_counters)
		return NULL;
	*mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];
	return &counters[array_index_nospec(idx, num_counters)];
}

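/*
 * IA32_PERF_CAPABILITIES is only visible to the guest when its CPUID
 * advertises PDCM; otherwise report it as zero.
 */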
static inline u64 vcpu_get_perf_capabilities(struct kvm_vcpu *vcpu)
{
	if (!guest_cpuid_has(vcpu, X86_FEATURE_PDCM))
		return 0;

	return vcpu->arch.perf_capabilities;
}

static inline bool fw_writes_is_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu_get_perf_capabilities(vcpu) & PMU_CAP_FW_WRITES) != 0;
}

static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
{
	if (!fw_writes_is_enabled(pmu_to_vcpu(pmu)))
		return NULL;

	return get_gp_pmc(pmu, msr, MSR_IA32_PMC0);
}

bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu)
{
	/*
	 * As a first step, a guest can only enable the LBR feature if its
	 * CPU model is the same as the host's, because the LBR registers
	 * are passed through to the guest and they are model specific.
	 */
	return boot_cpu_data.x86_model == guest_cpuid_model(vcpu);
}

bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
{
	struct x86_pmu_lbr *lbr = vcpu_to_lbr_records(vcpu);

	return lbr->nr && (vcpu_get_perf_capabilities(vcpu) & PMU_CAP_LBR_FMT);
}

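/*
 * An MSR is an LBR MSR if it is LBR_SELECT, LBR_TOS, or falls within the
 * FROM/TO (and, when present, INFO) ranges reported by the host.
 */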
static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
{
	struct x86_pmu_lbr *records = vcpu_to_lbr_records(vcpu);
	bool ret = false;

	if (!intel_pmu_lbr_is_enabled(vcpu))
		return ret;

	ret = (index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS) ||
		(index >= records->from && index < records->from + records->nr) ||
		(index >= records->to && index < records->to + records->nr);

	if (!ret && records->info)
		ret = (index >= records->info && index < records->info + records->nr);

	return ret;
}

static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int ret;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_CORE_PERF_GLOBAL_CTRL:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		ret = pmu->version > 1;
		break;
	default:
		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
			get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
			get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr) ||
			intel_pmu_is_valid_lbr_msr(vcpu, msr);
		break;
	}

	return ret;
}

static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	pmc = get_fixed_pmc(pmu, msr);
	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);
	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);

	return pmc;
}

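/*
 * Release the guest LBR perf event, if one exists, and drop it from the
 * pmu's event count.
 */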
static inline void intel_pmu_release_guest_lbr_event(struct kvm_vcpu *vcpu)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	if (lbr_desc->event) {
		perf_event_release_kernel(lbr_desc->event);
		lbr_desc->event = NULL;
		vcpu_to_pmu(vcpu)->event_count--;
	}
}

int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct perf_event *event;

	/*
	 * The perf_event_attr is constructed in the minimal efficient way:
	 * - set 'pinned = true' to make it task pinned so that if another
	 *   cpu pinned event reclaims LBR, the event->oncpu will be set to -1;
	 * - set '.exclude_host = true' to record guest branch behavior;
	 *
	 * - set '.config = INTEL_FIXED_VLBR_EVENT' to indicate that host perf
	 *   should schedule the event without a real HW counter but a fake one;
	 *   check is_guest_lbr_event() and __intel_get_event_constraints();
	 *
	 * - set 'sample_type = PERF_SAMPLE_BRANCH_STACK' and
	 *   'branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK |
	 *   PERF_SAMPLE_BRANCH_USER' to configure it as an LBR callstack
	 *   event, which helps KVM to save/restore guest LBR records
	 *   during host context switches and reduces quite a lot of overhead;
	 *   check branch_user_callstack() and intel_pmu_lbr_sched_task();
	 */
	struct perf_event_attr attr = {
		.type = PERF_TYPE_RAW,
		.size = sizeof(attr),
		.config = INTEL_FIXED_VLBR_EVENT,
		.sample_type = PERF_SAMPLE_BRANCH_STACK,
		.pinned = true,
		.exclude_host = true,
		.branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK |
					PERF_SAMPLE_BRANCH_USER,
	};

	if (unlikely(lbr_desc->event)) {
		__set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
		return 0;
	}

	event = perf_event_create_kernel_counter(&attr, -1,
						current, NULL, NULL);
	if (IS_ERR(event)) {
		pr_debug_ratelimited("%s: failed %ld\n",
					__func__, PTR_ERR(event));
		return PTR_ERR(event);
	}
	lbr_desc->event = event;
	pmu->event_count++;
	__set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
	return 0;
}

/*
 * It's safe to access LBR MSRs from the guest when they have not been
 * passed through, since the host will restore or reset the LBR MSRs
 * when the guest LBR event is scheduled in.
 */
static bool intel_pmu_handle_lbr_msrs_access(struct kvm_vcpu *vcpu,
				     struct msr_data *msr_info, bool read)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
	u32 index = msr_info->index;

	if (!intel_pmu_is_valid_lbr_msr(vcpu, index))
		return false;

	if (!lbr_desc->event && intel_pmu_create_guest_lbr_event(vcpu) < 0)
		goto dummy;

	/*
	 * Disable irqs to ensure the LBR feature doesn't get reclaimed by the
	 * host at the time the value is read from the MSR, which avoids
	 * leaking the host LBR value to the guest. If LBR has been reclaimed,
	 * return 0 on guest reads.
	 */
	local_irq_disable();
	if (lbr_desc->event->state == PERF_EVENT_STATE_ACTIVE) {
		if (read)
			rdmsrl(index, msr_info->data);
		else
			wrmsrl(index, msr_info->data);
		__set_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use);
		local_irq_enable();
		return true;
	}
	clear_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use);
	local_irq_enable();

dummy:
	if (read)
		msr_info->data = 0;
	return true;
}

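/*
 * Read a PMU MSR on behalf of the guest or host userspace; returns 0 on
 * success and 1 if the MSR is not handled here.
 */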
static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		msr_info->data = pmu->fixed_ctr_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		msr_info->data = pmu->global_status;
		return 0;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		msr_info->data = pmu->global_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		msr_info->data = pmu->global_ovf_ctrl;
		return 0;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
			u64 val = pmc_read_counter(pmc);
			msr_info->data =
				val & pmu->counter_bitmask[KVM_PMC_GP];
			return 0;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			u64 val = pmc_read_counter(pmc);
			msr_info->data =
				val & pmu->counter_bitmask[KVM_PMC_FIXED];
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			msr_info->data = pmc->eventsel;
			return 0;
		} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, true))
			return 0;
	}

	return 1;
}

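/*
 * Write a PMU MSR on behalf of the guest or host userspace. Guest counter
 * writes through the legacy aliases are sign-extended from 32 bits, while
 * writes to the full-width aliases must fit within the counter width.
 */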
static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		if (pmu->fixed_ctr_ctrl == data)
			return 0;
		if (!(data & 0xfffffffffffff444ull)) {
			reprogram_fixed_counters(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		if (msr_info->host_initiated) {
			pmu->global_status = data;
			return 0;
		}
		break; /* RO MSR */
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (pmu->global_ctrl == data)
			return 0;
		if (kvm_valid_perf_global_ctrl(pmu, data)) {
			global_ctrl_changed(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		if (!(data & pmu->global_ovf_ctrl_mask)) {
			if (!msr_info->host_initiated)
				pmu->global_status &= ~data;
			pmu->global_ovf_ctrl = data;
			return 0;
		}
		break;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
			if ((msr & MSR_PMC_FULL_WIDTH_BIT) &&
			    (data & ~pmu->counter_bitmask[KVM_PMC_GP]))
				return 1;
			if (!msr_info->host_initiated &&
			    !(msr & MSR_PMC_FULL_WIDTH_BIT))
				data = (s64)(s32)data;
			pmc->counter += data - pmc_read_counter(pmc);
			if (pmc->perf_event)
				perf_event_period(pmc->perf_event,
						  get_sample_period(pmc, data));
			return 0;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			pmc->counter += data - pmc_read_counter(pmc);
			if (pmc->perf_event)
				perf_event_period(pmc->perf_event,
						  get_sample_period(pmc, data));
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			if (data == pmc->eventsel)
				return 0;
			if (!(data & pmu->reserved_bits)) {
				reprogram_gp_counter(pmc, data);
				return 0;
			}
		} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false))
			return 0;
	}

	return 1;
}

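/*
 * Recompute the vPMU configuration from the guest's CPUID leaf 0xA,
 * clamping every field against the host's PMU capabilities.
 */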
static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	struct x86_pmu_capability x86_pmu;
	struct kvm_cpuid_entry2 *entry;
	union cpuid10_eax eax;
	union cpuid10_edx edx;

	pmu->nr_arch_gp_counters = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->counter_bitmask[KVM_PMC_GP] = 0;
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->version = 0;
	pmu->reserved_bits = 0xffffffff00200000ull;

	entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
	if (!entry)
		return;
	eax.full = entry->eax;
	edx.full = entry->edx;

	pmu->version = eax.split.version_id;
	if (!pmu->version)
		return;

	perf_get_x86_pmu_capability(&x86_pmu);

	pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
					 x86_pmu.num_counters_gp);
	eax.split.bit_width = min_t(int, eax.split.bit_width, x86_pmu.bit_width_gp);
	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
	eax.split.mask_length = min_t(int, eax.split.mask_length, x86_pmu.events_mask_len);
	pmu->available_event_types = ~entry->ebx &
					((1ull << eax.split.mask_length) - 1);

	if (pmu->version == 1) {
		pmu->nr_arch_fixed_counters = 0;
	} else {
		pmu->nr_arch_fixed_counters =
			min_t(int, edx.split.num_counters_fixed,
			      x86_pmu.num_counters_fixed);
		edx.split.bit_width_fixed = min_t(int,
			edx.split.bit_width_fixed, x86_pmu.bit_width_fixed);
		pmu->counter_bitmask[KVM_PMC_FIXED] =
			((u64)1 << edx.split.bit_width_fixed) - 1;
	}

	pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
	pmu->global_ctrl_mask = ~pmu->global_ctrl;
	pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask
			& ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
			    MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
	if (vmx_pt_mode_is_host_guest())
		pmu->global_ovf_ctrl_mask &=
				~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;

	entry = kvm_find_cpuid_entry(vcpu, 7, 0);
	if (entry &&
	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
	    (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
		pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;

	bitmap_set(pmu->all_valid_pmc_idx,
		0, pmu->nr_arch_gp_counters);
	bitmap_set(pmu->all_valid_pmc_idx,
		INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);

	nested_vmx_pmu_entry_exit_ctls_update(vcpu);

	if (intel_pmu_lbr_is_compatible(vcpu))
		x86_perf_get_lbr(&lbr_desc->records);
	else
		lbr_desc->records.nr = 0;

	if (lbr_desc->records.nr)
		bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_IDX_FIXED_VLBR, 1);
}

static void intel_pmu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
		pmu->gp_counters[i].current_config = 0;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
		pmu->fixed_counters[i].vcpu = vcpu;
		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
		pmu->fixed_counters[i].current_config = 0;
	}

	vcpu->arch.perf_capabilities = vmx_get_perf_capabilities();
	lbr_desc->records.nr = 0;
	lbr_desc->event = NULL;
	lbr_desc->msr_passthrough = false;
}

static void intel_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = NULL;
	int i;

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmc = &pmu->fixed_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = 0;
	}

	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
		pmu->global_ovf_ctrl = 0;

	intel_pmu_release_guest_lbr_event(vcpu);
}

/*
 * Emulate LBR_On_PMI behavior for 1 < pmu.version < 4.
 *
 * If Freeze_LBR_On_PMI = 1, the LBR is frozen on PMI and KVM emulates
 * the freeze by clearing the LBR bit (bit 0) in IA32_DEBUGCTL.
 *
 * The guest needs to re-enable LBR to resume branch recording.
 */
static void intel_pmu_legacy_freezing_lbrs_on_pmi(struct kvm_vcpu *vcpu)
{
	u64 data = vmcs_read64(GUEST_IA32_DEBUGCTL);

	if (data & DEBUGCTLMSR_FREEZE_LBRS_ON_PMI) {
		data &= ~DEBUGCTLMSR_LBR;
		vmcs_write64(GUEST_IA32_DEBUGCTL, data);
	}
}

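/*
 * On PMI delivery, emulate the legacy freeze-LBRs-on-PMI behavior for
 * guests with an LBR-enabled vPMU of version 2 or 3.
 */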
static void intel_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
	u8 version = vcpu_to_pmu(vcpu)->version;

	if (!intel_pmu_lbr_is_enabled(vcpu))
		return;

	if (version > 1 && version < 4)
		intel_pmu_legacy_freezing_lbrs_on_pmi(vcpu);
}

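/* Toggle MSR interception for all LBR-related MSRs in one pass. */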
static void vmx_update_intercept_for_lbr_msrs(struct kvm_vcpu *vcpu, bool set)
{
	struct x86_pmu_lbr *lbr = vcpu_to_lbr_records(vcpu);
	int i;

	for (i = 0; i < lbr->nr; i++) {
		vmx_set_intercept_for_msr(vcpu, lbr->from + i, MSR_TYPE_RW, set);
		vmx_set_intercept_for_msr(vcpu, lbr->to + i, MSR_TYPE_RW, set);
		if (lbr->info)
			vmx_set_intercept_for_msr(vcpu, lbr->info + i, MSR_TYPE_RW, set);
	}

	vmx_set_intercept_for_msr(vcpu, MSR_LBR_SELECT, MSR_TYPE_RW, set);
	vmx_set_intercept_for_msr(vcpu, MSR_LBR_TOS, MSR_TYPE_RW, set);
}

static inline void vmx_disable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	if (!lbr_desc->msr_passthrough)
		return;

	vmx_update_intercept_for_lbr_msrs(vcpu, true);
	lbr_desc->msr_passthrough = false;
}

static inline void vmx_enable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	if (lbr_desc->msr_passthrough)
		return;

	vmx_update_intercept_for_lbr_msrs(vcpu, false);
	lbr_desc->msr_passthrough = true;
}

/*
 * Higher priority host perf events (e.g. cpu pinned) could reclaim the
 * pmu resources (e.g. LBR) that were assigned to the guest. This is
 * usually done via IPI calls (more details in perf_install_in_context).
 *
 * Before entering non-root mode (with irqs disabled here), double-check
 * that the pmu features enabled for the guest have not been reclaimed
 * by higher priority host events. Otherwise, disallow the vcpu's access
 * to the reclaimed features.
 */
void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	if (!lbr_desc->event) {
		vmx_disable_lbr_msrs_passthrough(vcpu);
		if (vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR)
			goto warn;
		if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use))
			goto warn;
		return;
	}

	if (lbr_desc->event->state < PERF_EVENT_STATE_ACTIVE) {
		vmx_disable_lbr_msrs_passthrough(vcpu);
		__clear_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
		goto warn;
	} else
		vmx_enable_lbr_msrs_passthrough(vcpu);

	return;

warn:
	pr_warn_ratelimited("kvm: vcpu-%d: fail to passthrough LBR.\n",
		vcpu->vcpu_id);
}

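/*
 * PMU callback invoked from the vPMU cleanup path: release the vLBR
 * perf event once the guest has cleared DEBUGCTL.LBR.
 */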
static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
{
	if (!(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR))
		intel_pmu_release_guest_lbr_event(vcpu);
}

struct kvm_pmu_ops intel_pmu_ops = {
	.find_arch_event = intel_find_arch_event,
	.find_fixed_event = intel_find_fixed_event,
	.pmc_is_enabled = intel_pmc_is_enabled,
	.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
	.rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = intel_msr_idx_to_pmc,
	.is_valid_rdpmc_ecx = intel_is_valid_rdpmc_ecx,
	.is_valid_msr = intel_is_valid_msr,
	.get_msr = intel_pmu_get_msr,
	.set_msr = intel_pmu_set_msr,
	.refresh = intel_pmu_refresh,
	.init = intel_pmu_init,
	.reset = intel_pmu_reset,
	.deliver_pmi = intel_pmu_deliver_pmi,
	.cleanup = intel_pmu_cleanup,
};