KVM: VMX: Directly query Intel PT mode when refreshing PMUs

Use vmx_pt_mode_is_host_guest() in intel_pmu_refresh() instead of
bouncing through kvm_x86_ops->pt_supported, and remove ->pt_supported()
as the PMU code was the last remaining user.

Opportunistically clean up the wording of a comment that referenced
kvm_x86_ops->pt_supported().

No functional change intended.

Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
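
For reference, vmx_pt_mode_is_host_guest() is a trivial inline wrapper around the
VMX-wide pt_mode setting. A minimal sketch of its shape, assuming the definition in
arch/x86/kvm/vmx/capabilities.h at the time of this series (shown for context only,
not part of this diff):

	/* Sketch: pt_mode is VMX module state set during hardware setup. */
	static inline bool vmx_pt_mode_is_host_guest(void)
	{
		return pt_mode == PT_MODE_HOST_GUEST;
	}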
Sean Christopherson, 2020-03-02 15:57:00 -08:00 (committed by Paolo Bonzini)
parent 7b874c26a6
commit a1bead2aba
5 changed files with 4 additions and 20 deletions

File: arch/x86/include/asm/kvm_host.h

@@ -1176,8 +1176,6 @@ struct kvm_x86_ops {
 	void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu,
 		enum exit_fastpath_completion *exit_fastpath);
 
-	bool (*pt_supported)(void);
-
 	int (*check_nested_events)(struct kvm_vcpu *vcpu);
 	void (*request_immediate_exit)(struct kvm_vcpu *vcpu);

File: arch/x86/kvm/svm.c

@@ -6074,11 +6074,6 @@ static int svm_get_lpage_level(void)
 	return PT_PDPE_LEVEL;
 }
 
-static bool svm_pt_supported(void)
-{
-	return false;
-}
-
 static bool svm_has_wbinvd_exit(void)
 {
 	return true;
@@ -7440,8 +7435,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.cpuid_update = svm_cpuid_update,
 
-	.pt_supported = svm_pt_supported,
-
 	.set_supported_cpuid = svm_set_supported_cpuid,
 
 	.has_wbinvd_exit = svm_has_wbinvd_exit,

File: arch/x86/kvm/vmx/pmu_intel.c

@@ -335,7 +335,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 	pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask
 			& ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
 			    MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
-	if (kvm_x86_ops->pt_supported())
+	if (vmx_pt_mode_is_host_guest())
 		pmu->global_ovf_ctrl_mask &=
 			~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;
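
Worth noting: calling a VMX-only helper directly from the PMU code is fine here
because pmu_intel.c is built only into the Intel side of KVM (kvm-intel), so this
path is never reached on SVM.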

File: arch/x86/kvm/vmx/vmx.c

@@ -6306,11 +6306,6 @@ static bool vmx_has_emulated_msr(int index)
 	}
 }
 
-static bool vmx_pt_supported(void)
-{
-	return vmx_pt_mode_is_host_guest();
-}
-
 static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
 {
 	u32 exit_intr_info;
@@ -7945,7 +7940,6 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.check_intercept = vmx_check_intercept,
 	.handle_exit_irqoff = vmx_handle_exit_irqoff,
 
-	.pt_supported = vmx_pt_supported,
 	.request_immediate_exit = vmx_request_immediate_exit,

File: arch/x86/kvm/x86.c

@@ -2820,10 +2820,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		    !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
 			return 1;
 		/*
-		 * We do support PT if kvm_x86_ops->pt_supported(), but we do
-		 * not support IA32_XSS[bit 8]. Guests will have to use
-		 * RDMSR/WRMSR rather than XSAVES/XRSTORS to save/restore PT
-		 * MSRs.
+		 * KVM supports exposing PT to the guest, but does not support
+		 * IA32_XSS[bit 8]. Guests have to use RDMSR/WRMSR rather than
+		 * XSAVES/XRSTORS to save/restore PT MSRs.
 		 */
 		if (data != 0)
 			return 1;