KVM: x86: hyper-v: Cache guest CPUID leaves determining features availability

Limiting exposed Hyper-V features requires a fast way to check whether a
particular feature is exposed in guest-visible CPUID or not. To avoid
looping through all CPUID entries on every hypercall/MSR access, cache
the required leaves on CPUID update.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20210521095204.2161214-4-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Vitaly Kuznetsov authored 2021-05-21 11:51:37 +02:00, committed by Paolo Bonzini
parent 644f706719
commit 10d7bf1e46
2 changed files with 47 additions and 10 deletions
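
The payoff of the cache is that a guest-feature test on a hot path collapses
to a single AND against a stashed register value instead of a
kvm_find_cpuid_entry() walk. As a minimal sketch of the intended access
pattern (the hv_check_msr_access() helper only lands in later patches of this
series; the cases shown here are illustrative, not part of this commit):

	/*
	 * Illustrative sketch: gate a Hyper-V MSR on a cached CPUID feature
	 * bit. HV_MSR_HYPERCALL_AVAILABLE lives in HYPERV_CPUID_FEATURES.EAX.
	 */
	static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr)
	{
		/* Only restrict the guest when userspace enabled enforcement. */
		if (!hv_vcpu->enforce_cpuid)
			return true;

		switch (msr) {
		case HV_X64_MSR_GUEST_OS_ID:
		case HV_X64_MSR_HYPERCALL:
			/* One AND on the cached register, no CPUID walk. */
			return hv_vcpu->cpuid_cache.features_eax &
				HV_MSR_HYPERCALL_AVAILABLE;
		default:
			break;
		}

		return true;
	}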

--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h

@@ -544,6 +544,14 @@ struct kvm_vcpu_hv {
 	DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
 	cpumask_t tlb_flush;
 	bool enforce_cpuid;
+	struct {
+		u32 features_eax; /* HYPERV_CPUID_FEATURES.EAX */
+		u32 features_ebx; /* HYPERV_CPUID_FEATURES.EBX */
+		u32 features_edx; /* HYPERV_CPUID_FEATURES.EDX */
+		u32 enlightenments_eax; /* HYPERV_CPUID_ENLIGHTMENT_INFO.EAX */
+		u32 enlightenments_ebx; /* HYPERV_CPUID_ENLIGHTMENT_INFO.EBX */
+		u32 syndbg_cap_eax; /* HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES.EAX */
+	} cpuid_cache;
 };
 
 /* Xen HVM per vcpu emulation context */
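
Each cpuid_cache field mirrors one architectural register of a guest-visible
Hyper-V CPUID leaf, so a consumer picks the field matching the leaf/register
its feature bit is architecturally defined in. For example,
HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED sits in HYPERV_CPUID_ENLIGHTMENT_INFO.EAX,
so a hypothetical accessor (not part of this commit) would read:

	/* Hypothetical helper: is the remote-TLB-flush enlightenment exposed? */
	static inline bool hv_remote_flush_recommended(struct kvm_vcpu_hv *hv_vcpu)
	{
		return hv_vcpu->cpuid_cache.enlightenments_eax &
			HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
	}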

--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c

@@ -274,15 +274,10 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
 
 static bool kvm_hv_is_syndbg_enabled(struct kvm_vcpu *vcpu)
 {
-	struct kvm_cpuid_entry2 *entry;
-
-	entry = kvm_find_cpuid_entry(vcpu,
-				     HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES,
-				     0);
-	if (!entry)
-		return false;
+	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
 
-	return entry->eax & HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
+	return hv_vcpu->cpuid_cache.syndbg_cap_eax &
+		HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
 }
 
 static int kvm_hv_syndbg_complete_userspace(struct kvm_vcpu *vcpu)
@@ -1845,12 +1840,46 @@ ret_success:
 void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *entry;
+	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
 
 	entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_INTERFACE, 0);
-	if (entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX)
+	if (entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX) {
 		vcpu->arch.hyperv_enabled = true;
-	else
+	} else {
 		vcpu->arch.hyperv_enabled = false;
+		return;
+	}
+
+	if (!to_hv_vcpu(vcpu) && kvm_hv_vcpu_init(vcpu))
+		return;
+
+	hv_vcpu = to_hv_vcpu(vcpu);
+
+	entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES, 0);
+	if (entry) {
+		hv_vcpu->cpuid_cache.features_eax = entry->eax;
+		hv_vcpu->cpuid_cache.features_ebx = entry->ebx;
+		hv_vcpu->cpuid_cache.features_edx = entry->edx;
+	} else {
+		hv_vcpu->cpuid_cache.features_eax = 0;
+		hv_vcpu->cpuid_cache.features_ebx = 0;
+		hv_vcpu->cpuid_cache.features_edx = 0;
+	}
+
+	entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_ENLIGHTMENT_INFO, 0);
+	if (entry) {
+		hv_vcpu->cpuid_cache.enlightenments_eax = entry->eax;
+		hv_vcpu->cpuid_cache.enlightenments_ebx = entry->ebx;
+	} else {
+		hv_vcpu->cpuid_cache.enlightenments_eax = 0;
+		hv_vcpu->cpuid_cache.enlightenments_ebx = 0;
+	}
+
+	entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES, 0);
+	if (entry)
+		hv_vcpu->cpuid_cache.syndbg_cap_eax = entry->eax;
+	else
+		hv_vcpu->cpuid_cache.syndbg_cap_eax = 0;
 }
 
 int kvm_hv_set_enforce_cpuid(struct kvm_vcpu *vcpu, bool enforce)
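
With kvm_hv_set_cpuid() refreshing the cache on every guest CPUID update,
hypercall gating in the follow-up patches becomes equally cheap. A sketch of
that pattern (the hv_check_hcall_access() shape mirrors the later patches in
this series; the cases shown are abbreviated and illustrative):

	static bool hv_check_hcall_access(struct kvm_vcpu_hv *hv_vcpu, u16 code)
	{
		/* Without enforcement, all hypercalls remain allowed. */
		if (!hv_vcpu->enforce_cpuid)
			return true;

		switch (code) {
		case HVCALL_POST_MESSAGE:
			/* Partition privileges sit in HYPERV_CPUID_FEATURES.EBX. */
			return hv_vcpu->cpuid_cache.features_ebx & HV_POST_MESSAGES;
		case HVCALL_SIGNAL_EVENT:
			return hv_vcpu->cpuid_cache.features_ebx & HV_SIGNAL_EVENTS;
		default:
			break;
		}

		return true;
	}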