KVM: x86: do not scan IRR twice on APICv vmentry

Calls to apic_find_highest_irr are scanning IRR twice, once
in vmx_sync_pir_from_irr and once in apic_search_irr. Change
sync_pir_to_irr to get the new maximum IRR from kvm_apic_update_irr;
now that it does the computation, it can also do the RVI write. In
order to avoid complications in svm.c, make the callback optional.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
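In short: the vendor callback now performs the single IRR scan itself and reports the result, and callers fall back to the generic scan when the callback is absent. Below is a rough, standalone sketch of that calling pattern; it is illustrative only, and helper names such as highest_pending_irr and generic_find_highest_irr are stand-ins, not kernel functions.

/*
 * Illustrative sketch of the pattern this commit introduces: the
 * (optional) vendor hook syncs PIR into IRR once and returns the
 * highest pending vector, so callers need not rescan the IRR.
 * Standalone model, not kernel code; all names are stand-ins.
 */
#include <stdio.h>

struct vcpu { int apicv_active; };

/* stand-in for apic_find_highest_irr(): the generic IRR scan */
static int generic_find_highest_irr(struct vcpu *v) { return 0x31; }

/* stand-in for vmx_sync_pir_to_irr(): syncs PIR and returns max IRR */
static int vmx_sync_pir_to_irr(struct vcpu *v) { return 0x41; }

/* NULL when APICv is unsupported, as svm.c and !cpu_has_vmx_apicv() now leave it */
static int (*sync_pir_to_irr)(struct vcpu *v) = vmx_sync_pir_to_irr;

static int highest_pending_irr(struct vcpu *v)
{
	/* one scan: the hook both syncs PIR->IRR and reports the max */
	if (sync_pir_to_irr && v->apicv_active)
		return sync_pir_to_irr(v);
	return generic_find_highest_irr(v);
}

int main(void)
{
	struct vcpu v = { .apicv_active = 1 };
	printf("max irr = 0x%x\n", highest_pending_irr(&v));
	return 0;
}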
commit 76dfafd536
parent 3d92789f69
arch/x86/include/asm/kvm_host.h
@@ -969,7 +969,7 @@ struct kvm_x86_ops {
 	void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set);
 	void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
 	void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
-	void (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
+	int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
 	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
 	int (*get_tdp_level)(void);
 	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
arch/x86/kvm/lapic.c
@@ -515,6 +515,7 @@ int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
 	 */
 	return apic_find_highest_irr(vcpu->arch.apic);
 }
+EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);
 
 static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 			     int vector, int level, int trig_mode,
@@ -580,9 +581,10 @@ static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
 static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
 {
 	int highest_irr;
-	if (apic->vcpu->arch.apicv_active)
-		kvm_x86_ops->sync_pir_to_irr(apic->vcpu);
-	highest_irr = apic_find_highest_irr(apic);
+	if (kvm_x86_ops->sync_pir_to_irr && apic->vcpu->arch.apicv_active)
+		highest_irr = kvm_x86_ops->sync_pir_to_irr(apic->vcpu);
+	else
+		highest_irr = apic_find_highest_irr(apic);
 	if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr)
 		return -1;
 	return highest_irr;
arch/x86/kvm/svm.c
@@ -4359,11 +4359,6 @@ static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
 	return;
 }
 
-static void svm_sync_pir_to_irr(struct kvm_vcpu *vcpu)
-{
-	return;
-}
-
 static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
 {
 	kvm_lapic_set_irr(vec, vcpu->arch.apic);
@@ -5373,7 +5368,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.get_enable_apicv = svm_get_enable_apicv,
 	.refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl,
 	.load_eoi_exitmap = svm_load_eoi_exitmap,
-	.sync_pir_to_irr = svm_sync_pir_to_irr,
 	.hwapic_irr_update = svm_hwapic_irr_update,
 	.hwapic_isr_update = svm_hwapic_isr_update,
 	.apicv_post_state_restore = avic_post_state_restore,
arch/x86/kvm/vmx.c
@@ -6649,8 +6649,10 @@ static __init int hardware_setup(void)
 	if (!cpu_has_vmx_ple())
 		ple_gap = 0;
 
-	if (!cpu_has_vmx_apicv())
+	if (!cpu_has_vmx_apicv()) {
 		enable_apicv = 0;
+		kvm_x86_ops->sync_pir_to_irr = NULL;
+	}
 
 	if (cpu_has_vmx_tsc_scaling()) {
 		kvm_has_tsc_control = true;
@@ -8722,20 +8724,25 @@ static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
 	}
 }
 
-static void vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
+static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	int max_irr;
 
-	if (!pi_test_on(&vmx->pi_desc))
-		return;
-
-	pi_clear_on(&vmx->pi_desc);
-	/*
-	 * IOMMU can write to PIR.ON, so the barrier matters even on UP.
-	 * But on x86 this is just a compiler barrier anyway.
-	 */
-	smp_mb__after_atomic();
-	kvm_apic_update_irr(vcpu, vmx->pi_desc.pir);
+	WARN_ON(!vcpu->arch.apicv_active);
+	if (pi_test_on(&vmx->pi_desc)) {
+		pi_clear_on(&vmx->pi_desc);
+		/*
+		 * IOMMU can write to PIR.ON, so the barrier matters even on UP.
+		 * But on x86 this is just a compiler barrier anyway.
+		 */
+		smp_mb__after_atomic();
+		max_irr = kvm_apic_update_irr(vcpu, vmx->pi_desc.pir);
+	} else {
+		max_irr = kvm_lapic_find_highest_irr(vcpu);
+	}
+	vmx_hwapic_irr_update(vcpu, max_irr);
+	return max_irr;
 }
 
 static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
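For context, kvm_apic_update_irr() folds the 256-bit posted-interrupt request bitmap (PIR) into the IRR; since it already touches every word, it can report the highest pending vector essentially for free, which is what lets vmx_sync_pir_to_irr() above skip the second scan and do the RVI write itself. A simplified, standalone sketch of that idea follows; update_irr is a stand-in name, and this is not the kernel's implementation.

/*
 * Simplified model of "merge PIR into IRR and return the max vector".
 * The real kvm_apic_update_irr() works on the virtual-APIC page and
 * uses atomics, since the IOMMU may write PIR concurrently.
 */
#include <stdio.h>
#include <stdint.h>

static int update_irr(uint32_t pir[8], uint32_t irr[8])
{
	int max_irr = -1;

	for (int i = 0; i < 8; i++) {
		irr[i] |= pir[i];	/* move pending bits into the IRR */
		pir[i] = 0;
		if (irr[i])		/* highest set bit in this 32-bit word */
			max_irr = i * 32 + 31 - __builtin_clz(irr[i]);
	}
	return max_irr;
}

int main(void)
{
	uint32_t pir[8] = { 0 }, irr[8] = { 0 };

	pir[2] = 1u << 5;		/* vector 0x45 pending in the PIR */
	printf("max irr = 0x%x\n", update_irr(pir, irr));
	return 0;
}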
arch/x86/kvm/x86.c
@@ -2909,7 +2909,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
 				    struct kvm_lapic_state *s)
 {
-	if (vcpu->arch.apicv_active)
+	if (kvm_x86_ops->sync_pir_to_irr && vcpu->arch.apicv_active)
 		kvm_x86_ops->sync_pir_to_irr(vcpu);
 
 	return kvm_apic_get_state(vcpu, s);
@@ -6659,7 +6659,7 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
 	if (irqchip_split(vcpu->kvm))
 		kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
 	else {
-		if (vcpu->arch.apicv_active)
+		if (kvm_x86_ops->sync_pir_to_irr && vcpu->arch.apicv_active)
 			kvm_x86_ops->sync_pir_to_irr(vcpu);
 		kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
 	}
@@ -6822,11 +6822,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		 * Update architecture specific hints for APIC
 		 * virtual interrupt delivery.
 		 */
-		if (vcpu->arch.apicv_active) {
+		if (kvm_x86_ops->sync_pir_to_irr && vcpu->arch.apicv_active)
 			kvm_x86_ops->sync_pir_to_irr(vcpu);
-			kvm_x86_ops->hwapic_irr_update(vcpu,
-				kvm_lapic_find_highest_irr(vcpu));
-		}
 	}
 
 	if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {