KVM: nVMX: Fold requested virtual interrupt check into has_nested_events()
commit 321ef62b0c upstream.
Check for a Requested Virtual Interrupt, i.e. a virtual interrupt that is
pending delivery, in vmx_has_nested_events() and drop the one-off
kvm_x86_ops.guest_apic_has_interrupt() hook.
In addition to dropping a superfluous hook, this fixes a bug where KVM
would incorrectly treat virtual interrupts _for L2_ as always enabled due
to kvm_arch_interrupt_allowed(), by way of vmx_interrupt_blocked(),
treating IRQs as enabled if L2 is active and vmcs12 is configured to exit
on IRQs, i.e. KVM would treat a virtual interrupt for L2 as a valid wake
event based on L1's IRQ blocking status.
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/20240607172609.3205077-6-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 65d5d97e75
commit be7486f6e0
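
Before the diff, a standalone sketch of the priority-class comparison at the heart of this change: a Requested Virtual Interrupt (RVI) is pending delivery only when its 4-bit priority class out-ranks the class in the Virtual Processor Priority Register (VPPR). This is plain C for illustration, not kernel code; only the 0xf0 class mask and the RVI-vs-VPPR comparison mirror the hunks below, and the helper name and example vectors are made up.

/*
 * Illustrative only: the (rvi & 0xf0) > (vppr & 0xf0) class comparison
 * that this commit folds into vmx_has_nested_events().
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool rvi_outranks_vppr(uint8_t rvi, uint8_t vppr)
{
	/* Bits 7:4 of a vector / of the PPR form the priority class. */
	return (rvi & 0xf0) > (vppr & 0xf0);
}

int main(void)
{
	/* Hypothetical values: RVI vector 0x51 (class 5) vs. VPPR 0x40
	 * (class 4): deliverable, so it should count as a wake event. */
	printf("class 5 vs 4 -> %d\n", rvi_outranks_vppr(0x51, 0x40));

	/* RVI 0x45 (class 4) vs. VPPR 0x40 (class 4): blocked by
	 * priority, so it must not wake the vCPU. */
	printf("class 4 vs 4 -> %d\n", rvi_outranks_vppr(0x45, 0x40));
	return 0;
}
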
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -85,7 +85,6 @@ KVM_X86_OP_OPTIONAL(update_cr8_intercept)
 KVM_X86_OP(refresh_apicv_exec_ctrl)
 KVM_X86_OP_OPTIONAL(hwapic_irr_update)
 KVM_X86_OP_OPTIONAL(hwapic_isr_update)
-KVM_X86_OP_OPTIONAL_RET0(guest_apic_has_interrupt)
 KVM_X86_OP_OPTIONAL(load_eoi_exitmap)
 KVM_X86_OP_OPTIONAL(set_virtual_apic_mode)
 KVM_X86_OP_OPTIONAL(set_apic_access_page_addr)
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1714,7 +1714,6 @@ struct kvm_x86_ops {
 	void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
 	void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
 	void (*hwapic_isr_update)(int isr);
-	bool (*guest_apic_has_interrupt)(struct kvm_vcpu *vcpu);
 	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
 	void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
 	void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu);
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -97,7 +97,6 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
 	.required_apicv_inhibits = VMX_REQUIRED_APICV_INHIBITS,
 	.hwapic_irr_update = vmx_hwapic_irr_update,
 	.hwapic_isr_update = vmx_hwapic_isr_update,
-	.guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
 	.sync_pir_to_irr = vmx_sync_pir_to_irr,
 	.deliver_interrupt = vmx_deliver_interrupt,
 	.dy_apicv_has_pending_interrupt = pi_has_pending_interrupt,
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -4060,6 +4060,10 @@ static bool vmx_has_nested_events(struct kvm_vcpu *vcpu, bool for_injection)
 
 		vppr = *((u32 *)(vapic + APIC_PROCPRI));
 
+		max_irr = vmx_get_rvi();
+		if ((max_irr & 0xf0) > (vppr & 0xf0))
+			return true;
+
 		if (vmx->nested.pi_pending && vmx->nested.pi_desc &&
 		    pi_test_on(vmx->nested.pi_desc)) {
 			max_irr = pi_find_highest_vector(vmx->nested.pi_desc);
@@ -4108,26 +4108,6 @@ void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu)
 	}
 }
 
-bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
-{
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	void *vapic_page;
-	u32 vppr;
-	int rvi;
-
-	if (WARN_ON_ONCE(!is_guest_mode(vcpu)) ||
-	    !nested_cpu_has_vid(get_vmcs12(vcpu)) ||
-	    WARN_ON_ONCE(!vmx->nested.virtual_apic_map.gfn))
-		return false;
-
-	rvi = vmx_get_rvi();
-
-	vapic_page = vmx->nested.virtual_apic_map.hva;
-	vppr = *((u32 *)(vapic_page + APIC_PROCPRI));
-
-	return ((rvi & 0xf0) > (vppr & 0xf0));
-}
-
 void vmx_msr_filter_changed(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
--- a/arch/x86/kvm/vmx/x86_ops.h
+++ b/arch/x86/kvm/vmx/x86_ops.h
@@ -49,7 +49,6 @@ void vmx_apicv_pre_state_restore(struct kvm_vcpu *vcpu);
 bool vmx_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason);
 void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
 void vmx_hwapic_isr_update(int max_isr);
-bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu);
 int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu);
 void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
 			   int trig_mode, int vector);
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -13100,12 +13100,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 	kvm_arch_free_memslot(kvm, old);
 }
 
-static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
-{
-	return (is_guest_mode(vcpu) &&
-		static_call(kvm_x86_guest_apic_has_interrupt)(vcpu));
-}
-
 static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
 {
 	if (!list_empty_careful(&vcpu->async_pf.done))
@@ -13136,9 +13130,7 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
 	if (kvm_test_request(KVM_REQ_PMI, vcpu))
 		return true;
 
-	if (kvm_arch_interrupt_allowed(vcpu) &&
-	    (kvm_cpu_has_interrupt(vcpu) ||
-	     kvm_guest_apic_has_interrupt(vcpu)))
+	if (kvm_arch_interrupt_allowed(vcpu) && kvm_cpu_has_interrupt(vcpu))
 		return true;
 
 	if (kvm_hv_has_stimer_pending(vcpu))
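
As a closing illustration, a hedged sketch of how the wake-event predicate in kvm_vcpu_has_events() changes shape: the L2 virtual-interrupt case is decoupled from kvm_arch_interrupt_allowed() (L1's IRQ blocking status) and is instead reported through the nested-events check, which now performs the RVI test added above. The functions below are stand-ins with boolean parameters, not kernel code.

/*
 * Sketch only: the old predicate gated the L2 virtual interrupt on
 * irq_allowed (L1's IRQ blocking status); the new one reports that case
 * via the nested-events check, independent of irq_allowed.
 */
#include <stdbool.h>
#include <stdio.h>

static bool wake_event_old(bool irq_allowed, bool cpu_has_irq,
			   bool l2_virt_irq_pending)
{
	return irq_allowed && (cpu_has_irq || l2_virt_irq_pending);
}

static bool wake_event_new(bool irq_allowed, bool cpu_has_irq,
			   bool nested_has_events)
{
	/* nested_has_events stands in for the nested-events query
	 * (vmx_has_nested_events()), which now does the RVI check. */
	return nested_has_events || (irq_allowed && cpu_has_irq);
}

int main(void)
{
	/* Example inputs only; the point is that the L2 virtual-interrupt
	 * case no longer rides on the irq_allowed term. */
	printf("old: %d\n", wake_event_old(false, false, true));
	printf("new: %d\n", wake_event_new(false, false, true));
	return 0;
}
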