commit 5c7d4f9ad3
kvm_clear_exception_queue() should clear a pending exception.
This also includes exceptions which were only marked pending but not
yet injected, because exception.pending is used for both L1 and L2
to determine whether an exception should be raised to the guest.
Note that an exception which is pending but not yet injected will
be raised again once the guest is resumed.
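For reference, the two flags involved live in vcpu->arch.exception. A
simplified sketch of that bookkeeping follows (the real definition in
arch/x86/include/asm/kvm_host.h has additional fields, e.g. the error
code):

	/* Simplified sketch; not the complete kvm_host.h definition. */
	struct kvm_queued_exception {
		bool pending;   /* queued (e.g. by kvm_inject_gp()) but not yet
		                 * injected; re-evaluated on the next guest entry */
		bool injected;  /* already written into the VMCS/VMCB
		                 * event-injection fields */
		u8 nr;          /* exception vector */
	} exception;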
Consider the following scenario:
1) L0 KVM with ignore_msrs=false.
2) L1 prepares vmcs12 with the following:
   a) No MSR intercepts (MSR_BITMAP exists and is filled with 0).
   b) No intercept for #GP.
   c) The VMX preemption timer is configured.
3) L1 enters L2.
4) L2 reads an unhandled MSR that is covered by MSR_BITMAP
   (such as 0x1fff).
The L2 RDMSR can then be handled as described below:
1) L2 exits to L0 on the RDMSR, and L0 calls handle_rdmsr().
2) handle_rdmsr() calls kvm_inject_gp(), which sets
   KVM_REQ_EVENT, exception.pending=true and exception.injected=false.
3) vcpu_enter_guest() consumes KVM_REQ_EVENT and calls
   inject_pending_event(), which calls vmx_check_nested_events().
   The latter sees that exception.pending=true, but
   nested_vmx_check_exception() returns 0, so nothing happens at
   this point. However, assume it later sees that the VMX preemption
   timer has expired and therefore exits from L2 to L1 by calling
   nested_vmx_vmexit().
4) nested_vmx_vmexit() calls prepare_vmcs12(),
   which calls vmcs12_save_pending_event(); this does nothing because
   exception.injected is false. prepare_vmcs12() also calls
   kvm_clear_exception_queue(), which does nothing because
   exception.injected is already false.
5) We now return from vmx_check_nested_events() with 0 while still
   having exception.pending=true!
6) Therefore inject_pending_event() continues,
   and we inject the L2 exception into L1!
This commit fixes the above issue by changing step (4) so that
kvm_clear_exception_queue() also clears exception.pending, as sketched
below.
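Concretely, the change to kvm_clear_exception_queue() in
arch/x86/kvm/x86.h is essentially the following one-liner (the resulting
helper appears in full in the file listing below):

	 static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
	 {
	+	vcpu->arch.exception.pending = false;
	 	vcpu->arch.exception.injected = false;
	 }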
Fixes: 664f8e26b0 ("KVM: X86: Fix loss of exception which has not yet been injected")
Signed-off-by: Liran Alon <liran.alon@oracle.com>
Reviewed-by: Nikita Leshenko <nikita.leshchenko@oracle.com>
Reviewed-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
arch/x86/kvm/x86.h (274 lines, 6.7 KiB, C)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <asm/processor.h>
#include <asm/mwait.h>
#include <linux/kvm_host.h>
#include <asm/pvclock.h>
#include "kvm_cache_regs.h"

#define MSR_IA32_CR_PAT_DEFAULT  0x0007040600070406ULL

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
        vcpu->arch.exception.pending = false;
        vcpu->arch.exception.injected = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
                                       bool soft)
{
        vcpu->arch.interrupt.pending = true;
        vcpu->arch.interrupt.soft = soft;
        vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
        vcpu->arch.interrupt.pending = false;
}

static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.exception.injected || vcpu->arch.interrupt.pending ||
                vcpu->arch.nmi_injected;
}

static inline bool kvm_exception_is_soft(unsigned int nr)
{
        return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        return vcpu->arch.efer & EFER_LMA;
#else
        return 0;
#endif
}

static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
        int cs_db, cs_l;

        if (!is_long_mode(vcpu))
                return false;
        kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
        return cs_l;
}

static inline bool is_la57_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        return (vcpu->arch.efer & EFER_LMA) &&
                kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
#else
        return 0;
#endif
}

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
        return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}

static inline u32 bit(int bitno)
{
        return 1 << (bitno & 31);
}

static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
}

static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
{
        return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48;
}

static inline u64 get_canonical(u64 la, u8 vaddr_bits)
{
        return ((int64_t)la << (64 - vaddr_bits)) >> (64 - vaddr_bits);
}

static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        return get_canonical(la, vcpu_virt_addr_bits(vcpu)) != la;
#else
        return false;
#endif
}

static inline bool emul_is_noncanonical_address(u64 la,
                                                struct x86_emulate_ctxt *ctxt)
{
#ifdef CONFIG_X86_64
        return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la;
#else
        return false;
#endif
}

static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
                                        gva_t gva, gfn_t gfn, unsigned access)
{
        /*
         * If this is a shadow nested page table, the "GVA" is
         * actually a nGPA.
         */
        vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
        vcpu->arch.access = access;
        vcpu->arch.mmio_gfn = gfn;
        vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

/*
 * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
        if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
                return;

        vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
        if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
              vcpu->arch.mmio_gva == (gva & PAGE_MASK))
                return true;

        return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
        if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
              vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
                return true;

        return false;
}

static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu,
                                               enum kvm_reg reg)
{
        unsigned long val = kvm_register_read(vcpu, reg);

        return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
                                       enum kvm_reg reg,
                                       unsigned long val)
{
        if (!is_64_bit_mode(vcpu))
                val = (u32)val;
        return kvm_register_write(vcpu, reg, val);
}

static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
        return !(kvm->arch.disabled_quirks & quirk);
}

void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
u64 get_kvmclock_ns(struct kvm *kvm);

int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
        gva_t addr, void *val, unsigned int bytes,
        struct x86_exception *exception);

int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
        gva_t addr, void *val, unsigned int bytes,
        struct x86_exception *exception);

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
                                          int page_num);
bool kvm_vector_hashing_enabled(void);

#define KVM_SUPPORTED_XCR0     (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
                                | XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
                                | XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \
                                | XFEATURE_MASK_PKRU)
extern u64 host_xcr0;

extern u64 kvm_supported_xcr0(void);

extern unsigned int min_timer_period_us;

extern unsigned int lapic_timer_advance_ns;

extern struct static_key kvm_no_apic_vcpu;

static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
        return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
                                   vcpu->arch.virtual_tsc_shift);
}

/* Same "calling convention" as do_div:
 * - divide (n << 32) by base
 * - put result in n
 * - return remainder
 */
#define do_shl32_div32(n, base)                                 \
        ({                                                      \
            u32 __quot, __rem;                                  \
            asm("divl %2" : "=a" (__quot), "=d" (__rem)         \
                : "rm" (base), "0" (0), "1" ((u32) n));         \
            n = __quot;                                         \
            __rem;                                              \
         })

static inline bool kvm_mwait_in_guest(void)
{
        return boot_cpu_has(X86_FEATURE_MWAIT) &&
                !boot_cpu_has_bug(X86_BUG_MONITOR);
}

#endif