Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Paolo Bonzini:
 "Bug fixes for all architectures. Nothing really stands out"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (21 commits)
  KVM: nVMX: remove incorrect vpid check in nested invvpid emulation
  arm64: kvm: report original PAR_EL1 upon panic
  arm64: kvm: avoid %p in __kvm_hyp_panic
  KVM: arm/arm64: vgic: Trust the LR state for HW IRQs
  KVM: arm/arm64: arch_timer: Preserve physical dist. active state on LR.active
  KVM: arm/arm64: Fix preemptible timer active state crazyness
  arm64: KVM: Add workaround for Cortex-A57 erratum 834220
  arm64: KVM: Fix AArch32 to AArch64 register mapping
  ARM/arm64: KVM: test properly for a PTE's uncachedness
  KVM: s390: fix wrong lookup of VCPUs by array index
  KVM: s390: avoid memory overwrites on emergency signal injection
  KVM: Provide function for VCPU lookup by id
  KVM: s390: fix pfmf intercept handler
  KVM: s390: enable SIMD only when no VCPUs were created
  KVM: x86: request interrupt window when IRQ chip is split
  KVM: x86: set KVM_REQ_EVENT on local interrupt request from user space
  KVM: x86: split kvm_vcpu_ready_for_interrupt_injection out of dm_request_for_irq_injection
  KVM: x86: fix interrupt window handling in split IRQ chip case
  MIPS: KVM: Uninit VCPU in vcpu_create error path
  MIPS: KVM: Fix CACHE immediate offset sign extension
  ...
This commit is contained in: commit 4cf193b4b2
@@ -563,18 +563,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		if (vcpu->arch.power_off || vcpu->arch.pause)
 			vcpu_sleep(vcpu);
 
-		/*
-		 * Disarming the background timer must be done in a
-		 * preemptible context, as this call may sleep.
-		 */
-		kvm_timer_flush_hwstate(vcpu);
-
 		/*
 		 * Preparing the interrupts to be injected also
 		 * involves poking the GIC, which must be done in a
 		 * non-preemptible context.
 		 */
 		preempt_disable();
+		kvm_timer_flush_hwstate(vcpu);
 		kvm_vgic_flush_hwstate(vcpu);
 
 		local_irq_disable();
@@ -98,6 +98,11 @@ static void kvm_flush_dcache_pud(pud_t pud)
 	__kvm_flush_dcache_pud(pud);
 }
 
+static bool kvm_is_device_pfn(unsigned long pfn)
+{
+	return !pfn_valid(pfn);
+}
+
 /**
  * stage2_dissolve_pmd() - clear and flush huge PMD entry
  * @kvm:	pointer to kvm structure.
@@ -213,7 +218,7 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
 			kvm_tlb_flush_vmid_ipa(kvm, addr);
 
 			/* No need to invalidate the cache for device mappings */
-			if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+			if (!kvm_is_device_pfn(__phys_to_pfn(addr)))
 				kvm_flush_dcache_pte(old_pte);
 
 			put_page(virt_to_page(pte));
@@ -305,8 +310,7 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
 
 	pte = pte_offset_kernel(pmd, addr);
 	do {
-		if (!pte_none(*pte) &&
-		    (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+		if (!pte_none(*pte) && !kvm_is_device_pfn(__phys_to_pfn(addr)))
 			kvm_flush_dcache_pte(*pte);
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
@@ -1037,11 +1041,6 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
 	return kvm_vcpu_dabt_iswrite(vcpu);
 }
 
-static bool kvm_is_device_pfn(unsigned long pfn)
-{
-	return !pfn_valid(pfn);
-}
-
 /**
  * stage2_wp_ptes - write protect PMD range
  * @pmd:	pointer to pmd entry
@@ -316,6 +316,27 @@ config ARM64_ERRATUM_832075
 
 	  If unsure, say Y.
 
+config ARM64_ERRATUM_834220
+	bool "Cortex-A57: 834220: Stage 2 translation fault might be incorrectly reported in presence of a Stage 1 fault"
+	depends on KVM
+	default y
+	help
+	  This option adds an alternative code sequence to work around ARM
+	  erratum 834220 on Cortex-A57 parts up to r1p2.
+
+	  Affected Cortex-A57 parts might report a Stage 2 translation
+	  fault as the result of a Stage 1 fault for load crossing a
+	  page boundary when there is a permission or device memory
+	  alignment fault at Stage 1 and a translation fault at Stage 2.
+
+	  The workaround is to verify that the Stage 1 translation
+	  doesn't generate a fault before handling the Stage 2 fault.
+	  Please note that this does not necessarily enable the workaround,
+	  as it depends on the alternative framework, which will only patch
+	  the kernel if an affected CPU is detected.
+
+	  If unsure, say Y.
+
 config ARM64_ERRATUM_845719
 	bool "Cortex-A53: 845719: a load might read incorrect data"
 	depends on COMPAT
@@ -29,8 +29,9 @@
 #define ARM64_HAS_PAN 4
 #define ARM64_HAS_LSE_ATOMICS 5
 #define ARM64_WORKAROUND_CAVIUM_23154 6
+#define ARM64_WORKAROUND_834220 7
 
-#define ARM64_NCAPS 7
+#define ARM64_NCAPS 8
 
 #ifndef __ASSEMBLY__
 
@@ -99,11 +99,13 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
 	*vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
 }
 
+/*
+ * vcpu_reg should always be passed a register number coming from a
+ * read of ESR_EL2. Otherwise, it may give the wrong result on AArch32
+ * with banked registers.
+ */
 static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
 {
-	if (vcpu_mode_is_32bit(vcpu))
-		return vcpu_reg32(vcpu, reg_num);
-
 	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
 }
 
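For context: the new comment matters because ESR_EL2 reports the transfer register using the AArch64 view of the register file, which indexes the vcpu registers directly, whereas code that builds an AArch32 exception frame (such as prepare_fault32() further down) needs the mode-banked register and must call vcpu_reg32() explicitly. A rough sketch of the two use cases, assuming a data-abort handling path; this is an illustration, not part of the patch:

	/* MMIO-style use: register number comes from the ESR_EL2 fault syndrome */
	int rt = kvm_vcpu_dabt_get_rd(vcpu);
	unsigned long data = *vcpu_reg(vcpu, rt);	/* direct index, no banking */

	/* AArch32 exception injection: the banked LR of the target mode is wanted */
	*vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;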
@@ -75,6 +75,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 			   (1 << MIDR_VARIANT_SHIFT) | 2),
 	},
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_834220
+	{
+	/* Cortex-A57 r0p0 - r1p2 */
+		.desc = "ARM erratum 834220",
+		.capability = ARM64_WORKAROUND_834220,
+		MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
+			   (1 << MIDR_VARIANT_SHIFT) | 2),
+	},
+#endif
 #ifdef CONFIG_ARM64_ERRATUM_845719
 	{
 	/* Cortex-A53 r0p[01234] */
@@ -864,6 +864,10 @@ ENTRY(__kvm_flush_vm_context)
 ENDPROC(__kvm_flush_vm_context)
 
 __kvm_hyp_panic:
+	// Stash PAR_EL1 before corrupting it in __restore_sysregs
+	mrs	x0, par_el1
+	push	x0, xzr
+
 	// Guess the context by looking at VTTBR:
 	// If zero, then we're already a host.
 	// Otherwise restore a minimal host context before panicing.
@@ -898,7 +902,7 @@ __kvm_hyp_panic:
 	mrs	x3, esr_el2
 	mrs	x4, far_el2
 	mrs	x5, hpfar_el2
-	mrs	x6, par_el1
+	pop	x6, xzr		// active context PAR_EL1
 	mrs	x7, tpidr_el2
 
 	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
@@ -914,7 +918,7 @@ __kvm_hyp_panic:
 ENDPROC(__kvm_hyp_panic)
 
 __hyp_panic_str:
-	.ascii	"HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0"
+	.ascii	"HYP panic:\nPS:%08x PC:%016x ESR:%08x\nFAR:%016x HPFAR:%016x PAR:%016x\nVCPU:%p\n\0"
 
 	.align	2
 
@@ -1015,9 +1019,15 @@ el1_trap:
 	b.ne	1f		// Not an abort we care about
 
 	/* This is an abort. Check for permission fault */
+alternative_if_not ARM64_WORKAROUND_834220
 	and	x2, x1, #ESR_ELx_FSC_TYPE
 	cmp	x2, #FSC_PERM
 	b.ne	1f		// Not a permission fault
+alternative_else
+	nop			// Use the permission fault path to
+	nop			// check for a valid S1 translation,
+	nop			// regardless of the ESR value.
+alternative_endif
 
 	/*
 	 * Check for Stage-1 page table walk, which is guaranteed
@@ -48,7 +48,7 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
 
 	/* Note: These now point to the banked copies */
 	*vcpu_spsr(vcpu) = new_spsr_value;
-	*vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
+	*vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
 
 	/* Branch to exception vector */
 	if (sctlr & (1 << 13))
@@ -1581,7 +1581,7 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
 
 	base = (inst >> 21) & 0x1f;
 	op_inst = (inst >> 16) & 0x1f;
-	offset = inst & 0xffff;
+	offset = (int16_t)inst;
 	cache = (inst >> 16) & 0x3;
 	op = (inst >> 18) & 0x7;
 
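For context, the CACHE instruction's 16-bit immediate is a signed offset; masking with 0xffff zero-extends it, so negative offsets were mishandled before this change. A standalone sketch of the difference, with an illustrative value only (assumes the stdint.h fixed-width types):

	/* low 16 bits of the instruction encode an offset of -4 (0xfffc) */
	uint32_t inst = 0x0000fffc;		/* illustrative encoding, not a real CACHE op */
	int32_t wrong = inst & 0xffff;		/* zero-extended: 65532 */
	int32_t right = (int16_t)inst;		/* sign-extended, as in the fix: -4 */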
@@ -157,9 +157,11 @@ FEXPORT(__kvm_mips_vcpu_run)
 
 FEXPORT(__kvm_mips_load_asid)
 	/* Set the ASID for the Guest Kernel */
-	INT_SLL	t0, t0, 1	/* with kseg0 @ 0x40000000, kernel */
-				/* addresses shift to 0x80000000 */
-	bltz	t0, 1f		/* If kernel */
+	PTR_L	t0, VCPU_COP0(k1)
+	LONG_L	t0, COP0_STATUS(t0)
+	andi	t0, KSU_USER | ST0_ERL | ST0_EXL
+	xori	t0, KSU_USER
+	bnez	t0, 1f		/* If kernel */
 	 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD) */
 	INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID     /* else user */
 1:
@@ -474,9 +476,11 @@ __kvm_mips_return_to_guest:
 	mtc0	t0, CP0_EPC
 
 	/* Set the ASID for the Guest Kernel */
-	INT_SLL	t0, t0, 1	/* with kseg0 @ 0x40000000, kernel */
-				/* addresses shift to 0x80000000 */
-	bltz	t0, 1f		/* If kernel */
+	PTR_L	t0, VCPU_COP0(k1)
+	LONG_L	t0, COP0_STATUS(t0)
+	andi	t0, KSU_USER | ST0_ERL | ST0_EXL
+	xori	t0, KSU_USER
+	bnez	t0, 1f		/* If kernel */
 	 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD) */
 	INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID     /* else user */
 1:
@@ -279,7 +279,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 
 	if (!gebase) {
 		err = -ENOMEM;
-		goto out_free_cpu;
+		goto out_uninit_cpu;
 	}
 	kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
 		  ALIGN(size, PAGE_SIZE), gebase);
@@ -343,6 +343,9 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 out_free_gebase:
 	kfree(gebase);
 
+out_uninit_cpu:
+	kvm_vcpu_uninit(vcpu);
+
 out_free_cpu:
 	kfree(vcpu);
 
@@ -1030,8 +1030,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 				   src_id, 0);
 
 	/* sending vcpu invalid */
-	if (src_id >= KVM_MAX_VCPUS ||
-	    kvm_get_vcpu(vcpu->kvm, src_id) == NULL)
+	if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
 		return -EINVAL;
 
 	if (sclp.has_sigpif)
@@ -1110,6 +1109,10 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
 				   irq->u.emerg.code, 0);
 
+	/* sending vcpu invalid */
+	if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
+		return -EINVAL;
+
 	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
 	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
 	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
@@ -342,12 +342,16 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
 		r = 0;
 		break;
 	case KVM_CAP_S390_VECTOR_REGISTERS:
-		if (MACHINE_HAS_VX) {
+		mutex_lock(&kvm->lock);
+		if (atomic_read(&kvm->online_vcpus)) {
+			r = -EBUSY;
+		} else if (MACHINE_HAS_VX) {
 			set_kvm_facility(kvm->arch.model.fac->mask, 129);
 			set_kvm_facility(kvm->arch.model.fac->list, 129);
 			r = 0;
 		} else
 			r = -EINVAL;
+		mutex_unlock(&kvm->lock);
 		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
 			 r ? "(not available)" : "(success)");
 		break;
@@ -660,7 +660,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
 
 	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
 
-	if (!MACHINE_HAS_PFMF)
+	if (!test_kvm_facility(vcpu->kvm, 8))
 		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
 
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
@@ -291,12 +291,8 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
 			   u16 cpu_addr, u32 parameter, u64 *status_reg)
 {
 	int rc;
-	struct kvm_vcpu *dst_vcpu;
+	struct kvm_vcpu *dst_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);
 
-	if (cpu_addr >= KVM_MAX_VCPUS)
-		return SIGP_CC_NOT_OPERATIONAL;
-
-	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
 	if (!dst_vcpu)
 		return SIGP_CC_NOT_OPERATIONAL;
 
@@ -478,7 +474,7 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
 	trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);
 
 	if (order_code == SIGP_EXTERNAL_CALL) {
-		dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+		dest_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);
 		BUG_ON(dest_vcpu == NULL);
 
 		kvm_s390_vcpu_wakeup(dest_vcpu);
@@ -7394,11 +7394,6 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
 
 	switch (type) {
 	case VMX_VPID_EXTENT_ALL_CONTEXT:
-		if (get_vmcs12(vcpu)->virtual_processor_id == 0) {
-			nested_vmx_failValid(vcpu,
-				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
-			return 1;
-		}
 		__vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
 		nested_vmx_succeed(vcpu);
 		break;
@@ -2763,6 +2763,26 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
+{
+	return (!lapic_in_kernel(vcpu) ||
+		kvm_apic_accept_pic_intr(vcpu));
+}
+
+/*
+ * if userspace requested an interrupt window, check that the
+ * interrupt window is open.
+ *
+ * No need to exit to userspace if we already have an interrupt queued.
+ */
+static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
+{
+	return kvm_arch_interrupt_allowed(vcpu) &&
+		!kvm_cpu_has_interrupt(vcpu) &&
+		!kvm_event_needs_reinjection(vcpu) &&
+		kvm_cpu_accept_dm_intr(vcpu);
+}
+
 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
 				    struct kvm_interrupt *irq)
 {
@@ -2786,6 +2806,7 @@ static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
 		return -EEXIST;
 
 	vcpu->arch.pending_external_vector = irq->irq;
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
 	return 0;
 }
 
@@ -5910,23 +5931,10 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
 	return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
 }
 
-/*
- * Check if userspace requested an interrupt window, and that the
- * interrupt window is open.
- *
- * No need to exit to userspace if we already have an interrupt queued.
- */
 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
 {
-	if (!vcpu->run->request_interrupt_window || pic_in_kernel(vcpu->kvm))
-		return false;
-
-	if (kvm_cpu_has_interrupt(vcpu))
-		return false;
-
-	return (irqchip_split(vcpu->kvm)
-		? kvm_apic_accept_pic_intr(vcpu)
-		: kvm_arch_interrupt_allowed(vcpu));
+	return vcpu->run->request_interrupt_window &&
+		likely(!pic_in_kernel(vcpu->kvm));
 }
 
 static void post_kvm_run_save(struct kvm_vcpu *vcpu)
@@ -5937,17 +5945,9 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
 	kvm_run->flags = is_smm(vcpu) ? KVM_RUN_X86_SMM : 0;
 	kvm_run->cr8 = kvm_get_cr8(vcpu);
 	kvm_run->apic_base = kvm_get_apic_base(vcpu);
-	if (!irqchip_in_kernel(vcpu->kvm))
-		kvm_run->ready_for_interrupt_injection =
-			kvm_arch_interrupt_allowed(vcpu) &&
-			!kvm_cpu_has_interrupt(vcpu) &&
-			!kvm_event_needs_reinjection(vcpu);
-	else if (!pic_in_kernel(vcpu->kvm))
-		kvm_run->ready_for_interrupt_injection =
-			kvm_apic_accept_pic_intr(vcpu) &&
-			!kvm_cpu_has_interrupt(vcpu);
-	else
-		kvm_run->ready_for_interrupt_injection = 1;
+	kvm_run->ready_for_interrupt_injection =
+		pic_in_kernel(vcpu->kvm) ||
+		kvm_vcpu_ready_for_interrupt_injection(vcpu);
 }
 
 static void update_cr8_intercept(struct kvm_vcpu *vcpu)
@@ -6360,8 +6360,10 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
 static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 {
 	int r;
-	bool req_int_win = !lapic_in_kernel(vcpu) &&
-		vcpu->run->request_interrupt_window;
+	bool req_int_win =
+		dm_request_for_irq_injection(vcpu) &&
+		kvm_cpu_accept_dm_intr(vcpu);
+
 	bool req_immediate_exit = false;
 
 	if (vcpu->requests) {
@@ -6663,7 +6665,8 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
 		if (kvm_cpu_has_pending_timer(vcpu))
 			kvm_inject_pending_timer_irqs(vcpu);
 
-		if (dm_request_for_irq_injection(vcpu)) {
+		if (dm_request_for_irq_injection(vcpu) &&
+			kvm_vcpu_ready_for_interrupt_injection(vcpu)) {
 			r = 0;
 			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
 			++vcpu->stat.request_irq_exits;
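Taken together, these x86 hunks split the old dm_request_for_irq_injection() into two questions: did userspace ask for an interrupt window (with no in-kernel PIC that would handle it), and could an interrupt from userspace actually be injected right now. A rough paraphrase of the combined exit condition after the patch; this is a sketch, not literal kernel code:

	bool userspace_wants_window =			/* dm_request_for_irq_injection() */
		vcpu->run->request_interrupt_window &&
		!pic_in_kernel(vcpu->kvm);

	bool can_inject_now =				/* kvm_vcpu_ready_for_interrupt_injection() */
		kvm_arch_interrupt_allowed(vcpu) &&
		!kvm_cpu_has_interrupt(vcpu) &&
		!kvm_event_needs_reinjection(vcpu) &&
		(!lapic_in_kernel(vcpu) || kvm_apic_accept_pic_intr(vcpu));

	if (userspace_wants_window && can_inject_now)
		vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;	/* exit to userspace */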
@@ -342,10 +342,10 @@ int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid,
 			       struct irq_phys_map *map, bool level);
 void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
-int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu);
 struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
 					   int virt_irq, int irq);
 int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map);
+bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map);
 
 #define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
 #define vgic_initialized(k) (!!((k)->arch.vgic.nr_cpus))
@@ -460,6 +460,17 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
 	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
 	     idx++)
 
+static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
+{
+	struct kvm_vcpu *vcpu;
+	int i;
+
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		if (vcpu->vcpu_id == id)
+			return vcpu;
+	return NULL;
+}
+
 #define kvm_for_each_memslot(memslot, slots) \
 	for (memslot = &slots->memslots[0]; \
 	     memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
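The distinction this helper captures: kvm_get_vcpu() indexes the vcpus[] array by creation order, while the guest-visible vcpu_id chosen at VCPU creation time may differ from that slot, so the s390 SIGP and external-call paths earlier in this series have to search by ID instead of trusting the address as an array index. A minimal usage sketch with a hypothetical caller, not taken from the patch:

	struct kvm_vcpu *by_slot = kvm_get_vcpu(kvm, 0);	/* first VCPU that was created */
	struct kvm_vcpu *by_id = kvm_get_vcpu_by_id(kvm, 5);	/* VCPU whose vcpu_id == 5, or NULL */

	if (!by_id)
		pr_debug("no VCPU was created with id 5\n");	/* illustrative handling */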
@@ -221,17 +221,23 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
 	kvm_timer_update_state(vcpu);
 
 	/*
-	 * If we enter the guest with the virtual input level to the VGIC
-	 * asserted, then we have already told the VGIC what we need to, and
-	 * we don't need to exit from the guest until the guest deactivates
-	 * the already injected interrupt, so therefore we should set the
-	 * hardware active state to prevent unnecessary exits from the guest.
-	 *
-	 * Conversely, if the virtual input level is deasserted, then always
-	 * clear the hardware active state to ensure that hardware interrupts
-	 * from the timer triggers a guest exit.
-	 */
-	if (timer->irq.level)
+	 * If we enter the guest with the virtual input level to the VGIC
+	 * asserted, then we have already told the VGIC what we need to, and
+	 * we don't need to exit from the guest until the guest deactivates
+	 * the already injected interrupt, so therefore we should set the
+	 * hardware active state to prevent unnecessary exits from the guest.
+	 *
+	 * Also, if we enter the guest with the virtual timer interrupt active,
+	 * then it must be active on the physical distributor, because we set
+	 * the HW bit and the guest must be able to deactivate the virtual and
+	 * physical interrupt at the same time.
+	 *
+	 * Conversely, if the virtual input level is deasserted and the virtual
+	 * interrupt is not active, then always clear the hardware active state
+	 * to ensure that hardware interrupts from the timer triggers a guest
+	 * exit.
+	 */
+	if (timer->irq.level || kvm_vgic_map_is_active(vcpu, timer->map))
 		phys_active = true;
 	else
 		phys_active = false;
@@ -1096,6 +1096,27 @@ static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu)
 	vgic_set_lr(vcpu, lr_nr, vlr);
 }
 
+static bool dist_active_irq(struct kvm_vcpu *vcpu)
+{
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+	return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu);
+}
+
+bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map)
+{
+	int i;
+
+	for (i = 0; i < vcpu->arch.vgic_cpu.nr_lr; i++) {
+		struct vgic_lr vlr = vgic_get_lr(vcpu, i);
+
+		if (vlr.irq == map->virt_irq && vlr.state & LR_STATE_ACTIVE)
+			return true;
+	}
+
+	return dist_active_irq(vcpu);
+}
+
 /*
  * An interrupt may have been disabled after being made pending on the
  * CPU interface (the classic case is a timer running while we're
|
||||
* may have been serviced from another vcpu. In all cases,
|
||||
* move along.
|
||||
*/
|
||||
if (!kvm_vgic_vcpu_pending_irq(vcpu) && !kvm_vgic_vcpu_active_irq(vcpu))
|
||||
if (!kvm_vgic_vcpu_pending_irq(vcpu) && !dist_active_irq(vcpu))
|
||||
goto epilog;
|
||||
|
||||
/* SGIs */
|
||||
@ -1396,25 +1417,13 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
|
||||
static bool vgic_sync_hwirq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr)
|
||||
{
|
||||
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
|
||||
struct irq_phys_map *map;
|
||||
bool phys_active;
|
||||
bool level_pending;
|
||||
int ret;
|
||||
|
||||
if (!(vlr.state & LR_HW))
|
||||
return false;
|
||||
|
||||
map = vgic_irq_map_search(vcpu, vlr.irq);
|
||||
BUG_ON(!map);
|
||||
|
||||
ret = irq_get_irqchip_state(map->irq,
|
||||
IRQCHIP_STATE_ACTIVE,
|
||||
&phys_active);
|
||||
|
||||
WARN_ON(ret);
|
||||
|
||||
if (phys_active)
|
||||
return 0;
|
||||
if (vlr.state & LR_STATE_ACTIVE)
|
||||
return false;
|
||||
|
||||
spin_lock(&dist->lock);
|
||||
level_pending = process_queued_irq(vcpu, lr, vlr);
|
||||
@@ -1479,17 +1488,6 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
 	return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
 }
 
-int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu)
-{
-	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
-	if (!irqchip_in_kernel(vcpu->kvm))
-		return 0;
-
-	return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu);
-}
-
 void vgic_kick_vcpus(struct kvm *kvm)
 {
 	struct kvm_vcpu *vcpu;