// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/sstep.h>
#include "timing.h"
#include "trace.h"

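/*
 * Facility checks used below: if the guest has not enabled the relevant
 * facility in its MSR, queue the matching "unavailable" interrupt and
 * return true so the caller bails out, letting the guest enable the
 * facility and retry the instruction.
 */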
#ifdef CONFIG_PPC_FPU
static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
{
        if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
                kvmppc_core_queue_fpunavail(vcpu);
                return true;
        }

        return false;
}
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_VSX
static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
{
        if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
                kvmppc_core_queue_vsx_unavail(vcpu);
                return true;
        }

        return false;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
{
        if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
                kvmppc_core_queue_vec_unavail(vcpu);
                return true;
        }

        return false;
}
#endif /* CONFIG_ALTIVEC */

/*
 * XXX to do:
 * lfiwax, lfiwzx
 * vector loads and stores
 *
 * Instructions that trap when used on cache-inhibited mappings
 * are not emulated here: multiple and string instructions,
 * lq/stq, and the load-reserve/store-conditional instructions.
 */
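/*
 * Decode the guest load/store instruction that trapped and hand it to
 * the kvmppc_handle_*() MMIO helpers, which either complete the access
 * or set up an MMIO exit so it can be finished later.
 */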
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
        u32 inst;
        enum emulation_result emulated = EMULATE_FAIL;
        int advance = 1;
        struct instruction_op op;

        /* this default type might be overwritten by subcategories */
        kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

        emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
        if (emulated != EMULATE_DONE)
                return emulated;

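        /*
         * Clear any stale MMIO completion state: these fields describe how
         * the result of an in-flight MMIO access is written back (register
         * class, element count/offset, sign or single-precision extension,
         * byte swapping).
         */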
        vcpu->arch.mmio_vsx_copy_nums = 0;
        vcpu->arch.mmio_vsx_offset = 0;
        vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;
        vcpu->arch.mmio_sp64_extend = 0;
        vcpu->arch.mmio_sign_extend = 0;
        vcpu->arch.mmio_vmx_copy_nums = 0;
        vcpu->arch.mmio_vmx_offset = 0;
        vcpu->arch.mmio_host_swabbed = 0;

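        /*
         * analyse_instr() decodes the instruction into 'op'; the guest MSR
         * is copied into the register snapshot first so the decode reflects
         * the guest's current mode.
         */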
        emulated = EMULATE_FAIL;
        vcpu->arch.regs.msr = vcpu->arch.shared->msr;
        if (analyse_instr(&op, &vcpu->arch.regs, ppc_inst(inst)) == 0) {
                int type = op.type & INSTR_TYPE_MASK;
                int size = GETSIZE(op.type);

                switch (type) {
                case LOAD: {
                        int instr_byte_swap = op.type & BYTEREV;

                        if (op.type & SIGNEXT)
                                emulated = kvmppc_handle_loads(vcpu,
                                                op.reg, size, !instr_byte_swap);
                        else
                                emulated = kvmppc_handle_load(vcpu,
                                                op.reg, size, !instr_byte_swap);

                        if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
                                kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

                        break;
                }
#ifdef CONFIG_PPC_FPU
                case LOAD_FP:
                        if (kvmppc_check_fp_disabled(vcpu))
                                return EMULATE_DONE;

                        if (op.type & FPCONV)
                                vcpu->arch.mmio_sp64_extend = 1;

                        if (op.type & SIGNEXT)
                                emulated = kvmppc_handle_loads(vcpu,
                                        KVM_MMIO_REG_FPR|op.reg, size, 1);
                        else
                                emulated = kvmppc_handle_load(vcpu,
                                        KVM_MMIO_REG_FPR|op.reg, size, 1);

                        if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
                                kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

                        break;
#endif
#ifdef CONFIG_ALTIVEC
                case LOAD_VMX:
                        if (kvmppc_check_altivec_disabled(vcpu))
                                return EMULATE_DONE;

                        /* Hardware enforces alignment of VMX accesses */
                        vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
                        vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

                        if (size == 16) { /* lvx */
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VMX_COPY_DWORD;
                        } else if (size == 4) { /* lvewx */
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VMX_COPY_WORD;
                        } else if (size == 2) { /* lvehx */
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VMX_COPY_HWORD;
                        } else if (size == 1) { /* lvebx */
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VMX_COPY_BYTE;
                        } else
                                break;

                        vcpu->arch.mmio_vmx_offset =
                                (vcpu->arch.vaddr_accessed & 0xf)/size;

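                        /* A 16-byte lvx is emulated as two 64-bit MMIO loads. */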
                        if (size == 16) {
                                vcpu->arch.mmio_vmx_copy_nums = 2;
                                emulated = kvmppc_handle_vmx_load(vcpu,
                                                KVM_MMIO_REG_VMX|op.reg,
                                                8, 1);
                        } else {
                                vcpu->arch.mmio_vmx_copy_nums = 1;
                                emulated = kvmppc_handle_vmx_load(vcpu,
                                                KVM_MMIO_REG_VMX|op.reg,
                                                size, 1);
                        }
                        break;
#endif
#ifdef CONFIG_VSX
                case LOAD_VSX: {
                        int io_size_each;

                        if (op.vsx_flags & VSX_CHECK_VEC) {
                                if (kvmppc_check_altivec_disabled(vcpu))
                                        return EMULATE_DONE;
                        } else {
                                if (kvmppc_check_vsx_disabled(vcpu))
                                        return EMULATE_DONE;
                        }

                        if (op.vsx_flags & VSX_FPCONV)
                                vcpu->arch.mmio_sp64_extend = 1;

                        if (op.element_size == 8) {
                                if (op.vsx_flags & VSX_SPLAT)
                                        vcpu->arch.mmio_copy_type =
                                                KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
                                else
                                        vcpu->arch.mmio_copy_type =
                                                KVMPPC_VSX_COPY_DWORD;
                        } else if (op.element_size == 4) {
                                if (op.vsx_flags & VSX_SPLAT)
                                        vcpu->arch.mmio_copy_type =
                                                KVMPPC_VSX_COPY_WORD_LOAD_DUMP;
                                else
                                        vcpu->arch.mmio_copy_type =
                                                KVMPPC_VSX_COPY_WORD;
                        } else
                                break;

                        if (size < op.element_size) {
                                /* precision convert case: lxsspx, etc */
                                vcpu->arch.mmio_vsx_copy_nums = 1;
                                io_size_each = size;
                        } else { /* lxvw4x, lxvd2x, etc */
                                vcpu->arch.mmio_vsx_copy_nums =
                                        size/op.element_size;
                                io_size_each = op.element_size;
                        }

                        emulated = kvmppc_handle_vsx_load(vcpu,
                                        KVM_MMIO_REG_VSX|op.reg, io_size_each,
                                        1, op.type & SIGNEXT);
                        break;
                }
#endif
                case STORE:
                        /* if need byte reverse, op.val has been reversed by
                         * analyse_instr().
                         */
                        emulated = kvmppc_handle_store(vcpu, op.val, size, 1);

                        if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
                                kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

                        break;
#ifdef CONFIG_PPC_FPU
                case STORE_FP:
                        if (kvmppc_check_fp_disabled(vcpu))
                                return EMULATE_DONE;

                        /* The FP registers need to be flushed so that
                         * kvmppc_handle_store() can read actual FP vals
                         * from vcpu->arch.
                         */
                        if (vcpu->kvm->arch.kvm_ops->giveup_ext)
                                vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
                                                MSR_FP);

                        if (op.type & FPCONV)
                                vcpu->arch.mmio_sp64_extend = 1;

                        emulated = kvmppc_handle_store(vcpu,
                                        VCPU_FPR(vcpu, op.reg), size, 1);

                        if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
                                kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

                        break;
#endif
#ifdef CONFIG_ALTIVEC
                case STORE_VMX:
                        if (kvmppc_check_altivec_disabled(vcpu))
                                return EMULATE_DONE;

                        /* Hardware enforces alignment of VMX accesses. */
                        vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
                        vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

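                        /* Flush the guest's vector registers to vcpu->arch so
                         * the store below reads their current values.
                         */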
                        if (vcpu->kvm->arch.kvm_ops->giveup_ext)
                                vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
                                                MSR_VEC);
                        if (size == 16) { /* stvx */
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VMX_COPY_DWORD;
                        } else if (size == 4) { /* stvewx */
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VMX_COPY_WORD;
                        } else if (size == 2) { /* stvehx */
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VMX_COPY_HWORD;
                        } else if (size == 1) { /* stvebx */
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VMX_COPY_BYTE;
                        } else
                                break;

                        vcpu->arch.mmio_vmx_offset =
                                (vcpu->arch.vaddr_accessed & 0xf)/size;

                        if (size == 16) {
                                vcpu->arch.mmio_vmx_copy_nums = 2;
                                emulated = kvmppc_handle_vmx_store(vcpu,
                                                op.reg, 8, 1);
                        } else {
                                vcpu->arch.mmio_vmx_copy_nums = 1;
                                emulated = kvmppc_handle_vmx_store(vcpu,
                                                op.reg, size, 1);
                        }

                        break;
#endif
#ifdef CONFIG_VSX
                case STORE_VSX: {
                        int io_size_each;

                        if (op.vsx_flags & VSX_CHECK_VEC) {
                                if (kvmppc_check_altivec_disabled(vcpu))
                                        return EMULATE_DONE;
                        } else {
                                if (kvmppc_check_vsx_disabled(vcpu))
                                        return EMULATE_DONE;
                        }

                        if (vcpu->kvm->arch.kvm_ops->giveup_ext)
                                vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
                                                MSR_VSX);

                        if (op.vsx_flags & VSX_FPCONV)
                                vcpu->arch.mmio_sp64_extend = 1;

                        if (op.element_size == 8)
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VSX_COPY_DWORD;
                        else if (op.element_size == 4)
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VSX_COPY_WORD;
                        else
                                break;

                        if (size < op.element_size) {
                                /* precise conversion case, like stxsspx */
                                vcpu->arch.mmio_vsx_copy_nums = 1;
                                io_size_each = size;
                        } else { /* stxvw4x, stxvd2x, etc */
                                vcpu->arch.mmio_vsx_copy_nums =
                                                size/op.element_size;
                                io_size_each = op.element_size;
                        }

                        emulated = kvmppc_handle_vsx_store(vcpu,
                                        op.reg, io_size_each, 1);
                        break;
                }
#endif
                case CACHEOP:
                        /* Do nothing. The guest is performing dcbi because
                         * hardware DMA is not snooped by the dcache, but
                         * emulated DMA either goes through the dcache as
                         * normal writes, or the host kernel has handled dcache
                         * coherence.
                         */
                        emulated = EMULATE_DONE;
                        break;
                default:
                        break;
                }
        }

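        /*
         * If nothing managed to emulate the access, queue a program
         * interrupt for the guest and leave the PC on the faulting
         * instruction.
         */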
        if (emulated == EMULATE_FAIL) {
                advance = 0;
                kvmppc_core_queue_program(vcpu, 0);
        }

        trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

        /* Advance past emulated instruction. */
        if (advance)
                kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

        return emulated;
}