KVM/arm updates for Linux 5.6
- Fix MMIO sign extension
- Fix HYP VA tagging on tag space exhaustion
- Fix PSTATE/CPSR handling when generating exception
- Fix MMU notifier's advertizing of young pages
- Fix poisoned page handling
- Fix PMU SW event handling
- Fix TVAL register access
- Fix AArch32 external abort injection
- Fix ITS unmapped collection handling
- Various cleanups

-----BEGIN PGP SIGNATURE-----
iQJDBAABCgAtFiEEn9UcU+C1Yxj9lZw9I9DQutE9ekMFAl4y1z0PHG1hekBrZXJu
ZWwub3JnAAoJECPQ0LrRPXpDCsUQALsrDOOzbKPRfcJjk0+XSf3uDd9GvvQ6F48p
zB8eerSZOSF4o/BMNHkcRkMaVyLRE9xzHYAfueaHYOFnaEHAO5YpMPE03Rme/SeM
F3ZnT+iyt+GkbSRyJbR4u0QCuvhFSu8ve18TLRMDrFO6L8i/MH3AdexO9uWjKByI
FBEUVNbq/nVma0I0DBcx2GeCKiu79O/Gf7qquRI8CnptmXvk/FFZz89bCxDLjRaM
3d9OGzXd5Diy4BrAVG5gHbSYaEZ8uId0ltxTuI1spk2ju5kJOW0NStDDMXRr5Dc8
0CXJmeQrw9QgTBRd52n9CL5JZvKyCRDRSx33aGoJaDyqo3d3mJoT9wzJ2+/FVK7q
RhlrJHNpYzN31j/Op0wE85coyvrEZCqMmcGLTpuFB6LOLsJ41a/jkvbR431ayT9G
phqBmpQ3BrxVDGwA1aRUf8VzimW0EV15YNkV63lOGvG6bpikKiNSwlwWhVF7q4zU
UiwlJyNITCzOkavMY0FRJ3VubjpoOYU4XmwLiyavBM4o71cztONd/USb7w7p6Xy2
cix8kpjHo7aYlJKl1Si92kIbndskXNKWrYvBwlOGaeIby9/EA7Jsnh7Ps6HOk+1C
POVExwl3ZQrKjRh3N4mxTnB53NU09ATQ4VukP0pBnDNOMLFF87g07R3w2S5EdejV
usIVNvlS
=mT8t
-----END PGP SIGNATURE-----

Merge tag 'kvmarm-5.6' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
commit 621ab20c06
@@ -2196,6 +2196,15 @@ arm64 CCSIDR registers are demultiplexed by CSSELR value:

arm64 system registers have the following id bit patterns:
0x6030 0000 0013 <op0:2> <op1:3> <crn:4> <crm:4> <op2:3>

WARNING:
Two system register IDs do not follow the specified pattern. These
are KVM_REG_ARM_TIMER_CVAL and KVM_REG_ARM_TIMER_CNT, which map to
system registers CNTV_CVAL_EL0 and CNTVCT_EL0 respectively. These
two had their values accidentally swapped, which means TIMER_CVAL is
derived from the register encoding for CNTVCT_EL0 and TIMER_CNT is
derived from the register encoding for CNTV_CVAL_EL0. As this is
API, it must remain this way.

arm64 firmware pseudo-registers have the following bit pattern:
0x6030 0000 0014 <regno:16>
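A quick standalone sketch (plain C, not part of this patch) of how the id bit pattern above composes, assuming the fields are packed MSB-to-LSB exactly as documented. It reproduces the swap the WARNING describes: the id built from CNTV_CVAL_EL0's encoding is the one the UAPI calls KVM_REG_ARM_TIMER_CNT, and vice versa.

#include <stdio.h>
#include <stdint.h>

/* Pack op0..op2 into the 0x6030 0000 0013 <op0:2><op1:3><crn:4><crm:4><op2:3>
 * pattern described above (op2 occupies the lowest 3 bits). */
static uint64_t sysreg_id(unsigned op0, unsigned op1, unsigned crn,
                          unsigned crm, unsigned op2)
{
        return 0x6030000000130000ULL |
               ((uint64_t)op0 << 14) | ((uint64_t)op1 << 11) |
               ((uint64_t)crn << 7)  | ((uint64_t)crm << 3)  |
               (uint64_t)op2;
}

int main(void)
{
        /* CNTV_CVAL_EL0 is op0=3, op1=3, CRn=14, CRm=3, op2=2;
         * CNTVCT_EL0 is op0=3, op1=3, CRn=14, CRm=0, op2=2. */
        printf("CNTV_CVAL_EL0 encoding: 0x%016llx (value of KVM_REG_ARM_TIMER_CNT)\n",
               (unsigned long long)sysreg_id(3, 3, 14, 3, 2));
        printf("CNTVCT_EL0 encoding:    0x%016llx (value of KVM_REG_ARM_TIMER_CVAL)\n",
               (unsigned long long)sysreg_id(3, 3, 14, 0, 2));
        return 0;
}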
@@ -9,18 +9,29 @@

#include <linux/kvm_host.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_arm.h>
#include <asm/cputype.h>

/* arm64 compatibility macros */
#define PSR_AA32_MODE_FIQ FIQ_MODE
#define PSR_AA32_MODE_SVC SVC_MODE
#define PSR_AA32_MODE_ABT ABT_MODE
#define PSR_AA32_MODE_UND UND_MODE
#define PSR_AA32_T_BIT PSR_T_BIT
#define PSR_AA32_F_BIT PSR_F_BIT
#define PSR_AA32_I_BIT PSR_I_BIT
#define PSR_AA32_A_BIT PSR_A_BIT
#define PSR_AA32_E_BIT PSR_E_BIT
#define PSR_AA32_IT_MASK PSR_IT_MASK
#define PSR_AA32_GE_MASK 0x000f0000
#define PSR_AA32_DIT_BIT 0x00200000
#define PSR_AA32_PAN_BIT 0x00400000
#define PSR_AA32_SSBS_BIT 0x00800000
#define PSR_AA32_Q_BIT PSR_Q_BIT
#define PSR_AA32_V_BIT PSR_V_BIT
#define PSR_AA32_C_BIT PSR_C_BIT
#define PSR_AA32_Z_BIT PSR_Z_BIT
#define PSR_AA32_N_BIT PSR_N_BIT

unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);

@@ -41,6 +52,11 @@ static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
*__vcpu_spsr(vcpu) = v;
}

static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
return spsr;
}

static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
u8 reg_num)
{

@@ -182,6 +198,11 @@ static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
}

static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
return false;
}

static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
{
return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;

@@ -198,7 +219,7 @@ static inline bool kvm_vcpu_dabt_is_cm(struct kvm_vcpu *vcpu)
}

/* Get Access Size from a data abort */
static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
static inline unsigned int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
{
switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
case 0:

@@ -209,7 +230,7 @@ static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
return 4;
default:
kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
return -EFAULT;
return 4;
}
}
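A standalone sketch (plain C, not taken from the patch) of the access-size decode this hunk touches: the 2-bit SAS field in HSR[23:22] selects 1, 2 or 4 bytes, and after this change the reserved 0b11 pattern is simply reported as 4 bytes instead of failing with -EFAULT.

#include <stdio.h>

/* Decode the SAS ("Syndrome Access Size") field of a 32-bit host's HSR.
 * 0b00/0b01/0b10 mean 1/2/4 bytes; 0b11 is reserved on AArch32 and is
 * now treated as a 4-byte access rather than an error. */
static unsigned int dabt_access_size(unsigned long hsr)
{
        switch ((hsr >> 22) & 0x3) {
        case 0:
                return 1;
        case 1:
                return 2;
        case 2:
        default:        /* 0b11 is reserved on AArch32 */
                return 4;
        }
}

int main(void)
{
        unsigned long sas;

        for (sas = 0; sas < 4; sas++)
                printf("SAS=%lu -> %u byte(s)\n", sas, dabt_access_size(sas << 22));
        return 0;
}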
@@ -14,7 +14,6 @@
#include <asm/cputype.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/fpstate.h>
#include <kvm/arm_arch_timer.h>

@@ -202,9 +201,6 @@
/* Don't run the guest (internal implementation need) */
bool pause;

/* IO related fields */
struct kvm_decode mmio_decode;

/* Cache some mmu pages needed inside spinlock regions */
struct kvm_mmu_memory_cache mmu_page_cache;

@@ -298,6 +294,14 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
static inline void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
int exception_index) {}

/* MMIO helpers */
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
phys_addr_t fault_ipa);

static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
unsigned long hyp_stack_ptr,
unsigned long vector_ptr)
@@ -10,6 +10,7 @@
#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/cp15.h>
#include <asm/kvm_arm.h>
#include <asm/vfp.h>

#define __hyp_text __section(.hyp.text) notrace
@@ -1,26 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*/

#ifndef __ARM_KVM_MMIO_H__
#define __ARM_KVM_MMIO_H__

#include <linux/kvm_host.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>

struct kvm_decode {
unsigned long rt;
bool sign_extend;
};

void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
phys_addr_t fault_ipa);

#endif /* __ARM_KVM_MMIO_H__ */
@@ -17,7 +17,6 @@
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmio.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

@@ -219,6 +218,38 @@ static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = v;
}

/*
* The layout of SPSR for an AArch32 state is different when observed from an
* AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
* view given an AArch64 view.
*
* In ARM DDI 0487E.a see:
*
* - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
* - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
* - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
*
* Which show the following differences:
*
* | Bit | AA64 | AA32 | Notes |
* +-----+------+------+-----------------------------|
* | 24 | DIT | J | J is RES0 in ARMv8 |
* | 21 | SS | DIT | SS doesn't exist in AArch32 |
*
* ... and all other bits are (currently) common.
*/
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
const unsigned long overlap = BIT(24) | BIT(21);
unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT);

spsr &= ~overlap;

spsr |= dit << 21;

return spsr;
}

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
u32 mode;

@@ -283,6 +314,11 @@ static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
}

static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF);
}

static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;

@@ -304,7 +340,7 @@ static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
}

static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
static inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}
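A small standalone illustration (plain C, not from the patch) of the bit move host_spsr_to_spsr32() performs above: DIT sits at bit 24 in the AArch64 SPSR_ELx view but at bit 21 in the AArch32 view, so both positions are cleared and DIT is re-inserted at 21. The sample input value is arbitrary.

#include <stdio.h>
#include <stdint.h>

#define BIT64(n)        (1ULL << (n))
#define AA64_DIT_BIT    BIT64(24)       /* DIT in the AArch64 SPSR_ELx view */

/* Same transformation as host_spsr_to_spsr32(), written standalone. */
static uint64_t spsr64_to_spsr32(uint64_t spsr)
{
        uint64_t dit = !!(spsr & AA64_DIT_BIT);

        spsr &= ~(BIT64(24) | BIT64(21));       /* clear DIT/J and SS/DIT slots */
        spsr |= dit << 21;                      /* DIT at its AArch32 position */
        return spsr;
}

int main(void)
{
        uint64_t spsr = AA64_DIT_BIT | 0x1d3;   /* arbitrary example value */

        printf("AArch64 view 0x%llx -> AArch32 view 0x%llx\n",
               (unsigned long long)spsr,
               (unsigned long long)spsr64_to_spsr32(spsr));
        return 0;
}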
@@ -24,7 +24,6 @@
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/thread_info.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

@@ -325,9 +324,6 @@
/* Don't run the guest (internal implementation need) */
bool pause;

/* IO related fields */
struct kvm_decode mmio_decode;

/* Cache some mmu pages needed inside spinlock regions */
struct kvm_mmu_memory_cache mmu_page_cache;

@@ -489,6 +485,14 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
int exception_index);

/* MMIO helpers */
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
phys_addr_t fault_ipa);

int kvm_perf_init(void);
int kvm_perf_teardown(void);
@@ -1,29 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*/

#ifndef __ARM64_KVM_MMIO_H__
#define __ARM64_KVM_MMIO_H__

#include <linux/kvm_host.h>
#include <asm/kvm_arm.h>

/*
* This is annoying. The mmio code requires this, even if we don't
* need any decoding. To be fixed.
*/
struct kvm_decode {
unsigned long rt;
bool sign_extend;
};

void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
phys_addr_t fault_ipa);

#endif /* __ARM64_KVM_MMIO_H__ */
@@ -62,6 +62,7 @@
#define PSR_AA32_I_BIT 0x00000080
#define PSR_AA32_A_BIT 0x00000100
#define PSR_AA32_E_BIT 0x00000200
#define PSR_AA32_PAN_BIT 0x00400000
#define PSR_AA32_SSBS_BIT 0x00800000
#define PSR_AA32_DIT_BIT 0x01000000
#define PSR_AA32_Q_BIT 0x08000000
@@ -220,10 +220,18 @@ struct kvm_vcpu_events {
#define KVM_REG_ARM_PTIMER_CVAL ARM64_SYS_REG(3, 3, 14, 2, 2)
#define KVM_REG_ARM_PTIMER_CNT ARM64_SYS_REG(3, 3, 14, 0, 1)

/* EL0 Virtual Timer Registers */
/*
* EL0 Virtual Timer Registers
*
* WARNING:
* KVM_REG_ARM_TIMER_CVAL and KVM_REG_ARM_TIMER_CNT are not defined
* with the appropriate register encodings. Their values have been
* accidentally swapped. As this is set API, the definitions here
* must be used, rather than ones derived from the encodings.
*/
#define KVM_REG_ARM_TIMER_CTL ARM64_SYS_REG(3, 3, 14, 3, 1)
#define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2)
#define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2)
#define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2)

/* KVM-as-firmware specific pseudo-registers */
#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT)
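A hedged userspace sketch (arm64 Linux, not part of the patch) of why the comment above matters to VMM authors: when reading the guest's virtual counter through KVM_GET_ONE_REG you must pass the UAPI constant as defined, even though its encoding is really CNTV_CVAL_EL0's. The vcpu_fd is assumed to come from a prior KVM_CREATE_VCPU.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Read the guest's virtual counter (what the guest sees as CNTVCT_EL0). */
static int read_guest_cntvct(int vcpu_fd, uint64_t *cnt)
{
        struct kvm_one_reg reg = {
                .id   = KVM_REG_ARM_TIMER_CNT,          /* keep the UAPI name, not the encoding */
                .addr = (uint64_t)(uintptr_t)cnt,       /* KVM writes the value here */
        };

        return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}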
@@ -49,6 +49,7 @@
#define PSR_SSBS_BIT 0x00001000
#define PSR_PAN_BIT 0x00400000
#define PSR_UAO_BIT 0x00800000
#define PSR_DIT_BIT 0x01000000
#define PSR_V_BIT 0x10000000
#define PSR_C_BIT 0x20000000
#define PSR_Z_BIT 0x40000000
@@ -44,7 +44,7 @@
* u64 __guest_enter(struct kvm_vcpu *vcpu,
* struct kvm_cpu_context *host_ctxt);
*/
ENTRY(__guest_enter)
SYM_FUNC_START(__guest_enter)
// x0: vcpu
// x1: host context
// x2-x17: clobbered by macros

@@ -96,9 +96,8 @@ alternative_else_nop_endif
// Do not touch any register after this!
eret
sb
ENDPROC(__guest_enter)

ENTRY(__guest_exit)
SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
// x0: return code
// x1: vcpu
// x2-x29,lr: vcpu regs

@@ -192,4 +191,4 @@ abort_guest_exit_end:
msr spsr_el2, x4
orr x0, x0, x5
1: ret
ENDPROC(__guest_exit)
SYM_FUNC_END(__guest_enter)
@@ -14,9 +14,6 @@
#include <asm/kvm_emulate.h>
#include <asm/esr.h>

#define PSTATE_FAULT_BITS_64 (PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \
PSR_I_BIT | PSR_D_BIT)

#define CURRENT_EL_SP_EL0_VECTOR 0x0
#define CURRENT_EL_SP_ELx_VECTOR 0x200
#define LOWER_EL_AArch64_VECTOR 0x400

@@ -50,6 +47,69 @@ static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
return vcpu_read_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
}

/*
* When an exception is taken, most PSTATE fields are left unchanged in the
* handler. However, some are explicitly overridden (e.g. M[4:0]). Luckily all
* of the inherited bits have the same position in the AArch64/AArch32 SPSR_ELx
* layouts, so we don't need to shuffle these for exceptions from AArch32 EL0.
*
* For the SPSR_ELx layout for AArch64, see ARM DDI 0487E.a page C5-429.
* For the SPSR_ELx layout for AArch32, see ARM DDI 0487E.a page C5-426.
*
* Here we manipulate the fields in order of the AArch64 SPSR_ELx layout, from
* MSB to LSB.
*/
static unsigned long get_except64_pstate(struct kvm_vcpu *vcpu)
{
unsigned long sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
unsigned long old, new;

old = *vcpu_cpsr(vcpu);
new = 0;

new |= (old & PSR_N_BIT);
new |= (old & PSR_Z_BIT);
new |= (old & PSR_C_BIT);
new |= (old & PSR_V_BIT);

// TODO: TCO (if/when ARMv8.5-MemTag is exposed to guests)

new |= (old & PSR_DIT_BIT);

// PSTATE.UAO is set to zero upon any exception to AArch64
// See ARM DDI 0487E.a, page D5-2579.

// PSTATE.PAN is unchanged unless SCTLR_ELx.SPAN == 0b0
// SCTLR_ELx.SPAN is RES1 when ARMv8.1-PAN is not implemented
// See ARM DDI 0487E.a, page D5-2578.
new |= (old & PSR_PAN_BIT);
if (!(sctlr & SCTLR_EL1_SPAN))
new |= PSR_PAN_BIT;

// PSTATE.SS is set to zero upon any exception to AArch64
// See ARM DDI 0487E.a, page D2-2452.

// PSTATE.IL is set to zero upon any exception to AArch64
// See ARM DDI 0487E.a, page D1-2306.

// PSTATE.SSBS is set to SCTLR_ELx.DSSBS upon any exception to AArch64
// See ARM DDI 0487E.a, page D13-3258
if (sctlr & SCTLR_ELx_DSSBS)
new |= PSR_SSBS_BIT;

// PSTATE.BTYPE is set to zero upon any exception to AArch64
// See ARM DDI 0487E.a, pages D1-2293 to D1-2294.

new |= PSR_D_BIT;
new |= PSR_A_BIT;
new |= PSR_I_BIT;
new |= PSR_F_BIT;

new |= PSR_MODE_EL1h;

return new;
}

static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
{
unsigned long cpsr = *vcpu_cpsr(vcpu);

@@ -59,7 +119,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);

*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
*vcpu_cpsr(vcpu) = get_except64_pstate(vcpu);
vcpu_write_spsr(vcpu, cpsr);

vcpu_write_sys_reg(vcpu, addr, FAR_EL1);

@@ -94,7 +154,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);

*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
*vcpu_cpsr(vcpu) = get_except64_pstate(vcpu);
vcpu_write_spsr(vcpu, cpsr);

/*
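A reduced, standalone sketch (plain C, not the kernel code) of the PSTATE construction get_except64_pstate() performs above, limited to the NZCV, DIT, PAN, SSBS and DAIF handling. The PSR_* values mirror the arm64 definitions; the SCTLR bit positions used here (SPAN at bit 23, DSSBS at bit 44) are an assumption taken from the ARM ARM references quoted in the hunk.

#include <stdio.h>
#include <stdint.h>

#define PSR_NZCV_MASK   0xf0000000ULL
#define PSR_DIT_BIT     0x01000000ULL
#define PSR_PAN_BIT     0x00400000ULL
#define PSR_SSBS_BIT    0x00001000ULL
#define PSR_DAIF_MASK   0x000003c0ULL   /* D, A, I, F */
#define PSR_MODE_EL1h   0x00000005ULL
#define SCTLR_SPAN      (1ULL << 23)    /* assumed bit position */
#define SCTLR_DSSBS     (1ULL << 44)    /* assumed bit position */

static uint64_t except64_pstate(uint64_t old, uint64_t sctlr)
{
        /* NZCV, DIT and PAN are inherited from the interrupted context */
        uint64_t new = old & (PSR_NZCV_MASK | PSR_DIT_BIT | PSR_PAN_BIT);

        if (!(sctlr & SCTLR_SPAN))      /* SPAN == 0: PAN is set on entry */
                new |= PSR_PAN_BIT;
        if (sctlr & SCTLR_DSSBS)        /* SSBS follows SCTLR_ELx.DSSBS */
                new |= PSR_SSBS_BIT;

        /* all of D, A, I, F are masked, and we land on EL1h */
        return new | PSR_DAIF_MASK | PSR_MODE_EL1h;
}

int main(void)
{
        printf("new PSTATE = 0x%llx\n",
               (unsigned long long)except64_pstate(0x60000000, SCTLR_SPAN));
        return 0;
}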
@@ -13,52 +13,46 @@
#include <asm/kvm_mmu.h>

/*
* The LSB of the random hyp VA tag or 0 if no randomization is used.
* The LSB of the HYP VA tag
*/
static u8 tag_lsb;
/*
* The random hyp VA tag value with the region bit if hyp randomization is used
* The HYP VA tag value with the region bit
*/
static u64 tag_val;
static u64 va_mask;

/*
* We want to generate a hyp VA with the following format (with V ==
* vabits_actual):
*
* 63 ... V | V-1 | V-2 .. tag_lsb | tag_lsb - 1 .. 0
* ---------------------------------------------------------
* | 0000000 | hyp_va_msb | random tag | kern linear VA |
* |--------- tag_val -----------|----- va_mask ---|
*
* which does not conflict with the idmap regions.
*/
__init void kvm_compute_layout(void)
{
phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
u64 hyp_va_msb;
int kva_msb;

/* Where is my RAM region? */
hyp_va_msb = idmap_addr & BIT(vabits_actual - 1);
hyp_va_msb ^= BIT(vabits_actual - 1);

kva_msb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
tag_lsb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
(u64)(high_memory - 1));

if (kva_msb == (vabits_actual - 1)) {
/*
* No space in the address, let's compute the mask so
* that it covers (vabits_actual - 1) bits, and the region
* bit. The tag stays set to zero.
*/
va_mask = BIT(vabits_actual - 1) - 1;
va_mask |= hyp_va_msb;
} else {
/*
* We do have some free bits to insert a random tag.
* Hyp VAs are now created from kernel linear map VAs
* using the following formula (with V == vabits_actual):
*
* 63 ... V | V-1 | V-2 .. tag_lsb | tag_lsb - 1 .. 0
* ---------------------------------------------------------
* | 0000000 | hyp_va_msb | random tag | kern linear VA |
*/
tag_lsb = kva_msb;
va_mask = GENMASK_ULL(tag_lsb - 1, 0);
tag_val = get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
tag_val |= hyp_va_msb;
tag_val >>= tag_lsb;
va_mask = GENMASK_ULL(tag_lsb - 1, 0);
tag_val = hyp_va_msb;

if (tag_lsb != (vabits_actual - 1)) {
/* We have some free bits to insert a random tag. */
tag_val |= get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
}
tag_val >>= tag_lsb;
}

static u32 compute_instruction(int n, u32 rd, u32 rn)

@@ -117,11 +111,11 @@ void __init kvm_update_va_mask(struct alt_instr *alt,
* VHE doesn't need any address translation, let's NOP
* everything.
*
* Alternatively, if we don't have any spare bits in
* the address, NOP everything after masking that
* kernel VA.
* Alternatively, if the tag is zero (because the layout
* dictates it and we don't have any spare bits in the
* address), NOP everything after masking the kernel VA.
*/
if (has_vhe() || (!tag_lsb && i > 0)) {
if (has_vhe() || (!tag_val && i > 0)) {
updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
continue;
}
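A standalone sketch (plain C, not from the patch) of the transformation the patched alternatives end up applying once tag_lsb, tag_val and va_mask have been computed as above: keep the low bits of the kernel linear-map VA and OR the (pre-shifted) tag back in at tag_lsb. The concrete values used here (V = 48, tag_lsb = 44, tag_val = 0x9, the sample address) are made up for illustration; the real ones depend on the memory map and on get_random_long().

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        const unsigned int tag_lsb = 44;                 /* assumed */
        const uint64_t va_mask = (1ULL << tag_lsb) - 1;  /* GENMASK(tag_lsb - 1, 0) */
        const uint64_t tag_val = 0x9;                    /* region bit + random tag, already >> tag_lsb */
        uint64_t kern_va = 0xffff800011223344ULL;        /* example linear-map address */
        uint64_t hyp_va;

        /* what the patched kern_hyp_va() sequence computes */
        hyp_va = (kern_va & va_mask) | (tag_val << tag_lsb);

        printf("kern VA 0x%016llx -> hyp VA 0x%016llx\n",
               (unsigned long long)kern_va, (unsigned long long)hyp_va);
        return 0;
}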
@@ -10,10 +10,15 @@
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*/

#include <linux/bits.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>

#define DFSR_FSC_EXTABT_LPAE 0x10
#define DFSR_FSC_EXTABT_nLPAE 0x08
#define DFSR_LPAE BIT(9)

/*
* Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
*/

@@ -28,25 +33,115 @@ static const u8 return_offsets[8][2] = {
[7] = { 4, 4 }, /* FIQ, unused */
};

/*
* When an exception is taken, most CPSR fields are left unchanged in the
* handler. However, some are explicitly overridden (e.g. M[4:0]).
*
* The SPSR/SPSR_ELx layouts differ, and the below is intended to work with
* either format. Note: SPSR.J bit doesn't exist in SPSR_ELx, but this bit was
* obsoleted by the ARMv7 virtualization extensions and is RES0.
*
* For the SPSR layout seen from AArch32, see:
* - ARM DDI 0406C.d, page B1-1148
* - ARM DDI 0487E.a, page G8-6264
*
* For the SPSR_ELx layout for AArch32 seen from AArch64, see:
* - ARM DDI 0487E.a, page C5-426
*
* Here we manipulate the fields in order of the AArch32 SPSR_ELx layout, from
* MSB to LSB.
*/
static unsigned long get_except32_cpsr(struct kvm_vcpu *vcpu, u32 mode)
{
u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
unsigned long old, new;

old = *vcpu_cpsr(vcpu);
new = 0;

new |= (old & PSR_AA32_N_BIT);
new |= (old & PSR_AA32_Z_BIT);
new |= (old & PSR_AA32_C_BIT);
new |= (old & PSR_AA32_V_BIT);
new |= (old & PSR_AA32_Q_BIT);

// CPSR.IT[7:0] are set to zero upon any exception
// See ARM DDI 0487E.a, section G1.12.3
// See ARM DDI 0406C.d, section B1.8.3

new |= (old & PSR_AA32_DIT_BIT);

// CPSR.SSBS is set to SCTLR.DSSBS upon any exception
// See ARM DDI 0487E.a, page G8-6244
if (sctlr & BIT(31))
new |= PSR_AA32_SSBS_BIT;

// CPSR.PAN is unchanged unless SCTLR.SPAN == 0b0
// SCTLR.SPAN is RES1 when ARMv8.1-PAN is not implemented
// See ARM DDI 0487E.a, page G8-6246
new |= (old & PSR_AA32_PAN_BIT);
if (!(sctlr & BIT(23)))
new |= PSR_AA32_PAN_BIT;

// SS does not exist in AArch32, so ignore

// CPSR.IL is set to zero upon any exception
// See ARM DDI 0487E.a, page G1-5527

new |= (old & PSR_AA32_GE_MASK);

// CPSR.IT[7:0] are set to zero upon any exception
// See prior comment above

// CPSR.E is set to SCTLR.EE upon any exception
// See ARM DDI 0487E.a, page G8-6245
// See ARM DDI 0406C.d, page B4-1701
if (sctlr & BIT(25))
new |= PSR_AA32_E_BIT;

// CPSR.A is unchanged upon an exception to Undefined, Supervisor
// CPSR.A is set upon an exception to other modes
// See ARM DDI 0487E.a, pages G1-5515 to G1-5516
// See ARM DDI 0406C.d, page B1-1182
new |= (old & PSR_AA32_A_BIT);
if (mode != PSR_AA32_MODE_UND && mode != PSR_AA32_MODE_SVC)
new |= PSR_AA32_A_BIT;

// CPSR.I is set upon any exception
// See ARM DDI 0487E.a, pages G1-5515 to G1-5516
// See ARM DDI 0406C.d, page B1-1182
new |= PSR_AA32_I_BIT;

// CPSR.F is set upon an exception to FIQ
// CPSR.F is unchanged upon an exception to other modes
// See ARM DDI 0487E.a, pages G1-5515 to G1-5516
// See ARM DDI 0406C.d, page B1-1182
new |= (old & PSR_AA32_F_BIT);
if (mode == PSR_AA32_MODE_FIQ)
new |= PSR_AA32_F_BIT;

// CPSR.T is set to SCTLR.TE upon any exception
// See ARM DDI 0487E.a, page G8-5514
// See ARM DDI 0406C.d, page B1-1181
if (sctlr & BIT(30))
new |= PSR_AA32_T_BIT;

new |= mode;

return new;
}

static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
{
unsigned long cpsr;
unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
bool is_thumb = (new_spsr_value & PSR_AA32_T_BIT);
unsigned long spsr = *vcpu_cpsr(vcpu);
bool is_thumb = (spsr & PSR_AA32_T_BIT);
u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);

cpsr = mode | PSR_AA32_I_BIT;

if (sctlr & (1 << 30))
cpsr |= PSR_AA32_T_BIT;
if (sctlr & (1 << 25))
cpsr |= PSR_AA32_E_BIT;

*vcpu_cpsr(vcpu) = cpsr;
*vcpu_cpsr(vcpu) = get_except32_cpsr(vcpu, mode);

/* Note: These now point to the banked copies */
vcpu_write_spsr(vcpu, new_spsr_value);
vcpu_write_spsr(vcpu, host_spsr_to_spsr32(spsr));
*vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;

/* Branch to exception vector */

@@ -84,16 +179,18 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
fsr = &vcpu_cp15(vcpu, c5_DFSR);
}

prepare_fault32(vcpu, PSR_AA32_MODE_ABT | PSR_AA32_A_BIT, vect_offset);
prepare_fault32(vcpu, PSR_AA32_MODE_ABT, vect_offset);

*far = addr;

/* Give the guest an IMPLEMENTATION DEFINED exception */
is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
if (is_lpae)
*fsr = 1 << 9 | 0x34;
else
*fsr = 0x14;
if (is_lpae) {
*fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
} else {
/* no need to shuffle FS[4] into DFSR[10] as its 0 */
*fsr = DFSR_FSC_EXTABT_nLPAE;
}
}

void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)
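A standalone sketch (plain C, not from the patch) of the fault-status value the fixed inject_abt32() path above now reports for a synchronous external abort, using the same DFSR_* constants the hunk introduces: the LPAE (long-descriptor) format sets the LPAE bit plus FSC 0x10, while the short-descriptor format uses FS = 0b01000 with FS[4] (DFSR[10]) left clear.

#include <stdio.h>
#include <stdint.h>

#define DFSR_FSC_EXTABT_LPAE    0x10
#define DFSR_FSC_EXTABT_nLPAE   0x08
#define DFSR_LPAE               (1U << 9)

static uint32_t extabt_fsr(int guest_uses_lpae)
{
        if (guest_uses_lpae)
                return DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;        /* 0x210 */
        return DFSR_FSC_EXTABT_nLPAE;                           /* 0x008 */
}

int main(void)
{
        printf("LPAE: 0x%03x, non-LPAE: 0x%03x\n", extabt_fsr(1), extabt_fsr(0));
        return 0;
}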
@@ -805,6 +805,7 @@ static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
switch (treg) {
case TIMER_REG_TVAL:
val = timer->cnt_cval - kvm_phys_timer_read() + timer->cntvoff;
val &= lower_32_bits(val);
break;

case TIMER_REG_CTL:

@@ -850,7 +851,7 @@ static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
{
switch (treg) {
case TIMER_REG_TVAL:
timer->cnt_cval = kvm_phys_timer_read() - timer->cntvoff + val;
timer->cnt_cval = kvm_phys_timer_read() - timer->cntvoff + (s32)val;
break;

case TIMER_REG_CTL:
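A standalone illustration (plain C, not the kernel code) of why the (s32) cast in the write path above matters: TVAL is a signed 32-bit register, so a "negative" value written by the guest must move CVAL backwards rather than forwards by roughly 4G. The counter value used below is arbitrary.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t now = 1000000;         /* stand-in for counter - cntvoff */
        uint32_t tval = 0xffffff00;     /* guest writes TVAL = -256 */

        uint64_t cval_unsigned = now + tval;            /* old, buggy arithmetic */
        uint64_t cval_signed   = now + (int32_t)tval;   /* patched arithmetic */

        printf("without cast: cval = %llu\n", (unsigned long long)cval_unsigned);
        printf("with (s32) cast: cval = %llu\n", (unsigned long long)cval_signed);
        return 0;
}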
@@ -20,8 +20,6 @@
#include <linux/irqbypass.h>
#include <linux/sched/stat.h>
#include <trace/events/kvm.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_psci.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

@@ -1482,7 +1480,6 @@ static void teardown_hyp_mode(void)
free_hyp_pgds();
for_each_possible_cpu(cpu)
free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
hyp_cpu_pm_exit();
}

/**

@@ -1696,6 +1693,7 @@ int kvm_arch_init(void *opaque)
return 0;

out_hyp:
hyp_cpu_pm_exit();
if (!in_hyp_mode)
teardown_hyp_mode();
out_err:
@@ -5,7 +5,6 @@
*/

#include <linux/kvm_host.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_emulate.h>
#include <trace/events/kvm.h>

@@ -92,23 +91,23 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)

vcpu->mmio_needed = 0;

if (!run->mmio.is_write) {
len = run->mmio.len;
if (len > sizeof(unsigned long))
return -EINVAL;

if (!kvm_vcpu_dabt_iswrite(vcpu)) {
len = kvm_vcpu_dabt_get_as(vcpu);
data = kvm_mmio_read_buf(run->mmio.data, len);

if (vcpu->arch.mmio_decode.sign_extend &&
if (kvm_vcpu_dabt_issext(vcpu) &&
len < sizeof(unsigned long)) {
mask = 1U << ((len * 8) - 1);
data = (data ^ mask) - mask;
}

if (!kvm_vcpu_dabt_issf(vcpu))
data = data & 0xffffffff;

trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
&data);
data = vcpu_data_host_to_guest(vcpu, data, len);
vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), data);
}

/*

@@ -120,33 +119,6 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
return 0;
}

static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
{
unsigned long rt;
int access_size;
bool sign_extend;

if (kvm_vcpu_dabt_iss1tw(vcpu)) {
/* page table accesses IO mem: tell guest to fix its TTBR */
kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
return 1;
}

access_size = kvm_vcpu_dabt_get_as(vcpu);
if (unlikely(access_size < 0))
return access_size;

*is_write = kvm_vcpu_dabt_iswrite(vcpu);
sign_extend = kvm_vcpu_dabt_issext(vcpu);
rt = kvm_vcpu_dabt_get_rd(vcpu);

*len = access_size;
vcpu->arch.mmio_decode.sign_extend = sign_extend;
vcpu->arch.mmio_decode.rt = rt;

return 0;
}

int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
phys_addr_t fault_ipa)
{

@@ -158,15 +130,10 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
u8 data_buf[8];

/*
* Prepare MMIO operation. First decode the syndrome data we get
* from the CPU. Then try if some in-kernel emulation feels
* responsible, otherwise let user space do its magic.
* No valid syndrome? Ask userspace for help if it has
* voluntered to do so, and bail out otherwise.
*/
if (kvm_vcpu_dabt_isvalid(vcpu)) {
ret = decode_hsr(vcpu, &is_write, &len);
if (ret)
return ret;
} else {
if (!kvm_vcpu_dabt_isvalid(vcpu)) {
if (vcpu->kvm->arch.return_nisv_io_abort_to_user) {
run->exit_reason = KVM_EXIT_ARM_NISV;
run->arm_nisv.esr_iss = kvm_vcpu_dabt_iss_nisv_sanitized(vcpu);

@@ -178,7 +145,20 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
return -ENOSYS;
}

rt = vcpu->arch.mmio_decode.rt;
/* Page table accesses IO mem: tell guest to fix its TTBR */
if (kvm_vcpu_dabt_iss1tw(vcpu)) {
kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
return 1;
}

/*
* Prepare MMIO operation. First decode the syndrome data we get
* from the CPU. Then try if some in-kernel emulation feels
* responsible, otherwise let user space do its magic.
*/
is_write = kvm_vcpu_dabt_iswrite(vcpu);
len = kvm_vcpu_dabt_get_as(vcpu);
rt = kvm_vcpu_dabt_get_rd(vcpu);

if (is_write) {
data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
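A standalone sketch (plain C, not the kernel code) of the fixed read-return path above: a narrow MMIO read is sign-extended when the faulting instruction asked for it (SSE set), then truncated to 32 bits when the destination register was a Wn register (SF clear). This is the sign-extension bug called out in the merge summary.

#include <stdio.h>
#include <stdint.h>

static uint64_t mmio_read_result(uint64_t data, unsigned int len,
                                 int sign_extend, int sixty_four)
{
        if (sign_extend && len < sizeof(uint64_t)) {
                uint64_t mask = 1ULL << (len * 8 - 1);

                data = (data ^ mask) - mask;    /* classic sign-extension trick */
        }
        if (!sixty_four)
                data &= 0xffffffff;             /* Wn destination: upper half zeroed */
        return data;
}

int main(void)
{
        /* 1-byte read of 0x80, sign-extending load into a 32-bit register */
        printf("0x%llx\n", (unsigned long long)mmio_read_result(0x80, 1, 1, 0));
        /* same load into a 64-bit register */
        printf("0x%llx\n", (unsigned long long)mmio_read_result(0x80, 1, 1, 1));
        return 0;
}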
@@ -14,7 +14,6 @@
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_ras.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>

@@ -1590,16 +1589,8 @@ static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size)
__invalidate_icache_guest_page(pfn, size);
}

static void kvm_send_hwpoison_signal(unsigned long address,
struct vm_area_struct *vma)
static void kvm_send_hwpoison_signal(unsigned long address, short lsb)
{
short lsb;

if (is_vm_hugetlb_page(vma))
lsb = huge_page_shift(hstate_vma(vma));
else
lsb = PAGE_SHIFT;

send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
}

@@ -1672,6 +1663,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm *kvm = vcpu->kvm;
struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
struct vm_area_struct *vma;
short vma_shift;
kvm_pfn_t pfn;
pgprot_t mem_type = PAGE_S2;
bool logging_active = memslot_is_logging(memslot);

@@ -1695,7 +1687,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return -EFAULT;
}

vma_pagesize = vma_kernel_pagesize(vma);
if (is_vm_hugetlb_page(vma))
vma_shift = huge_page_shift(hstate_vma(vma));
else
vma_shift = PAGE_SHIFT;

vma_pagesize = 1ULL << vma_shift;
if (logging_active ||
(vma->vm_flags & VM_PFNMAP) ||
!fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {

@@ -1735,7 +1732,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,

pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
if (pfn == KVM_PFN_ERR_HWPOISON) {
kvm_send_hwpoison_signal(hva, vma);
kvm_send_hwpoison_signal(hva, vma_shift);
return 0;
}
if (is_error_noslot_pfn(pfn))

@@ -2141,7 +2138,8 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
if (!kvm->arch.pgd)
return 0;
trace_kvm_test_age_hva(hva);
return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
return handle_hva_to_gpa(kvm, hva, hva + PAGE_SIZE,
kvm_test_age_hva_handler, NULL);
}

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
@@ -15,6 +15,8 @@
#include <kvm/arm_vgic.h>

static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx);
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);

#define PERF_ATTR_CFG1_KVM_PMU_CHAINED 0x1

@@ -75,6 +77,13 @@ static struct kvm_pmc *kvm_pmu_get_canonical_pmc(struct kvm_pmc *pmc)

return pmc;
}
static struct kvm_pmc *kvm_pmu_get_alternate_pmc(struct kvm_pmc *pmc)
{
if (kvm_pmu_idx_is_high_counter(pmc->idx))
return pmc - 1;
else
return pmc + 1;
}

/**
* kvm_pmu_idx_has_chain_evtype - determine if the event type is chain

@@ -238,10 +247,11 @@ void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
*/
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
{
int i;
unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
struct kvm_pmu *pmu = &vcpu->arch.pmu;
int i;

for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
for_each_set_bit(i, &mask, 32)
kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);

bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS);

@@ -294,15 +304,9 @@ void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)

pmc = &pmu->pmc[i];

/*
* For high counters of chained events we must recreate the
* perf event with the long (64bit) attribute set.
*/
if (kvm_pmu_pmc_is_chained(pmc) &&
kvm_pmu_idx_is_high_counter(i)) {
kvm_pmu_create_perf_event(vcpu, i);
continue;
}
/* A change in the enable state may affect the chain state */
kvm_pmu_update_pmc_chained(vcpu, i);
kvm_pmu_create_perf_event(vcpu, i);

/* At this point, pmc must be the canonical */
if (pmc->perf_event) {

@@ -335,15 +339,9 @@ void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)

pmc = &pmu->pmc[i];

/*
* For high counters of chained events we must recreate the
* perf event with the long (64bit) attribute unset.
*/
if (kvm_pmu_pmc_is_chained(pmc) &&
kvm_pmu_idx_is_high_counter(i)) {
kvm_pmu_create_perf_event(vcpu, i);
continue;
}
/* A change in the enable state may affect the chain state */
kvm_pmu_update_pmc_chained(vcpu, i);
kvm_pmu_create_perf_event(vcpu, i);

/* At this point, pmc must be the canonical */
if (pmc->perf_event)

@@ -480,25 +478,45 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
*/
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
struct kvm_pmu *pmu = &vcpu->arch.pmu;
int i;
u64 type, enable, reg;

if (val == 0)
if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
return;

enable = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
/* Weed out disabled counters */
val &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);

for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
u64 type, reg;

if (!(val & BIT(i)))
continue;
type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
& ARMV8_PMU_EVTYPE_EVENT;
if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR)
&& (enable & BIT(i))) {
reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;

/* PMSWINC only applies to ... SW_INC! */
type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
type &= ARMV8_PMU_EVTYPE_EVENT;
if (type != ARMV8_PMUV3_PERFCTR_SW_INCR)
continue;

/* increment this even SW_INC counter */
reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
reg = lower_32_bits(reg);
__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;

if (reg) /* no overflow on the low part */
continue;

if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) {
/* increment the high counter */
reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) + 1;
reg = lower_32_bits(reg);
__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
if (!reg)
__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) = reg;
if (!reg) /* mark overflow on the high counter */
__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i + 1);
} else {
/* mark overflow on low counter */
__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
}
}
}

@@ -510,10 +528,9 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
*/
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
u64 mask;
unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
int i;

mask = kvm_pmu_valid_counter_mask(vcpu);
if (val & ARMV8_PMU_PMCR_E) {
kvm_pmu_enable_counter_mask(vcpu,
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);

@@ -525,7 +542,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

if (val & ARMV8_PMU_PMCR_P) {
for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++)
for_each_set_bit(i, &mask, 32)
kvm_pmu_set_counter_value(vcpu, i, 0);
}
}

@@ -582,15 +599,14 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)

counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

if (kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx)) {
if (kvm_pmu_pmc_is_chained(pmc)) {
/**
* The initial sample period (overflow count) of an event. For
* chained counters we only support overflow interrupts on the
* high counter.
*/
attr.sample_period = (-counter) & GENMASK(63, 0);
if (kvm_pmu_counter_is_enabled(vcpu, pmc->idx + 1))
attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;

event = perf_event_create_kernel_counter(&attr, -1, current,
kvm_pmu_perf_overflow,

@@ -621,25 +637,33 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
* @select_idx: The number of selected counter
*
* Update the chained bitmap based on the event type written in the
* typer register.
* typer register and the enable state of the odd register.
*/
static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx)
{
struct kvm_pmu *pmu = &vcpu->arch.pmu;
struct kvm_pmc *pmc = &pmu->pmc[select_idx];
struct kvm_pmc *pmc = &pmu->pmc[select_idx], *canonical_pmc;
bool new_state, old_state;

if (kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx)) {
old_state = kvm_pmu_pmc_is_chained(pmc);
new_state = kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx) &&
kvm_pmu_counter_is_enabled(vcpu, pmc->idx | 0x1);

if (old_state == new_state)
return;

canonical_pmc = kvm_pmu_get_canonical_pmc(pmc);
kvm_pmu_stop_counter(vcpu, canonical_pmc);
if (new_state) {
/*
* During promotion from !chained to chained we must ensure
* the adjacent counter is stopped and its event destroyed
*/
if (!kvm_pmu_pmc_is_chained(pmc))
kvm_pmu_stop_counter(vcpu, pmc);

kvm_pmu_stop_counter(vcpu, kvm_pmu_get_alternate_pmc(pmc));
set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
} else {
clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
return;
}
clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
}

/**
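A standalone sketch (plain C, not the kernel code) of the low/high handling the reworked kvm_pmu_software_increment() above applies to a chained counter pair, using two plain 32-bit counters in place of the vcpu sysreg file: the low half is incremented first, and only when it wraps does the high half advance, with the overflow flag landing on the odd (high) counter for a chained pair and on the low counter otherwise.

#include <stdio.h>
#include <stdint.h>

struct chained_ctr {
        uint32_t lo;
        uint32_t hi;
        uint32_t ovsset;        /* pending overflow bits, one per counter index */
};

static void sw_incr(struct chained_ctr *c, int lo_idx, int chained)
{
        if (++c->lo)                    /* no overflow on the low half */
                return;
        if (chained) {
                if (!++c->hi)           /* high half wrapped too */
                        c->ovsset |= 1u << (lo_idx + 1);
        } else {
                c->ovsset |= 1u << lo_idx;
        }
}

int main(void)
{
        struct chained_ctr c = { .lo = 0xffffffff, .hi = 0xffffffff };

        sw_incr(&c, 0, 1);
        printf("lo=%u hi=%u ovsset=0x%x\n", c.lo, c.hi, c.ovsset);
        return 0;
}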
@@ -839,9 +839,8 @@ static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
u32 event_id = its_cmd_get_id(its_cmd);
struct its_ite *ite;

ite = find_ite(its, device_id, event_id);
if (ite && ite->collection) {
if (ite && its_is_collection_mapped(ite->collection)) {
/*
* Though the spec talks about removing the pending state, we
* don't bother here since we clear the ITTE anyway and the

@@ -2475,7 +2474,8 @@ static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
target_addr = (u32)(val >> KVM_ITS_CTE_RDBASE_SHIFT);
coll_id = val & KVM_ITS_CTE_ICID_MASK;

if (target_addr >= atomic_read(&kvm->online_vcpus))
if (target_addr != COLLECTION_NOT_MAPPED &&
target_addr >= atomic_read(&kvm->online_vcpus))
return -EINVAL;

collection = find_collection(its, coll_id);
@@ -414,8 +414,11 @@ static unsigned long vgic_mmio_read_pendbase(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
u64 value = vgic_cpu->pendbaser;

return extract_bytes(vgic_cpu->pendbaser, addr & 7, len);
value &= ~GICR_PENDBASER_PTZ;

return extract_bytes(value, addr & 7, len);
}

static void vgic_mmio_write_pendbase(struct kvm_vcpu *vcpu,
@@ -98,11 +98,6 @@ extern struct kvm_io_device_ops kvm_io_gic_ops;
.uaccess_write = uwr, \
}

int kvm_vgic_register_mmio_region(struct kvm *kvm, struct kvm_vcpu *vcpu,
struct vgic_register_region *reg_desc,
struct vgic_io_device *region,
int nr_irqs, bool offset_private);

unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len);

void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,