KVM: arm64: Treat ESR_EL2 as a 64-bit register
ESR_EL2 was defined as a 32-bit register in the initial release of the ARM
Architecture Manual for Armv8-A, and was later extended to 64 bits, with bits
[63:32] RES0. ARMv8.7 introduced FEAT_LS64, which makes use of bits [36:32].

KVM treats ESR_EL1 as a 64-bit register when saving and restoring the guest
context, but ESR_EL2 is handled as a 32-bit register. Start treating ESR_EL2
as a 64-bit register to allow KVM to make use of the most significant 32 bits
in the future.

The type chosen to represent ESR_EL2 is u64, as that is consistent with the
notation KVM overwhelmingly uses today (u32), and how the rest of the
registers are declared.

Signed-off-by: Alexandru Elisei <alexandru.elisei@arm.com>
Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20220425114444.368693-5-alexandru.elisei@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
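For context, a minimal standalone sketch (not part of the patch) of why the register width matters: when the syndrome value is truncated to u32, any bits above bit 31, such as the ISS2 field at [36:32] that FEAT_LS64 uses, are silently dropped. The field positions below follow the Arm ARM, but the ESR_EC_*/ESR_ISS2_* macro names and the sample values are illustrative, not the kernel's.

#include <stdint.h>
#include <stdio.h>

/* Architectural field positions in ESR_ELx; names here are illustrative. */
#define ESR_EC_SHIFT	26
#define ESR_EC_MASK	(UINT64_C(0x3f) << ESR_EC_SHIFT)	/* EC, bits [31:26] */
#define ESR_ISS2_SHIFT	32
#define ESR_ISS2_MASK	(UINT64_C(0x1f) << ESR_ISS2_SHIFT)	/* ISS2, bits [36:32] (FEAT_LS64) */

int main(void)
{
	/* Sample syndrome with both EC and ISS2 populated. */
	uint64_t esr = (UINT64_C(0x15) << ESR_ISS2_SHIFT) |
		       (UINT64_C(0x24) << ESR_EC_SHIFT);

	/* What a u32-typed variable would have kept. */
	uint32_t esr32 = (uint32_t)esr;

	printf("EC:   %#llx\n",
	       (unsigned long long)((esr & ESR_EC_MASK) >> ESR_EC_SHIFT));
	printf("ISS2: %#llx (u64)\n",
	       (unsigned long long)((esr & ESR_ISS2_MASK) >> ESR_ISS2_SHIFT));
	printf("ISS2: %#llx (after u32 truncation)\n",
	       (unsigned long long)(((uint64_t)esr32 & ESR_ISS2_MASK) >> ESR_ISS2_SHIFT));
	return 0;
}

The last line prints 0: once truncated, ISS2 is unrecoverable, which is why the patch widens every ESR variable on the path from the fault handler down.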
This commit is contained in:
parent 8d56e5c5a9
commit 0b12620fdd
arch/arm64/include/asm/kvm_emulate.h

@@ -235,14 +235,14 @@ static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
 	return mode != PSR_MODE_EL0t;
 }
 
-static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
+static __always_inline u64 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault.esr_el2;
 }
 
 static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
 {
-	u32 esr = kvm_vcpu_get_esr(vcpu);
+	u64 esr = kvm_vcpu_get_esr(vcpu);
 
 	if (esr & ESR_ELx_CV)
 		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
@@ -373,7 +373,7 @@ static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
 
 static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
 {
-	u32 esr = kvm_vcpu_get_esr(vcpu);
+	u64 esr = kvm_vcpu_get_esr(vcpu);
 	return ESR_ELx_SYS64_ISS_RT(esr);
 }
 
arch/arm64/include/asm/kvm_host.h

@@ -153,7 +153,7 @@ struct kvm_arch {
 };
 
 struct kvm_vcpu_fault_info {
-	u32 esr_el2;		/* Hyp Syndrom Register */
+	u64 esr_el2;		/* Hyp Syndrom Register */
 	u64 far_el2;		/* Hyp Fault Address Register */
 	u64 hpfar_el2;		/* Hyp IPA Fault Address Register */
 	u64 disr_el1;		/* Deferred [SError] Status Register */
arch/arm64/include/asm/kvm_ras.h

@@ -14,7 +14,7 @@
  * Was this synchronous external abort a RAS notification?
  * Returns '0' for errors handled by some RAS subsystem, or -ENOENT.
  */
-static inline int kvm_handle_guest_sea(phys_addr_t addr, unsigned int esr)
+static inline int kvm_handle_guest_sea(phys_addr_t addr, u64 esr)
 {
 	/* apei_claim_sea(NULL) expects to mask interrupts itself */
 	lockdep_assert_irqs_enabled();
arch/arm64/kvm/handle_exit.c

@@ -26,7 +26,7 @@
 
 typedef int (*exit_handle_fn)(struct kvm_vcpu *);
 
-static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u32 esr)
+static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u64 esr)
 {
 	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(NULL, esr))
 		kvm_inject_vabt(vcpu);
@@ -117,10 +117,10 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
 static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
 {
 	struct kvm_run *run = vcpu->run;
-	u32 esr = kvm_vcpu_get_esr(vcpu);
+	u64 esr = kvm_vcpu_get_esr(vcpu);
 
 	run->exit_reason = KVM_EXIT_DEBUG;
-	run->debug.arch.hsr = esr;
+	run->debug.arch.hsr = lower_32_bits(esr);
 
 	if (ESR_ELx_EC(esr) == ESR_ELx_EC_WATCHPT_LOW)
 		run->debug.arch.far = vcpu->arch.fault.far_el2;
@@ -130,9 +130,9 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
 
 static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu)
 {
-	u32 esr = kvm_vcpu_get_esr(vcpu);
+	u64 esr = kvm_vcpu_get_esr(vcpu);
 
-	kvm_pr_unimpl("Unknown exception class: esr: %#08x -- %s\n",
+	kvm_pr_unimpl("Unknown exception class: esr: %#016llx -- %s\n",
 		      esr, esr_get_class_string(esr));
 
 	kvm_inject_undefined(vcpu);
@@ -187,7 +187,7 @@ static exit_handle_fn arm_exit_handlers[] = {
 
 static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
 {
-	u32 esr = kvm_vcpu_get_esr(vcpu);
+	u64 esr = kvm_vcpu_get_esr(vcpu);
 	u8 esr_ec = ESR_ELx_EC(esr);
 
 	return arm_exit_handlers[esr_ec];
@@ -334,6 +334,6 @@ void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
 	 */
 	kvm_err("Hyp Offset: 0x%llx\n", hyp_offset);
 
-	panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%016lx\n",
+	panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%016llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%016lx\n",
 	      spsr, elr_virt, esr, far, hpfar, par, vcpu);
 }
arch/arm64/kvm/hyp/include/hyp/switch.h

@@ -266,7 +266,7 @@ static inline bool handle_tx2_tvm(struct kvm_vcpu *vcpu)
 	return true;
 }
 
-static inline bool esr_is_ptrauth_trap(u32 esr)
+static inline bool esr_is_ptrauth_trap(u64 esr)
 {
 	switch (esr_sys64_to_sysreg(esr)) {
 	case SYS_APIAKEYLO_EL1:
arch/arm64/kvm/hyp/nvhe/sys_regs.c

@@ -33,7 +33,7 @@ u64 id_aa64mmfr2_el1_sys_val;
  */
static void inject_undef64(struct kvm_vcpu *vcpu)
 {
-	u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
+	u64 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
 
 	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
 	*vcpu_cpsr(vcpu) = read_sysreg_el2(SYS_SPSR);
arch/arm64/kvm/hyp/vgic-v3-sr.c

@@ -473,7 +473,7 @@ static int __vgic_v3_bpr_min(void)
 
 static int __vgic_v3_get_group(struct kvm_vcpu *vcpu)
 {
-	u32 esr = kvm_vcpu_get_esr(vcpu);
+	u64 esr = kvm_vcpu_get_esr(vcpu);
 	u8 crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
 
 	return crm != 8;
@@ -1016,7 +1016,7 @@ static void __vgic_v3_write_ctlr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
 int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
 {
 	int rt;
-	u32 esr;
+	u64 esr;
 	u32 vmcr;
 	void (*fn)(struct kvm_vcpu *, u32, int);
 	bool is_read;
arch/arm64/kvm/inject_fault.c

@@ -18,7 +18,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
 {
 	unsigned long cpsr = *vcpu_cpsr(vcpu);
 	bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
-	u32 esr = 0;
+	u64 esr = 0;
 
 	vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1 |
 			     KVM_ARM64_EXCEPT_AA64_ELx_SYNC |
@@ -50,7 +50,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
 
 static void inject_undef64(struct kvm_vcpu *vcpu)
 {
-	u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
+	u64 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
 
 	vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1 |
 			     KVM_ARM64_EXCEPT_AA64_ELx_SYNC |
arch/arm64/kvm/sys_regs.c

@@ -2304,7 +2304,7 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
 			    size_t nr_global)
 {
 	struct sys_reg_params params;
-	u32 esr = kvm_vcpu_get_esr(vcpu);
+	u64 esr = kvm_vcpu_get_esr(vcpu);
 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
 	int Rt2 = (esr >> 10) & 0x1f;
 
@@ -2354,7 +2354,7 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
 			    size_t nr_global)
 {
 	struct sys_reg_params params;
-	u32 esr = kvm_vcpu_get_esr(vcpu);
+	u64 esr = kvm_vcpu_get_esr(vcpu);
 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
 
 	params.CRm = (esr >> 1) & 0xf;