// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bitfield.h>
#include <linux/bsearch.h>
#include <linux/cacheinfo.h>
#include <linux/debugfs.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"
#include "vgic/vgic.h"

#include "trace.h"

/*
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */

static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 val);

static bool bad_trap(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *params,
		     const struct sys_reg_desc *r,
		     const char *msg)
{
	WARN_ONCE(1, "Unexpected %s\n", msg);
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

static bool read_from_write_only(struct kvm_vcpu *vcpu,
				 struct sys_reg_params *params,
				 const struct sys_reg_desc *r)
{
	return bad_trap(vcpu, params, r,
			"sys_reg read to write-only register");
}

static bool write_to_read_only(struct kvm_vcpu *vcpu,
			       struct sys_reg_params *params,
			       const struct sys_reg_desc *r)
{
	return bad_trap(vcpu, params, r,
			"sys_reg write to read-only register");
}

#define PURE_EL2_SYSREG(el2)						\
	case el2: {							\
		*el1r = el2;						\
		return true;						\
	}

#define MAPPED_EL2_SYSREG(el2, el1, fn)					\
	case el2: {							\
		*xlate = fn;						\
		*el1r = el1;						\
		return true;						\
	}

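/*
 * Illustrative note (not from the original source): a mapped entry such as
 * MAPPED_EL2_SYSREG(TCR_EL2, TCR_EL1, translate_tcr_el2_to_tcr_el1) expands
 * inside the switch below to roughly:
 *
 *	case TCR_EL2: {
 *		*xlate = translate_tcr_el2_to_tcr_el1;
 *		*el1r = TCR_EL1;
 *		return true;
 *	}
 *
 * i.e. the caller learns both the EL1 counterpart and the (optional)
 * format-translation helper from a single switch lookup.
 */
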
static bool get_el2_to_el1_mapping(unsigned int reg,
				   unsigned int *el1r, u64 (**xlate)(u64))
{
	switch (reg) {
		PURE_EL2_SYSREG(  VPIDR_EL2	);
		PURE_EL2_SYSREG(  VMPIDR_EL2	);
		PURE_EL2_SYSREG(  ACTLR_EL2	);
		PURE_EL2_SYSREG(  HCR_EL2	);
		PURE_EL2_SYSREG(  MDCR_EL2	);
		PURE_EL2_SYSREG(  HSTR_EL2	);
		PURE_EL2_SYSREG(  HACR_EL2	);
		PURE_EL2_SYSREG(  VTTBR_EL2	);
		PURE_EL2_SYSREG(  VTCR_EL2	);
		PURE_EL2_SYSREG(  RVBAR_EL2	);
		PURE_EL2_SYSREG(  TPIDR_EL2	);
		PURE_EL2_SYSREG(  HPFAR_EL2	);
		PURE_EL2_SYSREG(  CNTHCTL_EL2	);
		MAPPED_EL2_SYSREG(SCTLR_EL2,   SCTLR_EL1,
				  translate_sctlr_el2_to_sctlr_el1	);
		MAPPED_EL2_SYSREG(CPTR_EL2,    CPACR_EL1,
				  translate_cptr_el2_to_cpacr_el1	);
		MAPPED_EL2_SYSREG(TTBR0_EL2,   TTBR0_EL1,
				  translate_ttbr0_el2_to_ttbr0_el1	);
		MAPPED_EL2_SYSREG(TTBR1_EL2,   TTBR1_EL1,   NULL	);
		MAPPED_EL2_SYSREG(TCR_EL2,     TCR_EL1,
				  translate_tcr_el2_to_tcr_el1		);
		MAPPED_EL2_SYSREG(VBAR_EL2,    VBAR_EL1,    NULL	);
		MAPPED_EL2_SYSREG(AFSR0_EL2,   AFSR0_EL1,   NULL	);
		MAPPED_EL2_SYSREG(AFSR1_EL2,   AFSR1_EL1,   NULL	);
		MAPPED_EL2_SYSREG(ESR_EL2,     ESR_EL1,     NULL	);
		MAPPED_EL2_SYSREG(FAR_EL2,     FAR_EL1,     NULL	);
		MAPPED_EL2_SYSREG(MAIR_EL2,    MAIR_EL1,    NULL	);
		MAPPED_EL2_SYSREG(AMAIR_EL2,   AMAIR_EL1,   NULL	);
		MAPPED_EL2_SYSREG(ELR_EL2,     ELR_EL1,     NULL	);
		MAPPED_EL2_SYSREG(SPSR_EL2,    SPSR_EL1,    NULL	);
		MAPPED_EL2_SYSREG(ZCR_EL2,     ZCR_EL1,     NULL	);
	default:
		return false;
	}
}

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
	u64 val = 0x8badf00d8badf00d;
	u64 (*xlate)(u64) = NULL;
	unsigned int el1r;

	if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
		goto memory_read;

	if (unlikely(get_el2_to_el1_mapping(reg, &el1r, &xlate))) {
		if (!is_hyp_ctxt(vcpu))
			goto memory_read;

		/*
		 * If this register does not have an EL1 counterpart,
		 * then read the stored EL2 version.
		 */
		if (reg == el1r)
			goto memory_read;

		/*
		 * If we have a non-VHE guest and the sysreg requires
		 * translation to be used at EL1, use the in-memory
		 * copy instead.
		 */
		if (!vcpu_el2_e2h_is_set(vcpu) && xlate)
			goto memory_read;

		/* Get the current version of the EL1 counterpart. */
		WARN_ON(!__vcpu_read_sys_reg_from_cpu(el1r, &val));
		return val;
	}

	/* EL1 register can't be on the CPU if the guest is in vEL2. */
	if (unlikely(is_hyp_ctxt(vcpu)))
		goto memory_read;

	if (__vcpu_read_sys_reg_from_cpu(reg, &val))
		return val;

memory_read:
	return __vcpu_sys_reg(vcpu, reg);
}

void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
	u64 (*xlate)(u64) = NULL;
	unsigned int el1r;

	if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
		goto memory_write;

	if (unlikely(get_el2_to_el1_mapping(reg, &el1r, &xlate))) {
		if (!is_hyp_ctxt(vcpu))
			goto memory_write;

		/*
		 * Always store a copy of the write to memory to avoid having
		 * to reverse-translate virtual EL2 system registers for a
		 * non-VHE guest hypervisor.
		 */
		__vcpu_sys_reg(vcpu, reg) = val;

		/* No EL1 counterpart? We're done here. */
		if (reg == el1r)
			return;

		if (!vcpu_el2_e2h_is_set(vcpu) && xlate)
			val = xlate(val);

		/* Redirect this to the EL1 version of the register. */
		WARN_ON(!__vcpu_write_sys_reg_to_cpu(val, el1r));
		return;
	}

	/* EL1 register can't be on the CPU if the guest is in vEL2. */
	if (unlikely(is_hyp_ctxt(vcpu)))
		goto memory_write;

	if (__vcpu_write_sys_reg_to_cpu(val, reg))
		return;

memory_write:
	__vcpu_sys_reg(vcpu, reg) = val;
}

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 14

/*
 * Returns the minimum line size for the selected cache, expressed as
 * Log2(bytes).
 */
static u8 get_min_cache_line_size(bool icache)
{
	u64 ctr = read_sanitised_ftr_reg(SYS_CTR_EL0);
	u8 field;

	if (icache)
		field = SYS_FIELD_GET(CTR_EL0, IminLine, ctr);
	else
		field = SYS_FIELD_GET(CTR_EL0, DminLine, ctr);

	/*
	 * Cache line size is represented as Log2(words) in CTR_EL0.
	 * Log2(bytes) can be derived with the following:
	 *
	 * Log2(words) + 2 = Log2(bytes / 4) + 2
	 *		   = Log2(bytes) - 2 + 2
	 *		   = Log2(bytes)
	 */
	return field + 2;
}

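/*
 * Worked example (illustrative, not from the original source): with 64-byte
 * cache lines, CTR_EL0.DminLine is Log2(16 words) = 4, and the helper above
 * returns 4 + 2 = 6 = Log2(64 bytes).
 */
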
/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(struct kvm_vcpu *vcpu, u32 csselr)
{
	u8 line_size;

	if (vcpu->arch.ccsidr)
		return vcpu->arch.ccsidr[csselr];

	line_size = get_min_cache_line_size(csselr & CSSELR_EL1_InD);

	/*
	 * Fabricate a CCSIDR value as the overriding value does not exist.
	 * The real CCSIDR value will not be used as it can vary by the
	 * physical CPU which the vcpu currently resides in.
	 *
	 * The line size is determined with get_min_cache_line_size(), which
	 * should be valid for all CPUs even if they have different cache
	 * configuration.
	 *
	 * The associativity bits are cleared, meaning the geometry of all data
	 * and unified caches (which are guaranteed to be PIPT and thus
	 * non-aliasing) are 1 set and 1 way.
	 * Guests should not be doing cache operations by set/way at all, and
	 * for this reason, we trap them and attempt to infer the intent, so
	 * that we can flush the entire guest's address space at the appropriate
	 * time. The exposed geometry minimizes the number of traps.
	 * [If guests should attempt to infer aliasing properties from the
	 * geometry (which is not permitted by the architecture), they would
	 * only do so for virtually indexed caches.]
	 *
	 * We don't check if the cache level exists as it is allowed to return
	 * an UNKNOWN value if not.
	 */
	return SYS_FIELD_PREP(CCSIDR_EL1, LineSize, line_size - 4);
}

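/*
 * Illustrative note (not from the original source): CCSIDR_EL1.LineSize
 * encodes Log2(bytes) - 4, so the fabricated value above reports e.g.
 * LineSize = 2 for a 64-byte minimum line (Log2(64) - 4), while the
 * associativity and set fields stay zero (1 way, 1 set).
 */
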
static int set_ccsidr(struct kvm_vcpu *vcpu, u32 csselr, u32 val)
{
	u8 line_size = FIELD_GET(CCSIDR_EL1_LineSize, val) + 4;
	u32 *ccsidr = vcpu->arch.ccsidr;
	u32 i;

	if ((val & CCSIDR_EL1_RES0) ||
	    line_size < get_min_cache_line_size(csselr & CSSELR_EL1_InD))
		return -EINVAL;

	if (!ccsidr) {
		if (val == get_ccsidr(vcpu, csselr))
			return 0;

		ccsidr = kmalloc_array(CSSELR_MAX, sizeof(u32), GFP_KERNEL_ACCOUNT);
		if (!ccsidr)
			return -ENOMEM;

		for (i = 0; i < CSSELR_MAX; i++)
			ccsidr[i] = get_ccsidr(vcpu, i);

		vcpu->arch.ccsidr = ccsidr;
	}

	ccsidr[csselr] = val;

	return 0;
}

static bool access_rw(struct kvm_vcpu *vcpu,
		      struct sys_reg_params *p,
		      const struct sys_reg_desc *r)
{
	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
	else
		p->regval = vcpu_read_sys_reg(vcpu, r->reg);

	return true;
}

/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * Only track S/W ops if we don't have FWB. It still indicates
	 * that the guest is a bit broken (S/W operations should only
	 * be done by firmware, knowing that there is only a single
	 * CPU left in the system, and certainly not from non-secure
	 * software).
	 */
	if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
		kvm_set_way_flush(vcpu);

	return true;
}

static bool access_dcgsw(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (!kvm_has_mte(vcpu->kvm)) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	/* Treat MTE S/W ops as we treat the classic ones: with contempt */
	return access_dcsw(vcpu, p, r);
}

static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift)
{
	switch (r->aarch32_map) {
	case AA32_LO:
		*mask = GENMASK_ULL(31, 0);
		*shift = 0;
		break;
	case AA32_HI:
		*mask = GENMASK_ULL(63, 32);
		*shift = 32;
		break;
	default:
		*mask = GENMASK_ULL(63, 0);
		*shift = 0;
		break;
	}
}

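/*
 * Illustrative example (not from the original source): for a register tagged
 * AA32_HI, get_access_mask() yields mask = GENMASK_ULL(63, 32) and shift = 32,
 * so an accessor such as access_vm_reg() below only updates the top half:
 *
 *	val &= ~mask;
 *	val |= (p->regval & (mask >> shift)) << shift;
 */
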
/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);
	u64 val, mask, shift;

	if (reg_to_encoding(r) == SYS_TCR2_EL1 &&
	    !kvm_has_feat(vcpu->kvm, ID_AA64MMFR3_EL1, TCRX, IMP)) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	BUG_ON(!p->is_write);

	get_access_mask(r, &mask, &shift);

	if (~mask) {
		val = vcpu_read_sys_reg(vcpu, r->reg);
		val &= ~mask;
	} else {
		val = 0;
	}

	val |= (p->regval & (mask >> shift)) << shift;
	vcpu_write_sys_reg(vcpu, val, r->reg);

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}

static bool access_actlr(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask, shift;

	if (p->is_write)
		return ignore_write(vcpu, p);

	get_access_mask(r, &mask, &shift);
	p->regval = (vcpu_read_sys_reg(vcpu, r->reg) & mask) >> shift;

	return true;
}

/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	bool g1;

	if (!kvm_has_gicv3(vcpu->kvm)) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * In a system where GICD_CTLR.DS=1, an ICC_SGI0R_EL1 access generates
	 * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
	 * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
	 * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
	 * group.
	 */
	if (p->Op0 == 0) {		/* AArch32 */
		switch (p->Op1) {
		default:		/* Keep GCC quiet */
		case 0:			/* ICC_SGI1R */
			g1 = true;
			break;
		case 1:			/* ICC_ASGI1R */
		case 2:			/* ICC_SGI0R */
			g1 = false;
			break;
		}
	} else {			/* AArch64 */
		switch (p->Op2) {
		default:		/* Keep GCC quiet */
		case 5:			/* ICC_SGI1R_EL1 */
			g1 = true;
			break;
		case 6:			/* ICC_ASGI1R_EL1 */
		case 7:			/* ICC_SGI0R_EL1 */
			g1 = false;
			break;
		}
	}

	vgic_v3_dispatch_sgi(vcpu, p->regval, g1);

	return true;
}

static bool access_gic_sre(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
	return true;
}

static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

static bool trap_undef(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	kvm_inject_undefined(vcpu);
	return false;
}

/*
 * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
 * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
 * system, these registers should UNDEF. LORID_EL1 being a RO register, we
 * treat it separately.
 */
static bool trap_loregion(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u32 sr = reg_to_encoding(r);

	if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, LO, IMP)) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write && sr == SYS_LORID_EL1)
		return write_to_read_only(vcpu, p, r);

	return trap_raz_wi(vcpu, p, r);
}

static bool trap_oslar_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 oslsr;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/* Forward the OSLK bit to OSLSR */
	oslsr = __vcpu_sys_reg(vcpu, OSLSR_EL1) & ~OSLSR_EL1_OSLK;
	if (p->regval & OSLAR_EL1_OSLK)
		oslsr |= OSLSR_EL1_OSLK;

	__vcpu_sys_reg(vcpu, OSLSR_EL1) = oslsr;
	return true;
}

static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = __vcpu_sys_reg(vcpu, r->reg);
	return true;
}

static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			 u64 val)
{
	/*
	 * The only modifiable bit is the OSLK bit. Refuse the write if
	 * userspace attempts to change any other bit in the register.
	 */
	if ((val ^ rd->val) & ~OSLSR_EL1_OSLK)
		return -EINVAL;

	__vcpu_sys_reg(vcpu, rd->reg) = val;
	return 0;
}

static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = read_sysreg(dbgauthstatus_el1);
		return true;
	}
}

/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance.
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps.
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	access_rw(vcpu, p, r);
	if (p->is_write)
		vcpu_set_flag(vcpu, DEBUG_DIRTY);

	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

	return true;
}

/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leaves the top bits alone.
 * A 32 bit read from a debug register only returns the bottom bits.
 *
 * All writes will set the DEBUG_DIRTY flag to ensure the hyp code
 * switches between host and guest values in future.
 */
static void reg_to_dbg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *rd,
		       u64 *dbg_reg)
{
	u64 mask, shift, val;

	get_access_mask(rd, &mask, &shift);

	val = *dbg_reg;
	val &= ~mask;
	val |= (p->regval & (mask >> shift)) << shift;
	*dbg_reg = val;

	vcpu_set_flag(vcpu, DEBUG_DIRTY);
}

static void dbg_to_reg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *rd,
		       u64 *dbg_reg)
{
	u64 mask, shift;

	get_access_mask(rd, &mask, &shift);
	p->regval = (*dbg_reg & mask) >> shift;
}

static bool trap_bvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = val;
	return 0;
}

static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
	return 0;
}

static u64 reset_bvr(struct kvm_vcpu *vcpu,
		     const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
	return rd->val;
}

static bool trap_bcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = val;
	return 0;
}

static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
	return 0;
}

static u64 reset_bcr(struct kvm_vcpu *vcpu,
		     const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
	return rd->val;
}

static bool trap_wvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write,
		       vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]);

	return true;
}

static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = val;
	return 0;
}

static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
	return 0;
}

static u64 reset_wvr(struct kvm_vcpu *vcpu,
		     const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
	return rd->val;
}

static bool trap_wcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = val;
	return 0;
}

static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
	return 0;
}

static u64 reset_wcr(struct kvm_vcpu *vcpu,
		     const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
	return rd->val;
}

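/*
 * Note (added for clarity, not from the original source): the break-/watchpoint
 * accessors above index vcpu_debug_state with rd->CRm, since the
 * DBG{B,W}{V,C}Rn_EL1 encodings carry the register number n in the CRm field
 * of the sys_reg descriptor.
 */
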
static u64 reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair = read_sysreg(amair_el1);
	vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
	return amair;
}

static u64 reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 actlr = read_sysreg(actlr_el1);
	vcpu_write_sys_reg(vcpu, actlr, ACTLR_EL1);
	return actlr;
}

static u64 reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
	 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
	 * of the GICv3 to be able to address each CPU directly when
	 * sending IPIs.
	 */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	mpidr |= (1ULL << 31);
	vcpu_write_sys_reg(vcpu, mpidr, MPIDR_EL1);

	return mpidr;
}

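/*
 * Worked example (illustrative, not from the original source): for
 * vcpu_id = 300 (0x12c), reset_mpidr() packs Aff0 = 0xc, Aff1 = 0x12 and
 * Aff2 = 0x00, keeping at most 16 vCPUs per Aff0 group so that GICv3
 * ICC_SGIxR target lists can address each of them individually.
 */
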
static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *r)
{
	if (kvm_vcpu_has_pmu(vcpu))
		return 0;

	return REG_HIDDEN;
}

static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mask = BIT(ARMV8_PMU_CYCLE_IDX);
	u8 n = vcpu->kvm->arch.pmcr_n;

	if (n)
		mask |= GENMASK(n - 1, 0);

	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= mask;

	return __vcpu_sys_reg(vcpu, r->reg);
}

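/*
 * Illustrative note (not from the original source): with pmcr_n == 6 event
 * counters, the mask built above keeps the cycle-counter bit plus bits [5:0],
 * so the UNKNOWN reset value only retains bits for counters that actually
 * exist on this VM.
 */
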
2023-06-10 03:00:48 +08:00
|
|
|
static u64 reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
|
2021-07-19 20:38:59 +08:00
|
|
|
{
|
|
|
|
reset_unknown(vcpu, r);
|
|
|
|
__vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0);
|
2023-06-10 03:00:48 +08:00
|
|
|
|
|
|
|
return __vcpu_sys_reg(vcpu, r->reg);
|
2021-07-19 20:38:59 +08:00
|
|
|
}
|
|
|
|
|
2023-06-10 03:00:48 +08:00
|
|
|
static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
|
2021-07-19 20:38:59 +08:00
|
|
|
{
|
2023-10-20 02:56:17 +08:00
|
|
|
/* This thing will UNDEF, who cares about the reset value? */
|
|
|
|
if (!kvm_vcpu_has_pmu(vcpu))
|
|
|
|
return 0;
|
|
|
|
|
2021-07-19 20:38:59 +08:00
|
|
|
reset_unknown(vcpu, r);
|
2023-10-20 02:56:17 +08:00
|
|
|
__vcpu_sys_reg(vcpu, r->reg) &= kvm_pmu_evtyper_mask(vcpu->kvm);
|
2023-06-10 03:00:48 +08:00
|
|
|
|
|
|
|
return __vcpu_sys_reg(vcpu, r->reg);
|
2021-07-19 20:38:59 +08:00
|
|
|
}
|
|
|
|
|
2023-06-10 03:00:48 +08:00
|
|
|
static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
|
2021-07-19 20:38:59 +08:00
|
|
|
{
|
|
|
|
reset_unknown(vcpu, r);
|
|
|
|
__vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_COUNTER_MASK;
|
2023-06-10 03:00:48 +08:00
|
|
|
|
|
|
|
return __vcpu_sys_reg(vcpu, r->reg);
|
2021-07-19 20:38:59 +08:00
|
|
|
}
|
|
|
|
|
2023-06-10 03:00:48 +08:00
|
|
|
static u64 reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
|
2015-06-18 16:01:53 +08:00
|
|
|
{
|
2023-10-21 05:40:44 +08:00
|
|
|
u64 pmcr = 0;
|
2015-06-18 16:01:53 +08:00
|
|
|
|
2022-08-17 03:25:53 +08:00
|
|
|
if (!kvm_supports_32bit_el0())
|
2022-11-24 18:40:02 +08:00
|
|
|
pmcr |= ARMV8_PMU_PMCR_LC;
|
|
|
|
|
2023-10-21 05:40:44 +08:00
|
|
|
/*
|
|
|
|
* The value of PMCR.N field is included when the
|
|
|
|
* vCPU register is read via kvm_vcpu_read_pmcr().
|
|
|
|
*/
|
2022-11-24 18:40:02 +08:00
|
|
|
__vcpu_sys_reg(vcpu, r->reg) = pmcr;
|
2023-06-10 03:00:48 +08:00
|
|
|
|
|
|
|
return __vcpu_sys_reg(vcpu, r->reg);
|
2015-06-18 16:01:53 +08:00
|
|
|
}
|
|
|
|
|
2017-03-28 00:03:37 +08:00
|
|
|
static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);

	if (!enabled)
		kvm_inject_undefined(vcpu);

	return !enabled;
}

static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
}

static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
}

static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	u64 val;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		/*
		 * Only update writeable bits of PMCR (continuing into
		 * kvm_pmu_handle_pmcr() as well)
		 */
		val = kvm_vcpu_read_pmcr(vcpu);
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		if (!kvm_supports_32bit_el0())
			val |= ARMV8_PMU_PMCR_LC;
		kvm_pmu_handle_pmcr(vcpu, val);
	} else {
		/* PMCR.P & PMCR.C are RAZ */
		val = kvm_vcpu_read_pmcr(vcpu)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
		p->regval = val;
	}

	return true;
}

static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (pmu_access_event_counter_el0_disabled(vcpu))
		return false;

	if (p->is_write)
		__vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
	else
		/* return PMSELR.SEL field */
		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & ARMV8_PMU_COUNTER_MASK;

	return true;
}

static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 pmceid, mask, shift;

	BUG_ON(p->is_write);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	get_access_mask(r, &mask, &shift);

	pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1));
	pmceid &= mask;
	pmceid >>= shift;

	p->regval = pmceid;

	return true;
}

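/*
 * A counter index is valid only if it is below the number of counters
 * advertised in PMCR_EL0.N for this vCPU, or if it is the dedicated
 * cycle counter index.
 */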
static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
{
	u64 pmcr, val;

	pmcr = kvm_vcpu_read_pmcr(vcpu);
	val = FIELD_GET(ARMV8_PMU_PMCR_N, pmcr);
	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	return true;
}

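/*
 * Userspace read of an event or cycle counter: decode the counter index
 * from the register encoding and return its current value.
 */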
static int get_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			  u64 *val)
{
	u64 idx;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
		/* PMCCNTR_EL0 */
		idx = ARMV8_PMU_CYCLE_IDX;
	else
		/* PMEVCNTRn_EL0 */
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);

	*val = kvm_pmu_get_counter_value(vcpu, idx);
	return 0;
}

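/*
 * Guest access to PMCCNTR_EL0, PMXEVCNTR_EL0 and PMEVCNTRn_EL0: work out
 * which counter is targeted, apply the relevant PMUSERENR_EL0 checks, and
 * forward the access to the PMU emulation.
 */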
static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	u64 idx = ~0UL;

	if (r->CRn == 9 && r->CRm == 13) {
		if (r->Op2 == 2) {
			/* PMXEVCNTR_EL0 */
			if (pmu_access_event_counter_el0_disabled(vcpu))
				return false;

			idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			      & ARMV8_PMU_COUNTER_MASK;
		} else if (r->Op2 == 0) {
			/* PMCCNTR_EL0 */
			if (pmu_access_cycle_counter_el0_disabled(vcpu))
				return false;

			idx = ARMV8_PMU_CYCLE_IDX;
		}
	} else if (r->CRn == 0 && r->CRm == 9) {
		/* PMCCNTR */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ARMV8_PMU_CYCLE_IDX;
	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
		/* PMEVCNTRn_EL0 */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
	}

	/* Catch any decoding mistake */
	WARN_ON(idx == ~0UL);

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		if (pmu_access_el0_disabled(vcpu))
			return false;

		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
	} else {
		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
	}

	return true;
}

static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			       const struct sys_reg_desc *r)
{
	u64 idx, reg;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0 */
		idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
		reg = PMEVTYPER0_EL0 + idx;
	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
		if (idx == ARMV8_PMU_CYCLE_IDX)
			reg = PMCCFILTR_EL0;
		else
			/* PMEVTYPERn_EL0 */
			reg = PMEVTYPER0_EL0 + idx;
	} else {
		BUG();
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
		kvm_vcpu_pmu_restore_guest(vcpu);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, reg);
	}

	return true;
}

/*
 * For unimplemented counters, the bits in PM{C,I}NTEN{SET,CLR} and
 * PMOVS{SET,CLR} are expected to RAZ. To honor this, the {get,set}_user
 * handlers below mask out unimplemented counters for userspace reads and
 * writes.
 */
static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 val)
{
	bool set;

	val &= kvm_pmu_valid_counter_mask(vcpu);

	switch (r->reg) {
	case PMOVSSET_EL0:
		/* CRm[1] being set indicates a SET register, and CLR otherwise */
		set = r->CRm & 2;
		break;
	default:
		/* Op2[0] being set indicates a SET register, and CLR otherwise */
		set = r->Op2 & 1;
		break;
	}

	if (set)
		__vcpu_sys_reg(vcpu, r->reg) |= val;
	else
		__vcpu_sys_reg(vcpu, r->reg) &= ~val;

	return 0;
}

static int get_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 *val)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	*val = __vcpu_sys_reg(vcpu, r->reg) & mask;
	return 0;
}

static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 val, mask;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (p->is_write) {
		val = p->regval & mask;
		if (r->Op2 & 0x1) {
			/* accessing PMCNTENSET_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
			kvm_pmu_enable_counter_mask(vcpu, val);
			kvm_vcpu_pmu_restore_guest(vcpu);
		} else {
			/* accessing PMCNTENCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
			kvm_pmu_disable_counter_mask(vcpu, val);
		}
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
	}

	return true;
}

static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (check_pmu_access_disabled(vcpu, 0))
		return false;

	if (p->is_write) {
		u64 val = p->regval & mask;

		if (r->Op2 & 0x1)
			/* accessing PMINTENSET_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
		else
			/* accessing PMINTENCLR_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
	}

	return true;
}

static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		if (r->CRm & 0x2)
			/* accessing PMOVSSET_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
		else
			/* accessing PMOVSCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
	}

	return true;
}

static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	if (pmu_write_swinc_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	kvm_pmu_software_increment(vcpu, p->regval & mask);
	return true;
}

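/*
 * PMUSERENR_EL0 may only be written from a privileged mode; EL0 writes
 * UNDEF. Only the architected enable bits are retained on writes.
 */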
static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (p->is_write) {
		if (!vcpu_mode_priv(vcpu)) {
			kvm_inject_undefined(vcpu);
			return false;
		}

		__vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
			       p->regval & ARMV8_PMU_USERENR_MASK;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
			    & ARMV8_PMU_USERENR_MASK;
	}

	return true;
}

static int get_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
		    u64 *val)
{
	*val = kvm_vcpu_read_pmcr(vcpu);
	return 0;
}

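/*
 * Userspace write of PMCR_EL0: PMCR.N may be changed (up to the number of
 * counters the host PMU implements) as long as the VM has not yet run;
 * only the writable PMCR bits are retained in the vCPU register.
 */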
static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
		    u64 val)
{
	u8 new_n = FIELD_GET(ARMV8_PMU_PMCR_N, val);
	struct kvm *kvm = vcpu->kvm;

	mutex_lock(&kvm->arch.config_lock);

	/*
	 * The vCPU can't have more counters than the PMU hardware
	 * implements. Ignore this error to maintain compatibility
	 * with the existing KVM behavior.
	 */
	if (!kvm_vm_has_ran_once(kvm) &&
	    new_n <= kvm_arm_pmu_get_max_counters(kvm))
		kvm->arch.pmcr_n = new_n;

	mutex_unlock(&kvm->arch.config_lock);

	/*
	 * Ignore writes to RES0 bits, read only bits that are cleared on
	 * vCPU reset, and writable bits that KVM doesn't support yet.
	 * (i.e. only PMCR.N and bits [7:0] are mutable from userspace)
	 * The LP bit is RES0 when FEAT_PMUv3p5 is not supported on the vCPU.
	 * But, we leave the bit as it is here, as the vCPU's PMUver might
	 * be changed later (NOTE: the bit will be cleared on first vCPU run
	 * if necessary).
	 */
	val &= ARMV8_PMU_PMCR_MASK;

	/* The LC bit is RES1 when AArch32 is not supported */
	if (!kvm_supports_32bit_el0())
		val |= ARMV8_PMU_PMCR_LC;

	__vcpu_sys_reg(vcpu, r->reg) = val;
	return 0;
}

/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
	  trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr },		\
	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
	  trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr },		\
	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
	  trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr },		\
	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
	  trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }

#define PMU_SYS_REG(name)						\
	SYS_DESC(SYS_##name), .reset = reset_pmu_reg,			\
	.visibility = pmu_visibility

/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n)						\
	{ PMU_SYS_REG(PMEVCNTRn_EL0(n)),				\
	  .reset = reset_pmevcntr, .get_user = get_pmu_evcntr,		\
	  .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }

/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n)						\
	{ PMU_SYS_REG(PMEVTYPERn_EL0(n)),				\
	  .reset = reset_pmevtyper,					\
	  .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }

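/*
 * Illustrative sketch only (not part of the original layout): roughly how
 * the PMU_* macros above are meant to be used when building sys_reg_desc
 * entries. The array name is hypothetical; the real entries live in the
 * sys_reg_descs[] table later in this file.
 */
#if 0
static const struct sys_reg_desc example_pmu_descs[] = {
	{ PMU_SYS_REG(PMCR_EL0), .access = access_pmcr, .reset = reset_pmcr,
	  .reg = PMCR_EL0, .get_user = get_pmcr, .set_user = set_pmcr },
	PMU_PMEVCNTR_EL0(0),	/* expands to a PMEVCNTR0_EL0 descriptor */
	PMU_PMEVTYPER_EL0(0),	/* expands to a PMEVTYPER0_EL0 descriptor */
};
#endif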
static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	kvm_inject_undefined(vcpu);

	return false;
}

/* Macro to expand the AMU counter and type registers */
#define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access }
#define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access }
#define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), undef_access }
#define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), undef_access }

/*
 * Pointer authentication state is context-switched semi-lazily: guest use
 * of the keys traps until the feature is enabled for the vCPU, after which
 * the keys are switched eagerly. Address and generic authentication cannot
 * be trapped separately, so both must be uniformly supported for the
 * feature to be exposed to a guest.
 */
static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *rd)
{
	return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN;
}

/*
 * If we land here on a PtrAuth access, that is because we didn't
 * fixup the access on exit by allowing the PtrAuth sysregs. The only
 * way this happens is when the guest does not have PtrAuth support
 * enabled.
 */
#define __PTRAUTH_KEY(k)						\
	{ SYS_DESC(SYS_## k), undef_access, reset_unknown, k,		\
	.visibility = ptrauth_visibility}

#define PTRAUTH_KEY(k)							\
	__PTRAUTH_KEY(k ## KEYLO_EL1),					\
	__PTRAUTH_KEY(k ## KEYHI_EL1)

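/*
 * Trapped accesses to the physical timer and counter registers
 * (CNTP_*_EL0, CNTPCT_EL0 and their AArch32 views) are forwarded to the
 * generic timer emulation.
 */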
static bool access_arch_timer(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	enum kvm_arch_timers tmr;
	enum kvm_arch_timer_regs treg;
	u64 reg = reg_to_encoding(r);

	switch (reg) {
	case SYS_CNTP_TVAL_EL0:
	case SYS_AARCH32_CNTP_TVAL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_TVAL;
		break;
	case SYS_CNTP_CTL_EL0:
	case SYS_AARCH32_CNTP_CTL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CTL;
		break;
	case SYS_CNTP_CVAL_EL0:
	case SYS_AARCH32_CNTP_CVAL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CVAL;
		break;
	case SYS_CNTPCT_EL0:
	case SYS_CNTPCTSS_EL0:
	case SYS_AARCH32_CNTPCT:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CNT;
		break;
	default:
		print_sys_reg_msg(p, "%s", "Unhandled trapped timer register");
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write)
		kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
	else
		p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);

	return true;
}

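/*
 * KVM treats some ID register fields as "lower value is safe" even where
 * the host cpufeature code does not (PMUVer, DebugVer and PerfMon), so
 * that userspace may write values below the host limit.
 */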
static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp,
				    s64 new, s64 cur)
{
	struct arm64_ftr_bits kvm_ftr = *ftrp;

	/* Some features have different safe value type in KVM than host features */
	switch (id) {
	case SYS_ID_AA64DFR0_EL1:
		switch (kvm_ftr.shift) {
		case ID_AA64DFR0_EL1_PMUVer_SHIFT:
			kvm_ftr.type = FTR_LOWER_SAFE;
			break;
		case ID_AA64DFR0_EL1_DebugVer_SHIFT:
			kvm_ftr.type = FTR_LOWER_SAFE;
			break;
		}
		break;
	case SYS_ID_DFR0_EL1:
		if (kvm_ftr.shift == ID_DFR0_EL1_PerfMon_SHIFT)
			kvm_ftr.type = FTR_LOWER_SAFE;
		break;
	}

	return arm64_ftr_safe_value(&kvm_ftr, new, cur);
}

/*
 * arm64_check_features() - Check if a feature register value constitutes
 * a subset of features indicated by the idreg's KVM sanitised limit.
 *
 * This function will check if each feature field of @val is the "safe" value
 * against the idreg's KVM sanitised limit returned from the reset() callback.
 * If a field value in @val is the same as the one in limit, it is always
 * considered the safe value regardless. For register fields that are not
 * writable, only the value in limit is considered the safe value.
 *
 * Return: 0 if all the fields are safe. Otherwise, return negative errno.
 */
static int arm64_check_features(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *rd,
				u64 val)
{
	const struct arm64_ftr_reg *ftr_reg;
	const struct arm64_ftr_bits *ftrp = NULL;
	u32 id = reg_to_encoding(rd);
	u64 writable_mask = rd->val;
	u64 limit = rd->reset(vcpu, rd);
	u64 mask = 0;

	/*
	 * Hidden and unallocated ID registers may not have a corresponding
	 * struct arm64_ftr_reg. Of course, if the register is RAZ we know the
	 * only safe value is 0.
	 */
	if (sysreg_visible_as_raz(vcpu, rd))
		return val ? -E2BIG : 0;

	ftr_reg = get_arm64_ftr_reg(id);
	if (!ftr_reg)
		return -EINVAL;

	ftrp = ftr_reg->ftr_bits;

	for (; ftrp && ftrp->width; ftrp++) {
		s64 f_val, f_lim, safe_val;
		u64 ftr_mask;

		ftr_mask = arm64_ftr_mask(ftrp);
		if ((ftr_mask & writable_mask) != ftr_mask)
			continue;

		f_val = arm64_ftr_value(ftrp, val);
		f_lim = arm64_ftr_value(ftrp, limit);
		mask |= ftr_mask;

		if (f_val == f_lim)
			safe_val = f_val;
		else
			safe_val = kvm_arm64_ftr_safe_value(id, ftrp, f_val, f_lim);

		if (safe_val != f_val)
			return -E2BIG;
	}

	/* For fields that are not writable, values in limit are the safe values. */
	if ((val & ~mask) != (limit & ~mask))
		return -E2BIG;

	return 0;
}

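/*
 * Map an ID_AA64DFR0_EL1.PMUVer value onto the equivalent AArch32
 * ID_DFR0_EL1.PerfMon encoding.
 */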
static u8 pmuver_to_perfmon(u8 pmuver)
{
	switch (pmuver) {
	case ID_AA64DFR0_EL1_PMUVer_IMP:
		return ID_DFR0_EL1_PerfMon_PMUv3;
	case ID_AA64DFR0_EL1_PMUVer_IMP_DEF:
		return ID_DFR0_EL1_PerfMon_IMPDEF;
	default:
		/* Anything ARMv8.1+ and NI have the same value. For now. */
		return pmuver;
	}
}

/* Read a sanitised cpufeature ID register by sys_reg_desc */
static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *r)
{
	u32 id = reg_to_encoding(r);
	u64 val;

	if (sysreg_visible_as_raz(vcpu, r))
		return 0;

	val = read_sanitised_ftr_reg(id);

	switch (id) {
	case SYS_ID_AA64PFR1_EL1:
		if (!kvm_has_mte(vcpu->kvm))
			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);

		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
		break;
	case SYS_ID_AA64ISAR1_EL1:
		if (!vcpu_has_ptrauth(vcpu))
			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI));
		break;
	case SYS_ID_AA64ISAR2_EL1:
		if (!vcpu_has_ptrauth(vcpu))
			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
		if (!cpus_have_final_cap(ARM64_HAS_WFXT))
			val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
		break;
	case SYS_ID_AA64MMFR2_EL1:
		val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
		break;
	case SYS_ID_MMFR4_EL1:
		val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX);
		break;
	}

	return val;
}

static u64 kvm_read_sanitised_id_reg(struct kvm_vcpu *vcpu,
				     const struct sys_reg_desc *r)
{
	return __kvm_read_sanitised_id_reg(vcpu, r);
}

static u64 read_id_reg(const struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	return kvm_read_vm_id_reg(vcpu->kvm, reg_to_encoding(r));
}

static bool is_feature_id_reg(u32 encoding)
{
	return (sys_reg_Op0(encoding) == 3 &&
		(sys_reg_Op1(encoding) < 2 || sys_reg_Op1(encoding) == 3) &&
		sys_reg_CRn(encoding) == 0 &&
		sys_reg_CRm(encoding) <= 7);
}

/*
 * Return true if the register's (Op0, Op1, CRn, CRm, Op2) is
 * (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8, which is the range of ID
 * registers KVM maintains on a per-VM basis.
 */
static inline bool is_vm_ftr_id_reg(u32 id)
{
	if (id == SYS_CTR_EL0)
		return true;

	return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
		sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
		sys_reg_CRm(id) < 8);
}

static inline bool is_vcpu_ftr_id_reg(u32 id)
{
	return is_feature_id_reg(id) && !is_vm_ftr_id_reg(id);
}

static inline bool is_aa32_id_reg(u32 id)
{
	return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
		sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
		sys_reg_CRm(id) <= 3);
}

static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
				  const struct sys_reg_desc *r)
{
	u32 id = reg_to_encoding(r);

	switch (id) {
	case SYS_ID_AA64ZFR0_EL1:
		if (!vcpu_has_sve(vcpu))
			return REG_RAZ;
		break;
	}

	return 0;
}

static unsigned int aa32_id_visibility(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *r)
{
	/*
	 * AArch32 ID registers are UNKNOWN if AArch32 isn't implemented at any
	 * EL. Promote to RAZ/WI in order to guarantee consistency between
	 * systems.
	 */
	if (!kvm_supports_32bit_el0())
		return REG_RAZ | REG_USER_WI;

	return id_visibility(vcpu, r);
}

static unsigned int raz_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *r)
{
	return REG_RAZ;
}

/* cpufeature ID register access trap handlers */

static bool access_id_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_id_reg(vcpu, r);

	return true;
}

/* Visibility overrides for SVE-specific control registers */
static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (vcpu_has_sve(vcpu))
		return 0;

	return REG_HIDDEN;
}

static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
					  const struct sys_reg_desc *rd)
{
	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	if (!vcpu_has_sve(vcpu))
		val &= ~ID_AA64PFR0_EL1_SVE_MASK;

	/*
	 * The default is to expose CSV2 == 1 if the HW isn't affected.
	 * Although this is a per-CPU feature, we make it global because
	 * asymmetric systems are just a nuisance.
	 *
	 * Userspace can override this as long as it doesn't promise
	 * the impossible.
	 */
	if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED) {
		val &= ~ID_AA64PFR0_EL1_CSV2_MASK;
		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV2, IMP);
	}
	if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED) {
		val &= ~ID_AA64PFR0_EL1_CSV3_MASK;
		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV3, IMP);
	}

	if (kvm_vgic_global_state.type == VGIC_V3) {
		val &= ~ID_AA64PFR0_EL1_GIC_MASK;
		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, GIC, IMP);
	}

	val &= ~ID_AA64PFR0_EL1_AMU_MASK;

	return val;
}

#define ID_REG_LIMIT_FIELD_ENUM(val, reg, field, limit)			\
({									\
	u64 __f_val = FIELD_GET(reg##_##field##_MASK, val);		\
	(val) &= ~reg##_##field##_MASK;					\
	(val) |= FIELD_PREP(reg##_##field##_MASK,			\
			    min(__f_val,				\
				(u64)SYS_FIELD_VALUE(reg, field, limit))); \
	(val);								\
})

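/*
 * Illustrative expansion, not part of the original source: clamping
 * ID_AA64DFR0_EL1.DebugVer to V8P8 with this macro behaves roughly like
 *
 *	u64 __f_val = FIELD_GET(ID_AA64DFR0_EL1_DebugVer_MASK, val);
 *	val &= ~ID_AA64DFR0_EL1_DebugVer_MASK;
 *	val |= FIELD_PREP(ID_AA64DFR0_EL1_DebugVer_MASK,
 *			  min(__f_val, (u64)ID_AA64DFR0_EL1_DebugVer_V8P8));
 *
 * i.e. the field keeps its sanitised value unless that exceeds the limit,
 * in which case it is capped at the limit.
 */
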
static u64 read_sanitised_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
					  const struct sys_reg_desc *rd)
{
	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);

	val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, V8P8);

	/*
	 * Only initialize the PMU version if the vCPU was configured with one.
	 */
	val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
	if (kvm_vcpu_has_pmu(vcpu))
		val |= SYS_FIELD_PREP(ID_AA64DFR0_EL1, PMUVer,
				      kvm_arm_pmu_get_pmuver_limit());

	/* Hide SPE from guests */
	val &= ~ID_AA64DFR0_EL1_PMSVer_MASK;

	return val;
}

static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
			       const struct sys_reg_desc *rd,
			       u64 val)
{
	u8 debugver = SYS_FIELD_GET(ID_AA64DFR0_EL1, DebugVer, val);
	u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val);

	/*
	 * Prior to commit 3d0dba5764b9 ("KVM: arm64: PMU: Move the
	 * ID_AA64DFR0_EL1.PMUver limit to VM creation"), KVM erroneously
	 * exposed an IMP_DEF PMU to userspace and the guest on systems w/
	 * non-architectural PMUs. Of course, PMUv3 is the only game in town for
	 * PMU virtualization, so the IMP_DEF value was rather user-hostile.
	 *
	 * At minimum, we're on the hook to allow values that were given to
	 * userspace by KVM. Cover our tracks here and replace the IMP_DEF value
	 * with a more sensible NI. The value of an ID register changing under
	 * the nose of the guest is unfortunate, but is certainly no more
	 * surprising than an ill-guided PMU driver poking at impdef system
	 * registers that end in an UNDEF...
	 */
	if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
		val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;

	/*
	 * ID_AA64DFR0_EL1.DebugVer is one of those awkward fields with a
	 * nonzero minimum safe value.
	 */
	if (debugver < ID_AA64DFR0_EL1_DebugVer_IMP)
		return -EINVAL;

	return set_id_reg(vcpu, rd, val);
}

static u64 read_sanitised_id_dfr0_el1(struct kvm_vcpu *vcpu,
				      const struct sys_reg_desc *rd)
{
	u8 perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
	u64 val = read_sanitised_ftr_reg(SYS_ID_DFR0_EL1);

	val &= ~ID_DFR0_EL1_PerfMon_MASK;
	if (kvm_vcpu_has_pmu(vcpu))
		val |= SYS_FIELD_PREP(ID_DFR0_EL1, PerfMon, perfmon);

	val = ID_REG_LIMIT_FIELD_ENUM(val, ID_DFR0_EL1, CopDbg, Debugv8p8);

	return val;
}

static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
			   const struct sys_reg_desc *rd,
			   u64 val)
{
	u8 perfmon = SYS_FIELD_GET(ID_DFR0_EL1, PerfMon, val);
	u8 copdbg = SYS_FIELD_GET(ID_DFR0_EL1, CopDbg, val);

	if (perfmon == ID_DFR0_EL1_PerfMon_IMPDEF) {
		val &= ~ID_DFR0_EL1_PerfMon_MASK;
		perfmon = 0;
	}

	/*
	 * Allow DFR0_EL1.PerfMon to be set from userspace as long as
	 * it doesn't promise more than what the HW gives us on the
	 * AArch64 side (as everything is emulated with that), and
	 * that this is a PMUv3.
	 */
	if (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3)
		return -EINVAL;

	if (copdbg < ID_DFR0_EL1_CopDbg_Armv8)
		return -EINVAL;

	return set_id_reg(vcpu, rd, val);
}
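
/*
 * Illustrative examples, not part of the original source: a userspace
 * write with a nonzero PerfMon value below PMUv3 (e.g. PMUv2) is rejected
 * with -EINVAL; a write with PerfMon == IMPDEF (0b1111) is quietly turned
 * into "no PMU" before the generic checks in set_id_reg() run; and a write
 * with CopDbg below the Armv8 baseline is rejected as well.
 */
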
/*
 * cpufeature ID register user accessors
 *
 * For now, these registers are immutable for userspace, so no values
 * are stored, and for set_id_reg() we don't allow the effective value
 * to be changed.
 */
static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 *val)
{
	/*
	 * Avoid locking if the VM has already started, as the ID registers are
	 * guaranteed to be invariant at that point.
	 */
	if (kvm_vm_has_ran_once(vcpu->kvm)) {
		*val = read_id_reg(vcpu, rd);
		return 0;
	}

	mutex_lock(&vcpu->kvm->arch.config_lock);
	*val = read_id_reg(vcpu, rd);
	mutex_unlock(&vcpu->kvm->arch.config_lock);

	return 0;
}

static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 val)
{
	u32 id = reg_to_encoding(rd);
	int ret;

	mutex_lock(&vcpu->kvm->arch.config_lock);

	/*
	 * Once the VM has started the ID registers are immutable. Reject any
	 * write that does not match the final register value.
	 */
	if (kvm_vm_has_ran_once(vcpu->kvm)) {
		if (val != read_id_reg(vcpu, rd))
			ret = -EBUSY;
		else
			ret = 0;

		mutex_unlock(&vcpu->kvm->arch.config_lock);
		return ret;
	}

	ret = arm64_check_features(vcpu, rd, val);
	if (!ret)
		kvm_set_vm_id_reg(vcpu->kvm, id, val);

	mutex_unlock(&vcpu->kvm->arch.config_lock);

	/*
	 * arm64_check_features() returns -E2BIG to indicate the register's
	 * feature set is a superset of the maximally-allowed register value.
	 * While it would be nice to precisely describe this to userspace, the
	 * existing UAPI for KVM_SET_ONE_REG has it that invalid register
	 * writes return -EINVAL.
	 */
	if (ret == -E2BIG)
		ret = -EINVAL;
	return ret;
}

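/*
 * Illustrative flow, not part of the original source: userspace typically
 * reads an ID register with KVM_GET_ONE_REG, clears the feature fields it
 * wants to hide, and writes the result back with KVM_SET_ONE_REG before
 * the first KVM_RUN. Once any vCPU has run, only a write that exactly
 * matches the current value is accepted; anything else fails with -EBUSY.
 */
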
void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val)
{
	u64 *p = __vm_id_reg(&kvm->arch, reg);

	lockdep_assert_held(&kvm->arch.config_lock);

	if (KVM_BUG_ON(kvm_vm_has_ran_once(kvm) || !p, kvm))
		return;

	*p = val;
}

static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		       u64 *val)
{
	*val = 0;
	return 0;
}

static int set_wi_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 val)
{
	return 0;
}

static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = kvm_read_vm_id_reg(vcpu->kvm, SYS_CTR_EL0);
	return true;
}

static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = __vcpu_sys_reg(vcpu, r->reg);
	return true;
}

/*
 * Fabricate a CLIDR_EL1 value instead of using the real value, which can
 * vary depending on the physical CPU on which the vcpu currently resides.
 */
static u64 reset_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
	u64 clidr;
	u8 loc;

	if ((ctr_el0 & CTR_EL0_IDC)) {
		/*
		 * Data cache clean to the PoU is not required so LoUU and LoUIS
		 * will not be set and a unified cache, which will be marked as
		 * LoC, will be added.
		 *
		 * If not DIC, make the unified cache L2 so that an instruction
		 * cache can be added as L1 later.
		 */
		loc = (ctr_el0 & CTR_EL0_DIC) ? 1 : 2;
		clidr = CACHE_TYPE_UNIFIED << CLIDR_CTYPE_SHIFT(loc);
	} else {
		/*
		 * Data cache clean to the PoU is required so let L1 have a data
		 * cache and mark it as LoUU and LoUIS. As L1 has a data cache,
		 * it can be marked as LoC too.
		 */
		loc = 1;
		clidr = 1 << CLIDR_LOUU_SHIFT;
		clidr |= 1 << CLIDR_LOUIS_SHIFT;
		clidr |= CACHE_TYPE_DATA << CLIDR_CTYPE_SHIFT(1);
	}

	/*
	 * Instruction cache invalidation to the PoU is required so let L1 have
	 * an instruction cache. If L1 already has a data cache, it will be
	 * CACHE_TYPE_SEPARATE.
	 */
	if (!(ctr_el0 & CTR_EL0_DIC))
		clidr |= CACHE_TYPE_INST << CLIDR_CTYPE_SHIFT(1);

	clidr |= loc << CLIDR_LOC_SHIFT;

	/*
	 * Add tag cache unified to data cache. Allocation tags and data are
	 * unified in a cache line so that it looks valid even if there is only
	 * one cache line.
	 */
	if (kvm_has_mte(vcpu->kvm))
		clidr |= 2 << CLIDR_TTYPE_SHIFT(loc);

	__vcpu_sys_reg(vcpu, r->reg) = clidr;

	return __vcpu_sys_reg(vcpu, r->reg);
}

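/*
 * Illustrative summary, derived from the logic above (not part of the
 * original source): with CTR_EL0.IDC=1 and DIC=1 the fabricated hierarchy
 * is a single unified L1 marked as LoC; with IDC=1 and DIC=0 it is an
 * instruction L1 plus a unified L2 marked as LoC; with IDC=0 and DIC=0 it
 * is a data+instruction L1 marked as LoUU/LoUIS/LoC. When MTE is enabled,
 * a tag cache unified with data is additionally reported at the LoC level.
 */
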
static int set_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		     u64 val)
{
	u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
	u64 idc = !CLIDR_LOC(val) || (!CLIDR_LOUIS(val) && !CLIDR_LOUU(val));

	if ((val & CLIDR_EL1_RES0) || (!(ctr_el0 & CTR_EL0_IDC) && idc))
		return -EINVAL;

	__vcpu_sys_reg(vcpu, rd->reg) = val;

	return 0;
}

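/*
 * Illustrative note, not in the original source: a userspace write is
 * rejected if it sets any RES0 bit, or if the proposed hierarchy implies
 * that data cache clean to the PoU is never needed (LoC == 0, or both
 * LoUU and LoUIS == 0) while the host's CTR_EL0.IDC == 0 says that such
 * maintenance is in fact required.
 */
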
static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	int reg = r->reg;

	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, reg);
	else
		p->regval = vcpu_read_sys_reg(vcpu, reg);
	return true;
}

static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u32 csselr;

	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
	csselr &= CSSELR_EL1_Level | CSSELR_EL1_InD;
	if (csselr < CSSELR_MAX)
		p->regval = get_ccsidr(vcpu, csselr);

	return true;
}

static unsigned int mte_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (kvm_has_mte(vcpu->kvm))
		return 0;

	return REG_HIDDEN;
}

#define MTE_REG(name) {				\
	SYS_DESC(SYS_##name),			\
	.access = undef_access,			\
	.reset = reset_unknown,			\
	.reg = name,				\
	.visibility = mte_visibility,		\
}

static unsigned int el2_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (vcpu_has_nv(vcpu))
		return 0;

	return REG_HIDDEN;
}

static bool bad_vncr_trap(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	/*
	 * We really shouldn't be here, and this is likely the result
	 * of a misconfigured trap, as this register should target the
	 * VNCR page, and nothing else.
	 */
	return bad_trap(vcpu, p, r,
			"trap of VNCR-backed register");
}

static bool bad_redir_trap(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	/*
	 * We really shouldn't be here, and this is likely the result
	 * of a misconfigured trap, as this register should target the
	 * corresponding EL1, and nothing else.
	 */
	return bad_trap(vcpu, p, r,
			"trap of EL2 register redirected to EL1");
}

#define EL2_REG(name, acc, rst, v) {		\
	SYS_DESC(SYS_##name),			\
	.access = acc,				\
	.reset = rst,				\
	.reg = name,				\
	.visibility = el2_visibility,		\
	.val = v,				\
}

#define EL2_REG_VNCR(name, rst, v)	EL2_REG(name, bad_vncr_trap, rst, v)
#define EL2_REG_REDIR(name, rst, v)	EL2_REG(name, bad_redir_trap, rst, v)

/*
 * EL{0,1}2 registers are the EL2 view on an EL0 or EL1 register when
 * HCR_EL2.E2H==1, and only in the sysreg table for convenience of
 * handling traps. Given that, they are always hidden from userspace.
 */
static unsigned int hidden_user_visibility(const struct kvm_vcpu *vcpu,
					   const struct sys_reg_desc *rd)
{
	return REG_HIDDEN_USER;
}

#define EL12_REG(name, acc, rst, v) {		\
	SYS_DESC(SYS_##name##_EL12),		\
	.access = acc,				\
	.reset = rst,				\
	.reg = name##_EL1,			\
	.val = v,				\
	.visibility = hidden_user_visibility,	\
}

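/*
 * Hypothetical usage sketch for the helpers above, not copied from the
 * original table (entry names and reset values are assumptions):
 *
 *	EL2_REG_VNCR(HCR_EL2, reset_hcr, 0),
 *	EL12_REG(CNTKCTL, access_rw, reset_val, 0),
 *
 * The EL2 entry is exposed only to NV-enabled guests via el2_visibility(),
 * while the _EL12 alias exists purely for trap handling and is hidden from
 * userspace via hidden_user_visibility().
 */
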
/*
 * Since the reset() callback and field val are not otherwise used for
 * idregs, they are repurposed: reset() returns the KVM-sanitised register
 * value, which matches the host kernel's sanitised value when KVM applies
 * no extra sanitisation, and val is a mask of the fields that userspace
 * may write (only bits set to 1 are writable). This mask may become
 * unnecessary once all ID registers are made writable from userspace.
 */

#define ID_DESC(name)				\
	SYS_DESC(SYS_##name),			\
	.access = access_id_reg,		\
	.get_user = get_id_reg			\

/* sys_reg_desc initialiser for known cpufeature ID registers */
#define ID_SANITISED(name) {			\
	ID_DESC(name),				\
	.set_user = set_id_reg,			\
	.visibility = id_visibility,		\
	.reset = kvm_read_sanitised_id_reg,	\
	.val = 0,				\
}

/* sys_reg_desc initialiser for known cpufeature ID registers */
#define AA32_ID_SANITISED(name) {		\
	ID_DESC(name),				\
	.set_user = set_id_reg,			\
	.visibility = aa32_id_visibility,	\
	.reset = kvm_read_sanitised_id_reg,	\
	.val = 0,				\
}

/* sys_reg_desc initialiser for writable ID registers */
#define ID_WRITABLE(name, mask) {		\
	ID_DESC(name),				\
	.set_user = set_id_reg,			\
	.visibility = id_visibility,		\
	.reset = kvm_read_sanitised_id_reg,	\
	.val = mask,				\
}
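
/*
 * Hypothetical usage sketch for ID_WRITABLE, not copied from the original
 * table: an entry that lets userspace rewrite every non-RES0 field of
 * ID_AA64ZFR0_EL1 could look like
 *
 *	ID_WRITABLE(ID_AA64ZFR0_EL1, ~ID_AA64ZFR0_EL1_RES0),
 *
 * where the mask stored in .val selects the bits set_id_reg() will accept
 * from userspace.
 */
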
/*
 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
 * (1 <= crm < 8, 0 <= Op2 < 8).
 */
#define ID_UNALLOCATED(crm, op2) {			\
	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
	.access = access_id_reg,			\
	.get_user = get_id_reg,				\
	.set_user = set_id_reg,				\
	.visibility = raz_visibility,			\
	.reset = kvm_read_sanitised_id_reg,		\
	.val = 0,					\
}

/*
 * sys_reg_desc initialiser for known ID registers that we hide from guests.
 * For now, these are exposed just like unallocated ID regs: they appear
 * RAZ for the guest.
 */
#define ID_HIDDEN(name) {			\
	ID_DESC(name),				\
	.set_user = set_id_reg,			\
	.visibility = raz_visibility,		\
	.reset = kvm_read_sanitised_id_reg,	\
	.val = 0,				\
}

static bool access_sp_el1(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (p->is_write)
		__vcpu_sys_reg(vcpu, SP_EL1) = p->regval;
	else
		p->regval = __vcpu_sys_reg(vcpu, SP_EL1);

	return true;
}

static bool access_elr(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, ELR_EL1);
	else
		p->regval = vcpu_read_sys_reg(vcpu, ELR_EL1);

	return true;
}

static bool access_spsr(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		__vcpu_sys_reg(vcpu, SPSR_EL1) = p->regval;
	else
		p->regval = __vcpu_sys_reg(vcpu, SPSR_EL1);

	return true;
}

static u64 reset_hcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 val = r->val;

	if (!cpus_have_final_cap(ARM64_HAS_HCR_NV1))
		val |= HCR_E2H;

	return __vcpu_sys_reg(vcpu, r->reg) = val;
}

static unsigned int sve_el2_visibility(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *rd)
{
	unsigned int r;

	r = el2_visibility(vcpu, rd);
	if (r)
		return r;

	return sve_visibility(vcpu, rd);
}

static bool access_zcr_el2(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	unsigned int vq;

	if (guest_hyp_sve_traps_enabled(vcpu)) {
		kvm_inject_nested_sve_trap(vcpu);
		return true;
	}

	if (!p->is_write) {
		p->regval = vcpu_read_sys_reg(vcpu, ZCR_EL2);
		return true;
	}

	vq = SYS_FIELD_GET(ZCR_ELx, LEN, p->regval) + 1;
	vq = min(vq, vcpu_sve_max_vq(vcpu));
	vcpu_write_sys_reg(vcpu, vq - 1, ZCR_EL2);
	return true;
}

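/*
 * Worked example for the write path above (illustrative, not from the
 * original source): ZCR_ELx.LEN encodes the vector length in 128-bit
 * quanta, minus one. A guest write of LEN = 7 requests vq = 8, i.e. a
 * 1024-bit vector length; if the vCPU's maximum is vq = 4 (512 bits),
 * the value stored in ZCR_EL2 is clamped to LEN = 3.
 */
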
2012-12-11 00:15:34 +08:00
|
|
|
/*
|
|
|
|
* Architected system registers.
|
|
|
|
* Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
|
2014-04-24 17:21:16 +08:00
|
|
|
*
|
2014-04-24 17:24:46 +08:00
|
|
|
* Debug handling: We do trap most, if not all debug related system
|
|
|
|
* registers. The implementation is good enough to ensure that a guest
|
|
|
|
* can use these with minimal performance degradation. The drawback is
|
2022-02-04 01:41:57 +08:00
|
|
|
* that we don't implement any of the external debug architecture.
|
|
|
|
* This should be revisited if we ever encounter a more demanding
|
|
|
|
* guest...
|
2012-12-11 00:15:34 +08:00
|
|
|
*/
|
|
|
|
static const struct sys_reg_desc sys_reg_descs[] = {
|
2014-04-24 17:24:46 +08:00
|
|
|
DBG_BCR_BVR_WCR_WVR_EL1(0),
|
|
|
|
DBG_BCR_BVR_WCR_WVR_EL1(1),
|
2017-01-14 01:19:12 +08:00
|
|
|
{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
|
|
|
|
{ SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
|
2014-04-24 17:24:46 +08:00
|
|
|
DBG_BCR_BVR_WCR_WVR_EL1(2),
|
|
|
|
DBG_BCR_BVR_WCR_WVR_EL1(3),
|
|
|
|
DBG_BCR_BVR_WCR_WVR_EL1(4),
|
|
|
|
DBG_BCR_BVR_WCR_WVR_EL1(5),
|
|
|
|
DBG_BCR_BVR_WCR_WVR_EL1(6),
|
|
|
|
DBG_BCR_BVR_WCR_WVR_EL1(7),
|
|
|
|
DBG_BCR_BVR_WCR_WVR_EL1(8),
|
|
|
|
DBG_BCR_BVR_WCR_WVR_EL1(9),
|
|
|
|
DBG_BCR_BVR_WCR_WVR_EL1(10),
|
|
|
|
DBG_BCR_BVR_WCR_WVR_EL1(11),
|
|
|
|
DBG_BCR_BVR_WCR_WVR_EL1(12),
|
|
|
|
DBG_BCR_BVR_WCR_WVR_EL1(13),
|
|
|
|
DBG_BCR_BVR_WCR_WVR_EL1(14),
|
|
|
|
DBG_BCR_BVR_WCR_WVR_EL1(15),
|
|
|
|
|
2017-01-14 01:19:12 +08:00
|
|
|
{ SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
|
2022-02-04 01:41:56 +08:00
|
|
|
{ SYS_DESC(SYS_OSLAR_EL1), trap_oslar_el1 },
|
2022-02-04 01:41:55 +08:00
|
|
|
{ SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1, reset_val, OSLSR_EL1,
|
2023-05-24 02:37:01 +08:00
|
|
|
OSLSR_EL1_OSLM_IMPLEMENTED, .set_user = set_oslsr_el1, },
|
2017-01-14 01:19:12 +08:00
|
|
|
{ SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
|
|
|
|
{ SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
|
|
|
|
{ SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
|
|
|
|
{ SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
|
|
|
|
{ SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },
|
|
|
|
|
|
|
|
{ SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
|
|
|
|
{ SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
|
|
|
|
// DBGDTR[TR]X_EL0 share the same encoding
|
|
|
|
{ SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },
|
|
|
|
|
2023-10-23 17:54:43 +08:00
|
|
|
{ SYS_DESC(SYS_DBGVCR32_EL2), trap_undef, reset_val, DBGVCR32_EL2, 0 },
|
2013-02-07 18:32:33 +08:00
|
|
|
|
2017-01-20 02:39:39 +08:00
|
|
|
{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },

	/*
	 * ID regs: all ID_SANITISED() entries here must have corresponding
	 * entries in arm64_ftr_regs[].
	 */

	/* AArch64 mappings of the AArch32 ID registers */
	/* CRm=1 */
	AA32_ID_SANITISED(ID_PFR0_EL1),
	AA32_ID_SANITISED(ID_PFR1_EL1),
	{ SYS_DESC(SYS_ID_DFR0_EL1),
	  .access = access_id_reg,
	  .get_user = get_id_reg,
	  .set_user = set_id_dfr0_el1,
	  .visibility = aa32_id_visibility,
	  .reset = read_sanitised_id_dfr0_el1,
	  .val = ID_DFR0_EL1_PerfMon_MASK |
		 ID_DFR0_EL1_CopDbg_MASK, },
	ID_HIDDEN(ID_AFR0_EL1),
	AA32_ID_SANITISED(ID_MMFR0_EL1),
	AA32_ID_SANITISED(ID_MMFR1_EL1),
	AA32_ID_SANITISED(ID_MMFR2_EL1),
	AA32_ID_SANITISED(ID_MMFR3_EL1),

	/* CRm=2 */
	AA32_ID_SANITISED(ID_ISAR0_EL1),
	AA32_ID_SANITISED(ID_ISAR1_EL1),
	AA32_ID_SANITISED(ID_ISAR2_EL1),
	AA32_ID_SANITISED(ID_ISAR3_EL1),
	AA32_ID_SANITISED(ID_ISAR4_EL1),
	AA32_ID_SANITISED(ID_ISAR5_EL1),
	AA32_ID_SANITISED(ID_MMFR4_EL1),
	AA32_ID_SANITISED(ID_ISAR6_EL1),

	/* CRm=3 */
	AA32_ID_SANITISED(MVFR0_EL1),
	AA32_ID_SANITISED(MVFR1_EL1),
	AA32_ID_SANITISED(MVFR2_EL1),
	ID_UNALLOCATED(3,3),
	AA32_ID_SANITISED(ID_PFR2_EL1),
	ID_HIDDEN(ID_DFR1_EL1),
	AA32_ID_SANITISED(ID_MMFR5_EL1),
	ID_UNALLOCATED(3,7),

	/* AArch64 ID registers */
	/* CRm=4 */
	{ SYS_DESC(SYS_ID_AA64PFR0_EL1),
	  .access = access_id_reg,
	  .get_user = get_id_reg,
	  .set_user = set_id_reg,
	  .reset = read_sanitised_id_aa64pfr0_el1,
	  .val = ~(ID_AA64PFR0_EL1_AMU |
		   ID_AA64PFR0_EL1_MPAM |
		   ID_AA64PFR0_EL1_SVE |
		   ID_AA64PFR0_EL1_RAS |
		   ID_AA64PFR0_EL1_AdvSIMD |
		   ID_AA64PFR0_EL1_FP), },
	ID_SANITISED(ID_AA64PFR1_EL1),
	ID_UNALLOCATED(4,2),
	ID_UNALLOCATED(4,3),
	ID_WRITABLE(ID_AA64ZFR0_EL1, ~ID_AA64ZFR0_EL1_RES0),
	ID_HIDDEN(ID_AA64SMFR0_EL1),
	ID_UNALLOCATED(4,6),
	ID_UNALLOCATED(4,7),

	/* CRm=5 */
	{ SYS_DESC(SYS_ID_AA64DFR0_EL1),
	  .access = access_id_reg,
	  .get_user = get_id_reg,
	  .set_user = set_id_aa64dfr0_el1,
	  .reset = read_sanitised_id_aa64dfr0_el1,
	  .val = ID_AA64DFR0_EL1_PMUVer_MASK |
		 ID_AA64DFR0_EL1_DebugVer_MASK, },
	ID_SANITISED(ID_AA64DFR1_EL1),
	ID_UNALLOCATED(5,2),
	ID_UNALLOCATED(5,3),
	ID_HIDDEN(ID_AA64AFR0_EL1),
	ID_HIDDEN(ID_AA64AFR1_EL1),
	ID_UNALLOCATED(5,6),
	ID_UNALLOCATED(5,7),

	/* CRm=6 */
	ID_WRITABLE(ID_AA64ISAR0_EL1, ~ID_AA64ISAR0_EL1_RES0),
	ID_WRITABLE(ID_AA64ISAR1_EL1, ~(ID_AA64ISAR1_EL1_GPI |
					ID_AA64ISAR1_EL1_GPA |
					ID_AA64ISAR1_EL1_API |
					ID_AA64ISAR1_EL1_APA)),
	ID_WRITABLE(ID_AA64ISAR2_EL1, ~(ID_AA64ISAR2_EL1_RES0 |
					ID_AA64ISAR2_EL1_APA3 |
					ID_AA64ISAR2_EL1_GPA3)),
	ID_UNALLOCATED(6,3),
	ID_UNALLOCATED(6,4),
	ID_UNALLOCATED(6,5),
	ID_UNALLOCATED(6,6),
	ID_UNALLOCATED(6,7),

	/* CRm=7 */
	ID_WRITABLE(ID_AA64MMFR0_EL1, ~(ID_AA64MMFR0_EL1_RES0 |
					ID_AA64MMFR0_EL1_TGRAN4_2 |
					ID_AA64MMFR0_EL1_TGRAN64_2 |
					ID_AA64MMFR0_EL1_TGRAN16_2)),
	ID_WRITABLE(ID_AA64MMFR1_EL1, ~(ID_AA64MMFR1_EL1_RES0 |
					ID_AA64MMFR1_EL1_HCX |
					ID_AA64MMFR1_EL1_TWED |
					ID_AA64MMFR1_EL1_XNX |
					ID_AA64MMFR1_EL1_VH |
					ID_AA64MMFR1_EL1_VMIDBits)),
	ID_WRITABLE(ID_AA64MMFR2_EL1, ~(ID_AA64MMFR2_EL1_RES0 |
					ID_AA64MMFR2_EL1_EVT |
					ID_AA64MMFR2_EL1_FWB |
					ID_AA64MMFR2_EL1_IDS |
					ID_AA64MMFR2_EL1_NV |
					ID_AA64MMFR2_EL1_CCIDX)),
	ID_SANITISED(ID_AA64MMFR3_EL1),
	ID_SANITISED(ID_AA64MMFR4_EL1),
	ID_UNALLOCATED(7,5),
	ID_UNALLOCATED(7,6),
	ID_UNALLOCATED(7,7),

	{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
	{ SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
	{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },

	MTE_REG(RGSR_EL1),
	MTE_REG(GCR_EL1),

	{ SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
	{ SYS_DESC(SYS_TRFCR_EL1), undef_access },
	{ SYS_DESC(SYS_SMPRI_EL1), undef_access },
	{ SYS_DESC(SYS_SMCR_EL1), undef_access },
	{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
	{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
	{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
	{ SYS_DESC(SYS_TCR2_EL1), access_vm_reg, reset_val, TCR2_EL1, 0 },

	PTRAUTH_KEY(APIA),
	PTRAUTH_KEY(APIB),
	PTRAUTH_KEY(APDA),
	PTRAUTH_KEY(APDB),
	PTRAUTH_KEY(APGA),

	{ SYS_DESC(SYS_SPSR_EL1), access_spsr},
	{ SYS_DESC(SYS_ELR_EL1), access_elr},

	{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
	{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
	{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },

	{ SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },

	MTE_REG(TFSR_EL1),
	MTE_REG(TFSRE0_EL1),

	{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
	{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },

	{ SYS_DESC(SYS_PMSCR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSNEVFR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSICR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSIRR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSFCR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSEVFR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSLATFR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSIDR_EL1), undef_access },
	{ SYS_DESC(SYS_PMBLIMITR_EL1), undef_access },
	{ SYS_DESC(SYS_PMBPTR_EL1), undef_access },
	{ SYS_DESC(SYS_PMBSR_EL1), undef_access },
	/* PMBIDR_EL1 is not trapped */
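
	/*
	 * The PM{INTEN,CNTEN,OVS}{SET,CLR} pairs below deliberately point
	 * .reg at the same shadow register (the *SET variant); the access
	 * handlers tell the set and clear forms apart from the trapped
	 * encoding, so only one copy of the state needs to be kept.
	 */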
	{ PMU_SYS_REG(PMINTENSET_EL1),
	  .access = access_pminten, .reg = PMINTENSET_EL1,
	  .get_user = get_pmreg, .set_user = set_pmreg },
	{ PMU_SYS_REG(PMINTENCLR_EL1),
	  .access = access_pminten, .reg = PMINTENSET_EL1,
	  .get_user = get_pmreg, .set_user = set_pmreg },
	{ SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },

	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
	{ SYS_DESC(SYS_PIRE0_EL1), NULL, reset_unknown, PIRE0_EL1 },
	{ SYS_DESC(SYS_PIR_EL1), NULL, reset_unknown, PIR_EL1 },
	{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },

	{ SYS_DESC(SYS_LORSA_EL1), trap_loregion },
	{ SYS_DESC(SYS_LOREA_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORN_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORC_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORID_EL1), trap_loregion },

	{ SYS_DESC(SYS_VBAR_EL1), access_rw, reset_val, VBAR_EL1, 0 },
	{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },

	{ SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },

	{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
	{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },

	{ SYS_DESC(SYS_ACCDATA_EL1), undef_access },

	{ SYS_DESC(SYS_SCXTNUM_EL1), undef_access },

	{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},

	{ SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
	{ SYS_DESC(SYS_CLIDR_EL1), access_clidr, reset_clidr, CLIDR_EL1,
	  .set_user = set_clidr, .val = ~CLIDR_EL1_RES0 },
	{ SYS_DESC(SYS_CCSIDR2_EL1), undef_access },
	{ SYS_DESC(SYS_SMIDR_EL1), undef_access },
	{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
	ID_WRITABLE(CTR_EL0, CTR_EL0_DIC_MASK |
			     CTR_EL0_IDC_MASK |
			     CTR_EL0_DminLine_MASK |
			     CTR_EL0_IminLine_MASK),
	{ SYS_DESC(SYS_SVCR), undef_access },

	{ PMU_SYS_REG(PMCR_EL0), .access = access_pmcr, .reset = reset_pmcr,
	  .reg = PMCR_EL0, .get_user = get_pmcr, .set_user = set_pmcr },
	{ PMU_SYS_REG(PMCNTENSET_EL0),
	  .access = access_pmcnten, .reg = PMCNTENSET_EL0,
	  .get_user = get_pmreg, .set_user = set_pmreg },
	{ PMU_SYS_REG(PMCNTENCLR_EL0),
	  .access = access_pmcnten, .reg = PMCNTENSET_EL0,
	  .get_user = get_pmreg, .set_user = set_pmreg },
	{ PMU_SYS_REG(PMOVSCLR_EL0),
	  .access = access_pmovs, .reg = PMOVSSET_EL0,
	  .get_user = get_pmreg, .set_user = set_pmreg },
	/*
	 * PMSWINC_EL0 is exposed to userspace as RAZ/WI, as it was
	 * previously (and pointlessly) advertised in the past...
	 */
	{ PMU_SYS_REG(PMSWINC_EL0),
	  .get_user = get_raz_reg, .set_user = set_wi_reg,
	  .access = access_pmswinc, .reset = NULL },
	{ PMU_SYS_REG(PMSELR_EL0),
	  .access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 },
	{ PMU_SYS_REG(PMCEID0_EL0),
	  .access = access_pmceid, .reset = NULL },
	{ PMU_SYS_REG(PMCEID1_EL0),
	  .access = access_pmceid, .reset = NULL },
	{ PMU_SYS_REG(PMCCNTR_EL0),
	  .access = access_pmu_evcntr, .reset = reset_unknown,
	  .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr},
	{ PMU_SYS_REG(PMXEVTYPER_EL0),
	  .access = access_pmu_evtyper, .reset = NULL },
	{ PMU_SYS_REG(PMXEVCNTR_EL0),
	  .access = access_pmu_evcntr, .reset = NULL },
	/*
	 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ PMU_SYS_REG(PMUSERENR_EL0), .access = access_pmuserenr,
	  .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
	{ PMU_SYS_REG(PMOVSSET_EL0),
	  .access = access_pmovs, .reg = PMOVSSET_EL0,
	  .get_user = get_pmreg, .set_user = set_pmreg },

	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
	{ SYS_DESC(SYS_TPIDR2_EL0), undef_access },

	{ SYS_DESC(SYS_SCXTNUM_EL0), undef_access },

	{ SYS_DESC(SYS_AMCR_EL0), undef_access },
	{ SYS_DESC(SYS_AMCFGR_EL0), undef_access },
	{ SYS_DESC(SYS_AMCGCR_EL0), undef_access },
	{ SYS_DESC(SYS_AMUSERENR_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENCLR0_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENSET0_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENCLR1_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENSET1_EL0), undef_access },
	AMU_AMEVCNTR0_EL0(0),
	AMU_AMEVCNTR0_EL0(1),
	AMU_AMEVCNTR0_EL0(2),
	AMU_AMEVCNTR0_EL0(3),
	AMU_AMEVCNTR0_EL0(4),
	AMU_AMEVCNTR0_EL0(5),
	AMU_AMEVCNTR0_EL0(6),
	AMU_AMEVCNTR0_EL0(7),
	AMU_AMEVCNTR0_EL0(8),
	AMU_AMEVCNTR0_EL0(9),
	AMU_AMEVCNTR0_EL0(10),
	AMU_AMEVCNTR0_EL0(11),
	AMU_AMEVCNTR0_EL0(12),
	AMU_AMEVCNTR0_EL0(13),
	AMU_AMEVCNTR0_EL0(14),
	AMU_AMEVCNTR0_EL0(15),
	AMU_AMEVTYPER0_EL0(0),
	AMU_AMEVTYPER0_EL0(1),
	AMU_AMEVTYPER0_EL0(2),
	AMU_AMEVTYPER0_EL0(3),
	AMU_AMEVTYPER0_EL0(4),
	AMU_AMEVTYPER0_EL0(5),
	AMU_AMEVTYPER0_EL0(6),
	AMU_AMEVTYPER0_EL0(7),
	AMU_AMEVTYPER0_EL0(8),
	AMU_AMEVTYPER0_EL0(9),
	AMU_AMEVTYPER0_EL0(10),
	AMU_AMEVTYPER0_EL0(11),
	AMU_AMEVTYPER0_EL0(12),
	AMU_AMEVTYPER0_EL0(13),
	AMU_AMEVTYPER0_EL0(14),
	AMU_AMEVTYPER0_EL0(15),
	AMU_AMEVCNTR1_EL0(0),
	AMU_AMEVCNTR1_EL0(1),
	AMU_AMEVCNTR1_EL0(2),
	AMU_AMEVCNTR1_EL0(3),
	AMU_AMEVCNTR1_EL0(4),
	AMU_AMEVCNTR1_EL0(5),
	AMU_AMEVCNTR1_EL0(6),
	AMU_AMEVCNTR1_EL0(7),
	AMU_AMEVCNTR1_EL0(8),
	AMU_AMEVCNTR1_EL0(9),
	AMU_AMEVCNTR1_EL0(10),
	AMU_AMEVCNTR1_EL0(11),
	AMU_AMEVCNTR1_EL0(12),
	AMU_AMEVCNTR1_EL0(13),
	AMU_AMEVCNTR1_EL0(14),
	AMU_AMEVCNTR1_EL0(15),
	AMU_AMEVTYPER1_EL0(0),
	AMU_AMEVTYPER1_EL0(1),
	AMU_AMEVTYPER1_EL0(2),
	AMU_AMEVTYPER1_EL0(3),
	AMU_AMEVTYPER1_EL0(4),
	AMU_AMEVTYPER1_EL0(5),
	AMU_AMEVTYPER1_EL0(6),
	AMU_AMEVTYPER1_EL0(7),
	AMU_AMEVTYPER1_EL0(8),
	AMU_AMEVTYPER1_EL0(9),
	AMU_AMEVTYPER1_EL0(10),
	AMU_AMEVTYPER1_EL0(11),
	AMU_AMEVTYPER1_EL0(12),
	AMU_AMEVTYPER1_EL0(13),
	AMU_AMEVTYPER1_EL0(14),
	AMU_AMEVTYPER1_EL0(15),

	{ SYS_DESC(SYS_CNTPCT_EL0), access_arch_timer },
	{ SYS_DESC(SYS_CNTPCTSS_EL0), access_arch_timer },
	{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
	{ SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
	{ SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },

	/* PMEVCNTRn_EL0 */
	PMU_PMEVCNTR_EL0(0),
	PMU_PMEVCNTR_EL0(1),
	PMU_PMEVCNTR_EL0(2),
	PMU_PMEVCNTR_EL0(3),
	PMU_PMEVCNTR_EL0(4),
	PMU_PMEVCNTR_EL0(5),
	PMU_PMEVCNTR_EL0(6),
	PMU_PMEVCNTR_EL0(7),
	PMU_PMEVCNTR_EL0(8),
	PMU_PMEVCNTR_EL0(9),
	PMU_PMEVCNTR_EL0(10),
	PMU_PMEVCNTR_EL0(11),
	PMU_PMEVCNTR_EL0(12),
	PMU_PMEVCNTR_EL0(13),
	PMU_PMEVCNTR_EL0(14),
	PMU_PMEVCNTR_EL0(15),
	PMU_PMEVCNTR_EL0(16),
	PMU_PMEVCNTR_EL0(17),
	PMU_PMEVCNTR_EL0(18),
	PMU_PMEVCNTR_EL0(19),
	PMU_PMEVCNTR_EL0(20),
	PMU_PMEVCNTR_EL0(21),
	PMU_PMEVCNTR_EL0(22),
	PMU_PMEVCNTR_EL0(23),
	PMU_PMEVCNTR_EL0(24),
	PMU_PMEVCNTR_EL0(25),
	PMU_PMEVCNTR_EL0(26),
	PMU_PMEVCNTR_EL0(27),
	PMU_PMEVCNTR_EL0(28),
	PMU_PMEVCNTR_EL0(29),
	PMU_PMEVCNTR_EL0(30),
	/* PMEVTYPERn_EL0 */
	PMU_PMEVTYPER_EL0(0),
	PMU_PMEVTYPER_EL0(1),
	PMU_PMEVTYPER_EL0(2),
	PMU_PMEVTYPER_EL0(3),
	PMU_PMEVTYPER_EL0(4),
	PMU_PMEVTYPER_EL0(5),
	PMU_PMEVTYPER_EL0(6),
	PMU_PMEVTYPER_EL0(7),
	PMU_PMEVTYPER_EL0(8),
	PMU_PMEVTYPER_EL0(9),
	PMU_PMEVTYPER_EL0(10),
	PMU_PMEVTYPER_EL0(11),
	PMU_PMEVTYPER_EL0(12),
	PMU_PMEVTYPER_EL0(13),
	PMU_PMEVTYPER_EL0(14),
	PMU_PMEVTYPER_EL0(15),
	PMU_PMEVTYPER_EL0(16),
	PMU_PMEVTYPER_EL0(17),
	PMU_PMEVTYPER_EL0(18),
	PMU_PMEVTYPER_EL0(19),
	PMU_PMEVTYPER_EL0(20),
	PMU_PMEVTYPER_EL0(21),
	PMU_PMEVTYPER_EL0(22),
	PMU_PMEVTYPER_EL0(23),
	PMU_PMEVTYPER_EL0(24),
	PMU_PMEVTYPER_EL0(25),
	PMU_PMEVTYPER_EL0(26),
	PMU_PMEVTYPER_EL0(27),
	PMU_PMEVTYPER_EL0(28),
	PMU_PMEVTYPER_EL0(29),
	PMU_PMEVTYPER_EL0(30),
	/*
	 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ PMU_SYS_REG(PMCCFILTR_EL0), .access = access_pmu_evtyper,
	  .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },
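
	/*
	 * EL2 state for nested virtualisation. The EL2_REG_VNCR() entries
	 * are understood to describe registers whose guest view lives in
	 * VNCR-backed memory (FEAT_NV2 register redirection) rather than
	 * in the regular vcpu sysreg file.
	 */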
|
2015-12-08 15:29:06 +08:00
|
|
|
|
2023-11-07 17:02:10 +08:00
|
|
|
EL2_REG_VNCR(VPIDR_EL2, reset_unknown, 0),
|
|
|
|
EL2_REG_VNCR(VMPIDR_EL2, reset_unknown, 0),
|
2023-02-10 01:58:10 +08:00
|
|
|
EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1),
|
|
|
|
EL2_REG(ACTLR_EL2, access_rw, reset_val, 0),
|
2024-01-23 02:13:43 +08:00
|
|
|
EL2_REG_VNCR(HCR_EL2, reset_hcr, 0),
|
2023-02-10 01:58:10 +08:00
|
|
|
EL2_REG(MDCR_EL2, access_rw, reset_val, 0),
|
2023-06-10 00:21:56 +08:00
|
|
|
EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1),
|
2023-11-07 17:02:10 +08:00
|
|
|
EL2_REG_VNCR(HSTR_EL2, reset_val, 0),
|
|
|
|
EL2_REG_VNCR(HFGRTR_EL2, reset_val, 0),
|
|
|
|
EL2_REG_VNCR(HFGWTR_EL2, reset_val, 0),
|
|
|
|
EL2_REG_VNCR(HFGITR_EL2, reset_val, 0),
|
|
|
|
EL2_REG_VNCR(HACR_EL2, reset_val, 0),
|
2023-02-10 01:58:10 +08:00
|
|
|
|
2024-06-21 00:46:40 +08:00
|
|
|
{ SYS_DESC(SYS_ZCR_EL2), .access = access_zcr_el2, .reset = reset_val,
|
|
|
|
.visibility = sve_el2_visibility, .reg = ZCR_EL2 },
|
|
|
|
|
2023-11-07 17:02:10 +08:00
|
|
|
EL2_REG_VNCR(HCRX_EL2, reset_val, 0),
|
2023-08-16 02:39:02 +08:00
|
|
|
|
2023-02-10 01:58:10 +08:00
|
|
|
EL2_REG(TTBR0_EL2, access_rw, reset_val, 0),
|
|
|
|
EL2_REG(TTBR1_EL2, access_rw, reset_val, 0),
|
|
|
|
EL2_REG(TCR_EL2, access_rw, reset_val, TCR_EL2_RES1),
|
2023-11-07 17:02:10 +08:00
|
|
|
EL2_REG_VNCR(VTTBR_EL2, reset_val, 0),
|
|
|
|
EL2_REG_VNCR(VTCR_EL2, reset_val, 0),
|
2023-02-10 01:58:10 +08:00
|
|
|
|
2023-10-23 17:54:43 +08:00
|
|
|
{ SYS_DESC(SYS_DACR32_EL2), trap_undef, reset_unknown, DACR32_EL2 },
|
2023-11-07 17:02:10 +08:00
|
|
|
EL2_REG_VNCR(HDFGRTR_EL2, reset_val, 0),
|
|
|
|
EL2_REG_VNCR(HDFGWTR_EL2, reset_val, 0),
|
2023-12-19 18:06:58 +08:00
|
|
|
EL2_REG_VNCR(HAFGRTR_EL2, reset_val, 0),
|
2023-11-07 17:02:10 +08:00
|
|
|
EL2_REG_REDIR(SPSR_EL2, reset_val, 0),
|
|
|
|
EL2_REG_REDIR(ELR_EL2, reset_val, 0),
|
2023-02-10 01:58:10 +08:00
|
|
|
{ SYS_DESC(SYS_SP_EL1), access_sp_el1},
|
|
|
|
|
2023-10-23 17:54:44 +08:00
|
|
|
/* AArch32 SPSR_* are RES0 if trapped from a NV guest */
|
|
|
|
{ SYS_DESC(SYS_SPSR_irq), .access = trap_raz_wi,
|
|
|
|
.visibility = hidden_user_visibility },
|
|
|
|
{ SYS_DESC(SYS_SPSR_abt), .access = trap_raz_wi,
|
|
|
|
.visibility = hidden_user_visibility },
|
|
|
|
{ SYS_DESC(SYS_SPSR_und), .access = trap_raz_wi,
|
|
|
|
.visibility = hidden_user_visibility },
|
|
|
|
{ SYS_DESC(SYS_SPSR_fiq), .access = trap_raz_wi,
|
|
|
|
.visibility = hidden_user_visibility },
|
|
|
|
|
2023-10-23 17:54:43 +08:00
|
|
|
{ SYS_DESC(SYS_IFSR32_EL2), trap_undef, reset_unknown, IFSR32_EL2 },
|
2023-02-10 01:58:10 +08:00
|
|
|
EL2_REG(AFSR0_EL2, access_rw, reset_val, 0),
|
|
|
|
EL2_REG(AFSR1_EL2, access_rw, reset_val, 0),
|
2023-11-07 17:02:10 +08:00
|
|
|
EL2_REG_REDIR(ESR_EL2, reset_val, 0),
|
2023-10-23 17:54:43 +08:00
|
|
|
{ SYS_DESC(SYS_FPEXC32_EL2), trap_undef, reset_val, FPEXC32_EL2, 0x700 },
|
2023-02-10 01:58:10 +08:00
|
|
|
|
2023-11-07 17:02:10 +08:00
|
|
|
EL2_REG_REDIR(FAR_EL2, reset_val, 0),
|
2023-02-10 01:58:10 +08:00
|
|
|
EL2_REG(HPFAR_EL2, access_rw, reset_val, 0),
|
|
|
|
|
|
|
|
EL2_REG(MAIR_EL2, access_rw, reset_val, 0),
|
|
|
|
EL2_REG(AMAIR_EL2, access_rw, reset_val, 0),
|
|
|
|
|
|
|
|
EL2_REG(VBAR_EL2, access_rw, reset_val, 0),
|
|
|
|
EL2_REG(RVBAR_EL2, access_rw, reset_val, 0),
|
|
|
|
{ SYS_DESC(SYS_RMR_EL2), trap_undef },
|
|
|
|
|
|
|
|
EL2_REG(CONTEXTIDR_EL2, access_rw, reset_val, 0),
|
|
|
|
EL2_REG(TPIDR_EL2, access_rw, reset_val, 0),
|
|
|
|
|
2023-11-07 17:02:10 +08:00
|
|
|
EL2_REG_VNCR(CNTVOFF_EL2, reset_val, 0),
|
2023-02-10 01:58:10 +08:00
|
|
|
EL2_REG(CNTHCTL_EL2, access_rw, reset_val, 0),
|
|
|
|
|
2023-02-10 01:58:18 +08:00
|
|
|
EL12_REG(CNTKCTL, access_rw, reset_val, 0),
|
|
|
|
|
2023-02-10 01:58:10 +08:00
|
|
|
EL2_REG(SP_EL2, NULL, reset_unknown, 0),
|
2013-02-07 18:32:33 +08:00
|
|
|
};
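
/*
 * What follows is the emulation of TLBI instructions trapped from a guest
 * hypervisor when running with nested virtualization. Each handler first
 * checks that the requested TLBI variant is allowed by the VM's feature
 * set, and then unmaps the relevant portions of the shadow stage-2 page
 * tables so that they get rebuilt from the guest's updated translations.
 */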

static bool kvm_supported_tlbi_s12_op(struct kvm_vcpu *vcpu, u32 instr)
{
	struct kvm *kvm = vcpu->kvm;
	u8 CRm = sys_reg_CRm(instr);

	if (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
	    !kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))
		return false;

	if (CRm == TLBI_CRm_nROS &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
		return false;

	return true;
}
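
/*
 * Note on the feature checks above (and in the IPAS2 variant below): the
 * nXS forms of the TLBI instructions only exist with FEAT_XS
 * (ID_AA64ISAR1_EL1.XS), while the outer-shareable and range forms depend
 * on ID_AA64ISAR0_EL1.TLB advertising OS/RANGE support. Unsupported
 * encodings are reported back to the guest as UNDEFINED by the callers.
 */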

static bool handle_alle1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);

	if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding)) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	write_lock(&vcpu->kvm->mmu_lock);

	/*
	 * Drop all shadow S2s, resulting in S1/S2 TLBIs for each of the
	 * corresponding VMIDs.
	 */
	kvm_nested_s2_unmap(vcpu->kvm);

	write_unlock(&vcpu->kvm->mmu_lock);

	return true;
}

static bool kvm_supported_tlbi_ipas2_op(struct kvm_vcpu *vcpu, u32 instr)
{
	struct kvm *kvm = vcpu->kvm;
	u8 CRm = sys_reg_CRm(instr);
	u8 Op2 = sys_reg_Op2(instr);

	if (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
	    !kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))
		return false;

	if (CRm == TLBI_CRm_IPAIS && (Op2 == 2 || Op2 == 6) &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
		return false;

	if (CRm == TLBI_CRm_IPAONS && (Op2 == 0 || Op2 == 4) &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
		return false;

	if (CRm == TLBI_CRm_IPAONS && (Op2 == 3 || Op2 == 7) &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
		return false;

	return true;
}

/* Only defined here as this is an internal "abstraction" */
union tlbi_info {
	struct {
		u64	start;
		u64	size;
	} range;

	struct {
		u64	addr;
	} ipa;

	struct {
		u64	addr;
		u32	encoding;
	} va;
};
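
/*
 * The union above is the payload handed to kvm_s2_mmu_iterate_by_vmid():
 * each TLBI handler fills in the member matching its operation (a VA plus
 * instruction encoding, an IPA, or an address range) and passes a callback
 * that is invoked for every shadow stage-2 MMU tagged with the guest's
 * VMID.
 */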

static void s2_mmu_unmap_range(struct kvm_s2_mmu *mmu,
			       const union tlbi_info *info)
{
	kvm_stage2_unmap_range(mmu, info->range.start, info->range.size);
}

static bool handle_vmalls12e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
				const struct sys_reg_desc *r)
{
	u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
	u64 limit, vttbr;

	if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding)) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
	limit = BIT_ULL(kvm_get_pa_bits(vcpu->kvm));

	kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
				   &(union tlbi_info) {
					   .range = {
						   .start = 0,
						   .size = limit,
					   },
				   },
				   s2_mmu_unmap_range);

	return true;
}
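
/*
 * TLBI VMALLS12E1(IS) is thus emulated by unmapping the whole [0, 2^PA)
 * IPA range of every shadow stage-2 matching the VMID currently
 * programmed in the guest's VTTBR_EL2, forcing both stages to be rebuilt
 * on the next fault.
 */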

static bool handle_ripas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
	u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
	u64 base, range, tg, num, scale;
	int shift;

	if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding)) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	/*
	 * Because the shadow S2 structure doesn't necessarily reflect that
	 * of the guest's S2 (different base granule size, for example), we
	 * decide to ignore TTL and only use the described range.
	 */
	tg	= FIELD_GET(GENMASK(47, 46), p->regval);
	scale	= FIELD_GET(GENMASK(45, 44), p->regval);
	num	= FIELD_GET(GENMASK(43, 39), p->regval);
	base	= p->regval & GENMASK(36, 0);

	switch (tg) {
	case 1:
		shift = 12;
		break;
	case 2:
		shift = 14;
		break;
	case 3:
	default:		/* IMPDEF: handle tg==0 as 64k */
		shift = 16;
		break;
	}

	base <<= shift;
	range = __TLBI_RANGE_PAGES(num, scale) << shift;

	kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
				   &(union tlbi_info) {
					   .range = {
						   .start = base,
						   .size = range,
					   },
				   },
				   s2_mmu_unmap_range);

	return true;
}
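
/*
 * The decode above follows the layout of the range TLBI payload: TG in
 * bits [47:46], SCALE in [45:44], NUM in [43:39] and BaseADDR in [36:0],
 * with the invalidated size derived via __TLBI_RANGE_PAGES(). The TTL
 * hint is deliberately ignored, as explained in the comment in the
 * function.
 */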

static void s2_mmu_unmap_ipa(struct kvm_s2_mmu *mmu,
			     const union tlbi_info *info)
{
	unsigned long max_size;
	u64 base_addr;

	/*
	 * We drop a number of things from the supplied value:
	 *
	 * - NS bit: we're non-secure only.
	 *
	 * - IPA[51:48]: We don't support 52bit IPA just yet...
	 *
	 * And of course, adjust the IPA to be on an actual address.
	 */
	base_addr = (info->ipa.addr & GENMASK_ULL(35, 0)) << 12;
	max_size = compute_tlb_inval_range(mmu, info->ipa.addr);
	base_addr &= ~(max_size - 1);

	kvm_stage2_unmap_range(mmu, base_addr, max_size);
}

static bool handle_ipas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
	u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);

	if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding)) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
				   &(union tlbi_info) {
					   .ipa = {
						   .addr = p->regval,
					   },
				   },
				   s2_mmu_unmap_ipa);

	return true;
}
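
/*
 * For by-IPA invalidation, s2_mmu_unmap_ipa() rounds the target address
 * down to the size returned by compute_tlb_inval_range() before
 * unmapping, so that an invalidation hitting a larger block mapping in
 * the shadow stage-2 removes the whole block rather than a single page.
 */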

static void s2_mmu_tlbi_s1e1(struct kvm_s2_mmu *mmu,
			     const union tlbi_info *info)
{
	WARN_ON(__kvm_tlbi_s1e2(mmu, info->va.addr, info->va.encoding));
}

static bool handle_tlbi_el1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
	u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);

	/*
	 * If we're here, this is because we've trapped on an EL1 TLBI
	 * instruction that affects the EL1 translation regime while
	 * we're running in a context that doesn't allow us to let the
	 * HW do its thing (aka vEL2):
	 *
	 * - HCR_EL2.E2H == 0 : a non-VHE guest
	 * - HCR_EL2.{E2H,TGE} == { 1, 0 } : a VHE guest in guest mode
	 *
	 * We don't expect these helpers to ever be called when running
	 * in a vEL1 context.
	 */

	WARN_ON(!vcpu_is_el2(vcpu));

	if (!kvm_supported_tlbi_s1e1_op(vcpu, sys_encoding)) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
				   &(union tlbi_info) {
					   .va = {
						   .addr = p->regval,
						   .encoding = sys_encoding,
					   },
				   },
				   s2_mmu_tlbi_s1e1);

	return true;
}

#define SYS_INSN(insn, access_fn) \
	{ \
		SYS_DESC(OP_##insn), \
		.access = (access_fn), \
	}
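
/*
 * Table of trapped system instructions: set/way cache maintenance (the
 * DC *SW operations) and the TLBI space. EL1 TLBIs are forwarded to
 * handle_tlbi_el1(), the stage-1/2 and by-IPA operations get their
 * dedicated handlers, and a number of EL2 forms are treated as UNDEF.
 */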
static struct sys_reg_desc sys_insn_descs[] = {
	{ SYS_DESC(SYS_DC_ISW), access_dcsw },
	{ SYS_DESC(SYS_DC_IGSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_IGDSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_CSW), access_dcsw },
	{ SYS_DESC(SYS_DC_CGSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_CGDSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_CISW), access_dcsw },
	{ SYS_DESC(SYS_DC_CIGSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_CIGDSW), access_dcgsw },

	SYS_INSN(TLBI_VMALLE1OS, handle_tlbi_el1),
	SYS_INSN(TLBI_VAE1OS, handle_tlbi_el1),
	SYS_INSN(TLBI_ASIDE1OS, handle_tlbi_el1),
	SYS_INSN(TLBI_VAAE1OS, handle_tlbi_el1),
	SYS_INSN(TLBI_VALE1OS, handle_tlbi_el1),
	SYS_INSN(TLBI_VAALE1OS, handle_tlbi_el1),

	SYS_INSN(TLBI_RVAE1IS, handle_tlbi_el1),
	SYS_INSN(TLBI_RVAAE1IS, handle_tlbi_el1),
	SYS_INSN(TLBI_RVALE1IS, handle_tlbi_el1),
	SYS_INSN(TLBI_RVAALE1IS, handle_tlbi_el1),

	SYS_INSN(TLBI_VMALLE1IS, handle_tlbi_el1),
	SYS_INSN(TLBI_VAE1IS, handle_tlbi_el1),
	SYS_INSN(TLBI_ASIDE1IS, handle_tlbi_el1),
	SYS_INSN(TLBI_VAAE1IS, handle_tlbi_el1),
	SYS_INSN(TLBI_VALE1IS, handle_tlbi_el1),
	SYS_INSN(TLBI_VAALE1IS, handle_tlbi_el1),

	SYS_INSN(TLBI_RVAE1OS, handle_tlbi_el1),
	SYS_INSN(TLBI_RVAAE1OS, handle_tlbi_el1),
	SYS_INSN(TLBI_RVALE1OS, handle_tlbi_el1),
	SYS_INSN(TLBI_RVAALE1OS, handle_tlbi_el1),

	SYS_INSN(TLBI_RVAE1, handle_tlbi_el1),
	SYS_INSN(TLBI_RVAAE1, handle_tlbi_el1),
	SYS_INSN(TLBI_RVALE1, handle_tlbi_el1),
	SYS_INSN(TLBI_RVAALE1, handle_tlbi_el1),

	SYS_INSN(TLBI_VMALLE1, handle_tlbi_el1),
	SYS_INSN(TLBI_VAE1, handle_tlbi_el1),
	SYS_INSN(TLBI_ASIDE1, handle_tlbi_el1),
	SYS_INSN(TLBI_VAAE1, handle_tlbi_el1),
	SYS_INSN(TLBI_VALE1, handle_tlbi_el1),
	SYS_INSN(TLBI_VAALE1, handle_tlbi_el1),

	SYS_INSN(TLBI_VMALLE1OSNXS, handle_tlbi_el1),
	SYS_INSN(TLBI_VAE1OSNXS, handle_tlbi_el1),
	SYS_INSN(TLBI_ASIDE1OSNXS, handle_tlbi_el1),
	SYS_INSN(TLBI_VAAE1OSNXS, handle_tlbi_el1),
	SYS_INSN(TLBI_VALE1OSNXS, handle_tlbi_el1),
	SYS_INSN(TLBI_VAALE1OSNXS, handle_tlbi_el1),

	SYS_INSN(TLBI_RVAE1ISNXS, handle_tlbi_el1),
	SYS_INSN(TLBI_RVAAE1ISNXS, handle_tlbi_el1),
	SYS_INSN(TLBI_RVALE1ISNXS, handle_tlbi_el1),
	SYS_INSN(TLBI_RVAALE1ISNXS, handle_tlbi_el1),

	SYS_INSN(TLBI_VMALLE1ISNXS, handle_tlbi_el1),
	SYS_INSN(TLBI_VAE1ISNXS, handle_tlbi_el1),
	SYS_INSN(TLBI_ASIDE1ISNXS, handle_tlbi_el1),
	SYS_INSN(TLBI_VAAE1ISNXS, handle_tlbi_el1),
	SYS_INSN(TLBI_VALE1ISNXS, handle_tlbi_el1),
	SYS_INSN(TLBI_VAALE1ISNXS, handle_tlbi_el1),

	SYS_INSN(TLBI_RVAE1OSNXS, handle_tlbi_el1),
	SYS_INSN(TLBI_RVAAE1OSNXS, handle_tlbi_el1),
	SYS_INSN(TLBI_RVALE1OSNXS, handle_tlbi_el1),
	SYS_INSN(TLBI_RVAALE1OSNXS, handle_tlbi_el1),

	SYS_INSN(TLBI_RVAE1NXS, handle_tlbi_el1),
	SYS_INSN(TLBI_RVAAE1NXS, handle_tlbi_el1),
	SYS_INSN(TLBI_RVALE1NXS, handle_tlbi_el1),
	SYS_INSN(TLBI_RVAALE1NXS, handle_tlbi_el1),

	SYS_INSN(TLBI_VMALLE1NXS, handle_tlbi_el1),
	SYS_INSN(TLBI_VAE1NXS, handle_tlbi_el1),
	SYS_INSN(TLBI_ASIDE1NXS, handle_tlbi_el1),
	SYS_INSN(TLBI_VAAE1NXS, handle_tlbi_el1),
	SYS_INSN(TLBI_VALE1NXS, handle_tlbi_el1),
	SYS_INSN(TLBI_VAALE1NXS, handle_tlbi_el1),

	SYS_INSN(TLBI_IPAS2E1IS, handle_ipas2e1is),
	SYS_INSN(TLBI_RIPAS2E1IS, handle_ripas2e1is),
	SYS_INSN(TLBI_IPAS2LE1IS, handle_ipas2e1is),
	SYS_INSN(TLBI_RIPAS2LE1IS, handle_ripas2e1is),

	SYS_INSN(TLBI_ALLE2OS, trap_undef),
	SYS_INSN(TLBI_VAE2OS, trap_undef),
	SYS_INSN(TLBI_ALLE1OS, handle_alle1is),
	SYS_INSN(TLBI_VALE2OS, trap_undef),
	SYS_INSN(TLBI_VMALLS12E1OS, handle_vmalls12e1is),

	SYS_INSN(TLBI_RVAE2IS, trap_undef),
	SYS_INSN(TLBI_RVALE2IS, trap_undef),

	SYS_INSN(TLBI_ALLE1IS, handle_alle1is),
	SYS_INSN(TLBI_VMALLS12E1IS, handle_vmalls12e1is),
	SYS_INSN(TLBI_IPAS2E1OS, handle_ipas2e1is),
	SYS_INSN(TLBI_IPAS2E1, handle_ipas2e1is),
	SYS_INSN(TLBI_RIPAS2E1, handle_ripas2e1is),
	SYS_INSN(TLBI_RIPAS2E1OS, handle_ripas2e1is),
	SYS_INSN(TLBI_IPAS2LE1OS, handle_ipas2e1is),
	SYS_INSN(TLBI_IPAS2LE1, handle_ipas2e1is),
	SYS_INSN(TLBI_RIPAS2LE1, handle_ripas2e1is),
	SYS_INSN(TLBI_RIPAS2LE1OS, handle_ripas2e1is),
	SYS_INSN(TLBI_RVAE2OS, trap_undef),
	SYS_INSN(TLBI_RVALE2OS, trap_undef),
	SYS_INSN(TLBI_RVAE2, trap_undef),
	SYS_INSN(TLBI_RVALE2, trap_undef),
	SYS_INSN(TLBI_ALLE1, handle_alle1is),
	SYS_INSN(TLBI_VMALLS12E1, handle_vmalls12e1is),

	SYS_INSN(TLBI_IPAS2E1ISNXS, handle_ipas2e1is),
	SYS_INSN(TLBI_RIPAS2E1ISNXS, handle_ripas2e1is),
	SYS_INSN(TLBI_IPAS2LE1ISNXS, handle_ipas2e1is),
	SYS_INSN(TLBI_RIPAS2LE1ISNXS, handle_ripas2e1is),

	SYS_INSN(TLBI_ALLE2OSNXS, trap_undef),
	SYS_INSN(TLBI_VAE2OSNXS, trap_undef),
	SYS_INSN(TLBI_ALLE1OSNXS, handle_alle1is),
	SYS_INSN(TLBI_VALE2OSNXS, trap_undef),
	SYS_INSN(TLBI_VMALLS12E1OSNXS, handle_vmalls12e1is),

	SYS_INSN(TLBI_RVAE2ISNXS, trap_undef),
	SYS_INSN(TLBI_RVALE2ISNXS, trap_undef),
	SYS_INSN(TLBI_ALLE2ISNXS, trap_undef),
	SYS_INSN(TLBI_VAE2ISNXS, trap_undef),

	SYS_INSN(TLBI_ALLE1ISNXS, handle_alle1is),
	SYS_INSN(TLBI_VALE2ISNXS, trap_undef),
	SYS_INSN(TLBI_VMALLS12E1ISNXS, handle_vmalls12e1is),
	SYS_INSN(TLBI_IPAS2E1OSNXS, handle_ipas2e1is),
	SYS_INSN(TLBI_IPAS2E1NXS, handle_ipas2e1is),
	SYS_INSN(TLBI_RIPAS2E1NXS, handle_ripas2e1is),
	SYS_INSN(TLBI_RIPAS2E1OSNXS, handle_ripas2e1is),
	SYS_INSN(TLBI_IPAS2LE1OSNXS, handle_ipas2e1is),
	SYS_INSN(TLBI_IPAS2LE1NXS, handle_ipas2e1is),
	SYS_INSN(TLBI_RIPAS2LE1NXS, handle_ripas2e1is),
	SYS_INSN(TLBI_RIPAS2LE1OSNXS, handle_ripas2e1is),
	SYS_INSN(TLBI_RVAE2OSNXS, trap_undef),
	SYS_INSN(TLBI_RVALE2OSNXS, trap_undef),
	SYS_INSN(TLBI_RVAE2NXS, trap_undef),
	SYS_INSN(TLBI_RVALE2NXS, trap_undef),
	SYS_INSN(TLBI_ALLE2NXS, trap_undef),
	SYS_INSN(TLBI_VAE2NXS, trap_undef),
	SYS_INSN(TLBI_ALLE1NXS, handle_alle1is),
	SYS_INSN(TLBI_VALE2NXS, trap_undef),
	SYS_INSN(TLBI_VMALLS12E1NXS, handle_vmalls12e1is),
};

static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u64 dfr = kvm_read_vm_id_reg(vcpu->kvm, SYS_ID_AA64DFR0_EL1);
		u32 el3 = kvm_has_feat(vcpu->kvm, ID_AA64PFR0_EL1, EL3, IMP);

		p->regval = ((SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr) << 28) |
			     (SYS_FIELD_GET(ID_AA64DFR0_EL1, BRPs, dfr) << 24) |
			     (SYS_FIELD_GET(ID_AA64DFR0_EL1, CTX_CMPs, dfr) << 20) |
			     (SYS_FIELD_GET(ID_AA64DFR0_EL1, DebugVer, dfr) << 16) |
			     (1 << 15) | (el3 << 14) | (el3 << 12));
		return true;
	}
}
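
/*
 * The DBGDIDR value above is synthesized from the VM's sanitised
 * ID_AA64DFR0_EL1 view: the watchpoint, breakpoint and context-comparator
 * counts and the debug version are repacked into the AArch32 layout, and
 * the remaining bits reflect whether EL3 is implemented for this VM.
 */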
2020-10-30 01:17:23 +08:00
|
|
|
/*
|
|
|
|
* AArch32 debug register mappings
|
2015-07-08 00:30:00 +08:00
|
|
|
*
|
|
|
|
* AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
|
|
|
|
* AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
|
|
|
|
*
|
2020-10-30 01:17:23 +08:00
|
|
|
* None of the other registers share their location, so treat them as
|
|
|
|
* if they were 64bit.
|
2015-07-08 00:30:00 +08:00
|
|
|
*/
|
2020-10-30 01:17:23 +08:00
|
|
|
#define DBG_BCR_BVR_WCR_WVR(n) \
|
|
|
|
/* DBGBVRn */ \
|
|
|
|
{ AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, \
|
|
|
|
/* DBGBCRn */ \
|
|
|
|
{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n }, \
|
|
|
|
/* DBGWVRn */ \
|
|
|
|
{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n }, \
|
|
|
|
/* DBGWCRn */ \
|
2015-07-08 00:30:00 +08:00
|
|
|
{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }
|
|
|
|
|
2020-10-30 01:17:23 +08:00
|
|
|
#define DBGBXVR(n) \
|
|
|
|
{ AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_bvr, NULL, n }
|
2014-04-24 17:31:37 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Trapped cp14 registers. We generally ignore most of the external
|
|
|
|
* debug, on the principle that they don't really make sense to a
|
2015-07-08 00:30:00 +08:00
|
|
|
 * guest. Revisit this one day, should this principle change.
|
2014-04-24 17:31:37 +08:00
|
|
|
*/
|
2014-04-24 17:27:13 +08:00
|
|
|
static const struct sys_reg_desc cp14_regs[] = {
|
2021-01-28 21:28:23 +08:00
|
|
|
/* DBGDIDR */
|
|
|
|
{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgdidr },
|
2014-04-24 17:31:37 +08:00
|
|
|
/* DBGDTRRXext */
|
|
|
|
{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
|
|
|
|
|
|
|
|
DBG_BCR_BVR_WCR_WVR(0),
|
|
|
|
/* DBGDSCRint */
|
|
|
|
{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
|
|
|
|
DBG_BCR_BVR_WCR_WVR(1),
|
|
|
|
/* DBGDCCINT */
|
2020-10-30 01:17:23 +08:00
|
|
|
{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug_regs, NULL, MDCCINT_EL1 },
|
2014-04-24 17:31:37 +08:00
|
|
|
/* DBGDSCRext */
|
2020-10-30 01:17:23 +08:00
|
|
|
{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug_regs, NULL, MDSCR_EL1 },
|
2014-04-24 17:31:37 +08:00
|
|
|
DBG_BCR_BVR_WCR_WVR(2),
|
|
|
|
/* DBGDTR[RT]Xint */
|
|
|
|
{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
|
|
|
|
/* DBGDTR[RT]Xext */
|
|
|
|
{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
|
|
|
|
DBG_BCR_BVR_WCR_WVR(3),
|
|
|
|
DBG_BCR_BVR_WCR_WVR(4),
|
|
|
|
DBG_BCR_BVR_WCR_WVR(5),
|
|
|
|
/* DBGWFAR */
|
|
|
|
{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
|
|
|
|
/* DBGOSECCR */
|
|
|
|
{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
|
|
|
|
DBG_BCR_BVR_WCR_WVR(6),
|
|
|
|
/* DBGVCR */
|
2020-10-30 01:17:23 +08:00
|
|
|
{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug_regs, NULL, DBGVCR32_EL2 },
|
2014-04-24 17:31:37 +08:00
|
|
|
DBG_BCR_BVR_WCR_WVR(7),
|
|
|
|
DBG_BCR_BVR_WCR_WVR(8),
|
|
|
|
DBG_BCR_BVR_WCR_WVR(9),
|
|
|
|
DBG_BCR_BVR_WCR_WVR(10),
|
|
|
|
DBG_BCR_BVR_WCR_WVR(11),
|
|
|
|
DBG_BCR_BVR_WCR_WVR(12),
|
|
|
|
DBG_BCR_BVR_WCR_WVR(13),
|
|
|
|
DBG_BCR_BVR_WCR_WVR(14),
|
|
|
|
DBG_BCR_BVR_WCR_WVR(15),
|
|
|
|
|
|
|
|
/* DBGDRAR (32bit) */
|
|
|
|
{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },
|
|
|
|
|
|
|
|
DBGBXVR(0),
|
|
|
|
/* DBGOSLAR */
|
2022-02-04 01:41:56 +08:00
|
|
|
{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_oslar_el1 },
|
2014-04-24 17:31:37 +08:00
|
|
|
DBGBXVR(1),
|
|
|
|
/* DBGOSLSR */
|
2022-02-04 01:41:55 +08:00
|
|
|
{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1, NULL, OSLSR_EL1 },
|
2014-04-24 17:31:37 +08:00
|
|
|
DBGBXVR(2),
|
|
|
|
DBGBXVR(3),
|
|
|
|
/* DBGOSDLR */
|
|
|
|
{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
|
|
|
|
DBGBXVR(4),
|
|
|
|
/* DBGPRCR */
|
|
|
|
{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
|
|
|
|
DBGBXVR(5),
|
|
|
|
DBGBXVR(6),
|
|
|
|
DBGBXVR(7),
|
|
|
|
DBGBXVR(8),
|
|
|
|
DBGBXVR(9),
|
|
|
|
DBGBXVR(10),
|
|
|
|
DBGBXVR(11),
|
|
|
|
DBGBXVR(12),
|
|
|
|
DBGBXVR(13),
|
|
|
|
DBGBXVR(14),
|
|
|
|
DBGBXVR(15),
|
|
|
|
|
|
|
|
/* DBGDSAR (32bit) */
|
|
|
|
{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },
|
|
|
|
|
|
|
|
/* DBGDEVID2 */
|
|
|
|
{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
|
|
|
|
/* DBGDEVID1 */
|
|
|
|
{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
|
|
|
|
/* DBGDEVID */
|
|
|
|
{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
|
|
|
|
/* DBGCLAIMSET */
|
|
|
|
{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
|
|
|
|
/* DBGCLAIMCLR */
|
|
|
|
{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
|
|
|
|
/* DBGAUTHSTATUS */
|
|
|
|
{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
|
2014-04-24 17:27:13 +08:00
|
|
|
};
|
|
|
|
|
2014-04-24 21:11:48 +08:00
|
|
|
/* Trapped cp14 64bit registers */
|
|
|
|
static const struct sys_reg_desc cp14_64_regs[] = {
|
2014-04-24 17:31:37 +08:00
|
|
|
/* DBGDRAR (64bit) */
|
|
|
|
{ Op1( 0), CRm( 1), .access = trap_raz_wi },
|
|
|
|
|
|
|
|
/* DBGDSAR (64bit) */
|
|
|
|
{ Op1( 0), CRm( 2), .access = trap_raz_wi },
|
2014-04-24 21:11:48 +08:00
|
|
|
};
|
|
|
|
|
2022-05-03 14:02:04 +08:00
|
|
|
#define CP15_PMU_SYS_REG(_map, _Op1, _CRn, _CRm, _Op2) \
|
|
|
|
AA32(_map), \
|
|
|
|
Op1(_Op1), CRn(_CRn), CRm(_CRm), Op2(_Op2), \
|
|
|
|
.visibility = pmu_visibility
|
|
|
|
|
2015-12-08 15:29:06 +08:00
|
|
|
/* Macro to expand the PMEVCNTRn register */
|
|
|
|
#define PMU_PMEVCNTR(n) \
|
2022-05-03 14:02:04 +08:00
|
|
|
{ CP15_PMU_SYS_REG(DIRECT, 0, 0b1110, \
|
|
|
|
(0b1000 | (((n) >> 3) & 0x3)), ((n) & 0x7)), \
|
|
|
|
.access = access_pmu_evcntr }
|
2015-12-08 15:29:06 +08:00
|
|
|
|
2016-02-23 11:11:27 +08:00
|
|
|
/* Macro to expand the PMEVTYPERn register */
|
|
|
|
#define PMU_PMEVTYPER(n) \
|
2022-05-03 14:02:04 +08:00
|
|
|
{ CP15_PMU_SYS_REG(DIRECT, 0, 0b1110, \
|
|
|
|
(0b1100 | (((n) >> 3) & 0x3)), ((n) & 0x7)), \
|
|
|
|
.access = access_pmu_evtyper }
|
2014-01-15 02:00:55 +08:00
|
|
|
/*
|
|
|
|
* Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
|
|
|
|
* depending on the way they are accessed (as a 32bit or a 64bit
|
|
|
|
* register).
|
|
|
|
*/
|
2013-02-07 18:32:33 +08:00
|
|
|
static const struct sys_reg_desc cp15_regs[] = {
|
2019-01-31 21:17:17 +08:00
|
|
|
{ Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
|
2020-10-30 01:14:20 +08:00
|
|
|
{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, SCTLR_EL1 },
|
|
|
|
/* ACTLR */
|
|
|
|
{ AA32(LO), Op1( 0), CRn( 1), CRm( 0), Op2( 1), access_actlr, NULL, ACTLR_EL1 },
|
|
|
|
/* ACTLR2 */
|
|
|
|
{ AA32(HI), Op1( 0), CRn( 1), CRm( 0), Op2( 3), access_actlr, NULL, ACTLR_EL1 },
|
|
|
|
{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
|
|
|
|
{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, TTBR1_EL1 },
|
|
|
|
/* TTBCR */
|
|
|
|
{ AA32(LO), Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, TCR_EL1 },
|
|
|
|
/* TTBCR2 */
|
|
|
|
{ AA32(HI), Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, TCR_EL1 },
|
|
|
|
{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, DACR32_EL2 },
|
|
|
|
/* DFSR */
|
|
|
|
{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, ESR_EL1 },
|
|
|
|
{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, IFSR32_EL2 },
|
|
|
|
/* ADFSR */
|
|
|
|
{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, AFSR0_EL1 },
|
|
|
|
/* AIFSR */
|
|
|
|
{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, AFSR1_EL1 },
|
|
|
|
/* DFAR */
|
|
|
|
{ AA32(LO), Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, FAR_EL1 },
|
|
|
|
/* IFAR */
|
|
|
|
{ AA32(HI), Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, FAR_EL1 },
|
2014-01-15 02:00:55 +08:00
|
|
|
|
2013-02-07 18:32:33 +08:00
|
|
|
/*
|
|
|
|
* DC{C,I,CI}SW operations:
|
|
|
|
*/
|
|
|
|
{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
|
|
|
|
{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
|
|
|
|
{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
|
2014-01-15 02:00:55 +08:00
|
|
|
|
2014-04-24 17:21:16 +08:00
|
|
|
/* PMU */
|
2022-05-03 14:02:04 +08:00
|
|
|
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 0), .access = access_pmcr },
|
|
|
|
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 1), .access = access_pmcnten },
|
|
|
|
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 2), .access = access_pmcnten },
|
|
|
|
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 3), .access = access_pmovs },
|
|
|
|
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 4), .access = access_pmswinc },
|
|
|
|
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 5), .access = access_pmselr },
|
|
|
|
{ CP15_PMU_SYS_REG(LO, 0, 9, 12, 6), .access = access_pmceid },
|
|
|
|
{ CP15_PMU_SYS_REG(LO, 0, 9, 12, 7), .access = access_pmceid },
|
|
|
|
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 0), .access = access_pmu_evcntr },
|
|
|
|
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 1), .access = access_pmu_evtyper },
|
|
|
|
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 2), .access = access_pmu_evcntr },
|
|
|
|
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 0), .access = access_pmuserenr },
|
|
|
|
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 1), .access = access_pminten },
|
|
|
|
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 2), .access = access_pminten },
|
|
|
|
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 3), .access = access_pmovs },
|
|
|
|
{ CP15_PMU_SYS_REG(HI, 0, 9, 14, 4), .access = access_pmceid },
|
|
|
|
{ CP15_PMU_SYS_REG(HI, 0, 9, 14, 5), .access = access_pmceid },
|
2020-02-17 02:17:22 +08:00
|
|
|
/* PMMIR */
|
2022-05-03 14:02:04 +08:00
|
|
|
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 6), .access = trap_raz_wi },
|
2014-01-15 02:00:55 +08:00
|
|
|
|
2020-10-30 01:14:20 +08:00
|
|
|
/* PRRR/MAIR0 */
|
|
|
|
{ AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 },
|
|
|
|
/* NMRR/MAIR1 */
|
|
|
|
{ AA32(HI), Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, MAIR_EL1 },
|
|
|
|
/* AMAIR0 */
|
|
|
|
{ AA32(LO), Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, AMAIR_EL1 },
|
|
|
|
/* AMAIR1 */
|
|
|
|
{ AA32(HI), Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, AMAIR_EL1 },
|
2014-11-19 19:23:54 +08:00
|
|
|
|
|
|
|
/* ICC_SRE */
|
2016-08-10 17:49:43 +08:00
|
|
|
{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },
|
2014-11-19 19:23:54 +08:00
|
|
|
|
2020-10-30 01:14:20 +08:00
|
|
|
{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, CONTEXTIDR_EL1 },
|
2015-12-08 15:29:06 +08:00
|
|
|
|
2018-07-05 23:48:23 +08:00
|
|
|
/* Arch timers */
|
|
|
|
{ SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
|
|
|
|
{ SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },
|
2018-02-08 19:57:19 +08:00
|
|
|
|
2015-12-08 15:29:06 +08:00
|
|
|
/* PMEVCNTRn */
|
|
|
|
PMU_PMEVCNTR(0),
|
|
|
|
PMU_PMEVCNTR(1),
|
|
|
|
PMU_PMEVCNTR(2),
|
|
|
|
PMU_PMEVCNTR(3),
|
|
|
|
PMU_PMEVCNTR(4),
|
|
|
|
PMU_PMEVCNTR(5),
|
|
|
|
PMU_PMEVCNTR(6),
|
|
|
|
PMU_PMEVCNTR(7),
|
|
|
|
PMU_PMEVCNTR(8),
|
|
|
|
PMU_PMEVCNTR(9),
|
|
|
|
PMU_PMEVCNTR(10),
|
|
|
|
PMU_PMEVCNTR(11),
|
|
|
|
PMU_PMEVCNTR(12),
|
|
|
|
PMU_PMEVCNTR(13),
|
|
|
|
PMU_PMEVCNTR(14),
|
|
|
|
PMU_PMEVCNTR(15),
|
|
|
|
PMU_PMEVCNTR(16),
|
|
|
|
PMU_PMEVCNTR(17),
|
|
|
|
PMU_PMEVCNTR(18),
|
|
|
|
PMU_PMEVCNTR(19),
|
|
|
|
PMU_PMEVCNTR(20),
|
|
|
|
PMU_PMEVCNTR(21),
|
|
|
|
PMU_PMEVCNTR(22),
|
|
|
|
PMU_PMEVCNTR(23),
|
|
|
|
PMU_PMEVCNTR(24),
|
|
|
|
PMU_PMEVCNTR(25),
|
|
|
|
PMU_PMEVCNTR(26),
|
|
|
|
PMU_PMEVCNTR(27),
|
|
|
|
PMU_PMEVCNTR(28),
|
|
|
|
PMU_PMEVCNTR(29),
|
|
|
|
PMU_PMEVCNTR(30),
|
2016-02-23 11:11:27 +08:00
|
|
|
/* PMEVTYPERn */
|
|
|
|
PMU_PMEVTYPER(0),
|
|
|
|
PMU_PMEVTYPER(1),
|
|
|
|
PMU_PMEVTYPER(2),
|
|
|
|
PMU_PMEVTYPER(3),
|
|
|
|
PMU_PMEVTYPER(4),
|
|
|
|
PMU_PMEVTYPER(5),
|
|
|
|
PMU_PMEVTYPER(6),
|
|
|
|
PMU_PMEVTYPER(7),
|
|
|
|
PMU_PMEVTYPER(8),
|
|
|
|
PMU_PMEVTYPER(9),
|
|
|
|
PMU_PMEVTYPER(10),
|
|
|
|
PMU_PMEVTYPER(11),
|
|
|
|
PMU_PMEVTYPER(12),
|
|
|
|
PMU_PMEVTYPER(13),
|
|
|
|
PMU_PMEVTYPER(14),
|
|
|
|
PMU_PMEVTYPER(15),
|
|
|
|
PMU_PMEVTYPER(16),
|
|
|
|
PMU_PMEVTYPER(17),
|
|
|
|
PMU_PMEVTYPER(18),
|
|
|
|
PMU_PMEVTYPER(19),
|
|
|
|
PMU_PMEVTYPER(20),
|
|
|
|
PMU_PMEVTYPER(21),
|
|
|
|
PMU_PMEVTYPER(22),
|
|
|
|
PMU_PMEVTYPER(23),
|
|
|
|
PMU_PMEVTYPER(24),
|
|
|
|
PMU_PMEVTYPER(25),
|
|
|
|
PMU_PMEVTYPER(26),
|
|
|
|
PMU_PMEVTYPER(27),
|
|
|
|
PMU_PMEVTYPER(28),
|
|
|
|
PMU_PMEVTYPER(29),
|
|
|
|
PMU_PMEVTYPER(30),
|
|
|
|
/* PMCCFILTR */
|
2022-05-03 14:02:04 +08:00
|
|
|
{ CP15_PMU_SYS_REG(DIRECT, 0, 14, 15, 7), .access = access_pmu_evtyper },
|
2019-01-31 21:17:17 +08:00
|
|
|
|
|
|
|
{ Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
|
|
|
|
{ Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
|
2023-01-12 10:38:51 +08:00
|
|
|
|
|
|
|
/* CCSIDR2 */
|
|
|
|
{ Op1(1), CRn( 0), CRm( 0), Op2(2), undef_access },
|
|
|
|
|
2020-10-30 01:14:20 +08:00
|
|
|
{ Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, CSSELR_EL1 },
|
2014-04-24 21:11:48 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
static const struct sys_reg_desc cp15_64_regs[] = {
|
2020-10-30 01:14:20 +08:00
|
|
|
{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
|
2022-05-03 14:02:04 +08:00
|
|
|
{ CP15_PMU_SYS_REG(DIRECT, 0, 0, 9, 0), .access = access_pmu_evcntr },
|
2018-08-06 20:03:36 +08:00
|
|
|
{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
|
2023-03-31 01:47:45 +08:00
|
|
|
{ SYS_DESC(SYS_AARCH32_CNTPCT), access_arch_timer },
|
2020-10-30 01:14:20 +08:00
|
|
|
{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 },
|
2018-08-06 20:03:36 +08:00
|
|
|
{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
|
|
|
|
{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
|
2018-07-05 23:48:23 +08:00
|
|
|
{ SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer },
|
2023-04-13 21:23:42 +08:00
|
|
|
{ SYS_DESC(SYS_AARCH32_CNTPCTSS), access_arch_timer },
|
2012-12-11 00:15:34 +08:00
|
|
|
};
|
|
|
|
|
2022-04-28 18:34:04 +08:00
|
|
|
static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
|
|
|
|
bool is_32)
|
2020-01-27 19:21:17 +08:00
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
for (i = 0; i < n; i++) {
|
|
|
|
if (!is_32 && table[i].reg && !table[i].reset) {
|
2024-04-10 23:25:03 +08:00
|
|
|
kvm_err("sys_reg table %pS entry %d (%s) lacks reset\n",
|
|
|
|
&table[i], i, table[i].name);
|
2022-04-28 18:34:04 +08:00
|
|
|
return false;
|
2020-01-27 19:21:17 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
|
2024-04-10 23:25:03 +08:00
|
|
|
kvm_err("sys_reg table %pS entry %d (%s -> %s) out of order\n",
|
|
|
|
&table[i], i, table[i - 1].name, table[i].name);
|
2022-04-28 18:34:04 +08:00
|
|
|
return false;
|
2020-01-27 19:21:17 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-04-28 18:34:04 +08:00
|
|
|
return true;
|
2020-01-27 19:21:17 +08:00
|
|
|
}
|
|
|
|
|
2020-06-23 21:14:15 +08:00
|
|
|
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu)
|
2013-02-07 18:32:33 +08:00
|
|
|
{
|
|
|
|
kvm_inject_undefined(vcpu);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2017-03-28 00:03:42 +08:00
|
|
|
static void perform_access(struct kvm_vcpu *vcpu,
|
|
|
|
struct sys_reg_params *params,
|
|
|
|
const struct sys_reg_desc *r)
|
|
|
|
{
|
2018-12-04 18:44:22 +08:00
|
|
|
trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);
|
|
|
|
|
2018-09-28 21:39:15 +08:00
|
|
|
/* Check for regs disabled by runtime config */
|
2020-11-05 17:10:20 +08:00
|
|
|
if (sysreg_hidden(vcpu, r)) {
|
2018-09-28 21:39:15 +08:00
|
|
|
kvm_inject_undefined(vcpu);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2017-03-28 00:03:42 +08:00
|
|
|
/*
|
|
|
|
* Not having an accessor means that we have configured a trap
|
|
|
|
* that we don't know how to handle. This certainly qualifies
|
|
|
|
* as a gross bug that should be fixed right away.
|
|
|
|
*/
|
|
|
|
BUG_ON(!r->access);
|
|
|
|
|
|
|
|
/* Skip instruction if instructed so */
|
|
|
|
if (likely(r->access(vcpu, params, r)))
|
2020-10-14 16:29:27 +08:00
|
|
|
kvm_incr_pc(vcpu);
|
2017-03-28 00:03:42 +08:00
|
|
|
}
|
|
|
|
|
2014-04-24 17:27:13 +08:00
|
|
|
/*
|
|
|
|
* emulate_cp -- tries to match a sys_reg access in a handling table, and
|
|
|
|
* call the corresponding trap handler.
|
|
|
|
*
|
|
|
|
* @params: pointer to the descriptor of the access
|
|
|
|
* @table: array of trap descriptors
|
|
|
|
* @num: size of the trap descriptor array
|
|
|
|
*
|
2022-05-03 14:01:59 +08:00
|
|
|
* Return true if the access has been handled, false if not.
|
2014-04-24 17:27:13 +08:00
|
|
|
*/
|
2022-05-03 14:01:59 +08:00
|
|
|
static bool emulate_cp(struct kvm_vcpu *vcpu,
|
|
|
|
struct sys_reg_params *params,
|
|
|
|
const struct sys_reg_desc *table,
|
|
|
|
size_t num)
|
2013-02-07 18:32:33 +08:00
|
|
|
{
|
2014-04-24 17:27:13 +08:00
|
|
|
const struct sys_reg_desc *r;
|
2013-02-07 18:32:33 +08:00
|
|
|
|
2014-04-24 17:27:13 +08:00
|
|
|
if (!table)
|
2022-05-03 14:01:59 +08:00
|
|
|
return false; /* Not handled */
|
2013-02-07 18:32:33 +08:00
|
|
|
|
|
|
|
r = find_reg(params, table, num);
|
|
|
|
|
2014-04-24 17:27:13 +08:00
|
|
|
if (r) {
|
2017-03-28 00:03:42 +08:00
|
|
|
perform_access(vcpu, params, r);
|
2022-05-03 14:01:59 +08:00
|
|
|
return true;
|
2014-04-24 17:27:13 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Not handled */
|
2022-05-03 14:01:59 +08:00
|
|
|
return false;
|
2014-04-24 17:27:13 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void unhandled_cp_access(struct kvm_vcpu *vcpu,
|
|
|
|
struct sys_reg_params *params)
|
|
|
|
{
|
2020-06-30 09:57:05 +08:00
|
|
|
u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
|
2016-07-14 18:19:34 +08:00
|
|
|
int cp = -1;
|
2014-04-24 17:27:13 +08:00
|
|
|
|
2020-06-30 09:57:05 +08:00
|
|
|
switch (esr_ec) {
|
2014-11-24 21:59:30 +08:00
|
|
|
case ESR_ELx_EC_CP15_32:
|
|
|
|
case ESR_ELx_EC_CP15_64:
|
2014-04-24 17:27:13 +08:00
|
|
|
cp = 15;
|
|
|
|
break;
|
2014-11-24 21:59:30 +08:00
|
|
|
case ESR_ELx_EC_CP14_MR:
|
|
|
|
case ESR_ELx_EC_CP14_64:
|
2014-04-24 17:27:13 +08:00
|
|
|
cp = 14;
|
|
|
|
break;
|
|
|
|
default:
|
2016-07-14 18:19:34 +08:00
|
|
|
WARN_ON(1);
|
2013-02-07 18:32:33 +08:00
|
|
|
}
|
|
|
|
|
2019-12-06 02:06:51 +08:00
|
|
|
print_sys_reg_msg(params,
|
|
|
|
"Unsupported guest CP%d access at: %08lx [%08lx]\n",
|
|
|
|
cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
|
2013-02-07 18:32:33 +08:00
|
|
|
kvm_inject_undefined(vcpu);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2016-01-13 17:16:40 +08:00
|
|
|
* kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
|
2013-02-07 18:32:33 +08:00
|
|
|
* @vcpu: The VCPU pointer
|
2024-01-18 07:07:11 +08:00
|
|
|
* @global: &struct sys_reg_desc
|
|
|
|
* @nr_global: size of the @global array
|
2013-02-07 18:32:33 +08:00
|
|
|
*/
|
2014-04-24 17:27:13 +08:00
|
|
|
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
|
|
|
|
const struct sys_reg_desc *global,
|
2020-06-22 19:33:16 +08:00
|
|
|
size_t nr_global)
|
2013-02-07 18:32:33 +08:00
|
|
|
{
|
|
|
|
struct sys_reg_params params;
|
2022-04-25 19:44:43 +08:00
|
|
|
u64 esr = kvm_vcpu_get_esr(vcpu);
|
2017-04-28 02:06:48 +08:00
|
|
|
int Rt = kvm_vcpu_sys_get_rt(vcpu);
|
2020-06-30 09:57:05 +08:00
|
|
|
int Rt2 = (esr >> 10) & 0x1f;
|
2013-02-07 18:32:33 +08:00
|
|
|
|
2020-06-30 09:57:05 +08:00
|
|
|
params.CRm = (esr >> 1) & 0xf;
|
|
|
|
params.is_write = ((esr & 1) == 0);
|
2013-02-07 18:32:33 +08:00
|
|
|
|
|
|
|
params.Op0 = 0;
|
2020-06-30 09:57:05 +08:00
|
|
|
params.Op1 = (esr >> 16) & 0xf;
|
2013-02-07 18:32:33 +08:00
|
|
|
params.Op2 = 0;
|
|
|
|
params.CRn = 0;
|
|
|
|
|
|
|
|
/*
|
2015-12-04 20:03:13 +08:00
|
|
|
* Make a 64-bit value out of Rt and Rt2. As we use the same trap
|
2013-02-07 18:32:33 +08:00
|
|
|
* backends between AArch32 and AArch64, we get away with it.
|
|
|
|
*/
|
|
|
|
if (params.is_write) {
|
2015-12-04 20:03:13 +08:00
|
|
|
params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
|
|
|
|
params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
|
2013-02-07 18:32:33 +08:00
|
|
|
}
|
|
|
|
|
2017-03-28 00:03:43 +08:00
|
|
|
/*
|
2020-06-22 19:33:16 +08:00
|
|
|
* If the table contains a handler, handle the
|
2017-03-28 00:03:43 +08:00
|
|
|
* potential register operation in the case of a read and return
|
|
|
|
* with success.
|
|
|
|
*/
|
2022-05-03 14:01:59 +08:00
|
|
|
if (emulate_cp(vcpu, ¶ms, global, nr_global)) {
|
2017-03-28 00:03:43 +08:00
|
|
|
/* Split up the value between registers for the read side */
|
|
|
|
if (!params.is_write) {
|
|
|
|
vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
|
|
|
|
vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
|
|
|
|
}
|
2013-02-07 18:32:33 +08:00
|
|
|
|
2017-03-28 00:03:43 +08:00
|
|
|
return 1;
|
2013-02-07 18:32:33 +08:00
|
|
|
}
|
|
|
|
|
2017-03-28 00:03:43 +08:00
|
|
|
unhandled_cp_access(vcpu, ¶ms);
|
2013-02-07 18:32:33 +08:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2022-05-03 14:02:01 +08:00
|
|
|
static bool emulate_sys_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *params);
|
|
|
|
|
2022-05-03 14:02:02 +08:00
|
|
|
/*
|
|
|
|
* The CP10 ID registers are architecturally mapped to AArch64 feature
|
|
|
|
* registers. Abuse that fact so we can rely on the AArch64 handler for accesses
|
|
|
|
* from AArch32.
|
|
|
|
*/
|
2022-05-04 15:01:05 +08:00
|
|
|
static bool kvm_esr_cp10_id_to_sys64(u64 esr, struct sys_reg_params *params)
|
2022-05-03 14:02:02 +08:00
|
|
|
{
|
|
|
|
u8 reg_id = (esr >> 10) & 0xf;
|
|
|
|
bool valid;
|
|
|
|
|
|
|
|
params->is_write = ((esr & 1) == 0);
|
|
|
|
params->Op0 = 3;
|
|
|
|
params->Op1 = 0;
|
|
|
|
params->CRn = 0;
|
|
|
|
params->CRm = 3;
|
|
|
|
|
|
|
|
/* CP10 ID registers are read-only */
|
|
|
|
valid = !params->is_write;
|
|
|
|
|
|
|
|
switch (reg_id) {
|
|
|
|
/* MVFR0 */
|
|
|
|
case 0b0111:
|
|
|
|
params->Op2 = 0;
|
|
|
|
break;
|
|
|
|
/* MVFR1 */
|
|
|
|
case 0b0110:
|
|
|
|
params->Op2 = 1;
|
|
|
|
break;
|
|
|
|
/* MVFR2 */
|
|
|
|
case 0b0101:
|
|
|
|
params->Op2 = 2;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
valid = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (valid)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
kvm_pr_unimpl("Unhandled cp10 register %s: %u\n",
|
|
|
|
params->is_write ? "write" : "read", reg_id);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* kvm_handle_cp10_id() - Handles a VMRS trap on guest access to a 'Media and
|
|
|
|
* VFP Register' from AArch32.
|
|
|
|
* @vcpu: The vCPU pointer
|
|
|
|
*
|
|
|
|
* MVFR{0-2} are architecturally mapped to the AArch64 MVFR{0-2}_EL1 registers.
|
|
|
|
* Work out the correct AArch64 system register encoding and reroute to the
|
|
|
|
* AArch64 system register emulation.
|
|
|
|
*/
|
|
|
|
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
int Rt = kvm_vcpu_sys_get_rt(vcpu);
|
2022-05-04 15:01:05 +08:00
|
|
|
u64 esr = kvm_vcpu_get_esr(vcpu);
|
2022-05-03 14:02:02 +08:00
|
|
|
struct sys_reg_params params;
|
|
|
|
|
|
|
|
/* UNDEF on any unhandled register access */
|
|
|
|
if (!kvm_esr_cp10_id_to_sys64(esr, ¶ms)) {
|
|
|
|
kvm_inject_undefined(vcpu);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (emulate_sys_reg(vcpu, ¶ms))
|
|
|
|
vcpu_set_reg(vcpu, Rt, params.regval);
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2022-05-03 14:02:01 +08:00
|
|
|
/**
|
|
|
|
* kvm_emulate_cp15_id_reg() - Handles an MRC trap on a guest CP15 access where
|
|
|
|
* CRn=0, which corresponds to the AArch32 feature
|
|
|
|
* registers.
|
|
|
|
* @vcpu: the vCPU pointer
|
|
|
|
* @params: the system register access parameters.
|
|
|
|
*
|
|
|
|
* Our cp15 system register tables do not enumerate the AArch32 feature
|
|
|
|
* registers. Conveniently, our AArch64 table does, and the AArch32 system
|
|
|
|
* register encoding can be trivially remapped into the AArch64 for the feature
|
|
|
|
* registers: Append op0=3, leaving op1, CRn, CRm, and op2 the same.
|
|
|
|
*
|
|
|
|
* According to DDI0487G.b G7.3.1, paragraph "Behavior of VMSAv8-32 32-bit
|
|
|
|
* System registers with (coproc=0b1111, CRn==c0)", read accesses from this
|
|
|
|
* range are either UNKNOWN or RES0. Rerouting remains architectural as we
|
|
|
|
* treat undefined registers in this range as RAZ.
|
|
|
|
*/
|
|
|
|
static int kvm_emulate_cp15_id_reg(struct kvm_vcpu *vcpu,
|
|
|
|
struct sys_reg_params *params)
|
|
|
|
{
|
|
|
|
int Rt = kvm_vcpu_sys_get_rt(vcpu);
|
|
|
|
|
|
|
|
/* Treat impossible writes to RO registers as UNDEFINED */
|
|
|
|
if (params->is_write) {
|
|
|
|
unhandled_cp_access(vcpu, params);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
params->Op0 = 3;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* All registers where CRm > 3 are known to be UNKNOWN/RAZ from AArch32.
|
|
|
|
* Avoid conflicting with future expansion of AArch64 feature registers
|
|
|
|
* and simply treat them as RAZ here.
|
|
|
|
*/
|
|
|
|
if (params->CRm > 3)
|
|
|
|
params->regval = 0;
|
|
|
|
else if (!emulate_sys_reg(vcpu, params))
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
vcpu_set_reg(vcpu, Rt, params->regval);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2013-02-07 18:32:33 +08:00
|
|
|
/**
|
2016-01-13 17:16:40 +08:00
|
|
|
* kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
|
2013-02-07 18:32:33 +08:00
|
|
|
* @vcpu: The VCPU pointer
|
2024-01-18 07:07:11 +08:00
|
|
|
* @params: &struct sys_reg_params
|
|
|
|
* @global: &struct sys_reg_desc
|
|
|
|
* @nr_global: size of the @global array
|
2013-02-07 18:32:33 +08:00
|
|
|
*/
|
2014-04-24 17:27:13 +08:00
|
|
|
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
|
2022-05-03 14:02:01 +08:00
|
|
|
struct sys_reg_params *params,
|
2014-04-24 17:27:13 +08:00
|
|
|
const struct sys_reg_desc *global,
|
2020-06-22 19:33:16 +08:00
|
|
|
size_t nr_global)
|
2013-02-07 18:32:33 +08:00
|
|
|
{
|
2017-04-28 02:06:48 +08:00
|
|
|
int Rt = kvm_vcpu_sys_get_rt(vcpu);
|
2013-02-07 18:32:33 +08:00
|
|
|
|
2022-05-03 14:02:01 +08:00
|
|
|
params->regval = vcpu_get_reg(vcpu, Rt);
|
2013-02-07 18:32:33 +08:00
|
|
|
|
2022-05-03 14:02:01 +08:00
|
|
|
if (emulate_cp(vcpu, params, global, nr_global)) {
|
|
|
|
if (!params->is_write)
|
|
|
|
vcpu_set_reg(vcpu, Rt, params->regval);
|
2014-04-24 17:27:13 +08:00
|
|
|
return 1;
|
2015-12-04 20:03:13 +08:00
|
|
|
}
|
2014-04-24 17:27:13 +08:00
|
|
|
|
2022-05-03 14:02:01 +08:00
|
|
|
unhandled_cp_access(vcpu, params);
|
2013-02-07 18:32:33 +08:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2020-06-23 21:14:15 +08:00
|
|
|
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu)
|
2014-04-24 17:27:13 +08:00
|
|
|
{
|
2020-06-22 19:33:16 +08:00
|
|
|
return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs));
|
2014-04-24 17:27:13 +08:00
|
|
|
}
|
|
|
|
|
2020-06-23 21:14:15 +08:00
|
|
|
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu)
|
2014-04-24 17:27:13 +08:00
|
|
|
{
|
2022-05-03 14:02:01 +08:00
|
|
|
struct sys_reg_params params;
|
|
|
|
|
|
|
|
params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Certain AArch32 ID registers are handled by rerouting to the AArch64
|
|
|
|
* system register table. Registers in the ID range where CRm=0 are
|
|
|
|
* excluded from this scheme as they do not trivially map into AArch64
|
|
|
|
* system register encodings.
|
|
|
|
*/
|
|
|
|
if (params.Op1 == 0 && params.CRn == 0 && params.CRm)
|
|
|
|
return kvm_emulate_cp15_id_reg(vcpu, ¶ms);
|
|
|
|
|
|
|
|
return kvm_handle_cp_32(vcpu, ¶ms, cp15_regs, ARRAY_SIZE(cp15_regs));
|
2014-04-24 17:27:13 +08:00
|
|
|
}
|
|
|
|
|
2020-06-23 21:14:15 +08:00
|
|
|
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu)
|
2014-04-24 17:27:13 +08:00
|
|
|
{
|
2020-06-22 19:33:16 +08:00
|
|
|
return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs));
|
2014-04-24 17:27:13 +08:00
|
|
|
}
|
|
|
|
|
2020-06-23 21:14:15 +08:00
|
|
|
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
|
2014-04-24 17:27:13 +08:00
|
|
|
{
|
2022-05-03 14:02:01 +08:00
|
|
|
struct sys_reg_params params;
|
|
|
|
|
|
|
|
params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));
|
|
|
|
|
|
|
|
return kvm_handle_cp_32(vcpu, ¶ms, cp14_regs, ARRAY_SIZE(cp14_regs));
|
2014-04-24 17:27:13 +08:00
|
|
|
}
|
|
|
|
|
2022-05-03 14:02:00 +08:00
|
|
|
/**
|
|
|
|
* emulate_sys_reg - Emulate a guest access to an AArch64 system register
|
|
|
|
* @vcpu: The VCPU pointer
|
|
|
|
* @params: Decoded system register parameters
|
|
|
|
*
|
|
|
|
* Return: true if the system register access was successful, false otherwise.
|
|
|
|
*/
|
|
|
|
static bool emulate_sys_reg(struct kvm_vcpu *vcpu,
|
2024-02-14 21:18:16 +08:00
|
|
|
struct sys_reg_params *params)
|
2012-12-11 00:15:34 +08:00
|
|
|
{
|
2020-06-22 19:33:16 +08:00
|
|
|
const struct sys_reg_desc *r;
|
2012-12-11 00:15:34 +08:00
|
|
|
|
2020-06-22 19:33:16 +08:00
|
|
|
r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
|
2012-12-11 00:15:34 +08:00
|
|
|
if (likely(r)) {
|
2017-03-28 00:03:42 +08:00
|
|
|
perform_access(vcpu, params, r);
|
2022-05-03 14:02:00 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2024-02-14 21:18:16 +08:00
|
|
|
print_sys_reg_msg(params,
|
|
|
|
"Unsupported guest sys_reg access at: %lx [%08lx]\n",
|
|
|
|
*vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
|
|
|
|
kvm_inject_undefined(vcpu);
|
2024-02-14 21:18:13 +08:00
|
|
|
|
2024-02-14 21:18:16 +08:00
|
|
|
return false;
|
2024-02-14 21:18:13 +08:00
|
|
|
}
|
|
|
|
|
2024-06-20 01:40:28 +08:00
|
|
|
static const struct sys_reg_desc *idregs_debug_find(struct kvm *kvm, u8 pos)
|
|
|
|
{
|
|
|
|
unsigned long i, idreg_idx = 0;
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
|
|
|
|
const struct sys_reg_desc *r = &sys_reg_descs[i];
|
|
|
|
|
|
|
|
if (!is_vm_ftr_id_reg(reg_to_encoding(r)))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (idreg_idx == pos)
|
|
|
|
return r;
|
|
|
|
|
|
|
|
idreg_idx++;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2024-02-14 21:18:27 +08:00
|
|
|
static void *idregs_debug_start(struct seq_file *s, loff_t *pos)
|
|
|
|
{
|
|
|
|
struct kvm *kvm = s->private;
|
|
|
|
u8 *iter;
|
|
|
|
|
|
|
|
mutex_lock(&kvm->arch.config_lock);
|
|
|
|
|
|
|
|
iter = &kvm->arch.idreg_debugfs_iter;
|
2024-02-27 17:41:14 +08:00
|
|
|
if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags) &&
|
|
|
|
*iter == (u8)~0) {
|
2024-02-14 21:18:27 +08:00
|
|
|
*iter = *pos;
|
2024-06-20 01:40:28 +08:00
|
|
|
if (!idregs_debug_find(kvm, *iter))
|
2024-02-14 21:18:27 +08:00
|
|
|
iter = NULL;
|
|
|
|
} else {
|
|
|
|
iter = ERR_PTR(-EBUSY);
|
|
|
|
}
|
|
|
|
|
|
|
|
mutex_unlock(&kvm->arch.config_lock);
|
|
|
|
|
|
|
|
return iter;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void *idregs_debug_next(struct seq_file *s, void *v, loff_t *pos)
|
|
|
|
{
|
|
|
|
struct kvm *kvm = s->private;
|
|
|
|
|
|
|
|
(*pos)++;
|
|
|
|
|
2024-06-20 01:40:28 +08:00
|
|
|
if (idregs_debug_find(kvm, kvm->arch.idreg_debugfs_iter + 1)) {
|
2024-02-14 21:18:27 +08:00
|
|
|
kvm->arch.idreg_debugfs_iter++;
|
|
|
|
|
|
|
|
return &kvm->arch.idreg_debugfs_iter;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void idregs_debug_stop(struct seq_file *s, void *v)
|
|
|
|
{
|
|
|
|
struct kvm *kvm = s->private;
|
|
|
|
|
|
|
|
if (IS_ERR(v))
|
|
|
|
return;
|
|
|
|
|
|
|
|
mutex_lock(&kvm->arch.config_lock);
|
|
|
|
|
|
|
|
kvm->arch.idreg_debugfs_iter = ~0;
|
|
|
|
|
|
|
|
mutex_unlock(&kvm->arch.config_lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int idregs_debug_show(struct seq_file *s, void *v)
|
|
|
|
{
|
|
|
|
const struct sys_reg_desc *desc;
|
2024-06-20 01:40:28 +08:00
|
|
|
struct kvm *kvm = s->private;
|
2024-02-14 21:18:27 +08:00
|
|
|
|
2024-06-20 01:40:28 +08:00
|
|
|
desc = idregs_debug_find(kvm, kvm->arch.idreg_debugfs_iter);
|
2024-02-14 21:18:27 +08:00
|
|
|
|
|
|
|
if (!desc->name)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
seq_printf(s, "%20s:\t%016llx\n",
|
2024-06-20 01:40:29 +08:00
|
|
|
desc->name, kvm_read_vm_id_reg(kvm, reg_to_encoding(desc)));
|
2024-02-14 21:18:27 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct seq_operations idregs_debug_sops = {
|
|
|
|
.start = idregs_debug_start,
|
|
|
|
.next = idregs_debug_next,
|
|
|
|
.stop = idregs_debug_stop,
|
|
|
|
.show = idregs_debug_show,
|
|
|
|
};
|
|
|
|
|
|
|
|
DEFINE_SEQ_ATTRIBUTE(idregs_debug);
|
|
|
|
|
2024-02-27 17:41:15 +08:00
|
|
|
void kvm_sys_regs_create_debugfs(struct kvm *kvm)
|
|
|
|
{
|
|
|
|
kvm->arch.idreg_debugfs_iter = ~0;
|
|
|
|
|
|
|
|
debugfs_create_file("idregs", 0444, kvm->debugfs_dentry, kvm,
|
|
|
|
&idregs_debug_fops);
|
|
|
|
}
|
|
|
|
|
2024-05-03 07:35:24 +08:00
|
|
|
static void reset_vm_ftr_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *reg)
|
2023-06-10 03:00:49 +08:00
|
|
|
{
|
2024-05-03 07:35:24 +08:00
|
|
|
u32 id = reg_to_encoding(reg);
|
2023-06-10 03:00:49 +08:00
|
|
|
struct kvm *kvm = vcpu->kvm;
|
|
|
|
|
|
|
|
if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))
|
|
|
|
return;
|
|
|
|
|
2024-06-20 01:40:30 +08:00
|
|
|
kvm_set_vm_id_reg(kvm, id, reg->reset(vcpu, reg));
|
2023-06-10 03:00:49 +08:00
|
|
|
}
|
|
|
|
|
2024-05-03 07:35:25 +08:00
|
|
|
static void reset_vcpu_ftr_id_reg(struct kvm_vcpu *vcpu,
|
|
|
|
const struct sys_reg_desc *reg)
|
|
|
|
{
|
|
|
|
if (kvm_vcpu_initialized(vcpu))
|
|
|
|
return;
|
2023-06-10 03:00:49 +08:00
|
|
|
|
2024-05-03 07:35:25 +08:00
|
|
|
reg->reset(vcpu, reg);
|
2023-06-10 03:00:49 +08:00
|
|
|
}
|
|
|
|
|
2020-06-22 19:33:17 +08:00
|
|
|
/**
|
|
|
|
* kvm_reset_sys_regs - sets system registers to reset value
|
|
|
|
* @vcpu: The VCPU pointer
|
|
|
|
*
|
|
|
|
* This function finds the right table above and sets the registers on the
|
|
|
|
* virtual CPU struct to their architecturally defined reset values.
|
|
|
|
*/
|
|
|
|
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
|
2012-12-11 00:15:34 +08:00
|
|
|
{
|
2024-05-03 07:35:24 +08:00
|
|
|
struct kvm *kvm = vcpu->kvm;
|
2012-12-11 00:15:34 +08:00
|
|
|
unsigned long i;
|
|
|
|
|
2023-06-10 03:00:49 +08:00
|
|
|
for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
|
|
|
|
const struct sys_reg_desc *r = &sys_reg_descs[i];
|
|
|
|
|
2024-05-03 07:35:24 +08:00
|
|
|
if (!r->reset)
|
2023-06-10 03:00:49 +08:00
|
|
|
continue;
|
|
|
|
|
2024-05-03 07:35:24 +08:00
|
|
|
if (is_vm_ftr_id_reg(reg_to_encoding(r)))
|
|
|
|
reset_vm_ftr_id_reg(vcpu, r);
|
2024-05-03 07:35:25 +08:00
|
|
|
else if (is_vcpu_ftr_id_reg(reg_to_encoding(r)))
|
|
|
|
reset_vcpu_ftr_id_reg(vcpu, r);
|
2024-05-03 07:35:24 +08:00
|
|
|
else
|
2023-06-10 03:00:49 +08:00
|
|
|
r->reset(vcpu, r);
|
|
|
|
}
|
2024-05-03 07:35:24 +08:00
|
|
|
|
|
|
|
set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);
|
2012-12-11 00:15:34 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2024-02-14 21:18:13 +08:00
|
|
|
* kvm_handle_sys_reg -- handles a system instruction or mrs/msr instruction
|
|
|
|
* trap on a guest execution
|
2012-12-11 00:15:34 +08:00
|
|
|
* @vcpu: The VCPU pointer
|
|
|
|
*/
|
2020-06-23 21:14:15 +08:00
|
|
|
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
|
2012-12-11 00:15:34 +08:00
|
|
|
{
|
2024-02-14 21:18:16 +08:00
|
|
|
const struct sys_reg_desc *desc = NULL;
|
2012-12-11 00:15:34 +08:00
|
|
|
struct sys_reg_params params;
|
2020-06-30 09:57:05 +08:00
|
|
|
unsigned long esr = kvm_vcpu_get_esr(vcpu);
|
2017-04-28 02:06:48 +08:00
|
|
|
int Rt = kvm_vcpu_sys_get_rt(vcpu);
|
2024-02-14 21:18:16 +08:00
|
|
|
int sr_idx;
|
2012-12-11 00:15:34 +08:00
|
|
|
|
2015-07-08 00:30:03 +08:00
|
|
|
trace_kvm_handle_sys_reg(esr);
|
|
|
|
|
2024-02-14 21:18:17 +08:00
|
|
|
if (triage_sysreg_trap(vcpu, &sr_idx))
|
2023-08-16 02:38:48 +08:00
|
|
|
return 1;
|
|
|
|
|
2021-08-17 16:11:24 +08:00
|
|
|
params = esr_sys64_to_params(esr);
|
2015-12-04 20:03:13 +08:00
|
|
|
params.regval = vcpu_get_reg(vcpu, Rt);
|
2012-12-11 00:15:34 +08:00
|
|
|
|
2024-02-14 21:18:13 +08:00
|
|
|
/* System registers have Op0=={2,3}, as per DDI0487 J.a C5.1.2 */
|
2024-02-14 21:18:16 +08:00
|
|
|
if (params.Op0 == 2 || params.Op0 == 3)
|
|
|
|
desc = &sys_reg_descs[sr_idx];
|
|
|
|
else
|
|
|
|
desc = &sys_insn_descs[sr_idx];
|
2024-02-14 21:18:13 +08:00
|
|
|
|
2024-02-14 21:18:16 +08:00
|
|
|
perform_access(vcpu, ¶ms, desc);
|
2024-02-14 21:18:13 +08:00
|
|
|
|
2024-02-14 21:18:16 +08:00
|
|
|
/* Read from system register? */
|
|
|
|
if (!params.is_write &&
|
|
|
|
(params.Op0 == 2 || params.Op0 == 3))
|
|
|
|
vcpu_set_reg(vcpu, Rt, params.regval);
|
2015-12-04 20:03:13 +08:00
|
|
|
|
2024-02-14 21:18:16 +08:00
|
|
|
return 1;
|
2012-12-11 00:15:34 +08:00
|
|
|
}

/*****************************************************************************
 * Userspace API
 *****************************************************************************/

static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			   | KVM_REG_ARM_COPROC_MASK
			   | KVM_REG_ARM64_SYSREG_OP0_MASK
			   | KVM_REG_ARM64_SYSREG_OP1_MASK
			   | KVM_REG_ARM64_SYSREG_CRN_MASK
			   | KVM_REG_ARM64_SYSREG_CRM_MASK
			   | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
}
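
/*
 * Illustrative sketch: what such a userspace index looks like before it is
 * decoded above. For example, SCTLR_EL1 (Op0=3, Op1=0, CRn=1, CRm=0, Op2=0)
 * would be described to KVM_{GET,SET}_ONE_REG as:
 *
 *	__u64 id = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM64_SYSREG |
 *		   (3 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
 *		   (0 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
 *		   (1 << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
 *		   (0 << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
 *		   (0 << KVM_REG_ARM64_SYSREG_OP2_SHIFT);
 *
 * which index_to_params() reverses and sys_reg_to_index() (further down)
 * rebuilds from a sys_reg_desc.
 */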

const struct sys_reg_desc *get_reg_by_id(u64 id,
					 const struct sys_reg_desc table[],
					 unsigned int num)
{
	struct sys_reg_params params;

	if (!index_to_params(id, &params))
		return NULL;

	return find_reg(&params, table, num);
}

/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *
id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id,
		   const struct sys_reg_desc table[], unsigned int num)
{
	const struct sys_reg_desc *r;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	r = get_reg_by_id(id, table, num);

	/* Not saved in the sys_reg array and not otherwise accessible? */
	if (r && (!(r->reg || r->get_user) || sysreg_hidden(vcpu, r)))
		r = NULL;

	return r;
}

/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */

#define FUNCTION_INVARIANT(reg)						\
	static u64 reset_##reg(struct kvm_vcpu *v,			\
			       const struct sys_reg_desc *r)		\
	{								\
		((struct sys_reg_desc *)r)->val = read_sysreg(reg);	\
		return ((struct sys_reg_desc *)r)->val;			\
	}

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(aidr_el1)

/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] __ro_after_init = {
	{ SYS_DESC(SYS_MIDR_EL1), NULL, reset_midr_el1 },
	{ SYS_DESC(SYS_REVIDR_EL1), NULL, reset_revidr_el1 },
	{ SYS_DESC(SYS_AIDR_EL1), NULL, reset_aidr_el1 },
};
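
/*
 * For reference, FUNCTION_INVARIANT(midr_el1) above expands to (roughly):
 *
 *	static u64 reset_midr_el1(struct kvm_vcpu *v,
 *				  const struct sys_reg_desc *r)
 *	{
 *		((struct sys_reg_desc *)r)->val = read_sysreg(midr_el1);
 *		return ((struct sys_reg_desc *)r)->val;
 *	}
 *
 * i.e. each reset_*() simply snapshots the host's view of the register into
 * the table entry at init time.
 */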

static int get_invariant_sys_reg(u64 id, u64 __user *uaddr)
{
	const struct sys_reg_desc *r;

	r = get_reg_by_id(id, invariant_sys_regs,
			  ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return put_user(r->val, uaddr);
}

static int set_invariant_sys_reg(u64 id, u64 __user *uaddr)
{
	const struct sys_reg_desc *r;
	u64 val;

	r = get_reg_by_id(id, invariant_sys_regs,
			  ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	if (get_user(val, uaddr))
		return -EFAULT;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}

static int demux_c15_get(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (val >= CSSELR_MAX)
			return -ENOENT;

		return put_user(get_ccsidr(vcpu, val), uval);
	default:
		return -ENOENT;
	}
}

static int demux_c15_set(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (val >= CSSELR_MAX)
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		return set_ccsidr(vcpu, val, newval);
	default:
		return -ENOENT;
	}
}
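
/*
 * Illustrative sketch (userspace side): CCSIDR values are demultiplexed by
 * CSSELR index, mirroring the ids published by write_demux_regids() further
 * down. Reading the cache ID register selected by CSSELR == 0 would look
 * roughly like:
 *
 *	__u32 ccsidr;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX |
 *			KVM_REG_ARM_DEMUX_ID_CCSIDR | 0,	// CSSELR value
 *		.addr = (__u64)(unsigned long)&ccsidr,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);	// ends up in demux_c15_get()
 */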

int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
			 const struct sys_reg_desc table[], unsigned int num)
{
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
	const struct sys_reg_desc *r;
	u64 val;
	int ret;

	r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
	if (!r || sysreg_hidden_user(vcpu, r))
		return -ENOENT;

	if (r->get_user) {
		ret = (r->get_user)(vcpu, r, &val);
	} else {
		val = __vcpu_sys_reg(vcpu, r->reg);
		ret = 0;
	}

	if (!ret)
		ret = put_user(val, uaddr);

	return ret;
}

int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;
	int err;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(vcpu, reg->id, uaddr);

	err = get_invariant_sys_reg(reg->id, uaddr);
	if (err != -ENOENT)
		return err;

	return kvm_sys_reg_get_user(vcpu, reg,
				    sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
}

int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
			 const struct sys_reg_desc table[], unsigned int num)
{
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
	const struct sys_reg_desc *r;
	u64 val;
	int ret;

	if (get_user(val, uaddr))
		return -EFAULT;

	r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
	if (!r || sysreg_hidden_user(vcpu, r))
		return -ENOENT;

	if (sysreg_user_write_ignore(vcpu, r))
		return 0;

	if (r->set_user) {
		ret = (r->set_user)(vcpu, r, val);
	} else {
		__vcpu_sys_reg(vcpu, r->reg) = val;
		ret = 0;
	}

	return ret;
}

int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;
	int err;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(vcpu, reg->id, uaddr);

	err = set_invariant_sys_reg(reg->id, uaddr);
	if (err != -ENOENT)
		return err;

	return kvm_sys_reg_set_user(vcpu, reg,
				    sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
}
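
/*
 * Illustrative sketch (userspace side, error handling omitted): how the two
 * entry points above are typically reached. "vcpu_fd" and "sctlr_el1_id"
 * (built as in the earlier SCTLR_EL1 example) are assumptions made for the
 * example only.
 *
 *	__u64 val;
 *	struct kvm_one_reg reg = {
 *		.id   = sctlr_el1_id,
 *		.addr = (__u64)(unsigned long)&val,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);	// kvm_arm_sys_reg_get_reg()
 *	// ...modify val...
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);	// kvm_arm_sys_reg_set_reg()
 */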

static unsigned int num_demux_regs(void)
{
	return CSSELR_MAX;
}

static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}

static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}

static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(sys_reg_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}

static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *rd,
			    u64 __user **uind,
			    unsigned int *total)
{
	/*
	 * Ignore registers we trap but don't save,
	 * and for which no custom user accessor is provided.
	 */
	if (!(rd->reg || rd->get_user))
		return 0;

	if (sysreg_hidden_user(vcpu, rd))
		return 0;

	if (!copy_reg_to_user(rd, uind))
		return -EFAULT;

	(*total)++;
	return 0;
}

/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i2, *end2;
	unsigned int total = 0;
	int err;

	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	while (i2 != end2) {
		err = walk_one_sys_reg(vcpu, i2++, &uind, &total);
		if (err)
			return err;
	}
	return total;
}

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_sys_regs)
		+ num_demux_regs()
		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Then give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}
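
/*
 * Illustrative sketch (userspace side, error handling omitted): the usual
 * two-call pattern for consuming the enumeration above. The first call is
 * expected to fail with E2BIG while reporting the required count.
 *
 *	struct kvm_reg_list hdr = { .n = 0 };
 *	struct kvm_reg_list *list;
 *
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, &hdr);		// sets hdr.n
 *	list = malloc(sizeof(*list) + hdr.n * sizeof(__u64));
 *	list->n = hdr.n;
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);		// fills list->reg[]
 */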

#define KVM_ARM_FEATURE_ID_RANGE_INDEX(r)			\
	KVM_ARM_FEATURE_ID_RANGE_IDX(sys_reg_Op0(r),		\
		sys_reg_Op1(r),					\
		sys_reg_CRn(r),					\
		sys_reg_CRm(r),					\
		sys_reg_Op2(r))

/*
 * Report, for each feature ID register, which bits userspace is allowed to
 * change. Advertised to userspace via
 * KVM_CAP_ARM_SUPPORTED_FEATURE_ID_RANGES.
 */
int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm, struct reg_mask_range *range)
{
	const void *zero_page = page_to_virt(ZERO_PAGE(0));
	u64 __user *masks = (u64 __user *)range->addr;

	/* Only feature id range is supported, reserved[13] must be zero. */
	if (range->range ||
	    memcmp(range->reserved, zero_page, sizeof(range->reserved)))
		return -EINVAL;

	/* Wipe the whole thing first */
	if (clear_user(masks, KVM_ARM_FEATURE_ID_RANGE_SIZE * sizeof(__u64)))
		return -EFAULT;

	for (int i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
		const struct sys_reg_desc *reg = &sys_reg_descs[i];
		u32 encoding = reg_to_encoding(reg);
		u64 val;

		if (!is_feature_id_reg(encoding) || !reg->set_user)
			continue;

		if (!reg->val ||
		    (is_aa32_id_reg(encoding) && !kvm_supports_32bit_el0())) {
			continue;
		}
		val = reg->val;

		if (put_user(val, (masks + KVM_ARM_FEATURE_ID_RANGE_INDEX(encoding))))
			return -EFAULT;
	}

	return 0;
}
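
/*
 * Illustrative sketch (userspace side, error handling omitted), assuming the
 * KVM_ARM_GET_REG_WRITABLE_MASKS ioctl name and the struct reg_mask_range
 * layout from the uapi headers:
 *
 *	__u64 masks[KVM_ARM_FEATURE_ID_RANGE_SIZE] = {};
 *	struct reg_mask_range range = {
 *		.addr  = (__u64)(unsigned long)masks,
 *		.range = 0,		// the feature ID register range
 *	};
 *
 *	ioctl(vm_fd, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);
 *	// masks[KVM_ARM_FEATURE_ID_RANGE_INDEX(encoding)] now holds the
 *	// writable bits for the corresponding feature ID register.
 */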

static void vcpu_set_hcr(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;

	if (has_vhe() || has_hvhe())
		vcpu->arch.hcr_el2 |= HCR_E2H;

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}

	if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
		vcpu->arch.hcr_el2 |= HCR_FWB;

	if (cpus_have_final_cap(ARM64_HAS_EVT) &&
	    !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE) &&
	    kvm_read_vm_id_reg(kvm, SYS_CTR_EL0) == read_sanitised_ftr_reg(SYS_CTR_EL0))
		vcpu->arch.hcr_el2 |= HCR_TID4;
	else
		vcpu->arch.hcr_el2 |= HCR_TID2;

	if (vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 &= ~HCR_RW;

	if (kvm_has_mte(vcpu->kvm))
		vcpu->arch.hcr_el2 |= HCR_ATA;

	/*
	 * In the absence of FGT, we cannot independently trap TLBI
	 * Range instructions. This isn't great, but trapping all
	 * TLBIs would be far worse. Live with it...
	 */
	if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
		vcpu->arch.hcr_el2 |= HCR_TTLBOS;
}

void kvm_calculate_traps(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;

	mutex_lock(&kvm->arch.config_lock);
	vcpu_set_hcr(vcpu);
	vcpu_set_ich_hcr(vcpu);

	if (cpus_have_final_cap(ARM64_HAS_HCX)) {
		/*
		 * In general, all HCRX_EL2 bits are gated by a feature.
		 * The only reason we can set SMPME without checking any
		 * feature is that its effects are not directly observable
		 * from the guest.
		 */
		vcpu->arch.hcrx_el2 = HCRX_EL2_SMPME;

		if (kvm_has_feat(kvm, ID_AA64ISAR2_EL1, MOPS, IMP))
			vcpu->arch.hcrx_el2 |= (HCRX_EL2_MSCEn | HCRX_EL2_MCE2);

		if (kvm_has_feat(kvm, ID_AA64MMFR3_EL1, TCRX, IMP))
			vcpu->arch.hcrx_el2 |= HCRX_EL2_TCR2En;
	}

	if (test_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags))
		goto out;

	kvm->arch.fgu[HFGxTR_GROUP] = (HFGxTR_EL2_nAMAIR2_EL1 |
				       HFGxTR_EL2_nMAIR2_EL1 |
				       HFGxTR_EL2_nS2POR_EL1 |
				       HFGxTR_EL2_nPOR_EL1 |
				       HFGxTR_EL2_nPOR_EL0 |
				       HFGxTR_EL2_nACCDATA_EL1 |
				       HFGxTR_EL2_nSMPRI_EL1_MASK |
				       HFGxTR_EL2_nTPIDR2_EL0_MASK);

	if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
		kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_TLBIRVAALE1OS |
						HFGITR_EL2_TLBIRVALE1OS |
						HFGITR_EL2_TLBIRVAAE1OS |
						HFGITR_EL2_TLBIRVAE1OS |
						HFGITR_EL2_TLBIVAALE1OS |
						HFGITR_EL2_TLBIVALE1OS |
						HFGITR_EL2_TLBIVAAE1OS |
						HFGITR_EL2_TLBIASIDE1OS |
						HFGITR_EL2_TLBIVAE1OS |
						HFGITR_EL2_TLBIVMALLE1OS);

	if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
		kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_TLBIRVAALE1 |
						HFGITR_EL2_TLBIRVALE1 |
						HFGITR_EL2_TLBIRVAAE1 |
						HFGITR_EL2_TLBIRVAE1 |
						HFGITR_EL2_TLBIRVAALE1IS |
						HFGITR_EL2_TLBIRVALE1IS |
						HFGITR_EL2_TLBIRVAAE1IS |
						HFGITR_EL2_TLBIRVAE1IS |
						HFGITR_EL2_TLBIRVAALE1OS |
						HFGITR_EL2_TLBIRVALE1OS |
						HFGITR_EL2_TLBIRVAAE1OS |
						HFGITR_EL2_TLBIRVAE1OS);

	if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S1PIE, IMP))
		kvm->arch.fgu[HFGxTR_GROUP] |= (HFGxTR_EL2_nPIRE0_EL1 |
						HFGxTR_EL2_nPIR_EL1);

	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, IMP))
		kvm->arch.fgu[HAFGRTR_GROUP] |= ~(HAFGRTR_EL2_RES0 |
						  HAFGRTR_EL2_RES1);

	set_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags);
out:
	mutex_unlock(&kvm->arch.config_lock);
}

/*
 * Perform last adjustments to the ID registers that are implied by the
 * configuration outside of the ID regs themselves, as well as any
 * initialisation that directly depends on these ID registers (such as
 * RES0/RES1 behaviours). This is not the place to configure traps though.
 *
 * Because this can be called once per CPU, changes must be idempotent.
 */
int kvm_finalize_sys_regs(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;

	guard(mutex)(&kvm->arch.config_lock);

	if (!(static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) &&
	      irqchip_in_kernel(kvm) &&
	      kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)) {
		kvm->arch.id_regs[IDREG_IDX(SYS_ID_AA64PFR0_EL1)] &= ~ID_AA64PFR0_EL1_GIC_MASK;
		kvm->arch.id_regs[IDREG_IDX(SYS_ID_PFR1_EL1)] &= ~ID_PFR1_EL1_GIC_MASK;
	}

	if (vcpu_has_nv(vcpu)) {
		int ret = kvm_init_nv_sysregs(kvm);
		if (ret)
			return ret;
	}

	return 0;
}

int __init kvm_sys_reg_table_init(void)
{
	bool valid = true;
	unsigned int i;
	int ret = 0;

	/* Make sure tables are unique and in order. */
	valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), false);
	valid &= check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), true);
	valid &= check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), true);
	valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true);
	valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true);
	valid &= check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs), false);
	valid &= check_sysreg_table(sys_insn_descs, ARRAY_SIZE(sys_insn_descs), false);

	if (!valid)
		return -EINVAL;

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	ret = populate_nv_trap_config();

	for (i = 0; !ret && i < ARRAY_SIZE(sys_reg_descs); i++)
		ret = populate_sysreg_config(sys_reg_descs + i, i);

	for (i = 0; !ret && i < ARRAY_SIZE(sys_insn_descs); i++)
		ret = populate_sysreg_config(sys_insn_descs + i, i);

	return ret;
}