Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-11-24 12:44:11 +08:00)
Merge branch kvm-arm64/burn-the-flags into kvmarm-master/next
* kvm-arm64/burn-the-flags:
  : .
  : Rework the per-vcpu flags to make them more manageable,
  : splitting them in different sets that have specific
  : uses:
  :
  : - configuration flags
  : - input to the world-switch
  : - state bookkeeping for the kernel itself
  :
  : The FP tracking is also simplified and tracked outside
  : of the flags as a separate state.
  : .
  KVM: arm64: Move the handling of !FP outside of the fast path
  KVM: arm64: Document why pause cannot be turned into a flag
  KVM: arm64: Reduce the size of the vcpu flag members
  KVM: arm64: Add build-time sanity checks for flags
  KVM: arm64: Warn when PENDING_EXCEPTION and INCREMENT_PC are set together
  KVM: arm64: Convert vcpu sysregs_loaded_on_cpu to a state flag
  KVM: arm64: Kill unused vcpu flags field
  KVM: arm64: Move vcpu WFIT flag to the state flag set
  KVM: arm64: Move vcpu ON_UNSUPPORTED_CPU flag to the state flag set
  KVM: arm64: Move vcpu SVE/SME flags to the state flag set
  KVM: arm64: Move vcpu debug/SPE/TRBE flags to the input flag set
  KVM: arm64: Move vcpu PC/Exception flags to the input flag set
  KVM: arm64: Move vcpu configuration flags into their own set
  KVM: arm64: Add three sets of flags to the vcpu state
  KVM: arm64: Add helpers to manipulate vcpu flags among a set
  KVM: arm64: Move FP state ownership from flag to a tristate
  KVM: arm64: Drop FP_FOREIGN_STATE from the hypervisor code

Signed-off-by: Marc Zyngier <maz@kernel.org>
This commit is contained in: commit dc94f89ae6
@@ -473,9 +473,18 @@ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,

 static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.flags |= KVM_ARM64_INCREMENT_PC;
+	WARN_ON(vcpu_get_flag(vcpu, PENDING_EXCEPTION));
+	vcpu_set_flag(vcpu, INCREMENT_PC);
 }

+#define kvm_pend_exception(v, e)					\
+	do {								\
+		WARN_ON(vcpu_get_flag((v), INCREMENT_PC));		\
+		vcpu_set_flag((v), PENDING_EXCEPTION);			\
+		vcpu_set_flag((v), e);					\
+	} while (0)
+
+
 static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
 {
 	return test_bit(feature, vcpu->arch.features);
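The two helpers above are deliberately mutually exclusive: an exit is resolved either by stepping the guest PC or by making an exception pending, never both, and the WARN_ON()s catch callers that mix them. A stand-alone model of that invariant (names and types invented for the sketch; the real users of kvm_pend_exception() appear in the inject_fault.c hunks further down):

/* Illustrative model only, not kernel code. */
#include <assert.h>
#include <stdbool.h>

struct vcpu_model { bool pending_exception; bool increment_pc; };

static void model_incr_pc(struct vcpu_model *v)
{
	assert(!v->pending_exception);	/* WARN_ON() in the real helper */
	v->increment_pc = true;
}

static void model_pend_exception(struct vcpu_model *v)
{
	assert(!v->increment_pc);	/* WARN_ON() in the real helper */
	v->pending_exception = true;
}

int main(void)
{
	struct vcpu_model v = { 0 };

	model_incr_pc(&v);		/* fine: nothing pending yet */
	/* model_pend_exception(&v);	   would trip the assertion */
	return 0;
}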
@@ -325,8 +325,30 @@ struct kvm_vcpu_arch {
 	/* Exception Information */
 	struct kvm_vcpu_fault_info fault;

-	/* Miscellaneous vcpu state flags */
-	u64 flags;
+	/* Ownership of the FP regs */
+	enum {
+		FP_STATE_FREE,
+		FP_STATE_HOST_OWNED,
+		FP_STATE_GUEST_OWNED,
+	} fp_state;
+
+	/* Configuration flags, set once and for all before the vcpu can run */
+	u8 cflags;
+
+	/* Input flags to the hypervisor code, potentially cleared after use */
+	u8 iflags;
+
+	/* State flags for kernel bookkeeping, unused by the hypervisor code */
+	u8 sflags;
+
+	/*
+	 * Don't run the guest (internal implementation need).
+	 *
+	 * Contrary to the flags above, this is set/cleared outside of
+	 * a vcpu context, and thus cannot be mixed with the flags
+	 * themselves (or the flag accesses need to be made atomic).
+	 */
+	bool pause;

 	/*
 	 * We maintain more than a single set of debug registers to support
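The fp_state field above replaces the old KVM_ARM64_FP_ENABLED/KVM_ARM64_FP_HOST flag pair with an explicit owner. A condensed, stand-alone sketch of the transitions as they appear in the kvm_arch_vcpu_load_fp(), kvm_arch_vcpu_ctxflush_fp() and kvm_hyp_handle_fpsimd() hunks further down (illustrative code with invented names, not kernel code):

#include <stdbool.h>
#include <stdio.h>

enum fp_owner { FP_FREE, FP_HOST_OWNED, FP_GUEST_OWNED };

struct vcpu_sketch { enum fp_owner fp_state; };

/* vcpu_load: the host's FP/SIMD state is live in the registers. */
static void load_fp(struct vcpu_sketch *v, bool have_fpsimd)
{
	if (have_fpsimd)
		v->fp_state = FP_HOST_OWNED;
}

/* Just before guest entry, preemption disabled: if another FP user ran in
 * the meantime (TIF_FOREIGN_FPSTATE in the real code), nobody owns the
 * registers any more. */
static void ctxflush_fp(struct vcpu_sketch *v, bool foreign_fpstate)
{
	if (foreign_fpstate)
		v->fp_state = FP_FREE;
}

/* FPSIMD trap from the guest: the host state is saved only if it still
 * owns the registers, then ownership moves to the guest. */
static void handle_fpsimd_trap(struct vcpu_sketch *v)
{
	if (v->fp_state == FP_HOST_OWNED)
		printf("save host FP state\n");
	printf("restore guest FP state\n");
	v->fp_state = FP_GUEST_OWNED;
}

int main(void)
{
	struct vcpu_sketch v = { .fp_state = FP_FREE };

	load_fp(&v, true);
	ctxflush_fp(&v, false);
	handle_fpsimd_trap(&v);
	return v.fp_state == FP_GUEST_OWNED ? 0 : 1;
}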
@@ -376,9 +398,6 @@ struct kvm_vcpu_arch {
 	/* vcpu power state */
 	struct kvm_mp_state mp_state;

-	/* Don't run the guest (internal implementation need) */
-	bool pause;
-
 	/* Cache some mmu pages needed inside spinlock regions */
 	struct kvm_mmu_memory_cache mmu_page_cache;

@@ -392,10 +411,6 @@ struct kvm_vcpu_arch {
 	/* Additional reset state */
 	struct vcpu_reset_state reset_state;

-	/* True when deferrable sysregs are loaded on the physical CPU,
-	 * see kvm_vcpu_load_sysregs_vhe and kvm_vcpu_put_sysregs_vhe. */
-	bool sysregs_loaded_on_cpu;
-
 	/* Guest PV state */
 	struct {
 		u64 last_steal;
@@ -403,6 +418,124 @@ struct kvm_vcpu_arch {
 	} steal;
 };

+/*
+ * Each 'flag' is composed of a comma-separated triplet:
+ *
+ * - the flag-set it belongs to in the vcpu->arch structure
+ * - the value for that flag
+ * - the mask for that flag
+ *
+ * __vcpu_single_flag() builds such a triplet for a single-bit flag.
+ * unpack_vcpu_flag() extract the flag value from the triplet for
+ * direct use outside of the flag accessors.
+ */
+#define __vcpu_single_flag(_set, _f)	_set, (_f), (_f)
+
+#define __unpack_flag(_set, _f, _m)	_f
+#define unpack_vcpu_flag(...)		__unpack_flag(__VA_ARGS__)
+
+#define __build_check_flag(v, flagset, f, m)			\
+	do {							\
+		typeof(v->arch.flagset) *_fset;			\
+								\
+		/* Check that the flags fit in the mask */	\
+		BUILD_BUG_ON(HWEIGHT(m) != HWEIGHT((f) | (m)));	\
+		/* Check that the flags fit in the type */	\
+		BUILD_BUG_ON((sizeof(*_fset) * 8) <= __fls(m));	\
+	} while (0)
+
+#define __vcpu_get_flag(v, flagset, f, m)			\
+	({							\
+		__build_check_flag(v, flagset, f, m);		\
+								\
+		v->arch.flagset & (m);				\
+	})
+
+#define __vcpu_set_flag(v, flagset, f, m)			\
+	do {							\
+		typeof(v->arch.flagset) *fset;			\
+								\
+		__build_check_flag(v, flagset, f, m);		\
+								\
+		fset = &v->arch.flagset;			\
+		if (HWEIGHT(m) > 1)				\
+			*fset &= ~(m);				\
+		*fset |= (f);					\
+	} while (0)
+
+#define __vcpu_clear_flag(v, flagset, f, m)			\
+	do {							\
+		typeof(v->arch.flagset) *fset;			\
+								\
+		__build_check_flag(v, flagset, f, m);		\
+								\
+		fset = &v->arch.flagset;			\
+		*fset &= ~(m);					\
+	} while (0)
+
+#define vcpu_get_flag(v, ...)	__vcpu_get_flag((v), __VA_ARGS__)
+#define vcpu_set_flag(v, ...)	__vcpu_set_flag((v), __VA_ARGS__)
+#define vcpu_clear_flag(v, ...)	__vcpu_clear_flag((v), __VA_ARGS__)
+
+/* SVE exposed to guest */
+#define GUEST_HAS_SVE		__vcpu_single_flag(cflags, BIT(0))
+/* SVE config completed */
+#define VCPU_SVE_FINALIZED	__vcpu_single_flag(cflags, BIT(1))
+/* PTRAUTH exposed to guest */
+#define GUEST_HAS_PTRAUTH	__vcpu_single_flag(cflags, BIT(2))
+
+/* Exception pending */
+#define PENDING_EXCEPTION	__vcpu_single_flag(iflags, BIT(0))
+/*
+ * PC increment. Overlaps with EXCEPT_MASK on purpose so that it can't
+ * be set together with an exception...
+ */
+#define INCREMENT_PC		__vcpu_single_flag(iflags, BIT(1))
+/* Target EL/MODE (not a single flag, but let's abuse the macro) */
+#define EXCEPT_MASK		__vcpu_single_flag(iflags, GENMASK(3, 1))
+
+/* Helpers to encode exceptions with minimum fuss */
+#define __EXCEPT_MASK_VAL	unpack_vcpu_flag(EXCEPT_MASK)
+#define __EXCEPT_SHIFT		__builtin_ctzl(__EXCEPT_MASK_VAL)
+#define __vcpu_except_flags(_f)	iflags, (_f << __EXCEPT_SHIFT), __EXCEPT_MASK_VAL
+
+/*
+ * When PENDING_EXCEPTION is set, EXCEPT_MASK can take the following
+ * values:
+ *
+ * For AArch32 EL1:
+ */
+#define EXCEPT_AA32_UND		__vcpu_except_flags(0)
+#define EXCEPT_AA32_IABT	__vcpu_except_flags(1)
+#define EXCEPT_AA32_DABT	__vcpu_except_flags(2)
+/* For AArch64: */
+#define EXCEPT_AA64_EL1_SYNC	__vcpu_except_flags(0)
+#define EXCEPT_AA64_EL1_IRQ	__vcpu_except_flags(1)
+#define EXCEPT_AA64_EL1_FIQ	__vcpu_except_flags(2)
+#define EXCEPT_AA64_EL1_SERR	__vcpu_except_flags(3)
+/* For AArch64 with NV (one day): */
+#define EXCEPT_AA64_EL2_SYNC	__vcpu_except_flags(4)
+#define EXCEPT_AA64_EL2_IRQ	__vcpu_except_flags(5)
+#define EXCEPT_AA64_EL2_FIQ	__vcpu_except_flags(6)
+#define EXCEPT_AA64_EL2_SERR	__vcpu_except_flags(7)
+/* Guest debug is live */
+#define DEBUG_DIRTY		__vcpu_single_flag(iflags, BIT(4))
+/* Save SPE context if active */
+#define DEBUG_STATE_SAVE_SPE	__vcpu_single_flag(iflags, BIT(5))
+/* Save TRBE context if active */
+#define DEBUG_STATE_SAVE_TRBE	__vcpu_single_flag(iflags, BIT(6))
+
+/* SVE enabled for host EL0 */
+#define HOST_SVE_ENABLED	__vcpu_single_flag(sflags, BIT(0))
+/* SME enabled for EL0 */
+#define HOST_SME_ENABLED	__vcpu_single_flag(sflags, BIT(1))
+/* Physical CPU not in supported_cpus */
+#define ON_UNSUPPORTED_CPU	__vcpu_single_flag(sflags, BIT(2))
+/* WFIT instruction trapped */
+#define IN_WFIT			__vcpu_single_flag(sflags, BIT(3))
+/* vcpu system registers loaded on physical CPU */
+#define SYSREGS_ON_CPU		__vcpu_single_flag(sflags, BIT(4))
+
 /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
 #define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) +	\
 			     sve_ffr_offset((vcpu)->arch.sve_max_vl))
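The accessors above rely on each flag name expanding to a "set, value, mask" triplet that is forwarded through __VA_ARGS__, so one macro can address cflags, iflags or sflags alike. A stand-alone sketch of the same trick, with the BUILD_BUG_ON()/HWEIGHT() sanity checks omitted and the struct and flag names invented for the demo:

#include <stdint.h>
#include <stdio.h>

struct vcpu_arch_demo {
	uint8_t cflags;		/* configuration flags */
	uint8_t iflags;		/* input flags to the hypervisor */
	uint8_t sflags;		/* state flags for kernel bookkeeping */
};

struct vcpu_demo {
	struct vcpu_arch_demo arch;
};

#define BIT(n)				(1UL << (n))

/* flag name == (set, value, mask) */
#define __vcpu_single_flag(_set, _f)	_set, (_f), (_f)

#define __vcpu_get_flag(v, flagset, f, m)	((v)->arch.flagset & (m))
/* The kernel version only pre-clears multi-bit masks (HWEIGHT(m) > 1);
 * for single-bit flags f == m, so always clearing first is equivalent. */
#define __vcpu_set_flag(v, flagset, f, m)		\
	do {						\
		(v)->arch.flagset &= ~(m);		\
		(v)->arch.flagset |= (f);		\
	} while (0)
#define __vcpu_clear_flag(v, flagset, f, m)	((v)->arch.flagset &= ~(m))

#define vcpu_get_flag(v, ...)	__vcpu_get_flag((v), __VA_ARGS__)
#define vcpu_set_flag(v, ...)	__vcpu_set_flag((v), __VA_ARGS__)
#define vcpu_clear_flag(v, ...)	__vcpu_clear_flag((v), __VA_ARGS__)

/* Demo flags, one per set (the real names live in kvm_host.h above). */
#define DEMO_CONFIG_FLAG	__vcpu_single_flag(cflags, BIT(0))
#define DEMO_INPUT_FLAG		__vcpu_single_flag(iflags, BIT(1))
#define DEMO_STATE_FLAG		__vcpu_single_flag(sflags, BIT(3))

int main(void)
{
	struct vcpu_demo vcpu = { 0 };

	vcpu_set_flag(&vcpu, DEMO_CONFIG_FLAG);	/* lands in cflags */
	vcpu_set_flag(&vcpu, DEMO_INPUT_FLAG);	/* lands in iflags */
	vcpu_set_flag(&vcpu, DEMO_STATE_FLAG);	/* lands in sflags */
	vcpu_clear_flag(&vcpu, DEMO_INPUT_FLAG);

	printf("cflags=%#x iflags=%#x sflags=%#x config=%#x\n",
	       vcpu.arch.cflags, vcpu.arch.iflags, vcpu.arch.sflags,
	       (unsigned)vcpu_get_flag(&vcpu, DEMO_CONFIG_FLAG));
	return 0;
}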
@@ -423,70 +556,31 @@ struct kvm_vcpu_arch {
 	__size_ret;							\
 })

-/* vcpu_arch flags field values: */
-#define KVM_ARM64_DEBUG_DIRTY		(1 << 0)
-#define KVM_ARM64_FP_ENABLED		(1 << 1) /* guest FP regs loaded */
-#define KVM_ARM64_FP_HOST		(1 << 2) /* host FP regs loaded */
-#define KVM_ARM64_HOST_SVE_ENABLED	(1 << 4) /* SVE enabled for EL0 */
-#define KVM_ARM64_GUEST_HAS_SVE		(1 << 5) /* SVE exposed to guest */
-#define KVM_ARM64_VCPU_SVE_FINALIZED	(1 << 6) /* SVE config completed */
-#define KVM_ARM64_GUEST_HAS_PTRAUTH	(1 << 7) /* PTRAUTH exposed to guest */
-#define KVM_ARM64_PENDING_EXCEPTION	(1 << 8) /* Exception pending */
-/*
- * Overlaps with KVM_ARM64_EXCEPT_MASK on purpose so that it can't be
- * set together with an exception...
- */
-#define KVM_ARM64_INCREMENT_PC		(1 << 9) /* Increment PC */
-#define KVM_ARM64_EXCEPT_MASK		(7 << 9) /* Target EL/MODE */
-/*
- * When KVM_ARM64_PENDING_EXCEPTION is set, KVM_ARM64_EXCEPT_MASK can
- * take the following values:
- *
- * For AArch32 EL1:
- */
-#define KVM_ARM64_EXCEPT_AA32_UND	(0 << 9)
-#define KVM_ARM64_EXCEPT_AA32_IABT	(1 << 9)
-#define KVM_ARM64_EXCEPT_AA32_DABT	(2 << 9)
-/* For AArch64: */
-#define KVM_ARM64_EXCEPT_AA64_ELx_SYNC	(0 << 9)
-#define KVM_ARM64_EXCEPT_AA64_ELx_IRQ	(1 << 9)
-#define KVM_ARM64_EXCEPT_AA64_ELx_FIQ	(2 << 9)
-#define KVM_ARM64_EXCEPT_AA64_ELx_SERR	(3 << 9)
-#define KVM_ARM64_EXCEPT_AA64_EL1	(0 << 11)
-#define KVM_ARM64_EXCEPT_AA64_EL2	(1 << 11)
-
-#define KVM_ARM64_DEBUG_STATE_SAVE_SPE	(1 << 12) /* Save SPE context if active */
-#define KVM_ARM64_DEBUG_STATE_SAVE_TRBE	(1 << 13) /* Save TRBE context if active */
-#define KVM_ARM64_FP_FOREIGN_FPSTATE	(1 << 14)
-#define KVM_ARM64_ON_UNSUPPORTED_CPU	(1 << 15) /* Physical CPU not in supported_cpus */
-#define KVM_ARM64_HOST_SME_ENABLED	(1 << 16) /* SME enabled for EL0 */
-#define KVM_ARM64_WFIT			(1 << 17) /* WFIT instruction trapped */
-
 #define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
 				 KVM_GUESTDBG_USE_SW_BP | \
 				 KVM_GUESTDBG_USE_HW | \
 				 KVM_GUESTDBG_SINGLESTEP)

 #define vcpu_has_sve(vcpu) (system_supports_sve() &&			\
-			    ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))
+			    vcpu_get_flag(vcpu, GUEST_HAS_SVE))

 #ifdef CONFIG_ARM64_PTR_AUTH
 #define vcpu_has_ptrauth(vcpu)						\
 	((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||		\
 	  cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&		\
-	 (vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)
+	  vcpu_get_flag(vcpu, GUEST_HAS_PTRAUTH))
 #else
 #define vcpu_has_ptrauth(vcpu)		false
 #endif

 #define vcpu_on_unsupported_cpu(vcpu)					\
-	((vcpu)->arch.flags & KVM_ARM64_ON_UNSUPPORTED_CPU)
+	vcpu_get_flag(vcpu, ON_UNSUPPORTED_CPU)

 #define vcpu_set_on_unsupported_cpu(vcpu)				\
-	((vcpu)->arch.flags |= KVM_ARM64_ON_UNSUPPORTED_CPU)
+	vcpu_set_flag(vcpu, ON_UNSUPPORTED_CPU)

 #define vcpu_clear_on_unsupported_cpu(vcpu)				\
-	((vcpu)->arch.flags &= ~KVM_ARM64_ON_UNSUPPORTED_CPU)
+	vcpu_clear_flag(vcpu, ON_UNSUPPORTED_CPU)

 #define vcpu_gp_regs(v)		(&(v)->arch.ctxt.regs)

@@ -831,8 +925,7 @@ void kvm_init_protected_traps(struct kvm_vcpu *vcpu);
 int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
 bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

-#define kvm_arm_vcpu_sve_finalized(vcpu) \
-	((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)
+#define kvm_arm_vcpu_sve_finalized(vcpu) vcpu_get_flag(vcpu, VCPU_SVE_FINALIZED)

 #define kvm_has_mte(kvm)					\
 	(system_supports_mte() &&				\
@@ -242,7 +242,7 @@ static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
 static bool vcpu_has_wfit_active(struct kvm_vcpu *vcpu)
 {
 	return (cpus_have_final_cap(ARM64_HAS_WFXT) &&
-		(vcpu->arch.flags & KVM_ARM64_WFIT));
+		vcpu_get_flag(vcpu, IN_WFIT));
 }

 static u64 wfit_delay_ns(struct kvm_vcpu *vcpu)
@@ -330,6 +330,12 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)

 	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;

+	/*
+	 * Default value for the FP state, will be overloaded at load
+	 * time if we support FP (pretty likely)
+	 */
+	vcpu->arch.fp_state = FP_STATE_FREE;
+
 	/* Set up the timer */
 	kvm_timer_vcpu_init(vcpu);

@@ -659,7 +665,7 @@ void kvm_vcpu_wfi(struct kvm_vcpu *vcpu)
 	preempt_enable();

 	kvm_vcpu_halt(vcpu);
-	vcpu->arch.flags &= ~KVM_ARM64_WFIT;
+	vcpu_clear_flag(vcpu, IN_WFIT);
 	kvm_clear_request(KVM_REQ_UNHALT, vcpu);

 	preempt_disable();
@@ -1015,8 +1021,8 @@ out:
 	 * the vcpu state. Note that this relies on __kvm_adjust_pc()
 	 * being preempt-safe on VHE.
 	 */
-	if (unlikely(vcpu->arch.flags & (KVM_ARM64_PENDING_EXCEPTION |
-					 KVM_ARM64_INCREMENT_PC)))
+	if (unlikely(vcpu_get_flag(vcpu, PENDING_EXCEPTION) ||
+		     vcpu_get_flag(vcpu, INCREMENT_PC)))
 		kvm_call_hyp(__kvm_adjust_pc, vcpu);

 	vcpu_put(vcpu);

@@ -104,11 +104,11 @@ static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu)
 	 * Trap debug register access when one of the following is true:
 	 *  - Userspace is using the hardware to debug the guest
 	 *    (KVM_GUESTDBG_USE_HW is set).
-	 *  - The guest is not using debug (KVM_ARM64_DEBUG_DIRTY is clear).
+	 *  - The guest is not using debug (DEBUG_DIRTY clear).
 	 *  - The guest has enabled the OS Lock (debug exceptions are blocked).
 	 */
 	if ((vcpu->guest_debug & KVM_GUESTDBG_USE_HW) ||
-	    !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY) ||
+	    !vcpu_get_flag(vcpu, DEBUG_DIRTY) ||
 	    kvm_vcpu_os_lock_enabled(vcpu))
 		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;

@@ -147,8 +147,8 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
 * debug related registers.
 *
 * Additionally, KVM only traps guest accesses to the debug registers if
- * the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
- * flag on vcpu->arch.flags). Since the guest must not interfere
+ * the guest is not actively using them (see the DEBUG_DIRTY
+ * flag on vcpu->arch.iflags). Since the guest must not interfere
 * with the hardware state when debugging the guest, we must ensure that
 * trapping is enabled whenever we are debugging the guest using the
 * debug registers.

@@ -205,9 +205,8 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
 		 *
 		 * We simply switch the debug_ptr to point to our new
 		 * external_debug_state which has been populated by the
-		 * debug ioctl. The existing KVM_ARM64_DEBUG_DIRTY
-		 * mechanism ensures the registers are updated on the
-		 * world switch.
+		 * debug ioctl. The existing DEBUG_DIRTY mechanism ensures
+		 * the registers are updated on the world switch.
 		 */
 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
 			/* Enable breakpoints/watchpoints */
@@ -216,7 +215,7 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
 			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);

 			vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
-			vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
+			vcpu_set_flag(vcpu, DEBUG_DIRTY);

 			trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
 						 &vcpu->arch.debug_ptr->dbg_bcr[0],
@@ -246,7 +245,7 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)

 	/* If KDE or MDE are set, perform a full save/restore cycle. */
 	if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
-		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
+		vcpu_set_flag(vcpu, DEBUG_DIRTY);

 	/* Write mdcr_el2 changes since vcpu_load on VHE systems */
 	if (has_vhe() && orig_mdcr_el2 != vcpu->arch.mdcr_el2)
@@ -298,16 +297,16 @@ void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu)
 	 */
 	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_PMSVER_SHIFT) &&
 	    !(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(SYS_PMBIDR_EL1_P_SHIFT)))
-		vcpu->arch.flags |= KVM_ARM64_DEBUG_STATE_SAVE_SPE;
+		vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_SPE);

 	/* Check if we have TRBE implemented and available at the host */
 	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TRBE_SHIFT) &&
 	    !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_PROG))
-		vcpu->arch.flags |= KVM_ARM64_DEBUG_STATE_SAVE_TRBE;
+		vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_TRBE);
 }

 void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.flags &= ~(KVM_ARM64_DEBUG_STATE_SAVE_SPE |
-			      KVM_ARM64_DEBUG_STATE_SAVE_TRBE);
+	vcpu_clear_flag(vcpu, DEBUG_STATE_SAVE_SPE);
+	vcpu_clear_flag(vcpu, DEBUG_STATE_SAVE_TRBE);
 }
@@ -77,12 +77,14 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
 	BUG_ON(!current->mm);
 	BUG_ON(test_thread_flag(TIF_SVE));

-	vcpu->arch.flags &= ~KVM_ARM64_FP_ENABLED;
-	vcpu->arch.flags |= KVM_ARM64_FP_HOST;
+	if (!system_supports_fpsimd())
+		return;

-	vcpu->arch.flags &= ~KVM_ARM64_HOST_SVE_ENABLED;
+	vcpu->arch.fp_state = FP_STATE_HOST_OWNED;
+
+	vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
 	if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
-		vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED;
+		vcpu_set_flag(vcpu, HOST_SVE_ENABLED);

 	/*
 	 * We don't currently support SME guests but if we leave
@@ -94,29 +96,28 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
 	 * operations. Do this for ZA as well for now for simplicity.
 	 */
 	if (system_supports_sme()) {
-		vcpu->arch.flags &= ~KVM_ARM64_HOST_SME_ENABLED;
+		vcpu_clear_flag(vcpu, HOST_SME_ENABLED);
 		if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
-			vcpu->arch.flags |= KVM_ARM64_HOST_SME_ENABLED;
+			vcpu_set_flag(vcpu, HOST_SME_ENABLED);

-		if (read_sysreg_s(SYS_SVCR) &
-		    (SVCR_SM_MASK | SVCR_ZA_MASK)) {
-			vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
+		if (read_sysreg_s(SYS_SVCR) & (SVCR_SM_MASK | SVCR_ZA_MASK)) {
+			vcpu->arch.fp_state = FP_STATE_FREE;
 			fpsimd_save_and_flush_cpu_state();
 		}
 	}
 }

 /*
- * Called just before entering the guest once we are no longer
- * preemptable. Syncs the host's TIF_FOREIGN_FPSTATE with the KVM
- * mirror of the flag used by the hypervisor.
+ * Called just before entering the guest once we are no longer preemptable
+ * and interrupts are disabled. If we have managed to run anything using
+ * FP while we were preemptible (such as off the back of an interrupt),
+ * then neither the host nor the guest own the FP hardware (and it was the
+ * responsibility of the code that used FP to save the existing state).
 */
 void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu)
 {
 	if (test_thread_flag(TIF_FOREIGN_FPSTATE))
-		vcpu->arch.flags |= KVM_ARM64_FP_FOREIGN_FPSTATE;
-	else
-		vcpu->arch.flags &= ~KVM_ARM64_FP_FOREIGN_FPSTATE;
+		vcpu->arch.fp_state = FP_STATE_FREE;
 }

 /*
@@ -130,7 +131,7 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
 {
 	WARN_ON_ONCE(!irqs_disabled());

-	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
+	if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) {
 		/*
 		 * Currently we do not support SME guests so SVCR is
 		 * always 0 and we just need a variable to point to.
@@ -163,7 +164,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
 	 */
 	if (has_vhe() && system_supports_sme()) {
 		/* Also restore EL0 state seen on entry */
-		if (vcpu->arch.flags & KVM_ARM64_HOST_SME_ENABLED)
+		if (vcpu_get_flag(vcpu, HOST_SME_ENABLED))
 			sysreg_clear_set(CPACR_EL1, 0,
 					 CPACR_EL1_SMEN_EL0EN |
 					 CPACR_EL1_SMEN_EL1EN);
@@ -173,7 +174,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
 					 CPACR_EL1_SMEN_EL1EN);
 	}

-	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
+	if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) {
 		if (vcpu_has_sve(vcpu)) {
 			__vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);

@@ -192,7 +193,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
 	 * for EL0. To avoid spurious traps, restore the trap state
 	 * seen by kvm_arch_vcpu_load_fp():
 	 */
-	if (vcpu->arch.flags & KVM_ARM64_HOST_SVE_ENABLED)
+	if (vcpu_get_flag(vcpu, HOST_SVE_ENABLED))
 		sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);
 	else
 		sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
@@ -120,7 +120,7 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
 		kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
 	} else {
 		if (esr & ESR_ELx_WFx_ISS_WFxT)
-			vcpu->arch.flags |= KVM_ARM64_WFIT;
+			vcpu_set_flag(vcpu, IN_WFIT);

 		kvm_vcpu_wfi(vcpu);
 	}
@@ -303,14 +303,14 @@ static void enter_exception32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
 static void kvm_inject_exception(struct kvm_vcpu *vcpu)
 {
 	if (vcpu_el1_is_32bit(vcpu)) {
-		switch (vcpu->arch.flags & KVM_ARM64_EXCEPT_MASK) {
-		case KVM_ARM64_EXCEPT_AA32_UND:
+		switch (vcpu_get_flag(vcpu, EXCEPT_MASK)) {
+		case unpack_vcpu_flag(EXCEPT_AA32_UND):
 			enter_exception32(vcpu, PSR_AA32_MODE_UND, 4);
 			break;
-		case KVM_ARM64_EXCEPT_AA32_IABT:
+		case unpack_vcpu_flag(EXCEPT_AA32_IABT):
 			enter_exception32(vcpu, PSR_AA32_MODE_ABT, 12);
 			break;
-		case KVM_ARM64_EXCEPT_AA32_DABT:
+		case unpack_vcpu_flag(EXCEPT_AA32_DABT):
 			enter_exception32(vcpu, PSR_AA32_MODE_ABT, 16);
 			break;
 		default:
@@ -318,9 +318,8 @@ static void kvm_inject_exception(struct kvm_vcpu *vcpu)
 			break;
 		}
 	} else {
-		switch (vcpu->arch.flags & KVM_ARM64_EXCEPT_MASK) {
-		case (KVM_ARM64_EXCEPT_AA64_ELx_SYNC |
-		      KVM_ARM64_EXCEPT_AA64_EL1):
+		switch (vcpu_get_flag(vcpu, EXCEPT_MASK)) {
+		case unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC):
 			enter_exception64(vcpu, PSR_MODE_EL1h, except_type_sync);
 			break;
 		default:
@@ -340,12 +339,12 @@ static void kvm_inject_exception(struct kvm_vcpu *vcpu)
 */
 void __kvm_adjust_pc(struct kvm_vcpu *vcpu)
 {
-	if (vcpu->arch.flags & KVM_ARM64_PENDING_EXCEPTION) {
+	if (vcpu_get_flag(vcpu, PENDING_EXCEPTION)) {
 		kvm_inject_exception(vcpu);
-		vcpu->arch.flags &= ~(KVM_ARM64_PENDING_EXCEPTION |
-				      KVM_ARM64_EXCEPT_MASK);
-	} else if (vcpu->arch.flags & KVM_ARM64_INCREMENT_PC) {
+		vcpu_clear_flag(vcpu, PENDING_EXCEPTION);
+		vcpu_clear_flag(vcpu, EXCEPT_MASK);
+	} else if (vcpu_get_flag(vcpu, INCREMENT_PC)) {
 		kvm_skip_instr(vcpu);
-		vcpu->arch.flags &= ~KVM_ARM64_INCREMENT_PC;
+		vcpu_clear_flag(vcpu, INCREMENT_PC);
 	}
 }
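The switch statements above work because the exception target/type is a small integer stored in iflags bits [3:1]: EXCEPT_MASK is GENMASK(3, 1), __EXCEPT_SHIFT is the count of trailing zeros of that mask (1), so __vcpu_except_flags(n) encodes n << 1 under the mask. Since INCREMENT_PC is BIT(1), it deliberately overlaps that field, which is why a PC increment and a pending exception cannot be encoded at the same time. A small user-space check of the arithmetic (illustrative only, not part of the patch):

#include <stdio.h>

#define GENMASK(h, l)	(((1U << ((h) - (l) + 1)) - 1) << (l))

int main(void)
{
	unsigned int except_mask = GENMASK(3, 1);		/* 0b1110 */
	unsigned int shift = __builtin_ctz(except_mask);	/* 1 */

	/* e.g. EXCEPT_AA64_EL1_SYNC uses 0, EXCEPT_AA64_EL1_SERR uses 3,
	 * EXCEPT_AA64_EL2_SERR uses 7 in the table defined earlier. */
	for (unsigned int n = 0; n <= 7; n++)
		printf("__vcpu_except_flags(%u) -> value 0x%02x, mask 0x%02x\n",
		       n, n << shift, except_mask);
	return 0;
}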
@@ -132,7 +132,7 @@ static inline void __debug_switch_to_guest_common(struct kvm_vcpu *vcpu)
 	struct kvm_guest_debug_arch *host_dbg;
 	struct kvm_guest_debug_arch *guest_dbg;

-	if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
+	if (!vcpu_get_flag(vcpu, DEBUG_DIRTY))
 		return;

 	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
@@ -151,7 +151,7 @@ static inline void __debug_switch_to_host_common(struct kvm_vcpu *vcpu)
 	struct kvm_guest_debug_arch *host_dbg;
 	struct kvm_guest_debug_arch *guest_dbg;

-	if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
+	if (!vcpu_get_flag(vcpu, DEBUG_DIRTY))
 		return;

 	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
@@ -162,7 +162,7 @@ static inline void __debug_switch_to_host_common(struct kvm_vcpu *vcpu)
 	__debug_save_state(guest_dbg, guest_ctxt);
 	__debug_restore_state(host_dbg, host_ctxt);

-	vcpu->arch.flags &= ~KVM_ARM64_DEBUG_DIRTY;
+	vcpu_clear_flag(vcpu, DEBUG_DIRTY);
 }

 #endif /* __ARM64_KVM_HYP_DEBUG_SR_H__ */
@@ -37,22 +37,10 @@ struct kvm_exception_table_entry {
 extern struct kvm_exception_table_entry __start___kvm_ex_table;
 extern struct kvm_exception_table_entry __stop___kvm_ex_table;

-/* Check whether the FP regs were dirtied while in the host-side run loop: */
-static inline bool update_fp_enabled(struct kvm_vcpu *vcpu)
+/* Check whether the FP regs are owned by the guest */
+static inline bool guest_owns_fp_regs(struct kvm_vcpu *vcpu)
 {
-	/*
-	 * When the system doesn't support FP/SIMD, we cannot rely on
-	 * the _TIF_FOREIGN_FPSTATE flag. However, we always inject an
-	 * abort on the very first access to FP and thus we should never
-	 * see KVM_ARM64_FP_ENABLED. For added safety, make sure we always
-	 * trap the accesses.
-	 */
-	if (!system_supports_fpsimd() ||
-	    vcpu->arch.flags & KVM_ARM64_FP_FOREIGN_FPSTATE)
-		vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
-				      KVM_ARM64_FP_HOST);
-
-	return !!(vcpu->arch.flags & KVM_ARM64_FP_ENABLED);
+	return vcpu->arch.fp_state == FP_STATE_GUEST_OWNED;
 }

 /* Save the 32-bit only FPSIMD system register state */
@@ -191,10 +179,8 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
 	isb();

 	/* Write out the host state if it's in the registers */
-	if (vcpu->arch.flags & KVM_ARM64_FP_HOST) {
+	if (vcpu->arch.fp_state == FP_STATE_HOST_OWNED)
 		__fpsimd_save_state(vcpu->arch.host_fpsimd_state);
-		vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
-	}

 	/* Restore the guest state */
 	if (sve_guest)
@@ -206,7 +192,7 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
 	if (!(read_sysreg(hcr_el2) & HCR_RW))
 		write_sysreg(__vcpu_sys_reg(vcpu, FPEXC32_EL2), fpexc32_el2);

-	vcpu->arch.flags |= KVM_ARM64_FP_ENABLED;
+	vcpu->arch.fp_state = FP_STATE_GUEST_OWNED;

 	return true;
 }
@@ -195,7 +195,7 @@ static inline void __sysreg32_save_state(struct kvm_vcpu *vcpu)
 	__vcpu_sys_reg(vcpu, DACR32_EL2) = read_sysreg(dacr32_el2);
 	__vcpu_sys_reg(vcpu, IFSR32_EL2) = read_sysreg(ifsr32_el2);

-	if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
+	if (has_vhe() || vcpu_get_flag(vcpu, DEBUG_DIRTY))
 		__vcpu_sys_reg(vcpu, DBGVCR32_EL2) = read_sysreg(dbgvcr32_el2);
 }

@@ -212,7 +212,7 @@ static inline void __sysreg32_restore_state(struct kvm_vcpu *vcpu)
 	write_sysreg(__vcpu_sys_reg(vcpu, DACR32_EL2), dacr32_el2);
 	write_sysreg(__vcpu_sys_reg(vcpu, IFSR32_EL2), ifsr32_el2);

-	if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
+	if (has_vhe() || vcpu_get_flag(vcpu, DEBUG_DIRTY))
 		write_sysreg(__vcpu_sys_reg(vcpu, DBGVCR32_EL2), dbgvcr32_el2);
 }

@@ -84,10 +84,10 @@ static void __debug_restore_trace(u64 trfcr_el1)
 void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu)
 {
 	/* Disable and flush SPE data generation */
-	if (vcpu->arch.flags & KVM_ARM64_DEBUG_STATE_SAVE_SPE)
+	if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_SPE))
 		__debug_save_spe(&vcpu->arch.host_debug_state.pmscr_el1);
 	/* Disable and flush Self-Hosted Trace generation */
-	if (vcpu->arch.flags & KVM_ARM64_DEBUG_STATE_SAVE_TRBE)
+	if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_TRBE))
 		__debug_save_trace(&vcpu->arch.host_debug_state.trfcr_el1);
 }

@@ -98,9 +98,9 @@ void __debug_switch_to_guest(struct kvm_vcpu *vcpu)

 void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu)
 {
-	if (vcpu->arch.flags & KVM_ARM64_DEBUG_STATE_SAVE_SPE)
+	if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_SPE))
 		__debug_restore_spe(vcpu->arch.host_debug_state.pmscr_el1);
-	if (vcpu->arch.flags & KVM_ARM64_DEBUG_STATE_SAVE_TRBE)
+	if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_TRBE))
 		__debug_restore_trace(vcpu->arch.host_debug_state.trfcr_el1);
 }

@@ -43,7 +43,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu)

 	val = vcpu->arch.cptr_el2;
 	val |= CPTR_EL2_TTA | CPTR_EL2_TAM;
-	if (!update_fp_enabled(vcpu)) {
+	if (!guest_owns_fp_regs(vcpu)) {
 		val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
 		__activate_traps_fpsimd32(vcpu);
 	}
@@ -123,7 +123,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
 	}

 	cptr = CPTR_EL2_DEFAULT;
-	if (vcpu_has_sve(vcpu) && (vcpu->arch.flags & KVM_ARM64_FP_ENABLED))
+	if (vcpu_has_sve(vcpu) && (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED))
 		cptr |= CPTR_EL2_TZ;
 	if (cpus_have_final_cap(ARM64_SME))
 		cptr &= ~CPTR_EL2_TSM;
@@ -335,7 +335,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)

 	__sysreg_restore_state_nvhe(host_ctxt);

-	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
+	if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED)
 		__fpsimd_save_fpexc32(vcpu);

 	__debug_switch_to_host(vcpu);

@@ -38,9 +38,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
 	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
 	*vcpu_cpsr(vcpu) = read_sysreg_el2(SYS_SPSR);

-	vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1 |
-			     KVM_ARM64_EXCEPT_AA64_ELx_SYNC |
-			     KVM_ARM64_PENDING_EXCEPTION);
+	kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);

 	__kvm_adjust_pc(vcpu);

@@ -55,7 +55,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu)

 	val |= CPTR_EL2_TAM;

-	if (update_fp_enabled(vcpu)) {
+	if (guest_owns_fp_regs(vcpu)) {
 		if (vcpu_has_sve(vcpu))
 			val |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
 	} else {
@@ -175,7 +175,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)

 	sysreg_restore_host_state_vhe(host_ctxt);

-	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
+	if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED)
 		__fpsimd_save_fpexc32(vcpu);

 	__debug_switch_to_host(vcpu);

@@ -79,7 +79,7 @@ void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu)
 	__sysreg_restore_user_state(guest_ctxt);
 	__sysreg_restore_el1_state(guest_ctxt);

-	vcpu->arch.sysregs_loaded_on_cpu = true;
+	vcpu_set_flag(vcpu, SYSREGS_ON_CPU);

 	activate_traps_vhe_load(vcpu);
 }
@@ -110,5 +110,5 @@ void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu)
 	/* Restore host user state */
 	__sysreg_restore_user_state(host_ctxt);

-	vcpu->arch.sysregs_loaded_on_cpu = false;
+	vcpu_clear_flag(vcpu, SYSREGS_ON_CPU);
 }
@@ -20,9 +20,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
 	bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
 	u64 esr = 0;

-	vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1 |
-			     KVM_ARM64_EXCEPT_AA64_ELx_SYNC |
-			     KVM_ARM64_PENDING_EXCEPTION);
+	kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);

 	vcpu_write_sys_reg(vcpu, addr, FAR_EL1);

@@ -52,9 +50,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
 {
 	u64 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);

-	vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1 |
-			     KVM_ARM64_EXCEPT_AA64_ELx_SYNC |
-			     KVM_ARM64_PENDING_EXCEPTION);
+	kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);

 	/*
 	 * Build an unknown exception, depending on the instruction
@@ -73,8 +69,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)

 static void inject_undef32(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA32_UND |
-			     KVM_ARM64_PENDING_EXCEPTION);
+	kvm_pend_exception(vcpu, EXCEPT_AA32_UND);
 }

 /*
@@ -97,14 +92,12 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, u32 addr)
 	far = vcpu_read_sys_reg(vcpu, FAR_EL1);

 	if (is_pabt) {
-		vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA32_IABT |
-				     KVM_ARM64_PENDING_EXCEPTION);
+		kvm_pend_exception(vcpu, EXCEPT_AA32_IABT);
 		far &= GENMASK(31, 0);
 		far |= (u64)addr << 32;
 		vcpu_write_sys_reg(vcpu, fsr, IFSR32_EL2);
 	} else { /* !iabt */
-		vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA32_DABT |
-				     KVM_ARM64_PENDING_EXCEPTION);
+		kvm_pend_exception(vcpu, EXCEPT_AA32_DABT);
 		far &= GENMASK(63, 32);
 		far |= addr;
 		vcpu_write_sys_reg(vcpu, fsr, ESR_EL1);
@@ -81,7 +81,7 @@ static int kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
 	 * KVM_REG_ARM64_SVE_VLS. Allocation is deferred until
 	 * kvm_arm_vcpu_finalize(), which freezes the configuration.
 	 */
-	vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_SVE;
+	vcpu_set_flag(vcpu, GUEST_HAS_SVE);

 	return 0;
 }
@@ -120,7 +120,7 @@ static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
 	}

 	vcpu->arch.sve_state = buf;
-	vcpu->arch.flags |= KVM_ARM64_VCPU_SVE_FINALIZED;
+	vcpu_set_flag(vcpu, VCPU_SVE_FINALIZED);
 	return 0;
 }

@@ -177,7 +177,7 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
 	    !system_has_full_ptr_auth())
 		return -EINVAL;

-	vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_PTRAUTH;
+	vcpu_set_flag(vcpu, GUEST_HAS_PTRAUTH);
 	return 0;
 }

@@ -72,7 +72,7 @@ u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
 {
 	u64 val = 0x8badf00d8badf00d;

-	if (vcpu->arch.sysregs_loaded_on_cpu &&
+	if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
 	    __vcpu_read_sys_reg_from_cpu(reg, &val))
 		return val;

@@ -81,7 +81,7 @@ u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)

 void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
 {
-	if (vcpu->arch.sysregs_loaded_on_cpu &&
+	if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
 	    __vcpu_write_sys_reg_to_cpu(val, reg))
 		return;

@@ -387,7 +387,7 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu,
 {
 	if (p->is_write) {
 		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
-		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
+		vcpu_set_flag(vcpu, DEBUG_DIRTY);
 	} else {
 		p->regval = vcpu_read_sys_reg(vcpu, r->reg);
 	}
@@ -403,8 +403,8 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu,
 * A 32 bit write to a debug register leave top bits alone
 * A 32 bit read from a debug register only returns the bottom bits
 *
- * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
- * hyp.S code switches between host and guest values in future.
+ * All writes will set the DEBUG_DIRTY flag to ensure the hyp code
+ * switches between host and guest values in future.
 */
 static void reg_to_dbg(struct kvm_vcpu *vcpu,
 		       struct sys_reg_params *p,
@@ -420,7 +420,7 @@ static void reg_to_dbg(struct kvm_vcpu *vcpu,
 	val |= (p->regval & (mask >> shift)) << shift;
 	*dbg_reg = val;

-	vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
+	vcpu_set_flag(vcpu, DEBUG_DIRTY);
 }

 static void dbg_to_reg(struct kvm_vcpu *vcpu,