KVM: arm64: Provide KVM's own save/restore SVE primitives

As we are about to change the way KVM deals with SVE, provide
KVM with its own save/restore SVE primitives.

No functional change intended.

Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Marc Zyngier 2021-03-11 11:52:38 +00:00
parent a38fd87484
commit 297b8603e3
4 changed files with 19 additions and 5 deletions


@@ -6,6 +6,8 @@
  * Author: Catalin Marinas <catalin.marinas@arm.com>
  */
 
+#include <asm/assembler.h>
+
 .macro fpsimd_save state, tmpnr
 	stp	q0, q1, [\state, #16 * 0]
 	stp	q2, q3, [\state, #16 * 2]


@@ -85,6 +85,8 @@ void __debug_switch_to_host(struct kvm_vcpu *vcpu);
 void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
 void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
+void __sve_save_state(void *sve_pffr, u32 *fpsr);
+void __sve_restore_state(void *sve_pffr, u32 *fpsr, unsigned int vqminus1);
 
 #ifndef __KVM_NVHE_HYPERVISOR__
 void activate_traps_vhe_load(struct kvm_vcpu *vcpu);


@@ -19,3 +19,13 @@ SYM_FUNC_START(__fpsimd_restore_state)
 	fpsimd_restore x0, 1
 	ret
 SYM_FUNC_END(__fpsimd_restore_state)
+
+SYM_FUNC_START(__sve_restore_state)
+	sve_load 0, x1, x2, 3, x4
+	ret
+SYM_FUNC_END(__sve_restore_state)
+
+SYM_FUNC_START(__sve_save_state)
+	sve_save 0, x1, 2
+	ret
+SYM_FUNC_END(__sve_save_state)
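
Note (not part of the patch): the two stubs above are thin wrappers around the existing sve_save/sve_load assembler macros from fpsimdmacros.h. As far as I can tell from the operands, the leading 0 names x0 as the base register holding the SVE register storage pointer, x1 carries the fpsr pointer, x2 carries the VQ - 1 value on the load path, and the remaining operands name scratch registers. Combined with the AAPCS64 argument registers, the C-side contract of the new primitives (as declared in kvm_hyp.h above) reads roughly as follows; the register comments are an illustrative reading, not text from the patch:

	#include <linux/types.h>	/* u32 */

	/* x0 = sve_pffr: base of the Z/P/FFR save area; x1 = &fpsr */
	void __sve_save_state(void *sve_pffr, u32 *fpsr);

	/* x0 = sve_pffr; x1 = &fpsr; x2 = vector length expressed as VQ - 1 */
	void __sve_restore_state(void *sve_pffr, u32 *fpsr, unsigned int vqminus1);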


@@ -256,8 +256,8 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 				vcpu->arch.host_fpsimd_state,
 				struct thread_struct, uw.fpsimd_state);
 
-		sve_save_state(sve_pffr(thread),
-			       &vcpu->arch.host_fpsimd_state->fpsr);
+		__sve_save_state(sve_pffr(thread),
+				 &vcpu->arch.host_fpsimd_state->fpsr);
 	} else {
 		__fpsimd_save_state(vcpu->arch.host_fpsimd_state);
 	}
@@ -266,9 +266,9 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 	}
 
 	if (sve_guest) {
-		sve_load_state(vcpu_sve_pffr(vcpu),
-			       &vcpu->arch.ctxt.fp_regs.fpsr,
-			       sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1);
+		__sve_restore_state(vcpu_sve_pffr(vcpu),
+				    &vcpu->arch.ctxt.fp_regs.fpsr,
+				    sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1);
 		write_sysreg_s(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR_EL12);
 	} else {
 		__fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);
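
Note (not part of the patch): the vqminus1 argument comes from sve_vq_from_vl(), which converts a vector length in bytes into the number of 128-bit (16-byte) SVE vector quanta; the caller subtracts one because, as I understand it, that matches how the ZCR_ELx.LEN field encodes the vector length. A minimal, self-contained sketch of the arithmetic, assuming the 16-byte quantum (the example_* names are local stand-ins, not kernel code):

	#include <stdio.h>

	#define EXAMPLE_VQ_BYTES	16			/* one SVE vector quantum = 128 bits */
	#define example_vq_from_vl(vl)	((vl) / EXAMPLE_VQ_BYTES)

	int main(void)
	{
		unsigned int sve_max_vl = 64;	/* a 512-bit maximum vector length, in bytes */

		/* 64 / 16 = 4 quanta, so the VQ - 1 value passed to __sve_restore_state() is 3 */
		printf("vqminus1 = %u\n", example_vq_from_vl(sve_max_vl) - 1);
		return 0;
	}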