KVM: arm64: Eagerly restore host fpsimd/sve state in pKVM
When running in protected mode we don't want to leak protected
guest state to the host, including whether a guest has used
fpsimd/sve. Therefore, eagerly restore the host state on guest
exit when running in protected mode, which happens only if the
guest has used fpsimd/sve.

Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
Signed-off-by: Fuad Tabba <tabba@google.com>
Link: https://lore.kernel.org/r/20240603122852.3923848-7-tabba@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
commit b5b9955617
parent 66d5b53e20
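In short: with pKVM enabled, the hypervisor now saves the guest's
fpsimd/sve state and eagerly restores the host's on guest exit, instead
of letting the host do a deferred restore (which would reveal whether
the guest used fpsimd/sve). A condensed sketch of the resulting flow
(hypothetical driver function; the real code is spread across the hunks
below):

        /* Sketch only: condensed pKVM vcpu-run path at EL2. */
        static void pkvm_vcpu_run_sketch(struct pkvm_hyp_vcpu *hyp_vcpu)
        {
                fpsimd_sve_flush();     /* entry: host owns the FP regs */

                __kvm_vcpu_run(&hyp_vcpu->vcpu);
                /*
                 * If the guest trapped on an FP/SVE access,
                 * kvm_hyp_save_fpsimd_host() saved the host state at hyp
                 * and the guest took ownership of the registers.
                 */

                fpsimd_sve_sync(&hyp_vcpu->vcpu);
                /*
                 * exit: save the guest's FP/SVE state, then restore the
                 * host's before returning, so the host never learns
                 * whether the guest touched fpsimd/sve.
                 */
        }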
arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -321,6 +321,17 @@ static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
 	write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
 }
 
+static inline void __hyp_sve_save_host(void)
+{
+	struct cpu_sve_state *sve_state = *host_data_ptr(sve_state);
+
+	sve_state->zcr_el1 = read_sysreg_el1(SYS_ZCR);
+	write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
+	__sve_save_state(sve_state->sve_regs + sve_ffr_offset(kvm_host_sve_max_vl),
+			 &sve_state->fpsr,
+			 true);
+}
+
 static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu);
 
 /*
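For reference, __hyp_sve_save_host() fills the per-CPU cpu_sve_state
buffer introduced by the parent commit 66d5b53e20. Its layout is roughly
as follows (abridged sketch; see the parent commit for the authoritative
definition):

        struct cpu_sve_state {
                u64 zcr_el1;

                /*
                 * Ordering matters: __sve_save_state()/__sve_restore_state()
                 * store FPSR and FPCR together through the &fpsr pointer.
                 */
                u32 fpsr;
                u32 fpcr;

                /* Z/P/FFR payload, sized for the host's maximum VL. */
                char sve_regs[];
        };

Worked size example (assuming a host maximum VL of 2048 bits, VQ = 16):
32 Z registers at 256 bytes plus 16 P registers at 32 bytes plus one FFR
record at 32 bytes gives 8736 bytes of sve_regs[], with the FFR record -
the address computed by sve_ffr_offset(kvm_host_sve_max_vl) - at offset
8704.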
@@ -355,7 +366,7 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
 	/* Valid trap. Switch the context: */
 
 	/* First disable enough traps to allow us to update the registers */
-	if (sve_guest)
+	if (sve_guest || (is_protected_kvm_enabled() && system_supports_sve()))
 		cpacr_clear_set(0, CPACR_ELx_FPEN | CPACR_ELx_ZEN);
 	else
 		cpacr_clear_set(0, CPACR_ELx_FPEN);
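The widened condition is needed because, with pKVM on an SVE-capable
system, the hypervisor itself executes SVE save/restore instructions on
the host's behalf even when the guest has no SVE, so the ZEN enable bits
must be open before the context switch. Restated as a standalone
predicate (sketch only, not kernel code):

        static bool need_sve_enabled_at_hyp(bool sve_guest)
        {
                /*
                 * Either the guest uses SVE, or protected mode will run
                 * __hyp_sve_save_host()/__hyp_sve_restore_host() and must
                 * not trap on their SVE instructions or ZCR_EL2 writes.
                 */
                return sve_guest ||
                       (is_protected_kvm_enabled() && system_supports_sve());
        }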
arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -23,20 +23,80 @@ DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
 
 void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);
 
+static void __hyp_sve_save_guest(struct kvm_vcpu *vcpu)
+{
+	__vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
+	/*
+	 * On saving/restoring guest sve state, always use the maximum VL for
+	 * the guest. The layout of the data when saving the sve state depends
+	 * on the VL, so use a consistent (i.e., the maximum) guest VL.
+	 */
+	sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
+	__sve_save_state(vcpu_sve_pffr(vcpu), &vcpu->arch.ctxt.fp_regs.fpsr, true);
+	write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
+}
+
+static void __hyp_sve_restore_host(void)
+{
+	struct cpu_sve_state *sve_state = *host_data_ptr(sve_state);
+
+	/*
+	 * On saving/restoring host sve state, always use the maximum VL for
+	 * the host. The layout of the data when saving the sve state depends
+	 * on the VL, so use a consistent (i.e., the maximum) host VL.
+	 *
+	 * Setting ZCR_EL2 to ZCR_ELx_LEN_MASK sets the effective length
+	 * supported by the system (or limited at EL3).
+	 */
+	write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
+	__sve_restore_state(sve_state->sve_regs + sve_ffr_offset(kvm_host_sve_max_vl),
+			    &sve_state->fpsr,
+			    true);
+	write_sysreg_el1(sve_state->zcr_el1, SYS_ZCR);
+}
+
+static void fpsimd_sve_flush(void)
+{
+	*host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
+}
+
+static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
+{
+	if (!guest_owns_fp_regs())
+		return;
+
+	cpacr_clear_set(0, CPACR_ELx_FPEN | CPACR_ELx_ZEN);
+	isb();
+
+	if (vcpu_has_sve(vcpu))
+		__hyp_sve_save_guest(vcpu);
+	else
+		__fpsimd_save_state(&vcpu->arch.ctxt.fp_regs);
+
+	if (system_supports_sve())
+		__hyp_sve_restore_host();
+	else
+		__fpsimd_restore_state(*host_data_ptr(fpsimd_state));
+
+	*host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
+}
+
 static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
 {
 	struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
 
+	fpsimd_sve_flush();
+
 	hyp_vcpu->vcpu.arch.ctxt = host_vcpu->arch.ctxt;
 
 	hyp_vcpu->vcpu.arch.sve_state = kern_hyp_va(host_vcpu->arch.sve_state);
-	hyp_vcpu->vcpu.arch.sve_max_vl = host_vcpu->arch.sve_max_vl;
+	/* Limit guest vector length to the maximum supported by the host. */
+	hyp_vcpu->vcpu.arch.sve_max_vl = min(host_vcpu->arch.sve_max_vl, kvm_host_sve_max_vl);
 
 	hyp_vcpu->vcpu.arch.hw_mmu = host_vcpu->arch.hw_mmu;
 
 	hyp_vcpu->vcpu.arch.hcr_el2 = host_vcpu->arch.hcr_el2;
 	hyp_vcpu->vcpu.arch.mdcr_el2 = host_vcpu->arch.mdcr_el2;
-	hyp_vcpu->vcpu.arch.cptr_el2 = host_vcpu->arch.cptr_el2;
 
 	hyp_vcpu->vcpu.arch.iflags = host_vcpu->arch.iflags;
 
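A note on the double indirection *host_data_ptr(sve_state) used above:
host_data_ptr() resolves a field of the per-CPU kvm_host_data structure,
and the sve_state field is itself a pointer to the hyp-mapped save area.
An abridged, hypothetical view of the relevant fields (the real struct
lives in asm/kvm_host.h and has more members):

        struct kvm_host_data {
                struct kvm_cpu_context host_ctxt;
                struct user_fpsimd_state *fpsimd_state; /* hyp VA */
                struct cpu_sve_state *sve_state;        /* hyp VA */
                enum fp_state fp_owner; /* who owns the FP regs */
        };

Also note the isb() in fpsimd_sve_sync(): the cpacr_clear_set() write
must be synchronized before the FP/SVE register accesses that follow,
or those accesses could still trap.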
@@ -54,10 +114,11 @@ static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
 	struct vgic_v3_cpu_if *host_cpu_if = &host_vcpu->arch.vgic_cpu.vgic_v3;
 	unsigned int i;
 
+	fpsimd_sve_sync(&hyp_vcpu->vcpu);
+
 	host_vcpu->arch.ctxt = hyp_vcpu->vcpu.arch.ctxt;
 
 	host_vcpu->arch.hcr_el2 = hyp_vcpu->vcpu.arch.hcr_el2;
-	host_vcpu->arch.cptr_el2 = hyp_vcpu->vcpu.arch.cptr_el2;
 
 	host_vcpu->arch.fault = hyp_vcpu->vcpu.arch.fault;
 
arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -588,6 +588,8 @@ unlock:
 	if (ret)
 		unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));
 
+	hyp_vcpu->vcpu.arch.cptr_el2 = kvm_get_reset_cptr_el2(&hyp_vcpu->vcpu);
+
 	return ret;
 }
 
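Design note: since flush_hyp_vcpu()/sync_hyp_vcpu() no longer copy
cptr_el2 between the host and hyp vcpus (see the hyp-main.c hunks
above), the hyp vcpu's cptr_el2 is instead set once at vcpu
initialisation to its reset value via kvm_get_reset_cptr_el2().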
arch/arm64/kvm/hyp/nvhe/switch.c
@@ -184,7 +184,21 @@ static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code)
 
 static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
 {
-	__fpsimd_save_state(*host_data_ptr(fpsimd_state));
+	/*
+	 * Non-protected kvm relies on the host restoring its sve state.
+	 * Protected kvm restores the host's sve state as not to reveal that
+	 * fpsimd was used by a guest nor leak upper sve bits.
+	 */
+	if (unlikely(is_protected_kvm_enabled() && system_supports_sve())) {
+		__hyp_sve_save_host();
+
+		/* Re-enable SVE traps if not supported for the guest vcpu. */
+		if (!vcpu_has_sve(vcpu))
+			cpacr_clear_set(CPACR_ELx_ZEN, 0);
+
+	} else {
+		__fpsimd_save_state(*host_data_ptr(fpsimd_state));
+	}
 }
 
 static const exit_handler_fn hyp_exit_handlers[] = {
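Taken together with the switch.h hunks, the sequence on an FP/SVE trap
from a protected guest is: open FPEN/ZEN, save the host's SVE state at
the maximum VL (ZCR_EL2 set to ZCR_ELx_LEN_MASK) so the save-buffer
layout is fixed, then re-arm the SVE trap for vcpus without SVE so the
guest still cannot touch SVE state or observe the widened vector length.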