// SPDX-License-Identifier: GPL-2.0
/*
 * arch/arm64/kvm/fpsimd.c: Guest/host FPSIMD context coordination helpers
 *
 * Copyright 2018 Arm Limited
 * Author: Dave Martin <Dave.Martin@arm.com>
 */
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/kvm_host.h>
#include <asm/fpsimd.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/sysreg.h>

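/*
 * Unshare the parent task's FPSIMD state from hyp and drop the task
 * reference pinned by kvm_arch_vcpu_run_map_fp() -- see the matching
 * get_task_struct() there. Only relevant to protected KVM, where host
 * memory shared with hyp must be unshared again before it can be reused.
 */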
void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu)
{
	struct task_struct *p = vcpu->arch.parent_task;
	struct user_fpsimd_state *fpsimd;

	if (!is_protected_kvm_enabled() || !p)
		return;

	fpsimd = &p->thread.uw.fpsimd_state;
	kvm_unshare_hyp(fpsimd, fpsimd + 1);
	put_task_struct(p);
}

/*
 * Called on entry to KVM_RUN unless this vcpu previously ran at least
 * once and the most recent prior KVM_RUN for this vcpu was called from
 * the same task as current (highly likely).
 *
 * This is guaranteed to execute before kvm_arch_vcpu_load_fp(vcpu),
 * such that on entering hyp the relevant parts of current are already
 * mapped.
 */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
{
	int ret;

	struct user_fpsimd_state *fpsimd = &current->thread.uw.fpsimd_state;

	kvm_vcpu_unshare_task_fp(vcpu);

	/* Make sure the host task fpsimd state is visible to hyp: */
	ret = kvm_share_hyp(fpsimd, fpsimd + 1);
	if (ret)
		return ret;

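	/*
	 * Hyp accesses the shared state through its own VA range, so
	 * record the hyp-VA alias of the host FPSIMD state.
	 */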
	vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd);

	/*
	 * We need to keep current's task_struct pinned until its data has been
	 * unshared with the hypervisor to make sure it is not re-used by the
	 * kernel and donated to someone else while already shared -- see
	 * kvm_vcpu_unshare_task_fp() for the matching put_task_struct().
	 */
	if (is_protected_kvm_enabled()) {
		get_task_struct(current);
		vcpu->arch.parent_task = current;
	}

	return 0;
}

/*
 * Prepare vcpu for saving the host's FPSIMD state and loading the guest's.
 * The actual loading is done by the FPSIMD access trap taken to hyp.
 *
 * Here, we just set the correct metadata to indicate that the FPSIMD
 * state in the cpu regs (if any) belongs to current on the host.
 */
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
{
	BUG_ON(!current->mm);

	if (!system_supports_fpsimd())
		return;

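	/*
	 * Have the host FP code put current's state into a shape hyp
	 * can save: fpsimd_kvm_prepare() retires any live SVE state for
	 * current down to plain FPSIMD before we run the guest.
	 */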
	fpsimd_kvm_prepare();

	/*
	 * We will check TIF_FOREIGN_FPSTATE just before entering the
	 * guest in kvm_arch_vcpu_ctxflush_fp() and override this to
	 * FP_STATE_FREE if the flag is set.
	 */
	vcpu->arch.fp_state = FP_STATE_HOST_OWNED;

	vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
	if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
		vcpu_set_flag(vcpu, HOST_SVE_ENABLED);

	if (system_supports_sme()) {
		vcpu_clear_flag(vcpu, HOST_SME_ENABLED);
		if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
			vcpu_set_flag(vcpu, HOST_SME_ENABLED);

		/*
		 * If PSTATE.SM is enabled then save any pending FP
		 * state and disable PSTATE.SM. If we leave PSTATE.SM
		 * enabled and the guest does not enable SME via
		 * CPACR_EL1.SMEN then operations that should be valid
		 * may generate SME traps from EL1 to EL1 which we
		 * can't intercept and which would confuse the guest.
		 *
		 * Do the same for PSTATE.ZA in the case where there
		 * is state in the registers which has not already
		 * been saved; this is very unlikely to happen.
		 */
		if (read_sysreg_s(SYS_SVCR) & (SVCR_SM_MASK | SVCR_ZA_MASK)) {
			vcpu->arch.fp_state = FP_STATE_FREE;
			fpsimd_save_and_flush_cpu_state();
		}
	}
}

/*
 * Called just before entering the guest once we are no longer preemptible
 * and interrupts are disabled. If we have managed to run anything using
 * FP while we were preemptible (such as off the back of an interrupt),
 * then neither the host nor the guest own the FP hardware (and it was the
 * responsibility of the code that used FP to save the existing state).
 */
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu)
{
	if (test_thread_flag(TIF_FOREIGN_FPSTATE))
		vcpu->arch.fp_state = FP_STATE_FREE;
}

/*
 * Called just after exiting the guest. If the guest FPSIMD state
 * was loaded, update the host's context tracking data to mark the CPU
 * FPSIMD regs as dirty and belonging to vcpu so that they will be
 * written back if the kernel clobbers them due to kernel-mode NEON
 * before re-entry into the guest.
 */
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
{
	struct cpu_fp_state fp_state;

	WARN_ON_ONCE(!irqs_disabled());

	if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) {
		/*
		 * Currently we do not support SME guests so SVCR is
		 * always 0 and we just need a variable to point to.
		 */
		fp_state.st = &vcpu->arch.ctxt.fp_regs;
		fp_state.sve_state = vcpu->arch.sve_state;
		fp_state.sve_vl = vcpu->arch.sve_max_vl;
		fp_state.sme_state = NULL;
		fp_state.svcr = &vcpu->arch.svcr;
		fp_state.fp_type = &vcpu->arch.fp_type;

		if (vcpu_has_sve(vcpu))
			fp_state.to_save = FP_STATE_SVE;
		else
			fp_state.to_save = FP_STATE_FPSIMD;

		fpsimd_bind_state_to_cpu(&fp_state);

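		/*
		 * The CPU regs are live with the state bound above, so
		 * clear TIF_FOREIGN_FPSTATE to tell the host to write
		 * them back (e.g. around kernel-mode NEON) instead of
		 * treating them as stale.
		 */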
		clear_thread_flag(TIF_FOREIGN_FPSTATE);
	}
}

/*
 * Write back the vcpu FPSIMD regs if they are dirty, and invalidate the
 * cpu FPSIMD regs so that they can't be spuriously reused if this vcpu
 * disappears and another task or vcpu appears that recycles the same
 * struct fpsimd_state.
 */
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
{
	unsigned long flags;

	local_irq_save(flags);

	/*
	 * If we have VHE then the Hyp code will reset CPACR_EL1 to
	 * the default value and we need to reenable SME.
	 */
	if (has_vhe() && system_supports_sme()) {
		/* Also restore EL0 state seen on entry */
		if (vcpu_get_flag(vcpu, HOST_SME_ENABLED))
			sysreg_clear_set(CPACR_EL1, 0,
					 CPACR_EL1_SMEN_EL0EN |
					 CPACR_EL1_SMEN_EL1EN);
		else
			sysreg_clear_set(CPACR_EL1,
					 CPACR_EL1_SMEN_EL0EN,
					 CPACR_EL1_SMEN_EL1EN);
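		/*
		 * Make sure the CPACR_EL1 update has taken effect
		 * before anything that relies on the restored SME
		 * trap configuration can run.
		 */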
		isb();
	}

	if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) {
		if (vcpu_has_sve(vcpu)) {
			__vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);

			/* Restore the VL that was saved when bound to the CPU */
			if (!has_vhe())
				sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1,
						       SYS_ZCR_EL1);
		}

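		/* Write the guest's regs back and invalidate the CPU binding */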
		fpsimd_save_and_flush_cpu_state();
	} else if (has_vhe() && system_supports_sve()) {
		/*
		 * The FPSIMD/SVE state in the CPU has not been touched, and we
		 * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
		 * reset by kvm_reset_cptr_el2() in the Hyp code, disabling SVE
		 * for EL0. To avoid spurious traps, restore the trap state
		 * seen by kvm_arch_vcpu_load_fp():
		 */
		if (vcpu_get_flag(vcpu, HOST_SVE_ENABLED))
			sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);
		else
			sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
	}

	local_irq_restore(flags);
}