KVM/arm64 fixes for 6.3, part #3
Merge tag 'kvmarm-fixes-6.3-3' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm64 fixes for 6.3, part #3

- Ensure the guest PMU context is restored before the first KVM_RUN,
  fixing an issue where EL0 event counting is broken after vCPU
  save/restore

- Actually initialize ID_AA64PFR0_EL1.{CSV2,CSV3} based on the
  sanitized, system-wide values for protected VMs

-----BEGIN PGP SIGNATURE-----

iHUEABYIAB0WIQSNXHjWXuzMZutrKNKivnWIJHzdFgUCZC2ZbwAKCRCivnWIJHzd
FvovAP9aooqUBBs8w2myh8SXCv7dJOg88r6fKS5vCqMdkY7OhwD9GXA7hWU2dXdy
X1L8qq6C7R+GtIY/kDm9E2HkNKg7rQc=
=nfXd
-----END PGP SIGNATURE-----
commit 0bf9601f8e
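For readers skimming the hunks below: the CSV2/CSV3 change is built entirely out of the kernel's ID-register bitfield idiom (ARM64_FEATURE_MASK plus FIELD_PREP from linux/bitfield.h). Here is a minimal standalone sketch of that idiom, with hand-rolled stand-ins for the kernel macros; the only hard facts used are the architectural field positions (CSV2 at bits [59:56], CSV3 at bits [63:60] of ID_AA64PFR0_EL1), everything else is illustrative.

#include <stdint.h>
#include <stdio.h>

/* Hand-rolled stand-ins for ARM64_FEATURE_MASK()/FIELD_PREP(); the real
 * macros live in the arm64 headers and linux/bitfield.h. ID register
 * fields are 4 bits wide. */
#define CSV2_SHIFT 56 /* ID_AA64PFR0_EL1.CSV2, bits [59:56] */
#define CSV3_SHIFT 60 /* ID_AA64PFR0_EL1.CSV3, bits [63:60] */
#define FIELD_MASK(shift)   (0xfULL << (shift))
#define FIELD_SET(shift, v) (((uint64_t)(v) & 0xf) << (shift))

int main(void)
{
        uint64_t val = 0x0000000000000011ULL; /* pretend sanitised reg */
        int unaffected = 1; /* e.g. arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED */

        /* Clear both fields, then fill them from the mitigation state,
         * mirroring what get_hyp_id_aa64pfr0_el1() does below. */
        val &= ~(FIELD_MASK(CSV2_SHIFT) | FIELD_MASK(CSV3_SHIFT));
        val |= FIELD_SET(CSV2_SHIFT, unaffected);
        val |= FIELD_SET(CSV3_SHIFT, unaffected);

        printf("ID_AA64PFR0_EL1 view = %#llx\n", (unsigned long long)val);
        return 0;
}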
@@ -1890,9 +1890,33 @@ static int __init do_pkvm_init(u32 hyp_va_bits)
         return ret;
 }
 
+static u64 get_hyp_id_aa64pfr0_el1(void)
+{
+        /*
+         * Track whether the system isn't affected by spectre/meltdown in the
+         * hypervisor's view of id_aa64pfr0_el1, used for protected VMs.
+         * Although this is per-CPU, we make it global for simplicity, e.g., not
+         * to have to worry about vcpu migration.
+         *
+         * Unlike for non-protected VMs, userspace cannot override this for
+         * protected VMs.
+         */
+        u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+
+        val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
+                 ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));
+
+        val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
+                          arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED);
+        val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
+                          arm64_get_meltdown_state() == SPECTRE_UNAFFECTED);
+
+        return val;
+}
+
 static void kvm_hyp_init_symbols(void)
 {
-        kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+        kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = get_hyp_id_aa64pfr0_el1();
         kvm_nvhe_sym(id_aa64pfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
         kvm_nvhe_sym(id_aa64isar0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR0_EL1);
         kvm_nvhe_sym(id_aa64isar1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1);
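A side note on the kvm_nvhe_sym() accesses in kvm_hyp_init_symbols(): this is pure name mangling, letting kernel-side code name the nVHE hyp image's copy of a symbol. A rough, compilable sketch of the idiom follows; the real definition lives in arch/arm64/include/asm/kvm_asm.h, and the variable here is declared locally only so the sketch stands alone.

#include <stdint.h>

/* Rough shape of the idiom: hyp symbols carry a __kvm_nvhe_ prefix, so
 * the token-pasting macro resolves to the hyp image's copy. */
#define kvm_nvhe_sym(sym) __kvm_nvhe_##sym

/* Declared here for illustration; in the kernel this is the hyp-side
 * variable consumed by the protected-VM sysreg code. */
uint64_t kvm_nvhe_sym(id_aa64pfr0_el1_sys_val);

static void init_hyp_view(uint64_t v)
{
        /* Runs at init time, before the hyp is de-privileged, so the
         * hyp sees the adjusted value from then on. */
        kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = v;
}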
@@ -33,11 +33,14 @@
  * Allow for protected VMs:
  * - Floating-point and Advanced SIMD
  * - Data Independent Timing
+ * - Spectre/Meltdown Mitigation
  */
 #define PVM_ID_AA64PFR0_ALLOW (\
         ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP) | \
         ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD) | \
-        ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) \
+        ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) | \
+        ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) | \
+        ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3) \
         )
 
 /*
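Why the allow-mask addition matters: as the next hunk shows, a protected guest's view is computed as (system value & allow mask) | forced bits, so a field missing from PVM_ID_AA64PFR0_ALLOW reads as zero regardless of what the hardware reports. A toy standalone version of that gating, using the architectural field shifts (the macro names are mine, not the kernel's):

#include <stdint.h>

/* 4-bit ID fields at their architectural positions in ID_AA64PFR0_EL1:
 * FP [19:16], AdvSIMD [23:20], DIT [51:48], CSV2 [59:56], CSV3 [63:60]. */
#define F(shift)  (0xfULL << (shift))
#define PVM_ALLOW (F(16) | F(20) | F(48) | F(56) | F(60))

/* Everything outside the allow mask reads as zero to the protected VM. */
static uint64_t pvm_view(uint64_t sys_val)
{
        return sys_val & PVM_ALLOW;
}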
@@ -85,19 +85,12 @@ static u64 get_restricted_features_unsigned(u64 sys_reg_val,
 
 static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu)
 {
-        const struct kvm *kvm = (const struct kvm *)kern_hyp_va(vcpu->kvm);
         u64 set_mask = 0;
         u64 allow_mask = PVM_ID_AA64PFR0_ALLOW;
 
         set_mask |= get_restricted_features_unsigned(id_aa64pfr0_el1_sys_val,
                 PVM_ID_AA64PFR0_RESTRICT_UNSIGNED);
 
-        /* Spectre and Meltdown mitigation in KVM */
-        set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
-                               (u64)kvm->arch.pfr0_csv2);
-        set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
-                               (u64)kvm->arch.pfr0_csv3);
-
         return (id_aa64pfr0_el1_sys_val & allow_mask) | set_mask;
 }
 
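The deleted lines are the old per-VM CSV2/CSV3 plumbing (kvm->arch.pfr0_csv2/pfr0_csv3); with the hypervisor's system value fixed up at init, protected VMs no longer honour those userspace-writable knobs, matching the comment added in the first hunk. What remains is the restrict-unsigned pass. A hand-rolled sketch of the idea behind that helper, reconstructed from how it is called here rather than copied from the kernel: per 4-bit unsigned field, expose the minimum of the system value and the allowed maximum.

#include <stdint.h>

/* Sketch: clamp each 4-bit unsigned ID field to the restricted maximum
 * given in restrict_fields; fields absent from restrict_fields read 0. */
static uint64_t restrict_unsigned(uint64_t sys_val, uint64_t restrict_fields)
{
        uint64_t out = 0;

        for (int i = 0; i < 64; i += 4) {
                uint64_t s = (sys_val >> i) & 0xf;
                uint64_t r = (restrict_fields >> i) & 0xf;

                out |= (s < r ? s : r) << i;
        }
        return out;
}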
@@ -558,6 +558,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
                 for_each_set_bit(i, &mask, 32)
                         kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
         }
+        kvm_vcpu_pmu_restore_guest(vcpu);
 }
 
 static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
@@ -794,7 +794,6 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                 if (!kvm_supports_32bit_el0())
                         val |= ARMV8_PMU_PMCR_LC;
                 kvm_pmu_handle_pmcr(vcpu, val);
-                kvm_vcpu_pmu_restore_guest(vcpu);
         } else {
                 /* PMCR.P & PMCR.C are RAZ */
                 val = __vcpu_sys_reg(vcpu, PMCR_EL0)
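Taken together, the last two hunks move kvm_vcpu_pmu_restore_guest() out of the PMCR trap handler and into kvm_pmu_handle_pmcr() itself, so the restore runs for every caller of the common handler; per the tag message, that is what gets the guest's EL0 event counting re-established before the first KVM_RUN after a save/restore. A compilable sketch of the refactor's shape, with all names below being stand-ins rather than the kernel's:

#include <stdint.h>

struct vcpu { int id; }; /* stand-in */

static void pmu_restore_guest(struct vcpu *v) { (void)v; /* reprogram event filtering */ }
static void emulate_pmcr_write(struct vcpu *v, uint64_t val) { (void)v; (void)val; }

/* The common handler now performs the restore, so every path that
 * hands it a PMCR value ends up with fresh guest event filtering. */
static void pmu_handle_pmcr(struct vcpu *v, uint64_t val)
{
        emulate_pmcr_write(v, val);
        pmu_restore_guest(v);
}

/* The trap handler drops its own call; previously it was the only
 * caller doing the restore, so PMCR values arriving by other routes
 * left stale filtering in place. */
static void trap_pmcr_write(struct vcpu *v, uint64_t val)
{
        pmu_handle_pmcr(v, val);
}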