KVM: arm64: Upgrade PMU support to ARMv8.4

Upgrading the PMU code from ARMv8.1 to ARMv8.4 turns out to be
pretty easy. All that is required is support for PMMIR_EL1, which
is read-only, and for which returning 0 is a valid option as long
as we don't advertise STALL_SLOT as an implemented event.

Let's just do that and adjust what we return to the guest.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Author: Marc Zyngier
Date:   2020-02-16 18:17:22 +00:00
Commit: 46081078fe
Parent: 94893fc9ad
3 changed files with 16 additions and 4 deletions

@@ -846,7 +846,10 @@
 #define ID_DFR0_PERFMON_SHIFT		24
 
+#define ID_DFR0_PERFMON_8_0		0x3
 #define ID_DFR0_PERFMON_8_1		0x4
+#define ID_DFR0_PERFMON_8_4		0x5
+#define ID_DFR0_PERFMON_8_5		0x6
 
 #define ID_ISAR4_SWP_FRAC_SHIFT		28
 #define ID_ISAR4_PSR_M_SHIFT		24
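
The PerfMon field occupies bits [27:24] of ID_DFR0 (hence the shift of 24), and the new constants are the architected field encodings for the ARMv8.4 and ARMv8.5 flavours of PMUv3. As a minimal standalone sketch of how such a 4-bit field is extracted and compared against these values (plain C with a hypothetical helper name, not kernel code):

#include <stdint.h>
#include <stdio.h>

#define ID_DFR0_PERFMON_SHIFT	24
#define ID_DFR0_PERFMON_8_4	0x5

/* Extract the 4-bit PerfMon field from an ID_DFR0 value. */
static unsigned int id_dfr0_perfmon(uint32_t id_dfr0)
{
        return (id_dfr0 >> ID_DFR0_PERFMON_SHIFT) & 0xf;
}

int main(void)
{
        uint32_t id_dfr0 = (uint32_t)ID_DFR0_PERFMON_8_4 << ID_DFR0_PERFMON_SHIFT;
        unsigned int perfmon = id_dfr0_perfmon(id_dfr0);

        /* 0xf means IMPLEMENTATION DEFINED, which is not a versioned PMUv3. */
        printf("PMUv3 for ARMv8.4 or later: %d\n",
               perfmon != 0xf && perfmon >= ID_DFR0_PERFMON_8_4);
        return 0;
}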

@@ -795,6 +795,12 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 		base = 0;
 	} else {
 		val = read_sysreg(pmceid1_el0);
+		/*
+		 * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled
+		 * as RAZ
+		 */
+		if (vcpu->kvm->arch.pmuver >= ID_AA64DFR0_PMUVER_8_4)
+			val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32);
 		base = 32;
 	}
 
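For reference, PMCEID0_EL0 advertises common events 0-31 and PMCEID1_EL0 advertises events 32-63, one bit per event, which is why 32 is subtracted from the STALL_SLOT event number before the bit is cleared. A standalone sketch of that masking (plain C with a hypothetical helper, assuming the architectural STALL_SLOT event number 0x3F behind ARMV8_PMUV3_PERFCTR_STALL_SLOT):

#include <stdint.h>
#include <stdio.h>

#define PERFCTR_STALL_SLOT	0x3F	/* architectural event number (assumed) */

/* Hide an event in the 32..63 range from a PMCEID1-style bitmap. */
static uint64_t hide_event_in_pmceid1(uint64_t pmceid1, unsigned int event)
{
        if (event >= 32 && event < 64)
                pmceid1 &= ~((uint64_t)1 << (event - 32));
        return pmceid1;
}

int main(void)
{
        /* Pretend the hardware implements STALL_SLOT ... */
        uint64_t pmceid1 = (uint64_t)1 << (PERFCTR_STALL_SLOT - 32);

        /* ... and check that it is no longer advertised after masking. */
        pmceid1 = hide_event_in_pmceid1(pmceid1, PERFCTR_STALL_SLOT);
        printf("PMCEID1 after masking: 0x%016llx\n", (unsigned long long)pmceid1);
        return 0;
}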

@@ -1051,16 +1051,16 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
 		/* Limit debug to ARMv8.0 */
 		val &= ~FEATURE(ID_AA64DFR0_DEBUGVER);
 		val |= FIELD_PREP(FEATURE(ID_AA64DFR0_DEBUGVER), 6);
-		/* Limit guests to PMUv3 for ARMv8.1 */
+		/* Limit guests to PMUv3 for ARMv8.4 */
 		val = cpuid_feature_cap_perfmon_field(val,
 				ID_AA64DFR0_PMUVER_SHIFT,
-				kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVER_8_1 : 0);
+				kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVER_8_4 : 0);
 		break;
 	case SYS_ID_DFR0_EL1:
-		/* Limit guests to PMUv3 for ARMv8.1 */
+		/* Limit guests to PMUv3 for ARMv8.4 */
 		val = cpuid_feature_cap_perfmon_field(val,
 				ID_DFR0_PERFMON_SHIFT,
-				kvm_vcpu_has_pmu(vcpu) ? ID_DFR0_PERFMON_8_1 : 0);
+				kvm_vcpu_has_pmu(vcpu) ? ID_DFR0_PERFMON_8_4 : 0);
 		break;
 	}
 
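cpuid_feature_cap_perfmon_field() is what keeps the guest-visible PMU version from exceeding what KVM emulates: the 4-bit field is read out, the IMPLEMENTATION DEFINED encoding (0xf) is treated as "no PMU", and anything newer than the cap is clamped to the cap. A simplified standalone sketch of that clamping (plain C, hypothetical helper, not the kernel's implementation; the example assumes the ID_AA64DFR0 PMUVer field sits at shift 8):

#include <stdint.h>
#include <stdio.h>

/*
 * Clamp a 4-bit unsigned ID register field at 'cap', treating the
 * IMPLEMENTATION DEFINED encoding (0xf) as "feature not implemented".
 */
static uint64_t cap_id_field(uint64_t reg, unsigned int shift, uint64_t cap)
{
        uint64_t mask = (uint64_t)0xf << shift;
        uint64_t field = (reg >> shift) & 0xf;

        if (field == 0xf)	/* IMP DEF: hide it from the guest */
                field = 0;
        if (field > cap)
                field = cap;

        return (reg & ~mask) | (field << shift);
}

int main(void)
{
        /* An ID_AA64DFR0-style value claiming an ARMv8.5 PMU (0x6) in bits [11:8]. */
        uint64_t val = (uint64_t)0x6 << 8;

        printf("capped PMUVer field: 0x%llx\n",
               (unsigned long long)((cap_id_field(val, 8, 0x5) >> 8) & 0xf));
        return 0;
}
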
@@ -1496,6 +1496,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 
 	{ SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
 	{ SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
+	{ SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },
 
 	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
 	{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
 
@@ -1918,6 +1919,8 @@ static const struct sys_reg_desc cp15_regs[] = {
 	{ Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },
 	{ AA32(HI), Op1( 0), CRn( 9), CRm(14), Op2( 4), access_pmceid },
 	{ AA32(HI), Op1( 0), CRn( 9), CRm(14), Op2( 5), access_pmceid },
+	/* PMMIR */
+	{ Op1( 0), CRn( 9), CRm(14), Op2( 6), trap_raz_wi },
 
 	/* PRRR/MAIR0 */
 	{ AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 },
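
Both new entries hand PMMIR accesses to trap_raz_wi, i.e. the register is emulated as RAZ/WI: reads return zero (consistent with not advertising STALL_SLOT) and writes are ignored. A minimal standalone sketch of that access pattern (plain C with hypothetical types and handler, not KVM's sys_reg machinery):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A trimmed-down stand-in for a trapped system register access. */
struct reg_access {
        bool is_write;		/* true for a guest write, false for a read */
        uint64_t val;		/* value written, or value to return on a read */
};

/* Emulate a register as RAZ/WI: reads as zero, writes discarded. */
static bool emulate_raz_wi(struct reg_access *a)
{
        if (!a->is_write)
                a->val = 0;
        return true;		/* access handled; the guest can continue */
}

int main(void)
{
        struct reg_access read = { .is_write = false, .val = 0xdeadbeef };

        emulate_raz_wi(&read);
        printf("PMMIR read by the guest: 0x%llx\n", (unsigned long long)read.val);
        return 0;
}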