arm64: KVM: Add access handler for event counter register
This kind of register includes PMEVCNTRn, PMCCNTR and PMXEVCNTR (which is mapped to PMEVCNTRn). The access handler translates all aarch32 register offsets to aarch64 ones and uses vcpu_sys_reg() to access their values, which avoids having to deal with big-endian ordering by hand. When one of these registers is read, return the sum of the register value and the value the perf event has counted.

Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
Reviewed-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
commit 051ff581ce
parent a86b550530
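Before the diff itself, a short standalone sketch (illustration only, not part of the commit; the helper names are made up) of the two ideas the new handler relies on: the counter index n of a PMEVCNTR<n> access is recovered from the CRm and Op2 encoding fields, and a counter read returns the saved register value plus whatever the backing perf event has counted since it was saved.

#include <assert.h>
#include <stdint.h>

/* Hypothetical helper: decode n from CRm/Op2 the way access_pmu_evcntr()
 * does for both the aarch64 and aarch32 encodings; CRm[1:0] carries
 * n[4:3] and Op2[2:0] carries n[2:0].
 */
static uint64_t pmevcntr_index(uint8_t crm, uint8_t op2)
{
        return ((crm & 3) << 3) | (op2 & 7);
}

/* Hypothetical helper: a read is the shadow register value plus the
 * delta counted by the perf event, truncated to the counter width.
 */
static uint64_t counter_read(uint64_t shadow, uint64_t perf_delta,
                             uint64_t bitmask)
{
        return (shadow + perf_delta) & bitmask;
}

int main(void)
{
        /* PMEVCNTR10_EL0 is encoded with CRm = 0b1001, Op2 = 0b010 */
        assert(pmevcntr_index(0x9, 0x2) == 10);
        /* a 32-bit event counter: 0xfffffff0 + 0x20 wraps to 0x10 */
        assert(counter_read(0xfffffff0, 0x20, 0xffffffffULL) == 0x10);
        return 0;
}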
arch/arm64/include/asm/kvm_host.h
@@ -120,6 +120,9 @@ enum vcpu_sysreg {
         /* Performance Monitors Registers */
         PMCR_EL0,       /* Control Register */
         PMSELR_EL0,     /* Event Counter Selection Register */
+        PMEVCNTR0_EL0,  /* Event Counter Register (0-30) */
+        PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
+        PMCCNTR_EL0,    /* Cycle Counter Register */
 
         /* 32bit specific registers. Keep them at the end of the range */
         DACR32_EL2,     /* Domain Access Control Register */
arch/arm64/kvm/Makefile
@@ -26,3 +26,4 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2-emul.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3-emul.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arch_timer.o
+kvm-$(CONFIG_KVM_ARM_PMU) += $(KVM)/arm/pmu.o
arch/arm64/kvm/sys_regs.c
@@ -513,6 +513,56 @@ static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
         return true;
 }
 
+static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
+{
+        u64 pmcr, val;
+
+        pmcr = vcpu_sys_reg(vcpu, PMCR_EL0);
+        val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
+        if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX)
+                return false;
+
+        return true;
+}
+
+static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
+                              struct sys_reg_params *p,
+                              const struct sys_reg_desc *r)
+{
+        u64 idx;
+
+        if (!kvm_arm_pmu_v3_ready(vcpu))
+                return trap_raz_wi(vcpu, p, r);
+
+        if (r->CRn == 9 && r->CRm == 13) {
+                if (r->Op2 == 2) {
+                        /* PMXEVCNTR_EL0 */
+                        idx = vcpu_sys_reg(vcpu, PMSELR_EL0)
+                              & ARMV8_PMU_COUNTER_MASK;
+                } else if (r->Op2 == 0) {
+                        /* PMCCNTR_EL0 */
+                        idx = ARMV8_PMU_CYCLE_IDX;
+                } else {
+                        BUG();
+                }
+        } else if (r->CRn == 14 && (r->CRm & 12) == 8) {
+                /* PMEVCNTRn_EL0 */
+                idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
+        } else {
+                BUG();
+        }
+
+        if (!pmu_counter_idx_valid(vcpu, idx))
+                return false;
+
+        if (p->is_write)
+                kvm_pmu_set_counter_value(vcpu, idx, p->regval);
+        else
+                p->regval = kvm_pmu_get_counter_value(vcpu, idx);
+
+        return true;
+}
+
 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
 #define DBG_BCR_BVR_WCR_WVR_EL1(n)                                      \
         /* DBGBVRn_EL1 */                                               \
@@ -528,6 +578,13 @@ static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
         { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111),     \
           trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr }
 
+/* Macro to expand the PMEVCNTRn_EL0 register */
+#define PMU_PMEVCNTR_EL0(n)                                             \
+        /* PMEVCNTRn_EL0 */                                             \
+        { Op0(0b11), Op1(0b011), CRn(0b1110),                           \
+          CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),         \
+          access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }
+
 /*
  * Architected system registers.
  * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
@@ -721,13 +778,13 @@ static const struct sys_reg_desc sys_reg_descs[] = {
           access_pmceid },
         /* PMCCNTR_EL0 */
         { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
-          trap_raz_wi },
+          access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
         /* PMXEVTYPER_EL0 */
         { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
           trap_raz_wi },
         /* PMXEVCNTR_EL0 */
         { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
-          trap_raz_wi },
+          access_pmu_evcntr },
         /* PMUSERENR_EL0 */
         { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
           trap_raz_wi },
@@ -742,6 +799,39 @@ static const struct sys_reg_desc sys_reg_descs[] = {
         { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
           NULL, reset_unknown, TPIDRRO_EL0 },
 
+        /* PMEVCNTRn_EL0 */
+        PMU_PMEVCNTR_EL0(0),
+        PMU_PMEVCNTR_EL0(1),
+        PMU_PMEVCNTR_EL0(2),
+        PMU_PMEVCNTR_EL0(3),
+        PMU_PMEVCNTR_EL0(4),
+        PMU_PMEVCNTR_EL0(5),
+        PMU_PMEVCNTR_EL0(6),
+        PMU_PMEVCNTR_EL0(7),
+        PMU_PMEVCNTR_EL0(8),
+        PMU_PMEVCNTR_EL0(9),
+        PMU_PMEVCNTR_EL0(10),
+        PMU_PMEVCNTR_EL0(11),
+        PMU_PMEVCNTR_EL0(12),
+        PMU_PMEVCNTR_EL0(13),
+        PMU_PMEVCNTR_EL0(14),
+        PMU_PMEVCNTR_EL0(15),
+        PMU_PMEVCNTR_EL0(16),
+        PMU_PMEVCNTR_EL0(17),
+        PMU_PMEVCNTR_EL0(18),
+        PMU_PMEVCNTR_EL0(19),
+        PMU_PMEVCNTR_EL0(20),
+        PMU_PMEVCNTR_EL0(21),
+        PMU_PMEVCNTR_EL0(22),
+        PMU_PMEVCNTR_EL0(23),
+        PMU_PMEVCNTR_EL0(24),
+        PMU_PMEVCNTR_EL0(25),
+        PMU_PMEVCNTR_EL0(26),
+        PMU_PMEVCNTR_EL0(27),
+        PMU_PMEVCNTR_EL0(28),
+        PMU_PMEVCNTR_EL0(29),
+        PMU_PMEVCNTR_EL0(30),
+
         /* DACR32_EL2 */
         { Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
           NULL, reset_unknown, DACR32_EL2 },
@@ -931,6 +1021,13 @@ static const struct sys_reg_desc cp14_64_regs[] = {
         { Op1( 0), CRm( 2), .access = trap_raz_wi },
 };
 
+/* Macro to expand the PMEVCNTRn register */
+#define PMU_PMEVCNTR(n)                                                 \
+        /* PMEVCNTRn */                                                 \
+        { Op1(0), CRn(0b1110),                                          \
+          CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),         \
+          access_pmu_evcntr }
+
 /*
  * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
  * depending on the way they are accessed (as a 32bit or a 64bit
@@ -966,9 +1063,9 @@ static const struct sys_reg_desc cp15_regs[] = {
         { Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
         { Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
         { Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
-        { Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi },
+        { Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
         { Op1( 0), CRn( 9), CRm(13), Op2( 1), trap_raz_wi },
-        { Op1( 0), CRn( 9), CRm(13), Op2( 2), trap_raz_wi },
+        { Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
         { Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi },
         { Op1( 0), CRn( 9), CRm(14), Op2( 1), trap_raz_wi },
         { Op1( 0), CRn( 9), CRm(14), Op2( 2), trap_raz_wi },
@@ -982,10 +1079,44 @@ static const struct sys_reg_desc cp15_regs[] = {
         { Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi },
 
         { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
+
+        /* PMEVCNTRn */
+        PMU_PMEVCNTR(0),
+        PMU_PMEVCNTR(1),
+        PMU_PMEVCNTR(2),
+        PMU_PMEVCNTR(3),
+        PMU_PMEVCNTR(4),
+        PMU_PMEVCNTR(5),
+        PMU_PMEVCNTR(6),
+        PMU_PMEVCNTR(7),
+        PMU_PMEVCNTR(8),
+        PMU_PMEVCNTR(9),
+        PMU_PMEVCNTR(10),
+        PMU_PMEVCNTR(11),
+        PMU_PMEVCNTR(12),
+        PMU_PMEVCNTR(13),
+        PMU_PMEVCNTR(14),
+        PMU_PMEVCNTR(15),
+        PMU_PMEVCNTR(16),
+        PMU_PMEVCNTR(17),
+        PMU_PMEVCNTR(18),
+        PMU_PMEVCNTR(19),
+        PMU_PMEVCNTR(20),
+        PMU_PMEVCNTR(21),
+        PMU_PMEVCNTR(22),
+        PMU_PMEVCNTR(23),
+        PMU_PMEVCNTR(24),
+        PMU_PMEVCNTR(25),
+        PMU_PMEVCNTR(26),
+        PMU_PMEVCNTR(27),
+        PMU_PMEVCNTR(28),
+        PMU_PMEVCNTR(29),
+        PMU_PMEVCNTR(30),
 };
 
 static const struct sys_reg_desc cp15_64_regs[] = {
         { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
+        { Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
         { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
         { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
 };
include/kvm/arm_pmu.h
@@ -23,6 +23,8 @@
 #include <linux/perf_event.h>
 #include <asm/perf_event.h>
 
+#define ARMV8_PMU_CYCLE_IDX             (ARMV8_PMU_MAX_COUNTERS - 1)
+
 struct kvm_pmc {
         u8 idx; /* index into the pmu->pmc array */
         struct perf_event *perf_event;
@@ -36,11 +38,20 @@ struct kvm_pmu {
 };
 
 #define kvm_arm_pmu_v3_ready(v)         ((v)->arch.pmu.ready)
+u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
+void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
 #else
 struct kvm_pmu {
 };
 
 #define kvm_arm_pmu_v3_ready(v)         (false)
+static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
+                                            u64 select_idx)
+{
+        return 0;
+}
+static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
+                                             u64 select_idx, u64 val) {}
 #endif
 
 #endif
virt/kvm/arm/pmu.c (new file, 63 lines)
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2015 Linaro Ltd.
+ * Author: Shannon Zhao <shannon.zhao@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/cpu.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/perf_event.h>
+#include <asm/kvm_emulate.h>
+#include <kvm/arm_pmu.h>
+
+/**
+ * kvm_pmu_get_counter_value - get PMU counter value
+ * @vcpu: The vcpu pointer
+ * @select_idx: The counter index
+ */
+u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
+{
+        u64 counter, reg, enabled, running;
+        struct kvm_pmu *pmu = &vcpu->arch.pmu;
+        struct kvm_pmc *pmc = &pmu->pmc[select_idx];
+
+        reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
+              ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
+        counter = vcpu_sys_reg(vcpu, reg);
+
+        /* The real counter value is equal to the value of counter register plus
+         * the value perf event counts.
+         */
+        if (pmc->perf_event)
+                counter += perf_event_read_value(pmc->perf_event, &enabled,
+                                                 &running);
+
+        return counter & pmc->bitmask;
+}
+
+/**
+ * kvm_pmu_set_counter_value - set PMU counter value
+ * @vcpu: The vcpu pointer
+ * @select_idx: The counter index
+ * @val: The counter value
+ */
+void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
+{
+        u64 reg;
+
+        reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
+              ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
+        vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);
+}
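One note on the write path above (illustration only, not part of the commit): since a read returns the shadow register plus the perf event's running count, kvm_pmu_set_counter_value() adds val minus the current readback to the shadow register, so the very next read yields exactly val. A minimal standalone check of that arithmetic, using hypothetical variable names:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        /* Hypothetical state: shadow register and the perf event's count */
        uint64_t shadow = 100, perf_count = 40, bitmask = 0xffffffffULL;
        uint64_t readback = (shadow + perf_count) & bitmask;    /* 140 */

        /* Guest writes 50: add (val - readback) to the shadow register,
         * mirroring vcpu_sys_reg(vcpu, reg) += (s64)val - readback.
         */
        uint64_t val = 50;
        shadow += (int64_t)val - (int64_t)readback;             /* shadow is now 10 */

        /* A subsequent read returns exactly the written value */
        assert(((shadow + perf_count) & bitmask) == val);
        return 0;
}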