Merge branch kvm-arm64/per-vcpu-host-pmu-data into kvmarm-master/next

* kvm-arm64/per-vcpu-host-pmu-data:
  : .
  : Pass the host PMU state in the vcpu to avoid the use of additional
  : shared memory between EL1 and EL2 (this obviously only applies
  : to nVHE and Protected setups).
  :
  : Patches courtesy of Fuad Tabba.
  : .
  KVM: arm64: pmu: Restore compilation when HW_PERF_EVENTS isn't selected
  KVM: arm64: Reenable pmu in Protected Mode
  KVM: arm64: Pass pmu events to hyp via vcpu
  KVM: arm64: Repack struct kvm_pmu to reduce size
  KVM: arm64: Wrapper for getting pmu_events

Signed-off-by: Marc Zyngier <maz@kernel.org>
commit 8794b4f510
Author: Marc Zyngier <maz@kernel.org>
Date:   2022-05-16 17:48:36 +01:00

7 changed files with 71 additions and 48 deletions
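For orientation, here is the shape of the change, condensed from the hunks below. This is an illustrative sketch assembled from the diff, not a verbatim excerpt of any one file:

    /*
     * The host/guest event masks now live in two places: a per-CPU copy owned
     * by the host (maintained by kvm_set_pmu_events()/kvm_clr_pmu_events()),
     * and a snapshot embedded in each vcpu, which is what the nVHE/protected
     * hypervisor reads instead of dereferencing shared kvm_host_data.
     */
    struct kvm_pmu_events {
        u32 events_host;   /* events that must only count while the host runs */
        u32 events_guest;  /* events that must only count while the guest runs */
    };

    /* Host side (EL1): per-CPU state, returned by kvm_get_pmu_events() */
    static DEFINE_PER_CPU(struct kvm_pmu_events, kvm_pmu_events);

    /*
     * Hyp side (EL2): __pmu_switch_to_guest()/__pmu_switch_to_host() now take
     * the vcpu and read vcpu->arch.pmu.events, a snapshot taken with interrupts
     * disabled just before each run (see the arm.c and arm_pmu.h hunks below).
     */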

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h

@@ -273,14 +273,8 @@ struct kvm_cpu_context {
 	struct kvm_vcpu *__hyp_running_vcpu;
 };
 
-struct kvm_pmu_events {
-	u32 events_host;
-	u32 events_guest;
-};
-
 struct kvm_host_data {
 	struct kvm_cpu_context host_ctxt;
-	struct kvm_pmu_events pmu_events;
 };
 
 struct kvm_host_psci_config {
@@ -820,9 +814,6 @@ void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);
 #ifdef CONFIG_KVM
 void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
 void kvm_clr_pmu_events(u32 clr);
-
-void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
-void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
 #else
 static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
 static inline void kvm_clr_pmu_events(u32 clr) {}
@@ -854,8 +845,6 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
 #define kvm_has_mte(kvm)					\
 	(system_supports_mte() &&				\
 	 test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &(kvm)->arch.flags))
-#define kvm_vcpu_has_pmu(vcpu)					\
-	(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))
 
 int kvm_trng_call(struct kvm_vcpu *vcpu);
 #ifdef CONFIG_KVM

diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile

@@ -13,7 +13,7 @@ obj-$(CONFIG_KVM) += hyp/
 kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \
 	 inject_fault.o va_layout.o handle_exit.o \
 	 guest.o debug.o reset.o sys_regs.o \
-	 vgic-sys-reg-v3.o fpsimd.o pmu.o pkvm.o \
+	 vgic-sys-reg-v3.o fpsimd.o pkvm.o \
 	 arch_timer.o trng.o vmid.o \
 	 vgic/vgic.o vgic/vgic-init.o \
 	 vgic/vgic-irqfd.o vgic/vgic-v2.o \
@@ -22,7 +22,7 @@ kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \
 	 vgic/vgic-mmio-v3.o vgic/vgic-kvm-device.o \
 	 vgic/vgic-its.o vgic/vgic-debug.o
 
-kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o
+kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o pmu.o
 
 always-y := hyp_constants.h hyp-constants.s

diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c

@@ -882,6 +882,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		kvm_vgic_flush_hwstate(vcpu);
 
+		kvm_pmu_update_vcpu_events(vcpu);
+
 		/*
 		 * Ensure we set mode to IN_GUEST_MODE after we disable
 		 * interrupts and before the final VCPU requests check.
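The kvm_pmu_update_vcpu_events() call added above is the macro introduced in the include/kvm/arm_pmu.h hunk at the end of this diff. Roughly, per that hunk:

    /*
     * Runs with interrupts disabled, right before entering the guest, so the
     * per-CPU masks cannot change under the copy; the nVHE/protected hyp then
     * reads the vcpu-embedded copy and needs no shared host memory.
     */
    if (!has_vhe() && kvm_vcpu_has_pmu(vcpu))
        vcpu->arch.pmu.events = *kvm_get_pmu_events();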

diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c

@@ -153,13 +153,10 @@ static void __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
 /**
  * Disable host events, enable guest events
  */
-static bool __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
+#ifdef CONFIG_HW_PERF_EVENTS
+static bool __pmu_switch_to_guest(struct kvm_vcpu *vcpu)
 {
-	struct kvm_host_data *host;
-	struct kvm_pmu_events *pmu;
-
-	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
-	pmu = &host->pmu_events;
+	struct kvm_pmu_events *pmu = &vcpu->arch.pmu.events;
 
 	if (pmu->events_host)
 		write_sysreg(pmu->events_host, pmcntenclr_el0);
@@ -173,13 +170,9 @@ static bool __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
 /**
  * Disable guest events, enable host events
  */
-static void __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
+static void __pmu_switch_to_host(struct kvm_vcpu *vcpu)
 {
-	struct kvm_host_data *host;
-	struct kvm_pmu_events *pmu;
-
-	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
-	pmu = &host->pmu_events;
+	struct kvm_pmu_events *pmu = &vcpu->arch.pmu.events;
 
 	if (pmu->events_guest)
 		write_sysreg(pmu->events_guest, pmcntenclr_el0);
@@ -187,6 +180,10 @@ static void __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
 	if (pmu->events_host)
 		write_sysreg(pmu->events_host, pmcntenset_el0);
 }
+#else
+#define __pmu_switch_to_guest(v)	({ false; })
+#define __pmu_switch_to_host(v)		do {} while (0)
+#endif
 
 /**
  * Handler for protected VM MSR, MRS or System instruction execution in AArch64.
@@ -304,7 +301,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 	host_ctxt->__hyp_running_vcpu = vcpu;
 	guest_ctxt = &vcpu->arch.ctxt;
 
-	pmu_switch_needed = __pmu_switch_to_guest(host_ctxt);
+	pmu_switch_needed = __pmu_switch_to_guest(vcpu);
 
 	__sysreg_save_state_nvhe(host_ctxt);
 
 	/*
@@ -366,7 +363,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 	__debug_restore_host_buffers_nvhe(vcpu);
 
 	if (pmu_switch_needed)
-		__pmu_switch_to_host(host_ctxt);
+		__pmu_switch_to_host(vcpu);
 
 	/* Returning to host will clear PSR.I, remask PMR if needed */
 	if (system_uses_irq_prio_masking())

diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c

@@ -774,8 +774,7 @@ void kvm_host_pmu_init(struct arm_pmu *pmu)
 {
 	struct arm_pmu_entry *entry;
 
-	if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_PMUVER_IMP_DEF ||
-	    is_protected_kvm_enabled())
+	if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_PMUVER_IMP_DEF)
 		return;
 
 	mutex_lock(&arm_pmus_lock);

diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c

@@ -5,7 +5,8 @@
  */
 #include <linux/kvm_host.h>
 #include <linux/perf_event.h>
-#include <asm/kvm_hyp.h>
+
+static DEFINE_PER_CPU(struct kvm_pmu_events, kvm_pmu_events);
 
 /*
  * Given the perf event attributes and system type, determine
@@ -25,21 +26,26 @@ static bool kvm_pmu_switch_needed(struct perf_event_attr *attr)
 	return (attr->exclude_host != attr->exclude_guest);
 }
 
+struct kvm_pmu_events *kvm_get_pmu_events(void)
+{
+	return this_cpu_ptr(&kvm_pmu_events);
+}
+
 /*
  * Add events to track that we may want to switch at guest entry/exit
  * time.
  */
 void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
 {
-	struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data);
+	struct kvm_pmu_events *pmu = kvm_get_pmu_events();
 
-	if (!kvm_arm_support_pmu_v3() || !ctx || !kvm_pmu_switch_needed(attr))
+	if (!kvm_arm_support_pmu_v3() || !pmu || !kvm_pmu_switch_needed(attr))
 		return;
 
 	if (!attr->exclude_host)
-		ctx->pmu_events.events_host |= set;
+		pmu->events_host |= set;
 	if (!attr->exclude_guest)
-		ctx->pmu_events.events_guest |= set;
+		pmu->events_guest |= set;
 }
 
 /*
@@ -47,13 +53,13 @@ void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
  */
 void kvm_clr_pmu_events(u32 clr)
 {
-	struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data);
+	struct kvm_pmu_events *pmu = kvm_get_pmu_events();
 
-	if (!kvm_arm_support_pmu_v3() || !ctx)
+	if (!kvm_arm_support_pmu_v3() || !pmu)
 		return;
 
-	ctx->pmu_events.events_host &= ~clr;
-	ctx->pmu_events.events_guest &= ~clr;
+	pmu->events_host &= ~clr;
+	pmu->events_guest &= ~clr;
 }
 
 #define PMEVTYPER_READ_CASE(idx) \
@@ -169,16 +175,16 @@ static void kvm_vcpu_pmu_disable_el0(unsigned long events)
  */
 void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
 {
-	struct kvm_host_data *host;
+	struct kvm_pmu_events *pmu;
 	u32 events_guest, events_host;
 
 	if (!kvm_arm_support_pmu_v3() || !has_vhe())
 		return;
 
 	preempt_disable();
-	host = this_cpu_ptr_hyp_sym(kvm_host_data);
-	events_guest = host->pmu_events.events_guest;
-	events_host = host->pmu_events.events_host;
+	pmu = kvm_get_pmu_events();
+	events_guest = pmu->events_guest;
+	events_host = pmu->events_host;
 
 	kvm_vcpu_pmu_enable_el0(events_guest);
 	kvm_vcpu_pmu_disable_el0(events_host);
@@ -190,15 +196,15 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
  */
 void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
 {
-	struct kvm_host_data *host;
+	struct kvm_pmu_events *pmu;
 	u32 events_guest, events_host;
 
 	if (!kvm_arm_support_pmu_v3() || !has_vhe())
 		return;
 
-	host = this_cpu_ptr_hyp_sym(kvm_host_data);
-	events_guest = host->pmu_events.events_guest;
-	events_host = host->pmu_events.events_host;
+	pmu = kvm_get_pmu_events();
+	events_guest = pmu->events_guest;
+	events_host = pmu->events_host;
 
 	kvm_vcpu_pmu_enable_el0(events_host);
 	kvm_vcpu_pmu_disable_el0(events_guest);

diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h

@@ -20,13 +20,19 @@ struct kvm_pmc {
 	struct perf_event *perf_event;
 };
 
+struct kvm_pmu_events {
+	u32 events_host;
+	u32 events_guest;
+};
+
 struct kvm_pmu {
-	int irq_num;
+	struct irq_work overflow_work;
+	struct kvm_pmu_events events;
 	struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
 	DECLARE_BITMAP(chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
+	int irq_num;
 	bool created;
 	bool irq_level;
-	struct irq_work overflow_work;
 };
 
 struct arm_pmu_entry {
@@ -66,6 +72,25 @@ int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
 int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
 			    struct kvm_device_attr *attr);
 int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
+
+struct kvm_pmu_events *kvm_get_pmu_events(void);
+void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
+void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
+
+#define kvm_vcpu_has_pmu(vcpu)					\
+	(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))
+
+/*
+ * Updates the vcpu's view of the pmu events for this cpu.
+ * Must be called before every vcpu run after disabling interrupts, to ensure
+ * that an interrupt cannot fire and update the structure.
+ */
+#define kvm_pmu_update_vcpu_events(vcpu)				\
+	do {								\
+		if (!has_vhe() && kvm_vcpu_has_pmu(vcpu))		\
+			vcpu->arch.pmu.events = *kvm_get_pmu_events();	\
+	} while (0)
+
 #else
 struct kvm_pmu {
 };
@@ -127,6 +152,11 @@ static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 	return 0;
 }
 
+#define kvm_vcpu_has_pmu(vcpu)		({ false; })
+static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
+static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
+static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
+
 #endif
 
 #endif