KVM: x86: Adjust counter sample period after a wrmsr
The sample_period of a counter tracks when that counter will overflow and set global status/trigger a PMI. However, this currently only gets set when the initial counter is created or when a counter is resumed; this change updates the sample period after a wrmsr so running counters will accurately reflect their new value.

Signed-off-by: Eric Hankland <ehankland@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 168d918f26
parent 7f42aa76d4
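To make the relationship between a counter value and its sample period concrete, here is a minimal stand-alone sketch of the arithmetic described above. It is not kernel code: sample_period() and the 48-bit counter width are illustrative assumptions standing in for the kernel's get_sample_period() and pmc_bitmask().

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for pmc_bitmask(): assume a 48-bit counter. */
static uint64_t counter_mask(void)
{
	return (1ULL << 48) - 1;
}

/*
 * The sample period is the number of events left before the counter
 * wraps: the two's complement of the current value, masked to the
 * counter width.  A result of 0 is treated as a full-width period so a
 * freshly zeroed counter does not ask perf for a zero period.
 */
static uint64_t sample_period(uint64_t counter_value)
{
	uint64_t period = (-counter_value) & counter_mask();

	return period ? period : counter_mask() + 1;
}

int main(void)
{
	/* Counter sitting 16 events below overflow -> period of 16. */
	printf("%llu\n", (unsigned long long)sample_period(counter_mask() - 15));

	/* Counter written to 0 -> full 2^48 period, not 0. */
	printf("%llu\n", (unsigned long long)sample_period(0));
	return 0;
}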
arch/x86/kvm/pmu.c
@@ -111,7 +111,7 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
 		.config = config,
 	};
 
-	attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);
+	attr.sample_period = get_sample_period(pmc, pmc->counter);
 
 	if (in_tx)
 		attr.config |= HSW_IN_TX;
@@ -158,7 +158,7 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc)
 
 	/* recalibrate sample period and check if it's accepted by perf core */
 	if (perf_event_period(pmc->perf_event,
-			(-pmc->counter) & pmc_bitmask(pmc)))
+			get_sample_period(pmc, pmc->counter)))
 		return false;
 
 	/* reuse perf_event to serve as pmc_reprogram_counter() does*/
arch/x86/kvm/pmu.h
@@ -129,6 +129,15 @@ static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
 	return NULL;
 }
 
+static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
+{
+	u64 sample_period = (-counter_value) & pmc_bitmask(pmc);
+
+	if (!sample_period)
+		sample_period = pmc_bitmask(pmc) + 1;
+	return sample_period;
+}
+
 void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
 void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
 void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);
arch/x86/kvm/vmx/pmu_intel.c
@@ -263,9 +263,15 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			if (!msr_info->host_initiated)
 				data = (s64)(s32)data;
 			pmc->counter += data - pmc_read_counter(pmc);
+			if (pmc->perf_event)
+				perf_event_period(pmc->perf_event,
+						  get_sample_period(pmc, data));
 			return 0;
 		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
 			pmc->counter += data - pmc_read_counter(pmc);
+			if (pmc->perf_event)
+				perf_event_period(pmc->perf_event,
+						  get_sample_period(pmc, data));
 			return 0;
 		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
 			if (data == pmc->eventsel)
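As a rough illustration of why the pmu_intel.c hunk matters, the toy model below contrasts a counter write that leaves the period stale with one that recomputes it. The struct, helpers, and the assumed 48-bit counter width are invented for the sketch and do not correspond to kernel APIs; the in-kernel equivalent of wrmsr_fresh() is intel_pmu_set_msr() calling perf_event_period() with get_sample_period(pmc, data).

#include <stdint.h>
#include <stdio.h>

#define WIDTH_MASK ((((uint64_t)1) << 48) - 1)	/* assumed 48-bit counter */

/* Toy PMC: a counter value plus the period programmed into perf. */
struct toy_pmc {
	uint64_t counter;
	uint64_t period;
};

static uint64_t period_for(uint64_t counter)
{
	uint64_t p = (-counter) & WIDTH_MASK;

	return p ? p : WIDTH_MASK + 1;
}

/* Pre-patch behaviour: the write updates the counter, the period goes stale. */
static void wrmsr_stale(struct toy_pmc *pmc, uint64_t data)
{
	pmc->counter = data & WIDTH_MASK;
}

/* Post-patch behaviour: the period is recomputed from the written value. */
static void wrmsr_fresh(struct toy_pmc *pmc, uint64_t data)
{
	pmc->counter = data & WIDTH_MASK;
	pmc->period = period_for(data);
}

int main(void)
{
	struct toy_pmc a = { 0, period_for(0) };
	struct toy_pmc b = a;

	/* Guest re-arms each counter to overflow after 1000 more events. */
	wrmsr_stale(&a, WIDTH_MASK - 999);
	wrmsr_fresh(&b, WIDTH_MASK - 999);

	printf("stale period: %llu\n", (unsigned long long)a.period);	/* 2^48 */
	printf("fresh period: %llu\n", (unsigned long long)b.period);	/* 1000 */
	return 0;
}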