mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-16 08:44:21 +08:00
KVM: x86/pmu: Rewrite reprogram_counters() to improve performance
A valid pmc is always tested before using pmu->reprogram_pmi. Eliminate this part of the redundancy by setting the counter's bitmask directly, and in addition, trigger KVM_REQ_PMU only once to save more cpu cycles. Signed-off-by: Like Xu <likexu@tencent.com> Link: https://lore.kernel.org/r/20230214050757.9623-4-likexu@tencent.com Signed-off-by: Sean Christopherson <seanjc@google.com>
This commit is contained in:
parent
8bca8c5ce4
commit
649bccd7fa
@@ -76,13 +76,13 @@ static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
|
||||
/*
 * Queue reprogramming for every counter whose control bit toggled.
 *
 * @pmu:  the guest PMU state.
 * @diff: bitmask of counter indices that changed (caller computes it,
 *        e.g. old ^ new global control bits).
 *
 * Per the commit rationale: a valid pmc is always re-tested against
 * pmu->reprogram_pmi later, so resolving each index to a kvm_pmc here
 * is redundant.  Set the bits in pmu->reprogram_pmi directly and raise
 * KVM_REQ_PMU exactly once for the whole batch instead of once per
 * counter.
 */
static void reprogram_counters(struct kvm_pmu *pmu, u64 diff)
{
	int bit;

	/* No bits toggled: skip the request entirely. */
	if (!diff)
		return;

	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		set_bit(bit, pmu->reprogram_pmi);

	/* One request covers all counters marked above. */
	kvm_make_request(KVM_REQ_PMU, pmu_to_vcpu(pmu));
}
|
||||
|
||||
static bool intel_hw_event_available(struct kvm_pmc *pmc)
|
||||
|
Loading…
Reference in New Issue
Block a user