KVM: x86/mmu: Move flushing for "slot" handlers to caller for legacy MMU

Place the onus on the caller of slot_handle_*() to flush the TLB, rather
than handling the flush in the helper, and rename parameters accordingly.
This will allow future patches to coalesce flushes between address spaces
and between the legacy and TDP MMUs.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210326021957.1424875-3-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
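
The calling convention change, in a nutshell (an illustrative sketch, not part of the patch; zap_fn, start_gfn, and nr_pages are hypothetical stand-ins, while slot_handle_leaf() and kvm_flush_remote_tlbs_with_address() are the real helpers touched below):

	/* Before: the helper flushed on the caller's behalf. */
	slot_handle_leaf(kvm, slot, zap_fn, /*lock_flush_tlb=*/true);

	/*
	 * After: the helper still flushes before yielding mmu_lock when
	 * flush_on_yield is true, but otherwise only reports whether a
	 * flush is owed; the caller issues the final flush and can thus
	 * coalesce flushes across memslots, address spaces, and MMUs.
	 */
	bool flush = slot_handle_leaf(kvm, slot, zap_fn, /*flush_on_yield=*/true);
	if (flush)
		kvm_flush_remote_tlbs_with_address(kvm, start_gfn, nr_pages);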

arch/x86/kvm/mmu/mmu.c

@@ -5249,7 +5249,7 @@ typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_
 static __always_inline bool
 slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
 			slot_level_handler fn, int start_level, int end_level,
-			gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb)
+			gfn_t start_gfn, gfn_t end_gfn, bool flush_on_yield)
 {
 	struct slot_rmap_walk_iterator iterator;
 	bool flush = false;
@@ -5260,7 +5260,7 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
 		flush |= fn(kvm, iterator.rmap, memslot);
 
 		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
-			if (flush && lock_flush_tlb) {
+			if (flush && flush_on_yield) {
 				kvm_flush_remote_tlbs_with_address(kvm,
 						start_gfn,
 						iterator.gfn - start_gfn + 1);
@@ -5270,32 +5270,26 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
 		}
 	}
 
-	if (flush && lock_flush_tlb) {
-		kvm_flush_remote_tlbs_with_address(kvm, start_gfn,
-						   end_gfn - start_gfn + 1);
-		flush = false;
-	}
-
 	return flush;
 }
 
 static __always_inline bool
 slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
 		  slot_level_handler fn, int start_level, int end_level,
-		  bool lock_flush_tlb)
+		  bool flush_on_yield)
 {
 	return slot_handle_level_range(kvm, memslot, fn, start_level,
 			end_level, memslot->base_gfn,
 			memslot->base_gfn + memslot->npages - 1,
-			lock_flush_tlb);
+			flush_on_yield);
 }
 
 static __always_inline bool
 slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
-		 slot_level_handler fn, bool lock_flush_tlb)
+		 slot_level_handler fn, bool flush_on_yield)
 {
 	return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K,
-				 PG_LEVEL_4K, lock_flush_tlb);
+				 PG_LEVEL_4K, flush_on_yield);
 }
 
 static void free_mmu_pages(struct kvm_mmu *mmu)
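
The refactored contract is easy to demonstrate outside the kernel. The toy below (all names hypothetical; plain userspace C rather than kernel code) walks a set of entries, flushes only when it has to yield its lock, and defers the final flush to the caller, so a caller iterating over many memslots pays for one trailing flush instead of one per walk:

	#include <stdbool.h>
	#include <stdio.h>

	/* Pretend we must yield the lock partway through the walk. */
	static bool toy_need_resched(int i)
	{
		return i == 2;
	}

	/*
	 * Mirrors the new slot_handle_level_range() contract: flush before
	 * yielding the lock iff flush_on_yield is set, and return whether a
	 * final flush is still owed.
	 */
	static bool toy_walk(int entries, bool flush_on_yield)
	{
		bool flush = false;
		int i;

		for (i = 0; i < entries; i++) {
			flush = true;	/* pretend this entry zapped something */
			if (toy_need_resched(i)) {
				if (flush && flush_on_yield) {
					printf("flush before yielding the lock\n");
					flush = false;
				}
				/* cond_resched_rwlock_write() would go here */
			}
		}

		return flush;	/* the caller now owns the final flush */
	}

	int main(void)
	{
		if (toy_walk(5, true))
			printf("single coalesced flush in the caller\n");
		return 0;
	}
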
@@ -5531,10 +5525,14 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 			if (start >= end)
 				continue;
 
-			slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
-						PG_LEVEL_4K,
-						KVM_MAX_HUGEPAGE_LEVEL,
-						start, end - 1, true);
+			flush = slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
+							PG_LEVEL_4K,
+							KVM_MAX_HUGEPAGE_LEVEL,
+							start, end - 1, true);
 		}
 	}
 
+	if (flush)
+		kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
+						   gfn_end);
+
@@ -5627,9 +5625,12 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
 {
 	/* FIXME: const-ify all uses of struct kvm_memory_slot. */
 	struct kvm_memory_slot *slot = (struct kvm_memory_slot *)memslot;
+	bool flush;
 
 	write_lock(&kvm->mmu_lock);
-	slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
+	flush = slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
+	if (flush)
+		kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
 
 	if (is_tdp_mmu_enabled(kvm))
 		kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot);
@@ -5641,7 +5642,7 @@ void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
 {
 	/*
 	 * All current use cases for flushing the TLBs for a specific memslot
-	 * are related to dirty logging, and do the TLB flush out of mmu_lock.
+	 * related to dirty logging, and many do the TLB flush out of mmu_lock.
 	 * The interaction between the various operations on memslot must be
 	 * serialized by slots_locks to ensure the TLB flush from one operation
 	 * is observed by any other operation on the same memslot.