KVM: x86/mmu: Ensure TLBs are flushed when yielding during GFN range zap
When flushing a range of GFNs across multiple roots, ensure any pending
flush from a previous root is honored before yielding while walking the
tables of the current root.
Note, kvm_tdp_mmu_zap_gfn_range() now intentionally overwrites its local
"flush" with the result to avoid redundant flushes. zap_gfn_range()
preserves and returns the incoming "flush", unless of course the flush was
performed prior to yielding and no new flush was triggered.
Fixes: 1af4a96025 ("KVM: x86/mmu: Yield in TDP MMU iter even if no SPTEs changed")
Cc: stable@vger.kernel.org
Reviewed-by: Ben Gardon <bgardon@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210325200119.1359384-2-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit a835429cda (parent 1e28eed176)
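The pattern the commit message describes is easier to see in isolation. Below is a minimal userspace C sketch of the same flow, not kernel code: all names (zap_root, flush_remote_tlbs, need_resched_here) are hypothetical stand-ins. Each per-root pass takes the caller's pending "flush" as input, honors it before yielding, and returns the updated state so the next root inherits it, instead of the caller OR-ing results together.

/*
 * Minimal sketch (plain C, hypothetical names): thread a pending-flush
 * flag through a loop that may yield, so a flush owed from an earlier
 * root is never dropped at a yield point.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_ROOTS   3
#define NR_ENTRIES 4

static void flush_remote_tlbs(void)
{
	puts("  TLB flush");
}

static bool need_resched_here(int entry)
{
	return entry == 2;	/* pretend the scheduler wants the CPU here */
}

/* Returns the pending-flush state; honors any pending flush before yielding. */
static bool zap_root(int root, bool flush)
{
	for (int entry = 0; entry < NR_ENTRIES; entry++) {
		if (need_resched_here(entry)) {
			if (flush)
				flush_remote_tlbs();	/* don't drop a pending flush */
			flush = false;
			/* cond_resched() would go here */
			continue;
		}
		printf("  zap root %d entry %d\n", root, entry);
		flush = true;		/* an entry was zapped, a flush is now owed */
	}
	return flush;
}

int main(void)
{
	bool flush = false;

	/* Pass the flush state through every root rather than OR-ing results. */
	for (int root = 0; root < NR_ROOTS; root++)
		flush = zap_root(root, flush);

	if (flush)
		flush_remote_tlbs();	/* final flush for anything still pending */
	return 0;
}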
@@ -86,7 +86,7 @@ static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
 	list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)
 
 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
-			  gfn_t start, gfn_t end, bool can_yield);
+			  gfn_t start, gfn_t end, bool can_yield, bool flush);
 
 void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
 {
@@ -99,7 +99,7 @@ void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
 
 	list_del(&root->link);
 
-	zap_gfn_range(kvm, root, 0, max_gfn, false);
+	zap_gfn_range(kvm, root, 0, max_gfn, false, false);
 
 	free_page((unsigned long)root->spt);
 	kmem_cache_free(mmu_page_header_cache, root);
@@ -678,20 +678,21 @@ static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
  * scheduler needs the CPU or there is contention on the MMU lock. If this
  * function cannot yield, it will not release the MMU lock or reschedule and
  * the caller must ensure it does not supply too large a GFN range, or the
- * operation can cause a soft lockup.
+ * operation can cause a soft lockup.  Note, in some use cases a flush may be
+ * required by prior actions.  Ensure the pending flush is performed prior to
+ * yielding.
  */
 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
-			  gfn_t start, gfn_t end, bool can_yield)
+			  gfn_t start, gfn_t end, bool can_yield, bool flush)
 {
 	struct tdp_iter iter;
-	bool flush_needed = false;
 
 	rcu_read_lock();
 
 	tdp_root_for_each_pte(iter, root, start, end) {
 		if (can_yield &&
-		    tdp_mmu_iter_cond_resched(kvm, &iter, flush_needed)) {
-			flush_needed = false;
+		    tdp_mmu_iter_cond_resched(kvm, &iter, flush)) {
+			flush = false;
 			continue;
 		}
 
@@ -709,11 +710,11 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 			continue;
 
 		tdp_mmu_set_spte(kvm, &iter, 0);
-		flush_needed = true;
+		flush = true;
 	}
 
 	rcu_read_unlock();
-	return flush_needed;
+	return flush;
 }
 
 /*
@@ -728,7 +729,7 @@ bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end)
 	bool flush = false;
 
 	for_each_tdp_mmu_root_yield_safe(kvm, root)
-		flush |= zap_gfn_range(kvm, root, start, end, true);
+		flush = zap_gfn_range(kvm, root, start, end, true, flush);
 
 	return flush;
 }
@@ -940,7 +941,7 @@ static int zap_gfn_range_hva_wrapper(struct kvm *kvm,
 				     struct kvm_mmu_page *root, gfn_t start,
 				     gfn_t end, unsigned long unused)
 {
-	return zap_gfn_range(kvm, root, start, end, false);
+	return zap_gfn_range(kvm, root, start, end, false, false);
 }
 
 int kvm_tdp_mmu_zap_hva_range(struct kvm *kvm, unsigned long start,