Revert "KVM: x86/mmu: Zap only TDP MMU leafs in kvm_zap_gfn_range()"
This reverts commit cf3e26427c.
Multi-vCPU Hyper-V guests started crashing randomly on boot with the
latest kvm/queue, and the problem bisects to this particular patch.
Basically, I'm not able to boot e.g. a 16-vCPU guest successfully
anymore. Both Intel and AMD seem to be affected. Reverting the commit
saves the day.
Reported-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent fcb93eb6d0
commit 873dd12217
@@ -5842,8 +5842,8 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 	if (is_tdp_mmu_enabled(kvm)) {
 		for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
-			flush = kvm_tdp_mmu_zap_leafs(kvm, i, gfn_start,
-						      gfn_end, true, flush);
+			flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, gfn_start,
+							  gfn_end, flush);
 	}
 
 	if (flush)
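For orientation while reading the rest of the diff: with the revert applied, the call site above goes back to the wrapper chain that the later hunks (the TDP MMU implementation and its header) restore. The following reading aid, written as a C comment, is a simplified summary of those hunks, not kernel code; the bracketed notes are mine.

/*
 * Restored call chain (simplified):
 *
 *   kvm_zap_gfn_range()               [call site in the hunk above]
 *     -> kvm_tdp_mmu_zap_gfn_range()  [static inline wrapper in the header
 *                                      hunk; always passes can_yield = true]
 *     -> __kvm_tdp_mmu_zap_gfn_range()[walks every TDP MMU root of the
 *                                      address space]
 *     -> zap_gfn_range()              [per-root worker that clears the SPTEs]
 */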
@@ -906,8 +906,10 @@ bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
 }
 
 /*
- * Zap leafs SPTEs for the range of gfns, [start, end). Returns true if SPTEs
- * have been cleared and a TLB flush is needed before releasing the MMU lock.
+ * Tears down the mappings for the range of gfns, [start, end), and frees the
+ * non-root pages mapping GFNs strictly within that range. Returns true if
+ * SPTEs have been cleared and a TLB flush is needed before releasing the
+ * MMU lock.
  *
  * If can_yield is true, will release the MMU lock and reschedule if the
  * scheduler needs the CPU or there is contention on the MMU lock. If this
@@ -915,25 +917,42 @@ bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
  * the caller must ensure it does not supply too large a GFN range, or the
  * operation can cause a soft lockup.
  */
-static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
-			      gfn_t start, gfn_t end, bool can_yield, bool flush)
+static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
+			  gfn_t start, gfn_t end, bool can_yield, bool flush)
 {
+	bool zap_all = (start == 0 && end >= tdp_mmu_max_gfn_host());
 	struct tdp_iter iter;
 
+	/*
+	 * No need to try to step down in the iterator when zapping all SPTEs,
+	 * zapping the top-level non-leaf SPTEs will recurse on their children.
+	 */
+	int min_level = zap_all ? root->role.level : PG_LEVEL_4K;
+
 	end = min(end, tdp_mmu_max_gfn_host());
 
 	lockdep_assert_held_write(&kvm->mmu_lock);
 
 	rcu_read_lock();
 
-	for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) {
+	for_each_tdp_pte_min_level(iter, root, min_level, start, end) {
 		if (can_yield &&
 		    tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {
 			flush = false;
 			continue;
 		}
 
-		if (!is_shadow_present_pte(iter.old_spte) ||
+		if (!is_shadow_present_pte(iter.old_spte))
+			continue;
+
+		/*
+		 * If this is a non-last-level SPTE that covers a larger range
+		 * than should be zapped, continue, and zap the mappings at a
+		 * lower level, except when zapping all SPTEs.
+		 */
+		if (!zap_all &&
+		    (iter.gfn < start ||
+		     iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
 		    !is_last_spte(iter.old_spte, iter.level))
 			continue;
 
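The zap_all/min_level logic restored above is the part of zap_gfn_range() that decides where the page-table walk starts. A standalone paraphrase of that decision follows; it uses simplified stand-in types and a hypothetical helper name (pick_min_level), with max_gfn and root_level playing the roles of tdp_mmu_max_gfn_host() and root->role.level. It is a sketch of the idea, not the kernel code.

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t gfn_t;

enum { PG_LEVEL_4K = 1 };	/* lowest page-table level */

static int pick_min_level(gfn_t start, gfn_t end, gfn_t max_gfn, int root_level)
{
	bool zap_all = (start == 0 && end >= max_gfn);

	/*
	 * When zapping the entire GFN space, start the walk at the root
	 * level: zapping a top-level non-leaf SPTE recurses on its children,
	 * so there is no need to step down to 4K leaves.  For a partial
	 * range, walk down to 4K so only the requested GFNs are affected.
	 */
	return zap_all ? root_level : PG_LEVEL_4K;
}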
@@ -960,13 +979,13 @@ static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
  * SPTEs have been cleared and a TLB flush is needed before releasing the
  * MMU lock.
  */
-bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start, gfn_t end,
-			   bool can_yield, bool flush)
+bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
+				 gfn_t end, bool can_yield, bool flush)
 {
 	struct kvm_mmu_page *root;
 
 	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id)
-		flush = tdp_mmu_zap_leafs(kvm, root, start, end, can_yield, false);
+		flush = zap_gfn_range(kvm, root, start, end, can_yield, flush);
 
 	return flush;
 }
@@ -1214,8 +1233,8 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
 				 bool flush)
 {
-	return kvm_tdp_mmu_zap_leafs(kvm, range->slot->as_id, range->start,
-				     range->end, range->may_block, flush);
+	return __kvm_tdp_mmu_zap_gfn_range(kvm, range->slot->as_id, range->start,
+					   range->end, range->may_block, flush);
 }
 
 typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
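Both entry points above plumb a can_yield flag down to the loop in zap_gfn_range(): kvm_tdp_mmu_unmap_gfn_range() forwards range->may_block, and the header wrapper in the next hunk always passes true. In the restored loop, the local flush accumulator is cleared whenever tdp_mmu_iter_cond_resched() yields ("flush = false; continue;"), which relies on the pending TLB flush being performed at the yield point. The sketch below illustrates that contract with hypothetical helpers (lock_is_contended, do_pending_flush, drop_lock_and_resched), treating the flush-before-yield behaviour of tdp_mmu_iter_cond_resched() as an assumption inferred from the restored code rather than something this diff states explicitly.

#include <stdbool.h>

/* Hypothetical stand-ins for the kernel's locking and TLB-flush machinery. */
static bool lock_is_contended(void)     { return false; }
static void do_pending_flush(void)      { /* flush TLBs for already-zapped SPTEs */ }
static void drop_lock_and_resched(void) { /* release the lock, reschedule, retake it */ }

/*
 * Yield contract sketch: if yielding is allowed and someone is waiting for
 * the lock, flush whatever was already zapped so no stale translations
 * survive while the lock is dropped, then reschedule.  Returning true tells
 * the caller to reset its local flush flag, as zap_gfn_range() does.
 */
static bool maybe_yield(bool can_yield, bool flush)
{
	if (!can_yield || !lock_is_contended())
		return false;

	if (flush)
		do_pending_flush();

	drop_lock_and_resched();
	return true;
}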
@@ -15,8 +15,14 @@ __must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
 			  bool shared);
 
-bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start,
-			   gfn_t end, bool can_yield, bool flush);
+bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
+				 gfn_t end, bool can_yield, bool flush);
+static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id,
+					     gfn_t start, gfn_t end, bool flush)
+{
+	return __kvm_tdp_mmu_zap_gfn_range(kvm, as_id, start, end, true, flush);
+}
+
 bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
 void kvm_tdp_mmu_zap_all(struct kvm *kvm);
 void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);