// SPDX-License-Identifier: GPL-2.0
#ifndef __KVM_X86_MMU_TDP_MMU_H
#define __KVM_X86_MMU_TDP_MMU_H

#include <linux/kvm_host.h>
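/*
 * Root management: kvm_tdp_mmu_get_vcpu_root_hpa() returns the HPA of a TDP
 * MMU root page table for the vCPU, allocating one if necessary, and
 * kvm_tdp_mmu_free_root() tears down a root that is no longer referenced.
 */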
hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root);

/*
 * Zap the GFN range [start, end).  Callers that may already have a TLB flush
 * pending, e.g. NX page recovery zapping a shadow page, must pass
 * can_yield == false: if a flush is pending from a previous invocation of the
 * zapping helper, either in the TDP MMU or the legacy MMU, but the TDP MMU
 * has not accumulated a flush for the current invocation, then yielding would
 * release mmu_lock with stale TLB entries.
 */
bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end,
                                 bool can_yield);
static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start,
                                             gfn_t end)
{
        return __kvm_tdp_mmu_zap_gfn_range(kvm, start, end, true);
}
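/*
 * Sketch of a hypothetical caller of kvm_tdp_mmu_zap_sp() below, e.g. NX page
 * recovery zapping a single shadow page; mmu_lock must be held for write and
 * the caller owns the TLB flush, roughly:
 *
 *      write_lock(&kvm->mmu_lock);
 *      if (kvm_tdp_mmu_zap_sp(kvm, sp))
 *              kvm_flush_remote_tlbs(kvm);
 *      write_unlock(&kvm->mmu_lock);
 */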
|
|
|
|
static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
|
|
|
|
{
|
|
|
|
gfn_t end = sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Don't allow yielding, as the caller may have a flush pending. Note,
|
|
|
|
* if mmu_lock is held for write, zapping will never yield in this case,
|
|
|
|
* but explicitly disallow it for safety. The TDP MMU does not yield
|
|
|
|
* until it has made forward progress (steps sideways), and when zapping
|
|
|
|
* a single shadow page that it's guaranteed to see (thus the mmu_lock
|
|
|
|
* requirement), its "step sideways" will always step beyond the bounds
|
|
|
|
* of the shadow page's gfn range and stop iterating before yielding.
|
|
|
|
*/
|
|
|
|
lockdep_assert_held_write(&kvm->mmu_lock);
|
|
|
|
return __kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn, end, false);
|
|
|
|
}
void kvm_tdp_mmu_zap_all(struct kvm *kvm);
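/*
 * Page fault handling: kvm_tdp_mmu_map() installs a mapping for @gpa, mapping
 * @pfn at the largest possible level up to @max_level, and returns a RET_PF_*
 * value indicating how the fault was handled.
 */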
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
                    int map_writable, int max_level, kvm_pfn_t pfn,
                    bool prefault);
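/*
 * MMU notifier hooks: these operate on host virtual address ranges and back
 * the hva-range unmap, age/test-age and change_pte notifier callbacks for the
 * TDP MMU.
 */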
int kvm_tdp_mmu_zap_hva_range(struct kvm *kvm, unsigned long start,
                              unsigned long end);
int kvm_tdp_mmu_age_hva_range(struct kvm *kvm, unsigned long start,
                              unsigned long end);
int kvm_tdp_mmu_test_age_hva(struct kvm *kvm, unsigned long hva);
int kvm_tdp_mmu_set_spte_hva(struct kvm *kvm, unsigned long address,
                             pte_t *host_ptep);
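/*
 * Memslot protection and dirty logging: write-protect or clear dirty bits on
 * SPTEs within a memslot, either wholesale, for a masked set of GFNs, or for
 * a single GFN, and zap mappings that could otherwise be re-collapsed into
 * huge pages.
 */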
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
                             int min_level);
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
                                  struct kvm_memory_slot *slot);
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
                                       struct kvm_memory_slot *slot,
                                       gfn_t gfn, unsigned long mask,
                                       bool wrprot);
void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
                                       struct kvm_memory_slot *slot);
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
                                   struct kvm_memory_slot *slot, gfn_t gfn);
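/*
 * kvm_tdp_mmu_get_walk() walks the TDP page tables for @addr, storing the
 * SPTE encountered at each level in @sptes and reporting the root level; it
 * is used, e.g., when probing for MMIO SPTEs.
 */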
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
                         int *root_level);
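/*
 * The TDP MMU is only supported on 64-bit hosts; on 32-bit builds the init,
 * uninit and query helpers below compile down to no-ops that report the TDP
 * MMU as disabled.
 */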
#ifdef CONFIG_X86_64
void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);

static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; }
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }
#else
static inline void kvm_mmu_init_tdp_mmu(struct kvm *kvm) {}
static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
#endif
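/*
 * Returns true if @hpa is the root of a live TDP MMU paging structure, i.e.
 * a TDP MMU shadow page with a non-zero root count.
 */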
static inline bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
{
        struct kvm_mmu_page *sp;

        if (!is_tdp_mmu_enabled(kvm))
                return false;
        if (WARN_ON(!VALID_PAGE(hpa)))
                return false;

        sp = to_shadow_page(hpa);
        if (WARN_ON(!sp))
                return false;

        return is_tdp_mmu_page(sp) && sp->root_count;
}
#endif /* __KVM_X86_MMU_TDP_MMU_H */