KVM: x86: compile out TDP MMU on 32-bit systems
The TDP MMU assumes that it can do atomic accesses to 64-bit PTEs.
Rather than just disabling it, compile it out completely so that it
is possible to use, for example, 64-bit xchg.

To limit the number of stubs, wrap all accesses to tdp_mmu_enabled
or tdp_mmu_page with a function. Calls to all other functions in
tdp_mmu.c are eliminated and do not even reach the linker.

Reviewed-by: Sean Christopherson <seanjc@google.com>
Tested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 897218ff7c
parent e36b250e50
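The wrapper pattern the message describes is visible in the tdp_mmu.h hunk below; condensed into a minimal sketch (not the full diff), it amounts to compile-time stubs so that callers in mmu.c need no #ifdefs and the 64-bit-only calls are removed as dead code on 32-bit builds:

	/* Sketch of the wrapper pattern introduced by this commit. */
	#ifdef CONFIG_X86_64
	static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; }
	static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }
	#else
	/* 32-bit: always false, so TDP MMU call sites are eliminated by the compiler. */
	static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
	static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
	#endif

	/* A caller then reads the same on both configurations: */
	if (is_tdp_mmu_enabled(kvm))
		kvm_tdp_mmu_zap_all(kvm);	/* never reaches the linker on 32-bit */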
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1033,6 +1033,7 @@ struct kvm_arch {
 	struct kvm_pmu_event_filter *pmu_event_filter;
 	struct task_struct *nx_lpage_recovery_thread;
 
+#ifdef CONFIG_X86_64
 	/*
 	 * Whether the TDP MMU is enabled for this VM. This contains a
 	 * snapshot of the TDP MMU module parameter from when the VM was
@@ -1071,6 +1072,7 @@ struct kvm_arch {
 	 * the thread holds the MMU lock in write mode.
 	 */
 	spinlock_t tdp_mmu_pages_lock;
+#endif /* CONFIG_X86_64 */
 };
 
 struct kvm_vm_stat {
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -17,7 +17,8 @@ kvm-$(CONFIG_KVM_ASYNC_PF)	+= $(KVM)/async_pf.o
 kvm-y			+= x86.o emulate.o i8259.o irq.o lapic.o xen.o \
 			   i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \
 			   hyperv.o debugfs.o mmu/mmu.o mmu/page_track.o \
-			   mmu/spte.o mmu/tdp_iter.o mmu/tdp_mmu.o
+			   mmu/spte.o
+kvm-$(CONFIG_X86_64)	+= mmu/tdp_iter.o mmu/tdp_mmu.o
 
 kvm-intel-y		+= vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o \
 			   vmx/evmcs.o vmx/nested.o vmx/posted_intr.o
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1225,7 +1225,7 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
 {
 	struct kvm_rmap_head *rmap_head;
 
-	if (kvm->arch.tdp_mmu_enabled)
+	if (is_tdp_mmu_enabled(kvm))
 		kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
 				slot->base_gfn + gfn_offset, mask, true);
 	while (mask) {
@@ -1254,7 +1254,7 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
 {
 	struct kvm_rmap_head *rmap_head;
 
-	if (kvm->arch.tdp_mmu_enabled)
+	if (is_tdp_mmu_enabled(kvm))
 		kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
 				slot->base_gfn + gfn_offset, mask, false);
 	while (mask) {
@@ -1310,7 +1310,7 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
 		write_protected |= __rmap_write_protect(kvm, rmap_head, true);
 	}
 
-	if (kvm->arch.tdp_mmu_enabled)
+	if (is_tdp_mmu_enabled(kvm))
 		write_protected |=
 			kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn);
 
@@ -1522,7 +1522,7 @@ int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
 
 	r = kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
 
-	if (kvm->arch.tdp_mmu_enabled)
+	if (is_tdp_mmu_enabled(kvm))
 		r |= kvm_tdp_mmu_zap_hva_range(kvm, start, end);
 
 	return r;
@@ -1534,7 +1534,7 @@ int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 
 	r = kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
 
-	if (kvm->arch.tdp_mmu_enabled)
+	if (is_tdp_mmu_enabled(kvm))
 		r |= kvm_tdp_mmu_set_spte_hva(kvm, hva, &pte);
 
 	return r;
@@ -1589,7 +1589,7 @@ int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
 	int young = false;
 
 	young = kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp);
-	if (kvm->arch.tdp_mmu_enabled)
+	if (is_tdp_mmu_enabled(kvm))
 		young |= kvm_tdp_mmu_age_hva_range(kvm, start, end);
 
 	return young;
@@ -1600,7 +1600,7 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
 	int young = false;
 
 	young = kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp);
-	if (kvm->arch.tdp_mmu_enabled)
+	if (is_tdp_mmu_enabled(kvm))
 		young |= kvm_tdp_mmu_test_age_hva(kvm, hva);
 
 	return young;
@@ -3155,7 +3155,7 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
 	sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);
 
 	if (kvm_mmu_put_root(kvm, sp)) {
-		if (sp->tdp_mmu_page)
+		if (is_tdp_mmu_page(sp))
 			kvm_tdp_mmu_free_root(kvm, sp);
 		else if (sp->role.invalid)
 			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
@@ -3249,7 +3249,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 	hpa_t root;
 	unsigned i;
 
-	if (vcpu->kvm->arch.tdp_mmu_enabled) {
+	if (is_tdp_mmu_enabled(vcpu->kvm)) {
 		root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu);
 
 		if (!VALID_PAGE(root))
@@ -5411,7 +5411,7 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm)
 
 	kvm_zap_obsolete_pages(kvm);
 
-	if (kvm->arch.tdp_mmu_enabled)
+	if (is_tdp_mmu_enabled(kvm))
 		kvm_tdp_mmu_zap_all(kvm);
 
 	write_unlock(&kvm->mmu_lock);
@@ -5474,7 +5474,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 		}
 	}
 
-	if (kvm->arch.tdp_mmu_enabled) {
+	if (is_tdp_mmu_enabled(kvm)) {
 		flush = kvm_tdp_mmu_zap_gfn_range(kvm, gfn_start, gfn_end);
 		if (flush)
 			kvm_flush_remote_tlbs(kvm);
@@ -5498,7 +5498,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
 	write_lock(&kvm->mmu_lock);
 	flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
 				start_level, KVM_MAX_HUGEPAGE_LEVEL, false);
-	if (kvm->arch.tdp_mmu_enabled)
+	if (is_tdp_mmu_enabled(kvm))
 		flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, PG_LEVEL_4K);
 	write_unlock(&kvm->mmu_lock);
 
@@ -5564,7 +5564,7 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
 	slot_handle_leaf(kvm, (struct kvm_memory_slot *)memslot,
 			 kvm_mmu_zap_collapsible_spte, true);
 
-	if (kvm->arch.tdp_mmu_enabled)
+	if (is_tdp_mmu_enabled(kvm))
 		kvm_tdp_mmu_zap_collapsible_sptes(kvm, memslot);
 	write_unlock(&kvm->mmu_lock);
 }
@@ -5591,7 +5591,7 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
 
 	write_lock(&kvm->mmu_lock);
 	flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false);
-	if (kvm->arch.tdp_mmu_enabled)
+	if (is_tdp_mmu_enabled(kvm))
 		flush |= kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
 	write_unlock(&kvm->mmu_lock);
 
@@ -5614,7 +5614,7 @@ void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
 	write_lock(&kvm->mmu_lock);
 	flush = slot_handle_large_level(kvm, memslot, slot_rmap_write_protect,
 					false);
-	if (kvm->arch.tdp_mmu_enabled)
+	if (is_tdp_mmu_enabled(kvm))
 		flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, PG_LEVEL_2M);
 	write_unlock(&kvm->mmu_lock);
 
@@ -5630,7 +5630,7 @@ void kvm_mmu_slot_set_dirty(struct kvm *kvm,
 
 	write_lock(&kvm->mmu_lock);
 	flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false);
-	if (kvm->arch.tdp_mmu_enabled)
+	if (is_tdp_mmu_enabled(kvm))
 		flush |= kvm_tdp_mmu_slot_set_dirty(kvm, memslot);
 	write_unlock(&kvm->mmu_lock);
 
@@ -5658,7 +5658,7 @@ restart:
 
 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
 
-	if (kvm->arch.tdp_mmu_enabled)
+	if (is_tdp_mmu_enabled(kvm))
 		kvm_tdp_mmu_zap_all(kvm);
 
 	write_unlock(&kvm->mmu_lock);
@@ -5969,7 +5969,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
 				      struct kvm_mmu_page,
 				      lpage_disallowed_link);
 		WARN_ON_ONCE(!sp->lpage_disallowed);
-		if (sp->tdp_mmu_page) {
+		if (is_tdp_mmu_page(sp)) {
 			kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn,
 				sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level));
 		} else {
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -56,10 +56,12 @@ struct kvm_mmu_page {
 	/* Number of writes since the last time traversal visited this page. */
 	atomic_t write_flooding_count;
 
+#ifdef CONFIG_X86_64
 	bool tdp_mmu_page;
 
 	/* Used for freeing the page asyncronously if it is a TDP MMU page. */
 	struct rcu_head rcu_head;
+#endif
 };
 
 extern struct kmem_cache *mmu_page_header_cache;
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -10,24 +10,13 @@
 #include <asm/cmpxchg.h>
 #include <trace/events/kvm.h>
 
-#ifdef CONFIG_X86_64
 static bool __read_mostly tdp_mmu_enabled = false;
 module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
-#endif
-
-static bool is_tdp_mmu_enabled(void)
-{
-#ifdef CONFIG_X86_64
-	return tdp_enabled && READ_ONCE(tdp_mmu_enabled);
-#else
-	return false;
-#endif /* CONFIG_X86_64 */
-}
 
 /* Initializes the TDP MMU for the VM, if enabled. */
 void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
 {
-	if (!is_tdp_mmu_enabled())
+	if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
 		return;
 
 	/* This should not be changed for the lifetime of the VM. */
@@ -96,22 +85,6 @@ static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
 #define for_each_tdp_mmu_root(_kvm, _root)				\
 	list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)
 
-bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
-{
-	struct kvm_mmu_page *sp;
-
-	if (!kvm->arch.tdp_mmu_enabled)
-		return false;
-	if (WARN_ON(!VALID_PAGE(hpa)))
-		return false;
-
-	sp = to_shadow_page(hpa);
-	if (WARN_ON(!sp))
-		return false;
-
-	return sp->tdp_mmu_page && sp->root_count;
-}
-
 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 			  gfn_t start, gfn_t end, bool can_yield);
 
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -5,10 +5,6 @@
 
 #include <linux/kvm_host.h>
 
-void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
-void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
-
-bool is_tdp_mmu_root(struct kvm *kvm, hpa_t root);
 hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
 void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root);
 
@@ -47,4 +43,32 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
 int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
 			 int *root_level);
 
+#ifdef CONFIG_X86_64
+void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
+void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
+static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; }
+static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }
+#else
+static inline void kvm_mmu_init_tdp_mmu(struct kvm *kvm) {}
+static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
+static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
+static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
+#endif
+
+static inline bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
+{
+	struct kvm_mmu_page *sp;
+
+	if (!is_tdp_mmu_enabled(kvm))
+		return false;
+	if (WARN_ON(!VALID_PAGE(hpa)))
+		return false;
+
+	sp = to_shadow_page(hpa);
+	if (WARN_ON(!sp))
+		return false;
+
+	return is_tdp_mmu_page(sp) && sp->root_count;
+}
+
 #endif /* __KVM_X86_MMU_TDP_MMU_H */