Mirror of https://mirrors.bfsu.edu.cn/git/linux.git, synced 2024-12-22 18:44:44 +08:00
39b4d43e60
Get the so-called "root" level from the low-level shadow page table
walkers instead of manually attempting to calculate it higher up the
stack, e.g. in get_mmio_spte(). When KVM is using PAE shadow paging,
the starting level of the walk, from the caller's perspective, is not
the CR3 root but rather the PDPTR "root". Checking for reserved bits
from the CR3 root causes get_mmio_spte() to consume uninitialized stack
data due to indexing into sptes[] for a level that was not filled by
get_walk(). This can result in false positives and/or negatives
depending on what garbage happens to be on the stack. (A standalone
sketch of this failure mode follows the tags below.)
Opportunistically nuke a few extra newlines.
Fixes: 95fb5b0258 ("kvm: x86/mmu: Support MMIO in the TDP MMU")
Reported-by: Richard Herbert <rherbert@sympatico.ca>
Cc: Ben Gardon <bgardon@google.com>
Cc: stable@vger.kernel.org
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20201218003139.2167891-3-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
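To make the failure mode concrete, here is a minimal, self-contained
sketch in plain userspace C, not kernel code. The names get_walk,
sptes, and root_level mirror the commit; the specific level numbers and
everything else are illustrative stand-ins. The walk begins one level
below the nominal root (as with the PDPTR "root" under PAE), so only
the lower sptes[] entries are written, and a caller that scans from the
nominal root reads uninitialized stack slots.

#include <inttypes.h>
#include <stdio.h>

#define PT64_ROOT_MAX_LEVEL 5	/* matches the kernel's sptes[] sizing */
#define NOMINAL_ROOT_LEVEL  4	/* illustrative "CR3 root" level */

/*
 * Hypothetical stand-in for the low-level walker: the walk actually
 * begins one level below the nominal root (as with the PDPTR "root"
 * under PAE), reports that level via *root_level, and fills sptes[]
 * only for the levels it visits.
 */
static int get_walk(uint64_t *sptes, int *root_level)
{
	int leaf = 1;
	int level;

	*root_level = NOMINAL_ROOT_LEVEL - 1;	/* PDPTR-style root */
	for (level = *root_level; level >= leaf; level--)
		sptes[level] = 0x1000u | (unsigned int)level; /* fake SPTE */
	return leaf;
}

int main(void)
{
	uint64_t sptes[PT64_ROOT_MAX_LEVEL + 1];	/* uninitialized stack */
	int root, leaf, level;

	leaf = get_walk(sptes, &root);

	/*
	 * Fixed pattern: scan from the level the walker reported.
	 * Starting at NOMINAL_ROOT_LEVEL instead would read sptes[4],
	 * which get_walk() never wrote -- i.e. stack garbage, and with
	 * it spurious reserved-bit hits or misses.
	 */
	for (level = root; level >= leaf; level--)
		printf("level %d: spte 0x%" PRIx64 "\n", level, sptes[level]);
	return 0;
}

Switching the final loop to start at NOMINAL_ROOT_LEVEL reproduces the
bug: whatever happens to be in sptes[4] gets treated as a real SPTE.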
51 lines
1.8 KiB
C
// SPDX-License-Identifier: GPL-2.0

#ifndef __KVM_X86_MMU_TDP_MMU_H
#define __KVM_X86_MMU_TDP_MMU_H

#include <linux/kvm_host.h>

void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);

bool is_tdp_mmu_root(struct kvm *kvm, hpa_t root);
hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root);

bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end);
void kvm_tdp_mmu_zap_all(struct kvm *kvm);

int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
		    int map_writable, int max_level, kvm_pfn_t pfn,
		    bool prefault);

int kvm_tdp_mmu_zap_hva_range(struct kvm *kvm, unsigned long start,
			      unsigned long end);

int kvm_tdp_mmu_age_hva_range(struct kvm *kvm, unsigned long start,
			      unsigned long end);
int kvm_tdp_mmu_test_age_hva(struct kvm *kvm, unsigned long hva);

int kvm_tdp_mmu_set_spte_hva(struct kvm *kvm, unsigned long address,
			     pte_t *host_ptep);

bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
			     int min_level);
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
				  struct kvm_memory_slot *slot);
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot);
bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       const struct kvm_memory_slot *slot);

bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn);

int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level);

#endif /* __KVM_X86_MMU_TDP_MMU_H */
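For context on how the last declaration above is consumed, here is a
condensed sketch of the get_mmio_spte() pattern the commit message
describes. It is simplified from the kernel source of that era, not
verbatim: the EPT memtype check and some validity checks are omitted,
and the helper names should be treated as approximations.

static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
{
	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
	struct rsvd_bits_validate *rsvd_check;
	int root, leaf, level;
	bool reserved = false;

	/* The walker, not the caller, reports the true root level. */
	if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
		leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
	else
		leaf = get_walk(vcpu, addr, sptes, &root);

	if (leaf == -1) {
		*sptep = 0ull;
		return reserved;
	}

	rsvd_check = &vcpu->arch.mmu->shadow_zero_check;

	/*
	 * Only sptes[leaf..root] were written by the walk; scanning from
	 * any higher level would consume uninitialized stack data.
	 */
	for (level = root; level >= leaf; level--) {
		if (!is_shadow_present_pte(sptes[level]))
			break;
		reserved |= __is_rsvd_bits_set(rsvd_check, sptes[level],
					       level);
	}

	*sptep = sptes[leaf];
	return reserved;
}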