KVM: MMU: Do not unconditionally read PDPTE from guest memory
Architecturally, PDPTEs are cached in the PDPTRs when CR3 is reloaded. On SVM it is not possible to implement this, but on VMX it is possible, and was indeed implemented until nested SVM changed this to unconditionally read PDPTEs dynamically. This has a noticeable impact when running PAE guests.

Fix by changing the MMU to read PDPTRs from the cache, falling back to reading from guest memory for the nested MMU.

Signed-off-by: Avi Kivity <avi@redhat.com>
Tested-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent cf3ace79c0
commit e4e517b4be
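For orientation before the diff: the patch routes every PDPTE read through a per-MMU callback, so the common case is a lookup in values cached at CR3-load time, and only the nested MMU pays for a guest-memory read. The following is a minimal standalone C sketch of that dispatch scheme, not kernel code; the names (struct mmu, cached_pdptr, memory_pdptr, guest_pdpt) are invented for illustration.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for guest physical memory holding the four-entry PDPT. */
static uint64_t guest_pdpt[4] = { 0x1001, 0x2001, 0x3001, 0x4001 };

struct mmu {
	uint64_t pdptrs[4];	/* snapshot taken when CR3 is loaded */
	uint64_t (*get_pdptr)(struct mmu *m, int index);
};

/* Fast path: return the value cached at the last CR3 load. */
static uint64_t cached_pdptr(struct mmu *m, int index)
{
	return m->pdptrs[index];
}

/* Slow path: re-read "guest memory" on every access (the nested fallback). */
static uint64_t memory_pdptr(struct mmu *m, int index)
{
	(void)m;
	return guest_pdpt[index];
}

int main(void)
{
	struct mmu m = { .get_pdptr = cached_pdptr };

	for (int i = 0; i < 4; i++)	/* "CR3 load": refresh the cache */
		m.pdptrs[i] = guest_pdpt[i];
	printf("PDPTE0 = %#llx\n", (unsigned long long)m.get_pdptr(&m, 0));

	m.get_pdptr = memory_pdptr;	/* a nested MMU swaps in the slow path */
	printf("PDPTE0 = %#llx\n", (unsigned long long)m.get_pdptr(&m, 0));
	return 0;
}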
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -265,6 +265,7 @@ struct kvm_mmu {
 	void (*new_cr3)(struct kvm_vcpu *vcpu);
 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
 	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
+	u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
 	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
 			  bool prefault);
 	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -45,13 +45,6 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
 	return vcpu->arch.walk_mmu->pdptrs[index];
 }
 
-static inline u64 kvm_pdptr_read_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, int index)
-{
-	load_pdptrs(vcpu, mmu, mmu->get_cr3(vcpu));
-
-	return mmu->pdptrs[index];
-}
-
 static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
 {
 	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2770,7 +2770,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 
 		ASSERT(!VALID_PAGE(root));
 		if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
-			pdptr = kvm_pdptr_read_mmu(vcpu, &vcpu->arch.mmu, i);
+			pdptr = vcpu->arch.mmu.get_pdptr(vcpu, i);
 			if (!is_present_gpte(pdptr)) {
 				vcpu->arch.mmu.pae_root[i] = 0;
 				continue;
@@ -3318,6 +3318,7 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 	context->direct_map = true;
 	context->set_cr3 = kvm_x86_ops->set_tdp_cr3;
 	context->get_cr3 = get_cr3;
+	context->get_pdptr = kvm_pdptr_read;
 	context->inject_page_fault = kvm_inject_page_fault;
 	context->nx = is_nx(vcpu);
 
@@ -3376,6 +3377,7 @@ static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.walk_mmu->set_cr3 = kvm_x86_ops->set_cr3;
 	vcpu->arch.walk_mmu->get_cr3 = get_cr3;
+	vcpu->arch.walk_mmu->get_pdptr = kvm_pdptr_read;
 	vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
 
 	return r;
@@ -3386,6 +3388,7 @@ static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
 	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
 
 	g_context->get_cr3 = get_cr3;
+	g_context->get_pdptr = kvm_pdptr_read;
 	g_context->inject_page_fault = kvm_inject_page_fault;
 
 	/*
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -163,7 +163,7 @@ retry_walk:
 
 #if PTTYPE == 64
 	if (walker->level == PT32E_ROOT_LEVEL) {
-		pte = kvm_pdptr_read_mmu(vcpu, mmu, (addr >> 30) & 3);
+		pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
 		trace_kvm_mmu_paging_element(pte, walker->level);
 		if (!is_present_gpte(pte))
 			goto error;
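The walker above selects the PDPTE with (addr >> 30) & 3: in PAE paging, bits 31:30 of the 32-bit guest virtual address pick one of the four page-directory-pointer entries. A tiny self-contained check of that index math, with illustrative addresses only:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* 0x00000000-0x3FFFFFFF -> PDPTE 0, ..., 0xC0000000-0xFFFFFFFF -> PDPTE 3 */
	uint32_t addr = 0xC0100000;		/* lands in the top quarter */
	assert(((addr >> 30) & 3) == 3);
	assert(((0x3FFFFFFFu >> 30) & 3) == 0);
	return 0;
}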
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1844,6 +1844,20 @@ static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
 	return svm->nested.nested_cr3;
 }
 
+static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+	u64 cr3 = svm->nested.nested_cr3;
+	u64 pdpte;
+	int ret;
+
+	ret = kvm_read_guest_page(vcpu->kvm, gpa_to_gfn(cr3), &pdpte,
+				  offset_in_page(cr3) + index * 8, 8);
+	if (ret)
+		return 0;
+	return pdpte;
+}
+
 static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
 				   unsigned long root)
 {
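A note on the single-page read above: the PAE PDPT named by CR3 is four 8-byte entries (32 bytes) and 32-byte aligned, so all four PDPTEs sit in one page and an access at offset_in_page(cr3) + index * 8 cannot cross a page boundary. A minimal standalone sketch of that address arithmetic, using a hypothetical pdpte_gpa helper (not a kernel function) that masks the CR3 flag bits for clarity:

#include <assert.h>
#include <stdint.h>

#define PAGE_MASK_4K 0xFFFull

/* Guest-physical address of PDPTE 'index' for a PAE-mode CR3 value. */
static uint64_t pdpte_gpa(uint64_t cr3, int index)
{
	uint64_t base = cr3 & ~31ull;		/* PDPT is 32-byte aligned */
	return base + (uint64_t)index * 8;	/* 8 bytes per entry */
}

int main(void)
{
	uint64_t cr3 = 0x12345FE0;	/* example: table near the end of a page */
	/* entries 0..3 all resolve into the same 4 KiB page */
	assert((pdpte_gpa(cr3, 0) & ~PAGE_MASK_4K) ==
	       (pdpte_gpa(cr3, 3) & ~PAGE_MASK_4K));
	return 0;
}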
@@ -1875,6 +1889,7 @@ static int nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3;
 	vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3;
+	vcpu->arch.mmu.get_pdptr = nested_svm_get_tdp_pdptr;
 	vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
 	vcpu->arch.mmu.shadow_root_level = get_npt_level();
 	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;