Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-12-20 09:34:44 +08:00)
KVM: x86/mmu: look for a cached PGD when going from 32-bit to 64-bit
Right now, PGD caching avoids placing a PAE root in the cache by using the old value of mmu->root_level and mmu->shadow_root_level; it does not look for a cached PGD if the old root is a PAE one, and then frees it using kvm_mmu_free_roots.

Change the logic instead to free the uncacheable root early. This way, __kvm_mmu_new_pgd is able to look up the cache when going from 32-bit to 64-bit (if there is a hit, the invalid root becomes the least recently used). An example of this is nested virtualization with shadow paging, when a 64-bit L1 runs a 32-bit L2.

As a side effect (which is actually the reason why this patch was written), PGD caching does not use the old value of mmu->root_level and mmu->shadow_root_level anymore.

Reviewed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 5499ea73e7
parent 0c1c92f15f
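The cache behavior described above is easiest to see in miniature. Below is a minimal user-space sketch of the two lookup operations this patch splits fast_pgd_switch into; everything in it (root_t, NUM_PREV, INVALID, cur, prev, and the two function names) is an illustrative stand-in, not kernel code: the real roots are struct kvm_mmu_root_info entries matched by pgd and role through is_root_usable(), not bare integers.

/* Stand-alone simulation; compile with: cc -std=c99 pgd_cache_sketch.c */
#include <stdio.h>
#include <stdbool.h>

#define NUM_PREV 4           /* stand-in for KVM_MMU_NUM_PREV_ROOTS */
#define INVALID  (-1L)       /* stand-in for INVALID_PAGE */

typedef long root_t;         /* stand-in for struct kvm_mmu_root_info */

static root_t cur;              /* plays the role of mmu->root */
static root_t prev[NUM_PREV];   /* plays the role of mmu->prev_roots[] */

static void swap_roots(root_t *a, root_t *b)
{
	root_t t = *a;
	*a = *b;
	*b = t;
}

/* Current root is valid and cacheable: rotate it into the MRU slot. */
static bool find_and_keep_current(root_t wanted)
{
	if (cur == wanted)
		return true;
	for (int i = 0; i < NUM_PREV; i++) {
		/* Same rotation as the kernel comment: C 0 1 2 3 -> 0 C 1 2 3 -> ... */
		swap_roots(&cur, &prev[i]);
		if (cur == wanted)
			return true;
	}
	/* Miss: cur now holds the evicted LRU entry; "free" it. */
	cur = INVALID;
	return false;
}

/* Current root is already invalid (e.g. a freed PAE root): search the cache only. */
static bool find_without_current(root_t wanted)
{
	int i;

	for (i = 0; i < NUM_PREV; i++)
		if (prev[i] == wanted)
			goto hit;
	return false;
hit:
	swap_roots(&cur, &prev[i]);
	/* Bubble the invalid slot toward the end so it becomes the LRU. */
	for (; i < NUM_PREV - 1; i++)
		prev[i] = prev[i + 1];
	prev[i] = INVALID;
	return true;
}

int main(void)
{
	/* The uncacheable PAE root was freed early, so cur is invalid;
	 * the cache still holds four 64-bit roots. */
	cur = INVALID;
	for (int i = 0; i < NUM_PREV; i++)
		prev[i] = 10 + i;

	find_without_current(12);
	printf("cur=%ld prev=[%ld %ld %ld %ld]\n",
	       cur, prev[0], prev[1], prev[2], prev[3]);
	/* -> cur=12 prev=[10 11 13 -1]: the invalid slot is now the LRU */

	find_and_keep_current(10);
	printf("cur=%ld prev=[%ld %ld %ld %ld]\n",
	       cur, prev[0], prev[1], prev[2], prev[3]);
	/* -> cur=10 prev=[12 11 13 -1]: the displaced root 12 is the MRU */
	return 0;
}

Running the sketch prints the two states shown in the comments: after the 32-bit-to-64-bit switch the invalidated slot has been bubbled to the LRU position, exactly the "invalid root becomes the least recently used" behavior described above, and a later hit rotates the displaced current root into the MRU slot.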
@@ -4085,52 +4085,88 @@ static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
 				  union kvm_mmu_page_role role)
 {
 	return (role.direct || pgd == root->pgd) &&
-	       VALID_PAGE(root->hpa) && to_shadow_page(root->hpa) &&
+	       VALID_PAGE(root->hpa) &&
 	       role.word == to_shadow_page(root->hpa)->role.word;
 }
 
 /*
- * Find out if a previously cached root matching the new pgd/role is available.
- * The current root is also inserted into the cache.
- * If a matching root was found, it is assigned to kvm_mmu->root.hpa and true is
- * returned.
- * Otherwise, the LRU root from the cache is assigned to kvm_mmu->root.hpa and
- * false is returned. This root should now be freed by the caller.
+ * Find out if a previously cached root matching the new pgd/role is available,
+ * and insert the current root as the MRU in the cache.
+ * If a matching root is found, it is assigned to kvm_mmu->root and
+ * true is returned.
+ * If no match is found, kvm_mmu->root is left invalid, the LRU root is
+ * evicted to make room for the current root, and false is returned.
  */
-static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_pgd,
-				  union kvm_mmu_page_role new_role)
+static bool cached_root_find_and_keep_current(struct kvm *kvm, struct kvm_mmu *mmu,
+					      gpa_t new_pgd,
+					      union kvm_mmu_page_role new_role)
 {
 	uint i;
-	struct kvm_mmu *mmu = vcpu->arch.mmu;
 
 	if (is_root_usable(&mmu->root, new_pgd, new_role))
 		return true;
 
 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
 		/*
 		 * The swaps end up rotating the cache like this:
 		 *   C   0 1 2 3   (on entry to the function)
 		 *   0   C 1 2 3
 		 *   1   C 0 2 3
 		 *   2   C 0 1 3
 		 *   3   C 0 1 2   (on exit from the loop)
 		 */
 		swap(mmu->root, mmu->prev_roots[i]);
-
 		if (is_root_usable(&mmu->root, new_pgd, new_role))
-			break;
+			return true;
 	}
 
-	return i < KVM_MMU_NUM_PREV_ROOTS;
+	kvm_mmu_free_roots(kvm, mmu, KVM_MMU_ROOT_CURRENT);
+	return false;
 }
 
-static bool fast_pgd_switch(struct kvm_vcpu *vcpu, gpa_t new_pgd,
-			    union kvm_mmu_page_role new_role)
+/*
+ * Find out if a previously cached root matching the new pgd/role is available.
+ * On entry, mmu->root is invalid.
+ * If a matching root is found, it is assigned to kvm_mmu->root, the LRU entry
+ * of the cache becomes invalid, and true is returned.
+ * If no match is found, kvm_mmu->root is left invalid and false is returned.
+ */
+static bool cached_root_find_without_current(struct kvm *kvm, struct kvm_mmu *mmu,
+					     gpa_t new_pgd,
+					     union kvm_mmu_page_role new_role)
 {
-	struct kvm_mmu *mmu = vcpu->arch.mmu;
+	uint i;
 
+	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
+		if (is_root_usable(&mmu->prev_roots[i], new_pgd, new_role))
+			goto hit;
+
+	return false;
+
+hit:
+	swap(mmu->root, mmu->prev_roots[i]);
+	/* Bubble up the remaining roots. */
+	for (; i < KVM_MMU_NUM_PREV_ROOTS - 1; i++)
+		mmu->prev_roots[i] = mmu->prev_roots[i + 1];
+	mmu->prev_roots[i].hpa = INVALID_PAGE;
+	return true;
+}
+
+static bool fast_pgd_switch(struct kvm *kvm, struct kvm_mmu *mmu,
+			    gpa_t new_pgd, union kvm_mmu_page_role new_role)
+{
 	/*
-	 * For now, limit the fast switch to 64-bit hosts+VMs in order to avoid
+	 * For now, limit the caching to 64-bit hosts+VMs in order to avoid
 	 * having to deal with PDPTEs. We may add support for 32-bit hosts/VMs
 	 * later if necessary.
 	 */
-	if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
-	    mmu->root_level >= PT64_ROOT_4LEVEL)
-		return cached_root_available(vcpu, new_pgd, new_role);
+	if (VALID_PAGE(mmu->root.hpa) && !to_shadow_page(mmu->root.hpa))
+		kvm_mmu_free_roots(kvm, mmu, KVM_MMU_ROOT_CURRENT);
 
-	return false;
+	if (VALID_PAGE(mmu->root.hpa))
+		return cached_root_find_and_keep_current(kvm, mmu, new_pgd, new_role);
+	else
+		return cached_root_find_without_current(kvm, mmu, new_pgd, new_role);
 }
 
 static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
@@ -4138,8 +4174,8 @@ static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
 {
 	struct kvm_mmu *mmu = vcpu->arch.mmu;
 
-	if (!fast_pgd_switch(vcpu, new_pgd, new_role)) {
-		kvm_mmu_free_roots(vcpu->kvm, mmu, KVM_MMU_ROOT_CURRENT);
+	if (!fast_pgd_switch(vcpu->kvm, mmu, new_pgd, new_role)) {
+		/* kvm_mmu_ensure_valid_pgd will set up a new root. */
		return;
	}
 
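Two details in the new fast_pgd_switch are worth noting. First, an uncacheable root is recognized as a valid mmu->root.hpa without a backing shadow page (the special PAE roots have no struct kvm_mmu_page, so to_shadow_page() yields NULL, which is what the !to_shadow_page(mmu->root.hpa) check catches), and it is freed before the cache lookup rather than instead of it. Second, the choice between the keep-current and without-current variants then depends only on whether mmu->root is still valid, so the old mmu->root_level and mmu->shadow_root_level no longer participate in the caching decision at all.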