KVM: x86/mmu: Split out max mapping level calculation to helper
Factor out the logic for determining the maximum mapping level given a
memslot and a gpa.  The helper will be used when zapping collapsible
SPTEs when disabling dirty logging, e.g. to avoid zapping SPTEs that
can't possibly be rebuilt as hugepages.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210213005015.1651772-4-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 1b6d9d9ed5
parent c060c72ffe
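For context, a minimal, hypothetical sketch of the follow-up usage the commit message describes: a dirty-logging teardown path asking the new helper whether a gfn could ever be rebuilt as a hugepage before bothering to zap its SPTE. Nothing below is part of this commit; the caller name spte_collapsible(), its arguments, and the choice of KVM_MAX_HUGEPAGE_LEVEL as the cap are assumptions for illustration. Only kvm_mmu_max_mapping_level() itself comes from the patch.

/*
 * Hypothetical illustration only -- not part of this commit.  A caller
 * that walks a memslot's rmaps while disabling dirty logging could use
 * the new helper like this to skip SPTEs that can't be rebuilt as
 * hugepages anyway.  Assumes it lives in arch/x86/kvm/mmu/mmu.c, where
 * the needed types (struct kvm, struct kvm_memory_slot, gfn_t,
 * kvm_pfn_t) are already in scope.
 */
static bool spte_collapsible(struct kvm *kvm, struct kvm_memory_slot *slot,
			     gfn_t gfn, kvm_pfn_t pfn)
{
	/*
	 * kvm_mmu_max_mapping_level() clamps the cap to
	 * max_huge_page_level internally, so passing the architectural
	 * maximum (KVM_MAX_HUGEPAGE_LEVEL) simply asks "how big could
	 * this mapping possibly be?".
	 */
	return kvm_mmu_max_mapping_level(kvm, slot, gfn, pfn,
					 KVM_MAX_HUGEPAGE_LEVEL) > PG_LEVEL_4K;
}

The real caller added later in the series may differ; the point is the shape of the new API: no vCPU required, only a struct kvm, a memslot, a gfn and a pfn.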
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2756,8 +2756,8 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
 	__direct_pte_prefetch(vcpu, sp, sptep);
 }
 
-static int host_pfn_mapping_level(struct kvm_vcpu *vcpu, gfn_t gfn,
-				  kvm_pfn_t pfn, struct kvm_memory_slot *slot)
+static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
+				  struct kvm_memory_slot *slot)
 {
 	unsigned long hva;
 	pte_t *pte;
@@ -2776,19 +2776,36 @@ static int host_pfn_mapping_level(struct kvm_vcpu *vcpu, gfn_t gfn,
 	 */
 	hva = __gfn_to_hva_memslot(slot, gfn);
 
-	pte = lookup_address_in_mm(vcpu->kvm->mm, hva, &level);
+	pte = lookup_address_in_mm(kvm->mm, hva, &level);
 	if (unlikely(!pte))
 		return PG_LEVEL_4K;
 
 	return level;
 }
 
+int kvm_mmu_max_mapping_level(struct kvm *kvm, struct kvm_memory_slot *slot,
+			      gfn_t gfn, kvm_pfn_t pfn, int max_level)
+{
+	struct kvm_lpage_info *linfo;
+
+	max_level = min(max_level, max_huge_page_level);
+	for ( ; max_level > PG_LEVEL_4K; max_level--) {
+		linfo = lpage_info_slot(gfn, slot, max_level);
+		if (!linfo->disallow_lpage)
+			break;
+	}
+
+	if (max_level == PG_LEVEL_4K)
+		return PG_LEVEL_4K;
+
+	return host_pfn_mapping_level(kvm, gfn, pfn, slot);
+}
+
 int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
 			    int max_level, kvm_pfn_t *pfnp,
 			    bool huge_page_disallowed, int *req_level)
 {
 	struct kvm_memory_slot *slot;
-	struct kvm_lpage_info *linfo;
 	kvm_pfn_t pfn = *pfnp;
 	kvm_pfn_t mask;
 	int level;
@@ -2805,17 +2822,7 @@ int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
 	if (!slot)
 		return PG_LEVEL_4K;
 
-	max_level = min(max_level, max_huge_page_level);
-	for ( ; max_level > PG_LEVEL_4K; max_level--) {
-		linfo = lpage_info_slot(gfn, slot, max_level);
-		if (!linfo->disallow_lpage)
-			break;
-	}
-
-	if (max_level == PG_LEVEL_4K)
-		return PG_LEVEL_4K;
-
-	level = host_pfn_mapping_level(vcpu, gfn, pfn, slot);
+	level = kvm_mmu_max_mapping_level(vcpu->kvm, slot, gfn, pfn, max_level);
 	if (level == PG_LEVEL_4K)
 		return level;
 
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -138,6 +138,8 @@ enum {
 #define SET_SPTE_NEED_REMOTE_TLB_FLUSH	BIT(1)
 #define SET_SPTE_SPURIOUS		BIT(2)
 
+int kvm_mmu_max_mapping_level(struct kvm *kvm, struct kvm_memory_slot *slot,
+			      gfn_t gfn, kvm_pfn_t pfn, int max_level);
 int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
 			    int max_level, kvm_pfn_t *pfnp,
 			    bool huge_page_disallowed, int *req_level);