
kvm: x86/mmu: Separate making SPTEs from set_spte

Separate the functions for generating leaf page table entries from the
function that inserts them into the paging structure. This refactoring
will facilitate changes to the MMU synchronization model to use atomic
compare / exchanges (which are not guaranteed to succeed) instead of a
monolithic MMU lock.
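
For illustration only (this helper is not part of the patch, and its name
and parameters are assumptions): once SPTE construction has no side effects
on the paging structure, a caller can compute the new value first and then
attempt to install it with a compare-and-exchange, retrying or bailing out
if another thread changed the entry in the meantime. A minimal sketch of
that pattern:

/*
 * Hypothetical sketch, not taken from this patch: install new_spte only if
 * the entry still holds the value the caller computed against, e.g. the
 * old_spte that was passed to make_spte().
 */
static bool try_set_spte_atomic(u64 *sptep, u64 old_spte, u64 new_spte)
{
        return cmpxchg64(sptep, old_spte, new_spte) == old_spte;
}

On failure the caller would re-read the entry, rebuild the SPTE, and retry,
or simply let the fault be replayed; this is the sense in which such
exchanges are not guaranteed to succeed.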

No functional change expected.

Tested by running kvm-unit-tests and KVM selftests on an Intel Haswell
machine. This commit introduced no new failures.

This series can be viewed in Gerrit at:
	https://linux-review.googlesource.com/c/virt/kvm/kvm/+/2538

Signed-off-by: Ben Gardon <bgardon@google.com>
Reviewed-by: Peter Shier <pshier@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Authored by Ben Gardon on 2020-10-14 20:26:41 +02:00; committed by Paolo Bonzini
parent cc4674d0de
commit 799a4190e7

@@ -2996,20 +2996,15 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
 #define SET_SPTE_NEED_REMOTE_TLB_FLUSH  BIT(1)
 #define SET_SPTE_SPURIOUS               BIT(2)
 
-static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
-                    unsigned int pte_access, int level,
-                    gfn_t gfn, kvm_pfn_t pfn, bool speculative,
-                    bool can_unsync, bool host_writable)
+static int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
+                     gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool speculative,
+                     bool can_unsync, bool host_writable, bool ad_disabled,
+                     u64 *new_spte)
 {
         u64 spte = 0;
         int ret = 0;
-        struct kvm_mmu_page *sp;
-
-        if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
-                return 0;
 
-        sp = sptep_to_sp(sptep);
-        if (sp_ad_disabled(sp))
+        if (ad_disabled)
                 spte |= SPTE_AD_DISABLED_MASK;
         else if (kvm_vcpu_ad_need_write_protect(vcpu))
                 spte |= SPTE_AD_WRPROT_ONLY_MASK;
@@ -3062,8 +3057,8 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
          * is responsibility of mmu_get_page / kvm_sync_page.
          * Same reasoning can be applied to dirty page accounting.
          */
-        if (!can_unsync && is_writable_pte(*sptep))
-                goto set_pte;
+        if (!can_unsync && is_writable_pte(old_spte))
+                goto out;
 
         if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
                 pgprintk("%s: found shadow page for %llx, marking ro\n",
@@ -3074,15 +3069,37 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                 }
         }
 
-        if (pte_access & ACC_WRITE_MASK) {
-                kvm_vcpu_mark_page_dirty(vcpu, gfn);
+        if (pte_access & ACC_WRITE_MASK)
                 spte |= spte_shadow_dirty_mask(spte);
-        }
 
         if (speculative)
                 spte = mark_spte_for_access_track(spte);
 
-set_pte:
+out:
+        *new_spte = spte;
+        return ret;
+}
+
+static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
+                    unsigned int pte_access, int level,
+                    gfn_t gfn, kvm_pfn_t pfn, bool speculative,
+                    bool can_unsync, bool host_writable)
+{
+        u64 spte;
+        struct kvm_mmu_page *sp;
+        int ret;
+
+        if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
+                return 0;
+
+        sp = sptep_to_sp(sptep);
+
+        ret = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative,
+                        can_unsync, host_writable, sp_ad_disabled(sp), &spte);
+
+        if (spte & PT_WRITABLE_MASK)
+                kvm_vcpu_mark_page_dirty(vcpu, gfn);
+
         if (*sptep == spte)
                 ret |= SET_SPTE_SPURIOUS;
         else if (mmu_spte_update(sptep, spte))