KVM: MMU: retry #PF for softmmu
Retry #PF for softmmu only when the current vcpu has the same cr3 as it had when the #PF occurred.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
commit fb67e14fc9
parent 2ec4739ddc
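Before the diff, a condensed stand-alone C sketch of the policy described above: when an async #PF is set up under the shadow MMU, the guest's cr3 is recorded; when the page later becomes ready, the fault is retried only if the vcpu is still in the same MMU mode and, for softmmu, still uses that cr3. The sketch is illustrative only; the names fake_vcpu, pending_pf and should_retry are invented here and do not exist in KVM.

#include <stdbool.h>
#include <stdio.h>

struct fake_vcpu {
        unsigned long cr3;      /* current guest page-table root */
        bool direct_map;        /* true for TDP (EPT/NPT), false for softmmu */
};

struct pending_pf {
        unsigned long cr3;      /* cr3 captured when the async #PF was queued */
        bool direct_map;        /* MMU mode captured when the async #PF was queued */
};

/* Models the checks kvm_arch_async_page_ready() performs after this commit. */
static bool should_retry(const struct fake_vcpu *vcpu,
                         const struct pending_pf *work)
{
        if (vcpu->direct_map != work->direct_map)
                return false;   /* MMU mode changed since the fault was queued */
        if (!vcpu->direct_map && work->cr3 != vcpu->cr3)
                return false;   /* softmmu: the guest switched address spaces */
        return true;
}

int main(void)
{
        struct fake_vcpu vcpu = { .cr3 = 0x1000, .direct_map = false };
        struct pending_pf work = { .cr3 = 0x1000, .direct_map = false };

        printf("same cr3  -> retry: %d\n", should_retry(&vcpu, &work));

        vcpu.cr3 = 0x2000;      /* the guest loaded a different page-table root */
        printf("other cr3 -> retry: %d\n", should_retry(&vcpu, &work));
        return 0;
}

Run, this prints retry: 1 for the matching cr3 and retry: 0 after the simulated page-table switch, mirroring the early return added to kvm_arch_async_page_ready() in the diff below.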
arch/x86/include/asm/kvm_host.h
@@ -593,6 +593,7 @@ struct kvm_x86_ops {
 struct kvm_arch_async_pf {
        u32 token;
        gfn_t gfn;
+       unsigned long cr3;
        bool direct_map;
 };
 
arch/x86/kvm/mmu.c
@@ -2608,9 +2608,11 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
 {
        struct kvm_arch_async_pf arch;
+
        arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
        arch.gfn = gfn;
        arch.direct_map = vcpu->arch.mmu.direct_map;
+       arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu);
 
        return kvm_setup_async_pf(vcpu, gva, gfn, &arch);
 }
arch/x86/kvm/paging_tmpl.h
@@ -438,7 +438,8 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
 static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                         struct guest_walker *gw,
                         int user_fault, int write_fault, int hlevel,
-                        int *ptwrite, pfn_t pfn, bool map_writable)
+                        int *ptwrite, pfn_t pfn, bool map_writable,
+                        bool prefault)
 {
        unsigned access = gw->pt_access;
        struct kvm_mmu_page *sp = NULL;
@@ -512,7 +513,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 
        mmu_set_spte(vcpu, it.sptep, access, gw->pte_access & access,
                     user_fault, write_fault, dirty, ptwrite, it.level,
-                    gw->gfn, pfn, false, map_writable);
+                    gw->gfn, pfn, prefault, map_writable);
        FNAME(pte_prefetch)(vcpu, gw, it.sptep);
 
        return it.sptep;
@@ -568,8 +569,11 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
         */
        if (!r) {
                pgprintk("%s: guest page fault\n", __func__);
-               inject_page_fault(vcpu, &walker.fault);
-               vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
+               if (!prefault) {
+                       inject_page_fault(vcpu, &walker.fault);
+                       /* reset fork detector */
+                       vcpu->arch.last_pt_write_count = 0;
+               }
                return 0;
        }
 
@@ -599,7 +603,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
        trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
        kvm_mmu_free_some_pages(vcpu);
        sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
-                            level, &write_pt, pfn, map_writable);
+                            level, &write_pt, pfn, map_writable, prefault);
        (void)sptep;
        pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
                 sptep, *sptep, write_pt);
arch/x86/kvm/x86.c
@@ -6182,7 +6182,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
 {
        int r;
 
-       if (!vcpu->arch.mmu.direct_map || !work->arch.direct_map ||
+       if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
              is_error_page(work->page))
                return;
 
@@ -6190,6 +6190,10 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
        if (unlikely(r))
                return;
 
+       if (!vcpu->arch.mmu.direct_map &&
+             work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu))
+               return;
+
        vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
 }
 