KVM: RISC-V: Retry fault if vma_lookup() results become invalid
Read mmu_invalidate_seq before dropping the mmap_lock so that KVM can
detect if the results of vma_lookup() (e.g. vma_pagesize) become stale
before it acquires kvm->mmu_lock. This fixes a theoretical bug where a
VMA could be changed by userspace after vma_lookup() and before KVM
reads the mmu_invalidate_seq, causing KVM to install page table entries
based on a (possibly) no-longer-valid vma_pagesize.
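For context, the stale-results detection relies on KVM's mmu_invalidate_seq counter: every mmu-notifier invalidation bumps it (kvm_mmu_invalidate_begin()/kvm_mmu_invalidate_end()), and a fault handler that snapshotted it earlier can later call mmu_invalidate_retry() under kvm->mmu_lock to see whether anything changed in between. The sketch below is a simplified illustration of that fault-path pattern, not the literal body of kvm_riscv_gstage_map(); the error handling and the bail-out return are placeholders.

        struct vm_area_struct *vma;
        unsigned long mmu_seq;

        mmap_read_lock(current->mm);
        vma = vma_lookup(current->mm, hva);
        /* ... derive vma_pagesize from the VMA ... */

        /*
         * Snapshot the invalidation counter while mmap_lock is still held;
         * any later change to the VMA has to go through an mmu-notifier
         * invalidation, which increments kvm->mmu_invalidate_seq.
         */
        mmu_seq = kvm->mmu_invalidate_seq;
        mmap_read_unlock(current->mm);

        /* ... translate the gfn to a host pfn; this may sleep ... */

        spin_lock(&kvm->mmu_lock);
        if (mmu_invalidate_retry(kvm, mmu_seq)) {
                /* Snapshot is stale: drop the lock and let the guest refault. */
                spin_unlock(&kvm->mmu_lock);
                return 0;       /* placeholder; the real code unwinds via a label */
        }
        /* Safe to install the mapping using the snapshotted VMA information. */
        spin_unlock(&kvm->mmu_lock);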
Re-order the MMU cache top-up to earlier in kvm_riscv_gstage_map() so
that it is not done after KVM has read mmu_invalidate_seq (i.e. so as
to avoid inducing spurious fault retries).
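Concretely, the fixed path performs the potentially sleeping cache top-up before taking mmap_lock, so no allocation sits between the mmu_invalidate_seq snapshot and the point where it is checked. A condensed view of the resulting order (the actual hunks are shown further below; size checks and most error paths are omitted here):

        /* 1. Top up the G-stage page-table cache first; this may sleep. */
        ret = kvm_mmu_topup_memory_cache(pcache, gstage_pgd_levels);
        if (ret)
                return ret;

        /* 2. Look up the VMA and snapshot mmu_invalidate_seq under mmap_lock. */
        mmap_read_lock(current->mm);
        vma = vma_lookup(current->mm, hva);
        mmu_seq = kvm->mmu_invalidate_seq;
        mmap_read_unlock(current->mm);

        /*
         * 3. From here on, any invalidation is caught by the
         *    mmu_invalidate_retry() check under kvm->mmu_lock, with no
         *    avoidable allocation delay widening that window.
         */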
It's unlikely that any sane userspace currently modifies VMAs in such a
way as to trigger this race. And even with directed testing I was unable
to reproduce it. But a sufficiently motivated host userspace might be
able to exploit this race.
Note that KVM/ARM had the same bug, which was fixed in a separate,
nearly identical patch (see Link).
Link: https://lore.kernel.org/kvm/20230313235454.2964067-1-dmatlack@google.com/
Fixes: 9955371cc0 ("RISC-V: KVM: Implement MMU notifiers")
Cc: stable@vger.kernel.org
Signed-off-by: David Matlack <dmatlack@google.com>
Tested-by: Anup Patel <anup@brainfault.org>
Signed-off-by: Anup Patel <anup@brainfault.org>
parent 6a8f57ae2e
commit 2ed90cb093
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -628,6 +628,13 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
                         !(memslot->flags & KVM_MEM_READONLY)) ? true : false;
         unsigned long vma_pagesize, mmu_seq;
 
+        /* We need minimum second+third level pages */
+        ret = kvm_mmu_topup_memory_cache(pcache, gstage_pgd_levels);
+        if (ret) {
+                kvm_err("Failed to topup G-stage cache\n");
+                return ret;
+        }
+
         mmap_read_lock(current->mm);
 
         vma = vma_lookup(current->mm, hva);
@@ -648,6 +655,15 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
         if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE)
                 gfn = (gpa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
 
+        /*
+         * Read mmu_invalidate_seq so that KVM can detect if the results of
+         * vma_lookup() or gfn_to_pfn_prot() become stale prior to acquiring
+         * kvm->mmu_lock.
+         *
+         * Rely on mmap_read_unlock() for an implicit smp_rmb(), which pairs
+         * with the smp_wmb() in kvm_mmu_invalidate_end().
+         */
+        mmu_seq = kvm->mmu_invalidate_seq;
         mmap_read_unlock(current->mm);
 
         if (vma_pagesize != PUD_SIZE &&
@@ -657,15 +673,6 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
                 return -EFAULT;
         }
 
-        /* We need minimum second+third level pages */
-        ret = kvm_mmu_topup_memory_cache(pcache, gstage_pgd_levels);
-        if (ret) {
-                kvm_err("Failed to topup G-stage cache\n");
-                return ret;
-        }
-
-        mmu_seq = kvm->mmu_invalidate_seq;
-
         hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writable);
         if (hfn == KVM_PFN_ERR_HWPOISON) {
                 send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva,
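The hunks above only set mmu_seq up; the code that consumes it is pre-existing and therefore not visible in this diff. Paraphrased (not the literal source), kvm_riscv_gstage_map() later does roughly the following before installing the mapping:

        spin_lock(&kvm->mmu_lock);

        /*
         * If an mmu-notifier invalidation ran after the snapshot, the VMA
         * (and hence vma_pagesize) may be stale: unlock and bail out so
         * the guest refaults and the whole lookup is redone.
         */
        if (mmu_invalidate_retry(kvm, mmu_seq))
                goto out_unlock;

        /* ... otherwise map gfn at hfn with the snapshotted vma_pagesize ... */

out_unlock:
        spin_unlock(&kvm->mmu_lock);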