
ARM: KVM: user_mem_abort: support stage 2 MMIO page mapping

A userspace process can map device MMIO memory via VFIO or /dev/mem,
e.g., for platform device passthrough support in QEMU.
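
For context, a minimal userspace sketch of the /dev/mem path mentioned above; the device physical address and size are hypothetical placeholders, and error handling is abbreviated:

#include <fcntl.h>
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const off_t dev_phys = 0x10000000;	/* hypothetical device base */
	const size_t dev_size = 4096;		/* one page of registers */

	int fd = open("/dev/mem", O_RDWR | O_SYNC);
	if (fd < 0)
		return 1;

	/* The VMA created here is what a later guest access faults on. */
	volatile uint32_t *regs = mmap(NULL, dev_size,
				       PROT_READ | PROT_WRITE,
				       MAP_SHARED, fd, dev_phys);
	if (regs == MAP_FAILED)
		return 1;

	(void)regs[0];				/* an MMIO register read */

	munmap((void *)regs, dev_size);
	close(fd);
	return 0;
}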

During early development, we found the PAGE_S2 memory type being used
for MMIO mappings.  This patch corrects that by using the more strongly
ordered memory type for device MMIO mappings: PAGE_S2_DEVICE.

Signed-off-by: Kim Phillips <kim.phillips@linaro.org>
Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Author:    Kim Phillips <kim.phillips@linaro.org>
Date:      2014-06-26 01:45:51 +01:00
Committer: Christoffer Dall
parent     df6ce24f2e
commit     b88657674d

diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c

@@ -759,6 +759,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
 	struct vm_area_struct *vma;
 	pfn_t pfn;
+	pgprot_t mem_type = PAGE_S2;
 
 	write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
 	if (fault_status == FSC_PERM && !write_fault) {
@@ -809,6 +810,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	if (is_error_pfn(pfn))
 		return -EFAULT;
 
+	if (kvm_is_mmio_pfn(pfn))
+		mem_type = PAGE_S2_DEVICE;
+
 	spin_lock(&kvm->mmu_lock);
 	if (mmu_notifier_retry(kvm, mmu_seq))
 		goto out_unlock;
@@ -816,7 +820,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
 
 	if (hugetlb) {
-		pmd_t new_pmd = pfn_pmd(pfn, PAGE_S2);
+		pmd_t new_pmd = pfn_pmd(pfn, mem_type);
 		new_pmd = pmd_mkhuge(new_pmd);
 		if (writable) {
 			kvm_set_s2pmd_writable(&new_pmd);
@@ -825,13 +829,14 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE);
 		ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
 	} else {
-		pte_t new_pte = pfn_pte(pfn, PAGE_S2);
+		pte_t new_pte = pfn_pte(pfn, mem_type);
 		if (writable) {
 			kvm_set_s2pte_writable(&new_pte);
 			kvm_set_pfn_dirty(pfn);
 		}
 		coherent_cache_guest_page(vcpu, hva, PAGE_SIZE);
-		ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, false);
+		ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
+				     mem_type == PAGE_S2_DEVICE);
 	}
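
Read together, the hunks above reduce to the following condensed sketch of the new fault-handling logic (kernel-internal types and helpers assumed; not a drop-in excerpt):

	pgprot_t mem_type = PAGE_S2;		/* default: Normal memory */

	if (kvm_is_mmio_pfn(pfn))		/* backing pfn is device MMIO */
		mem_type = PAGE_S2_DEVICE;	/* stage 2 Device memory type */

	pte_t new_pte = pfn_pte(pfn, mem_type);
	/* The final argument flags the mapping as an I/O mapping whenever
	 * the device memory type was selected above. */
	ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
			     mem_type == PAGE_S2_DEVICE);

Because the architecture combines stage 1 and stage 2 attributes by taking the more restrictive of the two, mapping the region as Device at stage 2 preserves device-like ordering even if the guest maps it as Normal memory at stage 1.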