s390/mm: enable fixup_user_fault retrying

By passing a non-NULL flag we allow fixup_user_fault to retry, which
enables userfaultfd. As these retries might drop the mmap_sem, we need to
check whether that happened and, if so, redo the complete chain of
actions.

Signed-off-by: Dominik Dingel <dingel@linux.vnet.ibm.com>
Reviewed-by: Andrea Arcangeli <aarcange@redhat.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: "Jason J. Herne" <jjherne@linux.vnet.ibm.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Eric B Munson <emunson@akamai.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Dominik Dingel <dingel@linux.vnet.ibm.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
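
The flag in question is the "unlocked" output parameter that the parent
commit added to fixup_user_fault(). For reference, a sketch of that
interface as this patch uses it (illustrative, not part of the diff below):

    /*
     * Returns 0 on success. When "unlocked" is non-NULL, fixup_user_fault()
     * may drop and re-take mm->mmap_sem to retry the fault (which is what
     * enables userfaultfd handling); *unlocked is set to true if that
     * happened, so the caller knows the mapping may have changed.
     */
    int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
                         unsigned long address, unsigned int fault_flags,
                         bool *unlocked);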
commit fef8953ae4
parent 4a9e1cda27
Author:    Dominik Dingel <dingel@linux.vnet.ibm.com>
Date:      2016-01-15 16:57:07 -08:00
Committer: Linus Torvalds <torvalds@linux-foundation.org>
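
Condensed, the retry pattern that the first hunk below introduces in
gmap_fault() looks roughly as follows; this is an illustrative reassembly
of the changed function from that hunk, not additional code from the patch:

    int gmap_fault(struct gmap *gmap, unsigned long gaddr,
                   unsigned int fault_flags)
    {
            unsigned long vmaddr;
            bool unlocked;
            int rc;

            down_read(&gmap->mm->mmap_sem);
    retry:
            unlocked = false;       /* reset before every attempt */
            vmaddr = __gmap_translate(gmap, gaddr);
            if (IS_ERR_VALUE(vmaddr)) {
                    rc = vmaddr;
                    goto out_up;
            }
            if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags,
                                 &unlocked)) {
                    rc = -EFAULT;
                    goto out_up;
            }
            if (unlocked)           /* mmap_sem was dropped: the translation */
                    goto retry;     /* may be stale, redo the whole sequence  */
            rc = __gmap_link(gmap, gaddr, vmaddr);
    out_up:
            up_read(&gmap->mm->mmap_sem);
            return rc;
    }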
@@ -578,17 +578,29 @@ int gmap_fault(struct gmap *gmap, unsigned long gaddr,
 {
         unsigned long vmaddr;
         int rc;
+        bool unlocked;
 
         down_read(&gmap->mm->mmap_sem);
+
+retry:
+        unlocked = false;
         vmaddr = __gmap_translate(gmap, gaddr);
         if (IS_ERR_VALUE(vmaddr)) {
                 rc = vmaddr;
                 goto out_up;
         }
-        if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags, NULL)) {
+        if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags,
+                             &unlocked)) {
                 rc = -EFAULT;
                 goto out_up;
         }
+        /*
+         * In the case that fixup_user_fault unlocked the mmap_sem during
+         * faultin redo __gmap_translate to not race with a map/unmap_segment.
+         */
+        if (unlocked)
+                goto retry;
+
         rc = __gmap_link(gmap, gaddr, vmaddr);
 out_up:
         up_read(&gmap->mm->mmap_sem);
@@ -714,12 +726,14 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
         spinlock_t *ptl;
         pte_t *ptep, entry;
         pgste_t pgste;
+        bool unlocked;
         int rc = 0;
 
         if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK))
                 return -EINVAL;
         down_read(&gmap->mm->mmap_sem);
         while (len) {
+                unlocked = false;
                 /* Convert gmap address and connect the page tables */
                 addr = __gmap_translate(gmap, gaddr);
                 if (IS_ERR_VALUE(addr)) {
@@ -728,10 +742,13 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
                 }
                 /* Get the page mapped */
                 if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE,
-                                     NULL)) {
+                                     &unlocked)) {
                         rc = -EFAULT;
                         break;
                 }
+                /* While trying to map mmap_sem got unlocked. Let us retry */
+                if (unlocked)
+                        continue;
                 rc = __gmap_link(gmap, gaddr, addr);
                 if (rc)
                         break;
@@ -792,9 +809,11 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
         spinlock_t *ptl;
         pgste_t old, new;
         pte_t *ptep;
+        bool unlocked;
 
         down_read(&mm->mmap_sem);
 retry:
+        unlocked = false;
         ptep = get_locked_pte(mm, addr, &ptl);
         if (unlikely(!ptep)) {
                 up_read(&mm->mmap_sem);
@@ -803,8 +822,12 @@ retry:
         if (!(pte_val(*ptep) & _PAGE_INVALID) &&
             (pte_val(*ptep) & _PAGE_PROTECT)) {
                 pte_unmap_unlock(ptep, ptl);
+                /*
+                 * We do not really care about unlocked. We will retry either
+                 * way. But this allows fixup_user_fault to enable userfaultfd.
+                 */
                 if (fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE,
-                                     NULL)) {
+                                     &unlocked)) {
                         up_read(&mm->mmap_sem);
                         return -EFAULT;
                 }
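
The set_guest_storage_key() hunks take a different approach from the two
gmap paths above: as the added comment notes, the "unlocked" result is not
checked there, because the function branches back to its retry: label after
a successful fault fixup anyway, so a dropped mmap_sem is already covered.
A minimal sketch of that shape (the function name and the abbreviated tail
are illustrative; only the retry structure mirrors the patched code):

    static int guest_key_fault_sketch(struct mm_struct *mm, unsigned long addr)
    {
            spinlock_t *ptl;
            pte_t *ptep;
            bool unlocked;

            down_read(&mm->mmap_sem);
    retry:
            unlocked = false;
            ptep = get_locked_pte(mm, addr, &ptl);
            if (unlikely(!ptep)) {
                    up_read(&mm->mmap_sem);
                    return -EFAULT;
            }
            if (!(pte_val(*ptep) & _PAGE_INVALID) &&
                (pte_val(*ptep) & _PAGE_PROTECT)) {
                    pte_unmap_unlock(ptep, ptl);
                    /* "unlocked" exists only so fixup_user_fault() may retry */
                    if (fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE,
                                         &unlocked)) {
                            up_read(&mm->mmap_sem);
                            return -EFAULT;
                    }
                    goto retry;     /* retried either way */
            }
            /* ... the real function updates the storage key under ptl ... */
            pte_unmap_unlock(ptep, ptl);
            up_read(&mm->mmap_sem);
            return 0;
    }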