mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-09-21 12:11:49 +08:00
mm/hugetlb.c: fix UAF of vma in hugetlb fault pathway
Syzbot reports a UAF in hugetlb_fault(). This happens because
vmf_anon_prepare() could drop the per-VMA lock and allow the current VMA
to be freed before hugetlb_vma_unlock_read() is called.
We can fix this by using a modified version of vmf_anon_prepare() that
doesn't release the VMA lock on failure, and then release it ourselves
after hugetlb_vma_unlock_read().
Link: https://lkml.kernel.org/r/20240914194243.245-2-vishal.moola@gmail.com
Fixes: 9acad7ba3e ("hugetlb: use vmf_anon_prepare() instead of anon_vma_prepare()")
Reported-by: syzbot+2dab93857ee95f2eeb08@syzkaller.appspotmail.com
Closes: https://lore.kernel.org/linux-mm/00000000000067c20b06219fbc26@google.com/
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent 2a058ab328
commit 98b74bb4d7

Changed file: mm/hugetlb.c (20 changed lines)
@@ -6048,7 +6048,7 @@ retry_avoidcopy:
	 * When the original hugepage is shared one, it does not have
	 * anon_vma prepared.
	 */
-	ret = vmf_anon_prepare(vmf);
+	ret = __vmf_anon_prepare(vmf);
	if (unlikely(ret))
		goto out_release_all;
@@ -6247,7 +6247,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
	}

	if (!(vma->vm_flags & VM_MAYSHARE)) {
-		ret = vmf_anon_prepare(vmf);
+		ret = __vmf_anon_prepare(vmf);
		if (unlikely(ret))
			goto out;
	}
@@ -6378,6 +6378,14 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
		folio_unlock(folio);
out:
	hugetlb_vma_unlock_read(vma);
+
+	/*
+	 * We must check to release the per-VMA lock. __vmf_anon_prepare() is
+	 * the only way ret can be set to VM_FAULT_RETRY.
+	 */
+	if (unlikely(ret & VM_FAULT_RETRY))
+		vma_end_read(vma);
+
	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
	return ret;
@@ -6599,6 +6607,14 @@ out_ptl:
	}
out_mutex:
	hugetlb_vma_unlock_read(vma);
+
+	/*
+	 * We must check to release the per-VMA lock. __vmf_anon_prepare() in
+	 * hugetlb_wp() is the only way ret can be set to VM_FAULT_RETRY.
+	 */
+	if (unlikely(ret & VM_FAULT_RETRY))
+		vma_end_read(vma);
+
	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
	/*
	 * Generally it's safe to hold refcount during waiting page lock. But
|
Loading…
Reference in New Issue
Block a user