mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-11 04:18:39 +08:00
mm: allow multiple error returns in try_grab_page()
In order to add checks for P2PDMA memory into try_grab_page(), expand the error return from a bool to an int/error code. Update all the callsites to handle the change in usage. Also remove the WARN_ON_ONCE() call at the callsites seeing there already is a WARN_ON_ONCE() inside the function if it fails. Signed-off-by: Logan Gunthorpe <logang@deltatee.com> Reviewed-by: Dan Williams <dan.j.williams@intel.com> Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com> Reviewed-by: Christoph Hellwig <hch@lst.de> Link: https://lore.kernel.org/r/20221021174116.7200-2-logang@deltatee.com Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
5b2560c4c2
commit
0f0892356f
@ -1129,7 +1129,7 @@ static inline void get_page(struct page *page)
|
||||
folio_get(page_folio(page));
|
||||
}
|
||||
|
||||
bool __must_check try_grab_page(struct page *page, unsigned int flags);
|
||||
int __must_check try_grab_page(struct page *page, unsigned int flags);
|
||||
|
||||
static inline __must_check bool try_get_page(struct page *page)
|
||||
{
|
||||
|
26
mm/gup.c
26
mm/gup.c
@ -202,17 +202,19 @@ static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
|
||||
* time. Cases: please see the try_grab_folio() documentation, with
|
||||
* "refs=1".
|
||||
*
|
||||
* Return: true for success, or if no action was required (if neither FOLL_PIN
|
||||
* nor FOLL_GET was set, nothing is done). False for failure: FOLL_GET or
|
||||
* FOLL_PIN was set, but the page could not be grabbed.
|
||||
* Return: 0 for success, or if no action was required (if neither FOLL_PIN
|
||||
* nor FOLL_GET was set, nothing is done). A negative error code for failure:
|
||||
*
|
||||
* -ENOMEM FOLL_GET or FOLL_PIN was set, but the page could not
|
||||
* be grabbed.
|
||||
*/
|
||||
bool __must_check try_grab_page(struct page *page, unsigned int flags)
|
||||
int __must_check try_grab_page(struct page *page, unsigned int flags)
|
||||
{
|
||||
struct folio *folio = page_folio(page);
|
||||
|
||||
WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == (FOLL_GET | FOLL_PIN));
|
||||
if (WARN_ON_ONCE(folio_ref_count(folio) <= 0))
|
||||
return false;
|
||||
return -ENOMEM;
|
||||
|
||||
if (flags & FOLL_GET)
|
||||
folio_ref_inc(folio);
|
||||
@ -232,7 +234,7 @@ bool __must_check try_grab_page(struct page *page, unsigned int flags)
|
||||
node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, 1);
|
||||
}
|
||||
|
||||
return true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -624,8 +626,9 @@ retry:
|
||||
!PageAnonExclusive(page), page);
|
||||
|
||||
/* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
|
||||
if (unlikely(!try_grab_page(page, flags))) {
|
||||
page = ERR_PTR(-ENOMEM);
|
||||
ret = try_grab_page(page, flags);
|
||||
if (unlikely(ret)) {
|
||||
page = ERR_PTR(ret);
|
||||
goto out;
|
||||
}
|
||||
/*
|
||||
@ -960,10 +963,9 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
|
||||
goto unmap;
|
||||
*page = pte_page(*pte);
|
||||
}
|
||||
if (unlikely(!try_grab_page(*page, gup_flags))) {
|
||||
ret = -ENOMEM;
|
||||
ret = try_grab_page(*page, gup_flags);
|
||||
if (unlikely(ret))
|
||||
goto unmap;
|
||||
}
|
||||
out:
|
||||
ret = 0;
|
||||
unmap:
|
||||
@ -2536,7 +2538,7 @@ static int __gup_device_huge(unsigned long pfn, unsigned long addr,
|
||||
}
|
||||
SetPageReferenced(page);
|
||||
pages[*nr] = page;
|
||||
if (unlikely(!try_grab_page(page, flags))) {
|
||||
if (unlikely(try_grab_page(page, flags))) {
|
||||
undo_dev_pagemap(nr, nr_start, flags, pages);
|
||||
break;
|
||||
}
|
||||
|
@ -1035,6 +1035,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
|
||||
unsigned long pfn = pmd_pfn(*pmd);
|
||||
struct mm_struct *mm = vma->vm_mm;
|
||||
struct page *page;
|
||||
int ret;
|
||||
|
||||
assert_spin_locked(pmd_lockptr(mm, pmd));
|
||||
|
||||
@ -1066,8 +1067,9 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
|
||||
if (!*pgmap)
|
||||
return ERR_PTR(-EFAULT);
|
||||
page = pfn_to_page(pfn);
|
||||
if (!try_grab_page(page, flags))
|
||||
page = ERR_PTR(-ENOMEM);
|
||||
ret = try_grab_page(page, flags);
|
||||
if (ret)
|
||||
page = ERR_PTR(ret);
|
||||
|
||||
return page;
|
||||
}
|
||||
@ -1193,6 +1195,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
|
||||
unsigned long pfn = pud_pfn(*pud);
|
||||
struct mm_struct *mm = vma->vm_mm;
|
||||
struct page *page;
|
||||
int ret;
|
||||
|
||||
assert_spin_locked(pud_lockptr(mm, pud));
|
||||
|
||||
@ -1226,8 +1229,10 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
|
||||
if (!*pgmap)
|
||||
return ERR_PTR(-EFAULT);
|
||||
page = pfn_to_page(pfn);
|
||||
if (!try_grab_page(page, flags))
|
||||
page = ERR_PTR(-ENOMEM);
|
||||
|
||||
ret = try_grab_page(page, flags);
|
||||
if (ret)
|
||||
page = ERR_PTR(ret);
|
||||
|
||||
return page;
|
||||
}
|
||||
@ -1435,6 +1440,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
|
||||
{
|
||||
struct mm_struct *mm = vma->vm_mm;
|
||||
struct page *page;
|
||||
int ret;
|
||||
|
||||
assert_spin_locked(pmd_lockptr(mm, pmd));
|
||||
|
||||
@ -1459,8 +1465,9 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
|
||||
VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
|
||||
!PageAnonExclusive(page), page);
|
||||
|
||||
if (!try_grab_page(page, flags))
|
||||
return ERR_PTR(-ENOMEM);
|
||||
ret = try_grab_page(page, flags);
|
||||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
|
||||
if (flags & FOLL_TOUCH)
|
||||
touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
|
||||
|
17
mm/hugetlb.c
17
mm/hugetlb.c
@ -7243,14 +7243,15 @@ retry:
|
||||
page = pte_page(pte) +
|
||||
((address & ~huge_page_mask(h)) >> PAGE_SHIFT);
|
||||
/*
|
||||
* try_grab_page() should always succeed here, because: a) we
|
||||
* hold the pmd (ptl) lock, and b) we've just checked that the
|
||||
* huge pmd (head) page is present in the page tables. The ptl
|
||||
* prevents the head page and tail pages from being rearranged
|
||||
* in any way. So this page must be available at this point,
|
||||
* unless the page refcount overflowed:
|
||||
* try_grab_page() should always be able to get the page here,
|
||||
* because: a) we hold the pmd (ptl) lock, and b) we've just
|
||||
* checked that the huge pmd (head) page is present in the
|
||||
* page tables. The ptl prevents the head page and tail pages
|
||||
* from being rearranged in any way. So this page must be
|
||||
* available at this point, unless the page refcount
|
||||
* overflowed:
|
||||
*/
|
||||
if (WARN_ON_ONCE(!try_grab_page(page, flags))) {
|
||||
if (try_grab_page(page, flags)) {
|
||||
page = NULL;
|
||||
goto out;
|
||||
}
|
||||
@ -7288,7 +7289,7 @@ retry:
|
||||
pte = huge_ptep_get((pte_t *)pud);
|
||||
if (pte_present(pte)) {
|
||||
page = pud_page(*pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
|
||||
if (WARN_ON_ONCE(!try_grab_page(page, flags))) {
|
||||
if (try_grab_page(page, flags)) {
|
||||
page = NULL;
|
||||
goto out;
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user