mm/gup: pass gup flags to two more routines

In preparation for an upcoming patch, pass the gup flags argument to
two more routines: put_compound_head() and undo_dev_pagemap().
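
For reference, the shape of the change (a sketch distilled from the
diff below; the __maybe_unused annotation is omitted, and neither
routine acts on the new argument yet -- that happens in the follow-up
patch):

        /* Before: the helpers cannot see the caller's gup flags. */
        static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages);
        static void put_compound_head(struct page *page, int refs);

        /* After: callers forward their FOLL_* (gup) flags to both helpers. */
        static void undo_dev_pagemap(int *nr, int nr_start, unsigned int flags,
                                     struct page **pages);
        static void put_compound_head(struct page *page, int refs,
                                      unsigned int flags);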

Signed-off-by: John Hubbard <jhubbard@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Jan Kara <jack@suse.cz>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Link: http://lkml.kernel.org/r/20200211001536.1027652-5-jhubbard@nvidia.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 3b78d8347d (parent 566d774a11)
Author:    John Hubbard <jhubbard@nvidia.com>
Date:      2020-04-01 21:05:22 -07:00
Committer: Linus Torvalds <torvalds@linux-foundation.org>

mm/gup.c

@@ -1870,6 +1870,7 @@ static inline pte_t gup_get_pte(pte_t *ptep)
 #endif /* CONFIG_GUP_GET_PTE_LOW_HIGH */
 
 static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
+                                            unsigned int flags,
                                             struct page **pages)
 {
         while ((*nr) - nr_start) {
@@ -1909,7 +1910,7 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
 
                         pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
                         if (unlikely(!pgmap)) {
-                                undo_dev_pagemap(nr, nr_start, pages);
+                                undo_dev_pagemap(nr, nr_start, flags, pages);
                                 goto pte_unmap;
                         }
                 } else if (pte_special(pte))
@@ -1974,7 +1975,7 @@ static int __gup_device_huge(unsigned long pfn, unsigned long addr,
 
                 pgmap = get_dev_pagemap(pfn, pgmap);
                 if (unlikely(!pgmap)) {
-                        undo_dev_pagemap(nr, nr_start, pages);
+                        undo_dev_pagemap(nr, nr_start, flags, pages);
                         return 0;
                 }
                 SetPageReferenced(page);
@@ -2001,7 +2002,7 @@ static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
                 return 0;
 
         if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
-                undo_dev_pagemap(nr, nr_start, pages);
+                undo_dev_pagemap(nr, nr_start, flags, pages);
                 return 0;
         }
         return 1;
@@ -2019,7 +2020,7 @@ static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
                 return 0;
 
         if (unlikely(pud_val(orig) != pud_val(*pudp))) {
-                undo_dev_pagemap(nr, nr_start, pages);
+                undo_dev_pagemap(nr, nr_start, flags, pages);
                 return 0;
         }
         return 1;
@@ -2053,7 +2054,7 @@ static int record_subpages(struct page *page, unsigned long addr,
         return nr;
 }
 
-static void put_compound_head(struct page *page, int refs)
+static void put_compound_head(struct page *page, int refs, unsigned int flags)
 {
         VM_BUG_ON_PAGE(page_ref_count(page) < refs, page);
         /*
@@ -2103,7 +2104,7 @@ static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
                 return 0;
 
         if (unlikely(pte_val(pte) != pte_val(*ptep))) {
-                put_compound_head(head, refs);
+                put_compound_head(head, refs, flags);
                 return 0;
         }
 
@@ -2163,7 +2164,7 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
                 return 0;
 
         if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
-                put_compound_head(head, refs);
+                put_compound_head(head, refs, flags);
                 return 0;
         }
 
@@ -2197,7 +2198,7 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
                 return 0;
 
         if (unlikely(pud_val(orig) != pud_val(*pudp))) {
-                put_compound_head(head, refs);
+                put_compound_head(head, refs, flags);
                 return 0;
         }
 
@@ -2226,7 +2227,7 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
                 return 0;
 
         if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
-                put_compound_head(head, refs);
+                put_compound_head(head, refs, flags);
                 return 0;
         }
 