mm: remove folio_pincount_ptr() and head_compound_pincount()
We can use folio->_pincount directly, since all users are guarded by
tests of compound/large.

Link: https://lkml.kernel.org/r/20230111142915.1001531-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

commit 94688e8eb4 (parent 7d4a8be0c4)
Documentation/core-api/pin_user_pages.rst

@@ -55,18 +55,17 @@ flags the caller provides. The caller is required to pass in a non-null struct
 pages* array, and the function then pins pages by incrementing each by a special
 value: GUP_PIN_COUNTING_BIAS.
 
-For compound pages, the GUP_PIN_COUNTING_BIAS scheme is not used. Instead,
-an exact form of pin counting is achieved, by using the 2nd struct page
-in the compound page. A new struct page field, compound_pincount, has
-been added in order to support this.
+For large folios, the GUP_PIN_COUNTING_BIAS scheme is not used. Instead,
+the extra space available in the struct folio is used to store the
+pincount directly.
 
-This approach for compound pages avoids the counting upper limit problems that
-are discussed below. Those limitations would have been aggravated severely by
-huge pages, because each tail page adds a refcount to the head page. And in
-fact, testing revealed that, without a separate compound_pincount field,
-page overflows were seen in some huge page stress tests.
+This approach for large folios avoids the counting upper limit problems
+that are discussed below. Those limitations would have been aggravated
+severely by huge pages, because each tail page adds a refcount to the
+head page. And in fact, testing revealed that, without a separate pincount
+field, refcount overflows were seen in some huge page stress tests.
 
-This also means that huge pages and compound pages do not suffer
+This also means that huge pages and large folios do not suffer
 from the false positives problem that is mentioned below.::
 
 Function
@@ -264,9 +263,9 @@ place.)
 Other diagnostics
 =================
 
-dump_page() has been enhanced slightly, to handle these new counting
-fields, and to better report on compound pages in general. Specifically,
-for compound pages, the exact (compound_pincount) pincount is reported.
+dump_page() has been enhanced slightly to handle these new counting
+fields, and to better report on large folios in general. Specifically,
+for large folios, the exact pincount is reported.
 
 References
 ==========
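The two accounting schemes described above can be shown working outside the
kernel. Below is a minimal userspace sketch, not kernel code: struct
folio_model, model_pin() and model_unpin() are invented names, and only the
value of GUP_PIN_COUNTING_BIAS (1 << 10) is taken from include/linux/mm.h.

/*
 * Minimal userspace model of the two pin-accounting schemes:
 * an exact pincount for large folios, a biased refcount otherwise.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

#define GUP_PIN_COUNTING_BIAS (1 << 10)

struct folio_model {
	atomic_int refcount;	/* stands in for folio->_refcount */
	atomic_int pincount;	/* stands in for folio->_pincount */
	bool large;		/* stands in for folio_test_large() */
};

static void model_pin(struct folio_model *f, int refs)
{
	if (f->large) {
		atomic_fetch_add(&f->refcount, refs);
		atomic_fetch_add(&f->pincount, refs);
	} else {
		atomic_fetch_add(&f->refcount, refs * GUP_PIN_COUNTING_BIAS);
	}
}

static void model_unpin(struct folio_model *f, int refs)
{
	if (f->large) {
		atomic_fetch_sub(&f->pincount, refs);
		atomic_fetch_sub(&f->refcount, refs);
	} else {
		atomic_fetch_sub(&f->refcount, refs * GUP_PIN_COUNTING_BIAS);
	}
}

int main(void)
{
	struct folio_model small = { 1, 0, false };	/* one base reference */
	struct folio_model large = { 1, 0, true };

	model_pin(&small, 1);
	model_pin(&large, 3);

	/* Large folios track pins exactly; small ones only via the bias. */
	assert(atomic_load(&large.pincount) == 3);
	assert(atomic_load(&small.refcount) == 1 + GUP_PIN_COUNTING_BIAS);

	model_unpin(&small, 1);
	model_unpin(&large, 3);
	assert(atomic_load(&large.pincount) == 0);
	assert(atomic_load(&small.refcount) == 1);
	return 0;
}

Note how the bias scheme can only ever say a small folio is "maybe" pinned,
while the large-folio pincount answers exactly; that is the trade-off the
documentation hunk above describes.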
include/linux/mm.h

@@ -1011,11 +1011,6 @@ static inline void folio_set_compound_dtor(struct folio *folio,
 
 void destroy_large_folio(struct folio *folio);
 
-static inline int head_compound_pincount(struct page *head)
-{
-	return atomic_read(compound_pincount_ptr(head));
-}
-
 static inline void set_compound_order(struct page *page, unsigned int order)
 {
 	page[1].compound_order = order;
@@ -1641,11 +1636,6 @@ static inline struct folio *pfn_folio(unsigned long pfn)
 	return page_folio(pfn_to_page(pfn));
 }
 
-static inline atomic_t *folio_pincount_ptr(struct folio *folio)
-{
-	return &folio_page(folio, 1)->compound_pincount;
-}
-
 /**
  * folio_maybe_dma_pinned - Report if a folio may be pinned for DMA.
  * @folio: The folio.
@@ -1663,7 +1653,7 @@ static inline atomic_t *folio_pincount_ptr(struct folio *folio)
  * expected to be able to deal gracefully with a false positive.
  *
  * For large folios, the result will be exactly correct. That's because
- * we have more tracking data available: the compound_pincount is used
+ * we have more tracking data available: the _pincount field is used
  * instead of the GUP_PIN_COUNTING_BIAS scheme.
  *
  * For more information, please see Documentation/core-api/pin_user_pages.rst.
@@ -1674,7 +1664,7 @@ static inline atomic_t *folio_pincount_ptr(struct folio *folio)
 static inline bool folio_maybe_dma_pinned(struct folio *folio)
 {
 	if (folio_test_large(folio))
-		return atomic_read(folio_pincount_ptr(folio)) > 0;
+		return atomic_read(&folio->_pincount) > 0;
 
 	/*
 	 * folio_ref_count() is signed. If that refcount overflows, then
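The false-positive caveat in the folio_maybe_dma_pinned() kernel-doc can be
made concrete. A hedged sketch: maybe_dma_pinned_small() is an invented name,
and only the threshold comparison mirrors the small-folio branch shown above.

/*
 * Any GUP_PIN_COUNTING_BIAS ordinary references are indistinguishable
 * from one FOLL_PIN, so the small-folio answer is only a heuristic.
 */
#include <stdbool.h>
#include <stdio.h>

#define GUP_PIN_COUNTING_BIAS (1 << 10)

static bool maybe_dma_pinned_small(int refcount)
{
	return refcount >= GUP_PIN_COUNTING_BIAS;
}

int main(void)
{
	/* A real pin: the base reference plus one full bias. */
	printf("pinned: %d\n", maybe_dma_pinned_small(1 + GUP_PIN_COUNTING_BIAS));
	/* A false positive: 1024 plain get_page()-style references. */
	printf("false positive: %d\n", maybe_dma_pinned_small(1024));
	return 0;
}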
include/linux/mm_types.h

@@ -443,11 +443,6 @@ static inline atomic_t *subpages_mapcount_ptr(struct page *page)
 	return &page[1].subpages_mapcount;
 }
 
-static inline atomic_t *compound_pincount_ptr(struct page *page)
-{
-	return &page[1].compound_pincount;
-}
-
 /*
  * Used for sizing the vmemmap region on some architectures
  */
mm/debug.c

@@ -94,11 +94,11 @@ static void __dump_page(struct page *page)
 			page, page_ref_count(head), mapcount, mapping,
 			page_to_pgoff(page), page_to_pfn(page));
 	if (compound) {
-		pr_warn("head:%p order:%u compound_mapcount:%d subpages_mapcount:%d compound_pincount:%d\n",
+		pr_warn("head:%p order:%u compound_mapcount:%d subpages_mapcount:%d pincount:%d\n",
 				head, compound_order(head),
 				head_compound_mapcount(head),
 				head_subpages_mapcount(head),
-				head_compound_pincount(head));
+				atomic_read(&folio->_pincount));
 	}
 
 #ifdef CONFIG_MEMCG
mm/gup.c

@@ -111,7 +111,7 @@ retry:
  * FOLL_GET: folio's refcount will be incremented by @refs.
  *
  * FOLL_PIN on large folios: folio's refcount will be incremented by
- * @refs, and its compound_pincount will be incremented by @refs.
+ * @refs, and its pincount will be incremented by @refs.
  *
  * FOLL_PIN on single-page folios: folio's refcount will be incremented by
  * @refs * GUP_PIN_COUNTING_BIAS.
@@ -157,7 +157,7 @@ struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags)
 		 * try_get_folio() is left intact.
 		 */
 		if (folio_test_large(folio))
-			atomic_add(refs, folio_pincount_ptr(folio));
+			atomic_add(refs, &folio->_pincount);
 		else
 			folio_ref_add(folio,
 					refs * (GUP_PIN_COUNTING_BIAS - 1));
@@ -182,7 +182,7 @@ static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
 	if (flags & FOLL_PIN) {
 		node_stat_mod_folio(folio, NR_FOLL_PIN_RELEASED, refs);
 		if (folio_test_large(folio))
-			atomic_sub(refs, folio_pincount_ptr(folio));
+			atomic_sub(refs, &folio->_pincount);
 		else
 			refs *= GUP_PIN_COUNTING_BIAS;
 	}
@@ -232,7 +232,7 @@ int __must_check try_grab_page(struct page *page, unsigned int flags)
 		 */
 		if (folio_test_large(folio)) {
 			folio_ref_add(folio, 1);
-			atomic_add(1, folio_pincount_ptr(folio));
+			atomic_add(1, &folio->_pincount);
 		} else {
 			folio_ref_add(folio, GUP_PIN_COUNTING_BIAS);
 		}
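One detail in the try_grab_folio() hunk above is worth spelling out: the
small-folio branch adds refs * (GUP_PIN_COUNTING_BIAS - 1), not the full
bias, because try_get_folio() has already taken @refs plain references
earlier in the function. A sketch of that arithmetic only, with invented
variable names:

#include <assert.h>

#define GUP_PIN_COUNTING_BIAS (1 << 10)

int main(void)
{
	for (int refs = 1; refs <= 8; refs++) {
		int from_try_get = refs;	/* taken first by try_get_folio() */
		int top_up = refs * (GUP_PIN_COUNTING_BIAS - 1);

		/* The total must land on exactly refs full biases. */
		assert(from_try_get + top_up == refs * GUP_PIN_COUNTING_BIAS);
	}
	return 0;
}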
mm/huge_memory.c

@@ -2477,9 +2477,9 @@ static void __split_huge_page_tail(struct page *head, int tail,
 	 * of swap cache pages that store the swp_entry_t in tail pages.
 	 * Fix up and warn once if private is unexpectedly set.
 	 *
-	 * What of 32-bit systems, on which head[1].compound_pincount overlays
+	 * What of 32-bit systems, on which folio->_pincount overlays
 	 * head[1].private? No problem: THP_SWAP is not enabled on 32-bit, and
-	 * compound_pincount must be 0 for folio_ref_freeze() to have succeeded.
+	 * pincount must be 0 for folio_ref_freeze() to have succeeded.
 	 */
 	if (!folio_test_swapcache(page_folio(head))) {
 		VM_WARN_ON_ONCE_PAGE(page_tail->private != 0, page_tail);
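The comment in that hunk is about two fields sharing storage on 32-bit. The
sketch below shows the same idea in miniature; the layout and names are
invented for illustration and are not the kernel's struct page.

#include <assert.h>
#include <stddef.h>

struct tail_page_model {
	union {
		unsigned long private;	/* swap entry, used by THP_SWAP */
		int pincount;		/* pin count, used by GUP */
	};
};

/*
 * Both names refer to the same bytes, so at most one user may be active
 * at a time, which is why the pincount must already be 0 here.
 */
static_assert(offsetof(struct tail_page_model, private) ==
	      offsetof(struct tail_page_model, pincount),
	      "overlaid fields must share storage");

int main(void)
{
	return 0;
}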
mm/hugetlb.c

@@ -1476,7 +1476,7 @@ static void __destroy_compound_gigantic_folio(struct folio *folio,
 
 	atomic_set(folio_mapcount_ptr(folio), 0);
 	atomic_set(folio_subpages_mapcount_ptr(folio), 0);
-	atomic_set(folio_pincount_ptr(folio), 0);
+	atomic_set(&folio->_pincount, 0);
 
 	for (i = 1; i < nr_pages; i++) {
 		p = folio_page(folio, i);
@@ -1998,7 +1998,7 @@ static bool __prep_compound_gigantic_folio(struct folio *folio,
 	}
 	atomic_set(folio_mapcount_ptr(folio), -1);
 	atomic_set(folio_subpages_mapcount_ptr(folio), 0);
-	atomic_set(folio_pincount_ptr(folio), 0);
+	atomic_set(&folio->_pincount, 0);
 	return true;
 
 out_error:
mm/page_alloc.c

@@ -775,11 +775,13 @@ void free_compound_page(struct page *page)
 
 static void prep_compound_head(struct page *page, unsigned int order)
 {
+	struct folio *folio = (struct folio *)page;
+
 	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
 	set_compound_order(page, order);
 	atomic_set(compound_mapcount_ptr(page), -1);
 	atomic_set(subpages_mapcount_ptr(page), 0);
-	atomic_set(compound_pincount_ptr(page), 0);
+	atomic_set(&folio->_pincount, 0);
 }
 
 static void prep_compound_tail(struct page *head, int tail_idx)
@@ -1291,6 +1293,7 @@ static inline bool free_page_is_bad(struct page *page)
 
 static int free_tail_pages_check(struct page *head_page, struct page *page)
 {
+	struct folio *folio = (struct folio *)head_page;
 	int ret = 1;
 
 	/*
@@ -1314,8 +1317,8 @@ static int free_tail_pages_check(struct page *head_page, struct page *page)
 			bad_page(page, "nonzero subpages_mapcount");
 			goto out;
 		}
-		if (unlikely(head_compound_pincount(head_page))) {
+		if (unlikely(atomic_read(&folio->_pincount))) {
-			bad_page(page, "nonzero compound_pincount");
+			bad_page(page, "nonzero pincount");
 			goto out;
 		}
 		break;
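The free_tail_pages_check() hunk treats a nonzero pincount at free time as a
bug: a compound page reaching the allocator while still DMA-pinned must be
rejected. A miniature model of just that condition; free_check() and
folio_model are invented names.

#include <stdio.h>

struct folio_model {
	int pincount;
};

/* Returns 1 if the folio is safe to free, 0 if it is bad. */
static int free_check(const struct folio_model *f)
{
	if (f->pincount != 0) {
		fprintf(stderr, "bad_page: nonzero pincount\n");
		return 0;
	}
	return 1;
}

int main(void)
{
	struct folio_model idle = { .pincount = 0 };
	struct folio_model pinned = { .pincount = 1 };

	printf("%d %d\n", free_check(&idle), free_check(&pinned)); /* 1 0 */
	return 0;
}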