mm/page_alloc: check high-order pages for corruption during PCP operations
Eric Dumazet pointed out that commit 44042b4498 ("mm/page_alloc: allow high-order pages to be stored on the per-cpu lists") only checks the head page during PCP refill and allocation operations. This was an oversight and all pages should be checked. This will incur a small performance penalty but it's necessary for correctness.

Link: https://lkml.kernel.org/r/20220310092456.GJ15701@techsingularity.net
Fixes: 44042b4498 ("mm/page_alloc: allow high-order pages to be stored on the per-cpu lists")
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Reported-by: Eric Dumazet <edumazet@google.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Wei Xu <weixugc@google.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3313204c8a
commit 77fe7f136a
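For context before the diff: the fix routes the PCP refill and allocation checks through check_new_pages(), which walks every page of the order-sized block rather than only the head page. Below is a minimal, userspace-style sketch of that idea; the struct page, the flags test, and the main() harness are illustrative stand-ins, not the kernel's actual implementation.

/*
 * Simplified sketch: why checking only the head page can miss corruption
 * in a high-order block, and what walking 1 << order pages buys.
 * NOT kernel code; types and the "bad state" test are stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

struct page {
	unsigned long flags;	/* stand-in for real page state */
};

/* Stand-in for the kernel's single-page sanity check. */
static bool check_new_page(struct page *page)
{
	return page->flags != 0;	/* non-zero flags model a corrupted page */
}

/*
 * Walk every page of a high-order block; returning true means the block
 * is bad and must be skipped, mirroring check_new_pages(page, order).
 */
static bool check_new_pages(struct page *page, unsigned int order)
{
	for (unsigned int i = 0; i < (1u << order); i++) {
		struct page *p = page + i;

		if (check_new_page(p))
			return true;
	}
	return false;
}

int main(void)
{
	struct page block[4] = { {0}, {0}, {1}, {0} };	/* tail page 2 "corrupted" */

	/* Head-only check misses the corruption; whole-block check catches it. */
	printf("head only: %d\n", check_new_page(&block[0]));		/* prints 0 */
	printf("order-2 block: %d\n", check_new_pages(block, 2));	/* prints 1 */
	return 0;
}

This is why the diff below adds an order argument to check_pcp_refill() and check_new_pcp() and moves them after check_new_pages(), so both the refill and allocation paths inspect the whole block.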
mm/page_alloc.c

@@ -2291,43 +2291,6 @@ static inline int check_new_page(struct page *page)
 	return 1;
 }
 
-#ifdef CONFIG_DEBUG_VM
-/*
- * With DEBUG_VM enabled, order-0 pages are checked for expected state when
- * being allocated from pcp lists. With debug_pagealloc also enabled, they are
- * also checked when pcp lists are refilled from the free lists.
- */
-static inline bool check_pcp_refill(struct page *page)
-{
-	if (debug_pagealloc_enabled_static())
-		return check_new_page(page);
-	else
-		return false;
-}
-
-static inline bool check_new_pcp(struct page *page)
-{
-	return check_new_page(page);
-}
-#else
-/*
- * With DEBUG_VM disabled, free order-0 pages are checked for expected state
- * when pcp lists are being refilled from the free lists. With debug_pagealloc
- * enabled, they are also checked when being allocated from the pcp lists.
- */
-static inline bool check_pcp_refill(struct page *page)
-{
-	return check_new_page(page);
-}
-static inline bool check_new_pcp(struct page *page)
-{
-	if (debug_pagealloc_enabled_static())
-		return check_new_page(page);
-	else
-		return false;
-}
-#endif /* CONFIG_DEBUG_VM */
-
 static bool check_new_pages(struct page *page, unsigned int order)
 {
 	int i;
@@ -2341,6 +2304,43 @@ static bool check_new_pages(struct page *page, unsigned int order)
 	return false;
 }
 
+#ifdef CONFIG_DEBUG_VM
+/*
+ * With DEBUG_VM enabled, order-0 pages are checked for expected state when
+ * being allocated from pcp lists. With debug_pagealloc also enabled, they are
+ * also checked when pcp lists are refilled from the free lists.
+ */
+static inline bool check_pcp_refill(struct page *page, unsigned int order)
+{
+	if (debug_pagealloc_enabled_static())
+		return check_new_pages(page, order);
+	else
+		return false;
+}
+
+static inline bool check_new_pcp(struct page *page, unsigned int order)
+{
+	return check_new_pages(page, order);
+}
+#else
+/*
+ * With DEBUG_VM disabled, free order-0 pages are checked for expected state
+ * when pcp lists are being refilled from the free lists. With debug_pagealloc
+ * enabled, they are also checked when being allocated from the pcp lists.
+ */
+static inline bool check_pcp_refill(struct page *page, unsigned int order)
+{
+	return check_new_pages(page, order);
+}
+static inline bool check_new_pcp(struct page *page, unsigned int order)
+{
+	if (debug_pagealloc_enabled_static())
+		return check_new_pages(page, order);
+	else
+		return false;
+}
+#endif /* CONFIG_DEBUG_VM */
+
 inline void post_alloc_hook(struct page *page, unsigned int order,
 				gfp_t gfp_flags)
 {
@@ -2982,7 +2982,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 		if (unlikely(page == NULL))
 			break;
 
-		if (unlikely(check_pcp_refill(page)))
+		if (unlikely(check_pcp_refill(page, order)))
 			continue;
 
 		/*
@@ -3600,7 +3600,7 @@ struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
 		page = list_first_entry(list, struct page, lru);
 		list_del(&page->lru);
 		pcp->count -= 1 << order;
-	} while (check_new_pcp(page));
+	} while (check_new_pcp(page, order));
 
 	return page;
 }