
page-allocator: change migratetype for all pageblocks within a high-order page during __rmqueue_fallback

When there are no pages of a target migratetype free, the page allocator
selects a high-order block of another migratetype to allocate from.  When
the order of the page taken is greater than pageblock_order, all
pageblocks within that high-order page should change migratetype so that
pages are later freed to the correct free-lists.
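As a toy illustration (standalone userspace C, not part of the patch; pageblock_order = 9 is an assumption matching a common x86-64 configuration), an order-N page spans 1 << (N - pageblock_order) pageblocks, so any order above pageblock_order covers more than one pageblock:

	#include <stdio.h>

	#define PAGEBLOCK_ORDER 9	/* assumed x86-64 default */

	int main(void)
	{
		/* For each order at or above pageblock_order, count the
		 * pageblocks an order-N page covers. */
		for (int order = PAGEBLOCK_ORDER; order <= 10; order++)
			printf("order-%d page spans %d pageblock(s)\n",
			       order, 1 << (order - PAGEBLOCK_ORDER));
		return 0;
	}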

The current behaviour is that pageblocks change migratetype only when the
order being split exactly matches pageblock_order.  When pageblock_order <
MAX_ORDER-1, ownership does not change correctly: pages are later freed to
the wrong free list, which defeats fragmentation avoidance.  For example,
with the common x86-64 configuration of pageblock_order = 9 and
MAX_ORDER-1 = 10, splitting an order-10 page leaves both of its pageblocks
with the old migratetype.

This patch changes all pageblocks within the high-order page being split
to the correct migratetype.  Without the patch, hugepage allocations under
stress on x86-64 succeeded for about 59% of physical memory; with the
patch applied, this goes up to 65%.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 2f66a68f3f (parent fe1ff49d0d)
Authored by Mel Gorman on 2009-09-21 17:02:31 -07:00; committed by Linus Torvalds

mm/page_alloc.c

@@ -783,6 +783,17 @@ static int move_freepages_block(struct zone *zone, struct page *page,
 	return move_freepages(zone, start_page, end_page, migratetype);
 }
 
+static void change_pageblock_range(struct page *pageblock_page,
+					int start_order, int migratetype)
+{
+	int nr_pageblocks = 1 << (start_order - pageblock_order);
+
+	while (nr_pageblocks--) {
+		set_pageblock_migratetype(pageblock_page, migratetype);
+		pageblock_page += pageblock_nr_pages;
+	}
+}
+
 /* Remove an element from the buddy allocator from the fallback list */
 static inline struct page *
 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
@@ -836,8 +847,9 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 			list_del(&page->lru);
 			rmv_page_order(page);
 
-			if (current_order == pageblock_order)
-				set_pageblock_migratetype(page,
+			/* Take ownership for orders >= pageblock_order */
+			if (current_order >= pageblock_order)
+				change_pageblock_range(page, current_order,
 							start_migratetype);
 
 			expand(zone, page, order, current_order, area, migratetype);
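
For reference, here is a minimal userspace model of the new helper
(illustrative only; the names and constants are assumptions — the real
kernel code walks struct page pointers and calls
set_pageblock_migratetype()).  It shows that every pageblock inside the
split page is retagged, not just the first:

	#include <stdio.h>

	#define PAGEBLOCK_ORDER		9	/* assumed pageblock order */
	#define PAGEBLOCK_NR_PAGES	(1UL << PAGEBLOCK_ORDER)
	#define NR_PAGEBLOCKS		4	/* toy zone of 4 pageblocks */

	static int migratetype_of[NR_PAGEBLOCKS];

	/* Userspace stand-in for the kernel helper: retag every
	 * pageblock covered by an order-start_order page. */
	static void change_pageblock_range(unsigned long first_pfn,
					   int start_order, int migratetype)
	{
		int nr_pageblocks = 1 << (start_order - PAGEBLOCK_ORDER);

		while (nr_pageblocks--) {
			migratetype_of[first_pfn / PAGEBLOCK_NR_PAGES] = migratetype;
			first_pfn += PAGEBLOCK_NR_PAGES;
		}
	}

	int main(void)
	{
		/* Splitting an order-10 page at pfn 0 retags pageblocks 0 and 1. */
		change_pageblock_range(0, 10, 1);
		for (int i = 0; i < NR_PAGEBLOCKS; i++)
			printf("pageblock %d: migratetype %d\n",
			       i, migratetype_of[i]);
		return 0;
	}

Under the old "==" check, an order-10 split in this model would have left
both pageblocks untouched; under the new ">=" check both are retagged.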