mm/cma: change fallback behaviour for CMA freepage
Freepages with MIGRATE_CMA can be used only for MIGRATE_MOVABLE allocations, and they must not be expanded onto other migratetypes' buddy lists, to protect them from unmovable/reclaimable allocations. Implementing these requirements in __rmqueue_fallback(), which finds the largest possible block of freepages, has the bad side effect that high-order MIGRATE_CMA freepages are split continually even when a CMA freepage of suitable order is already available. The reason is that CMA freepages are never moved to another migratetype's buddy list, so the next __rmqueue_fallback() invocation again finds the largest block of freepages and splits it once more.

So the MIGRATE_CMA fallback should be handled separately. This patch introduces __rmqueue_cma_fallback(), a thin wrapper around __rmqueue_smallest(), and calls it before __rmqueue_fallback() when migratetype == MIGRATE_MOVABLE.

This results in an intended behaviour change: MIGRATE_CMA freepages are now always used first, ahead of the other migratetypes, as the fallback for movable allocations. But, as mentioned above, MIGRATE_CMA pages can serve only MIGRATE_MOVABLE allocations, so it is better to use MIGRATE_CMA freepages first whenever possible. Otherwise we needlessly take up precious freepages of other migratetypes and increase the chance of fragmentation.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@suse.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit dc67647b78
parent 30467e0b3b
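Before the diff, a sketch of what the change amounts to. The following is a minimal userspace model, not kernel code: the migratetype names and the fallback table mirror mm/page_alloc.c, but the nr_free counters and the rmqueue()/rmqueue_smallest() helpers are invented stand-ins (the real __rmqueue_fallback() steals whole pageblocks rather than decrementing counters). It shows the dispatch order that __rmqueue() follows after this patch: exact migratetype first, then the new CMA wrapper for movable requests, then the regular fallback table.

/*
 * Toy userspace model of the allocation order after this patch.
 * NOT kernel code: nr_free[], rmqueue() and rmqueue_smallest() are
 * invented stand-ins for illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

enum migratetype {
	MIGRATE_UNMOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_MOVABLE,
	MIGRATE_CMA,
	MIGRATE_RESERVE,
	MIGRATE_TYPES
};

/* Post-patch fallback table: MIGRATE_CMA no longer appears in any row. */
static const int fallbacks[MIGRATE_TYPES][4] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE, -1 },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE, -1 },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE, -1 },
	[MIGRATE_CMA]         = { -1 },	/* never used as a requester */
	[MIGRATE_RESERVE]     = { -1 },	/* never used as a requester */
};

/* Invented: free pages remaining per migratetype. */
static int nr_free[MIGRATE_TYPES] = {
	[MIGRATE_CMA] = 4,
	[MIGRATE_UNMOVABLE] = 4,
};

/* Stand-in for __rmqueue_smallest(): take one page of exactly this type. */
static bool rmqueue_smallest(enum migratetype mt)
{
	if (nr_free[mt] == 0)
		return false;
	nr_free[mt]--;
	printf("served from migratetype %d\n", (int)mt);
	return true;
}

/* Mirrors the new __rmqueue() flow: exact type, then the CMA wrapper for
 * movable requests only, then the ordinary fallback table. */
static bool rmqueue(enum migratetype mt)
{
	if (rmqueue_smallest(mt))
		return true;
	/* The step this patch adds: drain CMA before stealing elsewhere. */
	if (mt == MIGRATE_MOVABLE && rmqueue_smallest(MIGRATE_CMA))
		return true;
	for (int i = 0; fallbacks[mt][i] != -1; i++)
		if (rmqueue_smallest(fallbacks[mt][i]))
			return true;
	return false;
}

int main(void)
{
	/* Six movable requests against an empty movable list: the first
	 * four are served from MIGRATE_CMA, and only then does stealing
	 * from MIGRATE_UNMOVABLE begin. */
	for (int i = 0; i < 6; i++)
		rmqueue(MIGRATE_MOVABLE);
	return 0;
}

Compiled with cc -std=c99, the six movable requests drain the CMA pool completely before the first page is stolen from MIGRATE_UNMOVABLE, which is exactly the behaviour change described in the message above.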
@@ -1032,11 +1032,9 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
 static int fallbacks[MIGRATE_TYPES][4] = {
 	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,     MIGRATE_RESERVE },
 	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,     MIGRATE_RESERVE },
-#ifdef CONFIG_CMA
-	[MIGRATE_MOVABLE]     = { MIGRATE_CMA,         MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
-	[MIGRATE_CMA]         = { MIGRATE_RESERVE }, /* Never used */
-#else
 	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,   MIGRATE_RESERVE },
+#ifdef CONFIG_CMA
+	[MIGRATE_CMA]         = { MIGRATE_RESERVE }, /* Never used */
 #endif
 	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE }, /* Never used */
 #ifdef CONFIG_MEMORY_ISOLATION
@@ -1044,6 +1042,17 @@ static int fallbacks[MIGRATE_TYPES][4] = {
 #endif
 };
 
+#ifdef CONFIG_CMA
+static struct page *__rmqueue_cma_fallback(struct zone *zone,
+					unsigned int order)
+{
+	return __rmqueue_smallest(zone, order, MIGRATE_CMA);
+}
+#else
+static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
+					unsigned int order) { return NULL; }
+#endif
+
 /*
  * Move the free pages in a range to the free lists of the requested type.
  * Note that start_page and end_pages are not aligned on a pageblock
@@ -1195,19 +1204,8 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
 					struct page, lru);
 			area->nr_free--;
 
-			if (!is_migrate_cma(migratetype)) {
-				try_to_steal_freepages(zone, page,
-						start_migratetype,
-						migratetype);
-			} else {
-				/*
-				 * When borrowing from MIGRATE_CMA, we need to
-				 * release the excess buddy pages to CMA
-				 * itself, and we do not try to steal extra
-				 * free pages.
-				 */
-				buddy_type = migratetype;
-			}
+			try_to_steal_freepages(zone, page, start_migratetype,
+					migratetype);
 
 			/* Remove the page from the freelists */
 			list_del(&page->lru);
@@ -1249,6 +1247,10 @@ retry_reserve:
 	page = __rmqueue_smallest(zone, order, migratetype);
 
 	if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
-		page = __rmqueue_fallback(zone, order, migratetype);
+		if (migratetype == MIGRATE_MOVABLE)
+			page = __rmqueue_cma_fallback(zone, order);
+
+		if (!page)
+			page = __rmqueue_fallback(zone, order, migratetype);
 
 		/*