mm/page_alloc: fix memalloc_nocma_{save/restore} APIs
Currently, the memalloc_nocma_{save/restore} APIs, which prevent allocation
from the CMA area, are implemented by using current_gfp_context(). However,
this implementation has two problems.
First, it doesn't work for the allocation fastpath. The fastpath uses the
original gfp_mask, because current_gfp_context() was introduced to control
reclaim and is therefore only applied on the slowpath. So pages can still be
allocated from the CMA area through the fastpath even when the
memalloc_nocma_{save/restore} APIs are in use. Currently there is just one
user of these APIs, and it has a fallback method that prevents an actual
problem.
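
For reference, the usage pattern these APIs are meant to support looks
roughly like this (a minimal sketch; the alloc_pages() call and GFP mask are
illustrative, not taken from the in-tree user):

	unsigned int nocma_flags;
	struct page *page;

	nocma_flags = memalloc_nocma_save();
	/* allocations in this section should avoid CMA pageblocks ... */
	page = alloc_pages(GFP_HIGHUSER_MOVABLE, 0);
	/* ... but before this patch the fastpath could still return CMA pages */
	memalloc_nocma_restore(nocma_flags);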
Second, clearing __GFP_MOVABLE in current_gfp_context() has the side effect
of also excluding ZONE_MOVABLE memory from the allocation target, since
__GFP_MOVABLE governs both zone selection and migratetype selection.
To fix these problems, this patch changes how the CMA area is excluded from
page allocation. The main point of this change is to use alloc_flags:
alloc_flags is the mechanism used to control allocation behavior, so it is
the natural place to exclude the CMA area.
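
The difference between the two schemes can be condensed as follows (a
simplified sketch distilled from the hunks below, not verbatim kernel code):

	/* Old: filter the GFP mask in current_gfp_context() -- slowpath
	 * only, and it clobbers zone selection as well:
	 */
	if (current->flags & PF_MEMALLOC_NOCMA)
		gfp_mask &= ~__GFP_MOVABLE;	/* also drops ZONE_MOVABLE */

	/* New: gate the ALLOC_CMA flag instead, applied on both the fast
	 * and slow paths; the GFP mask and zone selection are untouched:
	 */
	if (!(current->flags & PF_MEMALLOC_NOCMA) &&
	    gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
		alloc_flags |= ALLOC_CMA;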
Fixes: d7fefcc8de ("mm/cma: add PF flag to force non cma alloc")
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Roman Gushchin <guro@fb.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: "Aneesh Kumar K . V" <aneesh.kumar@linux.ibm.com>
Link: http://lkml.kernel.org/r/1595468942-29687-1-git-send-email-iamjoonsoo.kim@lge.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 182f3d7a02
commit 8510e69c8e
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -175,12 +175,10 @@ static inline bool in_vfork(struct task_struct *tsk)
  * Applies per-task gfp context to the given allocation flags.
  * PF_MEMALLOC_NOIO implies GFP_NOIO
  * PF_MEMALLOC_NOFS implies GFP_NOFS
- * PF_MEMALLOC_NOCMA implies no allocation from CMA region.
  */
 static inline gfp_t current_gfp_context(gfp_t flags)
 {
-	if (unlikely(current->flags &
-		     (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_NOCMA))) {
+	if (unlikely(current->flags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS))) {
 		/*
 		 * NOIO implies both NOIO and NOFS and it is a weaker context
 		 * so always make sure it makes precedence
@@ -189,10 +187,6 @@ static inline gfp_t current_gfp_context(gfp_t flags)
 			flags &= ~(__GFP_IO | __GFP_FS);
 		else if (current->flags & PF_MEMALLOC_NOFS)
 			flags &= ~__GFP_FS;
-#ifdef CONFIG_CMA
-		if (current->flags & PF_MEMALLOC_NOCMA)
-			flags &= ~__GFP_MOVABLE;
-#endif
 	}
 	return flags;
 }
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2785,7 +2785,7 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
 		 * allocating from CMA when over half of the zone's free memory
 		 * is in the CMA area.
 		 */
-		if (migratetype == MIGRATE_MOVABLE &&
+		if (alloc_flags & ALLOC_CMA &&
 		    zone_page_state(zone, NR_FREE_CMA_PAGES) >
 		    zone_page_state(zone, NR_FREE_PAGES) / 2) {
 			page = __rmqueue_cma_fallback(zone, order);
@@ -2796,7 +2796,7 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
 retry:
 	page = __rmqueue_smallest(zone, order, migratetype);
 	if (unlikely(!page)) {
-		if (migratetype == MIGRATE_MOVABLE)
+		if (alloc_flags & ALLOC_CMA)
 			page = __rmqueue_cma_fallback(zone, order);
 
 		if (!page && __rmqueue_fallback(zone, order, migratetype,
@@ -3687,6 +3687,20 @@ alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
 	return alloc_flags;
 }
 
+static inline unsigned int current_alloc_flags(gfp_t gfp_mask,
+					unsigned int alloc_flags)
+{
+#ifdef CONFIG_CMA
+	unsigned int pflags = current->flags;
+
+	if (!(pflags & PF_MEMALLOC_NOCMA) &&
+			gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
+		alloc_flags |= ALLOC_CMA;
+
+#endif
+	return alloc_flags;
+}
+
 /*
  * get_page_from_freelist goes through the zonelist trying to allocate
  * a page.
@@ -4333,10 +4347,8 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 	} else if (unlikely(rt_task(current)) && !in_interrupt())
 		alloc_flags |= ALLOC_HARDER;
 
-#ifdef CONFIG_CMA
-	if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
-		alloc_flags |= ALLOC_CMA;
-#endif
+	alloc_flags = current_alloc_flags(gfp_mask, alloc_flags);
+
 	return alloc_flags;
 }
 
@@ -4637,7 +4649,7 @@ retry:
 
 	reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
 	if (reserve_flags)
-		alloc_flags = reserve_flags;
+		alloc_flags = current_alloc_flags(gfp_mask, reserve_flags);
 
 	/*
 	 * Reset the nodemask and zonelist iterators if memory policies can be
@@ -4714,7 +4726,7 @@ retry:
 
 	/* Avoid allocations with no watermarks from looping endlessly */
 	if (tsk_is_oom_victim(current) &&
-	    (alloc_flags == ALLOC_OOM ||
+	    (alloc_flags & ALLOC_OOM ||
 	     (gfp_mask & __GFP_NOMEMALLOC)))
 		goto nopage;
 
@@ -4806,8 +4818,7 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
 	if (should_fail_alloc_page(gfp_mask, order))
 		return false;
 
-	if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE)
-		*alloc_flags |= ALLOC_CMA;
+	*alloc_flags = current_alloc_flags(gfp_mask, *alloc_flags);
 
 	return true;
 }