mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-11 12:28:41 +08:00
Revert "mm: avoid waking kswapd for THP allocations when compaction is deferred or contended"
This reverts commit 782fd30406.
We are going to reinstate the __GFP_NO_KSWAPD flag that has been removed, the removal reverted, and then removed again — making this commit a pointless fixup for a problem that was caused by the removal of the __GFP_NO_KSWAPD flag. The thing is, we really don't want to wake up kswapd for THP allocations (because they fail quite commonly under any kind of memory pressure, including when there is tons of memory free), and these patches were just trying to fix up the underlying bug: the original removal of __GFP_NO_KSWAPD in commit c654345924
("mm: remove __GFP_NO_KSWAPD") was simply bogus. Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
ed23ec4f0a
commit
31f8d42d44
@ -2378,15 +2378,6 @@ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
|
||||
return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
|
||||
}
|
||||
|
||||
/* Returns true if the allocation is likely for THP */
|
||||
static bool is_thp_alloc(gfp_t gfp_mask, unsigned int order)
|
||||
{
|
||||
if (order == pageblock_order &&
|
||||
(gfp_mask & (__GFP_MOVABLE|__GFP_REPEAT)) == __GFP_MOVABLE)
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline struct page *
|
||||
__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
|
||||
struct zonelist *zonelist, enum zone_type high_zoneidx,
|
||||
@ -2425,8 +2416,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
|
||||
goto nopage;
|
||||
|
||||
restart:
|
||||
/* The decision whether to wake kswapd for THP is made later */
|
||||
if (!is_thp_alloc(gfp_mask, order))
|
||||
wake_all_kswapd(order, zonelist, high_zoneidx,
|
||||
zone_idx(preferred_zone));
|
||||
|
||||
@ -2498,22 +2487,16 @@ rebalance:
|
||||
goto got_pg;
|
||||
sync_migration = true;
|
||||
|
||||
if (is_thp_alloc(gfp_mask, order)) {
|
||||
/*
|
||||
* If compaction is deferred for high-order allocations, it is
|
||||
* because sync compaction recently failed. If this is the case
|
||||
* and the caller requested a movable allocation that does not
|
||||
* heavily disrupt the system then fail the allocation instead
|
||||
* of entering direct reclaim.
|
||||
* If compaction is deferred for high-order allocations, it is because
|
||||
* sync compaction recently failed. In this is the case and the caller
|
||||
* requested a movable allocation that does not heavily disrupt the
|
||||
* system then fail the allocation instead of entering direct reclaim.
|
||||
*/
|
||||
if (deferred_compaction || contended_compaction)
|
||||
if ((deferred_compaction || contended_compaction) &&
|
||||
(gfp_mask & (__GFP_MOVABLE|__GFP_REPEAT)) == __GFP_MOVABLE)
|
||||
goto nopage;
|
||||
|
||||
/* If process is willing to reclaim/compact then wake kswapd */
|
||||
wake_all_kswapd(order, zonelist, high_zoneidx,
|
||||
zone_idx(preferred_zone));
|
||||
}
|
||||
|
||||
/* Try direct reclaim and then allocating */
|
||||
page = __alloc_pages_direct_reclaim(gfp_mask, order,
|
||||
zonelist, high_zoneidx,
|
||||
|
Loading…
Reference in New Issue
Block a user