2
0
mirror of https://github.com/edk2-porting/linux-next.git synced 2024-12-01 09:43:34 +08:00

mm/compaction: factor out code to test if we should run compaction for target order

We always do the zone_watermark_ok check and the compaction_suitable check
together to test whether compaction for the target order should be run.
Factor this code out to remove the repeated code.

Link: https://lkml.kernel.org/r/20230901155141.249860-7-shikemeng@huaweicloud.com
Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Kemeng Shi 2023-09-01 23:51:41 +08:00 committed by Andrew Morton
parent 9cc17ede51
commit e19a3f595a

View File

@ -2378,6 +2378,30 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
return false; return false;
} }
/*
 * Should we do compaction for target allocation order.
 * Return COMPACT_SUCCESS if allocation for target order can be already
 * satisfied
 * Return COMPACT_SKIPPED if compaction for target order is likely to fail
 * Return COMPACT_CONTINUE if compaction for target order should be run
 */
static enum compact_result
compaction_suit_allocation_order(struct zone *zone, unsigned int order,
int highest_zoneidx, unsigned int alloc_flags)
{
unsigned long watermark;
/* Pick the watermark (min/low/high) the caller encoded in alloc_flags. */
watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
/* Allocation can already succeed, nothing to do */
if (zone_watermark_ok(zone, order, watermark, highest_zoneidx,
alloc_flags))
return COMPACT_SUCCESS;
/* Compaction is likely to fail */
if (!compaction_suitable(zone, order, highest_zoneidx))
return COMPACT_SKIPPED;
return COMPACT_CONTINUE;
}
static enum compact_result static enum compact_result
compact_zone(struct compact_control *cc, struct capture_control *capc) compact_zone(struct compact_control *cc, struct capture_control *capc)
{ {
@ -2403,19 +2427,11 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
cc->migratetype = gfp_migratetype(cc->gfp_mask); cc->migratetype = gfp_migratetype(cc->gfp_mask);
if (!is_via_compact_memory(cc->order)) { if (!is_via_compact_memory(cc->order)) {
unsigned long watermark; ret = compaction_suit_allocation_order(cc->zone, cc->order,
cc->highest_zoneidx,
/* Allocation can already succeed, nothing to do */ cc->alloc_flags);
watermark = wmark_pages(cc->zone, if (ret != COMPACT_CONTINUE)
cc->alloc_flags & ALLOC_WMARK_MASK); return ret;
if (zone_watermark_ok(cc->zone, cc->order, watermark,
cc->highest_zoneidx, cc->alloc_flags))
return COMPACT_SUCCESS;
/* Compaction is likely to fail */
if (!compaction_suitable(cc->zone, cc->order,
cc->highest_zoneidx))
return COMPACT_SKIPPED;
} }
/* /*
@ -2914,6 +2930,7 @@ static bool kcompactd_node_suitable(pg_data_t *pgdat)
int zoneid; int zoneid;
struct zone *zone; struct zone *zone;
enum zone_type highest_zoneidx = pgdat->kcompactd_highest_zoneidx; enum zone_type highest_zoneidx = pgdat->kcompactd_highest_zoneidx;
enum compact_result ret;
for (zoneid = 0; zoneid <= highest_zoneidx; zoneid++) { for (zoneid = 0; zoneid <= highest_zoneidx; zoneid++) {
zone = &pgdat->node_zones[zoneid]; zone = &pgdat->node_zones[zoneid];
@ -2921,14 +2938,10 @@ static bool kcompactd_node_suitable(pg_data_t *pgdat)
if (!populated_zone(zone)) if (!populated_zone(zone))
continue; continue;
/* Allocation can already succeed, check other zones */ ret = compaction_suit_allocation_order(zone,
if (zone_watermark_ok(zone, pgdat->kcompactd_max_order, pgdat->kcompactd_max_order,
min_wmark_pages(zone), highest_zoneidx, ALLOC_WMARK_MIN);
highest_zoneidx, 0)) if (ret == COMPACT_CONTINUE)
continue;
if (compaction_suitable(zone, pgdat->kcompactd_max_order,
highest_zoneidx))
return true; return true;
} }
@ -2951,6 +2964,8 @@ static void kcompactd_do_work(pg_data_t *pgdat)
.ignore_skip_hint = false, .ignore_skip_hint = false,
.gfp_mask = GFP_KERNEL, .gfp_mask = GFP_KERNEL,
}; };
enum compact_result ret;
trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order, trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
cc.highest_zoneidx); cc.highest_zoneidx);
count_compact_event(KCOMPACTD_WAKE); count_compact_event(KCOMPACTD_WAKE);
@ -2965,12 +2980,9 @@ static void kcompactd_do_work(pg_data_t *pgdat)
if (compaction_deferred(zone, cc.order)) if (compaction_deferred(zone, cc.order))
continue; continue;
/* Allocation can already succeed, nothing to do */ ret = compaction_suit_allocation_order(zone,
if (zone_watermark_ok(zone, cc.order, cc.order, zoneid, ALLOC_WMARK_MIN);
min_wmark_pages(zone), zoneid, 0)) if (ret != COMPACT_CONTINUE)
continue;
if (!compaction_suitable(zone, cc.order, zoneid))
continue; continue;
if (kthread_should_stop()) if (kthread_should_stop())