mm, page_alloc: remove field from alloc_context
The classzone_idx can be inferred from preferred_zoneref so remove the unnecessary field and save stack space.

Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent c33d6c06f6
commit 93ea9964d1
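For context, the redundancy the patch removes can be sketched outside the kernel: a zoneref already records the index of the zone it points at, so caching that index again in alloc_context only duplicates state that preferred_zoneref carries. The following is a minimal user-space sketch, not the kernel code; the structure definitions are simplified stand-ins and the zone index value is illustrative, but the shape of the ac_classzone_idx() helper mirrors the one the patch introduces.

#include <stdio.h>

/* Simplified stand-ins for the kernel structures involved. */
struct zone { const char *name; };

/* A zoneref pairs a zone pointer with that zone's index. */
struct zoneref {
	struct zone *zone;
	int zone_idx;
};

/* Before the patch, classzone_idx cached what preferred_zoneref already knows. */
struct alloc_context {
	struct zoneref *preferred_zoneref;
	/* int classzone_idx;  -- removed: derivable from preferred_zoneref */
};

/* Mirrors the new helper: read the index straight from the zoneref. */
#define ac_classzone_idx(ac) ((ac)->preferred_zoneref->zone_idx)

int main(void)
{
	struct zone normal = { "Normal" };
	struct zoneref zref = { &normal, 2 };   /* illustrative zone index */
	struct alloc_context ac = { .preferred_zoneref = &zref };

	/* Callers that used ac->classzone_idx now ask the zoneref instead. */
	printf("classzone_idx = %d\n", ac_classzone_idx(&ac));
	return 0;
}

Reading the index through the zoneref costs an extra pointer dereference at each use, but it drops a field from a structure that lives on the stack for every allocation, which is the saving the changelog points to.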
@@ -1602,7 +1602,7 @@ unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order,

 		status = compact_zone_order(zone, order, gfp_mask, mode,
 				&zone_contended, alloc_flags,
-				ac->classzone_idx);
+				ac_classzone_idx(ac));
 		rc = max(status, rc);
 		/*
 		 * It takes at least one zone that wasn't lock contended
@@ -1612,7 +1612,7 @@ unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order,

 		/* If a normal allocation would succeed, stop compacting */
 		if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
-					ac->classzone_idx, alloc_flags)) {
+					ac_classzone_idx(ac), alloc_flags)) {
 			/*
 			 * We think the allocation will succeed in this zone,
 			 * but it is not certain, hence the false. The caller

@@ -103,12 +103,13 @@ struct alloc_context {
 	struct zonelist *zonelist;
 	nodemask_t *nodemask;
 	struct zoneref *preferred_zoneref;
-	int classzone_idx;
 	int migratetype;
 	enum zone_type high_zoneidx;
 	bool spread_dirty_pages;
 };

+#define ac_classzone_idx(ac) zonelist_zone_idx(ac->preferred_zoneref)
+
 /*
  * Locate the struct page for both the matching buddy in our
  * pair (buddy1) and the combined O(n+1) page they form (page).

@@ -2771,7 +2771,7 @@ zonelist_scan:

 		mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
 		if (!zone_watermark_fast(zone, order, mark,
-				       ac->classzone_idx, alloc_flags)) {
+				       ac_classzone_idx(ac), alloc_flags)) {
 			int ret;

 			/* Checked here to keep the fast path fast */
@@ -2794,7 +2794,7 @@ zonelist_scan:
 			default:
 				/* did we reclaim enough */
 				if (zone_watermark_ok(zone, order, mark,
-						ac->classzone_idx, alloc_flags))
+						ac_classzone_idx(ac), alloc_flags))
 					goto try_this_zone;

 				continue;
@@ -3114,7 +3114,7 @@ static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac)

 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
 						ac->high_zoneidx, ac->nodemask)
-		wakeup_kswapd(zone, order, zonelist_zone_idx(ac->preferred_zoneref));
+		wakeup_kswapd(zone, order, ac_classzone_idx(ac));
 }

 static inline unsigned int
@@ -3422,8 +3422,6 @@ retry_cpuset:
 		goto no_zone;
 	}

-	ac.classzone_idx = zonelist_zone_idx(ac.preferred_zoneref);
-
 	/* First allocation attempt */
 	page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
 	if (likely(page))