mm: use zone->managed_pages instead of zone->present_pages where appropriate
Now we have zone->managed_pages for "pages managed by the buddy system
in the zone", so replace zone->present_pages with zone->managed_pages
wherever what the caller really wants is the number of allocatable pages.

Signed-off-by: Jiang Liu <jiang.liu@huawei.com>
Cc: Wen Congyang <wency@cn.fujitsu.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Jiang Liu <jiang.liu@huawei.com>
Cc: Maciej Rutecki <maciej.rutecki@gmail.com>
Cc: Chris Clayton <chris2553@googlemail.com>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Minchan Kim <minchan@kernel.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Jianguo Wu <wujianguo@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f7210e6c4a
commit b40da04946
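For context, the three zone size fields relate as documented in include/linux/mmzone.h: spanned_pages covers the zone's PFN range including holes, present_pages excludes the holes, and managed_pages further excludes pages reserved at boot. A minimal userspace sketch of that accounting (all numbers invented for illustration; this is not kernel code):

#include <stdio.h>

/*
 * Illustrative only, not kernel code. Mirrors the relationship
 * documented in include/linux/mmzone.h:
 *   spanned_pages = zone_end_pfn - zone_start_pfn  (PFN range, may hold holes)
 *   present_pages = spanned_pages - absent_pages   (physical pages)
 *   managed_pages = present_pages - reserved_pages (buddy-allocatable)
 */
int main(void)
{
        /* Invented example: 1 GB span, 8 MB hole, 16 MB bootmem-reserved,
         * assuming 4 KB pages. */
        unsigned long spanned_pages = 1UL << 18;            /* 262144 pages */
        unsigned long present_pages = spanned_pages - 2048; /* minus hole */
        unsigned long managed_pages = present_pages - 4096; /* minus reserved */

        /* Sizing heuristics should use managed_pages: only those pages can
         * ever be handed out by the buddy allocator. */
        printf("present=%lu managed=%lu\n", present_pages, managed_pages);
        return 0;
}

Every hunk below is the same substitution: heuristics that size caches, watermarks, reserves, or thresholds should scale with pages the buddy allocator can actually hand out, not with raw present pages.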
mm/page_alloc.c

@@ -2808,7 +2808,7 @@ static unsigned int nr_free_zone_pages(int offset)
 	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
 
 	for_each_zone_zonelist(zone, z, zonelist, offset) {
-		unsigned long size = zone->present_pages;
+		unsigned long size = zone->managed_pages;
 		unsigned long high = high_wmark_pages(zone);
 		if (size > high)
 			sum += size - high;
@@ -2861,7 +2861,7 @@ void si_meminfo_node(struct sysinfo *val, int nid)
 	val->totalram = pgdat->node_present_pages;
 	val->freeram = node_page_state(nid, NR_FREE_PAGES);
 #ifdef CONFIG_HIGHMEM
-	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
+	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].managed_pages;
 	val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
 			NR_FREE_PAGES);
 #else
@@ -3939,7 +3939,7 @@ static int __meminit zone_batchsize(struct zone *zone)
 	 *
 	 * OK, so we don't know how big the cache is.  So guess.
 	 */
-	batch = zone->present_pages / 1024;
+	batch = zone->managed_pages / 1024;
 	if (batch * PAGE_SIZE > 512 * 1024)
 		batch = (512 * 1024) / PAGE_SIZE;
 	batch /= 4;		/* We effectively *= 4 below */
@@ -4023,7 +4023,7 @@ static void __meminit setup_zone_pageset(struct zone *zone)
 
 		if (percpu_pagelist_fraction)
 			setup_pagelist_highmark(pcp,
-				(zone->present_pages /
+				(zone->managed_pages /
 					percpu_pagelist_fraction));
 	}
 }
@@ -5435,8 +5435,8 @@ static void calculate_totalreserve_pages(void)
 			/* we treat the high watermark as reserved pages. */
 			max += high_wmark_pages(zone);
 
-			if (max > zone->present_pages)
-				max = zone->present_pages;
+			if (max > zone->managed_pages)
+				max = zone->managed_pages;
 			reserve_pages += max;
 			/*
 			 * Lowmem reserves are not available to
@@ -5468,7 +5468,7 @@ static void setup_per_zone_lowmem_reserve(void)
 	for_each_online_pgdat(pgdat) {
 		for (j = 0; j < MAX_NR_ZONES; j++) {
 			struct zone *zone = pgdat->node_zones + j;
-			unsigned long present_pages = zone->present_pages;
+			unsigned long managed_pages = zone->managed_pages;
 
 			zone->lowmem_reserve[j] = 0;
 
@@ -5482,9 +5482,9 @@ static void setup_per_zone_lowmem_reserve(void)
 					sysctl_lowmem_reserve_ratio[idx] = 1;
 
 				lower_zone = pgdat->node_zones + idx;
-				lower_zone->lowmem_reserve[j] = present_pages /
+				lower_zone->lowmem_reserve[j] = managed_pages /
 					sysctl_lowmem_reserve_ratio[idx];
-				present_pages += lower_zone->present_pages;
+				managed_pages += lower_zone->managed_pages;
 			}
 		}
 	}
@@ -5503,14 +5503,14 @@ static void __setup_per_zone_wmarks(void)
 	/* Calculate total number of !ZONE_HIGHMEM pages */
 	for_each_zone(zone) {
 		if (!is_highmem(zone))
-			lowmem_pages += zone->present_pages;
+			lowmem_pages += zone->managed_pages;
 	}
 
 	for_each_zone(zone) {
 		u64 tmp;
 
 		spin_lock_irqsave(&zone->lock, flags);
-		tmp = (u64)pages_min * zone->present_pages;
+		tmp = (u64)pages_min * zone->managed_pages;
 		do_div(tmp, lowmem_pages);
 		if (is_highmem(zone)) {
 			/*
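For reference, the hunk above prorates pages_min (derived from min_free_kbytes) across zones by each zone's share of lowmem; after this patch the share is computed from allocatable pages only. A rough userspace re-computation of that step, with invented zone sizes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Invented numbers: pages_min as derived from min_free_kbytes, and
         * two lowmem zones of 16 MB and 1008 MB (4 KB pages). */
        uint64_t pages_min = 16384;                  /* 64 MB floor overall */
        unsigned long managed[2] = { 4096, 258048 };
        unsigned long lowmem_pages = managed[0] + managed[1];

        for (int i = 0; i < 2; i++) {
                /* Same arithmetic as the patched __setup_per_zone_wmarks():
                 * tmp = (u64)pages_min * zone->managed_pages;
                 * do_div(tmp, lowmem_pages); */
                uint64_t tmp = pages_min * managed[i] / lowmem_pages;
                printf("zone %d: WMARK_MIN ~ %llu pages\n",
                       i, (unsigned long long)tmp);
        }
        return 0;   /* prints 256 and 16128 pages respectively */
}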
@@ -5524,7 +5524,7 @@ static void __setup_per_zone_wmarks(void)
 			 */
 			unsigned long min_pages;
 
-			min_pages = zone->present_pages / 1024;
+			min_pages = zone->managed_pages / 1024;
 			min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
 			zone->watermark[WMARK_MIN] = min_pages;
 		} else {
@@ -5586,7 +5586,7 @@ static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
 	unsigned int gb, ratio;
 
 	/* Zone size in gigabytes */
-	gb = zone->present_pages >> (30 - PAGE_SHIFT);
+	gb = zone->managed_pages >> (30 - PAGE_SHIFT);
 	if (gb)
 		ratio = int_sqrt(10 * gb);
 	else
@@ -5672,7 +5672,7 @@ int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
 		return rc;
 
 	for_each_zone(zone)
-		zone->min_unmapped_pages = (zone->present_pages *
+		zone->min_unmapped_pages = (zone->managed_pages *
 				sysctl_min_unmapped_ratio) / 100;
 	return 0;
 }
@@ -5688,7 +5688,7 @@ int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
 		return rc;
 
 	for_each_zone(zone)
-		zone->min_slab_pages = (zone->present_pages *
+		zone->min_slab_pages = (zone->managed_pages *
 				sysctl_min_slab_ratio) / 100;
 	return 0;
 }
@@ -5730,7 +5730,7 @@ int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
 	for_each_populated_zone(zone) {
 		for_each_possible_cpu(cpu) {
 			unsigned long high;
-			high = zone->present_pages / percpu_pagelist_fraction;
+			high = zone->managed_pages / percpu_pagelist_fraction;
 			setup_pagelist_highmark(
 				per_cpu_ptr(zone->pageset, cpu), high);
 		}
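A quick sanity check of the sysctl handler's arithmetic above: with percpu_pagelist_fraction set, each CPU's per-cpu pagelist high mark becomes that fraction of the zone's (now allocatable) pages. Values below are invented:

#include <stdio.h>

int main(void)
{
        /* Invented values. */
        unsigned long managed_pages = 258048; /* ~1008 MB at 4 KB pages */
        unsigned long fraction = 8;           /* vm.percpu_pagelist_fraction */

        /* Mirrors: high = zone->managed_pages / percpu_pagelist_fraction; */
        unsigned long high = managed_pages / fraction;

        /* 32256 pages here, i.e. ~126 MB worth per CPU pagelist */
        printf("pcp high = %lu pages\n", high);
        return 0;
}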
mm/vmscan.c | 14 +++++++-------
@@ -2010,7 +2010,7 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
 	 * a reasonable chance of completing and allocating the page
 	 */
 	balance_gap = min(low_wmark_pages(zone),
-		(zone->present_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
+		(zone->managed_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
 			KSWAPD_ZONE_BALANCE_GAP_RATIO);
 	watermark = high_wmark_pages(zone) + balance_gap + (2UL << sc->order);
 	watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0, 0);
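KSWAPD_ZONE_BALANCE_GAP_RATIO is 100 in kernels of this era, so the balance gap above is roughly 1% of the zone, rounded up and capped at the low watermark. A standalone restatement of that expression (zone numbers invented):

#include <stdio.h>

#define KSWAPD_ZONE_BALANCE_GAP_RATIO 100 /* as in include/linux/swap.h */

static unsigned long min_ul(unsigned long a, unsigned long b)
{
        return a < b ? a : b;
}

int main(void)
{
        /* Invented values. */
        unsigned long managed_pages = 258048; /* zone size in pages */
        unsigned long low_wmark = 1024;       /* low watermark in pages */

        /* ceil(managed_pages / 100), capped at the low watermark. */
        unsigned long balance_gap = min_ul(low_wmark,
                (managed_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO - 1) /
                        KSWAPD_ZONE_BALANCE_GAP_RATIO);

        printf("balance_gap = %lu pages\n", balance_gap); /* 1024 here */
        return 0;
}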
@@ -2525,7 +2525,7 @@ static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
  */
 static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
 {
-	unsigned long present_pages = 0;
+	unsigned long managed_pages = 0;
 	unsigned long balanced_pages = 0;
 	int i;
 
@@ -2536,7 +2536,7 @@ static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
 		if (!populated_zone(zone))
 			continue;
 
-		present_pages += zone->present_pages;
+		managed_pages += zone->managed_pages;
 
 		/*
 		 * A special case here:
@@ -2546,18 +2546,18 @@ static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
 		 * they must be considered balanced here as well!
 		 */
 		if (zone->all_unreclaimable) {
-			balanced_pages += zone->present_pages;
+			balanced_pages += zone->managed_pages;
 			continue;
 		}
 
 		if (zone_balanced(zone, order, 0, i))
-			balanced_pages += zone->present_pages;
+			balanced_pages += zone->managed_pages;
 		else if (!order)
 			return false;
 	}
 
 	if (order)
-		return balanced_pages >= (present_pages >> 2);
+		return balanced_pages >= (managed_pages >> 2);
 	else
 		return true;
 }
@@ -2745,7 +2745,7 @@ loop_again:
 			 * of the zone, whichever is smaller.
 			 */
 			balance_gap = min(low_wmark_pages(zone),
-				(zone->present_pages +
+				(zone->managed_pages +
 					KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
 				KSWAPD_ZONE_BALANCE_GAP_RATIO);
 			/*
mm/vmstat.c

@@ -142,7 +142,7 @@ int calculate_normal_threshold(struct zone *zone)
 	 * 125		1024		10	16-32 GB	9
 	 */
 
-	mem = zone->present_pages >> (27 - PAGE_SHIFT);
+	mem = zone->managed_pages >> (27 - PAGE_SHIFT);
 
 	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
 
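To see the effect of the vmstat hunk: mem is the zone size in 128 MB units, and the per-cpu stat threshold grows with log2 of both the CPU count and the zone size (the kernel additionally caps the result at 125, which is why the sample table tops out there). A userspace re-computation with __builtin_clzl standing in for the kernel's fls(), using invented values and assuming 64-bit unsigned long:

#include <stdio.h>

/* Userspace stand-in for the kernel's fls(): position of the highest
 * set bit, 1-based; returns 0 for 0. */
static int fls_approx(unsigned long x)
{
        return x ? 64 - __builtin_clzl(x) : 0;
}

int main(void)
{
        /* Invented values: a 16 GB zone with 4 KB pages, 8 online CPUs. */
        unsigned long managed_pages = 4194304;
        int page_shift = 12, cpus = 8;

        /* mem: zone size in 128 MB (2^27 byte) units, as in the hunk above. */
        unsigned long mem = managed_pages >> (27 - page_shift);
        int threshold = 2 * fls_approx(cpus) * (1 + fls_approx(mem));

        printf("mem=%lu threshold=%d\n", mem, threshold); /* mem=128, 72 */
        return 0;
}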