mm: move zone->pages_scanned into a vmstat counter
zone->pages_scanned is a write-intensive cache line during page reclaim
and it's also updated during page free. Move the counter into vmstat to
take advantage of the per-cpu updates and do not update it in the free
paths unless necessary.

On a small UMA machine running tiobench the difference is marginal. On
a 4-node machine the overhead is more noticeable. Note that automatic
NUMA balancing was disabled for this test as otherwise the system CPU
overhead is unpredictable.

            3.16.0-rc3    3.16.0-rc3    3.16.0-rc3
               vanilla  rearrange-v5     vmstat-v5
User            746.94        759.78        774.56
System        65336.22      58350.98      32847.27
Elapsed       27553.52      27282.02      27415.04

Note that the overhead reduction will vary depending on where exactly
pages are allocated and freed.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
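The win comes from vmstat's per-cpu batching: each CPU accumulates a
small local delta and folds it into the shared counter only when the
delta crosses a threshold, so the hot cache line is written far less
often than a plain shared zone->pages_scanned would be. Below is a
minimal userspace analogue of that batching scheme; the thread-local
delta, the flush threshold of 64, and all names are invented for
illustration and are not the kernel's implementation.

/* analogue.c: per-thread counter batching in the style of vmstat.
 * Compile with: cc -std=c11 -pthread analogue.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NTHREADS	4
#define STAT_THRESHOLD	64	/* invented flush batch size */

static atomic_long global_pages_scanned;	/* the shared cache line */
static _Thread_local long local_delta;		/* per-thread batch */

/* Analogue of __mod_zone_page_state(): buffer updates locally and touch
 * the shared counter only when the local delta grows past the threshold. */
static void mod_pages_scanned(long delta)
{
	local_delta += delta;
	if (local_delta >= STAT_THRESHOLD || local_delta <= -STAT_THRESHOLD) {
		atomic_fetch_add(&global_pages_scanned, local_delta);
		local_delta = 0;
	}
}

static void *scanner(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++)
		mod_pages_scanned(1);	/* "scan" one page */
	/* flush whatever is still batched locally */
	atomic_fetch_add(&global_pages_scanned, local_delta);
	return NULL;
}

int main(void)
{
	pthread_t t[NTHREADS];

	for (int i = 0; i < NTHREADS; i++)
		pthread_create(&t[i], NULL, scanner, NULL);
	for (int i = 0; i < NTHREADS; i++)
		pthread_join(t[i], NULL);
	printf("pages scanned: %ld\n", atomic_load(&global_pages_scanned));
	return 0;
}

With four threads the final count is still the exact 400000, but each
thread dirties the shared counter roughly once per 64 updates instead
of on every update.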
commit 0d5d823ab4
parent 3484b2de94
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -143,6 +143,7 @@ enum zone_stat_item {
 	NR_SHMEM,		/* shmem pages (included tmpfs/GEM pages) */
 	NR_DIRTIED,		/* page dirtyings since bootup */
 	NR_WRITTEN,		/* page writings since bootup */
+	NR_PAGES_SCANNED,	/* pages scanned since last reclaim */
 #ifdef CONFIG_NUMA
 	NUMA_HIT,		/* allocated in intended node */
 	NUMA_MISS,		/* allocated in non intended node */
@@ -480,7 +481,6 @@ struct zone {
 
 	/* Fields commonly accessed by the page reclaim scanner */
 	spinlock_t		lru_lock;
-	unsigned long		pages_scanned;	   /* since last reclaim */
 	struct lruvec		lruvec;
 
 	/* Evictions & activations on the inactive file list */
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -680,9 +680,12 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 	int migratetype = 0;
 	int batch_free = 0;
 	int to_free = count;
+	unsigned long nr_scanned;
 
 	spin_lock(&zone->lock);
-	zone->pages_scanned = 0;
+	nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
+	if (nr_scanned)
+		__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
 
 	while (to_free) {
 		struct page *page;
@@ -731,8 +734,11 @@ static void free_one_page(struct zone *zone,
 				unsigned int order,
 				int migratetype)
 {
+	unsigned long nr_scanned;
 	spin_lock(&zone->lock);
-	zone->pages_scanned = 0;
+	nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
+	if (nr_scanned)
+		__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
 
 	__free_one_page(page, pfn, zone, order, migratetype);
 	if (unlikely(!is_migrate_isolate(migratetype)))
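Both free paths above repeat the same read-then-conditionally-clear
sequence; this is the "do not update it in the free paths unless
necessary" part of the changelog, since a free on a zone with nothing
scanned only reads the counter and never dirties the cache line.
Factored into a hypothetical helper (not part of this commit) for
clarity, the pattern is:

/* Hypothetical helper, not in the commit: reset NR_PAGES_SCANNED
 * without writing when it is already zero. The caller must hold
 * zone->lock, as both free paths above do. */
static void zone_clear_pages_scanned(struct zone *zone)
{
	unsigned long nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);

	if (nr_scanned)
		__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
}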
@@ -3248,7 +3254,7 @@ void show_free_areas(unsigned int filter)
 			K(zone_page_state(zone, NR_BOUNCE)),
 			K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
 			K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
-			zone->pages_scanned,
+			K(zone_page_state(zone, NR_PAGES_SCANNED)),
 			(!zone_reclaimable(zone) ? "yes" : "no")
 			);
 		printk("lowmem_reserve[]:");
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -174,7 +174,8 @@ static unsigned long zone_reclaimable_pages(struct zone *zone)
 
 bool zone_reclaimable(struct zone *zone)
 {
-	return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
+	return zone_page_state(zone, NR_PAGES_SCANNED) <
+		zone_reclaimable_pages(zone) * 6;
 }
 
 static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
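The zone_reclaimable() hunk keeps the existing heuristic and only
changes where the counter lives: a zone still counts as reclaimable
while the pages scanned since the last reclaim stay below six times its
reclaimable pages. A standalone sketch of that arithmetic, with
illustrative numbers only:

#include <stdbool.h>
#include <stdio.h>

/* With 1000 reclaimable pages, reclaim may scan up to 6 * 1000 pages
 * before the zone is declared unreclaimable. Numbers are illustrative. */
int main(void)
{
	unsigned long reclaimable_pages = 1000;
	unsigned long nr_pages_scanned = 6100;	/* NR_PAGES_SCANNED */
	bool reclaimable = nr_pages_scanned < reclaimable_pages * 6;

	printf("zone_reclaimable: %s\n", reclaimable ? "yes" : "no"); /* no */
	return 0;
}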
@@ -1508,7 +1509,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
 
 	if (global_reclaim(sc)) {
-		zone->pages_scanned += nr_scanned;
+		__mod_zone_page_state(zone, NR_PAGES_SCANNED, nr_scanned);
 		if (current_is_kswapd())
 			__count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned);
 		else
@@ -1698,7 +1699,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
 	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
 				     &nr_scanned, sc, isolate_mode, lru);
 	if (global_reclaim(sc))
-		zone->pages_scanned += nr_scanned;
+		__mod_zone_page_state(zone, NR_PAGES_SCANNED, nr_scanned);
 
 	reclaim_stat->recent_scanned[file] += nr_taken;
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -763,6 +763,7 @@ const char * const vmstat_text[] = {
 	"nr_shmem",
 	"nr_dirtied",
 	"nr_written",
+	"nr_pages_scanned",
 
 #ifdef CONFIG_NUMA
 	"numa_hit",
@@ -1067,7 +1068,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
 		   min_wmark_pages(zone),
 		   low_wmark_pages(zone),
 		   high_wmark_pages(zone),
-		   zone->pages_scanned,
+		   zone_page_state(zone, NR_PAGES_SCANNED),
 		   zone->spanned_pages,
 		   zone->present_pages,
 		   zone->managed_pages);
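With the vmstat.c hunks applied, the counter becomes visible as
"nr_pages_scanned" in /proc/vmstat and feeds the per-zone "scanned"
field of /proc/zoneinfo. A small reader for the former, assuming a
kernel that includes this patch:

#include <stdio.h>
#include <string.h>

/* Print the nr_pages_scanned line from /proc/vmstat; the field name
 * comes from the vmstat_text[] addition above. */
int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f) {
		perror("/proc/vmstat");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		if (strncmp(line, "nr_pages_scanned ", 17) == 0)
			fputs(line, stdout);
	fclose(f);
	return 0;
}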