mm: don't avoid high-priority reclaim on memcg limit reclaim
Commit 246e87a939 ("memcg: fix get_scan_count() for small targets")
sought to avoid high reclaim priorities for memcg by forcing it to scan
a minimum number of pages when lru_pages >> priority yielded nothing.
This was done at a time when reclaim decisions like dirty throttling
were tied to the priority level.
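
[Editor's note: to make the rounding concrete, DEF_PRIORITY is 12, so
any LRU list smaller than 2^12 = 4096 pages computes a scan target of
zero at the initial priority. A standalone sketch of that arithmetic,
not kernel code, with a hypothetical list size:

	#include <stdio.h>

	#define DEF_PRIORITY	12

	int main(void)
	{
		unsigned long lru_pages = 2048;	/* a small memcg LRU */
		int priority;

		/* the target doubles each time the priority drops a level */
		for (priority = DEF_PRIORITY; priority >= 0; priority--)
			printf("priority %2d -> scan target %lu\n",
			       priority, lru_pages >> priority);
		return 0;
	}

At priority 12 the shift yields 0 and the target stays tiny until the
priority has dropped several levels, which is the latency problem the
force_scan minimum was papering over.]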
Nowadays, the only meaningful thing still tied to priority dropping
below DEF_PRIORITY - 2 is gating whether laptop_mode=1 is generally
allowed to write. But that is from an era where direct reclaim was
still allowed to call ->writepage, and kswapd nowadays avoids writes
until it's scanned every clean page in the system. Potential changes to
how quickly sc->may_writepage could trigger are of little concern.
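
[Editor's note: the gate in question can be modeled as below — a
simplified standalone sketch, not the kernel's actual code; in this era
the check lived in the main reclaim loop and flipped sc->may_writepage
once the priority fell below DEF_PRIORITY - 2:

	#include <stdbool.h>
	#include <stdio.h>

	#define DEF_PRIORITY	12

	/* laptop_mode defers reclaim writeback until pressure is severe */
	static bool may_writepage(bool laptop_mode, int priority)
	{
		if (!laptop_mode)
			return true;
		return priority < DEF_PRIORITY - 2;
	}

	int main(void)
	{
		int priority;

		for (priority = DEF_PRIORITY; priority >= 0; priority--)
			printf("priority %2d: writeback %s\n", priority,
			       may_writepage(true, priority) ?
			       "allowed" : "deferred");
		return 0;
	}

Removing force_scan can only change how soon this gate flips, which
the paragraph above argues no longer matters now that direct reclaim
does not call ->writepage.]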
Remove the force_scan stuff, as well as the ugly multi-pass target
calculation that it necessitated.
Link: http://lkml.kernel.org/r/20170228214007.5621-7-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Hillf Danton <hillf.zj@alibaba-inc.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Jia He <hejianet@gmail.com>
Cc: Mel Gorman <mgorman@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 688035f729
parent a2d7f8e461
mm/vmscan.c | 94 lines changed

--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2123,21 +2123,8 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
 	unsigned long anon_prio, file_prio;
 	enum scan_balance scan_balance;
 	unsigned long anon, file;
-	bool force_scan = false;
 	unsigned long ap, fp;
 	enum lru_list lru;
-	bool some_scanned;
-	int pass;
-
-	/*
-	 * If the zone or memcg is small, nr[l] can be 0.  When
-	 * reclaiming for a memcg, a priority drop can cause high
-	 * latencies, so it's better to scan a minimum amount. When a
-	 * cgroup has already been deleted, scrape out the remaining
-	 * cache forcefully to get rid of the lingering state.
-	 */
-	if (!global_reclaim(sc) || !mem_cgroup_online(memcg))
-		force_scan = true;
 
 	/* If we have no swap space, do not bother scanning anon pages. */
 	if (!sc->may_swap || mem_cgroup_get_nr_swap_pages(memcg) <= 0) {
@@ -2268,55 +2255,48 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
 	fraction[1] = fp;
 	denominator = ap + fp + 1;
 out:
-	some_scanned = false;
-	/* Only use force_scan on second pass. */
-	for (pass = 0; !some_scanned && pass < 2; pass++) {
-		*lru_pages = 0;
-		for_each_evictable_lru(lru) {
-			int file = is_file_lru(lru);
-			unsigned long size;
-			unsigned long scan;
-
-			size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
-			scan = size >> sc->priority;
-
-			if (!scan && pass && force_scan)
-				scan = min(size, SWAP_CLUSTER_MAX);
-
-			switch (scan_balance) {
-			case SCAN_EQUAL:
-				/* Scan lists relative to size */
-				break;
-			case SCAN_FRACT:
-				/*
-				 * Scan types proportional to swappiness and
-				 * their relative recent reclaim efficiency.
-				 */
-				scan = div64_u64(scan * fraction[file],
-						 denominator);
-				break;
-			case SCAN_FILE:
-			case SCAN_ANON:
-				/* Scan one type exclusively */
-				if ((scan_balance == SCAN_FILE) != file) {
-					size = 0;
-					scan = 0;
-				}
-				break;
-			default:
-				/* Look ma, no brain */
-				BUG();
-			}
-
-			*lru_pages += size;
-			nr[lru] = scan;
-
-			/*
-			 * Skip the second pass and don't force_scan,
-			 * if we found something to scan.
-			 */
-			some_scanned |= !!scan;
-		}
+	*lru_pages = 0;
+	for_each_evictable_lru(lru) {
+		int file = is_file_lru(lru);
+		unsigned long size;
+		unsigned long scan;
+
+		size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
+		scan = size >> sc->priority;
+		/*
+		 * If the cgroup's already been deleted, make sure to
+		 * scrape out the remaining cache.
+		 */
+		if (!scan && !mem_cgroup_online(memcg))
+			scan = min(size, SWAP_CLUSTER_MAX);
+
+		switch (scan_balance) {
+		case SCAN_EQUAL:
+			/* Scan lists relative to size */
+			break;
+		case SCAN_FRACT:
+			/*
+			 * Scan types proportional to swappiness and
+			 * their relative recent reclaim efficiency.
+			 */
+			scan = div64_u64(scan * fraction[file],
+					 denominator);
+			break;
+		case SCAN_FILE:
+		case SCAN_ANON:
+			/* Scan one type exclusively */
+			if ((scan_balance == SCAN_FILE) != file) {
+				size = 0;
+				scan = 0;
+			}
+			break;
+		default:
+			/* Look ma, no brain */
+			BUG();
+		}
+
+		*lru_pages += size;
+		nr[lru] = scan;
 	}
 }
 
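[Editor's note: the net effect is easiest to see in isolation. The
target calculation is now a single pass, and the only forced minimum
left is for cgroups that have already been deleted. A standalone model
of that surviving rule, not kernel code; SWAP_CLUSTER_MAX is 32 in
kernels of this era:

	#include <stdbool.h>
	#include <stdio.h>

	#define SWAP_CLUSTER_MAX	32UL

	static unsigned long min_ul(unsigned long a, unsigned long b)
	{
		return a < b ? a : b;
	}

	/* post-patch scan target: no multi-pass, no force_scan */
	static unsigned long scan_target(unsigned long size, int priority,
					 bool memcg_online)
	{
		unsigned long scan = size >> priority;

		/* deleted cgroups still get lingering cache scraped out */
		if (!scan && !memcg_online)
			scan = min_ul(size, SWAP_CLUSTER_MAX);
		return scan;
	}

	int main(void)
	{
		printf("online,  small: %lu\n", scan_target(2000, 12, true));  /* 0  */
		printf("offline, small: %lu\n", scan_target(2000, 12, false)); /* 32 */
		return 0;
	}

Online cgroups now simply get nothing at high priorities and wait for
the priority to drop, while offline ones are still drained.]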