mm: memcontrol: remove synchronous stock draining code
With charge reparenting, the last synchronous stock drainer left.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Vladimir Davydov <vdavydov@parallels.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: David Rientjes <rientjes@google.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
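The consolidation works because the drain is guarded by a trylock: a caller that loses the race simply returns, since a drain is already in flight on its behalf, so no separate sync/async entry points are needed. Below is a minimal user-space sketch of that pattern; the names (fake_stock, NR_FAKE_CPUS, charge_mutex) are hypothetical stand-ins and pthreads replaces the kernel mutex API. It illustrates the guard, it is not the kernel code.

/*
 * Minimal user-space sketch of the trylock-guarded drain pattern.
 * All names here are hypothetical stand-ins; pthreads substitutes
 * for the kernel's mutex_trylock()/mutex_unlock().
 */
#include <pthread.h>
#include <stdio.h>

#define NR_FAKE_CPUS 4

struct fake_stock {
	unsigned int nr_pages;	/* pages cached on this "cpu" */
};

static struct fake_stock stocks[NR_FAKE_CPUS] = {
	{ .nr_pages = 12 }, { .nr_pages = 0 },
	{ .nr_pages = 7 },  { .nr_pages = 3 },
};

static pthread_mutex_t charge_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Only one drainer runs at a time. A caller that loses the trylock
 * race returns immediately: a drain is already running on its behalf.
 */
static void drain_all_stock(void)
{
	if (pthread_mutex_trylock(&charge_mutex) != 0)
		return;

	for (int cpu = 0; cpu < NR_FAKE_CPUS; cpu++) {
		if (stocks[cpu].nr_pages) {
			printf("draining %u pages from cpu %d\n",
			       stocks[cpu].nr_pages, cpu);
			stocks[cpu].nr_pages = 0;
		}
	}

	pthread_mutex_unlock(&charge_mutex);
}

int main(void)
{
	drain_all_stock();	/* drains all cached pages */
	drain_all_stock();	/* nothing left; still safe to call */
	return 0;
}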
parent b2052564e6
commit 6d3d6aa22a
@@ -634,8 +634,6 @@ static void disarm_static_keys(struct mem_cgroup *memcg)
 	disarm_kmem_keys(memcg);
 }
 
-static void drain_all_stock_async(struct mem_cgroup *memcg);
-
 static struct mem_cgroup_per_zone *
 mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone)
 {
@@ -2302,13 +2300,15 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 
 /*
  * Drains all per-CPU charge caches for given root_memcg resp. subtree
- * of the hierarchy under it. sync flag says whether we should block
- * until the work is done.
+ * of the hierarchy under it.
  */
-static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
+static void drain_all_stock(struct mem_cgroup *root_memcg)
 {
 	int cpu, curcpu;
 
+	/* If someone's already draining, avoid adding running more workers. */
+	if (!mutex_trylock(&percpu_charge_mutex))
+		return;
 	/* Notify other cpus that system-wide "drain" is running */
 	get_online_cpus();
 	curcpu = get_cpu();
@@ -2329,41 +2329,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
 		}
 	}
 	put_cpu();
-
-	if (!sync)
-		goto out;
-
-	for_each_online_cpu(cpu) {
-		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
-		if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
-			flush_work(&stock->work);
-	}
-out:
 	put_online_cpus();
-}
-
-/*
- * Tries to drain stocked charges in other cpus. This function is asynchronous
- * and just put a work per cpu for draining localy on each cpu. Caller can
- * expects some charges will be back later but cannot wait for it.
- */
-static void drain_all_stock_async(struct mem_cgroup *root_memcg)
-{
-	/*
-	 * If someone calls draining, avoid adding more kworker runs.
-	 */
-	if (!mutex_trylock(&percpu_charge_mutex))
-		return;
-	drain_all_stock(root_memcg, false);
-	mutex_unlock(&percpu_charge_mutex);
-}
-
-/* This is a synchronous drain interface. */
-static void drain_all_stock_sync(struct mem_cgroup *root_memcg)
-{
-	/* called when force_empty is called */
-	mutex_lock(&percpu_charge_mutex);
-	drain_all_stock(root_memcg, true);
-	mutex_unlock(&percpu_charge_mutex);
-}
+	mutex_unlock(&percpu_charge_mutex);
+}
 
@@ -2472,7 +2438,7 @@ retry:
 		goto retry;
 
 	if (!drained) {
-		drain_all_stock_async(mem_over_limit);
+		drain_all_stock(mem_over_limit);
 		drained = true;
 		goto retry;
 	}