memcg: free memcg_caches slot on css offline
We need to look up a kmem_cache in ->memcg_params.memcg_caches arrays only
on allocations, so there is no need to have the array entries set until css
free - we can clear them on css offline. This will allow us to reuse array
entries more efficiently and avoid costly array relocations.

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Tejun Heo <tj@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Dave Chinner <david@fromorbit.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f1008365bb
commit 2a4db7eb93
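Before the diff, a minimal userspace sketch of the invariant the patch relies on may help: the memcg_caches array of a root cache is consulted only at allocation time, so a cgroup's slot can be cleared as soon as the cgroup goes offline and the slot reused later, while the kmemcg_id itself is still released only at css free. This is not kernel code; the structure layout, the MAX_MEMCGS bound, and the lookup()/offline() helpers are illustrative only.

#include <stdio.h>

#define MAX_MEMCGS 4    /* illustrative bound, not a kernel constant */

struct kmem_cache {
        const char *name;
        /* per-cgroup children, indexed by mem_cgroup::kmemcg_id */
        struct kmem_cache *memcg_caches[MAX_MEMCGS];
};

struct mem_cgroup {
        int kmemcg_id;          /* slot in every root cache's array, -1 if none */
        int kmem_acct_active;   /* cleared at offline, before the slots are */
};

/* Allocation-time lookup: the only place the array is read. */
static struct kmem_cache *lookup(struct kmem_cache *root, struct mem_cgroup *cg)
{
        int id = cg->kmemcg_id;

        if (id < 0 || !root->memcg_caches[id])
                return root;    /* fall back to the root cache */
        return root->memcg_caches[id];
}

/* Offline: clear the slot right away so it can be reused later. */
static void offline(struct kmem_cache *root, struct mem_cgroup *cg)
{
        cg->kmem_acct_active = 0;               /* no new child caches */
        root->memcg_caches[cg->kmemcg_id] = NULL;
        /* the id itself is only recycled once the css is freed */
}

int main(void)
{
        struct kmem_cache child = { .name = "dentry (memcg 0)" };
        struct kmem_cache root  = { .name = "dentry",
                                    .memcg_caches = { [0] = &child } };
        struct mem_cgroup cg    = { .kmemcg_id = 0, .kmem_acct_active = 1 };

        printf("allocate from: %s\n", lookup(&root, &cg)->name);
        offline(&root, &cg);
        printf("allocate from: %s\n", lookup(&root, &cg)->name);
        return 0;
}

The second lookup() falls back to the root cache because the slot was cleared at offline; that is the behaviour the real __memcg_kmem_get_cache() tolerates in the hunks below, which is what makes clearing the slots early safe.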
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -115,13 +115,12 @@ int slab_is_available(void);
 struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
                        unsigned long,
                        void (*)(void *));
-#ifdef CONFIG_MEMCG_KMEM
-void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
-void memcg_destroy_kmem_caches(struct mem_cgroup *);
-#endif
 void kmem_cache_destroy(struct kmem_cache *);
 int kmem_cache_shrink(struct kmem_cache *);
-void kmem_cache_free(struct kmem_cache *, void *);
+
+void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
+void memcg_deactivate_kmem_caches(struct mem_cgroup *);
+void memcg_destroy_kmem_caches(struct mem_cgroup *);
 
 /*
  * Please use this macro to create slab caches. Simply specify the
@@ -288,6 +287,7 @@ static __always_inline int kmalloc_index(size_t size)
 
 void *__kmalloc(size_t size, gfp_t flags);
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
+void kmem_cache_free(struct kmem_cache *, void *);
 
 #ifdef CONFIG_NUMA
 void *__kmalloc_node(size_t size, gfp_t flags, int node);
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -334,6 +334,7 @@ struct mem_cgroup {
 #if defined(CONFIG_MEMCG_KMEM)
        /* Index in the kmem_cache->memcg_params.memcg_caches array */
        int kmemcg_id;
+       bool kmem_acct_active;
 #endif
 
        int last_scanned_node;
@@ -354,7 +355,7 @@ struct mem_cgroup {
 #ifdef CONFIG_MEMCG_KMEM
 bool memcg_kmem_is_active(struct mem_cgroup *memcg)
 {
-       return memcg->kmemcg_id >= 0;
+       return memcg->kmem_acct_active;
 }
 #endif
 
@@ -585,7 +586,7 @@ static void memcg_free_cache_id(int id);
 
 static void disarm_kmem_keys(struct mem_cgroup *memcg)
 {
-       if (memcg_kmem_is_active(memcg)) {
+       if (memcg->kmemcg_id >= 0) {
                static_key_slow_dec(&memcg_kmem_enabled_key);
                memcg_free_cache_id(memcg->kmemcg_id);
        }
@@ -2666,6 +2667,7 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
 {
        struct mem_cgroup *memcg;
        struct kmem_cache *memcg_cachep;
+       int kmemcg_id;
 
        VM_BUG_ON(!is_root_cache(cachep));
 
@@ -2673,10 +2675,11 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
                return cachep;
 
        memcg = get_mem_cgroup_from_mm(current->mm);
-       if (!memcg_kmem_is_active(memcg))
+       kmemcg_id = ACCESS_ONCE(memcg->kmemcg_id);
+       if (kmemcg_id < 0)
                goto out;
 
-       memcg_cachep = cache_from_memcg_idx(cachep, memcg_cache_id(memcg));
+       memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
        if (likely(memcg_cachep))
                return memcg_cachep;
 
@@ -3318,8 +3321,8 @@ static int memcg_activate_kmem(struct mem_cgroup *memcg,
        int err = 0;
        int memcg_id;
 
-       if (memcg_kmem_is_active(memcg))
-               return 0;
+       BUG_ON(memcg->kmemcg_id >= 0);
+       BUG_ON(memcg->kmem_acct_active);
 
        /*
         * For simplicity, we won't allow this to be disabled. It also can't
@@ -3362,6 +3365,7 @@ static int memcg_activate_kmem(struct mem_cgroup *memcg,
         * patched.
         */
        memcg->kmemcg_id = memcg_id;
+       memcg->kmem_acct_active = true;
 out:
        return err;
 }
@@ -4041,6 +4045,22 @@ static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
        return mem_cgroup_sockets_init(memcg, ss);
 }
 
+static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
+{
+       if (!memcg->kmem_acct_active)
+               return;
+
+       /*
+        * Clear the 'active' flag before clearing memcg_caches arrays entries.
+        * Since we take the slab_mutex in memcg_deactivate_kmem_caches(), it
+        * guarantees no cache will be created for this cgroup after we are
+        * done (see memcg_create_kmem_cache()).
+        */
+       memcg->kmem_acct_active = false;
+
+       memcg_deactivate_kmem_caches(memcg);
+}
+
 static void memcg_destroy_kmem(struct mem_cgroup *memcg)
 {
        memcg_destroy_kmem_caches(memcg);
@@ -4052,6 +4072,10 @@ static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
        return 0;
 }
 
+static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
+{
+}
+
 static void memcg_destroy_kmem(struct mem_cgroup *memcg)
 {
 }
@@ -4608,6 +4632,8 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
        spin_unlock(&memcg->event_list_lock);
 
        vmpressure_cleanup(&memcg->vmpressure);
+
+       memcg_deactivate_kmem(memcg);
 }
 
 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -440,18 +440,8 @@ static int do_kmem_cache_shutdown(struct kmem_cache *s,
                *need_rcu_barrier = true;
 
 #ifdef CONFIG_MEMCG_KMEM
-       if (!is_root_cache(s)) {
-               int idx;
-               struct memcg_cache_array *arr;
-
-               idx = memcg_cache_id(s->memcg_params.memcg);
-               arr = rcu_dereference_protected(s->memcg_params.root_cache->
-                                               memcg_params.memcg_caches,
-                                               lockdep_is_held(&slab_mutex));
-               BUG_ON(arr->entries[idx] != s);
-               arr->entries[idx] = NULL;
+       if (!is_root_cache(s))
                list_del(&s->memcg_params.list);
-       }
 #endif
        list_move(&s->list, release);
        return 0;
@@ -499,6 +489,13 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
 
        mutex_lock(&slab_mutex);
 
+       /*
+        * The memory cgroup could have been deactivated while the cache
+        * creation work was pending.
+        */
+       if (!memcg_kmem_is_active(memcg))
+               goto out_unlock;
+
        idx = memcg_cache_id(memcg);
        arr = rcu_dereference_protected(root_cache->memcg_params.memcg_caches,
                                        lockdep_is_held(&slab_mutex));
@@ -548,6 +545,26 @@ out_unlock:
        put_online_cpus();
 }
 
+void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
+{
+       int idx;
+       struct memcg_cache_array *arr;
+       struct kmem_cache *s;
+
+       idx = memcg_cache_id(memcg);
+
+       mutex_lock(&slab_mutex);
+       list_for_each_entry(s, &slab_caches, list) {
+               if (!is_root_cache(s))
+                       continue;
+
+               arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
+                                               lockdep_is_held(&slab_mutex));
+               arr->entries[idx] = NULL;
+       }
+       mutex_unlock(&slab_mutex);
+}
+
 void memcg_destroy_kmem_caches(struct mem_cgroup *memcg)
 {
        LIST_HEAD(release);