mm: memcontrol: report slab usage in cgroup2 memory.stat

Show how much memory is used for storing reclaimable and unreclaimable
in-kernel data structures allocated from slab caches.

Signed-off-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author:    Vladimir Davydov
Date:      2016-03-17 14:17:35 -07:00
Committer: Linus Torvalds
Commit:    27ee57c93f (parent 72b54e7314)

6 changed files with 79 additions and 6 deletions
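For a quick look at the new interface, here is a minimal userspace sketch that reads memory.stat and prints the slab-related entries this patch adds. The cgroup2 mount point /sys/fs/cgroup and the cgroup name "test" are assumptions for illustration, not part of the patch; with the patch applied, "slab" should match the sum of "slab_reclaimable" and "slab_unreclaimable":

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char key[64];
		unsigned long long val;
		/* Hypothetical cgroup path; point it at a real cgroup2 hierarchy. */
		FILE *f = fopen("/sys/fs/cgroup/test/memory.stat", "r");

		if (!f) {
			perror("memory.stat");
			return 1;
		}
		/* Each memory.stat line is "<name> <value>". */
		while (fscanf(f, "%63s %llu", key, &val) == 2) {
			/* Matches slab, slab_reclaimable, slab_unreclaimable. */
			if (!strncmp(key, "slab", 4))
				printf("%s = %llu bytes\n", key, val);
		}
		fclose(f);
		return 0;
	}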

--- a/Documentation/cgroup-v2.txt
+++ b/Documentation/cgroup-v2.txt

@@ -843,6 +843,11 @@ PAGE_SIZE multiple when read back.
 		Amount of memory used to cache filesystem data,
 		including tmpfs and shared memory.
 
+	  slab
+
+		Amount of memory used for storing in-kernel data
+		structures.
+
 	  sock
 
 		Amount of memory used in network transmission buffers
@@ -871,6 +876,16 @@ PAGE_SIZE multiple when read back.
 		on the internal memory management lists used by the
 		page reclaim algorithm
 
+	  slab_reclaimable
+
+		Part of "slab" that might be reclaimed, such as
+		dentries and inodes.
+
+	  slab_unreclaimable
+
+		Part of "slab" that cannot be reclaimed on memory
+		pressure.
+
 	  pgfault
 
 		Total number of page faults incurred
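With the patch applied, a memory.stat read might include lines like the following (the values are hypothetical, chosen only so that slab equals slab_reclaimable plus slab_unreclaimable):

	anon 8978432
	file 38109184
	slab 4460544
	sock 0
	...
	slab_reclaimable 3276800
	slab_unreclaimable 1183744
	pgfault 9214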

--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h

@@ -53,6 +53,8 @@ enum mem_cgroup_stat_index {
 	MEM_CGROUP_STAT_NSTATS,
 	/* default hierarchy stats */
 	MEMCG_SOCK = MEM_CGROUP_STAT_NSTATS,
+	MEMCG_SLAB_RECLAIMABLE,
+	MEMCG_SLAB_UNRECLAIMABLE,
 	MEMCG_NR_STAT,
 };
@@ -883,6 +885,20 @@ static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
 	if (memcg_kmem_enabled())
 		__memcg_kmem_put_cache(cachep);
 }
+
+/**
+ * memcg_kmem_update_page_stat - update kmem page state statistics
+ * @page: the page
+ * @idx: page state item to account
+ * @val: number of pages (positive or negative)
+ */
+static inline void memcg_kmem_update_page_stat(struct page *page,
+				enum mem_cgroup_stat_index idx, int val)
+{
+	if (memcg_kmem_enabled() && page->mem_cgroup)
+		this_cpu_add(page->mem_cgroup->stat->count[idx], val);
+}
+
 #else
 #define for_each_memcg_cache_index(_idx)	\
 	for (; NULL; )
@@ -928,6 +944,11 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
 static inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
 {
 }
+
+static inline void memcg_kmem_update_page_stat(struct page *page,
+				enum mem_cgroup_stat_index idx, int val)
+{
+}
 #endif /* CONFIG_MEMCG && !CONFIG_SLOB */
 
 #endif /* _LINUX_MEMCONTROL_H */
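Note that memcg_kmem_update_page_stat() only bumps a per-cpu counter; nothing is summed at update time. The value reported in memory.stat is folded across CPUs at read time, along the lines of the existing mem_cgroup_read_stat() helper. The sketch below is paraphrased from memory of that helper and is not part of this patch:

	static unsigned long mem_cgroup_read_stat(struct mem_cgroup *memcg,
						  enum mem_cgroup_stat_index idx)
	{
		long val = 0;
		int cpu;

		for_each_possible_cpu(cpu)
			val += per_cpu(memcg->stat->count[idx], cpu);
		/*
		 * Summing races with concurrent per-cpu updates, so the
		 * result can be transiently negative; clamp before use.
		 */
		if (val < 0)
			val = 0;
		return val;
	}

This is also why val in memcg_kmem_update_page_stat() may be negative: an uncharge subtracts on whichever CPU it happens to run, and only the cross-CPU sum is meaningful.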

--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c

@@ -5106,6 +5106,9 @@ static int memory_stat_show(struct seq_file *m, void *v)
 		   (u64)stat[MEM_CGROUP_STAT_RSS] * PAGE_SIZE);
 	seq_printf(m, "file %llu\n",
 		   (u64)stat[MEM_CGROUP_STAT_CACHE] * PAGE_SIZE);
+	seq_printf(m, "slab %llu\n",
+		   (u64)(stat[MEMCG_SLAB_RECLAIMABLE] +
+			 stat[MEMCG_SLAB_UNRECLAIMABLE]) * PAGE_SIZE);
 	seq_printf(m, "sock %llu\n",
 		   (u64)stat[MEMCG_SOCK] * PAGE_SIZE);
@@ -5126,6 +5129,11 @@ static int memory_stat_show(struct seq_file *m, void *v)
 			   mem_cgroup_lru_names[i], (u64)val * PAGE_SIZE);
 	}
 
+	seq_printf(m, "slab_reclaimable %llu\n",
+		   (u64)stat[MEMCG_SLAB_RECLAIMABLE] * PAGE_SIZE);
+	seq_printf(m, "slab_unreclaimable %llu\n",
+		   (u64)stat[MEMCG_SLAB_UNRECLAIMABLE] * PAGE_SIZE);
+
 	/* Accumulated memory events */
 	seq_printf(m, "pgfault %lu\n",

--- a/mm/slab.c
+++ b/mm/slab.c

@@ -1442,9 +1442,10 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
  */
 static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
 {
-	const unsigned long nr_freed = (1 << cachep->gfporder);
+	int order = cachep->gfporder;
+	unsigned long nr_freed = (1 << order);
 
-	kmemcheck_free_shadow(page, cachep->gfporder);
+	kmemcheck_free_shadow(page, order);
 
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
 		sub_zone_page_state(page_zone(page),
@@ -1461,7 +1462,8 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += nr_freed;
-	__free_kmem_pages(page, cachep->gfporder);
+	memcg_uncharge_slab(page, order, cachep);
+	__free_pages(page, order);
 }
 
 static void kmem_rcu_free(struct rcu_head *head)

--- a/mm/slab.h
+++ b/mm/slab.h

@@ -246,12 +246,33 @@ static __always_inline int memcg_charge_slab(struct page *page,
 						gfp_t gfp, int order,
 						struct kmem_cache *s)
 {
+	int ret;
+
 	if (!memcg_kmem_enabled())
 		return 0;
 	if (is_root_cache(s))
 		return 0;
-	return __memcg_kmem_charge_memcg(page, gfp, order,
-					 s->memcg_params.memcg);
+
+	ret = __memcg_kmem_charge_memcg(page, gfp, order,
+					s->memcg_params.memcg);
+	if (ret)
+		return ret;
+
+	memcg_kmem_update_page_stat(page,
+			(s->flags & SLAB_RECLAIM_ACCOUNT) ?
+			MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
+			1 << order);
+	return 0;
+}
+
+static __always_inline void memcg_uncharge_slab(struct page *page, int order,
+						struct kmem_cache *s)
+{
+	memcg_kmem_update_page_stat(page,
+			(s->flags & SLAB_RECLAIM_ACCOUNT) ?
+			MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
+			-(1 << order));
+	memcg_kmem_uncharge(page, order);
 }
 
 extern void slab_init_memcg_params(struct kmem_cache *);
@@ -294,6 +315,11 @@ static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
 	return 0;
 }
 
+static inline void memcg_uncharge_slab(struct page *page, int order,
+				       struct kmem_cache *s)
+{
+}
+
 static inline void slab_init_memcg_params(struct kmem_cache *s)
 {
 }
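The charge and uncharge paths are meant to bracket a slab page's lifetime, so the counters stay balanced: whatever memcg_charge_slab() adds on allocation, memcg_uncharge_slab() subtracts on free. A minimal sketch of that pairing follows; alloc_slab_pages()/free_slab_pages() are hypothetical stand-ins for the allocator-specific paths (kmem_getpages()/kmem_freepages() in SLAB, the slab alloc/free paths in SLUB), not functions from this patch:

	/* Hypothetical allocation path pairing page allocation with the charge. */
	static struct page *alloc_slab_pages(struct kmem_cache *s,
					     gfp_t gfp, int order)
	{
		struct page *page = alloc_pages(gfp, order);

		if (page && memcg_charge_slab(page, gfp, order, s)) {
			/* Charge failed: the page never becomes a slab. */
			__free_pages(page, order);
			return NULL;
		}
		return page;	/* stat bumped by 1 << order on success */
	}

	/* Hypothetical free path: drop the stat and the charge, then the pages. */
	static void free_slab_pages(struct kmem_cache *s,
				    struct page *page, int order)
	{
		memcg_uncharge_slab(page, order, s);
		__free_pages(page, order);
	}

Note the ordering on the free side: memcg_uncharge_slab() must run before __free_pages(), since it needs page->mem_cgroup to find the right counters.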

--- a/mm/slub.c
+++ b/mm/slub.c

@@ -1540,7 +1540,8 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	page_mapcount_reset(page);
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;
-	__free_kmem_pages(page, order);
+	memcg_uncharge_slab(page, order, s);
+	__free_pages(page, order);
 }
 
 #define need_reserve_slab_rcu						\