mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-11 12:28:41 +08:00
memcg: simplify and inline __mem_cgroup_from_kmem
Before the previous patch ("memcg: unify slab and other kmem pages charging"), __mem_cgroup_from_kmem had to handle two types of kmem - slab pages and pages allocated with alloc_kmem_pages - memcg in the page struct. Now we can unify it. Since after it, this function becomes tiny we can fold it into mem_cgroup_from_kmem. [hughd@google.com: move mem_cgroup_from_kmem into list_lru.c] Signed-off-by: Vladimir Davydov <vdavydov@virtuozzo.com> Acked-by: Michal Hocko <mhocko@suse.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Hugh Dickins <hughd@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
f3ccb2c422
commit
df4065516b
@ -770,8 +770,6 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
|
||||
struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep);
|
||||
void __memcg_kmem_put_cache(struct kmem_cache *cachep);
|
||||
|
||||
struct mem_cgroup *__mem_cgroup_from_kmem(void *ptr);
|
||||
|
||||
static inline bool __memcg_kmem_bypass(gfp_t gfp)
|
||||
{
|
||||
if (!memcg_kmem_enabled())
|
||||
@ -830,13 +828,6 @@ static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
|
||||
if (memcg_kmem_enabled())
|
||||
__memcg_kmem_put_cache(cachep);
|
||||
}
|
||||
|
||||
static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
|
||||
{
|
||||
if (!memcg_kmem_enabled())
|
||||
return NULL;
|
||||
return __mem_cgroup_from_kmem(ptr);
|
||||
}
|
||||
#else
|
||||
#define for_each_memcg_cache_index(_idx) \
|
||||
for (; NULL; )
|
||||
@ -882,11 +873,5 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
|
||||
/*
 * memcg_kmem_put_cache - no-op stub for !CONFIG_MEMCG_KMEM builds.
 * @cachep: cache previously returned by memcg_kmem_get_cache().
 *
 * Without kmem accounting there is no per-memcg cache reference to drop.
 */
static inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
{
}
|
||||
|
||||
/*
 * mem_cgroup_from_kmem - stub for !CONFIG_MEMCG_KMEM builds.
 * @ptr: pointer returned by a kmem allocator (ignored).
 *
 * With kmem accounting compiled out, no allocation belongs to a memcg.
 */
static inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
{
	return NULL;
}
|
||||
#endif /* CONFIG_MEMCG_KMEM */
|
||||
#endif /* _LINUX_MEMCONTROL_H */
|
||||
|
||||
|
@ -63,6 +63,16 @@ list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
|
||||
return &nlru->lru;
|
||||
}
|
||||
|
||||
static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
|
||||
{
|
||||
struct page *page;
|
||||
|
||||
if (!memcg_kmem_enabled())
|
||||
return NULL;
|
||||
page = virt_to_head_page(ptr);
|
||||
return page->mem_cgroup;
|
||||
}
|
||||
|
||||
static inline struct list_lru_one *
|
||||
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
|
||||
{
|
||||
|
@ -2430,24 +2430,6 @@ void __memcg_kmem_uncharge(struct page *page, int order)
|
||||
page->mem_cgroup = NULL;
|
||||
css_put_many(&memcg->css, nr_pages);
|
||||
}
|
||||
|
||||
struct mem_cgroup *__mem_cgroup_from_kmem(void *ptr)
|
||||
{
|
||||
struct mem_cgroup *memcg = NULL;
|
||||
struct kmem_cache *cachep;
|
||||
struct page *page;
|
||||
|
||||
page = virt_to_head_page(ptr);
|
||||
if (PageSlab(page)) {
|
||||
cachep = page->slab_cache;
|
||||
if (!is_root_cache(cachep))
|
||||
memcg = cachep->memcg_params.memcg;
|
||||
} else
|
||||
/* page allocated by alloc_kmem_pages */
|
||||
memcg = page->mem_cgroup;
|
||||
|
||||
return memcg;
|
||||
}
|
||||
#endif /* CONFIG_MEMCG_KMEM */
|
||||
|
||||
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
|
||||
|
Loading…
Reference in New Issue
Block a user