mm: memcontrol: rename memcg_kmem_enabled()
Currently there are two kmem-related helper functions with confusing semantics: memcg_kmem_enabled() and mem_cgroup_kmem_disabled(). The problem is that the obvious expectation, memcg_kmem_enabled() == !mem_cgroup_kmem_disabled(), can be false.

mem_cgroup_kmem_disabled() is similar to mem_cgroup_disabled(): it returns true only if CONFIG_MEMCG_KMEM is not set or kmem accounting is disabled with the boot-time kernel option "cgroup.memory=nokmem". Its value never changes at runtime.

memcg_kmem_enabled() is different: it always returns false until the first non-root memory cgroup comes online (assuming kernel memory accounting is enabled). Its goal is to improve performance on systems where cgroupfs is not mounted, the memory controller is not enabled, or only the root memory cgroup exists.

To make things more obvious and avoid potential bugs, rename memcg_kmem_enabled() to memcg_kmem_online().

Link: https://lkml.kernel.org/r/20230213192922.1146370-1-roman.gushchin@linux.dev
Signed-off-by: Roman Gushchin <roman.gushchin@linux.dev>
Acked-by: Muchun Song <songmuchun@bytedance.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Dennis Zhou <dennis@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit f7a449f779
parent b4fb12e6c7
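For illustration, a minimal user-space sketch of why the two predicates can disagree, as the commit message describes. Plain booleans stand in for the kernel's static key and cgroup state; this is a model of the semantics, not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Set at boot: true when CONFIG_MEMCG_KMEM=n or "cgroup.memory=nokmem" is passed. */
static bool kmem_disabled_at_boot;

/* Flipped once the first non-root memory cgroup comes online. */
static bool nonroot_memcg_online;

static bool mem_cgroup_kmem_disabled(void)
{
        return kmem_disabled_at_boot;
}

/* Formerly memcg_kmem_enabled(); renamed to match its dynamic behaviour. */
static bool memcg_kmem_online(void)
{
        return nonroot_memcg_online;
}

int main(void)
{
        /*
         * Early after boot: kmem accounting is not disabled, yet not "online"
         * either, so memcg_kmem_online() != !mem_cgroup_kmem_disabled().
         */
        printf("disabled=%d online=%d\n",
               mem_cgroup_kmem_disabled(), memcg_kmem_online());

        /* The first non-root memory cgroup comes online. */
        nonroot_memcg_online = true;
        printf("disabled=%d online=%d\n",
               mem_cgroup_kmem_disabled(), memcg_kmem_online());
        return 0;
}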
@@ -1776,24 +1776,24 @@ struct obj_cgroup *get_obj_cgroup_from_page(struct page *page);
 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
 
-extern struct static_key_false memcg_kmem_enabled_key;
+extern struct static_key_false memcg_kmem_online_key;
 
-static inline bool memcg_kmem_enabled(void)
+static inline bool memcg_kmem_online(void)
 {
-        return static_branch_likely(&memcg_kmem_enabled_key);
+        return static_branch_likely(&memcg_kmem_online_key);
 }
 
 static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
                                          int order)
 {
-        if (memcg_kmem_enabled())
+        if (memcg_kmem_online())
                 return __memcg_kmem_charge_page(page, gfp, order);
         return 0;
 }
 
 static inline void memcg_kmem_uncharge_page(struct page *page, int order)
 {
-        if (memcg_kmem_enabled())
+        if (memcg_kmem_online())
                 __memcg_kmem_uncharge_page(page, order);
 }
 
@@ -1814,7 +1814,7 @@ static inline void count_objcg_event(struct obj_cgroup *objcg,
 {
         struct mem_cgroup *memcg;
 
-        if (!memcg_kmem_enabled())
+        if (!memcg_kmem_online())
                 return;
 
         rcu_read_lock();
@@ -1854,7 +1854,7 @@ static inline struct obj_cgroup *get_obj_cgroup_from_page(struct page *page)
         return NULL;
 }
 
-static inline bool memcg_kmem_enabled(void)
+static inline bool memcg_kmem_online(void)
 {
         return false;
 }
@@ -345,8 +345,8 @@ static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
  * conditional to this static branch, we'll have to allow modules that does
  * kmem_cache_alloc and the such to see this symbol as well
  */
-DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
-EXPORT_SYMBOL(memcg_kmem_enabled_key);
+DEFINE_STATIC_KEY_FALSE(memcg_kmem_online_key);
+EXPORT_SYMBOL(memcg_kmem_online_key);
 #endif
 
 /**
@@ -3034,7 +3034,7 @@ struct obj_cgroup *get_obj_cgroup_from_page(struct page *page)
 {
         struct obj_cgroup *objcg;
 
-        if (!memcg_kmem_enabled())
+        if (!memcg_kmem_online())
                 return NULL;
 
         if (PageMemcgKmem(page)) {
@@ -3746,7 +3746,7 @@ static int memcg_online_kmem(struct mem_cgroup *memcg)
         objcg->memcg = memcg;
         rcu_assign_pointer(memcg->objcg, objcg);
 
-        static_branch_enable(&memcg_kmem_enabled_key);
+        static_branch_enable(&memcg_kmem_online_key);
 
         memcg->kmemcg_id = memcg->id.id;
 
@@ -1410,7 +1410,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
                  * Do not let hwpoison pages hit pcplists/buddy
                  * Untie memcg state and reset page's owner
                  */
-                if (memcg_kmem_enabled() && PageMemcgKmem(page))
+                if (memcg_kmem_online() && PageMemcgKmem(page))
                         __memcg_kmem_uncharge_page(page, order);
                 reset_page_owner(page, order);
                 page_table_check_free(page, order);
@@ -1441,7 +1441,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
         }
         if (PageMappingFlags(page))
                 page->mapping = NULL;
-        if (memcg_kmem_enabled() && PageMemcgKmem(page))
+        if (memcg_kmem_online() && PageMemcgKmem(page))
                 __memcg_kmem_uncharge_page(page, order);
         if (check_free && free_page_is_bad(page))
                 bad++;
@@ -5432,7 +5432,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
                 goto out;
 
         /* Bulk allocator does not support memcg accounting. */
-        if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT))
+        if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT))
                 goto failed;
 
         /* Use the single page allocator for one page. */
@@ -5604,7 +5604,7 @@ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
                 page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
 
 out:
-        if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) && page &&
+        if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page &&
             unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
                 __free_pages(page, order);
                 page = NULL;
@@ -1625,7 +1625,7 @@ static bool pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp,
 {
         struct obj_cgroup *objcg;
 
-        if (!memcg_kmem_enabled() || !(gfp & __GFP_ACCOUNT))
+        if (!memcg_kmem_online() || !(gfp & __GFP_ACCOUNT))
                 return true;
 
         objcg = get_obj_cgroup_from_current();
mm/slab.h
@@ -494,7 +494,7 @@ static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
 {
         struct obj_cgroup *objcg;
 
-        if (!memcg_kmem_enabled())
+        if (!memcg_kmem_online())
                 return true;
 
         if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
@@ -535,7 +535,7 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
         unsigned long off;
         size_t i;
 
-        if (!memcg_kmem_enabled() || !objcg)
+        if (!memcg_kmem_online() || !objcg)
                 return;
 
         for (i = 0; i < size; i++) {
@@ -567,7 +567,7 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
         struct obj_cgroup **objcgs;
         int i;
 
-        if (!memcg_kmem_enabled())
+        if (!memcg_kmem_online())
                 return;
 
         objcgs = slab_objcgs(slab);
@@ -649,7 +649,7 @@ static inline struct kmem_cache *virt_to_cache(const void *obj)
 static __always_inline void account_slab(struct slab *slab, int order,
                                          struct kmem_cache *s, gfp_t gfp)
 {
-        if (memcg_kmem_enabled() && (s->flags & SLAB_ACCOUNT))
+        if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT))
                 memcg_alloc_slab_cgroups(slab, s, gfp, true);
 
         mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
@@ -659,7 +659,7 @@ static __always_inline void account_slab(struct slab *slab, int order,
 static __always_inline void unaccount_slab(struct slab *slab, int order,
                                            struct kmem_cache *s)
 {
-        if (memcg_kmem_enabled())
+        if (memcg_kmem_online())
                 memcg_free_slab_cgroups(slab);
 
         mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
@@ -915,7 +915,7 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
                 }
 
                 /* Call non-slab shrinkers even though kmem is disabled */
-                if (!memcg_kmem_enabled() &&
+                if (!memcg_kmem_online() &&
                     !(shrinker->flags & SHRINKER_NONSLAB))
                         continue;
 
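For context, the renamed check stays cheap because it is backed by a static branch: the key is patched in at most once, when the first non-root memory cgroup with kmem accounting comes online. A condensed, illustrative kernel-style fragment of the pattern the diff touches is shown below; it is not a complete file and compiles only in-kernel, and example_enable_kmem_accounting() is a hypothetical stand-in for the real memcg_online_kmem() path seen in the diff.

#include <linux/jump_label.h>

/* Default-off key: memcg_kmem_online() behaves as a patched-out branch until enabled. */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_online_key);
EXPORT_SYMBOL(memcg_kmem_online_key);   /* visible to modules that do kmem_cache_alloc() */

static inline bool memcg_kmem_online(void)
{
        return static_branch_likely(&memcg_kmem_online_key);
}

/* Placeholder for memcg_online_kmem(): flip the key when the first non-root memcg appears. */
static void example_enable_kmem_accounting(void)
{
        static_branch_enable(&memcg_kmem_online_key);
}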