mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-11 04:18:39 +08:00
mm/memcg: relocate mod_objcg_mlstate(), get_obj_stock() and put_obj_stock()
All the calls to mod_objcg_mlstate(), get_obj_stock() and put_obj_stock() are done by functions defined within the same "#ifdef CONFIG_MEMCG_KMEM" compilation block. When CONFIG_MEMCG_KMEM isn't defined, the following compilation warnings will be issued [1] and [2]. mm/memcontrol.c:785:20: warning: unused function 'mod_objcg_mlstate' mm/memcontrol.c:2113:33: warning: unused function 'get_obj_stock' Fix these warning by moving those functions to under the same CONFIG_MEMCG_KMEM compilation block. There is no functional change. [1] https://lore.kernel.org/lkml/202111272014.WOYNLUV6-lkp@intel.com/ [2] https://lore.kernel.org/lkml/202111280551.LXsWYt1T-lkp@intel.com/ Link: https://lkml.kernel.org/r/20211129161140.306488-1-longman@redhat.com Fixes:559271146e
("mm/memcg: optimize user context object stock access") Fixes:68ac5b3c8d
("mm/memcg: cache vmstat data in percpu memcg_stock_pcp") Signed-off-by: Waiman Long <longman@redhat.com> Reported-by: kernel test robot <lkp@intel.com> Reviewed-by: Shakeel Butt <shakeelb@google.com> Acked-by: Roman Gushchin <guro@fb.com> Reviewed-by: Muchun Song <songmuchun@bytedance.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Michal Hocko <mhocko@kernel.org> Cc: Vladimir Davydov <vdavydov.dev@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
005a79e5c2
commit
a7ebf564de
106
mm/memcontrol.c
106
mm/memcontrol.c
@ -776,24 +776,6 @@ void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
/*
 * mod_objcg_mlstate() may be called with irq enabled, so
 * mod_memcg_lruvec_state() should be used.
 *
 * Update a per-node vmstat counter (@idx by @nr) on behalf of the object
 * cgroup @objcg for node @pgdat.  The objcg -> memcg association can change
 * under us (reparenting), so the lookup is done under rcu_read_lock() to
 * keep the memcg alive for the duration of the update.
 */
static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
				     struct pglist_data *pgdat,
				     enum node_stat_item idx, int nr)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	/* Resolve the current memcg backing this objcg, then its lruvec. */
	memcg = obj_cgroup_memcg(objcg);
	lruvec = mem_cgroup_lruvec(memcg, pgdat);
	/*
	 * mod_memcg_lruvec_state() (not the __-prefixed variant) because
	 * callers may arrive with irqs enabled — see header comment.
	 */
	mod_memcg_lruvec_state(lruvec, idx, nr);
	rcu_read_unlock();
}
|
||||
|
||||
/**
|
||||
* __count_memcg_events - account VM events in a cgroup
|
||||
* @memcg: the memory cgroup
|
||||
@ -2137,41 +2119,6 @@ static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
 * Most kmem_cache_alloc() calls are from user context. The irq disable/enable
 * sequence used in this case to access content from object stock is slow.
 * To optimize for user context access, there are now two object stocks for
 * task context and interrupt context access respectively.
 *
 * The task context object stock can be accessed by disabling preemption only
 * which is cheap in non-preempt kernel. The interrupt context object stock
 * can only be accessed after disabling interrupt. User context code can
 * access interrupt object stock, but not vice versa.
 *
 * Returns a pointer to the per-cpu obj_stock appropriate for the current
 * context.  On return, either preemption (task context) or local irqs
 * (interrupt context) are disabled; *@pflags holds whatever state
 * put_obj_stock() needs to undo this.  Every call must be paired with
 * put_obj_stock(*pflags).
 */
static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
{
	struct memcg_stock_pcp *stock;

	if (likely(in_task())) {
		/*
		 * Task context: preemption off is enough to pin the cpu and
		 * exclude other task-context users of task_obj.  *pflags is
		 * unused on this path; zero it so put_obj_stock() has a
		 * defined value either way.
		 */
		*pflags = 0UL;
		preempt_disable();
		stock = this_cpu_ptr(&memcg_stock);
		return &stock->task_obj;
	}

	/* Interrupt context: irq_obj requires irqs to be disabled. */
	local_irq_save(*pflags);
	stock = this_cpu_ptr(&memcg_stock);
	return &stock->irq_obj;
}
|
||||
|
||||
/*
 * Undo get_obj_stock(): re-enable preemption (task context) or restore the
 * irq state saved in @flags (interrupt context).  The in_task() test must
 * match the branch taken in get_obj_stock(), which it does because the
 * context cannot change between the paired calls.
 */
static inline void put_obj_stock(unsigned long flags)
{
	if (likely(in_task()))
		preempt_enable();
	else
		local_irq_restore(flags);
}
|
||||
|
||||
/**
|
||||
* consume_stock: Try to consume stocked charge on this cpu.
|
||||
* @memcg: memcg to consume from.
|
||||
@ -2816,6 +2763,59 @@ retry:
|
||||
*/
|
||||
#define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
|
||||
|
||||
/*
 * Most kmem_cache_alloc() calls are from user context. The irq disable/enable
 * sequence used in this case to access content from object stock is slow.
 * To optimize for user context access, there are now two object stocks for
 * task context and interrupt context access respectively.
 *
 * The task context object stock can be accessed by disabling preemption only
 * which is cheap in non-preempt kernel. The interrupt context object stock
 * can only be accessed after disabling interrupt. User context code can
 * access interrupt object stock, but not vice versa.
 *
 * Returns the per-cpu obj_stock for the current context with the matching
 * protection held: preemption disabled for task context, local irqs
 * disabled (previous state saved in *@pflags) for interrupt context.
 * Must be paired with put_obj_stock(*pflags).
 */
static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
{
	struct memcg_stock_pcp *stock;

	if (likely(in_task())) {
		/*
		 * Task path: disabling preemption pins this cpu's stock.
		 * *pflags is not needed here; initialize it anyway so the
		 * caller always hands put_obj_stock() a defined value.
		 */
		*pflags = 0UL;
		preempt_disable();
		stock = this_cpu_ptr(&memcg_stock);
		return &stock->task_obj;
	}

	/* Interrupt path: irq_obj may only be touched with irqs off. */
	local_irq_save(*pflags);
	stock = this_cpu_ptr(&memcg_stock);
	return &stock->irq_obj;
}
|
||||
|
||||
/*
 * Release the protection taken by get_obj_stock(): preemption for task
 * context, or the saved irq state in @flags for interrupt context.  The
 * branch mirrors get_obj_stock() — the context cannot change between the
 * paired calls, so in_task() yields the same answer in both.
 */
static inline void put_obj_stock(unsigned long flags)
{
	if (likely(in_task()))
		preempt_enable();
	else
		local_irq_restore(flags);
}
|
||||
|
||||
/*
 * mod_objcg_mlstate() may be called with irq enabled, so
 * mod_memcg_lruvec_state() should be used.
 *
 * Charge @nr units of the per-node stat @idx on node @pgdat to the memcg
 * currently backing @objcg.  The objcg -> memcg link may be retargeted
 * concurrently (reparenting), so it is resolved and used entirely inside an
 * RCU read-side critical section.
 */
static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
				     struct pglist_data *pgdat,
				     enum node_stat_item idx, int nr)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	/* Look up the live memcg for this objcg and its per-node lruvec. */
	memcg = obj_cgroup_memcg(objcg);
	lruvec = mem_cgroup_lruvec(memcg, pgdat);
	/* Irq-safe variant, since callers may run with irqs enabled. */
	mod_memcg_lruvec_state(lruvec, idx, nr);
	rcu_read_unlock();
}
|
||||
|
||||
int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
|
||||
gfp_t gfp, bool new_page)
|
||||
{
|
||||
|
Loading…
Reference in New Issue
Block a user