
memcg: collect kmem bypass conditions into __memcg_kmem_bypass()

memcg_kmem_newpage_charge() and memcg_kmem_get_cache() are testing the
same series of conditions to decide whether to bypass kmem accounting.
Collect the tests into __memcg_kmem_bypass().

This is pure refactoring.
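
For reference, this is roughly how the collected helper reads once the hunks
below are applied. It is assembled from the removed and added lines in this
diff; the comments are explanatory glosses added here, not part of the patch:

static inline bool __memcg_kmem_bypass(gfp_t gfp)
{
	/* kmem accounting is not enabled for any cgroup */
	if (!memcg_kmem_enabled())
		return true;
	/* the caller explicitly opted out of accounting */
	if (gfp & __GFP_NOACCOUNT)
		return true;
	/* __GFP_NOFAIL allocations proceed regardless, so don't gate them */
	if (gfp & __GFP_NOFAIL)
		return true;
	/* no sensible task context to charge against */
	if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
		return true;
	/* let a dying task finish without blocking on the charge path */
	if (unlikely(fatal_signal_pending(current)))
		return true;
	return false;
}

With this helper in place, both memcg_kmem_newpage_charge() and
memcg_kmem_get_cache() reduce to a single bypass test followed by the
slow-path call, as the hunks below show.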

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Vladimir Davydov <vdavydov@parallels.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit cbfb479809 (parent b23afb93d3)
Tejun Heo, 2015-11-05 18:46:14 -08:00; committed by Linus Torvalds
include/linux/memcontrol.h

@@ -777,20 +777,7 @@ int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp,
 		      unsigned long nr_pages);
 void memcg_uncharge_kmem(struct mem_cgroup *memcg, unsigned long nr_pages);
 
-/**
- * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
- * @gfp: the gfp allocation flags.
- * @memcg: a pointer to the memcg this was charged against.
- * @order: allocation order.
- *
- * returns true if the memcg where the current task belongs can hold this
- * allocation.
- *
- * We return true automatically if this allocation is not to be accounted to
- * any memcg.
- */
-static inline bool
-memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
+static inline bool __memcg_kmem_bypass(gfp_t gfp)
 {
 	if (!memcg_kmem_enabled())
 		return true;
@@ -812,6 +799,26 @@ memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
 	if (unlikely(fatal_signal_pending(current)))
 		return true;
+	return false;
+}
+
+/**
+ * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
+ * @gfp: the gfp allocation flags.
+ * @memcg: a pointer to the memcg this was charged against.
+ * @order: allocation order.
+ *
+ * returns true if the memcg where the current task belongs can hold this
+ * allocation.
+ *
+ * We return true automatically if this allocation is not to be accounted to
+ * any memcg.
+ */
+static inline bool
+memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
+{
+	if (__memcg_kmem_bypass(gfp))
+		return true;
 	return __memcg_kmem_newpage_charge(gfp, memcg, order);
 }
@@ -854,17 +861,8 @@ memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
 static __always_inline struct kmem_cache *
 memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
 {
-	if (!memcg_kmem_enabled())
+	if (__memcg_kmem_bypass(gfp))
 		return cachep;
-	if (gfp & __GFP_NOACCOUNT)
-		return cachep;
-	if (gfp & __GFP_NOFAIL)
-		return cachep;
-	if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
-		return cachep;
-	if (unlikely(fatal_signal_pending(current)))
-		return cachep;
 	return __memcg_kmem_get_cache(cachep);
 }