mirror of https://mirrors.bfsu.edu.cn/git/linux.git
mm/slab: factor out unlikely part of cache_free_alien()
cache_free_alien() is a rarely used function, called only when there is a node mismatch. But it is defined with the inline attribute, so it is inlined into __cache_free(), the core free function of the slab allocator. That uselessly makes the kmem_cache_free()/kfree() functions large. What we really need to inline is just the node-match check, so this patch factors the other parts of cache_free_alien() out to reduce the code size of kmem_cache_free()/kfree().

<Before>
nm -S mm/slab.o | grep -e "T kfree" -e "T kmem_cache_free"
00000000000011e0 0000000000000228 T kfree
0000000000000670 0000000000000216 T kmem_cache_free

<After>
nm -S mm/slab.o | grep -e "T kfree" -e "T kmem_cache_free"
0000000000001110 00000000000001b5 T kfree
0000000000000750 0000000000000181 T kmem_cache_free

You can see the slightly reduced text size: 0x228->0x1b5, 0x216->0x181.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
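As an aside, the pattern the patch applies is a general one: keep only the likely check in an inline wrapper and move the unlikely work out of line, so each call site inlines a single compare-and-branch instead of the whole slow path. Below is a minimal, self-contained C sketch of that pattern; the names free_check()/__free_remote() are hypothetical and only illustrate the shape of the split, not the kernel code itself.

/* Sketch of the inline fast-path / out-of-line slow-path split.
 * All names here are hypothetical, for illustration only.
 */
#include <stdio.h>

/* Branch hint mirroring the kernel's likely() macro. */
#define likely(x)	__builtin_expect(!!(x), 1)

/* Unlikely part, kept out of line: its body is emitted once rather than
 * duplicated into every caller of the inline wrapper. */
static __attribute__((noinline)) int __free_remote(int node, int page_node)
{
	printf("slow path: free from node %d queued to node %d\n",
	       node, page_node);
	return 1;
}

/* Likely part, kept inline: callers inline only this compare-and-branch. */
static inline int free_check(int node, int page_node)
{
	if (likely(node == page_node))
		return 0;	/* common case: object belongs to this node */

	return __free_remote(node, page_node);
}

int main(void)
{
	printf("local:  %d\n", free_check(0, 0));	/* fast path taken */
	printf("remote: %d\n", free_check(0, 1));	/* falls to slow path */
	return 0;
}

The size effect of such a split can be checked the same way as in the changelog above, e.g. with nm -S on the resulting object file.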
This commit is contained in:
parent d3aec34466
commit 25c4f304be
mm/slab.c | 38
@@ -984,46 +984,50 @@ static void drain_alien_cache(struct kmem_cache *cachep,
 	}
 }
 
-static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
+				int node, int page_node)
 {
-	int nodeid = page_to_nid(virt_to_page(objp));
 	struct kmem_cache_node *n;
 	struct alien_cache *alien = NULL;
 	struct array_cache *ac;
-	int node;
 	LIST_HEAD(list);
 
-	node = numa_mem_id();
-
-	/*
-	 * Make sure we are not freeing a object from another node to the array
-	 * cache on this cpu.
-	 */
-	if (likely(nodeid == node))
-		return 0;
-
 	n = get_node(cachep, node);
 	STATS_INC_NODEFREES(cachep);
-	if (n->alien && n->alien[nodeid]) {
-		alien = n->alien[nodeid];
+	if (n->alien && n->alien[page_node]) {
+		alien = n->alien[page_node];
 		ac = &alien->ac;
 		spin_lock(&alien->lock);
 		if (unlikely(ac->avail == ac->limit)) {
 			STATS_INC_ACOVERFLOW(cachep);
-			__drain_alien_cache(cachep, ac, nodeid, &list);
+			__drain_alien_cache(cachep, ac, page_node, &list);
 		}
 		ac_put_obj(cachep, ac, objp);
 		spin_unlock(&alien->lock);
 		slabs_destroy(cachep, &list);
 	} else {
-		n = get_node(cachep, nodeid);
+		n = get_node(cachep, page_node);
 		spin_lock(&n->list_lock);
-		free_block(cachep, &objp, 1, nodeid, &list);
+		free_block(cachep, &objp, 1, page_node, &list);
 		spin_unlock(&n->list_lock);
 		slabs_destroy(cachep, &list);
 	}
 	return 1;
 }
+
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+{
+	int page_node = page_to_nid(virt_to_page(objp));
+	int node = numa_mem_id();
+	/*
+	 * Make sure we are not freeing a object from another node to the array
+	 * cache on this cpu.
+	 */
+	if (likely(node == page_node))
+		return 0;
+
+	return __cache_free_alien(cachep, objp, node, page_node);
+}
 #endif
 
 /*