Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-11-30 23:54:04 +08:00)
mm: slab: free kmem_cache_node after destroy sysfs file
When slub_debug's alloc_calls_show is enabled, reading the sysfs file tries
to track the location and user of each slab object on every online node, so
the kmem_cache_node structures and cpu_cache/cpu_slab must not be freed
until the last reference to the sysfs file is dropped.

This fixes the following panic:

  BUG: unable to handle kernel NULL pointer dereference at 0000000000000020
  IP: list_locations+0x169/0x4e0
  PGD 257304067 PUD 438456067 PMD 0
  Oops: 0000 [#1] SMP
  CPU: 3 PID: 973074 Comm: cat ve: 0 Not tainted 3.10.0-229.7.2.ovz.9.30-00007-japdoll-dirty #2 9.30
  Hardware name: DEPO Computers To Be Filled By O.E.M./H67DE3, BIOS L1.60c 07/14/2011
  task: ffff88042a5dc5b0 ti: ffff88037f8d8000 task.ti: ffff88037f8d8000
  RIP: list_locations+0x169/0x4e0
  Call Trace:
    alloc_calls_show+0x1d/0x30
    slab_attr_show+0x1b/0x30
    sysfs_read_file+0x9a/0x1a0
    vfs_read+0x9c/0x170
    SyS_read+0x58/0xb0
    system_call_fastpath+0x16/0x1b
  Code: 5e 07 12 00 b9 00 04 00 00 3d 00 04 00 00 0f 4f c1 3d 00 04 00 00 89 45 b0 0f 84 c3 00 00 00 48 63 45 b0 49 8b 9c c4 f8 00 00 00 <48> 8b 43 20 48 85 c0 74 b6 48 89 df e8 46 37 44 00 48 8b 53 10
  CR2: 0000000000000020

Separate __kmem_cache_release() from __kmem_cache_shutdown(); the release
half is now called from slab_kmem_cache_release(), after the last reference
to the sysfs file object has been dropped.

Reintroduce locking in free_partial(), since a sysfs file may still access
the cache's partial list after shutdown; this is a partial revert of commit
69cb8e6b7c ("slub: free slabs without holding locks").

Zap __remove_partial() and use remove_partial() (without underscores)
instead, since free_partial() now takes list_lock; this is a partial revert
of commit 1e4dd9461f ("slub: do not assert not having lock in removing
freed partial").
("slub: do not assert not having lock in removing freed partial") Signed-off-by: Dmitry Safonov <dsafonov@virtuozzo.com> Suggested-by: Vladimir Davydov <vdavydov@virtuozzo.com> Acked-by: Vladimir Davydov <vdavydov@virtuozzo.com> Cc: Christoph Lameter <cl@linux.com> Cc: Pekka Enberg <penberg@kernel.org> Cc: David Rientjes <rientjes@google.com> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 1ac0b6dec6
commit 52b4b950b5
5 files changed, 29 insertions(+), 27 deletions(-)

 mm/slab.c        | 12 ++++++------
 mm/slab.h        |  1 +
 mm/slab_common.c |  1 +
 mm/slob.c        |  4 ++++
 mm/slub.c        | 38 +++++++++++++++++++-------------------

diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2275,7 +2275,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 
 	err = setup_cpu_cache(cachep, gfp);
 	if (err) {
-		__kmem_cache_shutdown(cachep);
+		__kmem_cache_release(cachep);
 		return err;
 	}
 
@@ -2413,13 +2413,14 @@ int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate)
 }
 
 int __kmem_cache_shutdown(struct kmem_cache *cachep)
+{
+	return __kmem_cache_shrink(cachep, false);
+}
+
+void __kmem_cache_release(struct kmem_cache *cachep)
 {
 	int i;
 	struct kmem_cache_node *n;
-	int rc = __kmem_cache_shrink(cachep, false);
-
-	if (rc)
-		return rc;
 
 	free_percpu(cachep->cpu_cache);
 
@@ -2430,7 +2431,6 @@ int __kmem_cache_shutdown(struct kmem_cache *cachep)
 		kfree(n);
 		cachep->node[i] = NULL;
 	}
-	return 0;
 }
 
 /*
diff --git a/mm/slab.h b/mm/slab.h
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -140,6 +140,7 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
 #define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
 
 int __kmem_cache_shutdown(struct kmem_cache *);
+void __kmem_cache_release(struct kmem_cache *);
 int __kmem_cache_shrink(struct kmem_cache *, bool);
 void slab_kmem_cache_release(struct kmem_cache *);
 
diff --git a/mm/slab_common.c b/mm/slab_common.c
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -693,6 +693,7 @@ static inline int shutdown_memcg_caches(struct kmem_cache *s,
 
 void slab_kmem_cache_release(struct kmem_cache *s)
 {
+	__kmem_cache_release(s);
 	destroy_memcg_params(s);
 	kfree_const(s->name);
 	kmem_cache_free(kmem_cache, s);
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -630,6 +630,10 @@ int __kmem_cache_shutdown(struct kmem_cache *c)
 	return 0;
 }
 
+void __kmem_cache_release(struct kmem_cache *c)
+{
+}
+
 int __kmem_cache_shrink(struct kmem_cache *d, bool deactivate)
 {
 	return 0;
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1592,18 +1592,12 @@ static inline void add_partial(struct kmem_cache_node *n,
 	__add_partial(n, page, tail);
 }
 
-static inline void
-__remove_partial(struct kmem_cache_node *n, struct page *page)
-{
-	list_del(&page->lru);
-	n->nr_partial--;
-}
-
 static inline void remove_partial(struct kmem_cache_node *n,
 					struct page *page)
 {
 	lockdep_assert_held(&n->list_lock);
-	__remove_partial(n, page);
+	list_del(&page->lru);
+	n->nr_partial--;
 }
 
 /*
@@ -3184,6 +3178,12 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
 	}
 }
 
+void __kmem_cache_release(struct kmem_cache *s)
+{
+	free_percpu(s->cpu_slab);
+	free_kmem_cache_nodes(s);
+}
+
 static int init_kmem_cache_nodes(struct kmem_cache *s)
 {
 	int node;
@@ -3443,28 +3443,31 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 
 /*
  * Attempt to free all partial slabs on a node.
- * This is called from kmem_cache_close(). We must be the last thread
- * using the cache and therefore we do not need to lock anymore.
+ * This is called from __kmem_cache_shutdown(). We must take list_lock
+ * because sysfs file might still access partial list after the shutdowning.
  */
 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 {
 	struct page *page, *h;
 
+	BUG_ON(irqs_disabled());
+	spin_lock_irq(&n->list_lock);
 	list_for_each_entry_safe(page, h, &n->partial, lru) {
 		if (!page->inuse) {
-			__remove_partial(n, page);
+			remove_partial(n, page);
 			discard_slab(s, page);
 		} else {
 			list_slab_objects(s, page,
-			"Objects remaining in %s on kmem_cache_close()");
+			"Objects remaining in %s on __kmem_cache_shutdown()");
 		}
 	}
+	spin_unlock_irq(&n->list_lock);
 }
 
 /*
  * Release all resources used by a slab cache.
  */
-static inline int kmem_cache_close(struct kmem_cache *s)
+int __kmem_cache_shutdown(struct kmem_cache *s)
 {
 	int node;
 	struct kmem_cache_node *n;
@@ -3476,16 +3479,9 @@ static inline int kmem_cache_close(struct kmem_cache *s)
 		if (n->nr_partial || slabs_node(s, node))
 			return 1;
 	}
-	free_percpu(s->cpu_slab);
-	free_kmem_cache_nodes(s);
 	return 0;
 }
 
-int __kmem_cache_shutdown(struct kmem_cache *s)
-{
-	return kmem_cache_close(s);
-}
-
 /********************************************************************
  *		Kmalloc subsystem
  *******************************************************************/
@@ -3980,7 +3976,7 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
 	memcg_propagate_slab_attrs(s);
 	err = sysfs_slab_add(s);
 	if (err)
-		kmem_cache_close(s);
+		__kmem_cache_release(s);
 
 	return err;
 }