[PATCH] slab: verify pointers before free
Passing an invalid pointer to kfree() and kmem_cache_free() is likely to cause bad memory corruption or even take down the whole system, because the bad pointer is likely reused immediately due to the per-CPU caches. Until now, we don't do any verification for this if CONFIG_DEBUG_SLAB is disabled.

As suggested by Linus, add a PageSlab check to page_get_cache() and page_get_slab() to verify pointers passed to kfree(). Also, move the stronger check from cache_free_debugcheck() to kmem_cache_free() to ensure the passed pointer actually belongs to the cache we're about to free the object into.

For page_get_cache() and page_get_slab(), the assertions should have virtually no extra cost (two instructions, no data cache pressure) and for kmem_cache_free() the overhead should be minimal.

Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Manfred Spraul <manfred@colorfullife.com>
Cc: Christoph Lameter <clameter@engr.sgi.com>
Cc: Linus Torvalds <torvalds@osdl.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit ddc2e812d5
parent 8d3c138b77

mm/slab.c | 13
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -592,6 +592,7 @@ static inline struct kmem_cache *page_get_cache(struct page *page)
 {
 	if (unlikely(PageCompound(page)))
 		page = (struct page *)page_private(page);
+	BUG_ON(!PageSlab(page));
 	return (struct kmem_cache *)page->lru.next;
 }
 
@@ -604,6 +605,7 @@ static inline struct slab *page_get_slab(struct page *page)
 {
 	if (unlikely(PageCompound(page)))
 		page = (struct page *)page_private(page);
+	BUG_ON(!PageSlab(page));
 	return (struct slab *)page->lru.prev;
 }
 
@@ -2669,15 +2671,6 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 	kfree_debugcheck(objp);
 	page = virt_to_page(objp);
 
-	if (page_get_cache(page) != cachep) {
-		printk(KERN_ERR "mismatch in kmem_cache_free: expected "
-				"cache %p, got %p\n",
-			page_get_cache(page), cachep);
-		printk(KERN_ERR "%p is %s.\n", cachep, cachep->name);
-		printk(KERN_ERR "%p is %s.\n", page_get_cache(page),
-			page_get_cache(page)->name);
-		WARN_ON(1);
-	}
 	slabp = page_get_slab(page);
 
 	if (cachep->flags & SLAB_RED_ZONE) {
@@ -3393,6 +3386,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 {
 	unsigned long flags;
 
+	BUG_ON(virt_to_cache(objp) != cachep);
+
 	local_irq_save(flags);
 	__cache_free(cachep, objp);
 	local_irq_restore(flags);