kasan: add return value for kasan_mempool_poison_object

Add a return value for kasan_mempool_poison_object that lets the caller
know whether the allocation is affected by a double-free or an
invalid-free bug.  The caller can use this return value to stop operating
on the object.

Also introduce a check_page_allocation helper function to improve code
readability.

Link: https://lkml.kernel.org/r/618af65273875fb9f56954285443279b15f1fcd9.1703024586.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Cc: Alexander Lobakin <alobakin@pm.me>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Breno Leitao <leitao@debian.org>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Marco Elver <elver@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
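
For illustration (not part of the patch), this is how a caller can act on the
new return value; pool_free_element() and pool_recycle() below are hypothetical
names, a minimal sketch rather than mempool's actual code:

#include <linux/kasan.h>

/* Hypothetical stand-in for the caller's recycling step. */
static void pool_recycle(void *element);

static void pool_free_element(void *element)
{
	/*
	 * kasan_mempool_poison_object() now returns false when it detects
	 * a double-free or an invalid-free; the caller then stops
	 * operating on the element instead of recycling it.
	 */
	if (!kasan_mempool_poison_object(element))
		return;

	pool_recycle(element);
}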

--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -212,7 +212,7 @@ static __always_inline void * __must_check kasan_krealloc(const void *object,
 	return (void *)object;
 }
 
-void __kasan_mempool_poison_object(void *ptr, unsigned long ip);
+bool __kasan_mempool_poison_object(void *ptr, unsigned long ip);
 /**
  * kasan_mempool_poison_object - Check and poison a mempool slab allocation.
  * @ptr: Pointer to the slab allocation.
@@ -225,16 +225,20 @@ void __kasan_mempool_poison_object(void *ptr, unsigned long ip);
  * without putting it into the quarantine (for the Generic mode).
  *
  * This function also performs checks to detect double-free and invalid-free
- * bugs and reports them.
+ * bugs and reports them. The caller can use the return value of this function
+ * to find out if the allocation is buggy.
  *
  * This function operates on all slab allocations including large kmalloc
  * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
  * size > KMALLOC_MAX_SIZE).
+ *
+ * Return: true if the allocation can be safely reused; false otherwise.
  */
-static __always_inline void kasan_mempool_poison_object(void *ptr)
+static __always_inline bool kasan_mempool_poison_object(void *ptr)
 {
 	if (kasan_enabled())
-		__kasan_mempool_poison_object(ptr, _RET_IP_);
+		return __kasan_mempool_poison_object(ptr, _RET_IP_);
+	return true;
 }
 
 /*
@@ -293,7 +297,10 @@ static inline void *kasan_krealloc(const void *object, size_t new_size,
 {
 	return (void *)object;
 }
-static inline void kasan_mempool_poison_object(void *ptr) {}
+static inline bool kasan_mempool_poison_object(void *ptr)
+{
+	return true;
+}
 static inline bool kasan_check_byte(const void *address)
 {
 	return true;

--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -254,7 +254,7 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object,
 	return ____kasan_slab_free(cache, object, ip, true, init);
 }
 
-static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
+static inline bool check_page_allocation(void *ptr, unsigned long ip)
 {
 	if (!kasan_arch_is_ready())
 		return false;
@@ -269,17 +269,14 @@ static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
 		return true;
 	}
 
-	/*
-	 * The object will be poisoned by kasan_poison_pages() or
-	 * kasan_mempool_poison_object().
-	 */
-
 	return false;
 }
 
 void __kasan_kfree_large(void *ptr, unsigned long ip)
 {
-	____kasan_kfree_large(ptr, ip);
+	check_page_allocation(ptr, ip);
+
+	/* The object will be poisoned by kasan_poison_pages(). */
 }
 
 void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
@@ -429,7 +426,7 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
 	return ____kasan_kmalloc(slab->slab_cache, object, size, flags);
 }
 
-void __kasan_mempool_poison_object(void *ptr, unsigned long ip)
+bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
 {
 	struct folio *folio;
 
@@ -442,13 +439,15 @@ void __kasan_mempool_poison_object(void *ptr, unsigned long ip)
 	 * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
 	 */
 	if (unlikely(!folio_test_slab(folio))) {
-		if (____kasan_kfree_large(ptr, ip))
-			return;
+		if (check_page_allocation(ptr, ip))
+			return false;
 		kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
+		return true;
 	} else {
 		struct slab *slab = folio_slab(folio);
 
-		____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);
+		return !____kasan_slab_free(slab->slab_cache, ptr, ip,
+					    false, false);
 	}
 }
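
A note on the return conventions in the hunks above: the internal helpers,
check_page_allocation() and ____kasan_slab_free(), return true when the free
is buggy, while the public kasan_mempool_poison_object() returns true when the
allocation can safely be reused. Hence the early return false on the
page_alloc path and the negation on the slab path. Below is a condensed,
comment-annotated restatement of the new function body for readability; the
hunks above are authoritative:

bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
{
	struct folio *folio = virt_to_folio(ptr);

	if (unlikely(!folio_test_slab(folio))) {
		/* Large kmalloc allocation backed by page_alloc. */
		if (check_page_allocation(ptr, ip))
			return false;	/* bug detected and reported: bail out */
		kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
		return true;		/* poisoned; safe to reuse later */
	} else {
		struct slab *slab = folio_slab(folio);

		/* The internal helper returns true on a bug, hence the negation. */
		return !____kasan_slab_free(slab->slab_cache, ptr, ip,
					    false, false);
	}
}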