kasan, mm: rename kasan_poison_kfree
Rename kasan_poison_kfree() to kasan_slab_free_mempool() as it better
reflects what this annotation does. Also add a comment that explains the
PageSlab() check.

No functional changes.

Link: https://lkml.kernel.org/r/141675fb493555e984c5dca555e9d9f768c7bbaa.1606162397.git.andreyknvl@google.com
Link: https://linux-review.googlesource.com/id/I5026f87364e556b506ef1baee725144bb04b8810
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Marco Elver <elver@google.com>
Tested-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Branislav Rankov <Branislav.Rankov@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent 34303244f2
commit eeb3160c24
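The PageSlab() check referenced in the commit message is easiest to see with a concrete mempool. A minimal sketch follows, not part of this commit, assuming SLUB and using illustrative names (example_pool, example_pool_init): when a kmalloc-backed pool's element size exceeds the largest kmalloc slab cache, kmalloc() serves the elements from the page allocator, so the pointers handed to kasan_slab_free_mempool() are !PageSlab() and take the page branch added below.

/*
 * Illustrative sketch, not part of this commit: a kmalloc-backed mempool
 * whose element size is larger than the biggest kmalloc slab cache.
 * With SLUB, kmalloc() serves such sizes from the page allocator, so the
 * reserve elements poisoned via kasan_slab_free_mempool() are !PageSlab().
 */
#include <linux/mempool.h>
#include <linux/errno.h>

static mempool_t *example_pool;	/* name is illustrative */

static int example_pool_init(void)
{
	/*
	 * mempool_create_kmalloc_pool() pre-allocates four 2 MiB elements
	 * and poisons each one (kasan_poison_element() ->
	 * kasan_slab_free_mempool()) as it is placed in the reserve.
	 */
	example_pool = mempool_create_kmalloc_pool(4, 2 << 20);
	if (!example_pool)
		return -ENOMEM;

	mempool_destroy(example_pool);
	return 0;
}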
include/linux/kasan.h
@@ -176,6 +176,13 @@ static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object,
 	return false;
 }
 
+void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
+static __always_inline void kasan_slab_free_mempool(void *ptr, unsigned long ip)
+{
+	if (kasan_enabled())
+		__kasan_slab_free_mempool(ptr, ip);
+}
+
 void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
 					void *object, gfp_t flags);
 static __always_inline void * __must_check kasan_slab_alloc(
@@ -216,13 +223,6 @@ static __always_inline void * __must_check kasan_krealloc(const void *object,
 	return (void *)object;
 }
 
-void __kasan_poison_kfree(void *ptr, unsigned long ip);
-static __always_inline void kasan_poison_kfree(void *ptr, unsigned long ip)
-{
-	if (kasan_enabled())
-		__kasan_poison_kfree(ptr, ip);
-}
-
 void __kasan_kfree_large(void *ptr, unsigned long ip);
 static __always_inline void kasan_kfree_large(void *ptr, unsigned long ip)
 {
@@ -261,6 +261,7 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
 {
 	return false;
 }
+static inline void kasan_slab_free_mempool(void *ptr, unsigned long ip) {}
 static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
 				   gfp_t flags)
 {
@@ -280,7 +281,6 @@ static inline void *kasan_krealloc(const void *object, size_t new_size,
 {
 	return (void *)object;
 }
-static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {}
 static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}
 
 #endif /* CONFIG_KASAN */
mm/kasan/common.c
@@ -331,6 +331,29 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
 	return ____kasan_slab_free(cache, object, ip, true);
 }
 
+void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
+{
+	struct page *page;
+
+	page = virt_to_head_page(ptr);
+
+	/*
+	 * Even though this function is only called for kmem_cache_alloc and
+	 * kmalloc backed mempool allocations, those allocations can still be
+	 * !PageSlab() when the size provided to kmalloc is larger than
+	 * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
+	 */
+	if (unlikely(!PageSlab(page))) {
+		if (ptr != page_address(page)) {
+			kasan_report_invalid_free(ptr, ip);
+			return;
+		}
+		poison_range(ptr, page_size(page), KASAN_FREE_PAGE);
+	} else {
+		____kasan_slab_free(page->slab_cache, ptr, ip, false);
+	}
+}
+
 static void set_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
 {
 	kasan_set_track(&kasan_get_alloc_meta(cache, object)->alloc_track, flags);
@@ -422,23 +445,6 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
 					flags, true);
 }
 
-void __kasan_poison_kfree(void *ptr, unsigned long ip)
-{
-	struct page *page;
-
-	page = virt_to_head_page(ptr);
-
-	if (unlikely(!PageSlab(page))) {
-		if (ptr != page_address(page)) {
-			kasan_report_invalid_free(ptr, ip);
-			return;
-		}
-		poison_range(ptr, page_size(page), KASAN_FREE_PAGE);
-	} else {
-		____kasan_slab_free(page->slab_cache, ptr, ip, false);
-	}
-}
-
 void __kasan_kfree_large(void *ptr, unsigned long ip)
 {
 	if (ptr != page_address(virt_to_head_page(ptr)))
mm/mempool.c
@@ -104,7 +104,7 @@ static inline void poison_element(mempool_t *pool, void *element)
 static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
 {
 	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
-		kasan_poison_kfree(element, _RET_IP_);
+		kasan_slab_free_mempool(element, _RET_IP_);
 	else if (pool->alloc == mempool_alloc_pages)
 		kasan_free_pages(element, (unsigned long)pool->pool_data);
 }