kasan: introduce kasan_mempool_unpoison_pages

Introduce and document a new kasan_mempool_unpoison_pages hook to be used
by the mempool code instead of kasan_unpoison_pages.

This hook is not functionally different from kasan_unpoison_pages, but
using it improves the readability of the mempool code.
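
As a rough illustration of the intended pairing (not taken from this patch:
the pcache structure and its helpers are made up for the example; only the
kasan_mempool_* and page allocator calls are real):

#include <linux/gfp.h>
#include <linux/kasan.h>

/* Hypothetical single-slot cache of one page allocation of a fixed order. */
struct pcache {
	struct page *spare;	/* idle allocation, kept poisoned by KASAN */
	unsigned int order;
};

/* Instead of freeing, try to stash the allocation for later reuse. */
static bool pcache_put(struct pcache *pc, struct page *page)
{
	if (pc->spare)
		return false;	/* cache full: caller frees via __free_pages() */

	/* Check the allocation and poison it while it sits in the cache. */
	if (!kasan_mempool_poison_pages(page, pc->order))
		return false;	/* KASAN says it cannot be safely reused */

	pc->spare = page;
	return true;
}

/* Hand out the cached allocation, falling back to the page allocator. */
static struct page *pcache_get(struct pcache *pc)
{
	struct page *page = pc->spare;

	if (!page)
		return alloc_pages(GFP_KERNEL, pc->order);

	pc->spare = NULL;
	/* Make the memory accessible again; tag-based modes assign a new tag. */
	kasan_mempool_unpoison_pages(page, pc->order);
	return page;
}

Poisoning at caching time means buggy accesses to an idle cached allocation
are reported, and unpoisoning before reuse makes the memory accessible again
without zeroing it (the tag-based modes also assign a fresh tag).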

Link: https://lkml.kernel.org/r/239bd9af6176f2cc59f5c25893eb36143184daff.1703024586.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Cc: Alexander Lobakin <alobakin@pm.me>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Breno Leitao <leitao@debian.org>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Marco Elver <elver@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 9f41c59ae3 (parent f129c31039)
Author: Andrey Konovalov, 2023-12-19 23:28:51 +01:00; committed by Andrew Morton
2 files changed, 31 insertions(+), 0 deletions(-)

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h

@@ -225,6 +225,9 @@ bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
  * This function is similar to kasan_mempool_poison_object() but operates on
  * page allocations.
  *
+ * Before the poisoned allocation can be reused, it must be unpoisoned via
+ * kasan_mempool_unpoison_pages().
+ *
  * Return: true if the allocation can be safely reused; false otherwise.
  */
 static __always_inline bool kasan_mempool_poison_pages(struct page *page,
@@ -235,6 +238,27 @@ static __always_inline bool kasan_mempool_poison_pages(struct page *page,
 	return true;
 }
 
+void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
+				    unsigned long ip);
+/**
+ * kasan_mempool_unpoison_pages - Unpoison a mempool page allocation.
+ * @page: Pointer to the page allocation.
+ * @order: Order of the allocation.
+ *
+ * This function is intended for kernel subsystems that cache page allocations
+ * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
+ *
+ * This function unpoisons a page allocation that was previously poisoned by
+ * kasan_mempool_poison_pages() without zeroing the allocation's memory. For
+ * the tag-based modes, this function assigns a new tag to the allocation.
+ */
+static __always_inline void kasan_mempool_unpoison_pages(struct page *page,
+							 unsigned int order)
+{
+	if (kasan_enabled())
+		__kasan_mempool_unpoison_pages(page, order, _RET_IP_);
+}
+
 bool __kasan_mempool_poison_object(void *ptr, unsigned long ip);
 /**
  * kasan_mempool_poison_object - Check and poison a mempool slab allocation.
@@ -353,6 +377,7 @@ static inline bool kasan_mempool_poison_pages(struct page *page, unsigned int or
 {
 	return true;
 }
+static inline void kasan_mempool_unpoison_pages(struct page *page, unsigned int order) {}
 static inline bool kasan_mempool_poison_object(void *ptr)
 {
 	return true;

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c

@@ -449,6 +449,12 @@ bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
 	return true;
 }
 
+void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
+				    unsigned long ip)
+{
+	__kasan_unpoison_pages(page, order, false);
+}
+
 bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
 {
 	struct folio *folio;