kasan, slub: fix handling of kasan_slab_free hook
The kasan_slab_free hook's return value denotes whether the reuse of a slab object must be delayed (e.g. when the object is put into memory quarantine). The current way SLUB handles this hook is by ignoring its return value and hardcoding checks similar (but not exactly the same) to the ones performed in kasan_slab_free, which is error prone.

The main difference between the hardcoded checks and the ones in kasan_slab_free is whether we want to perform a free when an invalid-free or a double-free was detected (we don't).

This patch changes the way SLUB handles this by:
1. taking into account the return value of kasan_slab_free for each of the objects that are being freed;
2. reconstructing the freelist of objects to exclude the ones whose reuse must be delayed.

[andreyknvl@google.com: eliminate unnecessary branch in slab_free]
  Link: http://lkml.kernel.org/r/a62759a2545fddf69b0c034547212ca1eb1b3ce2.1520359686.git.andreyknvl@google.com
Link: http://lkml.kernel.org/r/083f58501e54731203801d899632d76175868e97.1519400992.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Acked-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Kostya Serebryany <kcc@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit c3895391df
parent b7d349c741
 mm/slub.c | 57 ++++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 34 insertions(+), 23 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1363,10 +1363,8 @@ static __always_inline void kfree_hook(void *x)
         kasan_kfree_large(x, _RET_IP_);
 }
 
-static __always_inline void *slab_free_hook(struct kmem_cache *s, void *x)
+static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
 {
-        void *freeptr;
-
         kmemleak_free_recursive(x, s->flags);
 
         /*
@@ -1386,17 +1384,12 @@ static __always_inline void *slab_free_hook(struct kmem_cache *s, void *x)
         if (!(s->flags & SLAB_DEBUG_OBJECTS))
                 debug_check_no_obj_freed(x, s->object_size);
 
-        freeptr = get_freepointer(s, x);
-        /*
-         * kasan_slab_free() may put x into memory quarantine, delaying its
-         * reuse. In this case the object's freelist pointer is changed.
-         */
-        kasan_slab_free(s, x, _RET_IP_);
-        return freeptr;
+        /* KASAN might put x into memory quarantine, delaying its reuse */
+        return kasan_slab_free(s, x, _RET_IP_);
 }
 
-static inline void slab_free_freelist_hook(struct kmem_cache *s,
-                                           void *head, void *tail)
+static inline bool slab_free_freelist_hook(struct kmem_cache *s,
+                                           void **head, void **tail)
 {
 /*
  * Compiler cannot detect this function can be removed if slab_free_hook()
@@ -1407,13 +1400,33 @@ static inline void slab_free_freelist_hook(struct kmem_cache *s,
         defined(CONFIG_DEBUG_OBJECTS_FREE) ||  \
         defined(CONFIG_KASAN)
 
-        void *object = head;
-        void *tail_obj = tail ? : head;
-        void *freeptr;
+        void *object;
+        void *next = *head;
+        void *old_tail = *tail ? *tail : *head;
+
+        /* Head and tail of the reconstructed freelist */
+        *head = NULL;
+        *tail = NULL;
 
         do {
-                freeptr = slab_free_hook(s, object);
-        } while ((object != tail_obj) && (object = freeptr));
+                object = next;
+                next = get_freepointer(s, object);
+                /* If object's reuse doesn't have to be delayed */
+                if (!slab_free_hook(s, object)) {
+                        /* Move object to the new freelist */
+                        set_freepointer(s, object, *head);
+                        *head = object;
+                        if (!*tail)
+                                *tail = object;
+                }
+        } while (object != old_tail);
+
+        if (*head == *tail)
+                *tail = NULL;
+
+        return *head != NULL;
+#else
+        return true;
 #endif
 }
 
@@ -2968,14 +2981,12 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
                                       void *head, void *tail, int cnt,
                                       unsigned long addr)
 {
-        slab_free_freelist_hook(s, head, tail);
         /*
-         * slab_free_freelist_hook() could have put the items into quarantine.
-         * If so, no need to free them.
+         * With KASAN enabled slab_free_freelist_hook modifies the freelist
+         * to remove objects, whose reuse must be delayed.
          */
-        if (s->flags & SLAB_KASAN && !(s->flags & SLAB_TYPESAFE_BY_RCU))
-                return;
-        do_slab_free(s, page, head, tail, cnt, addr);
+        if (slab_free_freelist_hook(s, &head, &tail))
+                do_slab_free(s, page, head, tail, cnt, addr);
 }
 
 #ifdef CONFIG_KASAN
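To see the freelist-reconstruction idea from the commit message in isolation, below is a minimal user-space sketch, not kernel code. The struct object layout, free_hook() and filter_freelist() are hypothetical stand-ins for SLUB's freelist pointer accessors (get_freepointer()/set_freepointer()), slab_free_hook() and the patched slab_free_freelist_hook(); only the control flow mirrors the patch: walk the original freelist, keep objects whose free is not delayed, rebuild head/tail, and report whether anything is left for the real free path.

#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical object with an embedded free pointer, standing in for a SLUB
 * object whose freelist link is accessed via get_freepointer()/set_freepointer()
 * in the real code.
 */
struct object {
        struct object *free;    /* next object on the freelist */
        int id;
        bool quarantined;       /* stand-in for "KASAN wants to delay reuse" */
};

/* Stand-in for slab_free_hook(): true means reuse of obj must be delayed. */
static bool free_hook(struct object *obj)
{
        return obj->quarantined;
}

/*
 * Stand-in for the patched slab_free_freelist_hook(): rebuild the freelist
 * [*head, *tail], dropping every object whose reuse must be delayed, and
 * return true if anything is left for the caller to actually free.
 */
static bool filter_freelist(struct object **head, struct object **tail)
{
        struct object *object;
        struct object *next = *head;
        struct object *old_tail = *tail ? *tail : *head;

        /* Head and tail of the reconstructed freelist */
        *head = NULL;
        *tail = NULL;

        do {
                object = next;
                next = object->free;
                if (!free_hook(object)) {
                        /* Prepend the surviving object to the new list */
                        object->free = *head;
                        *head = object;
                        if (!*tail)
                                *tail = object;
                }
        } while (object != old_tail);

        if (*head == *tail)
                *tail = NULL;

        return *head != NULL;
}

int main(void)
{
        /* Freelist of three objects; the middle one must be quarantined. */
        struct object o[3] = {
                { .free = &o[1], .id = 0 },
                { .free = &o[2], .id = 1, .quarantined = true },
                { .free = NULL,  .id = 2 },
        };
        struct object *head = &o[0], *tail = &o[2];

        /* Mirrors slab_free(): only enter the real free path if needed. */
        if (filter_freelist(&head, &tail)) {
                for (struct object *p = head; p; p = p->free)
                        printf("would free object %d\n", p->id);
        }
        return 0;
}

Running the sketch reports objects 2 and 0 as freeable and silently drops the quarantined one. The surviving objects come out in reversed order because the new list is built by prepending, which is also how the patched slab_free_freelist_hook() rebuilds the list before handing it to do_slab_free().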