mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-28 22:54:05 +08:00
kasan: only define kasan_never_merge for Generic mode
KASAN prevents merging of slab caches whose objects have per-object metadata stored in redzones. As now only the Generic mode uses per-object metadata, define kasan_never_merge() only for this mode. Link: https://lkml.kernel.org/r/81ed01f29ff3443580b7e2fe362a8b47b1e8006d.1662411799.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov <andreyknvl@google.com> Reviewed-by: Marco Elver <elver@google.com> Cc: Alexander Potapenko <glider@google.com> Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com> Cc: Dmitry Vyukov <dvyukov@google.com> Cc: Evgenii Stepanov <eugenis@google.com> Cc: Peter Collingbourne <pcc@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
f372bde922
commit
3b7f8813e9
@@ -103,14 +103,6 @@ struct kasan_cache {
|
||||
bool is_kmalloc;
|
||||
};
|
||||
|
||||
slab_flags_t __kasan_never_merge(void);
|
||||
/*
 * Return the slab flags that must forbid cache merging, or 0 when KASAN
 * is disabled at runtime. Thin runtime-enabled gate around the out-of-line
 * __kasan_never_merge().
 */
static __always_inline slab_flags_t kasan_never_merge(void)
{
	return kasan_enabled() ? __kasan_never_merge() : 0;
}
|
||||
|
||||
void __kasan_unpoison_range(const void *addr, size_t size);
|
||||
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
|
||||
{
|
||||
@@ -261,10 +253,6 @@ static __always_inline bool kasan_check_byte(const void *addr)
|
||||
|
||||
#else /* CONFIG_KASAN */
|
||||
|
||||
static inline slab_flags_t kasan_never_merge(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
/* !CONFIG_KASAN stub: unpoisoning a range is a no-op. */
static inline void kasan_unpoison_range(const void *address, size_t size) { }
|
||||
static inline void kasan_poison_pages(struct page *page, unsigned int order,
|
||||
bool init) {}
|
||||
@@ -325,6 +313,7 @@ static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
|
||||
#ifdef CONFIG_KASAN_GENERIC
|
||||
|
||||
size_t kasan_metadata_size(struct kmem_cache *cache);
|
||||
slab_flags_t kasan_never_merge(void);
|
||||
|
||||
void kasan_cache_shrink(struct kmem_cache *cache);
|
||||
void kasan_cache_shutdown(struct kmem_cache *cache);
|
||||
@@ -338,6 +327,11 @@ static inline size_t kasan_metadata_size(struct kmem_cache *cache)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
/* And thus nothing prevents cache merging. */
|
||||
static inline slab_flags_t kasan_never_merge(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Stub: cache shrink needs no KASAN bookkeeping in this configuration. */
static inline void kasan_cache_shrink(struct kmem_cache *cache) { }
|
||||
/* Stub: cache shutdown needs no KASAN bookkeeping in this configuration. */
static inline void kasan_cache_shutdown(struct kmem_cache *cache) { }
|
||||
|
@@ -88,14 +88,6 @@ asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
|
||||
}
|
||||
#endif /* CONFIG_KASAN_STACK */
|
||||
|
||||
/* Only allow cache merging when no per-object metadata is present. */
|
||||
slab_flags_t __kasan_never_merge(void)
|
||||
{
|
||||
if (kasan_requires_meta())
|
||||
return SLAB_KASAN;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
|
||||
{
|
||||
u8 tag;
|
||||
|
@@ -328,6 +328,14 @@ DEFINE_ASAN_SET_SHADOW(f3);
|
||||
DEFINE_ASAN_SET_SHADOW(f5);
|
||||
DEFINE_ASAN_SET_SHADOW(f8);
|
||||
|
||||
/* Only allow cache merging when no per-object metadata is present. */
|
||||
slab_flags_t kasan_never_merge(void)
|
||||
{
|
||||
if (!kasan_requires_meta())
|
||||
return 0;
|
||||
return SLAB_KASAN;
|
||||
}
|
||||
|
||||
/*
|
||||
* Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
|
||||
* For larger allocations larger redzones are used.
|
||||
|
Loading…
Reference in New Issue
Block a user