Merge branch 'slab/for-6.9/slab-flag-cleanups' into slab/for-linus
Merge a series from myself that replaces hardcoded SLAB_ cache flag values with an enum, and explicitly deprecates the SLAB_MEM_SPREAD flag, which has been a no-op since the removal of SLAB.
commit 1a1c4e4576
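Before diving into the hunks, the shape of the change can be captured in a standalone sketch. Everything below with a demo_/DEMO_ prefix is invented for illustration; only the pattern (enum bit positions feeding a bit macro, plus a typed-zero placeholder for deprecated flags) mirrors what the series does:

#include <stdio.h>

/* Toy stand-in for the kernel's bitwise slab_flags_t; illustrative only. */
typedef unsigned int demo_flags_t;

/* Before: each flag carried a hardcoded hex value, with gaps left behind
 * by flags removed over the years. */
#define OLD_DEMO_RED_ZONE ((demo_flags_t)0x00000400U)

/* After: bit positions come from an enum, so they stay dense and
 * renumber automatically when a flag is added or removed. */
enum _demo_flag_bits {
	_DEMO_CONSISTENCY_CHECKS,
	_DEMO_RED_ZONE,
	_DEMO_POISON,
	_DEMO_FLAGS_LAST_BIT
};

#define __DEMO_FLAG_BIT(nr)	((demo_flags_t)(1U << (nr)))
#define __DEMO_FLAG_UNUSED	((demo_flags_t)0U)

#define DEMO_RED_ZONE	__DEMO_FLAG_BIT(_DEMO_RED_ZONE)
/* A deprecated no-op flag keeps compiling but contributes no bits. */
#define DEMO_MEM_SPREAD	__DEMO_FLAG_UNUSED

int main(void)
{
	printf("DEMO_RED_ZONE   = %#x\n", DEMO_RED_ZONE);   /* 0x2 */
	printf("DEMO_MEM_SPREAD = %#x\n", DEMO_MEM_SPREAD); /* 0x0 */
	return 0;
}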
@@ -429,7 +429,6 @@ struct kasan_cache {
 };
 
 size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object);
-slab_flags_t kasan_never_merge(void);
 void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
 			slab_flags_t *flags);
 

@@ -446,11 +445,6 @@ static inline size_t kasan_metadata_size(struct kmem_cache *cache,
 {
 	return 0;
 }
-/* And thus nothing prevents cache merging. */
-static inline slab_flags_t kasan_never_merge(void)
-{
-	return 0;
-}
 /* And no cache-related metadata initialization is required. */
 static inline void kasan_cache_create(struct kmem_cache *cache,
 				      unsigned int *size,
@@ -21,29 +21,69 @@
 #include <linux/cleanup.h>
 #include <linux/hash.h>
 
+enum _slab_flag_bits {
+	_SLAB_CONSISTENCY_CHECKS,
+	_SLAB_RED_ZONE,
+	_SLAB_POISON,
+	_SLAB_KMALLOC,
+	_SLAB_HWCACHE_ALIGN,
+	_SLAB_CACHE_DMA,
+	_SLAB_CACHE_DMA32,
+	_SLAB_STORE_USER,
+	_SLAB_PANIC,
+	_SLAB_TYPESAFE_BY_RCU,
+	_SLAB_TRACE,
+#ifdef CONFIG_DEBUG_OBJECTS
+	_SLAB_DEBUG_OBJECTS,
+#endif
+	_SLAB_NOLEAKTRACE,
+	_SLAB_NO_MERGE,
+#ifdef CONFIG_FAILSLAB
+	_SLAB_FAILSLAB,
+#endif
+#ifdef CONFIG_MEMCG_KMEM
+	_SLAB_ACCOUNT,
+#endif
+#ifdef CONFIG_KASAN_GENERIC
+	_SLAB_KASAN,
+#endif
+	_SLAB_NO_USER_FLAGS,
+#ifdef CONFIG_KFENCE
+	_SLAB_SKIP_KFENCE,
+#endif
+#ifndef CONFIG_SLUB_TINY
+	_SLAB_RECLAIM_ACCOUNT,
+#endif
+	_SLAB_OBJECT_POISON,
+	_SLAB_CMPXCHG_DOUBLE,
+	_SLAB_FLAGS_LAST_BIT
+};
+
+#define __SLAB_FLAG_BIT(nr)	((slab_flags_t __force)(1U << (nr)))
+#define __SLAB_FLAG_UNUSED	((slab_flags_t __force)(0U))
+
 /*
  * Flags to pass to kmem_cache_create().
  * The ones marked DEBUG need CONFIG_SLUB_DEBUG enabled, otherwise are no-op
  */
 /* DEBUG: Perform (expensive) checks on alloc/free */
-#define SLAB_CONSISTENCY_CHECKS	((slab_flags_t __force)0x00000100U)
+#define SLAB_CONSISTENCY_CHECKS	__SLAB_FLAG_BIT(_SLAB_CONSISTENCY_CHECKS)
 /* DEBUG: Red zone objs in a cache */
-#define SLAB_RED_ZONE		((slab_flags_t __force)0x00000400U)
+#define SLAB_RED_ZONE		__SLAB_FLAG_BIT(_SLAB_RED_ZONE)
 /* DEBUG: Poison objects */
-#define SLAB_POISON		((slab_flags_t __force)0x00000800U)
+#define SLAB_POISON		__SLAB_FLAG_BIT(_SLAB_POISON)
 /* Indicate a kmalloc slab */
-#define SLAB_KMALLOC		((slab_flags_t __force)0x00001000U)
+#define SLAB_KMALLOC		__SLAB_FLAG_BIT(_SLAB_KMALLOC)
 /* Align objs on cache lines */
-#define SLAB_HWCACHE_ALIGN	((slab_flags_t __force)0x00002000U)
+#define SLAB_HWCACHE_ALIGN	__SLAB_FLAG_BIT(_SLAB_HWCACHE_ALIGN)
 /* Use GFP_DMA memory */
-#define SLAB_CACHE_DMA		((slab_flags_t __force)0x00004000U)
+#define SLAB_CACHE_DMA		__SLAB_FLAG_BIT(_SLAB_CACHE_DMA)
 /* Use GFP_DMA32 memory */
-#define SLAB_CACHE_DMA32	((slab_flags_t __force)0x00008000U)
+#define SLAB_CACHE_DMA32	__SLAB_FLAG_BIT(_SLAB_CACHE_DMA32)
 /* DEBUG: Store the last owner for bug hunting */
-#define SLAB_STORE_USER		((slab_flags_t __force)0x00010000U)
+#define SLAB_STORE_USER		__SLAB_FLAG_BIT(_SLAB_STORE_USER)
 /* Panic if kmem_cache_create() fails */
-#define SLAB_PANIC		((slab_flags_t __force)0x00040000U)
+#define SLAB_PANIC		__SLAB_FLAG_BIT(_SLAB_PANIC)
 /*
  * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
  *

@@ -95,21 +135,19 @@
  * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
  */
 /* Defer freeing slabs to RCU */
-#define SLAB_TYPESAFE_BY_RCU	((slab_flags_t __force)0x00080000U)
-/* Spread some memory over cpuset */
-#define SLAB_MEM_SPREAD		((slab_flags_t __force)0x00100000U)
+#define SLAB_TYPESAFE_BY_RCU	__SLAB_FLAG_BIT(_SLAB_TYPESAFE_BY_RCU)
 /* Trace allocations and frees */
-#define SLAB_TRACE		((slab_flags_t __force)0x00200000U)
+#define SLAB_TRACE		__SLAB_FLAG_BIT(_SLAB_TRACE)
 
 /* Flag to prevent checks on free */
 #ifdef CONFIG_DEBUG_OBJECTS
-# define SLAB_DEBUG_OBJECTS	((slab_flags_t __force)0x00400000U)
+# define SLAB_DEBUG_OBJECTS	__SLAB_FLAG_BIT(_SLAB_DEBUG_OBJECTS)
 #else
-# define SLAB_DEBUG_OBJECTS	0
+# define SLAB_DEBUG_OBJECTS	__SLAB_FLAG_UNUSED
 #endif
 
 /* Avoid kmemleak tracing */
-#define SLAB_NOLEAKTRACE	((slab_flags_t __force)0x00800000U)
+#define SLAB_NOLEAKTRACE	__SLAB_FLAG_BIT(_SLAB_NOLEAKTRACE)
 
 /*
  * Prevent merging with compatible kmem caches. This flag should be used

@@ -121,25 +159,25 @@
  * - performance critical caches, should be very rare and consulted with slab
  *   maintainers, and not used together with CONFIG_SLUB_TINY
  */
-#define SLAB_NO_MERGE		((slab_flags_t __force)0x01000000U)
+#define SLAB_NO_MERGE		__SLAB_FLAG_BIT(_SLAB_NO_MERGE)
 
 /* Fault injection mark */
 #ifdef CONFIG_FAILSLAB
-# define SLAB_FAILSLAB		((slab_flags_t __force)0x02000000U)
+# define SLAB_FAILSLAB		__SLAB_FLAG_BIT(_SLAB_FAILSLAB)
 #else
-# define SLAB_FAILSLAB		0
+# define SLAB_FAILSLAB		__SLAB_FLAG_UNUSED
 #endif
 /* Account to memcg */
 #ifdef CONFIG_MEMCG_KMEM
-# define SLAB_ACCOUNT		((slab_flags_t __force)0x04000000U)
+# define SLAB_ACCOUNT		__SLAB_FLAG_BIT(_SLAB_ACCOUNT)
 #else
-# define SLAB_ACCOUNT		0
+# define SLAB_ACCOUNT		__SLAB_FLAG_UNUSED
 #endif
 
 #ifdef CONFIG_KASAN_GENERIC
-#define SLAB_KASAN		((slab_flags_t __force)0x08000000U)
+#define SLAB_KASAN		__SLAB_FLAG_BIT(_SLAB_KASAN)
 #else
-#define SLAB_KASAN		0
+#define SLAB_KASAN		__SLAB_FLAG_UNUSED
 #endif
 
 /*

@@ -147,23 +185,26 @@
  * Intended for caches created for self-tests so they have only flags
  * specified in the code and other flags are ignored.
  */
-#define SLAB_NO_USER_FLAGS	((slab_flags_t __force)0x10000000U)
+#define SLAB_NO_USER_FLAGS	__SLAB_FLAG_BIT(_SLAB_NO_USER_FLAGS)
 
 #ifdef CONFIG_KFENCE
-#define SLAB_SKIP_KFENCE	((slab_flags_t __force)0x20000000U)
+#define SLAB_SKIP_KFENCE	__SLAB_FLAG_BIT(_SLAB_SKIP_KFENCE)
 #else
-#define SLAB_SKIP_KFENCE	0
+#define SLAB_SKIP_KFENCE	__SLAB_FLAG_UNUSED
 #endif
 
 /* The following flags affect the page allocator grouping pages by mobility */
 /* Objects are reclaimable */
 #ifndef CONFIG_SLUB_TINY
-#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000U)
+#define SLAB_RECLAIM_ACCOUNT	__SLAB_FLAG_BIT(_SLAB_RECLAIM_ACCOUNT)
 #else
-#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0)
+#define SLAB_RECLAIM_ACCOUNT	__SLAB_FLAG_UNUSED
 #endif
 #define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
 
+/* Obsolete unused flag, to be removed */
+#define SLAB_MEM_SPREAD		__SLAB_FLAG_UNUSED
+
 /*
  * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
  *
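A side effect worth noting from the slab.h hunks above: flags that are configured out now expand to __SLAB_FLAG_UNUSED, a typed zero, so tests against them can be constant-folded. A minimal standalone sketch of that behavior (demo_ names are invented; plain unsigned int stands in for slab_flags_t, and the kernel's __force sparse annotation is dropped):

#include <assert.h>

typedef unsigned int demo_flags_t;

#define __DEMO_FLAG_BIT(nr)	((demo_flags_t)(1U << (nr)))
#define __DEMO_FLAG_UNUSED	((demo_flags_t)0U)

/* Mimics the "#ifdef CONFIG_MEMCG_KMEM ... #else ... #endif" pattern. */
#ifdef DEMO_CONFIG_MEMCG
#define DEMO_ACCOUNT	__DEMO_FLAG_BIT(0)
#else
#define DEMO_ACCOUNT	__DEMO_FLAG_UNUSED
#endif

static int wants_accounting(demo_flags_t flags)
{
	/* With DEMO_ACCOUNT == 0 this whole check folds to "return 0". */
	return (flags & DEMO_ACCOUNT) != 0;
}

int main(void)
{
	/* DEMO_CONFIG_MEMCG is not defined here, so DEMO_ACCOUNT is the
	 * typed zero and the accounting check is statically false. */
	assert(!wants_accounting(DEMO_ACCOUNT));
	return 0;
}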
@@ -334,14 +334,6 @@ DEFINE_ASAN_SET_SHADOW(f3);
 DEFINE_ASAN_SET_SHADOW(f5);
 DEFINE_ASAN_SET_SHADOW(f8);
 
-/* Only allow cache merging when no per-object metadata is present. */
-slab_flags_t kasan_never_merge(void)
-{
-	if (!kasan_requires_meta())
-		return 0;
-	return SLAB_KASAN;
-}
-
 /*
  * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
  * For larger allocations larger redzones are used.

@@ -370,15 +362,13 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
 		return;
 
 	/*
-	 * SLAB_KASAN is used to mark caches that are sanitized by KASAN
-	 * and that thus have per-object metadata.
-	 * Currently this flag is used in two places:
-	 * 1. In slab_ksize() to account for per-object metadata when
-	 *    calculating the size of the accessible memory within the object.
-	 * 2. In slab_common.c via kasan_never_merge() to prevent merging of
-	 *    caches with per-object metadata.
+	 * SLAB_KASAN is used to mark caches that are sanitized by KASAN and
+	 * that thus have per-object metadata. Currently, this flag is used in
+	 * slab_ksize() to account for per-object metadata when calculating the
+	 * size of the accessible memory within the object. Additionally, we use
+	 * SLAB_NO_MERGE to prevent merging of caches with per-object metadata.
 	 */
-	*flags |= SLAB_KASAN;
+	*flags |= SLAB_KASAN | SLAB_NO_MERGE;
 
 	ok_size = *size;
 
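The two kasan hunks above trade a runtime query (kasan_never_merge()) for a flag set once at cache creation. Reduced to a toy model, with demo_ names invented for illustration and demo_requires_meta() standing in for kasan_requires_meta():

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int demo_flags_t;

#define DEMO_KASAN	(1U << 0)
#define DEMO_NO_MERGE	(1U << 1)

/* Stand-in for kasan_requires_meta(): true when the KASAN mode keeps
 * per-object metadata (generic mode), false otherwise. */
static bool demo_requires_meta(void) { return true; }

/* New scheme: mark the cache unmergeable up front at creation time,
 * instead of having the merge path call back into KASAN to ask. */
static void demo_cache_create(demo_flags_t *flags)
{
	if (demo_requires_meta())
		*flags |= DEMO_KASAN | DEMO_NO_MERGE;
}

int main(void)
{
	demo_flags_t flags = 0;
	demo_cache_create(&flags);
	printf("mergeable: %s\n", (flags & DEMO_NO_MERGE) ? "no" : "yes");
	return 0;
}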
@@ -465,7 +465,6 @@ static inline bool is_kmalloc_cache(struct kmem_cache *s)
 			      SLAB_STORE_USER | \
 			      SLAB_TRACE | \
 			      SLAB_CONSISTENCY_CHECKS | \
-			      SLAB_MEM_SPREAD | \
 			      SLAB_NOLEAKTRACE | \
 			      SLAB_RECLAIM_ACCOUNT | \
 			      SLAB_TEMPORARY | \
@@ -50,7 +50,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
  */
 #define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
 		SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
-		SLAB_FAILSLAB | SLAB_NO_MERGE | kasan_never_merge())
+		SLAB_FAILSLAB | SLAB_NO_MERGE)
 
 #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
 			 SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
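With the kasan_never_merge() call gone, SLAB_NEVER_MERGE in this hunk is a compile-time constant again. A rough sketch of how such a mask gates merging, loosely modeled on the kernel's merge check but not the actual code (demo_ names invented):

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int demo_flags_t;

#define DEMO_RED_ZONE	(1U << 0)
#define DEMO_POISON	(1U << 1)
#define DEMO_NO_MERGE	(1U << 2)

/* Constant mask: any of these flags makes a cache ineligible for merging. */
#define DEMO_NEVER_MERGE (DEMO_RED_ZONE | DEMO_POISON | DEMO_NO_MERGE)

static bool demo_mergeable(demo_flags_t flags)
{
	return (flags & DEMO_NEVER_MERGE) == 0;
}

int main(void)
{
	printf("plain cache mergeable: %d\n", demo_mergeable(0));		/* 1 */
	printf("red-zoned cache mergeable: %d\n", demo_mergeable(DEMO_RED_ZONE)); /* 0 */
	return 0;
}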
@@ -306,13 +306,13 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
 
 /* Internal SLUB flags */
 /* Poison object */
-#define __OBJECT_POISON		((slab_flags_t __force)0x80000000U)
+#define __OBJECT_POISON		__SLAB_FLAG_BIT(_SLAB_OBJECT_POISON)
 /* Use cmpxchg_double */
 
 #ifdef system_has_freelist_aba
-#define __CMPXCHG_DOUBLE	((slab_flags_t __force)0x40000000U)
+#define __CMPXCHG_DOUBLE	__SLAB_FLAG_BIT(_SLAB_CMPXCHG_DOUBLE)
 #else
-#define __CMPXCHG_DOUBLE	((slab_flags_t __force)0U)
+#define __CMPXCHG_DOUBLE	__SLAB_FLAG_UNUSED
 #endif
 
 /*
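Since the internal SLUB flags now come from the same enum as the public ones, the enum's terminator bounds the whole flag space. The static assertion below is a hedged illustration of what that enables, not something taken from the patch (demo_ names invented):

typedef unsigned int demo_flags_t;

enum _demo_flag_bits {
	_DEMO_RED_ZONE,
	_DEMO_POISON,
	/* ... public flags ... */
	_DEMO_OBJECT_POISON,	/* internal flags share the same space, */
	_DEMO_CMPXCHG_DOUBLE,	/* so they can never collide with public ones */
	_DEMO_FLAGS_LAST_BIT
};

/* Every bit, public or internal, must fit in the 32-bit flags word. */
_Static_assert(_DEMO_FLAGS_LAST_BIT <= 32,
	       "demo flag bits exceed flags word width");

int main(void) { return 0; }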