Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-11-11 12:28:41 +08:00)
kasan: rename and document kasan_(un)poison_object_data
Rename kasan_unpoison_object_data to kasan_unpoison_new_object and add a documentation comment. Do the same for kasan_poison_object_data.

The new names and the comments should suggest to users that these hooks are intended for internal use by the slab allocator.

The following patch will remove non-slab-internal uses of these hooks.

No functional changes.

[andreyknvl@google.com: update references to renamed functions in comments]
Link: https://lkml.kernel.org/r/20231221180637.105098-1-andrey.konovalov@linux.dev
Link: https://lkml.kernel.org/r/eab156ebbd635f9635ef67d1a4271f716994e628.1703024586.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Marco Elver <elver@google.com>
Cc: Alexander Lobakin <alobakin@pm.me>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Breno Leitao <leitao@debian.org>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 1ce9a05239, parent 86b1596983
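For context, the internal-use pattern the new names are meant to convey is the one the slab hunks below follow. A minimal sketch, assuming allocator-internal context (<linux/kasan.h> plus the slab allocator's private definition of struct kmem_cache with its ->ctor member); the helper name init_new_slab_object() is hypothetical, only the kasan_*_new_object() calls come from this patch:

/*
 * Illustrative sketch, not part of this patch: mirrors setup_object()
 * in the mm/slub.c hunk below. The object comes from a freshly
 * allocated, still-poisoned slab.
 */
static void *init_new_slab_object(struct kmem_cache *s, void *object)
{
        if (unlikely(s->ctor)) {
                /* Temporarily lift KASAN poisoning so the constructor can write. */
                kasan_unpoison_new_object(s, object);
                s->ctor(object);
                /* Repoison: the object has not been handed out to a caller yet. */
                kasan_poison_new_object(s, object);
        }
        return object;
}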
include/linux/kasan.h
@@ -129,20 +129,39 @@ static __always_inline void kasan_poison_slab(struct slab *slab)
 		__kasan_poison_slab(slab);
 }
 
-void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
-static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
+void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object);
+/**
+ * kasan_unpoison_new_object - Temporarily unpoison a new slab object.
+ * @cache: Cache the object belong to.
+ * @object: Pointer to the object.
+ *
+ * This function is intended for the slab allocator's internal use. It
+ * temporarily unpoisons an object from a newly allocated slab without doing
+ * anything else. The object must later be repoisoned by
+ * kasan_poison_new_object().
+ */
+static __always_inline void kasan_unpoison_new_object(struct kmem_cache *cache,
 						void *object)
 {
 	if (kasan_enabled())
-		__kasan_unpoison_object_data(cache, object);
+		__kasan_unpoison_new_object(cache, object);
 }
 
-void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
-static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
+void __kasan_poison_new_object(struct kmem_cache *cache, void *object);
+/**
+ * kasan_poison_new_object - Repoison a new slab object.
+ * @cache: Cache the object belong to.
+ * @object: Pointer to the object.
+ *
+ * This function is intended for the slab allocator's internal use. It
+ * repoisons an object that was previously unpoisoned by
+ * kasan_unpoison_new_object() without doing anything else.
+ */
+static __always_inline void kasan_poison_new_object(struct kmem_cache *cache,
 					void *object)
 {
 	if (kasan_enabled())
-		__kasan_poison_object_data(cache, object);
+		__kasan_poison_new_object(cache, object);
 }
 
 void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
@@ -342,9 +361,9 @@ static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
 	return false;
 }
 static inline void kasan_poison_slab(struct slab *slab) {}
-static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
+static inline void kasan_unpoison_new_object(struct kmem_cache *cache,
 					void *object) {}
-static inline void kasan_poison_object_data(struct kmem_cache *cache,
+static inline void kasan_poison_new_object(struct kmem_cache *cache,
 					void *object) {}
 static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
 				const void *object)
mm/kasan/common.c
@@ -143,12 +143,12 @@ void __kasan_poison_slab(struct slab *slab)
 			KASAN_SLAB_REDZONE, false);
 }
 
-void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
+void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object)
 {
 	kasan_unpoison(object, cache->object_size, false);
 }
 
-void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
+void __kasan_poison_new_object(struct kmem_cache *cache, void *object)
 {
 	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
 			KASAN_SLAB_REDZONE, false);
mm/kasan/shadow.c
@@ -130,7 +130,7 @@ void kasan_poison(const void *addr, size_t size, u8 value, bool init)
 
 	/*
 	 * Perform shadow offset calculation based on untagged address, as
-	 * some of the callers (e.g. kasan_poison_object_data) pass tagged
+	 * some of the callers (e.g. kasan_poison_new_object) pass tagged
 	 * addresses to this function.
 	 */
 	addr = kasan_reset_tag(addr);
@@ -170,7 +170,7 @@ void kasan_unpoison(const void *addr, size_t size, bool init)
 
 	/*
 	 * Perform shadow offset calculation based on untagged address, as
-	 * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
+	 * some of the callers (e.g. kasan_unpoison_new_object) pass tagged
 	 * addresses to this function.
 	 */
 	addr = kasan_reset_tag(addr);
mm/slab.c
@@ -2327,11 +2327,9 @@ static void cache_init_objs_debug(struct kmem_cache *cachep, struct slab *slab)
 	 * They must also be threaded.
 	 */
 	if (cachep->ctor && !(cachep->flags & SLAB_POISON)) {
-		kasan_unpoison_object_data(cachep,
-					   objp + obj_offset(cachep));
+		kasan_unpoison_new_object(cachep, objp + obj_offset(cachep));
 		cachep->ctor(objp + obj_offset(cachep));
-		kasan_poison_object_data(
-			cachep, objp + obj_offset(cachep));
+		kasan_poison_new_object(cachep, objp + obj_offset(cachep));
 	}
 
 	if (cachep->flags & SLAB_RED_ZONE) {
@@ -2472,9 +2470,9 @@ static void cache_init_objs(struct kmem_cache *cachep,
 
 		/* constructor could break poison info */
 		if (DEBUG == 0 && cachep->ctor) {
-			kasan_unpoison_object_data(cachep, objp);
+			kasan_unpoison_new_object(cachep, objp);
 			cachep->ctor(objp);
-			kasan_poison_object_data(cachep, objp);
+			kasan_poison_new_object(cachep, objp);
 		}
 
 		if (!shuffled)
mm/slub.c
@@ -1860,9 +1860,9 @@ static void *setup_object(struct kmem_cache *s, void *object)
 	setup_object_debug(s, object);
 	object = kasan_init_slab_obj(s, object);
 	if (unlikely(s->ctor)) {
-		kasan_unpoison_object_data(s, object);
+		kasan_unpoison_new_object(s, object);
 		s->ctor(object);
-		kasan_poison_object_data(s, object);
+		kasan_poison_new_object(s, object);
 	}
 	return object;
 }
net/core/skbuff.c
@@ -337,7 +337,7 @@ static struct sk_buff *napi_skb_cache_get(void)
 	}
 
 	skb = nc->skb_cache[--nc->skb_count];
-	kasan_unpoison_object_data(skbuff_cache, skb);
+	kasan_unpoison_new_object(skbuff_cache, skb);
 
 	return skb;
 }
@@ -1309,13 +1309,13 @@ static void napi_skb_cache_put(struct sk_buff *skb)
 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 	u32 i;
 
-	kasan_poison_object_data(skbuff_cache, skb);
+	kasan_poison_new_object(skbuff_cache, skb);
 	nc->skb_cache[nc->skb_count++] = skb;
 
 	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
 		for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++)
-			kasan_unpoison_object_data(skbuff_cache,
-						   nc->skb_cache[i]);
+			kasan_unpoison_new_object(skbuff_cache,
+						  nc->skb_cache[i]);
 
 		kmem_cache_free_bulk(skbuff_cache, NAPI_SKB_CACHE_HALF,
 				     nc->skb_cache + NAPI_SKB_CACHE_HALF);