mm: Convert check_heap_object() to use struct slab
Ensure that we're not seeing a tail page inside __check_heap_object() by
converting to a slab instead of a page. Take the opportunity to mark the
slab as const since we're not modifying it. Also move the declaration of
__check_heap_object() to mm/slab.h so it's not available to the wider
kernel.

[ vbabka@suse.cz: in check_heap_object() only convert to struct slab for
  actual PageSlab pages; use folio as intermediate step instead of page ]

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Roman Gushchin <guro@fb.com>
This commit is contained in:
parent 7213230af5
commit 0b3eb091d5
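The conversion leans on the struct slab helpers added earlier in this series: folio_slab(), slab_page() and slab_address() are cast-based, since struct slab overlays the head struct page. A minimal userspace sketch of that pattern (the stand-in types and the `virtual` field below are simplified illustrations, not the kernel's real layouts):

#include <stdio.h>

/* Simplified stand-ins: in the kernel, struct slab and struct folio
 * both overlay the memory of a (head) struct page. */
struct page { unsigned long flags; void *virtual; };
struct folio { struct page page; };
struct slab { struct page page; void *slab_cache; };

/* Cast-based conversions, modelling folio_slab()/slab_page(). */
#define folio_slab(folio)	((struct slab *)(folio))
#define slab_page(slab)		((struct page *)(slab))

/* Modelling slab_address(): the slab's base is its page's address. */
static void *slab_address(const struct slab *slab)
{
	return ((const struct page *)slab)->virtual;
}

int main(void)
{
	struct folio f = { .page = { 0, (void *)0x1000 } };
	struct slab *s = folio_slab(&f);

	/* All three views alias the same object; only the type changes. */
	printf("folio=%p slab=%p page=%p base=%p\n",
	       (void *)&f, (void *)s, (void *)slab_page(s), slab_address(s));
	return 0;
}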
include/linux/slab.h
@@ -189,14 +189,6 @@ bool kmem_valid_obj(void *object);
 void kmem_dump_obj(void *object);
 #endif
 
-#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
-void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
-			bool to_user);
-#else
-static inline void __check_heap_object(const void *ptr, unsigned long n,
-				       struct page *page, bool to_user) { }
-#endif
-
 /*
  * Some archs want to perform DMA into kmalloc caches and need a guaranteed
  * alignment larger than the alignment of a 64-bit integer.
mm/slab.c
@@ -372,8 +372,8 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 static int slab_max_order = SLAB_MAX_ORDER_LO;
 static bool slab_max_order_set __initdata;
 
-static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
-				 unsigned int idx)
+static inline void *index_to_obj(struct kmem_cache *cache,
+				 const struct page *page, unsigned int idx)
 {
 	return page->s_mem + cache->size * idx;
 }
@@ -4166,8 +4166,8 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
  * Returns NULL if check passes, otherwise const char * to name of cache
  * to indicate an error.
  */
-void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
-			 bool to_user)
+void __check_heap_object(const void *ptr, unsigned long n,
+			 const struct slab *slab, bool to_user)
 {
 	struct kmem_cache *cachep;
 	unsigned int objnr;
@@ -4176,15 +4176,15 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
 	ptr = kasan_reset_tag(ptr);
 
 	/* Find and validate object. */
-	cachep = page->slab_cache;
-	objnr = obj_to_index(cachep, page, (void *)ptr);
+	cachep = slab->slab_cache;
+	objnr = obj_to_index(cachep, slab_page(slab), (void *)ptr);
 	BUG_ON(objnr >= cachep->num);
 
 	/* Find offset within object. */
 	if (is_kfence_address(ptr))
 		offset = ptr - kfence_object_start(ptr);
 	else
-		offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
+		offset = ptr - index_to_obj(cachep, slab_page(slab), objnr) - obj_offset(cachep);
 
 	/* Allow address range falling entirely within usercopy region. */
 	if (offset >= cachep->useroffset &&
mm/slab.h
@@ -812,4 +812,15 @@ struct kmem_obj_info {
 void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
 #endif
 
+#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
+void __check_heap_object(const void *ptr, unsigned long n,
+			 const struct slab *slab, bool to_user);
+#else
+static inline
+void __check_heap_object(const void *ptr, unsigned long n,
+			 const struct slab *slab, bool to_user)
+{
+}
+#endif
+
 #endif /* MM_SLAB_H */
mm/slub.c
@@ -4485,8 +4485,8 @@ EXPORT_SYMBOL(__kmalloc_node);
  * Returns NULL if check passes, otherwise const char * to name of cache
  * to indicate an error.
  */
-void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
-			 bool to_user)
+void __check_heap_object(const void *ptr, unsigned long n,
+			 const struct slab *slab, bool to_user)
 {
 	struct kmem_cache *s;
 	unsigned int offset;
@@ -4495,10 +4495,10 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
 	ptr = kasan_reset_tag(ptr);
 
 	/* Find object and usable object size. */
-	s = page->slab_cache;
+	s = slab->slab_cache;
 
 	/* Reject impossible pointers. */
-	if (ptr < page_address(page))
+	if (ptr < slab_address(slab))
 		usercopy_abort("SLUB object not in SLUB page?!", NULL,
 			       to_user, 0, n);
 
@@ -4506,7 +4506,7 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
 	if (is_kfence)
 		offset = ptr - kfence_object_start(ptr);
 	else
-		offset = (ptr - page_address(page)) % s->size;
+		offset = (ptr - slab_address(slab)) % s->size;
 
 	/* Adjust for redzone and reject if within the redzone. */
 	if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) {
mm/usercopy.c
@@ -20,6 +20,7 @@
 #include <linux/atomic.h>
 #include <linux/jump_label.h>
 #include <asm/sections.h>
+#include "slab.h"
 
 /*
  * Checks if a given pointer and length is contained by the current
@@ -223,7 +224,7 @@ static inline void check_page_span(const void *ptr, unsigned long n,
 static inline void check_heap_object(const void *ptr, unsigned long n,
 				     bool to_user)
 {
-	struct page *page;
+	struct folio *folio;
 
 	if (!virt_addr_valid(ptr))
 		return;
@@ -231,16 +232,16 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
 	/*
 	 * When CONFIG_HIGHMEM=y, kmap_to_page() will give either the
 	 * highmem page or fallback to virt_to_page(). The following
-	 * is effectively a highmem-aware virt_to_head_page().
+	 * is effectively a highmem-aware virt_to_slab().
 	 */
-	page = compound_head(kmap_to_page((void *)ptr));
+	folio = page_folio(kmap_to_page((void *)ptr));
 
-	if (PageSlab(page)) {
+	if (folio_test_slab(folio)) {
 		/* Check slab allocator for flags and size. */
-		__check_heap_object(ptr, n, page, to_user);
+		__check_heap_object(ptr, n, folio_slab(folio), to_user);
 	} else {
 		/* Verify object does not incorrectly span multiple pages. */
-		check_page_span(ptr, n, page, to_user);
+		check_page_span(ptr, n, folio_page(folio, 0), to_user);
 	}
 }
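The "not seeing a tail page" guarantee from the commit message comes from page_folio() always resolving to the head page of a compound page, so the slab passed to __check_heap_object() can never be a tail. A toy model of that invariant (the explicit head pointer is an illustration; the kernel encodes it in page flags):

#include <stdio.h>

/* Toy model: a compound page is a head page followed by tail pages
 * that point back at their head. The kernel packs this into flags. */
struct page {
	struct page *head;	/* == itself for a head page */
};

/* Model of page_folio(): always lands on the head page, so code that
 * receives a folio (or a slab cast from it) can never see a tail. */
static struct page *page_folio(struct page *page)
{
	return page->head;
}

int main(void)
{
	struct page pages[4];
	int i;

	/* Build one order-2 compound page: pages[0] head, pages[1..3] tails. */
	for (i = 0; i < 4; i++)
		pages[i].head = &pages[0];

	/* Every page in the compound resolves to folio 0, the head. */
	for (i = 0; i < 4; i++)
		printf("page %d -> folio %ld\n", i,
		       (long)(page_folio(&pages[i]) - pages));
	return 0;
}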