mm/slab: move kmalloc_slab() to mm/slab.h
In preparation for the next patch, move the kmalloc_slab() function to the
header, as it will have callers from two files, and make it inline. To avoid
unnecessary bloat, remove all size checks/warnings from kmalloc_slab() as
they just duplicate those in callers, especially after recent changes to
kmalloc_size_roundup(). We just need to adjust handling of zero size in
__do_kmalloc_node(). Also we can stop handling NULL result from
kmalloc_slab() there as that now cannot happen (unless called too early
during boot).

The size_index array becomes visible so rename it to a more specific
kmalloc_size_index.

Reviewed-by: Kees Cook <keescook@chromium.org>
Acked-by: David Rientjes <rientjes@google.com>
Tested-by: David Rientjes <rientjes@google.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
commit 5a9d31d980
parent b774d3e326
 mm/slab.h | 28
mm/slab.h
@@ -389,8 +389,32 @@ extern const struct kmalloc_info_struct {
 void setup_kmalloc_cache_index_table(void);
 void create_kmalloc_caches(slab_flags_t);
 
-/* Find the kmalloc slab corresponding for a certain size */
-struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags, unsigned long caller);
+extern u8 kmalloc_size_index[24];
+
+static inline unsigned int size_index_elem(unsigned int bytes)
+{
+	return (bytes - 1) / 8;
+}
+
+/*
+ * Find the kmem_cache structure that serves a given size of
+ * allocation
+ *
+ * This assumes size is larger than zero and not larger than
+ * KMALLOC_MAX_CACHE_SIZE and the caller must check that.
+ */
+static inline struct kmem_cache *
+kmalloc_slab(size_t size, gfp_t flags, unsigned long caller)
+{
+	unsigned int index;
+
+	if (size <= 192)
+		index = kmalloc_size_index[size_index_elem(size)];
+	else
+		index = fls(size - 1);
+
+	return kmalloc_caches[kmalloc_type(flags, caller)][index];
+}
 
 void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
 			      int node, size_t orig_size,
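To make the new inline helper concrete, here is a minimal userspace sketch
(not kernel code) of the same lookup: sizes up to 192 bytes index a small
table in (bytes - 1) / 8 steps, anything larger picks the power-of-two cache
via fls(size - 1). All *_sketch names are hypothetical, fls() is emulated
with the GCC/Clang __builtin_clz(), and the table values assume the default
KMALLOC_MIN_SIZE of 8 before any boot-time fixups.

#include <stdio.h>

/* fls() analogue: 1-based index of the most significant set bit */
static inline unsigned int fls_sketch(unsigned int x)
{
	return x ? 32 - (unsigned int)__builtin_clz(x) : 0;
}

/* same shape as the kernel's kmalloc_size_index[24] for sizes 8..192;
 * indices 1 and 2 are the odd-sized 96- and 192-byte caches */
static const unsigned char size_index_sketch[24] = {
	3, 4, 5, 5, 6, 6, 6, 6,	/*   8 .. 64  */
	1, 1, 1, 1, 7, 7, 7, 7,	/*  72 .. 128 */
	2, 2, 2, 2, 2, 2, 2, 2	/* 136 .. 192 */
};

static unsigned int kmalloc_index_sketch(size_t size)
{
	if (size <= 192)
		return size_index_sketch[(size - 1) / 8];
	return fls_sketch((unsigned int)(size - 1));
}

int main(void)
{
	size_t sizes[] = { 8, 24, 96, 192, 193, 4096 };

	for (unsigned int i = 0; i < 6; i++)
		printf("size %4zu -> cache index %u\n",
		       sizes[i], kmalloc_index_sketch(sizes[i]));
	return 0;
}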
mm/slab_common.c
@@ -665,7 +665,7 @@ EXPORT_SYMBOL(random_kmalloc_seed);
  * of two cache sizes there. The size of larger slabs can be determined using
  * fls.
  */
-static u8 size_index[24] __ro_after_init = {
+u8 kmalloc_size_index[24] __ro_after_init = {
 	3,	/* 8 */
 	4,	/* 16 */
 	5,	/* 24 */
@@ -692,33 +692,6 @@ static u8 size_index[24] __ro_after_init = {
 	2	/* 192 */
 };
 
-static inline unsigned int size_index_elem(unsigned int bytes)
-{
-	return (bytes - 1) / 8;
-}
-
-/*
- * Find the kmem_cache structure that serves a given size of
- * allocation
- */
-struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags, unsigned long caller)
-{
-	unsigned int index;
-
-	if (size <= 192) {
-		if (!size)
-			return ZERO_SIZE_PTR;
-
-		index = size_index[size_index_elem(size)];
-	} else {
-		if (WARN_ON_ONCE(size > KMALLOC_MAX_CACHE_SIZE))
-			return NULL;
-		index = fls(size - 1);
-	}
-
-	return kmalloc_caches[kmalloc_type(flags, caller)][index];
-}
-
 size_t kmalloc_size_roundup(size_t size)
 {
 	if (size && size <= KMALLOC_MAX_CACHE_SIZE) {
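The context lines above show why the removed checks were safe to drop:
kmalloc_slab()'s callers already screen out zero and oversized requests, as
kmalloc_size_roundup() does with its size && size <= KMALLOC_MAX_CACHE_SIZE
guard. A hedged userspace sketch of that caller-side pattern (illustrative
constants and *_sketch names; the 96/192-byte caches are ignored for
brevity, so this is not the kernel's exact rounding):

#include <stdio.h>

#define KMALLOC_MAX_CACHE_SIZE_SKETCH 8192u	/* arch-dependent in reality */

/* next power-of-two cache size >= size, starting at an 8-byte minimum */
static size_t cache_size_sketch(size_t size)
{
	size_t n = 8;

	while (n < size)
		n <<= 1;
	return n;
}

static size_t size_roundup_sketch(size_t size)
{
	/* the caller, not the lookup helper, screens the corner cases */
	if (size && size <= KMALLOC_MAX_CACHE_SIZE_SKETCH)
		return cache_size_sketch(size);
	return size;	/* 0 stays 0; larger sizes are page-allocator territory */
}

int main(void)
{
	printf("0 -> %zu, 100 -> %zu, 5000 -> %zu\n",
	       size_roundup_sketch(0), size_roundup_sketch(100),
	       size_roundup_sketch(5000));
	return 0;
}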
@@ -843,9 +816,9 @@ void __init setup_kmalloc_cache_index_table(void)
 	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
 		unsigned int elem = size_index_elem(i);
 
-		if (elem >= ARRAY_SIZE(size_index))
+		if (elem >= ARRAY_SIZE(kmalloc_size_index))
 			break;
-		size_index[elem] = KMALLOC_SHIFT_LOW;
+		kmalloc_size_index[elem] = KMALLOC_SHIFT_LOW;
 	}
 
 	if (KMALLOC_MIN_SIZE >= 64) {
@@ -854,7 +827,7 @@ void __init setup_kmalloc_cache_index_table(void)
 		 * is 64 byte.
 		 */
 		for (i = 64 + 8; i <= 96; i += 8)
-			size_index[size_index_elem(i)] = 7;
+			kmalloc_size_index[size_index_elem(i)] = 7;
 
 	}
 
@@ -865,7 +838,7 @@ void __init setup_kmalloc_cache_index_table(void)
 		 * instead.
 		 */
 		for (i = 128 + 8; i <= 192; i += 8)
-			size_index[size_index_elem(i)] = 8;
+			kmalloc_size_index[size_index_elem(i)] = 8;
 	}
 }
 
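These three hunks are pure renames, but the boot-time logic they touch is
worth spelling out: entries of the (now global) table get patched so that
sizes an architecture cannot serve from their natural cache are redirected
upward. A userspace sketch for an assumed KMALLOC_MIN_SIZE of 64 with
KMALLOC_SHIFT_LOW == 6 (both values are arch-specific, and per the hunk
above the 136..192 redirect only runs when KMALLOC_MIN_SIZE >= 128):

#include <stdio.h>

static unsigned char idx[24] = {
	3, 4, 5, 5, 6, 6, 6, 6, 1, 1, 1, 1,
	7, 7, 7, 7, 2, 2, 2, 2, 2, 2, 2, 2
};

int main(void)
{
	unsigned int i;
	const unsigned int min_size = 64;	/* assumed KMALLOC_MIN_SIZE */
	const unsigned int shift_low = 6;	/* assumed KMALLOC_SHIFT_LOW */

	/* sizes below the minimum all map to the smallest real cache */
	for (i = 8; i < min_size; i += 8)
		idx[(i - 1) / 8] = (unsigned char)shift_low;

	/* no 96-byte cache when KMALLOC_MIN_SIZE >= 64: 72..96 go to 128 */
	for (i = 64 + 8; i <= 96; i += 8)
		idx[(i - 1) / 8] = 7;

	/* the 136..192 -> 256-byte redirect (index 8) would apply only
	 * when KMALLOC_MIN_SIZE >= 128, so it is skipped in this model */

	for (i = 0; i < 24; i++)
		printf("size %3u -> kmalloc cache index %u\n",
		       (i + 1) * 8, idx[i]);
	return 0;
}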
@@ -977,10 +950,10 @@ void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller
 		return ret;
 	}
 
-	s = kmalloc_slab(size, flags, caller);
+	if (unlikely(!size))
+		return ZERO_SIZE_PTR;
 
-	if (unlikely(ZERO_OR_NULL_PTR(s)))
-		return s;
+	s = kmalloc_slab(size, flags, caller);
 
 	ret = __kmem_cache_alloc_node(s, flags, node, size, caller);
 	ret = kasan_kmalloc(s, ret, size, flags);
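Finally, the reordering in __do_kmalloc_node(): the zero-size check now
happens before the cache lookup, and since the inline kmalloc_slab() can no
longer return NULL or ZERO_SIZE_PTR, the result needs no screening
afterwards. A userspace sketch of the new control flow (ZERO_SIZE_PTR
modeled as a hypothetical magic non-NULL pointer, malloc() standing in for
the slab allocation; the large-size path is assumed to have returned
earlier, as in the real function):

#include <stdio.h>
#include <stdlib.h>

/* the kernel's ZERO_SIZE_PTR is a distinct non-NULL sentinel pointer */
#define ZERO_SIZE_PTR_SKETCH ((void *)16)

static void *do_kmalloc_sketch(size_t size)
{
	/* the size > KMALLOC_MAX_CACHE_SIZE large-object path is assumed
	 * to have returned already, as in the real __do_kmalloc_node() */

	if (size == 0)
		return ZERO_SIZE_PTR_SKETCH;	/* screened before the lookup now */

	/* stand-in for kmalloc_slab() + __kmem_cache_alloc_node(); after
	 * the patch the lookup cannot fail, so no NULL/zero screening */
	return malloc(size);
}

int main(void)
{
	void *p = do_kmalloc_sketch(0);
	void *q = do_kmalloc_sketch(32);

	printf("zero-size: %p, 32 bytes: %p\n", p, q);
	free(q);	/* p is a sentinel, never freed */
	return 0;
}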