bpf: Add bpf_mem_alloc_check_size() helper
Introduce bpf_mem_alloc_check_size() to check whether the allocation size exceeds the limit of the kmalloc-equivalent allocator. The upper limit for a percpu allocation is LLIST_NODE_SZ bytes larger than for a non-percpu allocation, so a percpu argument is added to the helper.

The helper will be used in the following patch to check whether the size parameter passed to bpf_mem_alloc() is too big.

Signed-off-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/r/20241030100516.3633640-3-houtao@huaweicloud.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 101ccfbabf
commit 62a898b07b
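
Before the diff itself, a quick illustration of the bounds being introduced. Per the in-code comment added below, only a non-percpu chunk carries the inline struct llist_node header, while a percpu allocation keeps the user data in a separate percpu area, so only the non-percpu case pays the LLIST_NODE_SZ overhead. The following is a minimal userspace model, not kernel code; it assumes the 4096-byte cap added by this patch and a 64-bit build where sizeof(struct llist_node) equals sizeof(void *), i.e. 8 bytes.

/* Userspace model of bpf_mem_alloc_check_size(), for illustration only.
 * LLIST_NODE_SZ stands in for sizeof(struct llist_node): a single
 * pointer, i.e. 8 bytes on a 64-bit build.
 */
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define BPF_MEM_ALLOC_SIZE_MAX	4096
#define LLIST_NODE_SZ		sizeof(void *)

static int check_size(bool percpu, size_t size)
{
	/* only non-percpu chunks carry the inline llist_node header */
	if ((percpu && size > BPF_MEM_ALLOC_SIZE_MAX) ||
	    (!percpu && size > BPF_MEM_ALLOC_SIZE_MAX - LLIST_NODE_SZ))
		return -E2BIG;
	return 0;
}

int main(void)
{
	printf("percpu,     4096 bytes: %d\n", check_size(true, 4096));  /* 0 */
	printf("non-percpu, 4096 bytes: %d\n", check_size(false, 4096)); /* -E2BIG */
	printf("non-percpu, 4088 bytes: %d\n", check_size(false, 4088)); /* 0 */
	return 0;
}

Under those assumptions the largest accepted non-percpu request is 4088 bytes, while a percpu request may use the full 4096.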
include/linux/bpf_mem_alloc.h

@@ -33,6 +33,9 @@ int bpf_mem_alloc_percpu_init(struct bpf_mem_alloc *ma, struct obj_cgroup *objcg
 int bpf_mem_alloc_percpu_unit_init(struct bpf_mem_alloc *ma, int size);
 void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma);
 
+/* Check the allocation size for kmalloc equivalent allocator */
+int bpf_mem_alloc_check_size(bool percpu, size_t size);
+
 /* kmalloc/kfree equivalent: */
 void *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size);
 void bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr);
kernel/bpf/memalloc.c

@@ -35,6 +35,8 @@
  */
 #define LLIST_NODE_SZ sizeof(struct llist_node)
 
+#define BPF_MEM_ALLOC_SIZE_MAX 4096
+
 /* similar to kmalloc, but sizeof == 8 bucket is gone */
 static u8 size_index[24] __ro_after_init = {
 	3,	/* 8 */
@@ -65,7 +67,7 @@ static u8 size_index[24] __ro_after_init = {
 
 static int bpf_mem_cache_idx(size_t size)
 {
-	if (!size || size > 4096)
+	if (!size || size > BPF_MEM_ALLOC_SIZE_MAX)
 		return -1;
 
 	if (size <= 192)
@@ -1005,3 +1007,13 @@ void notrace *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags)
 
 	return !ret ? NULL : ret + LLIST_NODE_SZ;
 }
+
+int bpf_mem_alloc_check_size(bool percpu, size_t size)
+{
+	/* The size of percpu allocation doesn't have LLIST_NODE_SZ overhead */
+	if ((percpu && size > BPF_MEM_ALLOC_SIZE_MAX) ||
+	    (!percpu && size > BPF_MEM_ALLOC_SIZE_MAX - LLIST_NODE_SZ))
+		return -E2BIG;
+
+	return 0;
+}
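
The commit message says the helper is meant to gate bpf_mem_alloc() in a follow-up patch. Below is a minimal sketch of that calling pattern; the wrapper name and placement are hypothetical and illustrative only, not taken from the actual follow-up change.

/* Hypothetical wrapper, for illustration only: validate the requested
 * size first so bpf_mem_alloc() is never asked for more than the
 * kmalloc-equivalent allocator supports.
 */
#include <linux/bpf_mem_alloc.h>

void *bpf_mem_alloc_checked(struct bpf_mem_alloc *ma, size_t size,
			    bool percpu)
{
	int err;

	err = bpf_mem_alloc_check_size(percpu, size);
	if (err)
		return NULL;	/* request exceeds the allocator's limit */

	return bpf_mem_alloc(ma, size);
}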