bpf: Ensure unit_size is matched with slab cache object size
Add an extra check in bpf_mem_alloc_init() to ensure that the unit_size of each bpf_mem_cache matches the object_size of the underlying slab cache. If the two sizes do not match, print a warning once and return -EINVAL from bpf_mem_alloc_init(), so the mismatch is caught early and the potential issue is prevented.

Suggested-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/r/20230908133923.2675053-4-houtao@huaweicloud.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
commit c930472552 (parent b1d53958b6)
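Background for the check (not part of the commit itself): the slab allocator rounds a kmalloc() request up to the nearest kmalloc cache size, so ksize() on an allocated object can report more bytes than were requested; check_obj_size() below compares exactly these two numbers for the first prefilled object of a cache. A minimal illustrative snippet of that divergence, assuming a kernel context where <linux/slab.h> is available; the sizes are examples only:

#include <linux/slab.h>

	/* Illustration only: a 100-byte request is typically served from the
	 * kmalloc-128 cache, so ksize() reports the slab object size (128),
	 * not the requested size (100).
	 */
	void *obj = kmalloc(100, GFP_KERNEL);

	if (obj && ksize(obj) != 100)
		pr_info("requested 100 bytes, slab object size is %zu\n", ksize(obj));
	kfree(obj);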
@@ -486,6 +486,24 @@ static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
 	alloc_bulk(c, c->unit_size <= 256 ? 4 : 1, cpu_to_node(cpu), false);
 }
 
+static int check_obj_size(struct bpf_mem_cache *c, unsigned int idx)
+{
+	struct llist_node *first;
+	unsigned int obj_size;
+
+	first = c->free_llist.first;
+	if (!first)
+		return 0;
+
+	obj_size = ksize(first);
+	if (obj_size != c->unit_size) {
+		WARN_ONCE(1, "bpf_mem_cache[%u]: unexpected object size %u, expect %u\n",
+			  idx, obj_size, c->unit_size);
+		return -EINVAL;
+	}
+	return 0;
+}
+
 /* When size != 0 bpf_mem_cache for each cpu.
  * This is typical bpf hash map use case when all elements have equal size.
  *
@@ -496,10 +514,10 @@ static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
 int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
 {
 	static u16 sizes[NUM_CACHES] = {96, 192, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096};
+	int cpu, i, err, unit_size, percpu_size = 0;
 	struct bpf_mem_caches *cc, __percpu *pcc;
 	struct bpf_mem_cache *c, __percpu *pc;
 	struct obj_cgroup *objcg = NULL;
-	int cpu, i, unit_size, percpu_size = 0;
 
 	if (size) {
 		pc = __alloc_percpu_gfp(sizeof(*pc), 8, GFP_KERNEL);
@@ -537,6 +555,7 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
 	pcc = __alloc_percpu_gfp(sizeof(*cc), 8, GFP_KERNEL);
 	if (!pcc)
 		return -ENOMEM;
+	err = 0;
 #ifdef CONFIG_MEMCG_KMEM
 	objcg = get_obj_cgroup_from_current();
 #endif
@@ -557,10 +576,20 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
 			if (i != bpf_mem_cache_idx(c->unit_size))
 				continue;
 			prefill_mem_cache(c, cpu);
+			err = check_obj_size(c, i);
+			if (err)
+				goto out;
 		}
 	}
+
+out:
 	ma->caches = pcc;
-	return 0;
+	/* refill_work is either zeroed or initialized, so it is safe to
+	 * call irq_work_sync().
+	 */
+	if (err)
+		bpf_mem_alloc_destroy(ma);
+	return err;
 }
 
 static void drain_mem_cache(struct bpf_mem_cache *c)
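With this check in place, a unit_size/object_size mismatch fails bpf_mem_alloc_init() instead of surfacing later as subtle misbehaviour; as the last hunk shows, the allocator tears itself down via bpf_mem_alloc_destroy() before returning the error. A hedged caller-side sketch of how the new error propagates (the variable names and cleanup label are illustrative, not taken from this patch):

	/* Illustrative only: a map implementation that builds its element
	 * allocator with bpf_mem_alloc_init() simply propagates the error;
	 * on -EINVAL the allocator has already been destroyed internally.
	 */
	err = bpf_mem_alloc_init(&htab->ma, elem_size, false);
	if (err)
		goto free_htab;	/* hypothetical cleanup label */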