diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
index 6a51cfe4c2d6..aa0fbf000a12 100644
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -490,27 +490,6 @@ static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
 	alloc_bulk(c, c->unit_size <= 256 ? 4 : 1, cpu_to_node(cpu), false);
 }
 
-static int check_obj_size(struct bpf_mem_cache *c, unsigned int idx)
-{
-	struct llist_node *first;
-	unsigned int obj_size;
-
-	first = c->free_llist.first;
-	if (!first)
-		return 0;
-
-	if (c->percpu_size)
-		obj_size = pcpu_alloc_size(((void **)first)[1]);
-	else
-		obj_size = ksize(first);
-	if (obj_size != c->unit_size) {
-		WARN_ONCE(1, "bpf_mem_cache[%u]: percpu %d, unexpected object size %u, expect %u\n",
-			  idx, c->percpu_size, obj_size, c->unit_size);
-		return -EINVAL;
-	}
-	return 0;
-}
-
 /* When size != 0 bpf_mem_cache for each cpu.
  * This is typical bpf hash map use case when all elements have equal size.
  *
@@ -521,10 +500,10 @@ static int check_obj_size(struct bpf_mem_cache *c, unsigned int idx)
 int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
 {
 	static u16 sizes[NUM_CACHES] = {96, 192, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096};
-	int cpu, i, err, unit_size, percpu_size = 0;
 	struct bpf_mem_caches *cc, __percpu *pcc;
 	struct bpf_mem_cache *c, __percpu *pc;
 	struct obj_cgroup *objcg = NULL;
+	int cpu, i, unit_size, percpu_size = 0;
 
 	/* room for llist_node and per-cpu pointer */
 	if (percpu)
@@ -560,7 +539,6 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
 	pcc = __alloc_percpu_gfp(sizeof(*cc), 8, GFP_KERNEL);
 	if (!pcc)
 		return -ENOMEM;
-	err = 0;
 #ifdef CONFIG_MEMCG_KMEM
 	objcg = get_obj_cgroup_from_current();
 #endif
@@ -574,28 +552,12 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
 			c->tgt = c;
 
 			init_refill_work(c);
-			/* Another bpf_mem_cache will be used when allocating
-			 * c->unit_size in bpf_mem_alloc(), so doesn't prefill
-			 * for the bpf_mem_cache because these free objects will
-			 * never be used.
-			 */
-			if (i != bpf_mem_cache_idx(c->unit_size))
-				continue;
 			prefill_mem_cache(c, cpu);
-			err = check_obj_size(c, i);
-			if (err)
-				goto out;
 		}
 	}
 
-out:
 	ma->caches = pcc;
-	/* refill_work is either zeroed or initialized, so it is safe to
-	 * call irq_work_sync().
-	 */
-	if (err)
-		bpf_mem_alloc_destroy(ma);
-	return err;
+	return 0;
 }
 
 static void drain_mem_cache(struct bpf_mem_cache *c)
@@ -869,7 +831,7 @@ void notrace *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size)
 	void *ret;
 
 	if (!size)
-		return ZERO_SIZE_PTR;
+		return NULL;
 
 	idx = bpf_mem_cache_idx(size + LLIST_NODE_SZ);
 	if (idx < 0)
@@ -879,26 +841,17 @@ void notrace *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size)
 	return !ret ? NULL : ret + LLIST_NODE_SZ;
 }
 
-static notrace int bpf_mem_free_idx(void *ptr, bool percpu)
-{
-	size_t size;
-
-	if (percpu)
-		size = pcpu_alloc_size(*((void **)ptr));
-	else
-		size = ksize(ptr - LLIST_NODE_SZ);
-	return bpf_mem_cache_idx(size);
-}
-
 void notrace bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr)
 {
+	struct bpf_mem_cache *c;
 	int idx;
 
 	if (!ptr)
 		return;
 
-	idx = bpf_mem_free_idx(ptr, ma->percpu);
-	if (idx < 0)
+	c = *(void **)(ptr - LLIST_NODE_SZ);
+	idx = bpf_mem_cache_idx(c->unit_size);
+	if (WARN_ON_ONCE(idx < 0))
 		return;
 
 	unit_free(this_cpu_ptr(ma->caches)->cache + idx, ptr);
@@ -906,13 +859,15 @@ void notrace bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr)
 
 void notrace bpf_mem_free_rcu(struct bpf_mem_alloc *ma, void *ptr)
 {
+	struct bpf_mem_cache *c;
 	int idx;
 
 	if (!ptr)
 		return;
 
-	idx = bpf_mem_free_idx(ptr, ma->percpu);
-	if (idx < 0)
+	c = *(void **)(ptr - LLIST_NODE_SZ);
+	idx = bpf_mem_cache_idx(c->unit_size);
+	if (WARN_ON_ONCE(idx < 0))
 		return;
 
 	unit_free_rcu(this_cpu_ptr(ma->caches)->cache + idx, ptr);
@@ -986,41 +941,3 @@ void notrace *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags)
 
 	return !ret ? NULL : ret + LLIST_NODE_SZ;
 }
-
-/* The alignment of dynamic per-cpu area is 8, so c->unit_size and the
- * actual size of dynamic per-cpu area will always be matched and there is
- * no need to adjust size_index for per-cpu allocation. However for the
- * simplicity of the implementation, use an unified size_index for both
- * kmalloc and per-cpu allocation.
- */
-static __init int bpf_mem_cache_adjust_size(void)
-{
-	unsigned int size;
-
-	/* Adjusting the indexes in size_index() according to the object_size
-	 * of underlying slab cache, so bpf_mem_alloc() will select a
-	 * bpf_mem_cache with unit_size equal to the object_size of
-	 * the underlying slab cache.
-	 *
-	 * The maximal value of KMALLOC_MIN_SIZE and __kmalloc_minalign() is
-	 * 256-bytes, so only do adjustment for [8-bytes, 192-bytes].
-	 */
-	for (size = 192; size >= 8; size -= 8) {
-		unsigned int kmalloc_size, index;
-
-		kmalloc_size = kmalloc_size_roundup(size);
-		if (kmalloc_size == size)
-			continue;
-
-		if (kmalloc_size <= 192)
-			index = size_index[(kmalloc_size - 1) / 8];
-		else
-			index = fls(kmalloc_size - 1) - 1;
-		/* Only overwrite if necessary */
-		if (size_index[(size - 1) / 8] != index)
-			size_index[(size - 1) / 8] = index;
-	}
-
-	return 0;
-}
-subsys_initcall(bpf_mem_cache_adjust_size);