mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-11 13:04:03 +08:00
bpf: Don't prefill for unused bpf_mem_cache
When the unit_size of a bpf_mem_cache does not match the object_size of the underlying slab cache, that bpf_mem_cache will not be used: allocations of that size are redirected to a bpf_mem_cache with a bigger unit_size instead. There is therefore no need to prefill these unused bpf_mem_caches. Signed-off-by: Hou Tao <houtao1@huawei.com> Link: https://lore.kernel.org/r/20230908133923.2675053-3-houtao@huaweicloud.com Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
parent
d52b59315b
commit
b1d53958b6
@ -459,8 +459,7 @@ static void notrace irq_work_raise(struct bpf_mem_cache *c)
|
||||
* Typical case will be between 11K and 116K closer to 11K.
|
||||
* bpf progs can and should share bpf_mem_cache when possible.
|
||||
*/
|
||||
|
||||
static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
|
||||
static void init_refill_work(struct bpf_mem_cache *c)
|
||||
{
|
||||
init_irq_work(&c->refill_work, bpf_mem_refill);
|
||||
if (c->unit_size <= 256) {
|
||||
@ -476,7 +475,10 @@ static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
|
||||
c->high_watermark = max(96 * 256 / c->unit_size, 3);
|
||||
}
|
||||
c->batch = max((c->high_watermark - c->low_watermark) / 4 * 3, 1);
|
||||
}
|
||||
|
||||
static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
|
||||
{
|
||||
/* To avoid consuming memory assume that 1st run of bpf
|
||||
* prog won't be doing more than 4 map_update_elem from
|
||||
* irq disabled region
|
||||
@ -521,6 +523,7 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
|
||||
c->objcg = objcg;
|
||||
c->percpu_size = percpu_size;
|
||||
c->tgt = c;
|
||||
init_refill_work(c);
|
||||
prefill_mem_cache(c, cpu);
|
||||
}
|
||||
ma->cache = pc;
|
||||
@ -544,6 +547,15 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
|
||||
c->unit_size = sizes[i];
|
||||
c->objcg = objcg;
|
||||
c->tgt = c;
|
||||
|
||||
init_refill_work(c);
|
||||
/* Another bpf_mem_cache will be used when allocating
|
||||
* c->unit_size in bpf_mem_alloc(), so doesn't prefill
|
||||
* for the bpf_mem_cache because these free objects will
|
||||
* never be used.
|
||||
*/
|
||||
if (i != bpf_mem_cache_idx(c->unit_size))
|
||||
continue;
|
||||
prefill_mem_cache(c, cpu);
|
||||
}
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user