libbpf: Add support for bpf_arena.
mmap() bpf_arena right after creation, since the kernel needs to remember
the address returned from mmap. This is user_vm_start. LLVM will generate
bpf_arena_cast_user() instructions where necessary, and the JIT will add
the upper 32 bits of user_vm_start to such pointers.

Fix up bpf_map_mmap_sz() to compute the mmap size as
map->value_size * map->max_entries for arrays and
PAGE_SIZE * map->max_entries for arenas.

Don't set BTF at arena creation time, since the arena map type doesn't
support it.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20240308010812.89848-9-alexei.starovoitov@gmail.com
parent 4d2b56081c
commit 79ff13e991
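As a standalone userspace illustration (not part of this patch; the map
name and page count are made up), creating an arena with bpf_map_create()
and mmap()ing it right away looks roughly like this, using the same
parameters the probe at the end of this patch uses:

#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>
#include <bpf/bpf.h>

int main(void)
{
	const long page_sz = sysconf(_SC_PAGE_SIZE);
	const unsigned int nr_pages = 4; /* arena max_entries counts pages */
	LIBBPF_OPTS(bpf_map_create_opts, opts,
		.map_flags = BPF_F_MMAPABLE,
		.map_extra = 0, /* 0 lets the kernel pick user_vm_start */
	);
	void *arena;
	int fd;

	/* key_size and value_size must be 0 for an arena */
	fd = bpf_map_create(BPF_MAP_TYPE_ARENA, "arena", 0, 0, nr_pages, &opts);
	if (fd < 0)
		return 1;

	/* mmap() right after creation so the kernel records user_vm_start */
	arena = mmap(NULL, page_sz * nr_pages, PROT_READ | PROT_WRITE,
		     MAP_SHARED, fd, 0);
	if (arena == MAP_FAILED)
		return 1;

	printf("arena mapped at %p\n", arena);
	return 0;
}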
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -185,6 +185,7 @@ static const char * const map_type_name[] = {
 	[BPF_MAP_TYPE_BLOOM_FILTER]	= "bloom_filter",
 	[BPF_MAP_TYPE_USER_RINGBUF]	= "user_ringbuf",
 	[BPF_MAP_TYPE_CGRP_STORAGE]	= "cgrp_storage",
+	[BPF_MAP_TYPE_ARENA]		= "arena",
 };
 
 static const char * const prog_type_name[] = {
@@ -1684,7 +1685,7 @@ static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
 	return map;
 }
 
-static size_t bpf_map_mmap_sz(unsigned int value_sz, unsigned int max_entries)
+static size_t array_map_mmap_sz(unsigned int value_sz, unsigned int max_entries)
 {
 	const long page_sz = sysconf(_SC_PAGE_SIZE);
 	size_t map_sz;
@@ -1694,6 +1695,20 @@ static size_t bpf_map_mmap_sz(unsigned int value_sz, unsigned int max_entries)
 	return map_sz;
 }
 
+static size_t bpf_map_mmap_sz(const struct bpf_map *map)
+{
+	const long page_sz = sysconf(_SC_PAGE_SIZE);
+
+	switch (map->def.type) {
+	case BPF_MAP_TYPE_ARRAY:
+		return array_map_mmap_sz(map->def.value_size, map->def.max_entries);
+	case BPF_MAP_TYPE_ARENA:
+		return page_sz * map->def.max_entries;
+	default:
+		return 0; /* not supported */
+	}
+}
+
 static int bpf_map_mmap_resize(struct bpf_map *map, size_t old_sz, size_t new_sz)
 {
 	void *mmaped;
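For intuition, a tiny standalone sketch (not part of the patch) of the
arena branch of the new helper: max_entries counts pages, so the mapped
size scales with the system page size.

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const long page_sz = sysconf(_SC_PAGE_SIZE);
	const unsigned int max_entries = 256; /* illustrative page count */

	/* with 4 KiB pages this prints 1048576, i.e. a 1 MiB arena */
	printf("arena mmap size: %ld\n", page_sz * max_entries);
	return 0;
}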
@@ -1847,7 +1862,7 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
 	pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
 		 map->name, map->sec_idx, map->sec_offset, def->map_flags);
 
-	mmap_sz = bpf_map_mmap_sz(map->def.value_size, map->def.max_entries);
+	mmap_sz = bpf_map_mmap_sz(map);
 	map->mmaped = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
 			   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 	if (map->mmaped == MAP_FAILED) {
@@ -5017,6 +5032,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
 	case BPF_MAP_TYPE_SOCKHASH:
 	case BPF_MAP_TYPE_QUEUE:
 	case BPF_MAP_TYPE_STACK:
+	case BPF_MAP_TYPE_ARENA:
 		create_attr.btf_fd = 0;
 		create_attr.btf_key_type_id = 0;
 		create_attr.btf_value_type_id = 0;
@@ -5261,7 +5277,19 @@ retry:
 			if (err < 0)
 				goto err_out;
 		}
-
+		if (map->def.type == BPF_MAP_TYPE_ARENA) {
+			map->mmaped = mmap((void *)map->map_extra, bpf_map_mmap_sz(map),
+					   PROT_READ | PROT_WRITE,
+					   map->map_extra ? MAP_SHARED | MAP_FIXED : MAP_SHARED,
+					   map->fd, 0);
+			if (map->mmaped == MAP_FAILED) {
+				err = -errno;
+				map->mmaped = NULL;
+				pr_warn("map '%s': failed to mmap arena: %d\n",
+					map->name, err);
+				return err;
+			}
+		}
 		if (map->init_slots_sz && map->def.type != BPF_MAP_TYPE_PROG_ARRAY) {
 			err = init_map_in_map_slots(obj, map);
 			if (err < 0)
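A hedged usage sketch (the object path and map name are assumptions, not
from this patch): setting a non-zero map_extra before load steers libbpf
into the MAP_SHARED | MAP_FIXED branch above, pinning the arena at a
caller-chosen, page-aligned address.

#include <bpf/libbpf.h>

static int load_with_pinned_arena(void)
{
	struct bpf_object *obj;
	struct bpf_map *arena;

	obj = bpf_object__open("prog.bpf.o"); /* hypothetical object file */
	if (!obj)
		return -1;

	arena = bpf_object__find_map_by_name(obj, "arena"); /* hypothetical name */
	if (!arena)
		return -1;

	/* non-zero map_extra => mmap(..., MAP_SHARED | MAP_FIXED, ...) above */
	bpf_map__set_map_extra(arena, 0x100000000ULL);

	return bpf_object__load(obj);
}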
@@ -8761,7 +8789,7 @@ static void bpf_map__destroy(struct bpf_map *map)
 	if (map->mmaped) {
 		size_t mmap_sz;
 
-		mmap_sz = bpf_map_mmap_sz(map->def.value_size, map->def.max_entries);
+		mmap_sz = bpf_map_mmap_sz(map);
 		munmap(map->mmaped, mmap_sz);
 		map->mmaped = NULL;
 	}
@@ -9995,11 +10023,14 @@ int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
 		return libbpf_err(-EBUSY);
 
 	if (map->mmaped) {
-		int err;
 		size_t mmap_old_sz, mmap_new_sz;
+		int err;
+
+		if (map->def.type != BPF_MAP_TYPE_ARRAY)
+			return -EOPNOTSUPP;
 
-		mmap_old_sz = bpf_map_mmap_sz(map->def.value_size, map->def.max_entries);
-		mmap_new_sz = bpf_map_mmap_sz(size, map->def.max_entries);
+		mmap_old_sz = bpf_map_mmap_sz(map);
+		mmap_new_sz = array_map_mmap_sz(size, map->def.max_entries);
 		err = bpf_map_mmap_resize(map, mmap_old_sz, mmap_new_sz);
 		if (err) {
 			pr_warn("map '%s': failed to resize memory-mapped region: %d\n",
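The practical effect: bpf_map__set_value_size() keeps working for
mmap-backed ARRAY maps such as global data, while any other mmap-backed
type now fails with -EOPNOTSUPP. An illustrative sketch (the ".data"
lookup name is an assumption):

#include <bpf/libbpf.h>

static int double_data_section(struct bpf_object *obj)
{
	struct bpf_map *data = bpf_object__find_map_by_name(obj, ".data");

	if (!data)
		return -1;

	/* works pre-load for the ARRAY-backed .data map; an arena map
	 * would now get -EOPNOTSUPP from the check added above */
	return bpf_map__set_value_size(data, 2 * bpf_map__value_size(data));
}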
@@ -13530,7 +13561,7 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
 
 	for (i = 0; i < s->map_cnt; i++) {
 		struct bpf_map *map = *s->maps[i].map;
-		size_t mmap_sz = bpf_map_mmap_sz(map->def.value_size, map->def.max_entries);
+		size_t mmap_sz = bpf_map_mmap_sz(map);
 		int prot, map_fd = map->fd;
 		void **mmaped = s->maps[i].mmaped;
 
diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c
--- a/tools/lib/bpf/libbpf_probes.c
+++ b/tools/lib/bpf/libbpf_probes.c
@@ -338,6 +338,13 @@ static int probe_map_create(enum bpf_map_type map_type)
 		key_size = 0;
 		max_entries = 1;
 		break;
+	case BPF_MAP_TYPE_ARENA:
+		key_size = 0;
+		value_size = 0;
+		max_entries = 1;	/* one page */
+		opts.map_extra = 0;	/* can mmap() at any address */
+		opts.map_flags = BPF_F_MMAPABLE;
+		break;
 	case BPF_MAP_TYPE_HASH:
 	case BPF_MAP_TYPE_ARRAY:
 	case BPF_MAP_TYPE_PROG_ARRAY:
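With the probe wired up, callers can feature-detect arenas through the
existing libbpf_probe_bpf_map_type() API; a minimal sketch:

#include <stdio.h>
#include <bpf/libbpf.h>

int main(void)
{
	/* 1 = supported, 0 = not supported, <0 = probe failure */
	int ret = libbpf_probe_bpf_map_type(BPF_MAP_TYPE_ARENA, NULL);

	printf("BPF_MAP_TYPE_ARENA support: %d\n", ret);
	return 0;
}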