bpf: Compute map_btf_id during build time

Currently, the field 'map_btf_id' in 'struct bpf_map_ops' for all map
types is computed during vmlinux-btf init:

  btf_parse_vmlinux() -> btf_vmlinux_map_ids_init()

It looks up the btf_type according to the 'map_btf_name' field in
'struct bpf_map_ops'. This process can be done during build time,
thanks to Jiri's resolve_btfids.

selftest of map_ptr has passed:

  #96 map_ptr:OK
  Summary: 1/0 PASSED, 0 SKIPPED, 0 FAILED

Reported-by: kernel test robot <lkp@intel.com>
Signed-off-by: Menglong Dong <imagedong@tencent.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
Menglong Dong 2022-04-25 21:32:47 +08:00 committed by Alexei Starovoitov
parent 367590b7fc
commit c317ab71fa
19 changed files with 62 additions and 129 deletions

View File

@ -148,8 +148,7 @@ struct bpf_map_ops {
bpf_callback_t callback_fn, bpf_callback_t callback_fn,
void *callback_ctx, u64 flags); void *callback_ctx, u64 flags);
/* BTF name and id of struct allocated by map_alloc */ /* BTF id of struct allocated by map_alloc */
const char * const map_btf_name;
int *map_btf_id; int *map_btf_id;
/* bpf_iter info used to open a seq_file */ /* bpf_iter info used to open a seq_file */

View File

@ -11,6 +11,7 @@
#include <linux/perf_event.h> #include <linux/perf_event.h>
#include <uapi/linux/btf.h> #include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h> #include <linux/rcupdate_trace.h>
#include <linux/btf_ids.h>
#include "map_in_map.h" #include "map_in_map.h"
@ -690,7 +691,7 @@ static int bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_
return num_elems; return num_elems;
} }
static int array_map_btf_id; BTF_ID_LIST_SINGLE(array_map_btf_ids, struct, bpf_array)
const struct bpf_map_ops array_map_ops = { const struct bpf_map_ops array_map_ops = {
.map_meta_equal = array_map_meta_equal, .map_meta_equal = array_map_meta_equal,
.map_alloc_check = array_map_alloc_check, .map_alloc_check = array_map_alloc_check,
@ -711,12 +712,10 @@ const struct bpf_map_ops array_map_ops = {
.map_update_batch = generic_map_update_batch, .map_update_batch = generic_map_update_batch,
.map_set_for_each_callback_args = map_set_for_each_callback_args, .map_set_for_each_callback_args = map_set_for_each_callback_args,
.map_for_each_callback = bpf_for_each_array_elem, .map_for_each_callback = bpf_for_each_array_elem,
.map_btf_name = "bpf_array", .map_btf_id = &array_map_btf_ids[0],
.map_btf_id = &array_map_btf_id,
.iter_seq_info = &iter_seq_info, .iter_seq_info = &iter_seq_info,
}; };
static int percpu_array_map_btf_id;
const struct bpf_map_ops percpu_array_map_ops = { const struct bpf_map_ops percpu_array_map_ops = {
.map_meta_equal = bpf_map_meta_equal, .map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = array_map_alloc_check, .map_alloc_check = array_map_alloc_check,
@ -732,8 +731,7 @@ const struct bpf_map_ops percpu_array_map_ops = {
.map_update_batch = generic_map_update_batch, .map_update_batch = generic_map_update_batch,
.map_set_for_each_callback_args = map_set_for_each_callback_args, .map_set_for_each_callback_args = map_set_for_each_callback_args,
.map_for_each_callback = bpf_for_each_array_elem, .map_for_each_callback = bpf_for_each_array_elem,
.map_btf_name = "bpf_array", .map_btf_id = &array_map_btf_ids[0],
.map_btf_id = &percpu_array_map_btf_id,
.iter_seq_info = &iter_seq_info, .iter_seq_info = &iter_seq_info,
}; };
@ -1112,7 +1110,6 @@ static void prog_array_map_free(struct bpf_map *map)
* Thus, prog_array_map cannot be used as an inner_map * Thus, prog_array_map cannot be used as an inner_map
* and map_meta_equal is not implemented. * and map_meta_equal is not implemented.
*/ */
static int prog_array_map_btf_id;
const struct bpf_map_ops prog_array_map_ops = { const struct bpf_map_ops prog_array_map_ops = {
.map_alloc_check = fd_array_map_alloc_check, .map_alloc_check = fd_array_map_alloc_check,
.map_alloc = prog_array_map_alloc, .map_alloc = prog_array_map_alloc,
@ -1128,8 +1125,7 @@ const struct bpf_map_ops prog_array_map_ops = {
.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem, .map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
.map_release_uref = prog_array_map_clear, .map_release_uref = prog_array_map_clear,
.map_seq_show_elem = prog_array_map_seq_show_elem, .map_seq_show_elem = prog_array_map_seq_show_elem,
.map_btf_name = "bpf_array", .map_btf_id = &array_map_btf_ids[0],
.map_btf_id = &prog_array_map_btf_id,
}; };
static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file, static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
@ -1218,7 +1214,6 @@ static void perf_event_fd_array_map_free(struct bpf_map *map)
fd_array_map_free(map); fd_array_map_free(map);
} }
static int perf_event_array_map_btf_id;
const struct bpf_map_ops perf_event_array_map_ops = { const struct bpf_map_ops perf_event_array_map_ops = {
.map_meta_equal = bpf_map_meta_equal, .map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = fd_array_map_alloc_check, .map_alloc_check = fd_array_map_alloc_check,
@ -1231,8 +1226,7 @@ const struct bpf_map_ops perf_event_array_map_ops = {
.map_fd_put_ptr = perf_event_fd_array_put_ptr, .map_fd_put_ptr = perf_event_fd_array_put_ptr,
.map_release = perf_event_fd_array_release, .map_release = perf_event_fd_array_release,
.map_check_btf = map_check_no_btf, .map_check_btf = map_check_no_btf,
.map_btf_name = "bpf_array", .map_btf_id = &array_map_btf_ids[0],
.map_btf_id = &perf_event_array_map_btf_id,
}; };
#ifdef CONFIG_CGROUPS #ifdef CONFIG_CGROUPS
@ -1255,7 +1249,6 @@ static void cgroup_fd_array_free(struct bpf_map *map)
fd_array_map_free(map); fd_array_map_free(map);
} }
static int cgroup_array_map_btf_id;
const struct bpf_map_ops cgroup_array_map_ops = { const struct bpf_map_ops cgroup_array_map_ops = {
.map_meta_equal = bpf_map_meta_equal, .map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = fd_array_map_alloc_check, .map_alloc_check = fd_array_map_alloc_check,
@ -1267,8 +1260,7 @@ const struct bpf_map_ops cgroup_array_map_ops = {
.map_fd_get_ptr = cgroup_fd_array_get_ptr, .map_fd_get_ptr = cgroup_fd_array_get_ptr,
.map_fd_put_ptr = cgroup_fd_array_put_ptr, .map_fd_put_ptr = cgroup_fd_array_put_ptr,
.map_check_btf = map_check_no_btf, .map_check_btf = map_check_no_btf,
.map_btf_name = "bpf_array", .map_btf_id = &array_map_btf_ids[0],
.map_btf_id = &cgroup_array_map_btf_id,
}; };
#endif #endif
@ -1342,7 +1334,6 @@ static int array_of_map_gen_lookup(struct bpf_map *map,
return insn - insn_buf; return insn - insn_buf;
} }
static int array_of_maps_map_btf_id;
const struct bpf_map_ops array_of_maps_map_ops = { const struct bpf_map_ops array_of_maps_map_ops = {
.map_alloc_check = fd_array_map_alloc_check, .map_alloc_check = fd_array_map_alloc_check,
.map_alloc = array_of_map_alloc, .map_alloc = array_of_map_alloc,
@ -1355,6 +1346,5 @@ const struct bpf_map_ops array_of_maps_map_ops = {
.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem, .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
.map_gen_lookup = array_of_map_gen_lookup, .map_gen_lookup = array_of_map_gen_lookup,
.map_check_btf = map_check_no_btf, .map_check_btf = map_check_no_btf,
.map_btf_name = "bpf_array", .map_btf_id = &array_map_btf_ids[0],
.map_btf_id = &array_of_maps_map_btf_id,
}; };

View File

@ -7,6 +7,7 @@
#include <linux/err.h> #include <linux/err.h>
#include <linux/jhash.h> #include <linux/jhash.h>
#include <linux/random.h> #include <linux/random.h>
#include <linux/btf_ids.h>
#define BLOOM_CREATE_FLAG_MASK \ #define BLOOM_CREATE_FLAG_MASK \
(BPF_F_NUMA_NODE | BPF_F_ZERO_SEED | BPF_F_ACCESS_MASK) (BPF_F_NUMA_NODE | BPF_F_ZERO_SEED | BPF_F_ACCESS_MASK)
@ -192,7 +193,7 @@ static int bloom_map_check_btf(const struct bpf_map *map,
return btf_type_is_void(key_type) ? 0 : -EINVAL; return btf_type_is_void(key_type) ? 0 : -EINVAL;
} }
static int bpf_bloom_map_btf_id; BTF_ID_LIST_SINGLE(bpf_bloom_map_btf_ids, struct, bpf_bloom_filter)
const struct bpf_map_ops bloom_filter_map_ops = { const struct bpf_map_ops bloom_filter_map_ops = {
.map_meta_equal = bpf_map_meta_equal, .map_meta_equal = bpf_map_meta_equal,
.map_alloc = bloom_map_alloc, .map_alloc = bloom_map_alloc,
@ -205,6 +206,5 @@ const struct bpf_map_ops bloom_filter_map_ops = {
.map_update_elem = bloom_map_update_elem, .map_update_elem = bloom_map_update_elem,
.map_delete_elem = bloom_map_delete_elem, .map_delete_elem = bloom_map_delete_elem,
.map_check_btf = bloom_map_check_btf, .map_check_btf = bloom_map_check_btf,
.map_btf_name = "bpf_bloom_filter", .map_btf_id = &bpf_bloom_map_btf_ids[0],
.map_btf_id = &bpf_bloom_map_btf_id,
}; };

View File

@ -245,7 +245,8 @@ static void inode_storage_map_free(struct bpf_map *map)
bpf_local_storage_map_free(smap, NULL); bpf_local_storage_map_free(smap, NULL);
} }
static int inode_storage_map_btf_id; BTF_ID_LIST_SINGLE(inode_storage_map_btf_ids, struct,
bpf_local_storage_map)
const struct bpf_map_ops inode_storage_map_ops = { const struct bpf_map_ops inode_storage_map_ops = {
.map_meta_equal = bpf_map_meta_equal, .map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = bpf_local_storage_map_alloc_check, .map_alloc_check = bpf_local_storage_map_alloc_check,
@ -256,8 +257,7 @@ const struct bpf_map_ops inode_storage_map_ops = {
.map_update_elem = bpf_fd_inode_storage_update_elem, .map_update_elem = bpf_fd_inode_storage_update_elem,
.map_delete_elem = bpf_fd_inode_storage_delete_elem, .map_delete_elem = bpf_fd_inode_storage_delete_elem,
.map_check_btf = bpf_local_storage_map_check_btf, .map_check_btf = bpf_local_storage_map_check_btf,
.map_btf_name = "bpf_local_storage_map", .map_btf_id = &inode_storage_map_btf_ids[0],
.map_btf_id = &inode_storage_map_btf_id,
.map_owner_storage_ptr = inode_storage_ptr, .map_owner_storage_ptr = inode_storage_ptr,
}; };

View File

@ -10,6 +10,7 @@
#include <linux/seq_file.h> #include <linux/seq_file.h>
#include <linux/refcount.h> #include <linux/refcount.h>
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/btf_ids.h>
enum bpf_struct_ops_state { enum bpf_struct_ops_state {
BPF_STRUCT_OPS_STATE_INIT, BPF_STRUCT_OPS_STATE_INIT,
@ -612,7 +613,7 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
return map; return map;
} }
static int bpf_struct_ops_map_btf_id; BTF_ID_LIST_SINGLE(bpf_struct_ops_map_btf_ids, struct, bpf_struct_ops_map)
const struct bpf_map_ops bpf_struct_ops_map_ops = { const struct bpf_map_ops bpf_struct_ops_map_ops = {
.map_alloc_check = bpf_struct_ops_map_alloc_check, .map_alloc_check = bpf_struct_ops_map_alloc_check,
.map_alloc = bpf_struct_ops_map_alloc, .map_alloc = bpf_struct_ops_map_alloc,
@ -622,8 +623,7 @@ const struct bpf_map_ops bpf_struct_ops_map_ops = {
.map_delete_elem = bpf_struct_ops_map_delete_elem, .map_delete_elem = bpf_struct_ops_map_delete_elem,
.map_update_elem = bpf_struct_ops_map_update_elem, .map_update_elem = bpf_struct_ops_map_update_elem,
.map_seq_show_elem = bpf_struct_ops_map_seq_show_elem, .map_seq_show_elem = bpf_struct_ops_map_seq_show_elem,
.map_btf_name = "bpf_struct_ops_map", .map_btf_id = &bpf_struct_ops_map_btf_ids[0],
.map_btf_id = &bpf_struct_ops_map_btf_id,
}; };
/* "const void *" because some subsystem is /* "const void *" because some subsystem is

View File

@ -307,7 +307,7 @@ static void task_storage_map_free(struct bpf_map *map)
bpf_local_storage_map_free(smap, &bpf_task_storage_busy); bpf_local_storage_map_free(smap, &bpf_task_storage_busy);
} }
static int task_storage_map_btf_id; BTF_ID_LIST_SINGLE(task_storage_map_btf_ids, struct, bpf_local_storage_map)
const struct bpf_map_ops task_storage_map_ops = { const struct bpf_map_ops task_storage_map_ops = {
.map_meta_equal = bpf_map_meta_equal, .map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = bpf_local_storage_map_alloc_check, .map_alloc_check = bpf_local_storage_map_alloc_check,
@ -318,8 +318,7 @@ const struct bpf_map_ops task_storage_map_ops = {
.map_update_elem = bpf_pid_task_storage_update_elem, .map_update_elem = bpf_pid_task_storage_update_elem,
.map_delete_elem = bpf_pid_task_storage_delete_elem, .map_delete_elem = bpf_pid_task_storage_delete_elem,
.map_check_btf = bpf_local_storage_map_check_btf, .map_check_btf = bpf_local_storage_map_check_btf,
.map_btf_name = "bpf_local_storage_map", .map_btf_id = &task_storage_map_btf_ids[0],
.map_btf_id = &task_storage_map_btf_id,
.map_owner_storage_ptr = task_storage_ptr, .map_owner_storage_ptr = task_storage_ptr,
}; };

View File

@ -5025,41 +5025,6 @@ btf_get_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
return ctx_type; return ctx_type;
} }
static const struct bpf_map_ops * const btf_vmlinux_map_ops[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
#define BPF_LINK_TYPE(_id, _name)
#define BPF_MAP_TYPE(_id, _ops) \
[_id] = &_ops,
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_LINK_TYPE
#undef BPF_MAP_TYPE
};
static int btf_vmlinux_map_ids_init(const struct btf *btf,
struct bpf_verifier_log *log)
{
const struct bpf_map_ops *ops;
int i, btf_id;
for (i = 0; i < ARRAY_SIZE(btf_vmlinux_map_ops); ++i) {
ops = btf_vmlinux_map_ops[i];
if (!ops || (!ops->map_btf_name && !ops->map_btf_id))
continue;
if (!ops->map_btf_name || !ops->map_btf_id) {
bpf_log(log, "map type %d is misconfigured\n", i);
return -EINVAL;
}
btf_id = btf_find_by_name_kind(btf, ops->map_btf_name,
BTF_KIND_STRUCT);
if (btf_id < 0)
return btf_id;
*ops->map_btf_id = btf_id;
}
return 0;
}
static int btf_translate_to_vmlinux(struct bpf_verifier_log *log, static int btf_translate_to_vmlinux(struct bpf_verifier_log *log,
struct btf *btf, struct btf *btf,
const struct btf_type *t, const struct btf_type *t,
@ -5125,11 +5090,6 @@ struct btf *btf_parse_vmlinux(void)
/* btf_parse_vmlinux() runs under bpf_verifier_lock */ /* btf_parse_vmlinux() runs under bpf_verifier_lock */
bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]); bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]);
/* find bpf map structs for map_ptr access checking */
err = btf_vmlinux_map_ids_init(btf, log);
if (err < 0)
goto errout;
bpf_struct_ops_init(btf, log); bpf_struct_ops_init(btf, log);
refcount_set(&btf->refcnt, 1); refcount_set(&btf->refcnt, 1);

View File

@ -27,6 +27,7 @@
#include <linux/kthread.h> #include <linux/kthread.h>
#include <linux/capability.h> #include <linux/capability.h>
#include <trace/events/xdp.h> #include <trace/events/xdp.h>
#include <linux/btf_ids.h>
#include <linux/netdevice.h> /* netif_receive_skb_list */ #include <linux/netdevice.h> /* netif_receive_skb_list */
#include <linux/etherdevice.h> /* eth_type_trans */ #include <linux/etherdevice.h> /* eth_type_trans */
@ -673,7 +674,7 @@ static int cpu_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
__cpu_map_lookup_elem); __cpu_map_lookup_elem);
} }
static int cpu_map_btf_id; BTF_ID_LIST_SINGLE(cpu_map_btf_ids, struct, bpf_cpu_map)
const struct bpf_map_ops cpu_map_ops = { const struct bpf_map_ops cpu_map_ops = {
.map_meta_equal = bpf_map_meta_equal, .map_meta_equal = bpf_map_meta_equal,
.map_alloc = cpu_map_alloc, .map_alloc = cpu_map_alloc,
@ -683,8 +684,7 @@ const struct bpf_map_ops cpu_map_ops = {
.map_lookup_elem = cpu_map_lookup_elem, .map_lookup_elem = cpu_map_lookup_elem,
.map_get_next_key = cpu_map_get_next_key, .map_get_next_key = cpu_map_get_next_key,
.map_check_btf = map_check_no_btf, .map_check_btf = map_check_no_btf,
.map_btf_name = "bpf_cpu_map", .map_btf_id = &cpu_map_btf_ids[0],
.map_btf_id = &cpu_map_btf_id,
.map_redirect = cpu_map_redirect, .map_redirect = cpu_map_redirect,
}; };

View File

@ -48,6 +48,7 @@
#include <net/xdp.h> #include <net/xdp.h>
#include <linux/filter.h> #include <linux/filter.h>
#include <trace/events/xdp.h> #include <trace/events/xdp.h>
#include <linux/btf_ids.h>
#define DEV_CREATE_FLAG_MASK \ #define DEV_CREATE_FLAG_MASK \
(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY) (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
@ -1005,7 +1006,7 @@ static int dev_hash_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
__dev_map_hash_lookup_elem); __dev_map_hash_lookup_elem);
} }
static int dev_map_btf_id; BTF_ID_LIST_SINGLE(dev_map_btf_ids, struct, bpf_dtab)
const struct bpf_map_ops dev_map_ops = { const struct bpf_map_ops dev_map_ops = {
.map_meta_equal = bpf_map_meta_equal, .map_meta_equal = bpf_map_meta_equal,
.map_alloc = dev_map_alloc, .map_alloc = dev_map_alloc,
@ -1015,12 +1016,10 @@ const struct bpf_map_ops dev_map_ops = {
.map_update_elem = dev_map_update_elem, .map_update_elem = dev_map_update_elem,
.map_delete_elem = dev_map_delete_elem, .map_delete_elem = dev_map_delete_elem,
.map_check_btf = map_check_no_btf, .map_check_btf = map_check_no_btf,
.map_btf_name = "bpf_dtab", .map_btf_id = &dev_map_btf_ids[0],
.map_btf_id = &dev_map_btf_id,
.map_redirect = dev_map_redirect, .map_redirect = dev_map_redirect,
}; };
static int dev_map_hash_map_btf_id;
const struct bpf_map_ops dev_map_hash_ops = { const struct bpf_map_ops dev_map_hash_ops = {
.map_meta_equal = bpf_map_meta_equal, .map_meta_equal = bpf_map_meta_equal,
.map_alloc = dev_map_alloc, .map_alloc = dev_map_alloc,
@ -1030,8 +1029,7 @@ const struct bpf_map_ops dev_map_hash_ops = {
.map_update_elem = dev_map_hash_update_elem, .map_update_elem = dev_map_hash_update_elem,
.map_delete_elem = dev_map_hash_delete_elem, .map_delete_elem = dev_map_hash_delete_elem,
.map_check_btf = map_check_no_btf, .map_check_btf = map_check_no_btf,
.map_btf_name = "bpf_dtab", .map_btf_id = &dev_map_btf_ids[0],
.map_btf_id = &dev_map_hash_map_btf_id,
.map_redirect = dev_hash_map_redirect, .map_redirect = dev_hash_map_redirect,
}; };

View File

@ -10,6 +10,7 @@
#include <linux/random.h> #include <linux/random.h>
#include <uapi/linux/btf.h> #include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h> #include <linux/rcupdate_trace.h>
#include <linux/btf_ids.h>
#include "percpu_freelist.h" #include "percpu_freelist.h"
#include "bpf_lru_list.h" #include "bpf_lru_list.h"
#include "map_in_map.h" #include "map_in_map.h"
@ -2137,7 +2138,7 @@ out:
return num_elems; return num_elems;
} }
static int htab_map_btf_id; BTF_ID_LIST_SINGLE(htab_map_btf_ids, struct, bpf_htab)
const struct bpf_map_ops htab_map_ops = { const struct bpf_map_ops htab_map_ops = {
.map_meta_equal = bpf_map_meta_equal, .map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = htab_map_alloc_check, .map_alloc_check = htab_map_alloc_check,
@ -2154,12 +2155,10 @@ const struct bpf_map_ops htab_map_ops = {
.map_set_for_each_callback_args = map_set_for_each_callback_args, .map_set_for_each_callback_args = map_set_for_each_callback_args,
.map_for_each_callback = bpf_for_each_hash_elem, .map_for_each_callback = bpf_for_each_hash_elem,
BATCH_OPS(htab), BATCH_OPS(htab),
.map_btf_name = "bpf_htab", .map_btf_id = &htab_map_btf_ids[0],
.map_btf_id = &htab_map_btf_id,
.iter_seq_info = &iter_seq_info, .iter_seq_info = &iter_seq_info,
}; };
static int htab_lru_map_btf_id;
const struct bpf_map_ops htab_lru_map_ops = { const struct bpf_map_ops htab_lru_map_ops = {
.map_meta_equal = bpf_map_meta_equal, .map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = htab_map_alloc_check, .map_alloc_check = htab_map_alloc_check,
@ -2177,8 +2176,7 @@ const struct bpf_map_ops htab_lru_map_ops = {
.map_set_for_each_callback_args = map_set_for_each_callback_args, .map_set_for_each_callback_args = map_set_for_each_callback_args,
.map_for_each_callback = bpf_for_each_hash_elem, .map_for_each_callback = bpf_for_each_hash_elem,
BATCH_OPS(htab_lru), BATCH_OPS(htab_lru),
.map_btf_name = "bpf_htab", .map_btf_id = &htab_map_btf_ids[0],
.map_btf_id = &htab_lru_map_btf_id,
.iter_seq_info = &iter_seq_info, .iter_seq_info = &iter_seq_info,
}; };
@ -2284,7 +2282,6 @@ static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key,
rcu_read_unlock(); rcu_read_unlock();
} }
static int htab_percpu_map_btf_id;
const struct bpf_map_ops htab_percpu_map_ops = { const struct bpf_map_ops htab_percpu_map_ops = {
.map_meta_equal = bpf_map_meta_equal, .map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = htab_map_alloc_check, .map_alloc_check = htab_map_alloc_check,
@ -2299,12 +2296,10 @@ const struct bpf_map_ops htab_percpu_map_ops = {
.map_set_for_each_callback_args = map_set_for_each_callback_args, .map_set_for_each_callback_args = map_set_for_each_callback_args,
.map_for_each_callback = bpf_for_each_hash_elem, .map_for_each_callback = bpf_for_each_hash_elem,
BATCH_OPS(htab_percpu), BATCH_OPS(htab_percpu),
.map_btf_name = "bpf_htab", .map_btf_id = &htab_map_btf_ids[0],
.map_btf_id = &htab_percpu_map_btf_id,
.iter_seq_info = &iter_seq_info, .iter_seq_info = &iter_seq_info,
}; };
static int htab_lru_percpu_map_btf_id;
const struct bpf_map_ops htab_lru_percpu_map_ops = { const struct bpf_map_ops htab_lru_percpu_map_ops = {
.map_meta_equal = bpf_map_meta_equal, .map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = htab_map_alloc_check, .map_alloc_check = htab_map_alloc_check,
@ -2319,8 +2314,7 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = {
.map_set_for_each_callback_args = map_set_for_each_callback_args, .map_set_for_each_callback_args = map_set_for_each_callback_args,
.map_for_each_callback = bpf_for_each_hash_elem, .map_for_each_callback = bpf_for_each_hash_elem,
BATCH_OPS(htab_lru_percpu), BATCH_OPS(htab_lru_percpu),
.map_btf_name = "bpf_htab", .map_btf_id = &htab_map_btf_ids[0],
.map_btf_id = &htab_lru_percpu_map_btf_id,
.iter_seq_info = &iter_seq_info, .iter_seq_info = &iter_seq_info,
}; };
@ -2444,7 +2438,6 @@ static void htab_of_map_free(struct bpf_map *map)
fd_htab_map_free(map); fd_htab_map_free(map);
} }
static int htab_of_maps_map_btf_id;
const struct bpf_map_ops htab_of_maps_map_ops = { const struct bpf_map_ops htab_of_maps_map_ops = {
.map_alloc_check = fd_htab_map_alloc_check, .map_alloc_check = fd_htab_map_alloc_check,
.map_alloc = htab_of_map_alloc, .map_alloc = htab_of_map_alloc,
@ -2457,6 +2450,5 @@ const struct bpf_map_ops htab_of_maps_map_ops = {
.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem, .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
.map_gen_lookup = htab_of_map_gen_lookup, .map_gen_lookup = htab_of_map_gen_lookup,
.map_check_btf = map_check_no_btf, .map_check_btf = map_check_no_btf,
.map_btf_name = "bpf_htab", .map_btf_id = &htab_map_btf_ids[0],
.map_btf_id = &htab_of_maps_map_btf_id,
}; };

View File

@ -9,6 +9,7 @@
#include <linux/rbtree.h> #include <linux/rbtree.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <uapi/linux/btf.h> #include <uapi/linux/btf.h>
#include <linux/btf_ids.h>
#ifdef CONFIG_CGROUP_BPF #ifdef CONFIG_CGROUP_BPF
@ -446,7 +447,8 @@ static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *key,
rcu_read_unlock(); rcu_read_unlock();
} }
static int cgroup_storage_map_btf_id; BTF_ID_LIST_SINGLE(cgroup_storage_map_btf_ids, struct,
bpf_cgroup_storage_map)
const struct bpf_map_ops cgroup_storage_map_ops = { const struct bpf_map_ops cgroup_storage_map_ops = {
.map_alloc = cgroup_storage_map_alloc, .map_alloc = cgroup_storage_map_alloc,
.map_free = cgroup_storage_map_free, .map_free = cgroup_storage_map_free,
@ -456,8 +458,7 @@ const struct bpf_map_ops cgroup_storage_map_ops = {
.map_delete_elem = cgroup_storage_delete_elem, .map_delete_elem = cgroup_storage_delete_elem,
.map_check_btf = cgroup_storage_check_btf, .map_check_btf = cgroup_storage_check_btf,
.map_seq_show_elem = cgroup_storage_seq_show_elem, .map_seq_show_elem = cgroup_storage_seq_show_elem,
.map_btf_name = "bpf_cgroup_storage_map", .map_btf_id = &cgroup_storage_map_btf_ids[0],
.map_btf_id = &cgroup_storage_map_btf_id,
}; };
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *_map) int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *_map)

View File

@ -14,6 +14,7 @@
#include <linux/vmalloc.h> #include <linux/vmalloc.h>
#include <net/ipv6.h> #include <net/ipv6.h>
#include <uapi/linux/btf.h> #include <uapi/linux/btf.h>
#include <linux/btf_ids.h>
/* Intermediate node */ /* Intermediate node */
#define LPM_TREE_NODE_FLAG_IM BIT(0) #define LPM_TREE_NODE_FLAG_IM BIT(0)
@ -719,7 +720,7 @@ static int trie_check_btf(const struct bpf_map *map,
-EINVAL : 0; -EINVAL : 0;
} }
static int trie_map_btf_id; BTF_ID_LIST_SINGLE(trie_map_btf_ids, struct, lpm_trie)
const struct bpf_map_ops trie_map_ops = { const struct bpf_map_ops trie_map_ops = {
.map_meta_equal = bpf_map_meta_equal, .map_meta_equal = bpf_map_meta_equal,
.map_alloc = trie_alloc, .map_alloc = trie_alloc,
@ -732,6 +733,5 @@ const struct bpf_map_ops trie_map_ops = {
.map_update_batch = generic_map_update_batch, .map_update_batch = generic_map_update_batch,
.map_delete_batch = generic_map_delete_batch, .map_delete_batch = generic_map_delete_batch,
.map_check_btf = trie_check_btf, .map_check_btf = trie_check_btf,
.map_btf_name = "lpm_trie", .map_btf_id = &trie_map_btf_ids[0],
.map_btf_id = &trie_map_btf_id,
}; };

View File

@ -8,6 +8,7 @@
#include <linux/list.h> #include <linux/list.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/capability.h> #include <linux/capability.h>
#include <linux/btf_ids.h>
#include "percpu_freelist.h" #include "percpu_freelist.h"
#define QUEUE_STACK_CREATE_FLAG_MASK \ #define QUEUE_STACK_CREATE_FLAG_MASK \
@ -247,7 +248,7 @@ static int queue_stack_map_get_next_key(struct bpf_map *map, void *key,
return -EINVAL; return -EINVAL;
} }
static int queue_map_btf_id; BTF_ID_LIST_SINGLE(queue_map_btf_ids, struct, bpf_queue_stack)
const struct bpf_map_ops queue_map_ops = { const struct bpf_map_ops queue_map_ops = {
.map_meta_equal = bpf_map_meta_equal, .map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = queue_stack_map_alloc_check, .map_alloc_check = queue_stack_map_alloc_check,
@ -260,11 +261,9 @@ const struct bpf_map_ops queue_map_ops = {
.map_pop_elem = queue_map_pop_elem, .map_pop_elem = queue_map_pop_elem,
.map_peek_elem = queue_map_peek_elem, .map_peek_elem = queue_map_peek_elem,
.map_get_next_key = queue_stack_map_get_next_key, .map_get_next_key = queue_stack_map_get_next_key,
.map_btf_name = "bpf_queue_stack", .map_btf_id = &queue_map_btf_ids[0],
.map_btf_id = &queue_map_btf_id,
}; };
static int stack_map_btf_id;
const struct bpf_map_ops stack_map_ops = { const struct bpf_map_ops stack_map_ops = {
.map_meta_equal = bpf_map_meta_equal, .map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = queue_stack_map_alloc_check, .map_alloc_check = queue_stack_map_alloc_check,
@ -277,6 +276,5 @@ const struct bpf_map_ops stack_map_ops = {
.map_pop_elem = stack_map_pop_elem, .map_pop_elem = stack_map_pop_elem,
.map_peek_elem = stack_map_peek_elem, .map_peek_elem = stack_map_peek_elem,
.map_get_next_key = queue_stack_map_get_next_key, .map_get_next_key = queue_stack_map_get_next_key,
.map_btf_name = "bpf_queue_stack", .map_btf_id = &queue_map_btf_ids[0],
.map_btf_id = &stack_map_btf_id,
}; };

View File

@ -6,6 +6,7 @@
#include <linux/err.h> #include <linux/err.h>
#include <linux/sock_diag.h> #include <linux/sock_diag.h>
#include <net/sock_reuseport.h> #include <net/sock_reuseport.h>
#include <linux/btf_ids.h>
struct reuseport_array { struct reuseport_array {
struct bpf_map map; struct bpf_map map;
@ -337,7 +338,7 @@ static int reuseport_array_get_next_key(struct bpf_map *map, void *key,
return 0; return 0;
} }
static int reuseport_array_map_btf_id; BTF_ID_LIST_SINGLE(reuseport_array_map_btf_ids, struct, reuseport_array)
const struct bpf_map_ops reuseport_array_ops = { const struct bpf_map_ops reuseport_array_ops = {
.map_meta_equal = bpf_map_meta_equal, .map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = reuseport_array_alloc_check, .map_alloc_check = reuseport_array_alloc_check,
@ -346,6 +347,5 @@ const struct bpf_map_ops reuseport_array_ops = {
.map_lookup_elem = reuseport_array_lookup_elem, .map_lookup_elem = reuseport_array_lookup_elem,
.map_get_next_key = reuseport_array_get_next_key, .map_get_next_key = reuseport_array_get_next_key,
.map_delete_elem = reuseport_array_delete_elem, .map_delete_elem = reuseport_array_delete_elem,
.map_btf_name = "reuseport_array", .map_btf_id = &reuseport_array_map_btf_ids[0],
.map_btf_id = &reuseport_array_map_btf_id,
}; };

View File

@ -10,6 +10,7 @@
#include <linux/poll.h> #include <linux/poll.h>
#include <linux/kmemleak.h> #include <linux/kmemleak.h>
#include <uapi/linux/btf.h> #include <uapi/linux/btf.h>
#include <linux/btf_ids.h>
#define RINGBUF_CREATE_FLAG_MASK (BPF_F_NUMA_NODE) #define RINGBUF_CREATE_FLAG_MASK (BPF_F_NUMA_NODE)
@ -263,7 +264,7 @@ static __poll_t ringbuf_map_poll(struct bpf_map *map, struct file *filp,
return 0; return 0;
} }
static int ringbuf_map_btf_id; BTF_ID_LIST_SINGLE(ringbuf_map_btf_ids, struct, bpf_ringbuf_map)
const struct bpf_map_ops ringbuf_map_ops = { const struct bpf_map_ops ringbuf_map_ops = {
.map_meta_equal = bpf_map_meta_equal, .map_meta_equal = bpf_map_meta_equal,
.map_alloc = ringbuf_map_alloc, .map_alloc = ringbuf_map_alloc,
@ -274,8 +275,7 @@ const struct bpf_map_ops ringbuf_map_ops = {
.map_update_elem = ringbuf_map_update_elem, .map_update_elem = ringbuf_map_update_elem,
.map_delete_elem = ringbuf_map_delete_elem, .map_delete_elem = ringbuf_map_delete_elem,
.map_get_next_key = ringbuf_map_get_next_key, .map_get_next_key = ringbuf_map_get_next_key,
.map_btf_name = "bpf_ringbuf_map", .map_btf_id = &ringbuf_map_btf_ids[0],
.map_btf_id = &ringbuf_map_btf_id,
}; };
/* Given pointer to ring buffer record metadata and struct bpf_ringbuf itself, /* Given pointer to ring buffer record metadata and struct bpf_ringbuf itself,

View File

@ -654,7 +654,7 @@ static void stack_map_free(struct bpf_map *map)
put_callchain_buffers(); put_callchain_buffers();
} }
static int stack_trace_map_btf_id; BTF_ID_LIST_SINGLE(stack_trace_map_btf_ids, struct, bpf_stack_map)
const struct bpf_map_ops stack_trace_map_ops = { const struct bpf_map_ops stack_trace_map_ops = {
.map_meta_equal = bpf_map_meta_equal, .map_meta_equal = bpf_map_meta_equal,
.map_alloc = stack_map_alloc, .map_alloc = stack_map_alloc,
@ -664,6 +664,5 @@ const struct bpf_map_ops stack_trace_map_ops = {
.map_update_elem = stack_map_update_elem, .map_update_elem = stack_map_update_elem,
.map_delete_elem = stack_map_delete_elem, .map_delete_elem = stack_map_delete_elem,
.map_check_btf = map_check_no_btf, .map_check_btf = map_check_no_btf,
.map_btf_name = "bpf_stack_map", .map_btf_id = &stack_trace_map_btf_ids[0],
.map_btf_id = &stack_trace_map_btf_id,
}; };

View File

@ -338,7 +338,7 @@ bpf_sk_storage_ptr(void *owner)
return &sk->sk_bpf_storage; return &sk->sk_bpf_storage;
} }
static int sk_storage_map_btf_id; BTF_ID_LIST_SINGLE(sk_storage_map_btf_ids, struct, bpf_local_storage_map)
const struct bpf_map_ops sk_storage_map_ops = { const struct bpf_map_ops sk_storage_map_ops = {
.map_meta_equal = bpf_map_meta_equal, .map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = bpf_local_storage_map_alloc_check, .map_alloc_check = bpf_local_storage_map_alloc_check,
@ -349,8 +349,7 @@ const struct bpf_map_ops sk_storage_map_ops = {
.map_update_elem = bpf_fd_sk_storage_update_elem, .map_update_elem = bpf_fd_sk_storage_update_elem,
.map_delete_elem = bpf_fd_sk_storage_delete_elem, .map_delete_elem = bpf_fd_sk_storage_delete_elem,
.map_check_btf = bpf_local_storage_map_check_btf, .map_check_btf = bpf_local_storage_map_check_btf,
.map_btf_name = "bpf_local_storage_map", .map_btf_id = &sk_storage_map_btf_ids[0],
.map_btf_id = &sk_storage_map_btf_id,
.map_local_storage_charge = bpf_sk_storage_charge, .map_local_storage_charge = bpf_sk_storage_charge,
.map_local_storage_uncharge = bpf_sk_storage_uncharge, .map_local_storage_uncharge = bpf_sk_storage_uncharge,
.map_owner_storage_ptr = bpf_sk_storage_ptr, .map_owner_storage_ptr = bpf_sk_storage_ptr,

View File

@ -793,7 +793,7 @@ static const struct bpf_iter_seq_info sock_map_iter_seq_info = {
.seq_priv_size = sizeof(struct sock_map_seq_info), .seq_priv_size = sizeof(struct sock_map_seq_info),
}; };
static int sock_map_btf_id; BTF_ID_LIST_SINGLE(sock_map_btf_ids, struct, bpf_stab)
const struct bpf_map_ops sock_map_ops = { const struct bpf_map_ops sock_map_ops = {
.map_meta_equal = bpf_map_meta_equal, .map_meta_equal = bpf_map_meta_equal,
.map_alloc = sock_map_alloc, .map_alloc = sock_map_alloc,
@ -805,8 +805,7 @@ const struct bpf_map_ops sock_map_ops = {
.map_lookup_elem = sock_map_lookup, .map_lookup_elem = sock_map_lookup,
.map_release_uref = sock_map_release_progs, .map_release_uref = sock_map_release_progs,
.map_check_btf = map_check_no_btf, .map_check_btf = map_check_no_btf,
.map_btf_name = "bpf_stab", .map_btf_id = &sock_map_btf_ids[0],
.map_btf_id = &sock_map_btf_id,
.iter_seq_info = &sock_map_iter_seq_info, .iter_seq_info = &sock_map_iter_seq_info,
}; };
@ -1385,7 +1384,7 @@ static const struct bpf_iter_seq_info sock_hash_iter_seq_info = {
.seq_priv_size = sizeof(struct sock_hash_seq_info), .seq_priv_size = sizeof(struct sock_hash_seq_info),
}; };
static int sock_hash_map_btf_id; BTF_ID_LIST_SINGLE(sock_hash_map_btf_ids, struct, bpf_shtab)
const struct bpf_map_ops sock_hash_ops = { const struct bpf_map_ops sock_hash_ops = {
.map_meta_equal = bpf_map_meta_equal, .map_meta_equal = bpf_map_meta_equal,
.map_alloc = sock_hash_alloc, .map_alloc = sock_hash_alloc,
@ -1397,8 +1396,7 @@ const struct bpf_map_ops sock_hash_ops = {
.map_lookup_elem_sys_only = sock_hash_lookup_sys, .map_lookup_elem_sys_only = sock_hash_lookup_sys,
.map_release_uref = sock_hash_release_progs, .map_release_uref = sock_hash_release_progs,
.map_check_btf = map_check_no_btf, .map_check_btf = map_check_no_btf,
.map_btf_name = "bpf_shtab", .map_btf_id = &sock_hash_map_btf_ids[0],
.map_btf_id = &sock_hash_map_btf_id,
.iter_seq_info = &sock_hash_iter_seq_info, .iter_seq_info = &sock_hash_iter_seq_info,
}; };

View File

@ -9,6 +9,7 @@
#include <net/xdp_sock.h> #include <net/xdp_sock.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/btf_ids.h>
#include "xsk.h" #include "xsk.h"
@ -254,7 +255,7 @@ static bool xsk_map_meta_equal(const struct bpf_map *meta0,
bpf_map_meta_equal(meta0, meta1); bpf_map_meta_equal(meta0, meta1);
} }
static int xsk_map_btf_id; BTF_ID_LIST_SINGLE(xsk_map_btf_ids, struct, xsk_map)
const struct bpf_map_ops xsk_map_ops = { const struct bpf_map_ops xsk_map_ops = {
.map_meta_equal = xsk_map_meta_equal, .map_meta_equal = xsk_map_meta_equal,
.map_alloc = xsk_map_alloc, .map_alloc = xsk_map_alloc,
@ -266,7 +267,6 @@ const struct bpf_map_ops xsk_map_ops = {
.map_update_elem = xsk_map_update_elem, .map_update_elem = xsk_map_update_elem,
.map_delete_elem = xsk_map_delete_elem, .map_delete_elem = xsk_map_delete_elem,
.map_check_btf = map_check_no_btf, .map_check_btf = map_check_no_btf,
.map_btf_name = "xsk_map", .map_btf_id = &xsk_map_btf_ids[0],
.map_btf_id = &xsk_map_btf_id,
.map_redirect = xsk_map_redirect, .map_redirect = xsk_map_redirect,
}; };