Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Alexei Starovoitov says:

====================
pull-request: bpf-next 2020-07-13

The following pull-request contains BPF updates for your *net-next* tree.

We've added 36 non-merge commits during the last 7 day(s) which contain
a total of 62 files changed, 2242 insertions(+), 468 deletions(-).

The main changes are:

1) Avoid trace_printk warning banner by switching bpf_trace_printk to use
   its own tracing event, from Alan.

2) Better libbpf support on older kernels, from Andrii.

3) Additional AF_XDP stats, from Ciara.

4) Build-time resolution of BTF IDs, from Jiri.

5) BPF_CGROUP_INET_SOCK_RELEASE hook, from Stanislav.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 07dd1b7e68
Documentation/bpf/btf.rst

@@ -691,6 +691,42 @@ kernel API, the ``insn_off`` is the instruction offset in the unit of ``struct
 bpf_insn``. For ELF API, the ``insn_off`` is the byte offset from the
 beginning of section (``btf_ext_info_sec->sec_name_off``).
 
+4.2 .BTF_ids section
+====================
+
+The .BTF_ids section encodes BTF ID values that are used within the kernel.
+
+This section is created during kernel compilation with the help of
+macros defined in the ``include/linux/btf_ids.h`` header file. Kernel code can
+use them to create lists and sets (sorted lists) of BTF ID values.
+
+The ``BTF_ID_LIST`` and ``BTF_ID`` macros define an unsorted list of BTF ID values,
+with the following syntax::
+
+  BTF_ID_LIST(list)
+  BTF_ID(type1, name1)
+  BTF_ID(type2, name2)
+
+resulting in the following layout in the .BTF_ids section::
+
+  __BTF_ID__type1__name1__1:
+  .zero 4
+  __BTF_ID__type2__name2__2:
+  .zero 4
+
+The ``u32 list[];`` variable is defined to access the list.
+
+The ``BTF_ID_UNUSED`` macro defines 4 zero bytes. It's used when we
+want to define an unused entry in a BTF_ID_LIST, like::
+
+  BTF_ID_LIST(bpf_skb_output_btf_ids)
+  BTF_ID(struct, sk_buff)
+  BTF_ID_UNUSED
+  BTF_ID(struct, task_struct)
+
+All the BTF ID lists and sets are compiled into the .BTF_ids section and
+resolved during the linking phase of the kernel build by the ``resolve_btfids`` tool.
+
 5. Using BTF
 ************
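To make the documented macros concrete, here is a minimal sketch of how kernel code might declare and consume such a list. The `my_btf_ids` name and `type_is_known()` helper are hypothetical; `BTF_ID_LIST`/`BTF_ID` are the real macros from include/linux/btf_ids.h introduced by this merge:

#include <linux/types.h>
#include <linux/btf_ids.h>

/* Declares "u32 my_btf_ids[];" backed by two 4-byte slots in the
 * .BTF_ids section; resolve_btfids patches the slots with the BTF IDs
 * of the named types at link time, so at runtime the array already
 * holds valid IDs.
 */
BTF_ID_LIST(my_btf_ids)
BTF_ID(struct, sk_buff)
BTF_ID(struct, task_struct)

static bool type_is_known(u32 btf_id)
{
	/* my_btf_ids[0] == BTF ID of struct sk_buff,
	 * my_btf_ids[1] == BTF ID of struct task_struct */
	return btf_id == my_btf_ids[0] || btf_id == my_btf_ids[1];
}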
Makefile

@@ -448,6 +448,7 @@ OBJSIZE = $(CROSS_COMPILE)size
 STRIP		= $(CROSS_COMPILE)strip
 endif
 PAHOLE		= pahole
+RESOLVE_BTFIDS	= $(objtree)/tools/bpf/resolve_btfids/resolve_btfids
 LEX		= flex
 YACC		= bison
 AWK		= awk
@@ -510,7 +511,7 @@ GCC_PLUGINS_CFLAGS :=
 CLANG_FLAGS :=
 
 export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC
-export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE READELF PAHOLE LEX YACC AWK INSTALLKERNEL
+export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE READELF PAHOLE RESOLVE_BTFIDS LEX YACC AWK INSTALLKERNEL
 export PERL PYTHON PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
 export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ
 export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE
@@ -1053,9 +1054,10 @@ export mod_sign_cmd
 
 HOST_LIBELF_LIBS = $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf)
 
+has_libelf = $(call try-run,\
+		echo "int main() {}" | $(HOSTCC) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0)
+
 ifdef CONFIG_STACK_VALIDATION
-  has_libelf := $(call try-run,\
-		echo "int main() {}" | $(HOSTCC) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0)
   ifeq ($(has_libelf),1)
     objtool_target := tools/objtool FORCE
   else
@@ -1064,6 +1066,14 @@ ifdef CONFIG_STACK_VALIDATION
   endif
 endif
 
+ifdef CONFIG_DEBUG_INFO_BTF
+  ifeq ($(has_libelf),1)
+    resolve_btfids_target := tools/bpf/resolve_btfids FORCE
+  else
+    ERROR_RESOLVE_BTFIDS := 1
+  endif
+endif
+
 PHONY += prepare0
 
 export MODORDER := $(extmod-prefix)modules.order
@@ -1175,7 +1185,7 @@ prepare0: archprepare
 	$(Q)$(MAKE) $(build)=.
 
 # All the preparing..
-prepare: prepare0 prepare-objtool
+prepare: prepare0 prepare-objtool prepare-resolve_btfids
 
 # Support for using generic headers in asm-generic
 asm-generic := -f $(srctree)/scripts/Makefile.asm-generic obj
@@ -1188,7 +1198,7 @@ uapi-asm-generic:
 	$(Q)$(MAKE) $(asm-generic)=arch/$(SRCARCH)/include/generated/uapi/asm \
 	generic=include/uapi/asm-generic
 
-PHONY += prepare-objtool
+PHONY += prepare-objtool prepare-resolve_btfids
 prepare-objtool: $(objtool_target)
 ifeq ($(SKIP_STACK_VALIDATION),1)
 ifdef CONFIG_UNWINDER_ORC
@@ -1199,6 +1209,11 @@ else
 endif
 endif
 
+prepare-resolve_btfids: $(resolve_btfids_target)
+ifeq ($(ERROR_RESOLVE_BTFIDS),1)
+	@echo "error: Cannot resolve BTF IDs for CONFIG_DEBUG_INFO_BTF, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2
+	@false
+endif
+
 # Generate some files
 # ---------------------------------------------------------------------------
include/asm-generic/vmlinux.lds.h

@@ -641,6 +641,10 @@
 		__start_BTF = .;					\
 		*(.BTF)							\
 		__stop_BTF = .;						\
-	}
+	}								\
+	. = ALIGN(4);							\
+	.BTF_ids : AT(ADDR(.BTF_ids) - LOAD_OFFSET) {			\
+		*(.BTF_ids)						\
+	}
 #else
 #define BTF
include/linux/bpf-cgroup.h

@@ -210,6 +210,9 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
 #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				       \
 	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)
 
+#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk)			       \
+	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_RELEASE)
+
 #define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)				       \
 	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)
 
@@ -401,6 +404,7 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
 #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
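For illustration, a hedged sketch of a program for the new hook. The map and program names are made up, and the "cgroup/sock_release" ELF section name is an assumption based on the libbpf support added alongside this series:

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Hypothetical per-cgroup counter of AF_INET socket releases. */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, __u32);
	__type(value, __u64);
	__uint(max_entries, 1);
} release_cnt SEC(".maps");

SEC("cgroup/sock_release")
int count_release(struct bpf_sock *ctx)
{
	__u32 key = 0;
	__u64 *val = bpf_map_lookup_elem(&release_cnt, &key);

	if (val)
		__sync_fetch_and_add(val, 1);
	return 1;	/* allow */
}

char _license[] SEC("license") = "GPL";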
include/linux/btf_ids.h (new file, 87 lines)

@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_BTF_IDS_H
+#define _LINUX_BTF_IDS_H
+
+#include <linux/compiler.h> /* for __PASTE */
+
+/*
+ * The following macros help to define lists of BTF IDs placed
+ * in the .BTF_ids section. They are initially filled with zeros
+ * (during compilation) and resolved later during the
+ * linking phase by the resolve_btfids tool.
+ *
+ * Any change in list layout must be reflected in the resolve_btfids
+ * tool logic.
+ */
+
+#define BTF_IDS_SECTION ".BTF_ids"
+
+#define ____BTF_ID(symbol)				\
+asm(							\
+".pushsection " BTF_IDS_SECTION ",\"a\";	\n"	\
+".local " #symbol " ;				\n"	\
+".type  " #symbol ", @object;			\n"	\
+".size  " #symbol ", 4;				\n"	\
+#symbol ":					\n"	\
+".zero 4					\n"	\
+".popsection;					\n");
+
+#define __BTF_ID(symbol) \
+	____BTF_ID(symbol)
+
+#define __ID(prefix) \
+	__PASTE(prefix, __COUNTER__)
+
+/*
+ * The BTF_ID macro defines a unique symbol for each ID, pointing
+ * to 4 zero bytes.
+ */
+#define BTF_ID(prefix, name) \
+	__BTF_ID(__ID(__BTF_ID__##prefix##__##name##__))
+
+/*
+ * The BTF_ID_LIST macro defines a pure (unsorted) list
+ * of BTF IDs, with the following layout:
+ *
+ * BTF_ID_LIST(list1)
+ * BTF_ID(type1, name1)
+ * BTF_ID(type2, name2)
+ *
+ * list1:
+ * __BTF_ID__type1__name1__1:
+ * .zero 4
+ * __BTF_ID__type2__name2__2:
+ * .zero 4
+ *
+ */
+#define __BTF_ID_LIST(name)				\
+asm(							\
+".pushsection " BTF_IDS_SECTION ",\"a\";	\n"	\
+".local " #name ";				\n"	\
+#name ":;					\n"	\
+".popsection;					\n");	\
+
+#define BTF_ID_LIST(name)	\
+__BTF_ID_LIST(name)		\
+extern u32 name[];
+
+/*
+ * The BTF_ID_UNUSED macro defines 4 zero bytes.
+ * It's used when we want to define an 'unused' entry
+ * in a BTF_ID_LIST, like:
+ *
+ *   BTF_ID_LIST(bpf_skb_output_btf_ids)
+ *   BTF_ID(struct, sk_buff)
+ *   BTF_ID_UNUSED
+ *   BTF_ID(struct, task_struct)
+ */
+
+#define BTF_ID_UNUSED					\
+asm(							\
+".pushsection " BTF_IDS_SECTION ",\"a\";	\n"	\
+".zero 4					\n"	\
+".popsection;					\n");
+
+#endif
include/net/xdp_sock.h

@@ -69,7 +69,11 @@ struct xdp_sock {
 	spinlock_t tx_completion_lock;
 	/* Protects generic receive. */
 	spinlock_t rx_lock;
+
+	/* Statistics */
 	u64 rx_dropped;
+	u64 rx_queue_full;
+
 	struct list_head map_list;
 	/* Protects map_list */
 	spinlock_t map_list_lock;
include/uapi/linux/bpf.h

@@ -226,6 +226,7 @@ enum bpf_attach_type {
 	BPF_CGROUP_INET4_GETSOCKNAME,
 	BPF_CGROUP_INET6_GETSOCKNAME,
 	BPF_XDP_DEVMAP,
+	BPF_CGROUP_INET_SOCK_RELEASE,
 	__MAX_BPF_ATTACH_TYPE
 };
include/uapi/linux/if_xdp.h

@@ -73,9 +73,12 @@ struct xdp_umem_reg {
 };
 
 struct xdp_statistics {
-	__u64 rx_dropped; /* Dropped for reasons other than invalid desc */
+	__u64 rx_dropped; /* Dropped for other reasons */
 	__u64 rx_invalid_descs; /* Dropped due to invalid descriptor */
 	__u64 tx_invalid_descs; /* Dropped due to invalid descriptor */
+	__u64 rx_ring_full; /* Dropped due to rx ring being full */
+	__u64 rx_fill_ring_empty_descs; /* Failed to retrieve item from fill ring */
+	__u64 tx_ring_empty_descs; /* Failed to retrieve item from tx ring */
 };
 
 struct xdp_options {
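From userspace the new counters arrive through the same XDP_STATISTICS getsockopt; the kernel reports via optlen how much of the struct it filled in, which is how the old and new layouts coexist. A hedged sketch (dump_xdp_stats() is a made-up helper; xsk_fd is an already-created AF_XDP socket):

#include <errno.h>
#include <stdio.h>
#include <sys/socket.h>
#include <linux/if_xdp.h>

static int dump_xdp_stats(int xsk_fd)
{
	struct xdp_statistics stats;
	socklen_t optlen = sizeof(stats);

	if (getsockopt(xsk_fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen))
		return -errno;

	printf("rx_dropped:       %llu\n", (unsigned long long)stats.rx_dropped);
	printf("rx_invalid_descs: %llu\n", (unsigned long long)stats.rx_invalid_descs);
	printf("tx_invalid_descs: %llu\n", (unsigned long long)stats.tx_invalid_descs);

	/* Older kernels fill only the three fields above and return a
	 * smaller optlen, so read the extras only when present. */
	if (optlen == sizeof(stats)) {
		printf("rx_ring_full:             %llu\n",
		       (unsigned long long)stats.rx_ring_full);
		printf("rx_fill_ring_empty_descs: %llu\n",
		       (unsigned long long)stats.rx_fill_ring_empty_descs);
		printf("tx_ring_empty_descs:      %llu\n",
		       (unsigned long long)stats.tx_ring_empty_descs);
	}
	return 0;
}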
include/uapi/linux/xdp_diag.h

@@ -30,6 +30,7 @@ struct xdp_diag_msg {
 #define XDP_SHOW_RING_CFG	(1 << 1)
 #define XDP_SHOW_UMEM		(1 << 2)
 #define XDP_SHOW_MEMINFO	(1 << 3)
+#define XDP_SHOW_STATS		(1 << 4)
 
 enum {
 	XDP_DIAG_NONE,
@@ -41,6 +42,7 @@ enum {
 	XDP_DIAG_UMEM_FILL_RING,
 	XDP_DIAG_UMEM_COMPLETION_RING,
 	XDP_DIAG_MEMINFO,
+	XDP_DIAG_STATS,
 	__XDP_DIAG_MAX,
 };
 
@@ -69,4 +71,13 @@ struct xdp_diag_umem {
 	__u32 refs;
 };
 
+struct xdp_diag_stats {
+	__u64 n_rx_dropped;
+	__u64 n_rx_invalid;
+	__u64 n_rx_full;
+	__u64 n_fill_ring_empty;
+	__u64 n_tx_invalid;
+	__u64 n_tx_ring_empty;
+};
+
 #endif /* _LINUX_XDP_DIAG_H */
kernel/bpf/btf.c

@@ -18,6 +18,7 @@
 #include <linux/sort.h>
 #include <linux/bpf_verifier.h>
 #include <linux/btf.h>
+#include <linux/btf_ids.h>
 #include <linux/skmsg.h>
 #include <linux/perf_event.h>
 #include <net/sock.h>
@@ -3621,12 +3622,15 @@ static int btf_translate_to_vmlinux(struct bpf_verifier_log *log,
 	return kern_ctx_type->type;
 }
 
+BTF_ID_LIST(bpf_ctx_convert_btf_id)
+BTF_ID(struct, bpf_ctx_convert)
+
 struct btf *btf_parse_vmlinux(void)
 {
 	struct btf_verifier_env *env = NULL;
 	struct bpf_verifier_log *log;
 	struct btf *btf = NULL;
-	int err, btf_id;
+	int err;
 
 	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
 	if (!env)
@@ -3659,14 +3663,8 @@ struct btf *btf_parse_vmlinux(void)
 	if (err)
 		goto errout;
 
-	/* find struct bpf_ctx_convert for type checking later */
-	btf_id = btf_find_by_name_kind(btf, "bpf_ctx_convert", BTF_KIND_STRUCT);
-	if (btf_id < 0) {
-		err = btf_id;
-		goto errout;
-	}
 	/* btf_parse_vmlinux() runs under bpf_verifier_lock */
-	bpf_ctx_convert.t = btf_type_by_id(btf, btf_id);
+	bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]);
 
 	/* find bpf map structs for map_ptr access checking */
 	err = btf_vmlinux_map_ids_init(btf, log);
@@ -4079,96 +4077,17 @@ error:
 	return -EINVAL;
 }
 
-static int __btf_resolve_helper_id(struct bpf_verifier_log *log, void *fn,
-				   int arg)
-{
-	char fnname[KSYM_SYMBOL_LEN + 4] = "btf_";
-	const struct btf_param *args;
-	const struct btf_type *t;
-	const char *tname, *sym;
-	u32 btf_id, i;
-
-	if (IS_ERR(btf_vmlinux)) {
-		bpf_log(log, "btf_vmlinux is malformed\n");
-		return -EINVAL;
-	}
-
-	sym = kallsyms_lookup((long)fn, NULL, NULL, NULL, fnname + 4);
-	if (!sym) {
-		bpf_log(log, "kernel doesn't have kallsyms\n");
-		return -EFAULT;
-	}
-
-	for (i = 1; i <= btf_vmlinux->nr_types; i++) {
-		t = btf_type_by_id(btf_vmlinux, i);
-		if (BTF_INFO_KIND(t->info) != BTF_KIND_TYPEDEF)
-			continue;
-		tname = __btf_name_by_offset(btf_vmlinux, t->name_off);
-		if (!strcmp(tname, fnname))
-			break;
-	}
-	if (i > btf_vmlinux->nr_types) {
-		bpf_log(log, "helper %s type is not found\n", fnname);
-		return -ENOENT;
-	}
-
-	t = btf_type_by_id(btf_vmlinux, t->type);
-	if (!btf_type_is_ptr(t))
-		return -EFAULT;
-	t = btf_type_by_id(btf_vmlinux, t->type);
-	if (!btf_type_is_func_proto(t))
-		return -EFAULT;
-
-	args = (const struct btf_param *)(t + 1);
-	if (arg >= btf_type_vlen(t)) {
-		bpf_log(log, "bpf helper %s doesn't have %d-th argument\n",
-			fnname, arg);
-		return -EINVAL;
-	}
-
-	t = btf_type_by_id(btf_vmlinux, args[arg].type);
-	if (!btf_type_is_ptr(t) || !t->type) {
-		/* anything but the pointer to struct is a helper config bug */
-		bpf_log(log, "ARG_PTR_TO_BTF is misconfigured\n");
-		return -EFAULT;
-	}
-	btf_id = t->type;
-	t = btf_type_by_id(btf_vmlinux, t->type);
-	/* skip modifiers */
-	while (btf_type_is_modifier(t)) {
-		btf_id = t->type;
-		t = btf_type_by_id(btf_vmlinux, t->type);
-	}
-	if (!btf_type_is_struct(t)) {
-		bpf_log(log, "ARG_PTR_TO_BTF is not a struct\n");
-		return -EFAULT;
-	}
-	bpf_log(log, "helper %s arg%d has btf_id %d struct %s\n", fnname + 4,
-		arg, btf_id, __btf_name_by_offset(btf_vmlinux, t->name_off));
-	return btf_id;
-}
-
 int btf_resolve_helper_id(struct bpf_verifier_log *log,
 			  const struct bpf_func_proto *fn, int arg)
 {
-	int *btf_id = &fn->btf_id[arg];
-	int ret;
+	int id;
 
 	if (fn->arg_type[arg] != ARG_PTR_TO_BTF_ID)
 		return -EINVAL;
 
-	ret = READ_ONCE(*btf_id);
-	if (ret)
-		return ret;
-	/* ok to race the search. The result is the same */
-	ret = __btf_resolve_helper_id(log, fn->func, arg);
-	if (!ret) {
-		/* Function argument cannot be type 'void' */
-		bpf_log(log, "BTF resolution bug\n");
-		return -EFAULT;
-	}
-	WRITE_ONCE(*btf_id, ret);
-	return ret;
+	id = fn->btf_id[arg];
+	if (!id || id > btf_vmlinux->nr_types)
+		return -EINVAL;
+	return id;
 }
 
 static int __get_type_size(struct btf *btf, u32 btf_id,
kernel/bpf/stackmap.c

@@ -9,6 +9,7 @@
 #include <linux/elf.h>
 #include <linux/pagemap.h>
 #include <linux/irq_work.h>
+#include <linux/btf_ids.h>
 #include "percpu_freelist.h"
 
 #define STACK_CREATE_FLAG_MASK					\
@@ -576,7 +577,9 @@ BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf,
 	return __bpf_get_stack(regs, task, buf, size, flags);
 }
 
-static int bpf_get_task_stack_btf_ids[5];
+BTF_ID_LIST(bpf_get_task_stack_btf_ids)
+BTF_ID(struct, task_struct)
+
 const struct bpf_func_proto bpf_get_task_stack_proto = {
 	.func		= bpf_get_task_stack,
 	.gpl_only	= false,
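The hunk above is the pattern repeated throughout this series: a static int array that used to be filled lazily at run time becomes a .BTF_ids-backed list that resolve_btfids fills at link time. A hedged sketch of the shape, where bpf_my_helper and its proto are made-up names and the .btf_id member is how a helper proto points at its arguments' BTF IDs in this kernel:

#include <linux/bpf.h>
#include <linux/btf_ids.h>

/* One 4-byte slot per ARG_PTR_TO_BTF_ID argument; resolve_btfids
 * replaces the zeros with the BTF ID of struct task_struct at link
 * time, so no runtime lookup is needed. */
BTF_ID_LIST(bpf_my_helper_btf_ids)
BTF_ID(struct, task_struct)

const struct bpf_func_proto bpf_my_helper_proto = {
	.func		= bpf_my_helper,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.btf_id		= bpf_my_helper_btf_ids,
};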
kernel/bpf/syscall.c

@@ -1981,6 +1981,7 @@ bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
 	case BPF_PROG_TYPE_CGROUP_SOCK:
 		switch (expected_attach_type) {
 		case BPF_CGROUP_INET_SOCK_CREATE:
+		case BPF_CGROUP_INET_SOCK_RELEASE:
 		case BPF_CGROUP_INET4_POST_BIND:
 		case BPF_CGROUP_INET6_POST_BIND:
 			return 0;
@@ -2779,6 +2780,7 @@ attach_type_to_prog_type(enum bpf_attach_type attach_type)
 		return BPF_PROG_TYPE_CGROUP_SKB;
 		break;
 	case BPF_CGROUP_INET_SOCK_CREATE:
+	case BPF_CGROUP_INET_SOCK_RELEASE:
 	case BPF_CGROUP_INET4_POST_BIND:
 	case BPF_CGROUP_INET6_POST_BIND:
 		return BPF_PROG_TYPE_CGROUP_SOCK;
@@ -2927,6 +2929,7 @@ static int bpf_prog_query(const union bpf_attr *attr,
 	case BPF_CGROUP_INET_INGRESS:
 	case BPF_CGROUP_INET_EGRESS:
 	case BPF_CGROUP_INET_SOCK_CREATE:
+	case BPF_CGROUP_INET_SOCK_RELEASE:
 	case BPF_CGROUP_INET4_BIND:
 	case BPF_CGROUP_INET6_BIND:
 	case BPF_CGROUP_INET4_POST_BIND:
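On the userspace side, attaching to the new hook follows the usual cgroup attach flow. A hedged sketch, assuming prog_fd refers to a loaded BPF_PROG_TYPE_CGROUP_SOCK program with expected_attach_type set to BPF_CGROUP_INET_SOCK_RELEASE, and with an example cgroup v2 path:

#include <fcntl.h>
#include <unistd.h>
#include <bpf/bpf.h>

/* Attach an already-loaded cgroup-sock program to the release hook
 * of the given cgroup directory. Returns 0 on success. */
static int attach_sock_release(int prog_fd, const char *cgroup_path)
{
	int err, cg_fd = open(cgroup_path, O_RDONLY);

	if (cg_fd < 0)
		return -1;
	err = bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_INET_SOCK_RELEASE, 0);
	close(cg_fd);
	return err;
}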
kernel/trace/Makefile

@@ -31,6 +31,8 @@ ifdef CONFIG_GCOV_PROFILE_FTRACE
 GCOV_PROFILE := y
 endif
 
+CFLAGS_bpf_trace.o := -I$(src)
+
 CFLAGS_trace_benchmark.o := -I$(src)
 CFLAGS_trace_events_filter.o := -I$(src)
kernel/trace/bpf_trace.c

@@ -11,14 +11,19 @@
 #include <linux/uaccess.h>
 #include <linux/ctype.h>
 #include <linux/kprobes.h>
 #include <linux/spinlock.h>
 #include <linux/syscalls.h>
 #include <linux/error-injection.h>
+#include <linux/btf_ids.h>
 
 #include <asm/tlb.h>
 
 #include "trace_probe.h"
 #include "trace.h"
 
+#define CREATE_TRACE_POINTS
+#include "bpf_trace.h"
+
 #define bpf_event_rcu_dereference(p)					\
 	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
 
@@ -374,6 +379,30 @@ static void bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
 	}
 }
 
+static DEFINE_RAW_SPINLOCK(trace_printk_lock);
+
+#define BPF_TRACE_PRINTK_SIZE   1024
+
+static inline __printf(1, 0) int bpf_do_trace_printk(const char *fmt, ...)
+{
+	static char buf[BPF_TRACE_PRINTK_SIZE];
+	unsigned long flags;
+	va_list ap;
+	int ret;
+
+	raw_spin_lock_irqsave(&trace_printk_lock, flags);
+	va_start(ap, fmt);
+	ret = vsnprintf(buf, sizeof(buf), fmt, ap);
+	va_end(ap);
+	/* vsnprintf() will not append null for zero-length strings */
+	if (ret == 0)
+		buf[0] = '\0';
+	trace_bpf_trace_printk(buf);
+	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);
+
+	return ret;
+}
+
 /*
  * Only limited trace_printk() conversion specifiers allowed:
  * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pB %pks %pus %s
@@ -483,8 +512,7 @@ fmt_next:
  */
 #define __BPF_TP_EMIT()	__BPF_ARG3_TP()
 #define __BPF_TP(...)						\
-	__trace_printk(0 /* Fake ip */,				\
-		       fmt, ##__VA_ARGS__)
+	bpf_do_trace_printk(fmt, ##__VA_ARGS__)
 
 #define __BPF_ARG1_TP(...)						\
 	((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))	\
@@ -521,10 +549,15 @@ static const struct bpf_func_proto bpf_trace_printk_proto = {
 const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
 {
 	/*
-	 * this program might be calling bpf_trace_printk,
-	 * so allocate per-cpu printk buffers
+	 * This program might be calling bpf_trace_printk,
+	 * so enable the associated bpf_trace/bpf_trace_printk event.
+	 * Repeat this each time as it is possible a user has
+	 * disabled bpf_trace_printk events; however, by loading a
+	 * program that calls bpf_trace_printk() the user has expressed
+	 * the intent to see such events.
 	 */
-	trace_printk_init_buffers();
+	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
+		pr_warn_ratelimited("could not enable bpf_trace_printk events");
 
 	return &bpf_trace_printk_proto;
 }
@@ -710,7 +743,9 @@ out:
 	return err;
 }
 
-static int bpf_seq_printf_btf_ids[5];
+BTF_ID_LIST(bpf_seq_printf_btf_ids)
+BTF_ID(struct, seq_file)
+
 static const struct bpf_func_proto bpf_seq_printf_proto = {
 	.func		= bpf_seq_printf,
 	.gpl_only	= true,
@@ -728,7 +763,9 @@ BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
 	return seq_write(m, data, len) ? -EOVERFLOW : 0;
 }
 
-static int bpf_seq_write_btf_ids[5];
+BTF_ID_LIST(bpf_seq_write_btf_ids)
+BTF_ID(struct, seq_file)
+
 static const struct bpf_func_proto bpf_seq_write_proto = {
 	.func		= bpf_seq_write,
 	.gpl_only	= true,
kernel/trace/bpf_trace.h (new file, 34 lines)

@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM bpf_trace
+
+#if !defined(_TRACE_BPF_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+
+#define _TRACE_BPF_TRACE_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(bpf_trace_printk,
+
+	TP_PROTO(const char *bpf_string),
+
+	TP_ARGS(bpf_string),
+
+	TP_STRUCT__entry(
+		__string(bpf_string, bpf_string)
+	),
+
+	TP_fast_assign(
+		__assign_str(bpf_string, bpf_string);
+	),
+
+	TP_printk("%s", __get_str(bpf_string))
+);
+
+#endif /* _TRACE_BPF_TRACE_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE bpf_trace
+
+#include <trace/define_trace.h>
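Nothing changes for BPF programs themselves; only where the output lands differs. A hedged sketch of a program whose bpf_trace_printk() output now flows through the bpf_trace:bpf_trace_printk event (the section name and message are arbitrary):

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tracepoint/syscalls/sys_enter_execve")
int log_execve(void *ctx)
{
	/* Emitted via the bpf_trace:bpf_trace_printk trace event rather
	 * than __trace_printk(), so no warning banner is printed and the
	 * message can be consumed like any other trace event. */
	char fmt[] = "execve observed\n";

	bpf_trace_printk(fmt, sizeof(fmt));
	return 0;
}

char _license[] SEC("license") = "GPL";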
net/core/filter.c

@@ -75,6 +75,7 @@
 #include <net/ipv6_stubs.h>
 #include <net/bpf_sk_storage.h>
 #include <net/transp_v6.h>
+#include <linux/btf_ids.h>
 
 /**
  *	sk_filter_trim_cap - run a packet through a socket filter
@@ -3779,7 +3780,9 @@ static const struct bpf_func_proto bpf_skb_event_output_proto = {
 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
 };
 
-static int bpf_skb_output_btf_ids[5];
+BTF_ID_LIST(bpf_skb_output_btf_ids)
+BTF_ID(struct, sk_buff)
+
 const struct bpf_func_proto bpf_skb_output_proto = {
 	.func		= bpf_skb_event_output,
 	.gpl_only	= true,
@@ -4173,7 +4176,9 @@ static const struct bpf_func_proto bpf_xdp_event_output_proto = {
 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
 };
 
-static int bpf_xdp_output_btf_ids[5];
+BTF_ID_LIST(bpf_xdp_output_btf_ids)
+BTF_ID(struct, xdp_buff)
+
 const struct bpf_func_proto bpf_xdp_output_proto = {
 	.func		= bpf_xdp_event_output,
 	.gpl_only	= true,
@@ -6894,6 +6899,7 @@ static bool __sock_filter_check_attach_type(int off,
 	case offsetof(struct bpf_sock, priority):
 		switch (attach_type) {
 		case BPF_CGROUP_INET_SOCK_CREATE:
+		case BPF_CGROUP_INET_SOCK_RELEASE:
 			goto full_access;
 		default:
 			return false;
net/ipv4/af_inet.c

@@ -411,6 +411,9 @@ int inet_release(struct socket *sock)
 	if (sk) {
 		long timeout;
 
+		if (!sk->sk_kern_sock)
+			BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk);
+
 		/* Applications forget to leave groups before exiting */
 		ip_mc_drop_socket(sk);
net/xdp/xsk.c

@@ -123,7 +123,7 @@ static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
 	addr = xp_get_handle(xskb);
 	err = xskq_prod_reserve_desc(xs->rx, addr, len);
 	if (err) {
-		xs->rx_dropped++;
+		xs->rx_queue_full++;
 		return err;
 	}
 
@@ -274,8 +274,10 @@ bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc)
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
-		if (!xskq_cons_peek_desc(xs->tx, desc, umem))
+		if (!xskq_cons_peek_desc(xs->tx, desc, umem)) {
+			xs->tx->queue_empty_descs++;
 			continue;
+		}
 
 		/* This is the backpressure mechanism for the Tx path.
 		 * Reserve space in the completion queue and only proceed
@@ -387,6 +389,8 @@ static int xsk_generic_xmit(struct sock *sk)
 		sent_frame = true;
 	}
 
+	xs->tx->queue_empty_descs++;
+
 out:
 	if (sent_frame)
 		sk->sk_write_space(sk);
@@ -812,6 +816,12 @@ static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
 	ring->desc = offsetof(struct xdp_umem_ring, desc);
 }
 
+struct xdp_statistics_v1 {
+	__u64 rx_dropped;
+	__u64 rx_invalid_descs;
+	__u64 tx_invalid_descs;
+};
+
 static int xsk_getsockopt(struct socket *sock, int level, int optname,
 			  char __user *optval, int __user *optlen)
 {
@@ -831,19 +841,35 @@ static int xsk_getsockopt(struct socket *sock, int level, int optname,
 	case XDP_STATISTICS:
 	{
 		struct xdp_statistics stats;
+		bool extra_stats = true;
+		size_t stats_size;
 
-		if (len < sizeof(stats))
+		if (len < sizeof(struct xdp_statistics_v1)) {
 			return -EINVAL;
+		} else if (len < sizeof(stats)) {
+			extra_stats = false;
+			stats_size = sizeof(struct xdp_statistics_v1);
+		} else {
+			stats_size = sizeof(stats);
+		}
 
 		mutex_lock(&xs->mutex);
 		stats.rx_dropped = xs->rx_dropped;
+		if (extra_stats) {
+			stats.rx_ring_full = xs->rx_queue_full;
+			stats.rx_fill_ring_empty_descs =
+				xs->umem ? xskq_nb_queue_empty_descs(xs->umem->fq) : 0;
+			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
+		} else {
+			stats.rx_dropped += xs->rx_queue_full;
+		}
 		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
 		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
 		mutex_unlock(&xs->mutex);
 
-		if (copy_to_user(optval, &stats, sizeof(stats)))
+		if (copy_to_user(optval, &stats, stats_size))
 			return -EFAULT;
-		if (put_user(sizeof(stats), optlen))
+		if (put_user(stats_size, optlen))
 			return -EFAULT;
 
 		return 0;
net/xdp/xsk_buff_pool.c

@@ -189,6 +189,7 @@ static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
 
 	for (;;) {
 		if (!xskq_cons_peek_addr_unchecked(pool->fq, &addr)) {
+			pool->fq->queue_empty_descs++;
 			xp_release(xskb);
 			return NULL;
 		}
net/xdp/xsk_diag.c

@@ -76,6 +76,19 @@ static int xsk_diag_put_umem(const struct xdp_sock *xs, struct sk_buff *nlskb)
 	return err;
 }
 
+static int xsk_diag_put_stats(const struct xdp_sock *xs, struct sk_buff *nlskb)
+{
+	struct xdp_diag_stats du = {};
+
+	du.n_rx_dropped = xs->rx_dropped;
+	du.n_rx_invalid = xskq_nb_invalid_descs(xs->rx);
+	du.n_rx_full = xs->rx_queue_full;
+	du.n_fill_ring_empty = xs->umem ? xskq_nb_queue_empty_descs(xs->umem->fq) : 0;
+	du.n_tx_invalid = xskq_nb_invalid_descs(xs->tx);
+	du.n_tx_ring_empty = xskq_nb_queue_empty_descs(xs->tx);
+	return nla_put(nlskb, XDP_DIAG_STATS, sizeof(du), &du);
+}
+
 static int xsk_diag_fill(struct sock *sk, struct sk_buff *nlskb,
 			 struct xdp_diag_req *req,
 			 struct user_namespace *user_ns,
@@ -118,6 +131,10 @@ static int xsk_diag_fill(struct sock *sk, struct sk_buff *nlskb,
 	    sock_diag_put_meminfo(sk, nlskb, XDP_DIAG_MEMINFO))
 		goto out_nlmsg_trim;
 
+	if ((req->xdiag_show & XDP_SHOW_STATS) &&
+	    xsk_diag_put_stats(xs, nlskb))
+		goto out_nlmsg_trim;
+
 	mutex_unlock(&xs->mutex);
 	nlmsg_end(nlskb, nlh);
 	return 0;
net/xdp/xsk_queue.h

@@ -38,6 +38,7 @@ struct xsk_queue {
 	u32 cached_cons;
 	struct xdp_ring *ring;
 	u64 invalid_descs;
+	u64 queue_empty_descs;
 };
 
 /* The structure of the shared state of the rings are the same as the
@@ -354,6 +355,11 @@ static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
 	return q ? q->invalid_descs : 0;
 }
 
+static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q)
+{
+	return q ? q->queue_empty_descs : 0;
+}
+
 struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
 void xskq_destroy(struct xsk_queue *q_ops);
samples/bpf/Makefile

@@ -93,7 +93,7 @@ sampleip-objs := sampleip_user.o $(TRACE_HELPERS)
 tc_l2_redirect-objs := bpf_load.o tc_l2_redirect_user.o
 lwt_len_hist-objs := bpf_load.o lwt_len_hist_user.o
 xdp_tx_iptunnel-objs := xdp_tx_iptunnel_user.o
-test_map_in_map-objs := bpf_load.o test_map_in_map_user.o
+test_map_in_map-objs := test_map_in_map_user.o
 per_socket_stats_example-objs := cookie_uid_helper_example.o
 xdp_redirect-objs := xdp_redirect_user.o
 xdp_redirect_map-objs := xdp_redirect_map_user.o
samples/bpf/fds_example.c

@@ -30,6 +30,8 @@
 #define BPF_M_MAP	1
 #define BPF_M_PROG	2
 
+char bpf_log_buf[BPF_LOG_BUF_SIZE];
+
 static void usage(void)
 {
 	printf("Usage: fds_example [...]\n");
@@ -57,7 +59,6 @@ static int bpf_prog_create(const char *object)
 		BPF_EXIT_INSN(),
 	};
 	size_t insns_cnt = sizeof(insns) / sizeof(struct bpf_insn);
-	char bpf_log_buf[BPF_LOG_BUF_SIZE];
 	struct bpf_object *obj;
 	int prog_fd;
samples/bpf/map_perf_test_kern.c

@@ -9,95 +9,100 @@
 #include <linux/version.h>
 #include <uapi/linux/bpf.h>
 #include <bpf/bpf_helpers.h>
-#include "bpf_legacy.h"
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "trace_common.h"
 
 #define MAX_ENTRIES 1000
 #define MAX_NR_CPUS 1024
 
-struct bpf_map_def_legacy SEC("maps") hash_map = {
-	.type = BPF_MAP_TYPE_HASH,
-	.key_size = sizeof(u32),
-	.value_size = sizeof(long),
-	.max_entries = MAX_ENTRIES,
-};
-
-struct bpf_map_def_legacy SEC("maps") lru_hash_map = {
-	.type = BPF_MAP_TYPE_LRU_HASH,
-	.key_size = sizeof(u32),
-	.value_size = sizeof(long),
-	.max_entries = 10000,
-};
-
-struct bpf_map_def_legacy SEC("maps") nocommon_lru_hash_map = {
-	.type = BPF_MAP_TYPE_LRU_HASH,
-	.key_size = sizeof(u32),
-	.value_size = sizeof(long),
-	.max_entries = 10000,
-	.map_flags = BPF_F_NO_COMMON_LRU,
-};
-
-struct bpf_map_def_legacy SEC("maps") inner_lru_hash_map = {
-	.type = BPF_MAP_TYPE_LRU_HASH,
-	.key_size = sizeof(u32),
-	.value_size = sizeof(long),
-	.max_entries = MAX_ENTRIES,
-	.map_flags = BPF_F_NUMA_NODE,
-	.numa_node = 0,
-};
-
-struct bpf_map_def_legacy SEC("maps") array_of_lru_hashs = {
-	.type = BPF_MAP_TYPE_ARRAY_OF_MAPS,
-	.key_size = sizeof(u32),
-	.max_entries = MAX_NR_CPUS,
-};
-
-struct bpf_map_def_legacy SEC("maps") percpu_hash_map = {
-	.type = BPF_MAP_TYPE_PERCPU_HASH,
-	.key_size = sizeof(u32),
-	.value_size = sizeof(long),
-	.max_entries = MAX_ENTRIES,
-};
-
-struct bpf_map_def_legacy SEC("maps") hash_map_alloc = {
-	.type = BPF_MAP_TYPE_HASH,
-	.key_size = sizeof(u32),
-	.value_size = sizeof(long),
-	.max_entries = MAX_ENTRIES,
-	.map_flags = BPF_F_NO_PREALLOC,
-};
-
-struct bpf_map_def_legacy SEC("maps") percpu_hash_map_alloc = {
-	.type = BPF_MAP_TYPE_PERCPU_HASH,
-	.key_size = sizeof(u32),
-	.value_size = sizeof(long),
-	.max_entries = MAX_ENTRIES,
-	.map_flags = BPF_F_NO_PREALLOC,
-};
-
-struct bpf_map_def_legacy SEC("maps") lpm_trie_map_alloc = {
-	.type = BPF_MAP_TYPE_LPM_TRIE,
-	.key_size = 8,
-	.value_size = sizeof(long),
-	.max_entries = 10000,
-	.map_flags = BPF_F_NO_PREALLOC,
-};
-
-struct bpf_map_def_legacy SEC("maps") array_map = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.key_size = sizeof(u32),
-	.value_size = sizeof(long),
-	.max_entries = MAX_ENTRIES,
-};
-
-struct bpf_map_def_legacy SEC("maps") lru_hash_lookup_map = {
-	.type = BPF_MAP_TYPE_LRU_HASH,
-	.key_size = sizeof(u32),
-	.value_size = sizeof(long),
-	.max_entries = MAX_ENTRIES,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__type(key, u32);
+	__type(value, long);
+	__uint(max_entries, MAX_ENTRIES);
+} hash_map SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_LRU_HASH);
+	__type(key, u32);
+	__type(value, long);
+	__uint(max_entries, 10000);
+} lru_hash_map SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_LRU_HASH);
+	__type(key, u32);
+	__type(value, long);
+	__uint(max_entries, 10000);
+	__uint(map_flags, BPF_F_NO_COMMON_LRU);
+} nocommon_lru_hash_map SEC(".maps");
+
+struct inner_lru {
+	__uint(type, BPF_MAP_TYPE_LRU_HASH);
+	__type(key, u32);
+	__type(value, long);
+	__uint(max_entries, MAX_ENTRIES);
+	__uint(map_flags, BPF_F_NUMA_NODE);
+	__uint(numa_node, 0);
+} inner_lru_hash_map SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+	__uint(max_entries, MAX_NR_CPUS);
+	__uint(key_size, sizeof(u32));
+	__array(values, struct inner_lru); /* use inner_lru as inner map */
+} array_of_lru_hashs SEC(".maps") = {
+	/* statically initialize the first element */
+	.values = { &inner_lru_hash_map },
+};
+
+struct {
+	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+	__uint(key_size, sizeof(u32));
+	__uint(value_size, sizeof(long));
+	__uint(max_entries, MAX_ENTRIES);
+} percpu_hash_map SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__type(key, u32);
+	__type(value, long);
+	__uint(max_entries, MAX_ENTRIES);
+	__uint(map_flags, BPF_F_NO_PREALLOC);
+} hash_map_alloc SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+	__uint(key_size, sizeof(u32));
+	__uint(value_size, sizeof(long));
+	__uint(max_entries, MAX_ENTRIES);
+	__uint(map_flags, BPF_F_NO_PREALLOC);
+} percpu_hash_map_alloc SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_LPM_TRIE);
+	__uint(key_size, 8);
+	__uint(value_size, sizeof(long));
+	__uint(max_entries, 10000);
+	__uint(map_flags, BPF_F_NO_PREALLOC);
+} lpm_trie_map_alloc SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__type(key, u32);
+	__type(value, long);
+	__uint(max_entries, MAX_ENTRIES);
+} array_map SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_LRU_HASH);
+	__type(key, u32);
+	__type(value, long);
+	__uint(max_entries, MAX_ENTRIES);
+} lru_hash_lookup_map SEC(".maps");
 
-SEC("kprobe/sys_getuid")
+SEC("kprobe/" SYSCALL(sys_getuid))
 int stress_hmap(struct pt_regs *ctx)
 {
 	u32 key = bpf_get_current_pid_tgid();
@@ -112,7 +117,7 @@ int stress_hmap(struct pt_regs *ctx)
 	return 0;
 }
 
-SEC("kprobe/sys_geteuid")
+SEC("kprobe/" SYSCALL(sys_geteuid))
 int stress_percpu_hmap(struct pt_regs *ctx)
 {
 	u32 key = bpf_get_current_pid_tgid();
@@ -126,7 +131,7 @@ int stress_percpu_hmap(struct pt_regs *ctx)
 	return 0;
 }
 
-SEC("kprobe/sys_getgid")
+SEC("kprobe/" SYSCALL(sys_getgid))
 int stress_hmap_alloc(struct pt_regs *ctx)
 {
 	u32 key = bpf_get_current_pid_tgid();
@@ -140,7 +145,7 @@ int stress_hmap_alloc(struct pt_regs *ctx)
 	return 0;
 }
 
-SEC("kprobe/sys_getegid")
+SEC("kprobe/" SYSCALL(sys_getegid))
 int stress_percpu_hmap_alloc(struct pt_regs *ctx)
 {
 	u32 key = bpf_get_current_pid_tgid();
@@ -154,9 +159,10 @@ int stress_percpu_hmap_alloc(struct pt_regs *ctx)
 	return 0;
 }
 
-SEC("kprobe/sys_connect")
+SEC("kprobe/" SYSCALL(sys_connect))
 int stress_lru_hmap_alloc(struct pt_regs *ctx)
 {
+	struct pt_regs *real_regs = (struct pt_regs *)PT_REGS_PARM1_CORE(ctx);
 	char fmt[] = "Failed at stress_lru_hmap_alloc. ret:%d\n";
 	union {
 		u16 dst6[8];
@@ -175,8 +181,8 @@ int stress_lru_hmap_alloc(struct pt_regs *ctx)
 	long val = 1;
 	u32 key = 0;
 
-	in6 = (struct sockaddr_in6 *)PT_REGS_PARM2(ctx);
-	addrlen = (int)PT_REGS_PARM3(ctx);
+	in6 = (struct sockaddr_in6 *)PT_REGS_PARM2_CORE(real_regs);
+	addrlen = (int)PT_REGS_PARM3_CORE(real_regs);
 
 	if (addrlen != sizeof(*in6))
 		return 0;
@@ -233,7 +239,7 @@ done:
 	return 0;
 }
 
-SEC("kprobe/sys_gettid")
+SEC("kprobe/" SYSCALL(sys_gettid))
 int stress_lpm_trie_map_alloc(struct pt_regs *ctx)
 {
 	union {
@@ -255,7 +261,7 @@ int stress_lpm_trie_map_alloc(struct pt_regs *ctx)
 	return 0;
 }
 
-SEC("kprobe/sys_getpgid")
+SEC("kprobe/" SYSCALL(sys_getpgid))
 int stress_hash_map_lookup(struct pt_regs *ctx)
 {
 	u32 key = 1, i;
@@ -268,7 +274,7 @@ int stress_hash_map_lookup(struct pt_regs *ctx)
 	return 0;
 }
 
-SEC("kprobe/sys_getppid")
+SEC("kprobe/" SYSCALL(sys_getppid))
 int stress_array_map_lookup(struct pt_regs *ctx)
 {
 	u32 key = 1, i;
samples/bpf/map_perf_test_user.c

@@ -11,7 +11,6 @@
 #include <sys/wait.h>
 #include <stdlib.h>
 #include <signal.h>
-#include <linux/bpf.h>
 #include <string.h>
 #include <time.h>
 #include <sys/resource.h>
@@ -19,7 +18,7 @@
 #include <errno.h>
 
 #include <bpf/bpf.h>
-#include "bpf_load.h"
+#include <bpf/libbpf.h>
 
 #define TEST_BIT(t) (1U << (t))
 #define MAX_NR_CPUS 1024
@@ -61,12 +60,18 @@ const char *test_map_names[NR_TESTS] = {
 	[LRU_HASH_LOOKUP] = "lru_hash_lookup_map",
 };
 
+enum map_idx {
+	array_of_lru_hashs_idx,
+	hash_map_alloc_idx,
+	lru_hash_lookup_idx,
+	NR_IDXES,
+};
+
+static int map_fd[NR_IDXES];
+
 static int test_flags = ~0;
 static uint32_t num_map_entries;
 static uint32_t inner_lru_hash_size;
-static int inner_lru_hash_idx = -1;
-static int array_of_lru_hashs_idx = -1;
-static int lru_hash_lookup_idx = -1;
 static int lru_hash_lookup_test_entries = 32;
 static uint32_t max_cnt = 1000000;
 
@@ -122,30 +127,30 @@ static void do_test_lru(enum test_type test, int cpu)
 	__u64 start_time;
 	int i, ret;
 
-	if (test == INNER_LRU_HASH_PREALLOC) {
+	if (test == INNER_LRU_HASH_PREALLOC && cpu) {
 		/* If CPU is not 0, create inner_lru hash map and insert the fd
 		 * value into the array_of_lru_hash map. In case of CPU 0,
 		 * 'inner_lru_hash_map' was statically inserted on the map init
 		 */
 		int outer_fd = map_fd[array_of_lru_hashs_idx];
 		unsigned int mycpu, mynode;
 
 		assert(cpu < MAX_NR_CPUS);
 
-		if (cpu) {
-			ret = syscall(__NR_getcpu, &mycpu, &mynode, NULL);
-			assert(!ret);
+		ret = syscall(__NR_getcpu, &mycpu, &mynode, NULL);
+		assert(!ret);
 
-			inner_lru_map_fds[cpu] =
-				bpf_create_map_node(BPF_MAP_TYPE_LRU_HASH,
-						    test_map_names[INNER_LRU_HASH_PREALLOC],
-						    sizeof(uint32_t),
-						    sizeof(long),
-						    inner_lru_hash_size, 0,
-						    mynode);
-			if (inner_lru_map_fds[cpu] == -1) {
-				printf("cannot create BPF_MAP_TYPE_LRU_HASH %s(%d)\n",
-				       strerror(errno), errno);
-				exit(1);
-			}
-		} else {
-			inner_lru_map_fds[cpu] = map_fd[inner_lru_hash_idx];
+		inner_lru_map_fds[cpu] =
+			bpf_create_map_node(BPF_MAP_TYPE_LRU_HASH,
+					    test_map_names[INNER_LRU_HASH_PREALLOC],
+					    sizeof(uint32_t),
+					    sizeof(long),
+					    inner_lru_hash_size, 0,
+					    mynode);
+		if (inner_lru_map_fds[cpu] == -1) {
+			printf("cannot create BPF_MAP_TYPE_LRU_HASH %s(%d)\n",
+			       strerror(errno), errno);
+			exit(1);
 		}
 
 		ret = bpf_map_update_elem(outer_fd, &cpu,
@@ -377,7 +382,8 @@ static void fill_lpm_trie(void)
 		key->data[1] = rand() & 0xff;
 		key->data[2] = rand() & 0xff;
 		key->data[3] = rand() & 0xff;
-		r = bpf_map_update_elem(map_fd[6], key, &value, 0);
+		r = bpf_map_update_elem(map_fd[hash_map_alloc_idx],
+					key, &value, 0);
 		assert(!r);
 	}
 
@@ -388,59 +394,52 @@ static void fill_lpm_trie(void)
 	key->data[3] = 1;
 	value = 128;
 
-	r = bpf_map_update_elem(map_fd[6], key, &value, 0);
+	r = bpf_map_update_elem(map_fd[hash_map_alloc_idx], key, &value, 0);
 	assert(!r);
 }
 
-static void fixup_map(struct bpf_map_data *map, int idx)
+static void fixup_map(struct bpf_object *obj)
 {
+	struct bpf_map *map;
 	int i;
 
-	if (!strcmp("inner_lru_hash_map", map->name)) {
-		inner_lru_hash_idx = idx;
-		inner_lru_hash_size = map->def.max_entries;
-	}
+	bpf_object__for_each_map(map, obj) {
+		const char *name = bpf_map__name(map);
 
-	if (!strcmp("array_of_lru_hashs", map->name)) {
-		if (inner_lru_hash_idx == -1) {
-			printf("inner_lru_hash_map must be defined before array_of_lru_hashs\n");
-			exit(1);
+		/* Only change the max_entries for the enabled test(s) */
+		for (i = 0; i < NR_TESTS; i++) {
+			if (!strcmp(test_map_names[i], name) &&
+			    (check_test_flags(i))) {
+				bpf_map__resize(map, num_map_entries);
+				continue;
+			}
 		}
-		map->def.inner_map_idx = inner_lru_hash_idx;
-		array_of_lru_hashs_idx = idx;
 	}
-
-	if (!strcmp("lru_hash_lookup_map", map->name))
-		lru_hash_lookup_idx = idx;
-
-	if (num_map_entries <= 0)
-		return;
-
-	inner_lru_hash_size = num_map_entries;
-
-	/* Only change the max_entries for the enabled test(s) */
-	for (i = 0; i < NR_TESTS; i++) {
-		if (!strcmp(test_map_names[i], map->name) &&
-		    (check_test_flags(i))) {
-			map->def.max_entries = num_map_entries;
-		}
-	}
 }
 
 int main(int argc, char **argv)
 {
 	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
+	int nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+	struct bpf_link *links[8];
+	struct bpf_program *prog;
+	struct bpf_object *obj;
+	struct bpf_map *map;
 	char filename[256];
-	int num_cpu = 8;
+	int i = 0;
 
-	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
-	setrlimit(RLIMIT_MEMLOCK, &r);
+	if (setrlimit(RLIMIT_MEMLOCK, &r)) {
+		perror("setrlimit(RLIMIT_MEMLOCK)");
+		return 1;
+	}
 
 	if (argc > 1)
 		test_flags = atoi(argv[1]) ? : test_flags;
 
 	if (argc > 2)
-		num_cpu = atoi(argv[2]) ? : num_cpu;
+		nr_cpus = atoi(argv[2]) ? : nr_cpus;
 
 	if (argc > 3)
 		num_map_entries = atoi(argv[3]);
@@ -448,14 +447,61 @@ int main(int argc, char **argv)
 	if (argc > 4)
 		max_cnt = atoi(argv[4]);
 
-	if (load_bpf_file_fixup_map(filename, fixup_map)) {
-		printf("%s", bpf_log_buf);
-		return 1;
+	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+	obj = bpf_object__open_file(filename, NULL);
+	if (libbpf_get_error(obj)) {
+		fprintf(stderr, "ERROR: opening BPF object file failed\n");
+		return 0;
 	}
 
+	map = bpf_object__find_map_by_name(obj, "inner_lru_hash_map");
+	if (libbpf_get_error(map)) {
+		fprintf(stderr, "ERROR: finding a map in obj file failed\n");
+		goto cleanup;
+	}
+
+	inner_lru_hash_size = bpf_map__max_entries(map);
+	if (!inner_lru_hash_size) {
+		fprintf(stderr, "ERROR: failed to get map attribute\n");
+		goto cleanup;
+	}
+
+	/* resize BPF map prior to loading */
+	if (num_map_entries > 0)
+		fixup_map(obj);
+
+	/* load BPF program */
+	if (bpf_object__load(obj)) {
+		fprintf(stderr, "ERROR: loading BPF object file failed\n");
+		goto cleanup;
+	}
+
+	map_fd[0] = bpf_object__find_map_fd_by_name(obj, "array_of_lru_hashs");
+	map_fd[1] = bpf_object__find_map_fd_by_name(obj, "hash_map_alloc");
+	map_fd[2] = bpf_object__find_map_fd_by_name(obj, "lru_hash_lookup_map");
+	if (map_fd[0] < 0 || map_fd[1] < 0 || map_fd[2] < 0) {
+		fprintf(stderr, "ERROR: finding a map in obj file failed\n");
+		goto cleanup;
+	}
+
+	bpf_object__for_each_program(prog, obj) {
+		links[i] = bpf_program__attach(prog);
+		if (libbpf_get_error(links[i])) {
+			fprintf(stderr, "ERROR: bpf_program__attach failed\n");
+			links[i] = NULL;
+			goto cleanup;
+		}
+		i++;
+	}
+
 	fill_lpm_trie();
 
-	run_perf_test(num_cpu);
+	run_perf_test(nr_cpus);
+
+cleanup:
+	for (i--; i >= 0; i--)
+		bpf_link__destroy(links[i]);
 
+	bpf_object__close(obj);
 	return 0;
 }
samples/bpf/test_map_in_map_kern.c

@@ -11,66 +11,67 @@
 #include <uapi/linux/bpf.h>
 #include <uapi/linux/in6.h>
 #include <bpf/bpf_helpers.h>
-#include "bpf_legacy.h"
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "trace_common.h"
 
 #define MAX_NR_PORTS 65536
 
 /* map #0 */
-struct bpf_map_def_legacy SEC("maps") port_a = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.key_size = sizeof(u32),
-	.value_size = sizeof(int),
-	.max_entries = MAX_NR_PORTS,
-};
+struct inner_a {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__type(key, u32);
+	__type(value, int);
+	__uint(max_entries, MAX_NR_PORTS);
+} port_a SEC(".maps");
 
 /* map #1 */
-struct bpf_map_def_legacy SEC("maps") port_h = {
-	.type = BPF_MAP_TYPE_HASH,
-	.key_size = sizeof(u32),
-	.value_size = sizeof(int),
-	.max_entries = 1,
-};
+struct inner_h {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__type(key, u32);
+	__type(value, int);
+	__uint(max_entries, 1);
+} port_h SEC(".maps");
 
 /* map #2 */
-struct bpf_map_def_legacy SEC("maps") reg_result_h = {
-	.type = BPF_MAP_TYPE_HASH,
-	.key_size = sizeof(u32),
-	.value_size = sizeof(int),
-	.max_entries = 1,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__type(key, u32);
+	__type(value, int);
+	__uint(max_entries, 1);
+} reg_result_h SEC(".maps");
 
 /* map #3 */
-struct bpf_map_def_legacy SEC("maps") inline_result_h = {
-	.type = BPF_MAP_TYPE_HASH,
-	.key_size = sizeof(u32),
-	.value_size = sizeof(int),
-	.max_entries = 1,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__type(key, u32);
+	__type(value, int);
+	__uint(max_entries, 1);
+} inline_result_h SEC(".maps");
 
 /* map #4 */ /* Test case #0 */
-struct bpf_map_def_legacy SEC("maps") a_of_port_a = {
-	.type = BPF_MAP_TYPE_ARRAY_OF_MAPS,
-	.key_size = sizeof(u32),
-	.inner_map_idx = 0, /* map_fd[0] is port_a */
-	.max_entries = MAX_NR_PORTS,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+	__uint(max_entries, MAX_NR_PORTS);
+	__uint(key_size, sizeof(u32));
+	__array(values, struct inner_a); /* use inner_a as inner map */
+} a_of_port_a SEC(".maps");
 
 /* map #5 */ /* Test case #1 */
-struct bpf_map_def_legacy SEC("maps") h_of_port_a = {
-	.type = BPF_MAP_TYPE_HASH_OF_MAPS,
-	.key_size = sizeof(u32),
-	.inner_map_idx = 0, /* map_fd[0] is port_a */
-	.max_entries = 1,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+	__uint(max_entries, 1);
+	__uint(key_size, sizeof(u32));
+	__array(values, struct inner_a); /* use inner_a as inner map */
+} h_of_port_a SEC(".maps");
 
 /* map #6 */ /* Test case #2 */
-struct bpf_map_def_legacy SEC("maps") h_of_port_h = {
-	.type = BPF_MAP_TYPE_HASH_OF_MAPS,
-	.key_size = sizeof(u32),
-	.inner_map_idx = 1, /* map_fd[1] is port_h */
-	.max_entries = 1,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+	__uint(max_entries, 1);
+	__uint(key_size, sizeof(u32));
+	__array(values, struct inner_h); /* use inner_h as inner map */
+} h_of_port_h SEC(".maps");
 
 static __always_inline int do_reg_lookup(void *inner_map, u32 port)
 {
@@ -102,9 +103,10 @@ static __always_inline int do_inline_hash_lookup(void *inner_map, u32 port)
 	return result ? *result : -ENOENT;
 }
 
-SEC("kprobe/sys_connect")
+SEC("kprobe/" SYSCALL(sys_connect))
 int trace_sys_connect(struct pt_regs *ctx)
 {
+	struct pt_regs *real_regs = (struct pt_regs *)PT_REGS_PARM1_CORE(ctx);
 	struct sockaddr_in6 *in6;
 	u16 test_case, port, dst6[8];
 	int addrlen, ret, inline_ret, ret_key = 0;
@@ -112,8 +114,8 @@ int trace_sys_connect(struct pt_regs *ctx)
 	void *outer_map, *inner_map;
 	bool inline_hash = false;
 
-	in6 = (struct sockaddr_in6 *)PT_REGS_PARM2(ctx);
-	addrlen = (int)PT_REGS_PARM3(ctx);
+	in6 = (struct sockaddr_in6 *)PT_REGS_PARM2_CORE(real_regs);
+	addrlen = (int)PT_REGS_PARM3_CORE(real_regs);
 
 	if (addrlen != sizeof(*in6))
 		return 0;
samples/bpf/test_map_in_map_user.c

@@ -11,7 +11,9 @@
 #include <stdlib.h>
 #include <stdio.h>
 #include <bpf/bpf.h>
-#include "bpf_load.h"
+#include <bpf/libbpf.h>
+
+static int map_fd[7];
 
 #define PORT_A		(map_fd[0])
 #define PORT_H		(map_fd[1])
@@ -113,18 +115,59 @@ static void test_map_in_map(void)
 int main(int argc, char **argv)
 {
 	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
+	struct bpf_link *link = NULL;
+	struct bpf_program *prog;
+	struct bpf_object *obj;
 	char filename[256];
 
-	assert(!setrlimit(RLIMIT_MEMLOCK, &r));
+	if (setrlimit(RLIMIT_MEMLOCK, &r)) {
+		perror("setrlimit(RLIMIT_MEMLOCK)");
+		return 1;
+	}
 
 	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+	obj = bpf_object__open_file(filename, NULL);
+	if (libbpf_get_error(obj)) {
+		fprintf(stderr, "ERROR: opening BPF object file failed\n");
+		return 0;
+	}
 
-	if (load_bpf_file(filename)) {
-		printf("%s", bpf_log_buf);
-		return 1;
+	prog = bpf_object__find_program_by_name(obj, "trace_sys_connect");
+	if (!prog) {
+		printf("finding a prog in obj file failed\n");
+		goto cleanup;
 	}
 
+	/* load BPF program */
+	if (bpf_object__load(obj)) {
+		fprintf(stderr, "ERROR: loading BPF object file failed\n");
+		goto cleanup;
+	}
+
+	map_fd[0] = bpf_object__find_map_fd_by_name(obj, "port_a");
+	map_fd[1] = bpf_object__find_map_fd_by_name(obj, "port_h");
+	map_fd[2] = bpf_object__find_map_fd_by_name(obj, "reg_result_h");
+	map_fd[3] = bpf_object__find_map_fd_by_name(obj, "inline_result_h");
+	map_fd[4] = bpf_object__find_map_fd_by_name(obj, "a_of_port_a");
+	map_fd[5] = bpf_object__find_map_fd_by_name(obj, "h_of_port_a");
+	map_fd[6] = bpf_object__find_map_fd_by_name(obj, "h_of_port_h");
+	if (map_fd[0] < 0 || map_fd[1] < 0 || map_fd[2] < 0 ||
+	    map_fd[3] < 0 || map_fd[4] < 0 || map_fd[5] < 0 || map_fd[6] < 0) {
+		fprintf(stderr, "ERROR: finding a map in obj file failed\n");
+		goto cleanup;
+	}
+
+	link = bpf_program__attach(prog);
+	if (libbpf_get_error(link)) {
+		fprintf(stderr, "ERROR: bpf_program__attach failed\n");
+		link = NULL;
+		goto cleanup;
+	}
+
 	test_map_in_map();
 
+cleanup:
+	bpf_link__destroy(link);
+	bpf_object__close(obj);
 	return 0;
 }
samples/bpf/test_probe_write_user_kern.c

@@ -10,6 +10,8 @@
 #include <linux/version.h>
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "trace_common.h"
 
 struct bpf_map_def SEC("maps") dnat_map = {
 	.type = BPF_MAP_TYPE_HASH,
@@ -26,13 +28,14 @@ struct bpf_map_def SEC("maps") dnat_map = {
  * This example sits on a syscall, and the syscall ABI is relatively stable
  * of course, across platforms, and over time, the ABI may change.
  */
-SEC("kprobe/sys_connect")
+SEC("kprobe/" SYSCALL(sys_connect))
 int bpf_prog1(struct pt_regs *ctx)
 {
+	struct pt_regs *real_regs = (struct pt_regs *)PT_REGS_PARM1_CORE(ctx);
+	void *sockaddr_arg = (void *)PT_REGS_PARM2_CORE(real_regs);
+	int sockaddr_len = (int)PT_REGS_PARM3_CORE(real_regs);
 	struct sockaddr_in new_addr, orig_addr = {};
 	struct sockaddr_in *mapped_addr;
-	void *sockaddr_arg = (void *)PT_REGS_PARM2(ctx);
-	int sockaddr_len = (int)PT_REGS_PARM3(ctx);
 
 	if (sockaddr_len > sizeof(orig_addr))
 		return 0;
samples/bpf/xdpsock_user.c

@@ -77,6 +77,7 @@ static u32 opt_batch_size = 64;
 static int opt_pkt_count;
 static u16 opt_pkt_size = MIN_PKT_SIZE;
 static u32 opt_pkt_fill_pattern = 0x12345678;
+static bool opt_extra_stats;
 static int opt_poll;
 static int opt_interval = 1;
 static u32 opt_xdp_bind_flags = XDP_USE_NEED_WAKEUP;
@@ -103,8 +104,20 @@ struct xsk_socket_info {
 	struct xsk_socket *xsk;
 	unsigned long rx_npkts;
 	unsigned long tx_npkts;
+	unsigned long rx_dropped_npkts;
+	unsigned long rx_invalid_npkts;
+	unsigned long tx_invalid_npkts;
+	unsigned long rx_full_npkts;
+	unsigned long rx_fill_empty_npkts;
+	unsigned long tx_empty_npkts;
 	unsigned long prev_rx_npkts;
 	unsigned long prev_tx_npkts;
+	unsigned long prev_rx_dropped_npkts;
+	unsigned long prev_rx_invalid_npkts;
+	unsigned long prev_tx_invalid_npkts;
+	unsigned long prev_rx_full_npkts;
+	unsigned long prev_rx_fill_empty_npkts;
+	unsigned long prev_tx_empty_npkts;
 	u32 outstanding_tx;
 };
 
@@ -147,6 +160,30 @@ static void print_benchmark(bool running)
 	}
 }
 
+static int xsk_get_xdp_stats(int fd, struct xsk_socket_info *xsk)
+{
+	struct xdp_statistics stats;
+	socklen_t optlen;
+	int err;
+
+	optlen = sizeof(stats);
+	err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen);
+	if (err)
+		return err;
+
+	if (optlen == sizeof(struct xdp_statistics)) {
+		xsk->rx_dropped_npkts = stats.rx_dropped;
+		xsk->rx_invalid_npkts = stats.rx_invalid_descs;
+		xsk->tx_invalid_npkts = stats.tx_invalid_descs;
+		xsk->rx_full_npkts = stats.rx_ring_full;
+		xsk->rx_fill_empty_npkts = stats.rx_fill_ring_empty_descs;
+		xsk->tx_empty_npkts = stats.tx_ring_empty_descs;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
 static void dump_stats(void)
 {
 	unsigned long now = get_nsecs();
@@ -157,7 +194,8 @@ static void dump_stats(void)
 
 	for (i = 0; i < num_socks && xsks[i]; i++) {
 		char *fmt = "%-15s %'-11.0f %'-11lu\n";
-		double rx_pps, tx_pps;
+		double rx_pps, tx_pps, dropped_pps, rx_invalid_pps, full_pps,
+		       fill_empty_pps, tx_invalid_pps, tx_empty_pps;
 
 		rx_pps = (xsks[i]->rx_npkts - xsks[i]->prev_rx_npkts) *
 			 1000000000. / dt;
@@ -175,6 +213,46 @@ static void dump_stats(void)
 
 		xsks[i]->prev_rx_npkts = xsks[i]->rx_npkts;
 		xsks[i]->prev_tx_npkts = xsks[i]->tx_npkts;
+
+		if (opt_extra_stats) {
+			if (!xsk_get_xdp_stats(xsk_socket__fd(xsks[i]->xsk), xsks[i])) {
+				dropped_pps = (xsks[i]->rx_dropped_npkts -
+						xsks[i]->prev_rx_dropped_npkts) * 1000000000. / dt;
+				rx_invalid_pps = (xsks[i]->rx_invalid_npkts -
+						xsks[i]->prev_rx_invalid_npkts) * 1000000000. / dt;
+				tx_invalid_pps = (xsks[i]->tx_invalid_npkts -
+						xsks[i]->prev_tx_invalid_npkts) * 1000000000. / dt;
+				full_pps = (xsks[i]->rx_full_npkts -
+						xsks[i]->prev_rx_full_npkts) * 1000000000. / dt;
+				fill_empty_pps = (xsks[i]->rx_fill_empty_npkts -
+						xsks[i]->prev_rx_fill_empty_npkts)
+						* 1000000000. / dt;
+				tx_empty_pps = (xsks[i]->tx_empty_npkts -
+						xsks[i]->prev_tx_empty_npkts) * 1000000000. / dt;
+
+				printf(fmt, "rx dropped", dropped_pps,
+				       xsks[i]->rx_dropped_npkts);
+				printf(fmt, "rx invalid", rx_invalid_pps,
+				       xsks[i]->rx_invalid_npkts);
+				printf(fmt, "tx invalid", tx_invalid_pps,
+				       xsks[i]->tx_invalid_npkts);
+				printf(fmt, "rx queue full", full_pps,
+				       xsks[i]->rx_full_npkts);
+				printf(fmt, "fill ring empty", fill_empty_pps,
+				       xsks[i]->rx_fill_empty_npkts);
+				printf(fmt, "tx ring empty", tx_empty_pps,
+				       xsks[i]->tx_empty_npkts);
+
+				xsks[i]->prev_rx_dropped_npkts = xsks[i]->rx_dropped_npkts;
+				xsks[i]->prev_rx_invalid_npkts = xsks[i]->rx_invalid_npkts;
+				xsks[i]->prev_tx_invalid_npkts = xsks[i]->tx_invalid_npkts;
+				xsks[i]->prev_rx_full_npkts = xsks[i]->rx_full_npkts;
+				xsks[i]->prev_rx_fill_empty_npkts = xsks[i]->rx_fill_empty_npkts;
+				xsks[i]->prev_tx_empty_npkts = xsks[i]->tx_empty_npkts;
+			} else {
+				printf("%-15s\n", "Error retrieving extra stats");
+			}
+		}
 	}
 }
 
@@ -630,6 +708,7 @@ static struct option long_options[] = {
 	{"tx-pkt-count", required_argument, 0, 'C'},
 	{"tx-pkt-size", required_argument, 0, 's'},
 	{"tx-pkt-pattern", required_argument, 0, 'P'},
+	{"extra-stats", no_argument, 0, 'x'},
 	{0, 0, 0, 0}
 };
 
@@ -664,6 +743,7 @@ static void usage(const char *prog)
 		"  (Default: %d bytes)\n"
 		"  Min size: %d, Max size %d.\n"
 		"  -P, --tx-pkt-pattern=n	Packet fill pattern. Default: 0x%x\n"
+		"  -x, --extra-stats	Display extra statistics.\n"
 		"\n";
 	fprintf(stderr, str, prog, XSK_UMEM__DEFAULT_FRAME_SIZE,
 		opt_batch_size, MIN_PKT_SIZE, MIN_PKT_SIZE,
@@ -679,7 +759,7 @@ static void parse_command_line(int argc, char **argv)
 	opterr = 0;
 
 	for (;;) {
-		c = getopt_long(argc, argv, "Frtli:q:pSNn:czf:muMd:b:C:s:P:",
+		c = getopt_long(argc, argv, "Frtli:q:pSNn:czf:muMd:b:C:s:P:x",
 				long_options, &option_index);
 		if (c == -1)
 			break;
@@ -760,6 +840,9 @@ static void parse_command_line(int argc, char **argv)
 		case 'P':
 			opt_pkt_fill_pattern = strtol(optarg, NULL, 16);
 			break;
+		case 'x':
+			opt_extra_stats = 1;
+			break;
 		default:
 			usage(basename(argv[0]));
 		}
@@ -336,6 +336,12 @@ fi

vmlinux_link vmlinux "${kallsymso}" ${btf_vmlinux_bin_o}

# fill in BTF IDs
if [ -n "${CONFIG_DEBUG_INFO_BTF}" ]; then
	info BTFIDS vmlinux
	${RESOLVE_BTFIDS} vmlinux
fi

if [ -n "${CONFIG_BUILDTIME_TABLE_SORT}" ]; then
	info SORTTAB vmlinux
	if ! sorttable vmlinux; then
@@ -67,6 +67,9 @@ cpupower: FORCE
cgroup firewire hv guest bootconfig spi usb virtio vm bpf iio gpio objtool leds wmi pci firmware debugging: FORCE
	$(call descend,$@)

bpf/%: FORCE
	$(call descend,$@)

liblockdep: FORCE
	$(call descend,lib/lockdep)

@@ -123,5 +123,12 @@ runqslower_install:
runqslower_clean:
	$(call descend,runqslower,clean)

resolve_btfids:
	$(call descend,resolve_btfids)

resolve_btfids_clean:
	$(call descend,resolve_btfids,clean)

.PHONY: all install clean bpftool bpftool_install bpftool_clean \
	runqslower runqslower_install runqslower_clean
	runqslower runqslower_install runqslower_clean \
	resolve_btfids resolve_btfids_clean
@@ -33,6 +33,7 @@ const char * const attach_type_name[__MAX_BPF_ATTACH_TYPE] = {
	[BPF_CGROUP_INET_INGRESS] = "ingress",
	[BPF_CGROUP_INET_EGRESS] = "egress",
	[BPF_CGROUP_INET_SOCK_CREATE] = "sock_create",
	[BPF_CGROUP_INET_SOCK_RELEASE] = "sock_release",
	[BPF_CGROUP_SOCK_OPS] = "sock_ops",
	[BPF_CGROUP_DEVICE] = "device",
	[BPF_CGROUP_INET4_BIND] = "bind4",
@@ -88,7 +88,7 @@ static const char *get_map_ident(const struct bpf_map *map)
	return NULL;
}

static void codegen_btf_dump_printf(void *ct, const char *fmt, va_list args)
static void codegen_btf_dump_printf(void *ctx, const char *fmt, va_list args)
{
	vprintf(fmt, args);
}
@@ -104,17 +104,20 @@ static int codegen_datasec_def(struct bpf_object *obj,
	int i, err, off = 0, pad_cnt = 0, vlen = btf_vlen(sec);
	const char *sec_ident;
	char var_ident[256];
	bool strip_mods = false;

	if (strcmp(sec_name, ".data") == 0)
	if (strcmp(sec_name, ".data") == 0) {
		sec_ident = "data";
	else if (strcmp(sec_name, ".bss") == 0)
	} else if (strcmp(sec_name, ".bss") == 0) {
		sec_ident = "bss";
	else if (strcmp(sec_name, ".rodata") == 0)
	} else if (strcmp(sec_name, ".rodata") == 0) {
		sec_ident = "rodata";
	else if (strcmp(sec_name, ".kconfig") == 0)
		strip_mods = true;
	} else if (strcmp(sec_name, ".kconfig") == 0) {
		sec_ident = "kconfig";
	else
	} else {
		return 0;
	}

	printf(" struct %s__%s {\n", obj_name, sec_ident);
	for (i = 0; i < vlen; i++, sec_var++) {
@@ -123,16 +126,10 @@ static int codegen_datasec_def(struct bpf_object *obj,
		DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
			.field_name = var_ident,
			.indent_level = 2,
			.strip_mods = strip_mods,
		);
		int need_off = sec_var->offset, align_off, align;
		__u32 var_type_id = var->type;
		const struct btf_type *t;

		t = btf__type_by_id(btf, var_type_id);
		while (btf_is_mod(t)) {
			var_type_id = t->type;
			t = btf__type_by_id(btf, var_type_id);
		}

		if (off > need_off) {
			p_err("Something is wrong for %s's variable #%d: need offset %d, already at %d.\n",
@@ -15,11 +15,11 @@

int build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type)
{
	p_err("bpftool built without PID iterator support");
	return -ENOTSUP;
}
void delete_obj_refs_table(struct obj_refs_table *table) {}
void emit_obj_refs_plain(struct obj_refs_table *table, __u32 id, const char *prefix) {}
void emit_obj_refs_json(struct obj_refs_table *table, __u32 id, json_writer_t *json_writer) {}

#else /* BPFTOOL_WITHOUT_SKELETONS */

10
tools/bpf/resolve_btfids/Build
Normal file
@@ -0,0 +1,10 @@
resolve_btfids-y += main.o
resolve_btfids-y += rbtree.o
resolve_btfids-y += zalloc.o
resolve_btfids-y += string.o
resolve_btfids-y += ctype.o
resolve_btfids-y += str_error_r.o

$(OUTPUT)%.o: ../../lib/%.c FORCE
	$(call rule_mkdir)
	$(call if_changed_dep,cc_o_c)
77
tools/bpf/resolve_btfids/Makefile
Normal file
@@ -0,0 +1,77 @@
# SPDX-License-Identifier: GPL-2.0-only
include ../../scripts/Makefile.include

ifeq ($(srctree),)
srctree := $(patsubst %/,%,$(dir $(CURDIR)))
srctree := $(patsubst %/,%,$(dir $(srctree)))
srctree := $(patsubst %/,%,$(dir $(srctree)))
endif

ifeq ($(V),1)
Q =
msg =
else
Q = @
msg = @printf ' %-8s %s%s\n' "$(1)" "$(notdir $(2))" "$(if $(3), $(3))";
MAKEFLAGS=--no-print-directory
endif

OUTPUT ?= $(srctree)/tools/bpf/resolve_btfids/

LIBBPF_SRC := $(srctree)/tools/lib/bpf/
SUBCMD_SRC := $(srctree)/tools/lib/subcmd/

BPFOBJ := $(OUTPUT)/libbpf.a
SUBCMDOBJ := $(OUTPUT)/libsubcmd.a

BINARY := $(OUTPUT)/resolve_btfids
BINARY_IN := $(BINARY)-in.o

all: $(BINARY)

$(OUTPUT):
	$(call msg,MKDIR,,$@)
	$(Q)mkdir -p $(OUTPUT)

$(SUBCMDOBJ): fixdep FORCE
	$(Q)$(MAKE) -C $(SUBCMD_SRC) OUTPUT=$(OUTPUT)

$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(OUTPUT)
	$(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(abspath $(dir $@))/ $(abspath $@)

CFLAGS := -g \
	-I$(srctree)/tools/include \
	-I$(srctree)/tools/include/uapi \
	-I$(LIBBPF_SRC) \
	-I$(SUBCMD_SRC)

LIBS = -lelf -lz

export srctree OUTPUT CFLAGS Q
include $(srctree)/tools/build/Makefile.include

$(BINARY_IN): fixdep FORCE
	$(Q)$(MAKE) $(build)=resolve_btfids

$(BINARY): $(BPFOBJ) $(SUBCMDOBJ) $(BINARY_IN)
	$(call msg,LINK,$@)
	$(Q)$(CC) $(BINARY_IN) $(LDFLAGS) -o $@ $(BPFOBJ) $(SUBCMDOBJ) $(LIBS)

libsubcmd-clean:
	$(Q)$(MAKE) -C $(SUBCMD_SRC) OUTPUT=$(OUTPUT) clean

libbpf-clean:
	$(Q)$(MAKE) -C $(LIBBPF_SRC) OUTPUT=$(OUTPUT) clean

clean: libsubcmd-clean libbpf-clean fixdep-clean
	$(call msg,CLEAN,$(BINARY))
	$(Q)$(RM) -f $(BINARY); \
	find $(if $(OUTPUT),$(OUTPUT),.) -name \*.o -or -name \*.o.cmd -or -name \*.o.d | xargs $(RM)

tags:
	$(call msg,GEN,,tags)
	$(Q)ctags -R . $(LIBBPF_SRC) $(SUBCMD_SRC)

FORCE:

.PHONY: all FORCE clean tags
721
tools/bpf/resolve_btfids/main.c
Normal file
@@ -0,0 +1,721 @@
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * resolve_btfids scans Elf object for .BTF_ids section and resolves
 * its symbols with BTF ID values.
 *
 * Each symbol points to 4 bytes data and is expected to have
 * following name syntax:
 *
 * __BTF_ID__<type>__<symbol>[__<id>]
 *
 * type is:
 *
 *   func    - lookup BTF_KIND_FUNC symbol with <symbol> name
 *             and store its ID into the data:
 *
 *             __BTF_ID__func__vfs_close__1:
 *             .zero 4
 *
 *   struct  - lookup BTF_KIND_STRUCT symbol with <symbol> name
 *             and store its ID into the data:
 *
 *             __BTF_ID__struct__sk_buff__1:
 *             .zero 4
 *
 *   union   - lookup BTF_KIND_UNION symbol with <symbol> name
 *             and store its ID into the data:
 *
 *             __BTF_ID__union__thread_union__1:
 *             .zero 4
 *
 *   typedef - lookup BTF_KIND_TYPEDEF symbol with <symbol> name
 *             and store its ID into the data:
 *
 *             __BTF_ID__typedef__pid_t__1:
 *             .zero 4
 *
 *   set     - store symbol size into first 4 bytes and sort following
 *             ID list
 *
 *             __BTF_ID__set__list:
 *             .zero 4
 *             list:
 *             __BTF_ID__func__vfs_getattr__3:
 *             .zero 4
 *             __BTF_ID__func__vfs_fallocate__4:
 *             .zero 4
 */

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <stdlib.h>
#include <libelf.h>
#include <gelf.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include <linux/rbtree.h>
#include <linux/zalloc.h>
#include <linux/err.h>
#include <btf.h>
#include <libbpf.h>
#include <parse-options.h>

#define BTF_IDS_SECTION ".BTF_ids"
#define BTF_ID "__BTF_ID__"

#define BTF_STRUCT "struct"
#define BTF_UNION "union"
#define BTF_TYPEDEF "typedef"
#define BTF_FUNC "func"
#define BTF_SET "set"

#define ADDR_CNT 100

struct btf_id {
	struct rb_node rb_node;
	char *name;
	union {
		int id;
		int cnt;
	};
	int addr_cnt;
	Elf64_Addr addr[ADDR_CNT];
};

struct object {
	const char *path;
	const char *btf;

	struct {
		int fd;
		Elf *elf;
		Elf_Data *symbols;
		Elf_Data *idlist;
		int symbols_shndx;
		int idlist_shndx;
		size_t strtabidx;
		unsigned long idlist_addr;
	} efile;

	struct rb_root sets;
	struct rb_root structs;
	struct rb_root unions;
	struct rb_root typedefs;
	struct rb_root funcs;

	int nr_funcs;
	int nr_structs;
	int nr_unions;
	int nr_typedefs;
};

static int verbose;

int eprintf(int level, int var, const char *fmt, ...)
{
	va_list args;
	int ret = 0;

	if (var >= level) {
		va_start(args, fmt);
		ret = vfprintf(stderr, fmt, args);
		va_end(args);
	}
	return ret;
}

#ifndef pr_fmt
#define pr_fmt(fmt) fmt
#endif

#define pr_debug(fmt, ...) \
	eprintf(1, verbose, pr_fmt(fmt), ##__VA_ARGS__)
#define pr_debugN(n, fmt, ...) \
	eprintf(n, verbose, pr_fmt(fmt), ##__VA_ARGS__)
#define pr_debug2(fmt, ...) pr_debugN(2, pr_fmt(fmt), ##__VA_ARGS__)
#define pr_err(fmt, ...) \
	eprintf(0, verbose, pr_fmt(fmt), ##__VA_ARGS__)

static bool is_btf_id(const char *name)
{
	return name && !strncmp(name, BTF_ID, sizeof(BTF_ID) - 1);
}

static struct btf_id *btf_id__find(struct rb_root *root, const char *name)
{
	struct rb_node *p = root->rb_node;
	struct btf_id *id;
	int cmp;

	while (p) {
		id = rb_entry(p, struct btf_id, rb_node);
		cmp = strcmp(id->name, name);
		if (cmp < 0)
			p = p->rb_left;
		else if (cmp > 0)
			p = p->rb_right;
		else
			return id;
	}
	return NULL;
}

static struct btf_id*
btf_id__add(struct rb_root *root, char *name, bool unique)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btf_id *id;
	int cmp;

	while (*p != NULL) {
		parent = *p;
		id = rb_entry(parent, struct btf_id, rb_node);
		cmp = strcmp(id->name, name);
		if (cmp < 0)
			p = &(*p)->rb_left;
		else if (cmp > 0)
			p = &(*p)->rb_right;
		else
			return unique ? NULL : id;
	}

	id = zalloc(sizeof(*id));
	if (id) {
		pr_debug("adding symbol %s\n", name);
		id->name = name;
		rb_link_node(&id->rb_node, parent, p);
		rb_insert_color(&id->rb_node, root);
	}
	return id;
}

static char *get_id(const char *prefix_end)
{
	/*
	 * __BTF_ID__func__vfs_truncate__0
	 * prefix_end =  ^
	 */
	char *p, *id = strdup(prefix_end + sizeof("__") - 1);

	if (id) {
		/*
		 * __BTF_ID__func__vfs_truncate__0
		 * id =            ^
		 *
		 * cut the unique id part
		 */
		p = strrchr(id, '_');
		p--;
		if (*p != '_') {
			free(id);
			return NULL;
		}
		*p = '\0';
	}
	return id;
}

static struct btf_id *add_symbol(struct rb_root *root, char *name, size_t size)
{
	char *id;

	id = get_id(name + size);
	if (!id) {
		pr_err("FAILED to parse symbol name: %s\n", name);
		return NULL;
	}

	return btf_id__add(root, id, false);
}

static int elf_collect(struct object *obj)
{
	Elf_Scn *scn = NULL;
	size_t shdrstrndx;
	int idx = 0;
	Elf *elf;
	int fd;

	fd = open(obj->path, O_RDWR, 0666);
	if (fd == -1) {
		pr_err("FAILED cannot open %s: %s\n",
			obj->path, strerror(errno));
		return -1;
	}

	elf_version(EV_CURRENT);

	elf = elf_begin(fd, ELF_C_RDWR_MMAP, NULL);
	if (!elf) {
		pr_err("FAILED cannot create ELF descriptor: %s\n",
			elf_errmsg(-1));
		return -1;
	}

	obj->efile.fd = fd;
	obj->efile.elf = elf;

	elf_flagelf(elf, ELF_C_SET, ELF_F_LAYOUT);

	if (elf_getshdrstrndx(elf, &shdrstrndx) != 0) {
		pr_err("FAILED cannot get shdr str ndx\n");
		return -1;
	}

	/*
	 * Scan all the elf sections and look for save data
	 * from .BTF_ids section and symbols.
	 */
	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		Elf_Data *data;
		GElf_Shdr sh;
		char *name;

		idx++;
		if (gelf_getshdr(scn, &sh) != &sh) {
			pr_err("FAILED get section(%d) header\n", idx);
			return -1;
		}

		name = elf_strptr(elf, shdrstrndx, sh.sh_name);
		if (!name) {
			pr_err("FAILED get section(%d) name\n", idx);
			return -1;
		}

		data = elf_getdata(scn, 0);
		if (!data) {
			pr_err("FAILED to get section(%d) data from %s\n",
				idx, name);
			return -1;
		}

		pr_debug2("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
			  idx, name, (unsigned long) data->d_size,
			  (int) sh.sh_link, (unsigned long) sh.sh_flags,
			  (int) sh.sh_type);

		if (sh.sh_type == SHT_SYMTAB) {
			obj->efile.symbols = data;
			obj->efile.symbols_shndx = idx;
			obj->efile.strtabidx = sh.sh_link;
		} else if (!strcmp(name, BTF_IDS_SECTION)) {
			obj->efile.idlist = data;
			obj->efile.idlist_shndx = idx;
			obj->efile.idlist_addr = sh.sh_addr;
		}
	}

	return 0;
}

static int symbols_collect(struct object *obj)
{
	Elf_Scn *scn = NULL;
	int n, i, err = 0;
	GElf_Shdr sh;
	char *name;

	scn = elf_getscn(obj->efile.elf, obj->efile.symbols_shndx);
	if (!scn)
		return -1;

	if (gelf_getshdr(scn, &sh) != &sh)
		return -1;

	n = sh.sh_size / sh.sh_entsize;

	/*
	 * Scan symbols and look for the ones starting with
	 * __BTF_ID__* over .BTF_ids section.
	 */
	for (i = 0; !err && i < n; i++) {
		char *tmp, *prefix;
		struct btf_id *id;
		GElf_Sym sym;
		int err = -1;

		if (!gelf_getsym(obj->efile.symbols, i, &sym))
			return -1;

		if (sym.st_shndx != obj->efile.idlist_shndx)
			continue;

		name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
				  sym.st_name);

		if (!is_btf_id(name))
			continue;

		/*
		 * __BTF_ID__TYPE__vfs_truncate__0
		 * prefix =  ^
		 */
		prefix = name + sizeof(BTF_ID) - 1;

		/* struct */
		if (!strncmp(prefix, BTF_STRUCT, sizeof(BTF_STRUCT) - 1)) {
			obj->nr_structs++;
			id = add_symbol(&obj->structs, prefix, sizeof(BTF_STRUCT) - 1);
		/* union  */
		} else if (!strncmp(prefix, BTF_UNION, sizeof(BTF_UNION) - 1)) {
			obj->nr_unions++;
			id = add_symbol(&obj->unions, prefix, sizeof(BTF_UNION) - 1);
		/* typedef */
		} else if (!strncmp(prefix, BTF_TYPEDEF, sizeof(BTF_TYPEDEF) - 1)) {
			obj->nr_typedefs++;
			id = add_symbol(&obj->typedefs, prefix, sizeof(BTF_TYPEDEF) - 1);
		/* func */
		} else if (!strncmp(prefix, BTF_FUNC, sizeof(BTF_FUNC) - 1)) {
			obj->nr_funcs++;
			id = add_symbol(&obj->funcs, prefix, sizeof(BTF_FUNC) - 1);
		/* set */
		} else if (!strncmp(prefix, BTF_SET, sizeof(BTF_SET) - 1)) {
			id = add_symbol(&obj->sets, prefix, sizeof(BTF_SET) - 1);
			/*
			 * SET objects store list's count, which is encoded
			 * in symbol's size, together with 'cnt' field hence
			 * that - 1.
			 */
			if (id)
				id->cnt = sym.st_size / sizeof(int) - 1;
		} else {
			pr_err("FAILED unsupported prefix %s\n", prefix);
			return -1;
		}

		if (!id)
			return -ENOMEM;

		if (id->addr_cnt >= ADDR_CNT) {
			pr_err("FAILED symbol %s crossed the number of allowed lists",
				id->name);
			return -1;
		}
		id->addr[id->addr_cnt++] = sym.st_value;
	}

	return 0;
}

static struct btf *btf__parse_raw(const char *file)
{
	struct btf *btf;
	struct stat st;
	__u8 *buf;
	FILE *f;

	if (stat(file, &st))
		return NULL;

	f = fopen(file, "rb");
	if (!f)
		return NULL;

	buf = malloc(st.st_size);
	if (!buf) {
		btf = ERR_PTR(-ENOMEM);
		goto exit_close;
	}

	if ((size_t) st.st_size != fread(buf, 1, st.st_size, f)) {
		btf = ERR_PTR(-EINVAL);
		goto exit_free;
	}

	btf = btf__new(buf, st.st_size);

exit_free:
	free(buf);
exit_close:
	fclose(f);
	return btf;
}

static bool is_btf_raw(const char *file)
{
	__u16 magic = 0;
	int fd, nb_read;

	fd = open(file, O_RDONLY);
	if (fd < 0)
		return false;

	nb_read = read(fd, &magic, sizeof(magic));
	close(fd);
	return nb_read == sizeof(magic) && magic == BTF_MAGIC;
}

static struct btf *btf_open(const char *path)
{
	if (is_btf_raw(path))
		return btf__parse_raw(path);
	else
		return btf__parse_elf(path, NULL);
}

static int symbols_resolve(struct object *obj)
{
	int nr_typedefs = obj->nr_typedefs;
	int nr_structs = obj->nr_structs;
	int nr_unions = obj->nr_unions;
	int nr_funcs = obj->nr_funcs;
	int err, type_id;
	struct btf *btf;
	__u32 nr;

	btf = btf_open(obj->btf ?: obj->path);
	err = libbpf_get_error(btf);
	if (err) {
		pr_err("FAILED: load BTF from %s: %s",
			obj->path, strerror(err));
		return -1;
	}

	err = -1;
	nr = btf__get_nr_types(btf);

	/*
	 * Iterate all the BTF types and search for collected symbol IDs.
	 */
	for (type_id = 1; type_id <= nr; type_id++) {
		const struct btf_type *type;
		struct rb_root *root;
		struct btf_id *id;
		const char *str;
		int *nr;

		type = btf__type_by_id(btf, type_id);
		if (!type) {
			pr_err("FAILED: malformed BTF, can't resolve type for ID %d\n",
				type_id);
			goto out;
		}

		if (btf_is_func(type) && nr_funcs) {
			nr = &nr_funcs;
			root = &obj->funcs;
		} else if (btf_is_struct(type) && nr_structs) {
			nr = &nr_structs;
			root = &obj->structs;
		} else if (btf_is_union(type) && nr_unions) {
			nr = &nr_unions;
			root = &obj->unions;
		} else if (btf_is_typedef(type) && nr_typedefs) {
			nr = &nr_typedefs;
			root = &obj->typedefs;
		} else
			continue;

		str = btf__name_by_offset(btf, type->name_off);
		if (!str) {
			pr_err("FAILED: malformed BTF, can't resolve name for ID %d\n",
				type_id);
			goto out;
		}

		id = btf_id__find(root, str);
		if (id) {
			id->id = type_id;
			(*nr)--;
		}
	}

	err = 0;
out:
	btf__free(btf);
	return err;
}

static int id_patch(struct object *obj, struct btf_id *id)
{
	Elf_Data *data = obj->efile.idlist;
	int *ptr = data->d_buf;
	int i;

	if (!id->id) {
		pr_err("FAILED unresolved symbol %s\n", id->name);
		return -EINVAL;
	}

	for (i = 0; i < id->addr_cnt; i++) {
		unsigned long addr = id->addr[i];
		unsigned long idx = addr - obj->efile.idlist_addr;

		pr_debug("patching addr %5lu: ID %7d [%s]\n",
			 idx, id->id, id->name);

		if (idx >= data->d_size) {
			pr_err("FAILED patching index %lu out of bounds %lu\n",
				idx, data->d_size);
			return -1;
		}

		idx = idx / sizeof(int);
		ptr[idx] = id->id;
	}

	return 0;
}

static int __symbols_patch(struct object *obj, struct rb_root *root)
{
	struct rb_node *next;
	struct btf_id *id;

	next = rb_first(root);
	while (next) {
		id = rb_entry(next, struct btf_id, rb_node);

		if (id_patch(obj, id))
			return -1;

		next = rb_next(next);
	}
	return 0;
}

static int cmp_id(const void *pa, const void *pb)
{
	const int *a = pa, *b = pb;

	return *a - *b;
}

static int sets_patch(struct object *obj)
{
	Elf_Data *data = obj->efile.idlist;
	int *ptr = data->d_buf;
	struct rb_node *next;

	next = rb_first(&obj->sets);
	while (next) {
		unsigned long addr, idx;
		struct btf_id *id;
		int *base;
		int cnt;

		id = rb_entry(next, struct btf_id, rb_node);
		addr = id->addr[0];
		idx = addr - obj->efile.idlist_addr;

		/* sets are unique */
		if (id->addr_cnt != 1) {
			pr_err("FAILED malformed data for set '%s'\n",
				id->name);
			return -1;
		}

		idx = idx / sizeof(int);
		base = &ptr[idx] + 1;
		cnt = ptr[idx];

		pr_debug("sorting addr %5lu: cnt %6d [%s]\n",
			 (idx + 1) * sizeof(int), cnt, id->name);

		qsort(base, cnt, sizeof(int), cmp_id);

		next = rb_next(next);
	}
	return 0;
}

static int symbols_patch(struct object *obj)
{
	int err;

	if (__symbols_patch(obj, &obj->structs) ||
	    __symbols_patch(obj, &obj->unions) ||
	    __symbols_patch(obj, &obj->typedefs) ||
	    __symbols_patch(obj, &obj->funcs) ||
	    __symbols_patch(obj, &obj->sets))
		return -1;

	if (sets_patch(obj))
		return -1;

	elf_flagdata(obj->efile.idlist, ELF_C_SET, ELF_F_DIRTY);

	err = elf_update(obj->efile.elf, ELF_C_WRITE);
	if (err < 0) {
		pr_err("FAILED elf_update(WRITE): %s\n",
			elf_errmsg(-1));
	}

	pr_debug("update %s for %s\n",
		 err >= 0 ? "ok" : "failed", obj->path);
	return err < 0 ? -1 : 0;
}

static const char * const resolve_btfids_usage[] = {
	"resolve_btfids [<options>] <ELF object>",
	NULL
};

int main(int argc, const char **argv)
{
	bool no_fail = false;
	struct object obj = {
		.efile = {
			.idlist_shndx = -1,
			.symbols_shndx = -1,
		},
		.structs = RB_ROOT,
		.unions = RB_ROOT,
		.typedefs = RB_ROOT,
		.funcs = RB_ROOT,
		.sets = RB_ROOT,
	};
	struct option btfid_options[] = {
		OPT_INCR('v', "verbose", &verbose,
			 "be more verbose (show errors, etc)"),
		OPT_STRING(0, "btf", &obj.btf, "BTF data",
			   "BTF data"),
		OPT_BOOLEAN(0, "no-fail", &no_fail,
			    "do not fail if " BTF_IDS_SECTION " section is not found"),
		OPT_END()
	};
	int err = -1;

	argc = parse_options(argc, argv, btfid_options, resolve_btfids_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (argc != 1)
		usage_with_options(resolve_btfids_usage, btfid_options);

	obj.path = argv[0];

	if (elf_collect(&obj))
		goto out;

	/*
	 * We did not find .BTF_ids section or symbols section,
	 * nothing to do..
	 */
	if (obj.efile.idlist_shndx == -1 ||
	    obj.efile.symbols_shndx == -1) {
		if (no_fail)
			return 0;
		pr_err("FAILED to find needed sections\n");
		return -1;
	}

	if (symbols_collect(&obj))
		goto out;

	if (symbols_resolve(&obj))
		goto out;

	if (symbols_patch(&obj))
		goto out;

	err = 0;
out:
	if (obj.efile.elf)
		elf_end(obj.efile.elf);
	close(obj.efile.fd);
	return err;
}
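The "set" blob that sets_patch() sorts is a 32-bit count followed by that many IDs, which is why it sorts cnt entries starting one word past the count. A standalone sketch of the same operation on example data (values are illustrative, not from the commit):

/* Sketch: sort a .BTF_ids set blob the way sets_patch() does,
 * layout is [cnt][id0][id1]... */
#include <stdio.h>
#include <stdlib.h>

static int cmp_id(const void *pa, const void *pb)
{
	const int *a = pa, *b = pb;

	return *a - *b;
}

int main(void)
{
	int set[] = { 3, 42, 7, 19 };	/* cnt = 3, unsorted IDs follow */

	qsort(&set[1], set[0], sizeof(int), cmp_id);

	for (int i = 1; i <= set[0]; i++)
		printf("%d\n", set[i]);	/* prints 7, 19, 42 */
	return 0;
}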
87
tools/include/linux/btf_ids.h
Normal file
@@ -0,0 +1,87 @@
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _LINUX_BTF_IDS_H
#define _LINUX_BTF_IDS_H

#include <linux/compiler.h> /* for __PASTE */

/*
 * Following macros help to define lists of BTF IDs placed
 * in .BTF_ids section. They are initially filled with zeros
 * (during compilation) and resolved later during the
 * linking phase by resolve_btfids tool.
 *
 * Any change in list layout must be reflected in resolve_btfids
 * tool logic.
 */

#define BTF_IDS_SECTION ".BTF_ids"

#define ____BTF_ID(symbol)				\
asm(							\
".pushsection " BTF_IDS_SECTION ",\"a\";	\n"	\
".local " #symbol " ;				\n"	\
".type  " #symbol ", @object;			\n"	\
".size  " #symbol ", 4;				\n"	\
#symbol ":					\n"	\
".zero 4					\n"	\
".popsection;					\n");

#define __BTF_ID(symbol) \
	____BTF_ID(symbol)

#define __ID(prefix) \
	__PASTE(prefix, __COUNTER__)

/*
 * The BTF_ID defines unique symbol for each ID pointing
 * to 4 zero bytes.
 */
#define BTF_ID(prefix, name) \
	__BTF_ID(__ID(__BTF_ID__##prefix##__##name##__))

/*
 * The BTF_ID_LIST macro defines pure (unsorted) list
 * of BTF IDs, with following layout:
 *
 * BTF_ID_LIST(list1)
 * BTF_ID(type1, name1)
 * BTF_ID(type2, name2)
 *
 * list1:
 * __BTF_ID__type1__name1__1:
 * .zero 4
 * __BTF_ID__type2__name2__2:
 * .zero 4
 *
 */
#define __BTF_ID_LIST(name)				\
asm(							\
".pushsection " BTF_IDS_SECTION ",\"a\";	\n"	\
".local " #name ";				\n"	\
#name ":;					\n"	\
".popsection;					\n");	\

#define BTF_ID_LIST(name) \
__BTF_ID_LIST(name) \
extern u32 name[];

/*
 * The BTF_ID_UNUSED macro defines 4 zero bytes.
 * It's used when we want to define 'unused' entry
 * in BTF_ID_LIST, like:
 *
 *   BTF_ID_LIST(bpf_skb_output_btf_ids)
 *   BTF_ID(struct, sk_buff)
 *   BTF_ID_UNUSED
 *   BTF_ID(struct, task_struct)
 */

#define BTF_ID_UNUSED					\
asm(							\
".pushsection " BTF_IDS_SECTION ",\"a\";	\n"	\
".zero 4					\n"	\
".popsection;					\n");


#endif
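Once resolve_btfids has patched the section at link time, kernel code reads a list as an ordinary u32 array. A hedged sketch of defining and consuming one; the list name and types below are examples only, not taken from this commit:

/* Illustrative only: entries read 0 until resolve_btfids patches them. */
#include <linux/btf_ids.h>

BTF_ID_LIST(example_btf_ids)
BTF_ID(struct, sk_buff)		/* example_btf_ids[0] */
BTF_ID(func, vfs_truncate)	/* example_btf_ids[1] */

static bool type_is_sk_buff(u32 btf_id)
{
	/* guard against an unresolved (still zero) entry */
	return example_btf_ids[0] && btf_id == example_btf_ids[0];
}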
@@ -201,4 +201,8 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
# define __fallthrough
#endif

/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
#define ___PASTE(a, b) a##b
#define __PASTE(a, b) ___PASTE(a, b)

#endif /* _TOOLS_LINUX_COMPILER_H */
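The two macro levels matter because ## pastes its operands before they are macro-expanded: a single-level paste would glue the literal token __COUNTER__ or __LINE__ into the symbol name, while the extra __PASTE level forces expansion first. A small illustration (not part of the commit):

#define ___PASTE(a, b) a##b
#define __PASTE(a, b) ___PASTE(a, b)

/* expands to "int counter_7;" when this appears on line 7 */
int __PASTE(counter_, __LINE__);

/* ___PASTE(counter_, __LINE__) would instead declare "counter___LINE__" */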
@@ -226,6 +226,7 @@ enum bpf_attach_type {
	BPF_CGROUP_INET4_GETSOCKNAME,
	BPF_CGROUP_INET6_GETSOCKNAME,
	BPF_XDP_DEVMAP,
	BPF_CGROUP_INET_SOCK_RELEASE,
	__MAX_BPF_ATTACH_TYPE
};

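Programs for the new hook use BPF_PROG_TYPE_CGROUP_SOCK, and the libbpf change further below maps the "cgroup/sock_release" ELF section to this attach type. A minimal sketch of such a program; the program name is a placeholder and the body only shows the return convention:

/* Sketch, not from the commit: fires when an inet socket in the
 * cgroup is released; return 1 follows the sock_create convention. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup/sock_release")
int sock_release_prog(struct bpf_sock *ctx)
{
	return 1;
}

char _license[] SEC("license") = "GPL";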
@@ -73,9 +73,12 @@ struct xdp_umem_reg {
};

struct xdp_statistics {
	__u64 rx_dropped; /* Dropped for reasons other than invalid desc */
	__u64 rx_dropped; /* Dropped for other reasons */
	__u64 rx_invalid_descs; /* Dropped due to invalid descriptor */
	__u64 tx_invalid_descs; /* Dropped due to invalid descriptor */
	__u64 rx_ring_full; /* Dropped due to rx ring being full */
	__u64 rx_fill_ring_empty_descs; /* Failed to retrieve item from fill ring */
	__u64 tx_ring_empty_descs; /* Failed to retrieve item from tx ring */
};

struct xdp_options {
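User space reads these counters with getsockopt(SOL_XDP, XDP_STATISTICS); checking the returned optlen keeps callers compatible with older kernels that only fill the first three fields, which is exactly what xsk_get_xdp_stats() in the xdpsock sample above does. A compact sketch of the same call, assuming fd is an AF_XDP socket:

/* Sketch mirroring xsk_get_xdp_stats() above. */
#include <sys/socket.h>
#include <linux/if_xdp.h>

#ifndef SOL_XDP
#define SOL_XDP 283
#endif

static int read_xdp_stats(int fd, struct xdp_statistics *stats)
{
	socklen_t optlen = sizeof(*stats);

	if (getsockopt(fd, SOL_XDP, XDP_STATISTICS, stats, &optlen))
		return -1;
	/* older kernels return a shorter struct without the new fields */
	return optlen == sizeof(*stats) ? 0 : -1;
}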
@@ -389,7 +389,7 @@ void btf__free(struct btf *btf)
	if (!btf)
		return;

	if (btf->fd != -1)
	if (btf->fd >= 0)
		close(btf->fd);

	free(btf->data);
@@ -397,7 +397,7 @@ void btf__free(struct btf *btf)
	free(btf);
}

struct btf *btf__new(__u8 *data, __u32 size)
struct btf *btf__new(const void *data, __u32 size)
{
	struct btf *btf;
	int err;
@@ -700,6 +700,11 @@ int btf__fd(const struct btf *btf)
	return btf->fd;
}

void btf__set_fd(struct btf *btf, int fd)
{
	btf->fd = fd;
}

const void *btf__get_raw_data(const struct btf *btf, __u32 *size)
{
	*size = btf->data_size;
@@ -63,7 +63,7 @@ struct btf_ext_header {
};

LIBBPF_API void btf__free(struct btf *btf);
LIBBPF_API struct btf *btf__new(__u8 *data, __u32 size);
LIBBPF_API struct btf *btf__new(const void *data, __u32 size);
LIBBPF_API struct btf *btf__parse_elf(const char *path,
				      struct btf_ext **btf_ext);
LIBBPF_API int btf__finalize_data(struct bpf_object *obj, struct btf *btf);
@@ -79,6 +79,7 @@ LIBBPF_API __s64 btf__resolve_size(const struct btf *btf, __u32 type_id);
LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id);
LIBBPF_API int btf__align_of(const struct btf *btf, __u32 id);
LIBBPF_API int btf__fd(const struct btf *btf);
LIBBPF_API void btf__set_fd(struct btf *btf, int fd);
LIBBPF_API const void *btf__get_raw_data(const struct btf *btf, __u32 *size);
LIBBPF_API const char *btf__name_by_offset(const struct btf *btf, __u32 offset);
LIBBPF_API int btf__get_from_id(__u32 id, struct btf **btf);
@@ -143,8 +144,10 @@ struct btf_dump_emit_type_decl_opts {
	 * necessary indentation already
	 */
	int indent_level;
	/* strip all the const/volatile/restrict mods */
	bool strip_mods;
};
#define btf_dump_emit_type_decl_opts__last_field indent_level
#define btf_dump_emit_type_decl_opts__last_field strip_mods

LIBBPF_API int
btf_dump__emit_type_decl(struct btf_dump *d, __u32 id,
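Callers opt in per emit call through the opts struct, as bpftool's codegen change above does for .kconfig variables. A hedged usage sketch; d, type_id and "my_var" are placeholders assumed by this fragment:

/* Sketch based on the bpftool gen change above. */
#include <bpf/btf.h>

static int emit_stripped_decl(struct btf_dump *d, __u32 type_id)
{
	DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
		.field_name = "my_var",	/* placeholder identifier */
		.indent_level = 1,
		.strip_mods = true,	/* drop const/volatile/restrict */
	);

	return btf_dump__emit_type_decl(d, type_id, &opts);
}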
@@ -60,6 +60,7 @@ struct btf_dump {
	const struct btf_ext *btf_ext;
	btf_dump_printf_fn_t printf_fn;
	struct btf_dump_opts opts;
	bool strip_mods;

	/* per-type auxiliary state */
	struct btf_dump_type_aux_state *type_states;
@@ -1032,7 +1033,9 @@ int btf_dump__emit_type_decl(struct btf_dump *d, __u32 id,

	fname = OPTS_GET(opts, field_name, "");
	lvl = OPTS_GET(opts, indent_level, 0);
	d->strip_mods = OPTS_GET(opts, strip_mods, false);
	btf_dump_emit_type_decl(d, id, fname, lvl);
	d->strip_mods = false;
	return 0;
}

@@ -1045,6 +1048,10 @@ static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id,

	stack_start = d->decl_stack_cnt;
	for (;;) {
		t = btf__type_by_id(d->btf, id);
		if (d->strip_mods && btf_is_mod(t))
			goto skip_mod;

		err = btf_dump_push_decl_stack_id(d, id);
		if (err < 0) {
			/*
@@ -1056,12 +1063,11 @@ static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id,
			d->decl_stack_cnt = stack_start;
			return;
		}

skip_mod:
		/* VOID */
		if (id == 0)
			break;

		t = btf__type_by_id(d->btf, id);
		switch (btf_kind(t)) {
		case BTF_KIND_PTR:
		case BTF_KIND_VOLATILE:
@@ -2338,18 +2338,23 @@ static bool section_have_execinstr(struct bpf_object *obj, int idx)
	return false;
}

static void bpf_object__sanitize_btf(struct bpf_object *obj)
static bool btf_needs_sanitization(struct bpf_object *obj)
{
	bool has_func_global = obj->caps.btf_func_global;
	bool has_datasec = obj->caps.btf_datasec;
	bool has_func = obj->caps.btf_func;

	return !has_func || !has_datasec || !has_func_global;
}

static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
{
	bool has_func_global = obj->caps.btf_func_global;
	bool has_datasec = obj->caps.btf_datasec;
	bool has_func = obj->caps.btf_func;
	struct btf *btf = obj->btf;
	struct btf_type *t;
	int i, j, vlen;

	if (!obj->btf || (has_func && has_datasec && has_func_global))
		return;

	for (i = 1; i <= btf__get_nr_types(btf); i++) {
		t = (struct btf_type *)btf__type_by_id(btf, i);

@@ -2402,17 +2407,6 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj)
	}
}

static void bpf_object__sanitize_btf_ext(struct bpf_object *obj)
{
	if (!obj->btf_ext)
		return;

	if (!obj->caps.btf_func) {
		btf_ext__free(obj->btf_ext);
		obj->btf_ext = NULL;
	}
}

static bool libbpf_needs_btf(const struct bpf_object *obj)
{
	return obj->efile.btf_maps_shndx >= 0 ||
@@ -2473,19 +2467,11 @@ static int bpf_object__finalize_btf(struct bpf_object *obj)
		return 0;

	err = btf__finalize_data(obj, obj->btf);
	if (!err)
		return 0;

	pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err);
	btf__free(obj->btf);
	obj->btf = NULL;
	btf_ext__free(obj->btf_ext);
	obj->btf_ext = NULL;

	if (libbpf_needs_btf(obj)) {
		pr_warn("BTF is required, but is missing or corrupted.\n");
		return -ENOENT;
	if (err) {
		pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err);
		return err;
	}

	return 0;
}

@@ -2538,30 +2524,45 @@ static int bpf_object__load_vmlinux_btf(struct bpf_object *obj)

static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
{
	struct btf *kern_btf = obj->btf;
	bool btf_mandatory, sanitize;
	int err = 0;

	if (!obj->btf)
		return 0;

	bpf_object__sanitize_btf(obj);
	bpf_object__sanitize_btf_ext(obj);
	sanitize = btf_needs_sanitization(obj);
	if (sanitize) {
		const void *raw_data;
		__u32 sz;

	err = btf__load(obj->btf);
	if (err) {
		pr_warn("Error loading %s into kernel: %d.\n",
			BTF_ELF_SEC, err);
		btf__free(obj->btf);
		obj->btf = NULL;
		/* btf_ext can't exist without btf, so free it as well */
		if (obj->btf_ext) {
			btf_ext__free(obj->btf_ext);
			obj->btf_ext = NULL;
		}
		/* clone BTF to sanitize a copy and leave the original intact */
		raw_data = btf__get_raw_data(obj->btf, &sz);
		kern_btf = btf__new(raw_data, sz);
		if (IS_ERR(kern_btf))
			return PTR_ERR(kern_btf);

		if (kernel_needs_btf(obj))
			return err;
		bpf_object__sanitize_btf(obj, kern_btf);
	}
	return 0;

	err = btf__load(kern_btf);
	if (sanitize) {
		if (!err) {
			/* move fd to libbpf's BTF */
			btf__set_fd(obj->btf, btf__fd(kern_btf));
			btf__set_fd(kern_btf, -1);
		}
		btf__free(kern_btf);
	}
	if (err) {
		btf_mandatory = kernel_needs_btf(obj);
		pr_warn("Error loading .BTF into kernel: %d. %s\n", err,
			btf_mandatory ? "BTF is mandatory, can't proceed."
				      : "BTF is optional, ignoring.");
		if (!btf_mandatory)
			err = 0;
	}
	return err;
}

static int bpf_object__elf_collect(struct bpf_object *obj)
@@ -3785,7 +3786,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
	create_attr.btf_fd = 0;
	create_attr.btf_key_type_id = 0;
	create_attr.btf_value_type_id = 0;
	if (obj->btf && !bpf_map_find_btf_info(obj, map)) {
	if (obj->btf && btf__fd(obj->btf) >= 0 && !bpf_map_find_btf_info(obj, map)) {
		create_attr.btf_fd = btf__fd(obj->btf);
		create_attr.btf_key_type_id = map->btf_key_type_id;
		create_attr.btf_value_type_id = map->btf_value_type_id;
@@ -5375,18 +5376,17 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
		load_attr.kern_version = kern_version;
		load_attr.prog_ifindex = prog->prog_ifindex;
	}
	/* if .BTF.ext was loaded, kernel supports associated BTF for prog */
	if (prog->obj->btf_ext)
		btf_fd = bpf_object__btf_fd(prog->obj);
	else
		btf_fd = -1;
	load_attr.prog_btf_fd = btf_fd >= 0 ? btf_fd : 0;
	load_attr.func_info = prog->func_info;
	load_attr.func_info_rec_size = prog->func_info_rec_size;
	load_attr.func_info_cnt = prog->func_info_cnt;
	load_attr.line_info = prog->line_info;
	load_attr.line_info_rec_size = prog->line_info_rec_size;
	load_attr.line_info_cnt = prog->line_info_cnt;
	/* specify func_info/line_info only if kernel supports them */
	btf_fd = bpf_object__btf_fd(prog->obj);
	if (btf_fd >= 0 && prog->obj->caps.btf_func) {
		load_attr.prog_btf_fd = btf_fd;
		load_attr.func_info = prog->func_info;
		load_attr.func_info_rec_size = prog->func_info_rec_size;
		load_attr.func_info_cnt = prog->func_info_cnt;
		load_attr.line_info = prog->line_info;
		load_attr.line_info_rec_size = prog->line_info_rec_size;
		load_attr.line_info_cnt = prog->line_info_cnt;
	}
	load_attr.log_level = prog->log_level;
	load_attr.prog_flags = prog->prog_flags;

@@ -6923,6 +6923,10 @@ static const struct bpf_sec_def section_defs[] = {
	BPF_APROG_SEC("cgroup_skb/egress", BPF_PROG_TYPE_CGROUP_SKB,
		      BPF_CGROUP_INET_EGRESS),
	BPF_APROG_COMPAT("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB),
	BPF_EAPROG_SEC("cgroup/sock_create", BPF_PROG_TYPE_CGROUP_SOCK,
		       BPF_CGROUP_INET_SOCK_CREATE),
	BPF_EAPROG_SEC("cgroup/sock_release", BPF_PROG_TYPE_CGROUP_SOCK,
		       BPF_CGROUP_INET_SOCK_RELEASE),
	BPF_APROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK,
		      BPF_CGROUP_INET_SOCK_CREATE),
	BPF_EAPROG_SEC("cgroup/post_bind4", BPF_PROG_TYPE_CGROUP_SOCK,
@@ -8588,7 +8592,7 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
					      struct perf_buffer_params *p)
{
	const char *online_cpus_file = "/sys/devices/system/cpu/online";
	struct bpf_map_info map = {};
	struct bpf_map_info map;
	char msg[STRERR_BUFSIZE];
	struct perf_buffer *pb;
	bool *online = NULL;
@@ -8601,19 +8605,28 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
		return ERR_PTR(-EINVAL);
	}

	/* best-effort sanity checks */
	memset(&map, 0, sizeof(map));
	map_info_len = sizeof(map);
	err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len);
	if (err) {
		err = -errno;
		pr_warn("failed to get map info for map FD %d: %s\n",
			map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
		return ERR_PTR(err);
	}

	if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
		pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
			map.name);
		return ERR_PTR(-EINVAL);
		/* if BPF_OBJ_GET_INFO_BY_FD is supported, will return
		 * -EBADFD, -EFAULT, or -E2BIG on real error
		 */
		if (err != -EINVAL) {
			pr_warn("failed to get map info for map FD %d: %s\n",
				map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
			return ERR_PTR(err);
		}
		pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n",
			 map_fd);
	} else {
		if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
			pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
				map.name);
			return ERR_PTR(-EINVAL);
		}
	}

	pb = calloc(1, sizeof(*pb));
@@ -8645,7 +8658,7 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
			err = pb->cpu_cnt;
			goto error;
		}
		if (map.max_entries < pb->cpu_cnt)
		if (map.max_entries && map.max_entries < pb->cpu_cnt)
			pb->cpu_cnt = map.max_entries;
	}

@@ -288,4 +288,5 @@ LIBBPF_0.1.0 {
		bpf_map__value_size;
		bpf_program__autoload;
		bpf_program__set_autoload;
		btf__set_fd;
} LIBBPF_0.0.9;
@@ -111,6 +111,7 @@ SCRATCH_DIR := $(OUTPUT)/tools
BUILD_DIR := $(SCRATCH_DIR)/build
INCLUDE_DIR := $(SCRATCH_DIR)/include
BPFOBJ := $(BUILD_DIR)/libbpf/libbpf.a
RESOLVE_BTFIDS := $(BUILD_DIR)/resolve_btfids/resolve_btfids

# Define simple and short `make test_progs`, `make test_sysctl`, etc targets
# to build individual tests.
@@ -177,7 +178,7 @@ $(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
	$(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) OUTPUT=$(BUILD_DIR)/libbpf/ \
		DESTDIR=$(SCRATCH_DIR) prefix= all install_headers

$(BUILD_DIR)/libbpf $(BUILD_DIR)/bpftool $(INCLUDE_DIR):
$(BUILD_DIR)/libbpf $(BUILD_DIR)/bpftool $(BUILD_DIR)/resolve_btfids $(INCLUDE_DIR):
	$(call msg,MKDIR,,$@)
	mkdir -p $@

@@ -190,6 +191,16 @@ else
	cp "$(VMLINUX_H)" $@
endif

$(RESOLVE_BTFIDS): $(BPFOBJ) | $(BUILD_DIR)/resolve_btfids \
		       $(TOOLSDIR)/bpf/resolve_btfids/main.c \
		       $(TOOLSDIR)/lib/rbtree.c \
		       $(TOOLSDIR)/lib/zalloc.c \
		       $(TOOLSDIR)/lib/string.c \
		       $(TOOLSDIR)/lib/ctype.c \
		       $(TOOLSDIR)/lib/str_error_r.c
	$(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/resolve_btfids \
		OUTPUT=$(BUILD_DIR)/resolve_btfids/ BPFOBJ=$(BPFOBJ)

# Get Clang's default includes on this system, as opposed to those seen by
# '-target bpf'. This fixes "missing" files on some architectures/distros,
# such as asm/byteorder.h, asm/socket.h, asm/sockios.h, sys/cdefs.h etc.
@@ -352,9 +363,11 @@ endif

$(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS) \
			     $(TRUNNER_EXTRA_OBJS) $$(BPFOBJ) \
			     $(RESOLVE_BTFIDS) \
			     | $(TRUNNER_BINARY)-extras
	$$(call msg,BINARY,,$$@)
	$$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) -o $$@
	$(RESOLVE_BTFIDS) --no-fail --btf btf_data.o $$@

endef

@@ -2,20 +2,6 @@
#ifndef __BPF_LEGACY__
#define __BPF_LEGACY__

/*
 * legacy bpf_map_def with extra fields supported only by bpf_load(), do not
 * use outside of samples/bpf
 */
struct bpf_map_def_legacy {
	unsigned int type;
	unsigned int key_size;
	unsigned int value_size;
	unsigned int max_entries;
	unsigned int map_flags;
	unsigned int inner_map_idx;
	unsigned int numa_node;
};

#define BPF_ANNOTATE_KV_PAIR(name, type_key, type_val)	\
	struct ____btf_map_##name {			\
		type_key key;				\
33
tools/testing/selftests/bpf/prog_tests/core_retro.c
Normal file
@@ -0,0 +1,33 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#define _GNU_SOURCE
#include <test_progs.h>
#include "test_core_retro.skel.h"

void test_core_retro(void)
{
	int err, zero = 0, res, duration = 0;
	struct test_core_retro *skel;

	/* load program */
	skel = test_core_retro__open_and_load();
	if (CHECK(!skel, "skel_load", "skeleton open/load failed\n"))
		goto out_close;

	/* attach probe */
	err = test_core_retro__attach(skel);
	if (CHECK(err, "attach_kprobe", "err %d\n", err))
		goto out_close;

	/* trigger */
	usleep(1);

	err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.results), &zero, &res);
	if (CHECK(err, "map_lookup", "failed to lookup result: %d\n", errno))
		goto out_close;

	CHECK(res != getpid(), "pid_check", "got %d != exp %d\n", res, getpid());

out_close:
	test_core_retro__destroy(skel);
}
@@ -4,6 +4,7 @@
#include <sched.h>
#include <sys/socket.h>
#include <test_progs.h>
#include "test_perf_buffer.skel.h"
#include "bpf/libbpf_internal.h"

/* AddressSanitizer sometimes crashes due to data dereference below, due to
@@ -25,16 +26,11 @@ static void on_sample(void *ctx, int cpu, void *data, __u32 size)

void test_perf_buffer(void)
{
	int err, prog_fd, on_len, nr_on_cpus = 0, nr_cpus, i, duration = 0;
	const char *prog_name = "kprobe/sys_nanosleep";
	const char *file = "./test_perf_buffer.o";
	int err, on_len, nr_on_cpus = 0, nr_cpus, i, duration = 0;
	struct perf_buffer_opts pb_opts = {};
	struct bpf_map *perf_buf_map;
	struct test_perf_buffer *skel;
	cpu_set_t cpu_set, cpu_seen;
	struct bpf_program *prog;
	struct bpf_object *obj;
	struct perf_buffer *pb;
	struct bpf_link *link;
	bool *online;

	nr_cpus = libbpf_num_possible_cpus();
@@ -51,33 +47,21 @@ void test_perf_buffer(void)
		nr_on_cpus++;

	/* load program */
	err = bpf_prog_load(file, BPF_PROG_TYPE_KPROBE, &obj, &prog_fd);
	if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno)) {
		obj = NULL;
		goto out_close;
	}

	prog = bpf_object__find_program_by_title(obj, prog_name);
	if (CHECK(!prog, "find_probe", "prog '%s' not found\n", prog_name))
	skel = test_perf_buffer__open_and_load();
	if (CHECK(!skel, "skel_load", "skeleton open/load failed\n"))
		goto out_close;

	/* load map */
	perf_buf_map = bpf_object__find_map_by_name(obj, "perf_buf_map");
	if (CHECK(!perf_buf_map, "find_perf_buf_map", "not found\n"))
		goto out_close;

	/* attach kprobe */
	link = bpf_program__attach_kprobe(prog, false /* retprobe */,
					  SYS_NANOSLEEP_KPROBE_NAME);
	if (CHECK(IS_ERR(link), "attach_kprobe", "err %ld\n", PTR_ERR(link)))
	/* attach probe */
	err = test_perf_buffer__attach(skel);
	if (CHECK(err, "attach_kprobe", "err %d\n", err))
		goto out_close;

	/* set up perf buffer */
	pb_opts.sample_cb = on_sample;
	pb_opts.ctx = &cpu_seen;
	pb = perf_buffer__new(bpf_map__fd(perf_buf_map), 1, &pb_opts);
	pb = perf_buffer__new(bpf_map__fd(skel->maps.perf_buf_map), 1, &pb_opts);
	if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb)))
		goto out_detach;
		goto out_close;

	/* trigger kprobe on every CPU */
	CPU_ZERO(&cpu_seen);
@@ -94,7 +78,7 @@ void test_perf_buffer(void)
					 &cpu_set);
		if (err && CHECK(err, "set_affinity", "cpu #%d, err %d\n",
				 i, err))
			goto out_detach;
			goto out_close;

		usleep(1);
	}
@@ -110,9 +94,7 @@ void test_perf_buffer(void)

out_free_pb:
	perf_buffer__free(pb);
out_detach:
	bpf_link__destroy(link);
out_close:
	bpf_object__close(obj);
	test_perf_buffer__destroy(skel);
	free(online);
}
111
tools/testing/selftests/bpf/prog_tests/resolve_btfids.c
Normal file
@@ -0,0 +1,111 @@
// SPDX-License-Identifier: GPL-2.0

#include <linux/err.h>
#include <string.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
#include <linux/btf.h>
#include <linux/kernel.h>
#include <linux/btf_ids.h>
#include "test_progs.h"

static int duration;

struct symbol {
	const char *name;
	int type;
	int id;
};

struct symbol test_symbols[] = {
	{ "unused", BTF_KIND_UNKN, 0 },
	{ "S", BTF_KIND_TYPEDEF, -1 },
	{ "T", BTF_KIND_TYPEDEF, -1 },
	{ "U", BTF_KIND_TYPEDEF, -1 },
	{ "S", BTF_KIND_STRUCT, -1 },
	{ "U", BTF_KIND_UNION, -1 },
	{ "func", BTF_KIND_FUNC, -1 },
};

BTF_ID_LIST(test_list)
BTF_ID_UNUSED
BTF_ID(typedef, S)
BTF_ID(typedef, T)
BTF_ID(typedef, U)
BTF_ID(struct, S)
BTF_ID(union, U)
BTF_ID(func, func)

static int
__resolve_symbol(struct btf *btf, int type_id)
{
	const struct btf_type *type;
	const char *str;
	unsigned int i;

	type = btf__type_by_id(btf, type_id);
	if (!type) {
		PRINT_FAIL("Failed to get type for ID %d\n", type_id);
		return -1;
	}

	for (i = 0; i < ARRAY_SIZE(test_symbols); i++) {
		if (test_symbols[i].id != -1)
			continue;

		if (BTF_INFO_KIND(type->info) != test_symbols[i].type)
			continue;

		str = btf__name_by_offset(btf, type->name_off);
		if (!str) {
			PRINT_FAIL("Failed to get name for BTF ID %d\n", type_id);
			return -1;
		}

		if (!strcmp(str, test_symbols[i].name))
			test_symbols[i].id = type_id;
	}

	return 0;
}

static int resolve_symbols(void)
{
	struct btf *btf;
	int type_id;
	__u32 nr;

	btf = btf__parse_elf("btf_data.o", NULL);
	if (CHECK(libbpf_get_error(btf), "resolve",
		  "Failed to load BTF from btf_data.o\n"))
		return -1;

	nr = btf__get_nr_types(btf);

	for (type_id = 1; type_id <= nr; type_id++) {
		if (__resolve_symbol(btf, type_id))
			break;
	}

	btf__free(btf);
	return 0;
}

int test_resolve_btfids(void)
{
	unsigned int i;
	int ret = 0;

	if (resolve_symbols())
		return -1;

	/* Check BTF_ID_LIST(test_list) IDs */
	for (i = 0; i < ARRAY_SIZE(test_symbols) && !ret; i++) {
		ret = CHECK(test_list[i] != test_symbols[i].id,
			    "id_check",
			    "wrong ID for %s (%d != %d)\n", test_symbols[i].name,
			    test_list[i], test_symbols[i].id);
	}

	return ret;
}
@@ -41,7 +41,7 @@ void test_skeleton(void)
	CHECK(bss->in4 != 0, "in4", "got %lld != exp %lld\n", bss->in4, 0LL);
	CHECK(bss->out4 != 0, "out4", "got %lld != exp %lld\n", bss->out4, 0LL);

	CHECK(rodata->in6 != 0, "in6", "got %d != exp %d\n", rodata->in6, 0);
	CHECK(rodata->in.in6 != 0, "in6", "got %d != exp %d\n", rodata->in.in6, 0);
	CHECK(bss->out6 != 0, "out6", "got %d != exp %d\n", bss->out6, 0);

	/* validate we can pre-setup global variables, even in .bss */
@@ -49,7 +49,7 @@ void test_skeleton(void)
	data->in2 = 11;
	bss->in3 = 12;
	bss->in4 = 13;
	rodata->in6 = 14;
	rodata->in.in6 = 14;

	err = test_skeleton__load(skel);
	if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err))
@@ -60,7 +60,7 @@ void test_skeleton(void)
	CHECK(data->in2 != 11, "in2", "got %lld != exp %lld\n", data->in2, 11LL);
	CHECK(bss->in3 != 12, "in3", "got %d != exp %d\n", bss->in3, 12);
	CHECK(bss->in4 != 13, "in4", "got %lld != exp %lld\n", bss->in4, 13LL);
	CHECK(rodata->in6 != 14, "in6", "got %d != exp %d\n", rodata->in6, 14);
	CHECK(rodata->in.in6 != 14, "in6", "got %d != exp %d\n", rodata->in.in6, 14);

	/* now set new values and attach to get them into outX variables */
	data->in1 = 1;
75
tools/testing/selftests/bpf/prog_tests/trace_printk.c
Normal file
@ -0,0 +1,75 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Oracle and/or its affiliates. */

#include <test_progs.h>

#include "trace_printk.skel.h"

#define TRACEBUF	"/sys/kernel/debug/tracing/trace_pipe"
#define SEARCHMSG	"testing,testing"

void test_trace_printk(void)
{
	int err, iter = 0, duration = 0, found = 0;
	struct trace_printk__bss *bss;
	struct trace_printk *skel;
	char *buf = NULL;
	FILE *fp = NULL;
	size_t buflen = 0;

	skel = trace_printk__open();
	if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
		return;

	err = trace_printk__load(skel);
	if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err))
		goto cleanup;

	bss = skel->bss;

	err = trace_printk__attach(skel);
	if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
		goto cleanup;

	fp = fopen(TRACEBUF, "r");
	if (CHECK(fp == NULL, "could not open trace buffer",
		  "error %d opening %s", errno, TRACEBUF))
		goto cleanup;

	/* We do not want to wait forever if this test fails... */
	fcntl(fileno(fp), F_SETFL, O_NONBLOCK);

	/* wait for tracepoint to trigger */
	usleep(1);
	trace_printk__detach(skel);

	if (CHECK(bss->trace_printk_ran == 0,
		  "bpf_trace_printk never ran",
		  "ran == %d", bss->trace_printk_ran))
		goto cleanup;

	if (CHECK(bss->trace_printk_ret <= 0,
		  "bpf_trace_printk returned <= 0 value",
		  "got %d", bss->trace_printk_ret))
		goto cleanup;

	/* verify our search string is in the trace buffer */
	while (getline(&buf, &buflen, fp) >= 0 || errno == EAGAIN) {
		if (buf && strstr(buf, SEARCHMSG) != NULL)
			found++;
		if (found == bss->trace_printk_ran)
			break;
		if (++iter > 1000)
			break;
	}

	if (CHECK(!found, "message from bpf_trace_printk not found",
		  "no instance of %s in %s", SEARCHMSG, TRACEBUF))
		goto cleanup;

cleanup:
	trace_printk__destroy(skel);
	free(buf);
	if (fp)
		fclose(fp);
}
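Reads from ``trace_pipe`` normally block and consume the data they return, which is why the test switches the stream to ``O_NONBLOCK`` and bounds the loop with an iteration cap. A hypothetical alternative, not what the test does, would be a bounded wait with ``poll(2)``::

    #include <poll.h>

    /* wait up to one second for trace data instead of spinning on
     * EAGAIN; fp, buf, buflen and SEARCHMSG as in the test above
     */
    struct pollfd pfd = { .fd = fileno(fp), .events = POLLIN };

    if (poll(&pfd, 1, 1000) > 0 && getline(&buf, &buflen, fp) >= 0)
        found = strstr(buf, SEARCHMSG) != NULL;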
75	tools/testing/selftests/bpf/prog_tests/udp_limit.c	Normal file
@ -0,0 +1,75 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "udp_limit.skel.h"

#include <sys/types.h>
#include <sys/socket.h>

static int duration;

void test_udp_limit(void)
{
	struct udp_limit *skel;
	int fd1 = -1, fd2 = -1;
	int cgroup_fd;

	cgroup_fd = test__join_cgroup("/udp_limit");
	if (CHECK(cgroup_fd < 0, "cg-join", "errno %d", errno))
		return;

	skel = udp_limit__open_and_load();
	if (CHECK(!skel, "skel-load", "errno %d", errno))
		goto close_cgroup_fd;

	skel->links.sock = bpf_program__attach_cgroup(skel->progs.sock, cgroup_fd);
	skel->links.sock_release = bpf_program__attach_cgroup(skel->progs.sock_release, cgroup_fd);
	if (CHECK(IS_ERR(skel->links.sock) || IS_ERR(skel->links.sock_release),
		  "cg-attach", "sock %ld sock_release %ld",
		  PTR_ERR(skel->links.sock),
		  PTR_ERR(skel->links.sock_release)))
		goto close_skeleton;

	/* BPF program enforces a single UDP socket per cgroup,
	 * verify that.
	 */
	fd1 = socket(AF_INET, SOCK_DGRAM, 0);
	if (CHECK(fd1 < 0, "fd1", "errno %d", errno))
		goto close_skeleton;

	fd2 = socket(AF_INET, SOCK_DGRAM, 0);
	if (CHECK(fd2 >= 0, "fd2", "errno %d", errno))
		goto close_skeleton;

	/* We can reopen again after close. */
	close(fd1);
	fd1 = -1;

	fd1 = socket(AF_INET, SOCK_DGRAM, 0);
	if (CHECK(fd1 < 0, "fd1-again", "errno %d", errno))
		goto close_skeleton;

	/* Make sure the program was invoked the expected
	 * number of times:
	 * - open fd1 - BPF_CGROUP_INET_SOCK_CREATE
	 * - attempt to open fd2 - BPF_CGROUP_INET_SOCK_CREATE
	 * - close fd1 - BPF_CGROUP_INET_SOCK_RELEASE
	 * - open fd1 again - BPF_CGROUP_INET_SOCK_CREATE
	 */
	if (CHECK(skel->bss->invocations != 4, "bss-invocations",
		  "invocations=%d", skel->bss->invocations))
		goto close_skeleton;

	/* We should still have a single socket in use */
	if (CHECK(skel->bss->in_use != 1, "bss-in_use",
		  "in_use=%d", skel->bss->in_use))
		goto close_skeleton;

close_skeleton:
	if (fd1 >= 0)
		close(fd1);
	if (fd2 >= 0)
		close(fd2);
	udp_limit__destroy(skel);
close_cgroup_fd:
	close(cgroup_fd);
}
50	tools/testing/selftests/bpf/progs/btf_data.c	Normal file
@ -0,0 +1,50 @@
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

struct S {
	int a;
	int b;
	int c;
};

union U {
	int a;
	int b;
	int c;
};

struct S1 {
	int a;
	int b;
	int c;
};

union U1 {
	int a;
	int b;
	int c;
};

typedef int T;
typedef int S;
typedef int U;
typedef int T1;
typedef int S1;
typedef int U1;

struct root_struct {
	S m_1;
	T m_2;
	U m_3;
	S1 m_4;
	T1 m_5;
	U1 m_6;
	struct S m_7;
	struct S1 m_8;
	union U m_9;
	union U1 m_10;
};

int func(struct root_struct *root)
{
	return 0;
}
30	tools/testing/selftests/bpf/progs/test_core_retro.c	Normal file
@ -0,0 +1,30 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

struct task_struct {
	int tgid;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, int);
} results SEC(".maps");

SEC("tp/raw_syscalls/sys_enter")
int handle_sys_enter(void *ctx)
{
	struct task_struct *task = (void *)bpf_get_current_task();
	int tgid = BPF_CORE_READ(task, tgid);
	int zero = 0;

	bpf_map_update_elem(&results, &zero, &tgid, 0);

	return 0;
}

char _license[] SEC("license") = "GPL";
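The one-field ``struct task_struct`` above, tagged ``__attribute__((preserve_access_index))``, is all CO-RE needs: clang records a relocation for the ``tgid`` access and libbpf rewrites the offset against the running kernel's BTF at load time. A hypothetical userspace readback of the result map (``map_fd`` would come from e.g. ``bpf_object__find_map_fd_by_name()``; this is an illustration, not part of the test)::

    #include <stdio.h>
    #include <bpf/bpf.h>

    static void print_traced_tgid(int map_fd)
    {
        int zero = 0, tgid = 0;

        /* key 0 holds the tgid recorded by handle_sys_enter() */
        if (!bpf_map_lookup_elem(map_fd, &zero, &tgid))
            printf("traced tgid: %d\n", tgid);
    }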
@ -12,8 +12,8 @@ struct {
	__uint(value_size, sizeof(int));
} perf_buf_map SEC(".maps");

SEC("kprobe/sys_nanosleep")
int BPF_KPROBE(handle_sys_nanosleep_entry)
SEC("tp/raw_syscalls/sys_enter")
int handle_sys_enter(void *ctx)
{
	int cpu = bpf_get_smp_processor_id();

@ -20,7 +20,9 @@ long long in4 __attribute__((aligned(64))) = 0;
struct s in5 = {};

/* .rodata section */
const volatile int in6 = 0;
const volatile struct {
	const int in6;
} in = {};

/* .data section */
int out1 = -1;
@ -46,7 +48,7 @@ int handler(const void *ctx)
	out3 = in3;
	out4 = in4;
	out5 = in5;
	out6 = in6;
	out6 = in.in6;

	bpf_syscall = CONFIG_BPF_SYSCALL;
	kern_ver = LINUX_KERNEL_VERSION;

21	tools/testing/selftests/bpf/progs/trace_printk.c	Normal file
@ -0,0 +1,21 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020, Oracle and/or its affiliates.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

int trace_printk_ret = 0;
int trace_printk_ran = 0;

SEC("tp/raw_syscalls/sys_enter")
int sys_enter(void *ctx)
{
	static const char fmt[] = "testing,testing %d\n";

	trace_printk_ret = bpf_trace_printk(fmt, sizeof(fmt),
					    ++trace_printk_ran);
	return 0;
}
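For reference, the helper invoked above is declared by libbpf's generated ``bpf_helper_defs.h`` roughly as follows (the exact return type has varied between ``int`` and ``long`` across versions)::

    static long (*bpf_trace_printk)(const char *fmt, __u32 fmt_size, ...) = (void *) 6;

``sizeof(fmt)`` must cover the format string including its NUL terminator, and the positive return value the test checks for is the number of bytes written.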
42	tools/testing/selftests/bpf/progs/udp_limit.c	Normal file
@ -0,0 +1,42 @@
// SPDX-License-Identifier: GPL-2.0-only

#include <sys/socket.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

int invocations = 0, in_use = 0;

SEC("cgroup/sock_create")
int sock(struct bpf_sock *ctx)
{
	if (ctx->type != SOCK_DGRAM)
		return 1;

	__sync_fetch_and_add(&invocations, 1);

	if (in_use > 0) {
		/* BPF_CGROUP_INET_SOCK_RELEASE is _not_ called
		 * when we return an error from the BPF
		 * program!
		 */
		return 0;
	}

	__sync_fetch_and_add(&in_use, 1);
	return 1;
}

SEC("cgroup/sock_release")
int sock_release(struct bpf_sock *ctx)
{
	if (ctx->type != SOCK_DGRAM)
		return 1;

	__sync_fetch_and_add(&invocations, 1);
	__sync_fetch_and_add(&in_use, -1);
	return 1;
}
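For ``cgroup/sock_create`` and ``cgroup/sock_release`` programs, returning 1 allows the operation and returning 0 rejects it, which userspace sees as ``EPERM`` from ``socket(2)``. A sketch of the expected view from inside the cgroup with both programs attached (illustration only)::

    int a = socket(AF_INET, SOCK_DGRAM, 0); /* allowed, in_use becomes 1       */
    int b = socket(AF_INET, SOCK_DGRAM, 0); /* rejected: b < 0, errno == EPERM */

    close(a);                               /* release hook runs, in_use drops to 0 */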
@ -12,6 +12,9 @@
#include <string.h>
#include <execinfo.h> /* backtrace */

#define EXIT_NO_TEST		2
#define EXIT_ERR_SETUP_INFRA	3

/* defined in test_progs.h */
struct test_env env = {};

@ -111,13 +114,13 @@ static void reset_affinity() {
	if (err < 0) {
		stdio_restore();
		fprintf(stderr, "Failed to reset process affinity: %d!\n", err);
		exit(-1);
		exit(EXIT_ERR_SETUP_INFRA);
	}
	err = pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
	if (err < 0) {
		stdio_restore();
		fprintf(stderr, "Failed to reset thread affinity: %d!\n", err);
		exit(-1);
		exit(EXIT_ERR_SETUP_INFRA);
	}
}

@ -126,7 +129,7 @@ static void save_netns(void)
	env.saved_netns_fd = open("/proc/self/ns/net", O_RDONLY);
	if (env.saved_netns_fd == -1) {
		perror("open(/proc/self/ns/net)");
		exit(-1);
		exit(EXIT_ERR_SETUP_INFRA);
	}
}

@ -135,7 +138,7 @@ static void restore_netns(void)
	if (setns(env.saved_netns_fd, CLONE_NEWNET) == -1) {
		stdio_restore();
		perror("setns(CLONE_NEWNS)");
		exit(-1);
		exit(EXIT_ERR_SETUP_INFRA);
	}
}

@ -740,7 +743,7 @@ out:
	close(env.saved_netns_fd);

	if (env.succ_cnt + env.fail_cnt + env.skip_cnt == 0)
		return EXIT_FAILURE;
		return EXIT_NO_TEST;

	return env.fail_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
}
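Taken together, the harness now reports four distinct outcomes; ``EXIT_SUCCESS`` and ``EXIT_FAILURE`` come from ``<stdlib.h>``, the other two are the defines added above::

    0  EXIT_SUCCESS          all selected tests passed
    1  EXIT_FAILURE          at least one test failed
    2  EXIT_NO_TEST          no tests were run at all
    3  EXIT_ERR_SETUP_INFRA  test environment setup failed

This lets CI wrappers distinguish "nothing ran" and "broken environment" from ordinary test failures.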