Commit 93b8952d22

Enact deprecation of legacy BPF map definitions in SEC("maps") ([0]). For the definitions themselves, introduce the LIBBPF_STRICT_MAP_DEFINITIONS flag for libbpf strict mode. If it is set, error out on any struct bpf_map_def-based map definition. If not set, libbpf will print a warning for each legacy BPF map to raise awareness that it is going away.

For any use of the BPF_ANNOTATE_KV_PAIR() macro, which provides a legacy way to associate BTF key/value type information with a legacy BPF map definition, warn through libbpf's pr_warn() error message (but don't fail BPF object open).

BPF-side struct bpf_map_def is marked as deprecated. The user-space struct bpf_map_def has to be used internally in libbpf, so it is left untouched. It should be enough for bpf_map__def() to be marked deprecated to raise awareness that it is going away.

bpftool is an interesting case that utilizes libbpf to open a BPF ELF object to generate a skeleton. As such, even though bpftool itself uses full-on strict libbpf mode (LIBBPF_STRICT_ALL), it has to relax it a bit for BPF map definition handling to minimize unnecessary disruptions. So opt out of LIBBPF_STRICT_MAP_DEFINITIONS for bpftool. User code that later uses the generated skeleton will make its own decision whether to enforce LIBBPF_STRICT_MAP_DEFINITIONS or not.

There are a few tests in selftests/bpf that consciously use legacy BPF map definitions to test libbpf functionality. For those, temporarily opt out of LIBBPF_STRICT_MAP_DEFINITIONS mode for the duration of those tests.

[0] Closes: https://github.com/libbpf/libbpf/issues/272

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20220120060529.1890907-4-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
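As a rough illustration (not part of the patch itself), user-space code choosing its strictness level after this change might look like the sketch below; only libbpf_set_strict_mode(), LIBBPF_STRICT_ALL, and LIBBPF_STRICT_MAP_DEFINITIONS are taken from libbpf, everything else is assumed boilerplate:

    #include <bpf/libbpf.h>
    #include <bpf/libbpf_legacy.h>

    int main(void)
    {
    	/* Treat legacy SEC("maps") definitions as hard errors along with
    	 * all other strict-mode checks...
    	 */
    	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

    	/* ...or, like bpftool, keep strict mode but tolerate legacy map
    	 * definitions in the objects being opened:
    	 *
    	 * libbpf_set_strict_mode(LIBBPF_STRICT_ALL & ~LIBBPF_STRICT_MAP_DEFINITIONS);
    	 */

    	/* open/load BPF objects or generated skeletons here */
    	return 0;
    }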
263 lines
8.7 KiB
C
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef __BPF_HELPERS__
#define __BPF_HELPERS__

/*
 * Note that bpf programs need to include either
 * vmlinux.h (auto-generated from BTF) or linux/types.h
 * in advance since bpf_helper_defs.h uses such types
 * as __u64.
 */
#include "bpf_helper_defs.h"

#define __uint(name, val) int (*name)[val]
#define __type(name, val) typeof(val) *name
#define __array(name, val) typeof(val) *name[]

/*
 * Helper macro to place programs, maps, license in
 * different sections in elf_bpf file. Section names
 * are interpreted by libbpf depending on the context (BPF programs, BPF maps,
 * extern variables, etc).
 * To allow use of SEC() with externs (e.g., for extern .maps declarations),
 * make sure __attribute__((unused)) doesn't trigger compilation warning.
 */
#define SEC(name) \
	_Pragma("GCC diagnostic push") \
	_Pragma("GCC diagnostic ignored \"-Wignored-attributes\"") \
	__attribute__((section(name), used)) \
	_Pragma("GCC diagnostic pop") \

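/*
 * Illustrative sketch (not part of the original header): a minimal BPF
 * object using SEC() to place a license string and a program into their
 * respective ELF sections. The tracepoint and function name are made up
 * for the example; only SEC() itself comes from this header.
 *
 *	char LICENSE[] SEC("license") = "GPL";
 *
 *	SEC("tracepoint/syscalls/sys_enter_execve")
 *	int handle_execve(void *ctx)
 *	{
 *		return 0;
 *	}
 */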
/* Avoid 'linux/stddef.h' definition of '__always_inline'. */
#undef __always_inline
#define __always_inline inline __attribute__((always_inline))

#ifndef __noinline
#define __noinline __attribute__((noinline))
#endif
#ifndef __weak
#define __weak __attribute__((weak))
#endif

/*
 * Use __hidden attribute to mark a non-static BPF subprogram effectively
 * static for BPF verifier's verification algorithm purposes, allowing more
 * extensive and permissive BPF verification process, taking into account
 * subprogram's caller context.
 */
#define __hidden __attribute__((visibility("hidden")))

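/*
 * Illustrative sketch (not part of the original header): a global
 * subprogram shared between programs in one object, marked __hidden so
 * that the verifier can treat it like a static function and verify it in
 * the context of each caller. The function itself is made up.
 *
 *	__hidden int account_event(__u64 id)
 *	{
 *		return id ? 0 : -1;
 *	}
 */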
/* When utilizing vmlinux.h with BPF CO-RE, user BPF programs can't include
 * any system-level headers (such as stddef.h, linux/version.h, etc), and
 * commonly-used macros like NULL and KERNEL_VERSION aren't available through
 * vmlinux.h. This just adds unnecessary hurdles and forces users to re-define
 * them on their own. So as a convenience, provide such definitions here.
 */
#ifndef NULL
#define NULL ((void *)0)
#endif

#ifndef KERNEL_VERSION
#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + ((c) > 255 ? 255 : (c)))
#endif

/*
 * Helper macros to manipulate data structures
 */
#ifndef offsetof
#define offsetof(TYPE, MEMBER) ((unsigned long)&((TYPE *)0)->MEMBER)
#endif
#ifndef container_of
#define container_of(ptr, type, member) \
	({ \
		void *__mptr = (void *)(ptr); \
		((type *)(__mptr - offsetof(type, member))); \
	})
#endif

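/*
 * Illustrative sketch (not part of the original header): given a pointer
 * to a member, container_of() recovers the enclosing structure. The
 * struct and helper below are made up for the example.
 *
 *	struct task_ctx {
 *		__u32 pid;
 *		__u64 start_ns;
 *	};
 *
 *	static __always_inline struct task_ctx *ctx_of(__u64 *start_ns)
 *	{
 *		return container_of(start_ns, struct task_ctx, start_ns);
 *	}
 */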
/*
 * Helper macro to throw a compilation error if __bpf_unreachable() gets
 * built into the resulting code. This works given BPF back end does not
 * implement __builtin_trap(). This is useful to assert that certain paths
 * of the program code are never used and hence eliminated by the compiler.
 *
 * For example, consider a switch statement that covers known cases used by
 * the program. __bpf_unreachable() can then reside in the default case. If
 * the program gets extended such that a case is not covered in the switch
 * statement, then it will throw a build error due to the default case not
 * being compiled out.
 */
#ifndef __bpf_unreachable
# define __bpf_unreachable() __builtin_trap()
#endif

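/*
 * Illustrative sketch (not part of the original header) of the pattern
 * described above. handle_evt() is always inlined and, in this sketch,
 * only ever called with compile-time constants, so the compiler can fold
 * each switch away and the default branch never reaches the object file.
 * A new call site with an unhandled value would leave __bpf_unreachable()
 * live and break the build. The names are made up.
 *
 *	static __always_inline int handle_evt(int evt)
 *	{
 *		switch (evt) {
 *		case 0:
 *			return 1;
 *		case 1:
 *			return 2;
 *		default:
 *			__bpf_unreachable();
 *		}
 *	}
 */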
/*
 * Helper function to perform a tail call with a constant/immediate map slot.
 */
#if __clang_major__ >= 8 && defined(__bpf__)
static __always_inline void
bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
{
	if (!__builtin_constant_p(slot))
		__bpf_unreachable();

	/*
	 * Provide a hard guarantee that LLVM won't optimize setting r2 (map
	 * pointer) and r3 (constant map index) from _different paths_ ending
	 * up at the _same_ call insn as otherwise we won't be able to use the
	 * jmpq/nopl retpoline-free patching by the x86-64 JIT in the kernel
	 * given they mismatch. See also d2e4c1e6c294 ("bpf: Constant map key
	 * tracking for prog array pokes") for details on verifier tracking.
	 *
	 * Note on clobber list: we need to stay in-line with BPF calling
	 * convention, so even if we don't end up using r0, r4, r5, we need
	 * to mark them as clobber so that LLVM doesn't end up using them
	 * before / after the call.
	 */
	asm volatile("r1 = %[ctx]\n\t"
		     "r2 = %[map]\n\t"
		     "r3 = %[slot]\n\t"
		     "call 12"
		     :: [ctx]"r"(ctx), [map]"r"(map), [slot]"i"(slot)
		     : "r0", "r1", "r2", "r3", "r4", "r5");
}
#endif

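/*
 * Illustrative sketch (not part of the original header): tail-calling
 * into a BPF_MAP_TYPE_PROG_ARRAY slot with a constant index, which is
 * what allows the kernel JIT to patch the call directly. The map and
 * program names are made up; the map type and XDP constants come from
 * vmlinux.h or linux/bpf.h. XDP_PASS is only reached if slot 0 is empty.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
 *		__uint(max_entries, 2);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(__u32));
 *	} jmp_table SEC(".maps");
 *
 *	SEC("xdp")
 *	int dispatch(struct xdp_md *ctx)
 *	{
 *		bpf_tail_call_static(ctx, &jmp_table, 0);
 *		return XDP_PASS;
 *	}
 */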
/*
 * Helper structure used by eBPF C program
 * to describe BPF map attributes to libbpf loader
 */
struct bpf_map_def {
	unsigned int type;
	unsigned int key_size;
	unsigned int value_size;
	unsigned int max_entries;
	unsigned int map_flags;
} __attribute__((deprecated("use BTF-defined maps in .maps section")));

enum libbpf_pin_type {
	LIBBPF_PIN_NONE,
	/* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */
	LIBBPF_PIN_BY_NAME,
};

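/*
 * Illustrative sketch (not part of the original header): the BTF-defined
 * replacement for a legacy struct bpf_map_def map, declared in the .maps
 * section with __uint()/__type() and, optionally, pinned by name. The map
 * name and key/value types are made up for the example.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH);
 *		__uint(max_entries, 1024);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *		__uint(pinning, LIBBPF_PIN_BY_NAME);
 *	} counts SEC(".maps");
 */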
enum libbpf_tristate {
	TRI_NO = 0,
	TRI_YES = 1,
	TRI_MODULE = 2,
};

#define __kconfig __attribute__((section(".kconfig")))
#define __ksym __attribute__((section(".ksyms")))

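/*
 * Illustrative sketch (not part of the original header): externs that
 * libbpf resolves at BPF object load time. Kconfig-backed externs go
 * into the .kconfig section (enum libbpf_tristate above is meant for
 * tristate, i.e. =m-capable, options), while __ksym externs are resolved
 * against kernel symbols. The specific names are just examples.
 *
 *	extern int LINUX_KERNEL_VERSION __kconfig;
 *	extern bool CONFIG_BPF_JIT_ALWAYS_ON __kconfig __weak;
 *	extern const void bpf_prog_active __ksym;
 */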
#ifndef ___bpf_concat
#define ___bpf_concat(a, b) a ## b
#endif
#ifndef ___bpf_apply
#define ___bpf_apply(fn, n) ___bpf_concat(fn, n)
#endif
#ifndef ___bpf_nth
#define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N
#endif
#ifndef ___bpf_narg
#define ___bpf_narg(...) \
	___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
#endif

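/*
 * Illustrative note (not part of the original header): ___bpf_narg()
 * counts its arguments (up to 12) by letting them shift the descending
 * number list so that ___bpf_nth() picks out the matching count, e.g.:
 *
 *	___bpf_narg(a, b, c)
 *	  -> ___bpf_nth(_, a, b, c, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
 *	  -> 3
 *
 * ___bpf_apply(fn, n) then pastes the count onto a macro family name,
 * e.g. ___bpf_apply(___bpf_fill, 3) -> ___bpf_fill3.
 */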
#define ___bpf_fill0(arr, p, x) do {} while (0)
#define ___bpf_fill1(arr, p, x) arr[p] = x
#define ___bpf_fill2(arr, p, x, args...) arr[p] = x; ___bpf_fill1(arr, p + 1, args)
#define ___bpf_fill3(arr, p, x, args...) arr[p] = x; ___bpf_fill2(arr, p + 1, args)
#define ___bpf_fill4(arr, p, x, args...) arr[p] = x; ___bpf_fill3(arr, p + 1, args)
#define ___bpf_fill5(arr, p, x, args...) arr[p] = x; ___bpf_fill4(arr, p + 1, args)
#define ___bpf_fill6(arr, p, x, args...) arr[p] = x; ___bpf_fill5(arr, p + 1, args)
#define ___bpf_fill7(arr, p, x, args...) arr[p] = x; ___bpf_fill6(arr, p + 1, args)
#define ___bpf_fill8(arr, p, x, args...) arr[p] = x; ___bpf_fill7(arr, p + 1, args)
#define ___bpf_fill9(arr, p, x, args...) arr[p] = x; ___bpf_fill8(arr, p + 1, args)
#define ___bpf_fill10(arr, p, x, args...) arr[p] = x; ___bpf_fill9(arr, p + 1, args)
#define ___bpf_fill11(arr, p, x, args...) arr[p] = x; ___bpf_fill10(arr, p + 1, args)
#define ___bpf_fill12(arr, p, x, args...) arr[p] = x; ___bpf_fill11(arr, p + 1, args)
#define ___bpf_fill(arr, args...) \
	___bpf_apply(___bpf_fill, ___bpf_narg(args))(arr, 0, args)

/*
 * BPF_SEQ_PRINTF to wrap bpf_seq_printf to-be-printed values
 * in a structure.
 */
#define BPF_SEQ_PRINTF(seq, fmt, args...) \
({ \
	static const char ___fmt[] = fmt; \
	unsigned long long ___param[___bpf_narg(args)]; \
	\
	_Pragma("GCC diagnostic push") \
	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
	___bpf_fill(___param, args); \
	_Pragma("GCC diagnostic pop") \
	\
	bpf_seq_printf(seq, ___fmt, sizeof(___fmt), \
		       ___param, sizeof(___param)); \
})

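/*
 * Illustrative sketch (not part of the original header): BPF_SEQ_PRINTF
 * used from a BPF iterator program. The iterator context types come from
 * vmlinux.h; the program itself is made up for the example.
 *
 *	SEC("iter/task")
 *	int dump_task(struct bpf_iter__task *ctx)
 *	{
 *		struct task_struct *task = ctx->task;
 *
 *		if (task)
 *			BPF_SEQ_PRINTF(ctx->meta->seq, "pid:\t%8d\n", task->pid);
 *		return 0;
 *	}
 */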
/*
 * BPF_SNPRINTF wraps the bpf_snprintf helper with variadic arguments instead of
 * an array of u64.
 */
#define BPF_SNPRINTF(out, out_size, fmt, args...) \
({ \
	static const char ___fmt[] = fmt; \
	unsigned long long ___param[___bpf_narg(args)]; \
	\
	_Pragma("GCC diagnostic push") \
	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
	___bpf_fill(___param, args); \
	_Pragma("GCC diagnostic pop") \
	\
	bpf_snprintf(out, out_size, ___fmt, \
		     ___param, sizeof(___param)); \
})

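/*
 * Illustrative sketch (not part of the original header): formatting into
 * a stack buffer with BPF_SNPRINTF. The surrounding program context is
 * omitted and the variables are made up for the example.
 *
 *	char out[32];
 *	long n;
 *
 *	n = BPF_SNPRINTF(out, sizeof(out), "cpu %u pid %d", cpu, pid);
 *	if (n < 0)
 *		return 0;
 */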
#ifdef BPF_NO_GLOBAL_DATA
#define BPF_PRINTK_FMT_MOD
#else
#define BPF_PRINTK_FMT_MOD static const
#endif

#define __bpf_printk(fmt, ...) \
({ \
	BPF_PRINTK_FMT_MOD char ____fmt[] = fmt; \
	bpf_trace_printk(____fmt, sizeof(____fmt), \
			 ##__VA_ARGS__); \
})

/*
 * __bpf_vprintk wraps the bpf_trace_vprintk helper with variadic arguments
 * instead of an array of u64.
 */
#define __bpf_vprintk(fmt, args...) \
({ \
	static const char ___fmt[] = fmt; \
	unsigned long long ___param[___bpf_narg(args)]; \
	\
	_Pragma("GCC diagnostic push") \
	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
	___bpf_fill(___param, args); \
	_Pragma("GCC diagnostic pop") \
	\
	bpf_trace_vprintk(___fmt, sizeof(___fmt), \
			  ___param, sizeof(___param)); \
})

/* Use __bpf_printk when bpf_printk call has 3 or fewer fmt args
 * Otherwise use __bpf_vprintk
 */
#define ___bpf_pick_printk(...) \
	___bpf_nth(_, ##__VA_ARGS__, __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, \
		   __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, \
		   __bpf_vprintk, __bpf_vprintk, __bpf_printk /*3*/, __bpf_printk /*2*/,\
		   __bpf_printk /*1*/, __bpf_printk /*0*/)

/* Helper macro to print out debug messages */
#define bpf_printk(fmt, args...) ___bpf_pick_printk(args)(fmt, ##args)

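/*
 * Illustrative sketch (not part of the original header): bpf_printk()
 * dispatches to __bpf_printk for up to three format arguments and to
 * __bpf_vprintk beyond that; the latter relies on the bpf_trace_vprintk
 * helper, so it needs a kernel that provides it (v5.16+). Output shows up
 * in /sys/kernel/debug/tracing/trace_pipe. The values are made up.
 *
 *	bpf_printk("pid %d", pid);
 *	bpf_printk("pid %d uid %d gid %d comm %s", pid, uid, gid, comm);
 */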
#endif