mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-25 13:14:07 +08:00
Daniel Borkmann says: ==================== pull-request: bpf-next 2022-06-17 We've added 72 non-merge commits during the last 15 day(s) which contain a total of 92 files changed, 4582 insertions(+), 834 deletions(-). The main changes are: 1) Add 64 bit enum value support to BTF, from Yonghong Song. 2) Implement support for sleepable BPF uprobe programs, from Delyan Kratunov. 3) Add new BPF helpers to issue and check TCP SYN cookies without binding to a socket especially useful in synproxy scenarios, from Maxim Mikityanskiy. 4) Fix libbpf's internal USDT address translation logic for shared libraries as well as uprobe's symbol file offset calculation, from Andrii Nakryiko. 5) Extend libbpf to provide an API for textual representation of the various map/prog/attach/link types and use it in bpftool, from Daniel Müller. 6) Provide BTF line info for RV64 and RV32 JITs, and fix a put_user bug in the core seen in 32 bit when storing BPF function addresses, from Pu Lehui. 7) Fix libbpf's BTF pointer size guessing by adding a list of various aliases for 'long' types, from Douglas Raillard. 8) Fix bpftool to readd setting rlimit since probing for memcg-based accounting has been unreliable and caused a regression on COS, from Quentin Monnet. 9) Fix UAF in BPF cgroup's effective program computation triggered upon BPF link detachment, from Tadeusz Struk. 10) Fix bpftool build bootstrapping during cross compilation which was pointing to the wrong AR process, from Shahab Vahedi. 11) Fix logic bug in libbpf's is_pow_of_2 implementation, from Yuze Chi. 12) BPF hash map optimization to avoid grabbing spinlocks of all CPUs when there is no free element. Also add a benchmark as reproducer, from Feng Zhou. 13) Fix bpftool's codegen to bail out when there's no BTF, from Michael Mullin. 14) Various minor cleanup and improvements all over the place. * https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (72 commits) bpf: Fix bpf_skc_lookup comment wrt. 
return type bpf: Fix non-static bpf_func_proto struct definitions selftests/bpf: Don't force lld on non-x86 architectures selftests/bpf: Add selftests for raw syncookie helpers in TC mode bpf: Allow the new syncookie helpers to work with SKBs selftests/bpf: Add selftests for raw syncookie helpers bpf: Add helpers to issue and check SYN cookies in XDP bpf: Allow helpers to accept pointers with a fixed size bpf: Fix documentation of th_len in bpf_tcp_{gen,check}_syncookie selftests/bpf: add tests for sleepable (uk)probes libbpf: add support for sleepable uprobe programs bpf: allow sleepable uprobe programs to attach bpf: implement sleepable uprobes by chaining gps bpf: move bpf_prog to bpf.h libbpf: Fix internal USDT address translation logic for shared libraries samples/bpf: Check detach prog exist or not in xdp_fwd selftests/bpf: Avoid skipping certain subtests selftests/bpf: Fix test_varlen verification failure with latest llvm bpftool: Do not check return value from libbpf_set_strict_mode() Revert "bpftool: Use libbpf 1.0 API mode instead of RLIMIT_MEMLOCK" ... ==================== Link: https://lore.kernel.org/r/20220617220836.7373-1-daniel@iogearbox.net Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
commit
9fb424c4c2
@ -74,7 +74,7 @@ sequentially and type id is assigned to each recognized type starting from id
|
||||
#define BTF_KIND_ARRAY 3 /* Array */
|
||||
#define BTF_KIND_STRUCT 4 /* Struct */
|
||||
#define BTF_KIND_UNION 5 /* Union */
|
||||
#define BTF_KIND_ENUM 6 /* Enumeration */
|
||||
#define BTF_KIND_ENUM 6 /* Enumeration up to 32-bit values */
|
||||
#define BTF_KIND_FWD 7 /* Forward */
|
||||
#define BTF_KIND_TYPEDEF 8 /* Typedef */
|
||||
#define BTF_KIND_VOLATILE 9 /* Volatile */
|
||||
@ -87,6 +87,7 @@ sequentially and type id is assigned to each recognized type starting from id
|
||||
#define BTF_KIND_FLOAT 16 /* Floating point */
|
||||
#define BTF_KIND_DECL_TAG 17 /* Decl Tag */
|
||||
#define BTF_KIND_TYPE_TAG 18 /* Type Tag */
|
||||
#define BTF_KIND_ENUM64 19 /* Enumeration up to 64-bit values */
|
||||
|
||||
Note that the type section encodes debug info, not just pure types.
|
||||
``BTF_KIND_FUNC`` is not a type, and it represents a defined subprogram.
|
||||
@ -101,10 +102,10 @@ Each type contains the following common data::
|
||||
* bits 24-28: kind (e.g. int, ptr, array...etc)
|
||||
* bits 29-30: unused
|
||||
* bit 31: kind_flag, currently used by
|
||||
* struct, union and fwd
|
||||
* struct, union, fwd, enum and enum64.
|
||||
*/
|
||||
__u32 info;
|
||||
/* "size" is used by INT, ENUM, STRUCT and UNION.
|
||||
/* "size" is used by INT, ENUM, STRUCT, UNION and ENUM64.
|
||||
* "size" tells the size of the type it is describing.
|
||||
*
|
||||
* "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
|
||||
@ -281,10 +282,10 @@ modes exist:
|
||||
|
||||
``struct btf_type`` encoding requirement:
|
||||
* ``name_off``: 0 or offset to a valid C identifier
|
||||
* ``info.kind_flag``: 0
|
||||
* ``info.kind_flag``: 0 for unsigned, 1 for signed
|
||||
* ``info.kind``: BTF_KIND_ENUM
|
||||
* ``info.vlen``: number of enum values
|
||||
* ``size``: 4
|
||||
* ``size``: 1/2/4/8
|
||||
|
||||
``btf_type`` is followed by ``info.vlen`` number of ``struct btf_enum``.::
|
||||
|
||||
@ -297,6 +298,10 @@ The ``btf_enum`` encoding:
|
||||
* ``name_off``: offset to a valid C identifier
|
||||
* ``val``: any value
|
||||
|
||||
If the original enum value is signed and the size is less than 4,
|
||||
that value will be sign extended into 4 bytes. If the size is 8,
|
||||
the value will be truncated into 4 bytes.
|
||||
|
||||
2.2.7 BTF_KIND_FWD
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
@ -493,7 +498,7 @@ the attribute is applied to a ``struct``/``union`` member or
|
||||
a ``func`` argument, and ``btf_decl_tag.component_idx`` should be a
|
||||
valid index (starting from 0) pointing to a member or an argument.
|
||||
|
||||
2.2.17 BTF_KIND_TYPE_TAG
|
||||
2.2.18 BTF_KIND_TYPE_TAG
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
``struct btf_type`` encoding requirement:
|
||||
@ -516,6 +521,32 @@ type_tag, then zero or more const/volatile/restrict/typedef
|
||||
and finally the base type. The base type is one of
|
||||
int, ptr, array, struct, union, enum, func_proto and float types.
|
||||
|
||||
2.2.19 BTF_KIND_ENUM64
|
||||
~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
``struct btf_type`` encoding requirement:
|
||||
* ``name_off``: 0 or offset to a valid C identifier
|
||||
* ``info.kind_flag``: 0 for unsigned, 1 for signed
|
||||
* ``info.kind``: BTF_KIND_ENUM64
|
||||
* ``info.vlen``: number of enum values
|
||||
* ``size``: 1/2/4/8
|
||||
|
||||
``btf_type`` is followed by ``info.vlen`` number of ``struct btf_enum64``.::
|
||||
|
||||
struct btf_enum64 {
|
||||
__u32 name_off;
|
||||
__u32 val_lo32;
|
||||
__u32 val_hi32;
|
||||
};
|
||||
|
||||
The ``btf_enum64`` encoding:
|
||||
* ``name_off``: offset to a valid C identifier
|
||||
* ``val_lo32``: lower 32-bit value for a 64-bit value
|
||||
* ``val_hi32``: high 32-bit value for a 64-bit value
|
||||
|
||||
If the original enum value is signed and the size is less than 8,
|
||||
that value will be sign extended into 8 bytes.
|
||||
|
||||
3. BTF Kernel API
|
||||
=================
|
||||
|
||||
|
@ -127,7 +127,7 @@ BPF_XOR | BPF_K | BPF_ALU64 means::
|
||||
Byte swap instructions
|
||||
----------------------
|
||||
|
||||
The byte swap instructions use an instruction class of ``BFP_ALU`` and a 4-bit
|
||||
The byte swap instructions use an instruction class of ``BPF_ALU`` and a 4-bit
|
||||
code field of ``BPF_END``.
|
||||
|
||||
The byte swap instructions operate on the destination register
|
||||
|
@ -712,22 +712,6 @@ static inline void emit_alu_r(const u8 dst, const u8 src, const bool is64,
|
||||
}
|
||||
}
|
||||
|
||||
/* ALU operation (32 bit)
|
||||
* dst = dst (op) src
|
||||
*/
|
||||
static inline void emit_a32_alu_r(const s8 dst, const s8 src,
|
||||
struct jit_ctx *ctx, const bool is64,
|
||||
const bool hi, const u8 op) {
|
||||
const s8 *tmp = bpf2a32[TMP_REG_1];
|
||||
s8 rn, rd;
|
||||
|
||||
rn = arm_bpf_get_reg32(src, tmp[1], ctx);
|
||||
rd = arm_bpf_get_reg32(dst, tmp[0], ctx);
|
||||
/* ALU operation */
|
||||
emit_alu_r(rd, rn, is64, hi, op, ctx);
|
||||
arm_bpf_put_reg32(dst, rd, ctx);
|
||||
}
|
||||
|
||||
/* ALU operation (64 bit) */
|
||||
static inline void emit_a32_alu_r64(const bool is64, const s8 dst[],
|
||||
const s8 src[], struct jit_ctx *ctx,
|
||||
|
@ -69,6 +69,7 @@ struct rv_jit_context {
|
||||
struct bpf_prog *prog;
|
||||
u16 *insns; /* RV insns */
|
||||
int ninsns;
|
||||
int body_len;
|
||||
int epilogue_offset;
|
||||
int *offset; /* BPF to RV */
|
||||
int nexentries;
|
||||
|
@ -44,7 +44,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
||||
unsigned int prog_size = 0, extable_size = 0;
|
||||
bool tmp_blinded = false, extra_pass = false;
|
||||
struct bpf_prog *tmp, *orig_prog = prog;
|
||||
int pass = 0, prev_ninsns = 0, i;
|
||||
int pass = 0, prev_ninsns = 0, prologue_len, i;
|
||||
struct rv_jit_data *jit_data;
|
||||
struct rv_jit_context *ctx;
|
||||
|
||||
@ -95,6 +95,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
||||
prog = orig_prog;
|
||||
goto out_offset;
|
||||
}
|
||||
ctx->body_len = ctx->ninsns;
|
||||
bpf_jit_build_prologue(ctx);
|
||||
ctx->epilogue_offset = ctx->ninsns;
|
||||
bpf_jit_build_epilogue(ctx);
|
||||
@ -161,6 +162,11 @@ skip_init_ctx:
|
||||
|
||||
if (!prog->is_func || extra_pass) {
|
||||
bpf_jit_binary_lock_ro(jit_data->header);
|
||||
prologue_len = ctx->epilogue_offset - ctx->body_len;
|
||||
for (i = 0; i < prog->len; i++)
|
||||
ctx->offset[i] = ninsns_rvoff(prologue_len +
|
||||
ctx->offset[i]);
|
||||
bpf_prog_fill_jited_linfo(prog, ctx->offset);
|
||||
out_offset:
|
||||
kfree(ctx->offset);
|
||||
kfree(jit_data);
|
||||
|
@ -5,6 +5,7 @@
|
||||
#define _LINUX_BPF_H 1
|
||||
|
||||
#include <uapi/linux/bpf.h>
|
||||
#include <uapi/linux/filter.h>
|
||||
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/file.h>
|
||||
@ -22,8 +23,10 @@
|
||||
#include <linux/sched/mm.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/percpu-refcount.h>
|
||||
#include <linux/stddef.h>
|
||||
#include <linux/bpfptr.h>
|
||||
#include <linux/btf.h>
|
||||
#include <linux/rcupdate_trace.h>
|
||||
|
||||
struct bpf_verifier_env;
|
||||
struct bpf_verifier_log;
|
||||
@ -398,6 +401,9 @@ enum bpf_type_flag {
|
||||
/* DYNPTR points to a ringbuf record. */
|
||||
DYNPTR_TYPE_RINGBUF = BIT(9 + BPF_BASE_TYPE_BITS),
|
||||
|
||||
/* Size is known at compile time. */
|
||||
MEM_FIXED_SIZE = BIT(10 + BPF_BASE_TYPE_BITS),
|
||||
|
||||
__BPF_TYPE_FLAG_MAX,
|
||||
__BPF_TYPE_LAST_FLAG = __BPF_TYPE_FLAG_MAX - 1,
|
||||
};
|
||||
@ -461,6 +467,8 @@ enum bpf_arg_type {
|
||||
* all bytes or clear them in error case.
|
||||
*/
|
||||
ARG_PTR_TO_UNINIT_MEM = MEM_UNINIT | ARG_PTR_TO_MEM,
|
||||
/* Pointer to valid memory of size known at compile time. */
|
||||
ARG_PTR_TO_FIXED_SIZE_MEM = MEM_FIXED_SIZE | ARG_PTR_TO_MEM,
|
||||
|
||||
/* This must be the last entry. Its purpose is to ensure the enum is
|
||||
* wide enough to hold the higher bits reserved for bpf_type_flag.
|
||||
@ -526,6 +534,14 @@ struct bpf_func_proto {
|
||||
u32 *arg5_btf_id;
|
||||
};
|
||||
u32 *arg_btf_id[5];
|
||||
struct {
|
||||
size_t arg1_size;
|
||||
size_t arg2_size;
|
||||
size_t arg3_size;
|
||||
size_t arg4_size;
|
||||
size_t arg5_size;
|
||||
};
|
||||
size_t arg_size[5];
|
||||
};
|
||||
int *ret_btf_id; /* return value btf_id */
|
||||
bool (*allowed)(const struct bpf_prog *prog);
|
||||
@ -1084,6 +1100,40 @@ struct bpf_prog_aux {
|
||||
};
|
||||
};
|
||||
|
||||
struct bpf_prog {
|
||||
u16 pages; /* Number of allocated pages */
|
||||
u16 jited:1, /* Is our filter JIT'ed? */
|
||||
jit_requested:1,/* archs need to JIT the prog */
|
||||
gpl_compatible:1, /* Is filter GPL compatible? */
|
||||
cb_access:1, /* Is control block accessed? */
|
||||
dst_needed:1, /* Do we need dst entry? */
|
||||
blinding_requested:1, /* needs constant blinding */
|
||||
blinded:1, /* Was blinded */
|
||||
is_func:1, /* program is a bpf function */
|
||||
kprobe_override:1, /* Do we override a kprobe? */
|
||||
has_callchain_buf:1, /* callchain buffer allocated? */
|
||||
enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
|
||||
call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */
|
||||
call_get_func_ip:1, /* Do we call get_func_ip() */
|
||||
tstamp_type_access:1; /* Accessed __sk_buff->tstamp_type */
|
||||
enum bpf_prog_type type; /* Type of BPF program */
|
||||
enum bpf_attach_type expected_attach_type; /* For some prog types */
|
||||
u32 len; /* Number of filter blocks */
|
||||
u32 jited_len; /* Size of jited insns in bytes */
|
||||
u8 tag[BPF_TAG_SIZE];
|
||||
struct bpf_prog_stats __percpu *stats;
|
||||
int __percpu *active;
|
||||
unsigned int (*bpf_func)(const void *ctx,
|
||||
const struct bpf_insn *insn);
|
||||
struct bpf_prog_aux *aux; /* Auxiliary fields */
|
||||
struct sock_fprog_kern *orig_prog; /* Original BPF program */
|
||||
/* Instructions for interpreter */
|
||||
union {
|
||||
DECLARE_FLEX_ARRAY(struct sock_filter, insns);
|
||||
DECLARE_FLEX_ARRAY(struct bpf_insn, insnsi);
|
||||
};
|
||||
};
|
||||
|
||||
struct bpf_array_aux {
|
||||
/* Programs with direct jumps into programs part of this array. */
|
||||
struct list_head poke_progs;
|
||||
@ -1336,6 +1386,8 @@ extern struct bpf_empty_prog_array bpf_empty_prog_array;
|
||||
|
||||
struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
|
||||
void bpf_prog_array_free(struct bpf_prog_array *progs);
|
||||
/* Use when traversal over the bpf_prog_array uses tasks_trace rcu */
|
||||
void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs);
|
||||
int bpf_prog_array_length(struct bpf_prog_array *progs);
|
||||
bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
|
||||
int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
|
||||
@ -1427,6 +1479,55 @@ bpf_prog_run_array(const struct bpf_prog_array *array,
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Notes on RCU design for bpf_prog_arrays containing sleepable programs:
|
||||
*
|
||||
* We use the tasks_trace rcu flavor read section to protect the bpf_prog_array
|
||||
* overall. As a result, we must use the bpf_prog_array_free_sleepable
|
||||
* in order to use the tasks_trace rcu grace period.
|
||||
*
|
||||
* When a non-sleepable program is inside the array, we take the rcu read
|
||||
* section and disable preemption for that program alone, so it can access
|
||||
* rcu-protected dynamically sized maps.
|
||||
*/
|
||||
static __always_inline u32
|
||||
bpf_prog_run_array_sleepable(const struct bpf_prog_array __rcu *array_rcu,
|
||||
const void *ctx, bpf_prog_run_fn run_prog)
|
||||
{
|
||||
const struct bpf_prog_array_item *item;
|
||||
const struct bpf_prog *prog;
|
||||
const struct bpf_prog_array *array;
|
||||
struct bpf_run_ctx *old_run_ctx;
|
||||
struct bpf_trace_run_ctx run_ctx;
|
||||
u32 ret = 1;
|
||||
|
||||
might_fault();
|
||||
|
||||
rcu_read_lock_trace();
|
||||
migrate_disable();
|
||||
|
||||
array = rcu_dereference_check(array_rcu, rcu_read_lock_trace_held());
|
||||
if (unlikely(!array))
|
||||
goto out;
|
||||
old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
|
||||
item = &array->items[0];
|
||||
while ((prog = READ_ONCE(item->prog))) {
|
||||
if (!prog->aux->sleepable)
|
||||
rcu_read_lock();
|
||||
|
||||
run_ctx.bpf_cookie = item->bpf_cookie;
|
||||
ret &= run_prog(prog, ctx);
|
||||
item++;
|
||||
|
||||
if (!prog->aux->sleepable)
|
||||
rcu_read_unlock();
|
||||
}
|
||||
bpf_reset_run_ctx(old_run_ctx);
|
||||
out:
|
||||
migrate_enable();
|
||||
rcu_read_unlock_trace();
|
||||
return ret;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_BPF_SYSCALL
|
||||
DECLARE_PER_CPU(int, bpf_prog_active);
|
||||
extern struct mutex bpf_stats_enabled_mutex;
|
||||
@ -2104,6 +2205,7 @@ int sock_map_bpf_prog_query(const union bpf_attr *attr,
|
||||
union bpf_attr __user *uattr);
|
||||
|
||||
void sock_map_unhash(struct sock *sk);
|
||||
void sock_map_destroy(struct sock *sk);
|
||||
void sock_map_close(struct sock *sk, long timeout);
|
||||
#else
|
||||
static inline int bpf_prog_offload_init(struct bpf_prog *prog,
|
||||
@ -2261,12 +2363,9 @@ extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
|
||||
extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
|
||||
extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
|
||||
extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
|
||||
extern const struct bpf_func_proto bpf_kallsyms_lookup_name_proto;
|
||||
extern const struct bpf_func_proto bpf_find_vma_proto;
|
||||
extern const struct bpf_func_proto bpf_loop_proto;
|
||||
extern const struct bpf_func_proto bpf_strncmp_proto;
|
||||
extern const struct bpf_func_proto bpf_copy_from_user_task_proto;
|
||||
extern const struct bpf_func_proto bpf_kptr_xchg_proto;
|
||||
|
||||
const struct bpf_func_proto *tracing_prog_func_proto(
|
||||
enum bpf_func_id func_id, const struct bpf_prog *prog);
|
||||
|
@ -299,7 +299,7 @@ struct bpf_verifier_state {
|
||||
* If is_state_visited() sees a state with branches > 0 it means
|
||||
* there is a loop. If such state is exactly equal to the current state
|
||||
* it's an infinite loop. Note states_equal() checks for states
|
||||
* equvalency, so two states being 'states_equal' does not mean
|
||||
* equivalency, so two states being 'states_equal' does not mean
|
||||
* infinite loop. The exact comparison is provided by
|
||||
* states_maybe_looping() function. It's a stronger pre-check and
|
||||
* much faster than states_equal().
|
||||
|
@ -177,6 +177,19 @@ static inline bool btf_type_is_enum(const struct btf_type *t)
|
||||
return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM;
|
||||
}
|
||||
|
||||
static inline bool btf_is_any_enum(const struct btf_type *t)
|
||||
{
|
||||
return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM ||
|
||||
BTF_INFO_KIND(t->info) == BTF_KIND_ENUM64;
|
||||
}
|
||||
|
||||
static inline bool btf_kind_core_compat(const struct btf_type *t1,
|
||||
const struct btf_type *t2)
|
||||
{
|
||||
return BTF_INFO_KIND(t1->info) == BTF_INFO_KIND(t2->info) ||
|
||||
(btf_is_any_enum(t1) && btf_is_any_enum(t2));
|
||||
}
|
||||
|
||||
static inline bool str_is_empty(const char *s)
|
||||
{
|
||||
return !s || !s[0];
|
||||
@ -192,6 +205,16 @@ static inline bool btf_is_enum(const struct btf_type *t)
|
||||
return btf_kind(t) == BTF_KIND_ENUM;
|
||||
}
|
||||
|
||||
static inline bool btf_is_enum64(const struct btf_type *t)
|
||||
{
|
||||
return btf_kind(t) == BTF_KIND_ENUM64;
|
||||
}
|
||||
|
||||
static inline u64 btf_enum64_value(const struct btf_enum64 *e)
|
||||
{
|
||||
return ((u64)e->val_hi32 << 32) | e->val_lo32;
|
||||
}
|
||||
|
||||
static inline bool btf_is_composite(const struct btf_type *t)
|
||||
{
|
||||
u16 kind = btf_kind(t);
|
||||
@ -332,6 +355,11 @@ static inline struct btf_enum *btf_enum(const struct btf_type *t)
|
||||
return (struct btf_enum *)(t + 1);
|
||||
}
|
||||
|
||||
static inline struct btf_enum64 *btf_enum64(const struct btf_type *t)
|
||||
{
|
||||
return (struct btf_enum64 *)(t + 1);
|
||||
}
|
||||
|
||||
static inline const struct btf_var_secinfo *btf_type_var_secinfo(
|
||||
const struct btf_type *t)
|
||||
{
|
||||
|
@ -559,40 +559,6 @@ struct bpf_prog_stats {
|
||||
struct u64_stats_sync syncp;
|
||||
} __aligned(2 * sizeof(u64));
|
||||
|
||||
struct bpf_prog {
|
||||
u16 pages; /* Number of allocated pages */
|
||||
u16 jited:1, /* Is our filter JIT'ed? */
|
||||
jit_requested:1,/* archs need to JIT the prog */
|
||||
gpl_compatible:1, /* Is filter GPL compatible? */
|
||||
cb_access:1, /* Is control block accessed? */
|
||||
dst_needed:1, /* Do we need dst entry? */
|
||||
blinding_requested:1, /* needs constant blinding */
|
||||
blinded:1, /* Was blinded */
|
||||
is_func:1, /* program is a bpf function */
|
||||
kprobe_override:1, /* Do we override a kprobe? */
|
||||
has_callchain_buf:1, /* callchain buffer allocated? */
|
||||
enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
|
||||
call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */
|
||||
call_get_func_ip:1, /* Do we call get_func_ip() */
|
||||
tstamp_type_access:1; /* Accessed __sk_buff->tstamp_type */
|
||||
enum bpf_prog_type type; /* Type of BPF program */
|
||||
enum bpf_attach_type expected_attach_type; /* For some prog types */
|
||||
u32 len; /* Number of filter blocks */
|
||||
u32 jited_len; /* Size of jited insns in bytes */
|
||||
u8 tag[BPF_TAG_SIZE];
|
||||
struct bpf_prog_stats __percpu *stats;
|
||||
int __percpu *active;
|
||||
unsigned int (*bpf_func)(const void *ctx,
|
||||
const struct bpf_insn *insn);
|
||||
struct bpf_prog_aux *aux; /* Auxiliary fields */
|
||||
struct sock_fprog_kern *orig_prog; /* Original BPF program */
|
||||
/* Instructions for interpreter */
|
||||
union {
|
||||
DECLARE_FLEX_ARRAY(struct sock_filter, insns);
|
||||
DECLARE_FLEX_ARRAY(struct bpf_insn, insnsi);
|
||||
};
|
||||
};
|
||||
|
||||
struct sk_filter {
|
||||
refcount_t refcnt;
|
||||
struct rcu_head rcu;
|
||||
|
@ -95,6 +95,7 @@ struct sk_psock {
|
||||
spinlock_t link_lock;
|
||||
refcount_t refcnt;
|
||||
void (*saved_unhash)(struct sock *sk);
|
||||
void (*saved_destroy)(struct sock *sk);
|
||||
void (*saved_close)(struct sock *sk, long timeout);
|
||||
void (*saved_write_space)(struct sock *sk);
|
||||
void (*saved_data_ready)(struct sock *sk);
|
||||
|
@ -434,6 +434,7 @@ u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
|
||||
struct tcphdr *th, u32 *cookie);
|
||||
u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
|
||||
struct tcphdr *th, u32 *cookie);
|
||||
u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss);
|
||||
u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
|
||||
const struct tcp_request_sock_ops *af_ops,
|
||||
struct sock *sk, struct tcphdr *th);
|
||||
|
@ -3597,10 +3597,11 @@ union bpf_attr {
|
||||
*
|
||||
* *iph* points to the start of the IPv4 or IPv6 header, while
|
||||
* *iph_len* contains **sizeof**\ (**struct iphdr**) or
|
||||
* **sizeof**\ (**struct ip6hdr**).
|
||||
* **sizeof**\ (**struct ipv6hdr**).
|
||||
*
|
||||
* *th* points to the start of the TCP header, while *th_len*
|
||||
* contains **sizeof**\ (**struct tcphdr**).
|
||||
* contains the length of the TCP header (at least
|
||||
* **sizeof**\ (**struct tcphdr**)).
|
||||
* Return
|
||||
* 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative
|
||||
* error otherwise.
|
||||
@ -3783,10 +3784,11 @@ union bpf_attr {
|
||||
*
|
||||
* *iph* points to the start of the IPv4 or IPv6 header, while
|
||||
* *iph_len* contains **sizeof**\ (**struct iphdr**) or
|
||||
* **sizeof**\ (**struct ip6hdr**).
|
||||
* **sizeof**\ (**struct ipv6hdr**).
|
||||
*
|
||||
* *th* points to the start of the TCP header, while *th_len*
|
||||
* contains the length of the TCP header.
|
||||
* contains the length of the TCP header with options (at least
|
||||
* **sizeof**\ (**struct tcphdr**)).
|
||||
* Return
|
||||
* On success, lower 32 bits hold the generated SYN cookie in
|
||||
* followed by 16 bits which hold the MSS value for that cookie,
|
||||
@ -5249,6 +5251,80 @@ union bpf_attr {
|
||||
* Pointer to the underlying dynptr data, NULL if the dynptr is
|
||||
* read-only, if the dynptr is invalid, or if the offset and length
|
||||
* is out of bounds.
|
||||
*
|
||||
* s64 bpf_tcp_raw_gen_syncookie_ipv4(struct iphdr *iph, struct tcphdr *th, u32 th_len)
|
||||
* Description
|
||||
* Try to issue a SYN cookie for the packet with corresponding
|
||||
* IPv4/TCP headers, *iph* and *th*, without depending on a
|
||||
* listening socket.
|
||||
*
|
||||
* *iph* points to the IPv4 header.
|
||||
*
|
||||
* *th* points to the start of the TCP header, while *th_len*
|
||||
* contains the length of the TCP header (at least
|
||||
* **sizeof**\ (**struct tcphdr**)).
|
||||
* Return
|
||||
* On success, lower 32 bits hold the generated SYN cookie in
|
||||
* followed by 16 bits which hold the MSS value for that cookie,
|
||||
* and the top 16 bits are unused.
|
||||
*
|
||||
* On failure, the returned value is one of the following:
|
||||
*
|
||||
* **-EINVAL** if *th_len* is invalid.
|
||||
*
|
||||
* s64 bpf_tcp_raw_gen_syncookie_ipv6(struct ipv6hdr *iph, struct tcphdr *th, u32 th_len)
|
||||
* Description
|
||||
* Try to issue a SYN cookie for the packet with corresponding
|
||||
* IPv6/TCP headers, *iph* and *th*, without depending on a
|
||||
* listening socket.
|
||||
*
|
||||
* *iph* points to the IPv6 header.
|
||||
*
|
||||
* *th* points to the start of the TCP header, while *th_len*
|
||||
* contains the length of the TCP header (at least
|
||||
* **sizeof**\ (**struct tcphdr**)).
|
||||
* Return
|
||||
* On success, lower 32 bits hold the generated SYN cookie in
|
||||
* followed by 16 bits which hold the MSS value for that cookie,
|
||||
* and the top 16 bits are unused.
|
||||
*
|
||||
* On failure, the returned value is one of the following:
|
||||
*
|
||||
* **-EINVAL** if *th_len* is invalid.
|
||||
*
|
||||
* **-EPROTONOSUPPORT** if CONFIG_IPV6 is not builtin.
|
||||
*
|
||||
* long bpf_tcp_raw_check_syncookie_ipv4(struct iphdr *iph, struct tcphdr *th)
|
||||
* Description
|
||||
* Check whether *iph* and *th* contain a valid SYN cookie ACK
|
||||
* without depending on a listening socket.
|
||||
*
|
||||
* *iph* points to the IPv4 header.
|
||||
*
|
||||
* *th* points to the TCP header.
|
||||
* Return
|
||||
* 0 if *iph* and *th* are a valid SYN cookie ACK.
|
||||
*
|
||||
* On failure, the returned value is one of the following:
|
||||
*
|
||||
* **-EACCES** if the SYN cookie is not valid.
|
||||
*
|
||||
* long bpf_tcp_raw_check_syncookie_ipv6(struct ipv6hdr *iph, struct tcphdr *th)
|
||||
* Description
|
||||
* Check whether *iph* and *th* contain a valid SYN cookie ACK
|
||||
* without depending on a listening socket.
|
||||
*
|
||||
* *iph* points to the IPv6 header.
|
||||
*
|
||||
* *th* points to the TCP header.
|
||||
* Return
|
||||
* 0 if *iph* and *th* are a valid SYN cookie ACK.
|
||||
*
|
||||
* On failure, the returned value is one of the following:
|
||||
*
|
||||
* **-EACCES** if the SYN cookie is not valid.
|
||||
*
|
||||
* **-EPROTONOSUPPORT** if CONFIG_IPV6 is not builtin.
|
||||
*/
|
||||
#define __BPF_FUNC_MAPPER(FN) \
|
||||
FN(unspec), \
|
||||
@ -5455,6 +5531,10 @@ union bpf_attr {
|
||||
FN(dynptr_read), \
|
||||
FN(dynptr_write), \
|
||||
FN(dynptr_data), \
|
||||
FN(tcp_raw_gen_syncookie_ipv4), \
|
||||
FN(tcp_raw_gen_syncookie_ipv6), \
|
||||
FN(tcp_raw_check_syncookie_ipv4), \
|
||||
FN(tcp_raw_check_syncookie_ipv6), \
|
||||
/* */
|
||||
|
||||
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
|
||||
|
@ -36,10 +36,10 @@ struct btf_type {
|
||||
* bits 24-28: kind (e.g. int, ptr, array...etc)
|
||||
* bits 29-30: unused
|
||||
* bit 31: kind_flag, currently used by
|
||||
* struct, union and fwd
|
||||
* struct, union, enum, fwd and enum64
|
||||
*/
|
||||
__u32 info;
|
||||
/* "size" is used by INT, ENUM, STRUCT, UNION and DATASEC.
|
||||
/* "size" is used by INT, ENUM, STRUCT, UNION, DATASEC and ENUM64.
|
||||
* "size" tells the size of the type it is describing.
|
||||
*
|
||||
* "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
|
||||
@ -63,7 +63,7 @@ enum {
|
||||
BTF_KIND_ARRAY = 3, /* Array */
|
||||
BTF_KIND_STRUCT = 4, /* Struct */
|
||||
BTF_KIND_UNION = 5, /* Union */
|
||||
BTF_KIND_ENUM = 6, /* Enumeration */
|
||||
BTF_KIND_ENUM = 6, /* Enumeration up to 32-bit values */
|
||||
BTF_KIND_FWD = 7, /* Forward */
|
||||
BTF_KIND_TYPEDEF = 8, /* Typedef */
|
||||
BTF_KIND_VOLATILE = 9, /* Volatile */
|
||||
@ -76,6 +76,7 @@ enum {
|
||||
BTF_KIND_FLOAT = 16, /* Floating point */
|
||||
BTF_KIND_DECL_TAG = 17, /* Decl Tag */
|
||||
BTF_KIND_TYPE_TAG = 18, /* Type Tag */
|
||||
BTF_KIND_ENUM64 = 19, /* Enumeration up to 64-bit values */
|
||||
|
||||
NR_BTF_KINDS,
|
||||
BTF_KIND_MAX = NR_BTF_KINDS - 1,
|
||||
@ -186,4 +187,14 @@ struct btf_decl_tag {
|
||||
__s32 component_idx;
|
||||
};
|
||||
|
||||
/* BTF_KIND_ENUM64 is followed by multiple "struct btf_enum64".
|
||||
* The exact number of btf_enum64 is stored in the vlen (of the
|
||||
* info in "struct btf_type").
|
||||
*/
|
||||
struct btf_enum64 {
|
||||
__u32 name_off;
|
||||
__u32 val_lo32;
|
||||
__u32 val_hi32;
|
||||
};
|
||||
|
||||
#endif /* _UAPI__LINUX_BTF_H__ */
|
||||
|
140
kernel/bpf/btf.c
140
kernel/bpf/btf.c
@ -309,6 +309,7 @@ static const char * const btf_kind_str[NR_BTF_KINDS] = {
|
||||
[BTF_KIND_FLOAT] = "FLOAT",
|
||||
[BTF_KIND_DECL_TAG] = "DECL_TAG",
|
||||
[BTF_KIND_TYPE_TAG] = "TYPE_TAG",
|
||||
[BTF_KIND_ENUM64] = "ENUM64",
|
||||
};
|
||||
|
||||
const char *btf_type_str(const struct btf_type *t)
|
||||
@ -666,6 +667,7 @@ static bool btf_type_has_size(const struct btf_type *t)
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_DATASEC:
|
||||
case BTF_KIND_FLOAT:
|
||||
case BTF_KIND_ENUM64:
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -711,6 +713,11 @@ static const struct btf_decl_tag *btf_type_decl_tag(const struct btf_type *t)
|
||||
return (const struct btf_decl_tag *)(t + 1);
|
||||
}
|
||||
|
||||
static const struct btf_enum64 *btf_type_enum64(const struct btf_type *t)
|
||||
{
|
||||
return (const struct btf_enum64 *)(t + 1);
|
||||
}
|
||||
|
||||
static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
|
||||
{
|
||||
return kind_ops[BTF_INFO_KIND(t->info)];
|
||||
@ -1019,6 +1026,7 @@ static const char *btf_show_name(struct btf_show *show)
|
||||
parens = "{";
|
||||
break;
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_ENUM64:
|
||||
prefix = "enum";
|
||||
break;
|
||||
default:
|
||||
@ -1834,6 +1842,7 @@ __btf_resolve_size(const struct btf *btf, const struct btf_type *type,
|
||||
case BTF_KIND_UNION:
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_FLOAT:
|
||||
case BTF_KIND_ENUM64:
|
||||
size = type->size;
|
||||
goto resolved;
|
||||
|
||||
@ -3670,6 +3679,7 @@ static s32 btf_enum_check_meta(struct btf_verifier_env *env,
|
||||
{
|
||||
const struct btf_enum *enums = btf_type_enum(t);
|
||||
struct btf *btf = env->btf;
|
||||
const char *fmt_str;
|
||||
u16 i, nr_enums;
|
||||
u32 meta_needed;
|
||||
|
||||
@ -3683,11 +3693,6 @@ static s32 btf_enum_check_meta(struct btf_verifier_env *env,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (btf_type_kflag(t)) {
|
||||
btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (t->size > 8 || !is_power_of_2(t->size)) {
|
||||
btf_verifier_log_type(env, t, "Unexpected size");
|
||||
return -EINVAL;
|
||||
@ -3718,7 +3723,8 @@ static s32 btf_enum_check_meta(struct btf_verifier_env *env,
|
||||
|
||||
if (env->log.level == BPF_LOG_KERNEL)
|
||||
continue;
|
||||
btf_verifier_log(env, "\t%s val=%d\n",
|
||||
fmt_str = btf_type_kflag(t) ? "\t%s val=%d\n" : "\t%s val=%u\n";
|
||||
btf_verifier_log(env, fmt_str,
|
||||
__btf_name_by_offset(btf, enums[i].name_off),
|
||||
enums[i].val);
|
||||
}
|
||||
@ -3759,7 +3765,10 @@ static void btf_enum_show(const struct btf *btf, const struct btf_type *t,
|
||||
return;
|
||||
}
|
||||
|
||||
if (btf_type_kflag(t))
|
||||
btf_show_type_value(show, "%d", v);
|
||||
else
|
||||
btf_show_type_value(show, "%u", v);
|
||||
btf_show_end_type(show);
|
||||
}
|
||||
|
||||
@ -3772,6 +3781,109 @@ static struct btf_kind_operations enum_ops = {
|
||||
.show = btf_enum_show,
|
||||
};
|
||||
|
||||
static s32 btf_enum64_check_meta(struct btf_verifier_env *env,
|
||||
const struct btf_type *t,
|
||||
u32 meta_left)
|
||||
{
|
||||
const struct btf_enum64 *enums = btf_type_enum64(t);
|
||||
struct btf *btf = env->btf;
|
||||
const char *fmt_str;
|
||||
u16 i, nr_enums;
|
||||
u32 meta_needed;
|
||||
|
||||
nr_enums = btf_type_vlen(t);
|
||||
meta_needed = nr_enums * sizeof(*enums);
|
||||
|
||||
if (meta_left < meta_needed) {
|
||||
btf_verifier_log_basic(env, t,
|
||||
"meta_left:%u meta_needed:%u",
|
||||
meta_left, meta_needed);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (t->size > 8 || !is_power_of_2(t->size)) {
|
||||
btf_verifier_log_type(env, t, "Unexpected size");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* enum type either no name or a valid one */
|
||||
if (t->name_off &&
|
||||
!btf_name_valid_identifier(env->btf, t->name_off)) {
|
||||
btf_verifier_log_type(env, t, "Invalid name");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
btf_verifier_log_type(env, t, NULL);
|
||||
|
||||
for (i = 0; i < nr_enums; i++) {
|
||||
if (!btf_name_offset_valid(btf, enums[i].name_off)) {
|
||||
btf_verifier_log(env, "\tInvalid name_offset:%u",
|
||||
enums[i].name_off);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* enum member must have a valid name */
|
||||
if (!enums[i].name_off ||
|
||||
!btf_name_valid_identifier(btf, enums[i].name_off)) {
|
||||
btf_verifier_log_type(env, t, "Invalid name");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (env->log.level == BPF_LOG_KERNEL)
|
||||
continue;
|
||||
|
||||
fmt_str = btf_type_kflag(t) ? "\t%s val=%lld\n" : "\t%s val=%llu\n";
|
||||
btf_verifier_log(env, fmt_str,
|
||||
__btf_name_by_offset(btf, enums[i].name_off),
|
||||
btf_enum64_value(enums + i));
|
||||
}
|
||||
|
||||
return meta_needed;
|
||||
}
|
||||
|
||||
static void btf_enum64_show(const struct btf *btf, const struct btf_type *t,
|
||||
u32 type_id, void *data, u8 bits_offset,
|
||||
struct btf_show *show)
|
||||
{
|
||||
const struct btf_enum64 *enums = btf_type_enum64(t);
|
||||
u32 i, nr_enums = btf_type_vlen(t);
|
||||
void *safe_data;
|
||||
s64 v;
|
||||
|
||||
safe_data = btf_show_start_type(show, t, type_id, data);
|
||||
if (!safe_data)
|
||||
return;
|
||||
|
||||
v = *(u64 *)safe_data;
|
||||
|
||||
for (i = 0; i < nr_enums; i++) {
|
||||
if (v != btf_enum64_value(enums + i))
|
||||
continue;
|
||||
|
||||
btf_show_type_value(show, "%s",
|
||||
__btf_name_by_offset(btf,
|
||||
enums[i].name_off));
|
||||
|
||||
btf_show_end_type(show);
|
||||
return;
|
||||
}
|
||||
|
||||
if (btf_type_kflag(t))
|
||||
btf_show_type_value(show, "%lld", v);
|
||||
else
|
||||
btf_show_type_value(show, "%llu", v);
|
||||
btf_show_end_type(show);
|
||||
}
|
||||
|
||||
static struct btf_kind_operations enum64_ops = {
|
||||
.check_meta = btf_enum64_check_meta,
|
||||
.resolve = btf_df_resolve,
|
||||
.check_member = btf_enum_check_member,
|
||||
.check_kflag_member = btf_enum_check_kflag_member,
|
||||
.log_details = btf_enum_log,
|
||||
.show = btf_enum64_show,
|
||||
};
|
||||
|
||||
static s32 btf_func_proto_check_meta(struct btf_verifier_env *env,
|
||||
const struct btf_type *t,
|
||||
u32 meta_left)
|
||||
@ -4438,6 +4550,7 @@ static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = {
|
||||
[BTF_KIND_FLOAT] = &float_ops,
|
||||
[BTF_KIND_DECL_TAG] = &decl_tag_ops,
|
||||
[BTF_KIND_TYPE_TAG] = &modifier_ops,
|
||||
[BTF_KIND_ENUM64] = &enum64_ops,
|
||||
};
|
||||
|
||||
static s32 btf_check_meta(struct btf_verifier_env *env,
|
||||
@ -5299,7 +5412,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
|
||||
/* skip modifiers */
|
||||
while (btf_type_is_modifier(t))
|
||||
t = btf_type_by_id(btf, t->type);
|
||||
if (btf_type_is_small_int(t) || btf_type_is_enum(t))
|
||||
if (btf_type_is_small_int(t) || btf_is_any_enum(t))
|
||||
/* accessing a scalar */
|
||||
return true;
|
||||
if (!btf_type_is_ptr(t)) {
|
||||
@ -5763,7 +5876,7 @@ static int __get_type_size(struct btf *btf, u32 btf_id,
|
||||
if (btf_type_is_ptr(t))
|
||||
/* kernel size of pointer. Not BPF's size of pointer*/
|
||||
return sizeof(void *);
|
||||
if (btf_type_is_int(t) || btf_type_is_enum(t))
|
||||
if (btf_type_is_int(t) || btf_is_any_enum(t))
|
||||
return t->size;
|
||||
*bad_type = t;
|
||||
return -EINVAL;
|
||||
@ -5911,7 +6024,7 @@ static int btf_check_func_type_match(struct bpf_verifier_log *log,
|
||||
* to context only. And only global functions can be replaced.
|
||||
* Hence type check only those types.
|
||||
*/
|
||||
if (btf_type_is_int(t1) || btf_type_is_enum(t1))
|
||||
if (btf_type_is_int(t1) || btf_is_any_enum(t1))
|
||||
continue;
|
||||
if (!btf_type_is_ptr(t1)) {
|
||||
bpf_log(log,
|
||||
@ -6409,7 +6522,7 @@ int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
|
||||
t = btf_type_by_id(btf, t->type);
|
||||
while (btf_type_is_modifier(t))
|
||||
t = btf_type_by_id(btf, t->type);
|
||||
if (!btf_type_is_int(t) && !btf_type_is_enum(t)) {
|
||||
if (!btf_type_is_int(t) && !btf_is_any_enum(t)) {
|
||||
bpf_log(log,
|
||||
"Global function %s() doesn't return scalar. Only those are supported.\n",
|
||||
tname);
|
||||
@ -6424,7 +6537,7 @@ int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
|
||||
t = btf_type_by_id(btf, args[i].type);
|
||||
while (btf_type_is_modifier(t))
|
||||
t = btf_type_by_id(btf, t->type);
|
||||
if (btf_type_is_int(t) || btf_type_is_enum(t)) {
|
||||
if (btf_type_is_int(t) || btf_is_any_enum(t)) {
|
||||
reg->type = SCALAR_VALUE;
|
||||
continue;
|
||||
}
|
||||
@ -7336,6 +7449,7 @@ recur:
|
||||
case BTF_KIND_UNION:
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_FWD:
|
||||
case BTF_KIND_ENUM64:
|
||||
return 1;
|
||||
case BTF_KIND_INT:
|
||||
/* just reject deprecated bitfield-like integers; all other
|
||||
@ -7388,10 +7502,10 @@ recur:
|
||||
* field-based relocations. This function assumes that root types were already
|
||||
* checked for name match. Beyond that initial root-level name check, names
|
||||
* are completely ignored. Compatibility rules are as follows:
|
||||
* - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
|
||||
* - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs/ENUM64s are considered compatible, but
|
||||
* kind should match for local and target types (i.e., STRUCT is not
|
||||
* compatible with UNION);
|
||||
* - for ENUMs, the size is ignored;
|
||||
* - for ENUMs/ENUM64s, the size is ignored;
|
||||
* - for INT, size and signedness are ignored;
|
||||
* - for ARRAY, dimensionality is ignored, element types are checked for
|
||||
* compatibility recursively;
|
||||
|
@ -720,6 +720,60 @@ static struct bpf_prog_list *find_detach_entry(struct list_head *progs,
|
||||
return ERR_PTR(-ENOENT);
|
||||
}
|
||||
|
||||
/**
|
||||
* purge_effective_progs() - After compute_effective_progs fails to alloc new
|
||||
* cgrp->bpf.inactive table we can recover by
|
||||
* recomputing the array in place.
|
||||
*
|
||||
* @cgrp: The cgroup which descendants to travers
|
||||
* @prog: A program to detach or NULL
|
||||
* @link: A link to detach or NULL
|
||||
* @atype: Type of detach operation
|
||||
*/
|
||||
static void purge_effective_progs(struct cgroup *cgrp, struct bpf_prog *prog,
|
||||
struct bpf_cgroup_link *link,
|
||||
enum cgroup_bpf_attach_type atype)
|
||||
{
|
||||
struct cgroup_subsys_state *css;
|
||||
struct bpf_prog_array *progs;
|
||||
struct bpf_prog_list *pl;
|
||||
struct list_head *head;
|
||||
struct cgroup *cg;
|
||||
int pos;
|
||||
|
||||
/* recompute effective prog array in place */
|
||||
css_for_each_descendant_pre(css, &cgrp->self) {
|
||||
struct cgroup *desc = container_of(css, struct cgroup, self);
|
||||
|
||||
if (percpu_ref_is_zero(&desc->bpf.refcnt))
|
||||
continue;
|
||||
|
||||
/* find position of link or prog in effective progs array */
|
||||
for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
|
||||
if (pos && !(cg->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
|
||||
continue;
|
||||
|
||||
head = &cg->bpf.progs[atype];
|
||||
list_for_each_entry(pl, head, node) {
|
||||
if (!prog_list_prog(pl))
|
||||
continue;
|
||||
if (pl->prog == prog && pl->link == link)
|
||||
goto found;
|
||||
pos++;
|
||||
}
|
||||
}
|
||||
found:
|
||||
BUG_ON(!cg);
|
||||
progs = rcu_dereference_protected(
|
||||
desc->bpf.effective[atype],
|
||||
lockdep_is_held(&cgroup_mutex));
|
||||
|
||||
/* Remove the program from the array */
|
||||
WARN_ONCE(bpf_prog_array_delete_safe_at(progs, pos),
|
||||
"Failed to purge a prog from array at index %d", pos);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* __cgroup_bpf_detach() - Detach the program or link from a cgroup, and
|
||||
* propagate the change to descendants
|
||||
@ -739,7 +793,6 @@ static int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
|
||||
struct bpf_prog_list *pl;
|
||||
struct list_head *progs;
|
||||
u32 flags;
|
||||
int err;
|
||||
|
||||
atype = to_cgroup_bpf_attach_type(type);
|
||||
if (atype < 0)
|
||||
@ -761,9 +814,12 @@ static int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
|
||||
pl->prog = NULL;
|
||||
pl->link = NULL;
|
||||
|
||||
err = update_effective_progs(cgrp, atype);
|
||||
if (err)
|
||||
goto cleanup;
|
||||
if (update_effective_progs(cgrp, atype)) {
|
||||
/* if update effective array failed replace the prog with a dummy prog*/
|
||||
pl->prog = old_prog;
|
||||
pl->link = link;
|
||||
purge_effective_progs(cgrp, old_prog, link, atype);
|
||||
}
|
||||
|
||||
/* now can actually delete it from this cgroup list */
|
||||
list_del(&pl->node);
|
||||
@ -775,12 +831,6 @@ static int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
|
||||
bpf_prog_put(old_prog);
|
||||
static_branch_dec(&cgroup_bpf_enabled_key[atype]);
|
||||
return 0;
|
||||
|
||||
cleanup:
|
||||
/* restore back prog or link */
|
||||
pl->prog = old_prog;
|
||||
pl->link = link;
|
||||
return err;
|
||||
}
|
||||
|
||||
static int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
|
||||
|
@ -176,7 +176,7 @@ void bpf_prog_jit_attempt_done(struct bpf_prog *prog)
|
||||
* here is relative to the prog itself instead of the main prog.
|
||||
* This array has one entry for each xlated bpf insn.
|
||||
*
|
||||
* jited_off is the byte off to the last byte of the jited insn.
|
||||
* jited_off is the byte off to the end of the jited insn.
|
||||
*
|
||||
* Hence, with
|
||||
* insn_start:
|
||||
@ -2279,6 +2279,21 @@ void bpf_prog_array_free(struct bpf_prog_array *progs)
|
||||
kfree_rcu(progs, rcu);
|
||||
}
|
||||
|
||||
static void __bpf_prog_array_free_sleepable_cb(struct rcu_head *rcu)
|
||||
{
|
||||
struct bpf_prog_array *progs;
|
||||
|
||||
progs = container_of(rcu, struct bpf_prog_array, rcu);
|
||||
kfree_rcu(progs, rcu);
|
||||
}
|
||||
|
||||
void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs)
|
||||
{
|
||||
if (!progs || progs == &bpf_empty_prog_array.hdr)
|
||||
return;
|
||||
call_rcu_tasks_trace(&progs->rcu, __bpf_prog_array_free_sleepable_cb);
|
||||
}
|
||||
|
||||
int bpf_prog_array_length(struct bpf_prog_array *array)
|
||||
{
|
||||
struct bpf_prog_array_item *item;
|
||||
|
@ -584,7 +584,7 @@ BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2)
|
||||
return strncmp(s1, s2, s1_sz);
|
||||
}
|
||||
|
||||
const struct bpf_func_proto bpf_strncmp_proto = {
|
||||
static const struct bpf_func_proto bpf_strncmp_proto = {
|
||||
.func = bpf_strncmp,
|
||||
.gpl_only = false,
|
||||
.ret_type = RET_INTEGER,
|
||||
@ -1402,7 +1402,7 @@ BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr)
|
||||
*/
|
||||
#define BPF_PTR_POISON ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
|
||||
|
||||
const struct bpf_func_proto bpf_kptr_xchg_proto = {
|
||||
static const struct bpf_func_proto bpf_kptr_xchg_proto = {
|
||||
.func = bpf_kptr_xchg,
|
||||
.gpl_only = false,
|
||||
.ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
|
||||
@ -1487,7 +1487,7 @@ error:
|
||||
return err;
|
||||
}
|
||||
|
||||
const struct bpf_func_proto bpf_dynptr_from_mem_proto = {
|
||||
static const struct bpf_func_proto bpf_dynptr_from_mem_proto = {
|
||||
.func = bpf_dynptr_from_mem,
|
||||
.gpl_only = false,
|
||||
.ret_type = RET_INTEGER,
|
||||
@ -1513,7 +1513,7 @@ BPF_CALL_4(bpf_dynptr_read, void *, dst, u32, len, struct bpf_dynptr_kern *, src
|
||||
return 0;
|
||||
}
|
||||
|
||||
const struct bpf_func_proto bpf_dynptr_read_proto = {
|
||||
static const struct bpf_func_proto bpf_dynptr_read_proto = {
|
||||
.func = bpf_dynptr_read,
|
||||
.gpl_only = false,
|
||||
.ret_type = RET_INTEGER,
|
||||
@ -1539,7 +1539,7 @@ BPF_CALL_4(bpf_dynptr_write, struct bpf_dynptr_kern *, dst, u32, offset, void *,
|
||||
return 0;
|
||||
}
|
||||
|
||||
const struct bpf_func_proto bpf_dynptr_write_proto = {
|
||||
static const struct bpf_func_proto bpf_dynptr_write_proto = {
|
||||
.func = bpf_dynptr_write,
|
||||
.gpl_only = false,
|
||||
.ret_type = RET_INTEGER,
|
||||
@ -1566,7 +1566,7 @@ BPF_CALL_3(bpf_dynptr_data, struct bpf_dynptr_kern *, ptr, u32, offset, u32, len
|
||||
return (unsigned long)(ptr->data + ptr->offset + offset);
|
||||
}
|
||||
|
||||
const struct bpf_func_proto bpf_dynptr_data_proto = {
|
||||
static const struct bpf_func_proto bpf_dynptr_data_proto = {
|
||||
.func = bpf_dynptr_data,
|
||||
.gpl_only = false,
|
||||
.ret_type = RET_PTR_TO_DYNPTR_MEM_OR_NULL,
|
||||
|
@ -31,7 +31,7 @@ static inline void pcpu_freelist_push_node(struct pcpu_freelist_head *head,
|
||||
struct pcpu_freelist_node *node)
|
||||
{
|
||||
node->next = head->first;
|
||||
head->first = node;
|
||||
WRITE_ONCE(head->first, node);
|
||||
}
|
||||
|
||||
static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head,
|
||||
@ -130,14 +130,17 @@ static struct pcpu_freelist_node *___pcpu_freelist_pop(struct pcpu_freelist *s)
|
||||
orig_cpu = cpu = raw_smp_processor_id();
|
||||
while (1) {
|
||||
head = per_cpu_ptr(s->freelist, cpu);
|
||||
if (!READ_ONCE(head->first))
|
||||
goto next_cpu;
|
||||
raw_spin_lock(&head->lock);
|
||||
node = head->first;
|
||||
if (node) {
|
||||
head->first = node->next;
|
||||
WRITE_ONCE(head->first, node->next);
|
||||
raw_spin_unlock(&head->lock);
|
||||
return node;
|
||||
}
|
||||
raw_spin_unlock(&head->lock);
|
||||
next_cpu:
|
||||
cpu = cpumask_next(cpu, cpu_possible_mask);
|
||||
if (cpu >= nr_cpu_ids)
|
||||
cpu = 0;
|
||||
@ -146,10 +149,12 @@ static struct pcpu_freelist_node *___pcpu_freelist_pop(struct pcpu_freelist *s)
|
||||
}
|
||||
|
||||
/* per cpu lists are all empty, try extralist */
|
||||
if (!READ_ONCE(s->extralist.first))
|
||||
return NULL;
|
||||
raw_spin_lock(&s->extralist.lock);
|
||||
node = s->extralist.first;
|
||||
if (node)
|
||||
s->extralist.first = node->next;
|
||||
WRITE_ONCE(s->extralist.first, node->next);
|
||||
raw_spin_unlock(&s->extralist.lock);
|
||||
return node;
|
||||
}
|
||||
@ -164,15 +169,18 @@ ___pcpu_freelist_pop_nmi(struct pcpu_freelist *s)
|
||||
orig_cpu = cpu = raw_smp_processor_id();
|
||||
while (1) {
|
||||
head = per_cpu_ptr(s->freelist, cpu);
|
||||
if (!READ_ONCE(head->first))
|
||||
goto next_cpu;
|
||||
if (raw_spin_trylock(&head->lock)) {
|
||||
node = head->first;
|
||||
if (node) {
|
||||
head->first = node->next;
|
||||
WRITE_ONCE(head->first, node->next);
|
||||
raw_spin_unlock(&head->lock);
|
||||
return node;
|
||||
}
|
||||
raw_spin_unlock(&head->lock);
|
||||
}
|
||||
next_cpu:
|
||||
cpu = cpumask_next(cpu, cpu_possible_mask);
|
||||
if (cpu >= nr_cpu_ids)
|
||||
cpu = 0;
|
||||
@ -181,11 +189,11 @@ ___pcpu_freelist_pop_nmi(struct pcpu_freelist *s)
|
||||
}
|
||||
|
||||
/* cannot pop from per cpu lists, try extralist */
|
||||
if (!raw_spin_trylock(&s->extralist.lock))
|
||||
if (!READ_ONCE(s->extralist.first) || !raw_spin_trylock(&s->extralist.lock))
|
||||
return NULL;
|
||||
node = s->extralist.first;
|
||||
if (node)
|
||||
s->extralist.first = node->next;
|
||||
WRITE_ONCE(s->extralist.first, node->next);
|
||||
raw_spin_unlock(&s->extralist.lock);
|
||||
return node;
|
||||
}
|
||||
|
@ -4090,14 +4090,15 @@ static int bpf_prog_get_info_by_fd(struct file *file,
|
||||
info.nr_jited_line_info = 0;
|
||||
if (info.nr_jited_line_info && ulen) {
|
||||
if (bpf_dump_raw_ok(file->f_cred)) {
|
||||
unsigned long line_addr;
|
||||
__u64 __user *user_linfo;
|
||||
u32 i;
|
||||
|
||||
user_linfo = u64_to_user_ptr(info.jited_line_info);
|
||||
ulen = min_t(u32, info.nr_jited_line_info, ulen);
|
||||
for (i = 0; i < ulen; i++) {
|
||||
if (put_user((__u64)(long)prog->aux->jited_linfo[i],
|
||||
&user_linfo[i]))
|
||||
line_addr = (unsigned long)prog->aux->jited_linfo[i];
|
||||
if (put_user((__u64)line_addr, &user_linfo[i]))
|
||||
return -EFAULT;
|
||||
}
|
||||
} else {
|
||||
@ -5130,7 +5131,7 @@ BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flag
|
||||
return *res ? 0 : -ENOENT;
|
||||
}
|
||||
|
||||
const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = {
|
||||
static const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = {
|
||||
.func = bpf_kallsyms_lookup_name,
|
||||
.gpl_only = false,
|
||||
.ret_type = RET_INTEGER,
|
||||
|
@ -5848,6 +5848,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
|
||||
struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno];
|
||||
enum bpf_arg_type arg_type = fn->arg_type[arg];
|
||||
enum bpf_reg_type type = reg->type;
|
||||
u32 *arg_btf_id = NULL;
|
||||
int err = 0;
|
||||
|
||||
if (arg_type == ARG_DONTCARE)
|
||||
@ -5884,7 +5885,11 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
|
||||
*/
|
||||
goto skip_type_check;
|
||||
|
||||
err = check_reg_type(env, regno, arg_type, fn->arg_btf_id[arg], meta);
|
||||
/* arg_btf_id and arg_size are in a union. */
|
||||
if (base_type(arg_type) == ARG_PTR_TO_BTF_ID)
|
||||
arg_btf_id = fn->arg_btf_id[arg];
|
||||
|
||||
err = check_reg_type(env, regno, arg_type, arg_btf_id, meta);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@ -6011,6 +6016,11 @@ skip_type_check:
|
||||
* next is_mem_size argument below.
|
||||
*/
|
||||
meta->raw_mode = arg_type & MEM_UNINIT;
|
||||
if (arg_type & MEM_FIXED_SIZE) {
|
||||
err = check_helper_mem_access(env, regno,
|
||||
fn->arg_size[arg], false,
|
||||
meta);
|
||||
}
|
||||
} else if (arg_type_is_mem_size(arg_type)) {
|
||||
bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
|
||||
|
||||
@ -6400,11 +6410,19 @@ static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
|
||||
return count <= 1;
|
||||
}
|
||||
|
||||
static bool check_args_pair_invalid(enum bpf_arg_type arg_curr,
|
||||
enum bpf_arg_type arg_next)
|
||||
static bool check_args_pair_invalid(const struct bpf_func_proto *fn, int arg)
|
||||
{
|
||||
return (base_type(arg_curr) == ARG_PTR_TO_MEM) !=
|
||||
arg_type_is_mem_size(arg_next);
|
||||
bool is_fixed = fn->arg_type[arg] & MEM_FIXED_SIZE;
|
||||
bool has_size = fn->arg_size[arg] != 0;
|
||||
bool is_next_size = false;
|
||||
|
||||
if (arg + 1 < ARRAY_SIZE(fn->arg_type))
|
||||
is_next_size = arg_type_is_mem_size(fn->arg_type[arg + 1]);
|
||||
|
||||
if (base_type(fn->arg_type[arg]) != ARG_PTR_TO_MEM)
|
||||
return is_next_size;
|
||||
|
||||
return has_size == is_next_size || is_next_size == is_fixed;
|
||||
}
|
||||
|
||||
static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
|
||||
@ -6415,11 +6433,11 @@ static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
|
||||
* helper function specification.
|
||||
*/
|
||||
if (arg_type_is_mem_size(fn->arg1_type) ||
|
||||
base_type(fn->arg5_type) == ARG_PTR_TO_MEM ||
|
||||
check_args_pair_invalid(fn->arg1_type, fn->arg2_type) ||
|
||||
check_args_pair_invalid(fn->arg2_type, fn->arg3_type) ||
|
||||
check_args_pair_invalid(fn->arg3_type, fn->arg4_type) ||
|
||||
check_args_pair_invalid(fn->arg4_type, fn->arg5_type))
|
||||
check_args_pair_invalid(fn, 0) ||
|
||||
check_args_pair_invalid(fn, 1) ||
|
||||
check_args_pair_invalid(fn, 2) ||
|
||||
check_args_pair_invalid(fn, 3) ||
|
||||
check_args_pair_invalid(fn, 4))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
@ -6460,7 +6478,10 @@ static bool check_btf_id_ok(const struct bpf_func_proto *fn)
|
||||
if (base_type(fn->arg_type[i]) == ARG_PTR_TO_BTF_ID && !fn->arg_btf_id[i])
|
||||
return false;
|
||||
|
||||
if (base_type(fn->arg_type[i]) != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i])
|
||||
if (base_type(fn->arg_type[i]) != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i] &&
|
||||
/* arg_btf_id and arg_size are in a union. */
|
||||
(base_type(fn->arg_type[i]) != ARG_PTR_TO_MEM ||
|
||||
!(fn->arg_type[i] & MEM_FIXED_SIZE)))
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -10901,7 +10922,7 @@ static int check_btf_func(struct bpf_verifier_env *env,
|
||||
goto err_free;
|
||||
ret_type = btf_type_skip_modifiers(btf, func_proto->type, NULL);
|
||||
scalar_return =
|
||||
btf_type_is_small_int(ret_type) || btf_type_is_enum(ret_type);
|
||||
btf_type_is_small_int(ret_type) || btf_is_any_enum(ret_type);
|
||||
if (i && !scalar_return && env->subprog_info[i].has_ld_abs) {
|
||||
verbose(env, "LD_ABS is only allowed in functions that return 'int'.\n");
|
||||
goto err_free;
|
||||
@ -14829,8 +14850,8 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
|
||||
}
|
||||
|
||||
if (prog->aux->sleepable && prog->type != BPF_PROG_TYPE_TRACING &&
|
||||
prog->type != BPF_PROG_TYPE_LSM) {
|
||||
verbose(env, "Only fentry/fexit/fmod_ret and lsm programs can be sleepable\n");
|
||||
prog->type != BPF_PROG_TYPE_LSM && prog->type != BPF_PROG_TYPE_KPROBE) {
|
||||
verbose(env, "Only fentry/fexit/fmod_ret, lsm, and kprobe/uprobe programs can be sleepable\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -10068,26 +10068,30 @@ static inline bool perf_event_is_tracing(struct perf_event *event)
|
||||
int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog,
|
||||
u64 bpf_cookie)
|
||||
{
|
||||
bool is_kprobe, is_tracepoint, is_syscall_tp;
|
||||
bool is_kprobe, is_uprobe, is_tracepoint, is_syscall_tp;
|
||||
|
||||
if (!perf_event_is_tracing(event))
|
||||
return perf_event_set_bpf_handler(event, prog, bpf_cookie);
|
||||
|
||||
is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_UKPROBE;
|
||||
is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_KPROBE;
|
||||
is_uprobe = event->tp_event->flags & TRACE_EVENT_FL_UPROBE;
|
||||
is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT;
|
||||
is_syscall_tp = is_syscall_trace_event(event->tp_event);
|
||||
if (!is_kprobe && !is_tracepoint && !is_syscall_tp)
|
||||
if (!is_kprobe && !is_uprobe && !is_tracepoint && !is_syscall_tp)
|
||||
/* bpf programs can only be attached to u/kprobe or tracepoint */
|
||||
return -EINVAL;
|
||||
|
||||
if ((is_kprobe && prog->type != BPF_PROG_TYPE_KPROBE) ||
|
||||
if (((is_kprobe || is_uprobe) && prog->type != BPF_PROG_TYPE_KPROBE) ||
|
||||
(is_tracepoint && prog->type != BPF_PROG_TYPE_TRACEPOINT) ||
|
||||
(is_syscall_tp && prog->type != BPF_PROG_TYPE_TRACEPOINT))
|
||||
return -EINVAL;
|
||||
|
||||
if (prog->type == BPF_PROG_TYPE_KPROBE && prog->aux->sleepable && !is_uprobe)
|
||||
/* only uprobe programs are allowed to be sleepable */
|
||||
return -EINVAL;
|
||||
|
||||
/* Kprobe override only works for kprobes, not uprobes. */
|
||||
if (prog->kprobe_override &&
|
||||
!(event->tp_event->flags & TRACE_EVENT_FL_KPROBE))
|
||||
if (prog->kprobe_override && !is_kprobe)
|
||||
return -EINVAL;
|
||||
|
||||
if (is_tracepoint || is_syscall_tp) {
|
||||
|
@ -1936,7 +1936,7 @@ int perf_event_attach_bpf_prog(struct perf_event *event,
|
||||
event->prog = prog;
|
||||
event->bpf_cookie = bpf_cookie;
|
||||
rcu_assign_pointer(event->tp_event->prog_array, new_array);
|
||||
bpf_prog_array_free(old_array);
|
||||
bpf_prog_array_free_sleepable(old_array);
|
||||
|
||||
unlock:
|
||||
mutex_unlock(&bpf_event_mutex);
|
||||
@ -1962,7 +1962,7 @@ void perf_event_detach_bpf_prog(struct perf_event *event)
|
||||
bpf_prog_array_delete_safe(old_array, event->prog);
|
||||
} else {
|
||||
rcu_assign_pointer(event->tp_event->prog_array, new_array);
|
||||
bpf_prog_array_free(old_array);
|
||||
bpf_prog_array_free_sleepable(old_array);
|
||||
}
|
||||
|
||||
bpf_prog_put(event->prog);
|
||||
|
@ -16,6 +16,7 @@
|
||||
#include <linux/namei.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/rculist.h>
|
||||
#include <linux/filter.h>
|
||||
|
||||
#include "trace_dynevent.h"
|
||||
#include "trace_probe.h"
|
||||
@ -1346,9 +1347,7 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
|
||||
if (bpf_prog_array_valid(call)) {
|
||||
u32 ret;
|
||||
|
||||
preempt_disable();
|
||||
ret = trace_call_bpf(call, regs);
|
||||
preempt_enable();
|
||||
ret = bpf_prog_run_array_sleepable(call->prog_array, regs, bpf_prog_run);
|
||||
if (!ret)
|
||||
return;
|
||||
}
|
||||
|
@ -1420,9 +1420,6 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
|
||||
void *data;
|
||||
int ret;
|
||||
|
||||
if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
|
||||
return -EINVAL;
|
||||
|
||||
if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
|
||||
return -EINVAL;
|
||||
|
||||
@ -1487,9 +1484,6 @@ int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kat
|
||||
u32 retval, duration;
|
||||
int ret = -EINVAL;
|
||||
|
||||
if (prog->type != BPF_PROG_TYPE_SK_LOOKUP)
|
||||
return -EINVAL;
|
||||
|
||||
if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
|
||||
return -EINVAL;
|
||||
|
||||
|
@ -6463,8 +6463,6 @@ static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
|
||||
|
||||
/* bpf_skc_lookup performs the core lookup for different types of sockets,
|
||||
* taking a reference on the socket if it doesn't have the flag SOCK_RCU_FREE.
|
||||
* Returns the socket as an 'unsigned long' to simplify the casting in the
|
||||
* callers to satisfy BPF_CALL declarations.
|
||||
*/
|
||||
static struct sock *
|
||||
__bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
|
||||
@ -7444,6 +7442,114 @@ static const struct bpf_func_proto bpf_skb_set_tstamp_proto = {
|
||||
.arg3_type = ARG_ANYTHING,
|
||||
};
|
||||
|
||||
#ifdef CONFIG_SYN_COOKIES
|
||||
BPF_CALL_3(bpf_tcp_raw_gen_syncookie_ipv4, struct iphdr *, iph,
|
||||
struct tcphdr *, th, u32, th_len)
|
||||
{
|
||||
u32 cookie;
|
||||
u16 mss;
|
||||
|
||||
if (unlikely(th_len < sizeof(*th) || th_len != th->doff * 4))
|
||||
return -EINVAL;
|
||||
|
||||
mss = tcp_parse_mss_option(th, 0) ?: TCP_MSS_DEFAULT;
|
||||
cookie = __cookie_v4_init_sequence(iph, th, &mss);
|
||||
|
||||
return cookie | ((u64)mss << 32);
|
||||
}
|
||||
|
||||
static const struct bpf_func_proto bpf_tcp_raw_gen_syncookie_ipv4_proto = {
|
||||
.func = bpf_tcp_raw_gen_syncookie_ipv4,
|
||||
.gpl_only = true, /* __cookie_v4_init_sequence() is GPL */
|
||||
.pkt_access = true,
|
||||
.ret_type = RET_INTEGER,
|
||||
.arg1_type = ARG_PTR_TO_FIXED_SIZE_MEM,
|
||||
.arg1_size = sizeof(struct iphdr),
|
||||
.arg2_type = ARG_PTR_TO_MEM,
|
||||
.arg3_type = ARG_CONST_SIZE,
|
||||
};
|
||||
|
||||
BPF_CALL_3(bpf_tcp_raw_gen_syncookie_ipv6, struct ipv6hdr *, iph,
|
||||
struct tcphdr *, th, u32, th_len)
|
||||
{
|
||||
#if IS_BUILTIN(CONFIG_IPV6)
|
||||
const u16 mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
|
||||
sizeof(struct ipv6hdr);
|
||||
u32 cookie;
|
||||
u16 mss;
|
||||
|
||||
if (unlikely(th_len < sizeof(*th) || th_len != th->doff * 4))
|
||||
return -EINVAL;
|
||||
|
||||
mss = tcp_parse_mss_option(th, 0) ?: mss_clamp;
|
||||
cookie = __cookie_v6_init_sequence(iph, th, &mss);
|
||||
|
||||
return cookie | ((u64)mss << 32);
|
||||
#else
|
||||
return -EPROTONOSUPPORT;
|
||||
#endif
|
||||
}
|
||||
|
||||
static const struct bpf_func_proto bpf_tcp_raw_gen_syncookie_ipv6_proto = {
|
||||
.func = bpf_tcp_raw_gen_syncookie_ipv6,
|
||||
.gpl_only = true, /* __cookie_v6_init_sequence() is GPL */
|
||||
.pkt_access = true,
|
||||
.ret_type = RET_INTEGER,
|
||||
.arg1_type = ARG_PTR_TO_FIXED_SIZE_MEM,
|
||||
.arg1_size = sizeof(struct ipv6hdr),
|
||||
.arg2_type = ARG_PTR_TO_MEM,
|
||||
.arg3_type = ARG_CONST_SIZE,
|
||||
};
|
||||
|
||||
BPF_CALL_2(bpf_tcp_raw_check_syncookie_ipv4, struct iphdr *, iph,
|
||||
struct tcphdr *, th)
|
||||
{
|
||||
u32 cookie = ntohl(th->ack_seq) - 1;
|
||||
|
||||
if (__cookie_v4_check(iph, th, cookie) > 0)
|
||||
return 0;
|
||||
|
||||
return -EACCES;
|
||||
}
|
||||
|
||||
static const struct bpf_func_proto bpf_tcp_raw_check_syncookie_ipv4_proto = {
|
||||
.func = bpf_tcp_raw_check_syncookie_ipv4,
|
||||
.gpl_only = true, /* __cookie_v4_check is GPL */
|
||||
.pkt_access = true,
|
||||
.ret_type = RET_INTEGER,
|
||||
.arg1_type = ARG_PTR_TO_FIXED_SIZE_MEM,
|
||||
.arg1_size = sizeof(struct iphdr),
|
||||
.arg2_type = ARG_PTR_TO_FIXED_SIZE_MEM,
|
||||
.arg2_size = sizeof(struct tcphdr),
|
||||
};
|
||||
|
||||
BPF_CALL_2(bpf_tcp_raw_check_syncookie_ipv6, struct ipv6hdr *, iph,
|
||||
struct tcphdr *, th)
|
||||
{
|
||||
#if IS_BUILTIN(CONFIG_IPV6)
|
||||
u32 cookie = ntohl(th->ack_seq) - 1;
|
||||
|
||||
if (__cookie_v6_check(iph, th, cookie) > 0)
|
||||
return 0;
|
||||
|
||||
return -EACCES;
|
||||
#else
|
||||
return -EPROTONOSUPPORT;
|
||||
#endif
|
||||
}
|
||||
|
||||
static const struct bpf_func_proto bpf_tcp_raw_check_syncookie_ipv6_proto = {
|
||||
.func = bpf_tcp_raw_check_syncookie_ipv6,
|
||||
.gpl_only = true, /* __cookie_v6_check is GPL */
|
||||
.pkt_access = true,
|
||||
.ret_type = RET_INTEGER,
|
||||
.arg1_type = ARG_PTR_TO_FIXED_SIZE_MEM,
|
||||
.arg1_size = sizeof(struct ipv6hdr),
|
||||
.arg2_type = ARG_PTR_TO_FIXED_SIZE_MEM,
|
||||
.arg2_size = sizeof(struct tcphdr),
|
||||
};
|
||||
#endif /* CONFIG_SYN_COOKIES */
|
||||
|
||||
#endif /* CONFIG_INET */
|
||||
|
||||
bool bpf_helper_changes_pkt_data(void *func)
|
||||
@ -7807,6 +7913,16 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
||||
return &bpf_sk_assign_proto;
|
||||
case BPF_FUNC_skb_set_tstamp:
|
||||
return &bpf_skb_set_tstamp_proto;
|
||||
#ifdef CONFIG_SYN_COOKIES
|
||||
case BPF_FUNC_tcp_raw_gen_syncookie_ipv4:
|
||||
return &bpf_tcp_raw_gen_syncookie_ipv4_proto;
|
||||
case BPF_FUNC_tcp_raw_gen_syncookie_ipv6:
|
||||
return &bpf_tcp_raw_gen_syncookie_ipv6_proto;
|
||||
case BPF_FUNC_tcp_raw_check_syncookie_ipv4:
|
||||
return &bpf_tcp_raw_check_syncookie_ipv4_proto;
|
||||
case BPF_FUNC_tcp_raw_check_syncookie_ipv6:
|
||||
return &bpf_tcp_raw_check_syncookie_ipv6_proto;
|
||||
#endif
|
||||
#endif
|
||||
default:
|
||||
return bpf_sk_base_func_proto(func_id);
|
||||
@ -7856,6 +7972,16 @@ xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
||||
return &bpf_tcp_check_syncookie_proto;
|
||||
case BPF_FUNC_tcp_gen_syncookie:
|
||||
return &bpf_tcp_gen_syncookie_proto;
|
||||
#ifdef CONFIG_SYN_COOKIES
|
||||
case BPF_FUNC_tcp_raw_gen_syncookie_ipv4:
|
||||
return &bpf_tcp_raw_gen_syncookie_ipv4_proto;
|
||||
case BPF_FUNC_tcp_raw_gen_syncookie_ipv6:
|
||||
return &bpf_tcp_raw_gen_syncookie_ipv6_proto;
|
||||
case BPF_FUNC_tcp_raw_check_syncookie_ipv4:
|
||||
return &bpf_tcp_raw_check_syncookie_ipv4_proto;
|
||||
case BPF_FUNC_tcp_raw_check_syncookie_ipv6:
|
||||
return &bpf_tcp_raw_check_syncookie_ipv6_proto;
|
||||
#endif
|
||||
#endif
|
||||
default:
|
||||
return bpf_sk_base_func_proto(func_id);
|
||||
|
@ -715,6 +715,7 @@ struct sk_psock *sk_psock_init(struct sock *sk, int node)
|
||||
psock->eval = __SK_NONE;
|
||||
psock->sk_proto = prot;
|
||||
psock->saved_unhash = prot->unhash;
|
||||
psock->saved_destroy = prot->destroy;
|
||||
psock->saved_close = prot->close;
|
||||
psock->saved_write_space = sk->sk_write_space;
|
||||
|
||||
|
@ -1561,6 +1561,29 @@ void sock_map_unhash(struct sock *sk)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(sock_map_unhash);
|
||||
|
||||
void sock_map_destroy(struct sock *sk)
|
||||
{
|
||||
void (*saved_destroy)(struct sock *sk);
|
||||
struct sk_psock *psock;
|
||||
|
||||
rcu_read_lock();
|
||||
psock = sk_psock_get(sk);
|
||||
if (unlikely(!psock)) {
|
||||
rcu_read_unlock();
|
||||
if (sk->sk_prot->destroy)
|
||||
sk->sk_prot->destroy(sk);
|
||||
return;
|
||||
}
|
||||
|
||||
saved_destroy = psock->saved_destroy;
|
||||
sock_map_remove_links(sk, psock);
|
||||
rcu_read_unlock();
|
||||
sk_psock_stop(psock, true);
|
||||
sk_psock_put(sk, psock);
|
||||
saved_destroy(sk);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(sock_map_destroy);
|
||||
|
||||
void sock_map_close(struct sock *sk, long timeout)
|
||||
{
|
||||
void (*saved_close)(struct sock *sk, long timeout);
|
||||
|
@ -540,6 +540,7 @@ static void tcp_bpf_rebuild_protos(struct proto prot[TCP_BPF_NUM_CFGS],
|
||||
struct proto *base)
|
||||
{
|
||||
prot[TCP_BPF_BASE] = *base;
|
||||
prot[TCP_BPF_BASE].destroy = sock_map_destroy;
|
||||
prot[TCP_BPF_BASE].close = sock_map_close;
|
||||
prot[TCP_BPF_BASE].recvmsg = tcp_bpf_recvmsg;
|
||||
prot[TCP_BPF_BASE].sock_is_readable = sk_msg_is_readable;
|
||||
|
@ -3966,7 +3966,7 @@ static bool smc_parse_options(const struct tcphdr *th,
|
||||
/* Try to parse the MSS option from the TCP header. Return 0 on failure, clamped
|
||||
* value on success.
|
||||
*/
|
||||
static u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss)
|
||||
u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss)
|
||||
{
|
||||
const unsigned char *ptr = (const unsigned char *)(th + 1);
|
||||
int length = (th->doff * 4) - sizeof(struct tcphdr);
|
||||
@ -4005,6 +4005,7 @@ static u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss)
|
||||
}
|
||||
return mss;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(tcp_parse_mss_option);
|
||||
|
||||
/* Look for tcp options. Normally only called on SYN and SYNACK packets.
|
||||
* But, this can also be called on packets in the established flow when
|
||||
|
@ -57,7 +57,7 @@ static int xdp_umem_addr_map(struct xdp_umem *umem, struct page **pages,
|
||||
static void xdp_umem_release(struct xdp_umem *umem)
|
||||
{
|
||||
umem->zc = false;
|
||||
ida_simple_remove(&umem_ida, umem->id);
|
||||
ida_free(&umem_ida, umem->id);
|
||||
|
||||
xdp_umem_addr_unmap(umem);
|
||||
xdp_umem_unpin_pages(umem);
|
||||
@ -242,7 +242,7 @@ struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
|
||||
if (!umem)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
err = ida_simple_get(&umem_ida, 0, 0, GFP_KERNEL);
|
||||
err = ida_alloc(&umem_ida, GFP_KERNEL);
|
||||
if (err < 0) {
|
||||
kfree(umem);
|
||||
return ERR_PTR(err);
|
||||
@ -251,7 +251,7 @@ struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
|
||||
|
||||
err = xdp_umem_reg(umem, mr);
|
||||
if (err) {
|
||||
ida_simple_remove(&umem_ida, umem->id);
|
||||
ida_free(&umem_ida, umem->id);
|
||||
kfree(umem);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
@ -47,17 +47,60 @@ static int do_attach(int idx, int prog_fd, int map_fd, const char *name)
|
||||
return err;
|
||||
}
|
||||
|
||||
static int do_detach(int idx, const char *name)
|
||||
static int do_detach(int ifindex, const char *ifname, const char *app_name)
|
||||
{
|
||||
int err;
|
||||
LIBBPF_OPTS(bpf_xdp_attach_opts, opts);
|
||||
struct bpf_prog_info prog_info = {};
|
||||
char prog_name[BPF_OBJ_NAME_LEN];
|
||||
__u32 info_len, curr_prog_id;
|
||||
int prog_fd;
|
||||
int err = 1;
|
||||
|
||||
err = bpf_xdp_detach(idx, xdp_flags, NULL);
|
||||
if (bpf_xdp_query_id(ifindex, xdp_flags, &curr_prog_id)) {
|
||||
printf("ERROR: bpf_xdp_query_id failed (%s)\n",
|
||||
strerror(errno));
|
||||
return err;
|
||||
}
|
||||
|
||||
if (!curr_prog_id) {
|
||||
printf("ERROR: flags(0x%x) xdp prog is not attached to %s\n",
|
||||
xdp_flags, ifname);
|
||||
return err;
|
||||
}
|
||||
|
||||
info_len = sizeof(prog_info);
|
||||
prog_fd = bpf_prog_get_fd_by_id(curr_prog_id);
|
||||
if (prog_fd < 0) {
|
||||
printf("ERROR: bpf_prog_get_fd_by_id failed (%s)\n",
|
||||
strerror(errno));
|
||||
return prog_fd;
|
||||
}
|
||||
|
||||
err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
|
||||
if (err) {
|
||||
printf("ERROR: bpf_obj_get_info_by_fd failed (%s)\n",
|
||||
strerror(errno));
|
||||
goto close_out;
|
||||
}
|
||||
snprintf(prog_name, sizeof(prog_name), "%s_prog", app_name);
|
||||
prog_name[BPF_OBJ_NAME_LEN - 1] = '\0';
|
||||
|
||||
if (strcmp(prog_info.name, prog_name)) {
|
||||
printf("ERROR: %s isn't attached to %s\n", app_name, ifname);
|
||||
err = 1;
|
||||
goto close_out;
|
||||
}
|
||||
|
||||
opts.old_prog_fd = prog_fd;
|
||||
err = bpf_xdp_detach(ifindex, xdp_flags, &opts);
|
||||
if (err < 0)
|
||||
printf("ERROR: failed to detach program from %s\n", name);
|
||||
|
||||
printf("ERROR: failed to detach program from %s (%s)\n",
|
||||
ifname, strerror(errno));
|
||||
/* TODO: Remember to cleanup map, when adding use of shared map
|
||||
* bpf_map_delete_elem((map_fd, &idx);
|
||||
*/
|
||||
close_out:
|
||||
close(prog_fd);
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -169,7 +212,7 @@ int main(int argc, char **argv)
|
||||
return 1;
|
||||
}
|
||||
if (!attach) {
|
||||
err = do_detach(idx, argv[i]);
|
||||
err = do_detach(idx, argv[i], prog_name);
|
||||
if (err)
|
||||
ret = err;
|
||||
} else {
|
||||
|
@ -150,6 +150,15 @@ int xdp_router_ipv4_prog(struct xdp_md *ctx)
|
||||
|
||||
dest_mac = bpf_map_lookup_elem(&arp_table,
|
||||
&prefix_value->gw);
|
||||
if (!dest_mac) {
|
||||
/* Forward the packet to the kernel in
|
||||
* order to trigger ARP discovery for
|
||||
* the default gw.
|
||||
*/
|
||||
if (rec)
|
||||
NO_TEAR_INC(rec->xdp_pass);
|
||||
return XDP_PASS;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -635,6 +635,8 @@ class PrinterHelpers(Printer):
|
||||
'struct bpf_timer',
|
||||
'struct mptcp_sock',
|
||||
'struct bpf_dynptr',
|
||||
'struct iphdr',
|
||||
'struct ipv6hdr',
|
||||
]
|
||||
known_types = {
|
||||
'...',
|
||||
@ -686,6 +688,8 @@ class PrinterHelpers(Printer):
|
||||
'struct bpf_timer',
|
||||
'struct mptcp_sock',
|
||||
'struct bpf_dynptr',
|
||||
'struct iphdr',
|
||||
'struct ipv6hdr',
|
||||
}
|
||||
mapped_types = {
|
||||
'u8': '__u8',
|
||||
|
@ -31,11 +31,17 @@ CGROUP COMMANDS
|
||||
| **bpftool** **cgroup help**
|
||||
|
|
||||
| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* }
|
||||
| *ATTACH_TYPE* := { **ingress** | **egress** | **sock_create** | **sock_ops** | **device** |
|
||||
| **bind4** | **bind6** | **post_bind4** | **post_bind6** | **connect4** | **connect6** |
|
||||
| **getpeername4** | **getpeername6** | **getsockname4** | **getsockname6** | **sendmsg4** |
|
||||
| **sendmsg6** | **recvmsg4** | **recvmsg6** | **sysctl** | **getsockopt** | **setsockopt** |
|
||||
| **sock_release** }
|
||||
| *ATTACH_TYPE* := { **cgroup_inet_ingress** | **cgroup_inet_egress** |
|
||||
| **cgroup_inet_sock_create** | **cgroup_sock_ops** |
|
||||
| **cgroup_device** | **cgroup_inet4_bind** | **cgroup_inet6_bind** |
|
||||
| **cgroup_inet4_post_bind** | **cgroup_inet6_post_bind** |
|
||||
| **cgroup_inet4_connect** | **cgroup_inet6_connect** |
|
||||
| **cgroup_inet4_getpeername** | **cgroup_inet6_getpeername** |
|
||||
| **cgroup_inet4_getsockname** | **cgroup_inet6_getsockname** |
|
||||
| **cgroup_udp4_sendmsg** | **cgroup_udp6_sendmsg** |
|
||||
| **cgroup_udp4_recvmsg** | **cgroup_udp6_recvmsg** |
|
||||
| **cgroup_sysctl** | **cgroup_getsockopt** | **cgroup_setsockopt** |
|
||||
| **cgroup_inet_sock_release** }
|
||||
| *ATTACH_FLAGS* := { **multi** | **override** }
|
||||
|
||||
DESCRIPTION
|
||||
|
@ -54,7 +54,8 @@ PROG COMMANDS
|
||||
| **struct_ops** | **fentry** | **fexit** | **freplace** | **sk_lookup**
|
||||
| }
|
||||
| *ATTACH_TYPE* := {
|
||||
| **msg_verdict** | **skb_verdict** | **stream_verdict** | **stream_parser** | **flow_dissector**
|
||||
| **sk_msg_verdict** | **sk_skb_verdict** | **sk_skb_stream_verdict** |
|
||||
| **sk_skb_stream_parser** | **flow_dissector**
|
||||
| }
|
||||
| *METRICs* := {
|
||||
| **cycles** | **instructions** | **l1d_loads** | **llc_misses** |
|
||||
|
@ -53,7 +53,7 @@ $(LIBBPF_INTERNAL_HDRS): $(LIBBPF_HDRS_DIR)/%.h: $(BPF_DIR)/%.h | $(LIBBPF_HDRS_
|
||||
$(LIBBPF_BOOTSTRAP): $(wildcard $(BPF_DIR)/*.[ch] $(BPF_DIR)/Makefile) | $(LIBBPF_BOOTSTRAP_OUTPUT)
|
||||
$(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(LIBBPF_BOOTSTRAP_OUTPUT) \
|
||||
DESTDIR=$(LIBBPF_BOOTSTRAP_DESTDIR:/=) prefix= \
|
||||
ARCH= CROSS_COMPILE= CC=$(HOSTCC) LD=$(HOSTLD) $@ install_headers
|
||||
ARCH= CROSS_COMPILE= CC=$(HOSTCC) LD=$(HOSTLD) AR=$(HOSTAR) $@ install_headers
|
||||
|
||||
$(LIBBPF_BOOTSTRAP_INTERNAL_HDRS): $(LIBBPF_BOOTSTRAP_HDRS_DIR)/%.h: $(BPF_DIR)/%.h | $(LIBBPF_BOOTSTRAP_HDRS_DIR)
|
||||
$(call QUIET_INSTALL, $@)
|
||||
|
@ -407,8 +407,8 @@ _bpftool()
|
||||
return 0
|
||||
;;
|
||||
5)
|
||||
local BPFTOOL_PROG_ATTACH_TYPES='msg_verdict \
|
||||
skb_verdict stream_verdict stream_parser \
|
||||
local BPFTOOL_PROG_ATTACH_TYPES='sk_msg_verdict \
|
||||
sk_skb_verdict sk_skb_stream_verdict sk_skb_stream_parser \
|
||||
flow_dissector'
|
||||
COMPREPLY=( $( compgen -W "$BPFTOOL_PROG_ATTACH_TYPES" -- "$cur" ) )
|
||||
return 0
|
||||
@ -1039,12 +1039,14 @@ _bpftool()
|
||||
return 0
|
||||
;;
|
||||
attach|detach)
|
||||
local BPFTOOL_CGROUP_ATTACH_TYPES='ingress egress \
|
||||
sock_create sock_ops device \
|
||||
bind4 bind6 post_bind4 post_bind6 connect4 connect6 \
|
||||
getpeername4 getpeername6 getsockname4 getsockname6 \
|
||||
sendmsg4 sendmsg6 recvmsg4 recvmsg6 sysctl getsockopt \
|
||||
setsockopt sock_release'
|
||||
local BPFTOOL_CGROUP_ATTACH_TYPES='cgroup_inet_ingress cgroup_inet_egress \
|
||||
cgroup_inet_sock_create cgroup_sock_ops cgroup_device cgroup_inet4_bind \
|
||||
cgroup_inet6_bind cgroup_inet4_post_bind cgroup_inet6_post_bind \
|
||||
cgroup_inet4_connect cgroup_inet6_connect cgroup_inet4_getpeername \
|
||||
cgroup_inet6_getpeername cgroup_inet4_getsockname cgroup_inet6_getsockname \
|
||||
cgroup_udp4_sendmsg cgroup_udp6_sendmsg cgroup_udp4_recvmsg \
|
||||
cgroup_udp6_recvmsg cgroup_sysctl cgroup_getsockopt cgroup_setsockopt \
|
||||
cgroup_inet_sock_release'
|
||||
local ATTACH_FLAGS='multi override'
|
||||
local PROG_TYPE='id pinned tag name'
|
||||
# Check for $prev = $command first
|
||||
|
@ -40,6 +40,7 @@ static const char * const btf_kind_str[NR_BTF_KINDS] = {
|
||||
[BTF_KIND_FLOAT] = "FLOAT",
|
||||
[BTF_KIND_DECL_TAG] = "DECL_TAG",
|
||||
[BTF_KIND_TYPE_TAG] = "TYPE_TAG",
|
||||
[BTF_KIND_ENUM64] = "ENUM64",
|
||||
};
|
||||
|
||||
struct btf_attach_point {
|
||||
@ -212,15 +213,18 @@ static int dump_btf_type(const struct btf *btf, __u32 id,
|
||||
case BTF_KIND_ENUM: {
|
||||
const struct btf_enum *v = (const void *)(t + 1);
|
||||
__u16 vlen = BTF_INFO_VLEN(t->info);
|
||||
const char *encoding;
|
||||
int i;
|
||||
|
||||
encoding = btf_kflag(t) ? "SIGNED" : "UNSIGNED";
|
||||
if (json_output) {
|
||||
jsonw_string_field(w, "encoding", encoding);
|
||||
jsonw_uint_field(w, "size", t->size);
|
||||
jsonw_uint_field(w, "vlen", vlen);
|
||||
jsonw_name(w, "values");
|
||||
jsonw_start_array(w);
|
||||
} else {
|
||||
printf(" size=%u vlen=%u", t->size, vlen);
|
||||
printf(" encoding=%s size=%u vlen=%u", encoding, t->size, vlen);
|
||||
}
|
||||
for (i = 0; i < vlen; i++, v++) {
|
||||
const char *name = btf_str(btf, v->name_off);
|
||||
@ -228,9 +232,15 @@ static int dump_btf_type(const struct btf *btf, __u32 id,
|
||||
if (json_output) {
|
||||
jsonw_start_object(w);
|
||||
jsonw_string_field(w, "name", name);
|
||||
if (btf_kflag(t))
|
||||
jsonw_int_field(w, "val", v->val);
|
||||
else
|
||||
jsonw_uint_field(w, "val", v->val);
|
||||
jsonw_end_object(w);
|
||||
} else {
|
||||
if (btf_kflag(t))
|
||||
printf("\n\t'%s' val=%d", name, v->val);
|
||||
else
|
||||
printf("\n\t'%s' val=%u", name, v->val);
|
||||
}
|
||||
}
|
||||
@ -238,6 +248,47 @@ static int dump_btf_type(const struct btf *btf, __u32 id,
|
||||
jsonw_end_array(w);
|
||||
break;
|
||||
}
|
||||
case BTF_KIND_ENUM64: {
|
||||
const struct btf_enum64 *v = btf_enum64(t);
|
||||
__u16 vlen = btf_vlen(t);
|
||||
const char *encoding;
|
||||
int i;
|
||||
|
||||
encoding = btf_kflag(t) ? "SIGNED" : "UNSIGNED";
|
||||
if (json_output) {
|
||||
jsonw_string_field(w, "encoding", encoding);
|
||||
jsonw_uint_field(w, "size", t->size);
|
||||
jsonw_uint_field(w, "vlen", vlen);
|
||||
jsonw_name(w, "values");
|
||||
jsonw_start_array(w);
|
||||
} else {
|
||||
printf(" encoding=%s size=%u vlen=%u", encoding, t->size, vlen);
|
||||
}
|
||||
for (i = 0; i < vlen; i++, v++) {
|
||||
const char *name = btf_str(btf, v->name_off);
|
||||
__u64 val = ((__u64)v->val_hi32 << 32) | v->val_lo32;
|
||||
|
||||
if (json_output) {
|
||||
jsonw_start_object(w);
|
||||
jsonw_string_field(w, "name", name);
|
||||
if (btf_kflag(t))
|
||||
jsonw_int_field(w, "val", val);
|
||||
else
|
||||
jsonw_uint_field(w, "val", val);
|
||||
jsonw_end_object(w);
|
||||
} else {
|
||||
if (btf_kflag(t))
|
||||
printf("\n\t'%s' val=%lldLL", name,
|
||||
(unsigned long long)val);
|
||||
else
|
||||
printf("\n\t'%s' val=%lluULL", name,
|
||||
(unsigned long long)val);
|
||||
}
|
||||
}
|
||||
if (json_output)
|
||||
jsonw_end_array(w);
|
||||
break;
|
||||
}
|
||||
case BTF_KIND_FWD: {
|
||||
const char *fwd_kind = BTF_INFO_KFLAG(t->info) ? "union"
|
||||
: "struct";
|
||||
|
@ -182,6 +182,32 @@ static int btf_dumper_enum(const struct btf_dumper *d,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int btf_dumper_enum64(const struct btf_dumper *d,
|
||||
const struct btf_type *t,
|
||||
const void *data)
|
||||
{
|
||||
const struct btf_enum64 *enums = btf_enum64(t);
|
||||
__u32 val_lo32, val_hi32;
|
||||
__u64 value;
|
||||
__u16 i;
|
||||
|
||||
value = *(__u64 *)data;
|
||||
val_lo32 = (__u32)value;
|
||||
val_hi32 = value >> 32;
|
||||
|
||||
for (i = 0; i < btf_vlen(t); i++) {
|
||||
if (val_lo32 == enums[i].val_lo32 && val_hi32 == enums[i].val_hi32) {
|
||||
jsonw_string(d->jw,
|
||||
btf__name_by_offset(d->btf,
|
||||
enums[i].name_off));
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
jsonw_int(d->jw, value);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool is_str_array(const struct btf *btf, const struct btf_array *arr,
|
||||
const char *s)
|
||||
{
|
||||
@ -542,6 +568,8 @@ static int btf_dumper_do_type(const struct btf_dumper *d, __u32 type_id,
|
||||
return btf_dumper_array(d, type_id, data);
|
||||
case BTF_KIND_ENUM:
|
||||
return btf_dumper_enum(d, t, data);
|
||||
case BTF_KIND_ENUM64:
|
||||
return btf_dumper_enum64(d, t, data);
|
||||
case BTF_KIND_PTR:
|
||||
btf_dumper_ptr(d, t, data);
|
||||
return 0;
|
||||
@ -618,6 +646,7 @@ static int __btf_dumper_type_only(const struct btf *btf, __u32 type_id,
|
||||
btf__name_by_offset(btf, t->name_off));
|
||||
break;
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_ENUM64:
|
||||
BTF_PRINT_ARG("enum %s ",
|
||||
btf__name_by_offset(btf, t->name_off));
|
||||
break;
|
||||
|
@ -22,24 +22,42 @@
|
||||
"ATTACH_FLAGS := { multi | override }"
|
||||
|
||||
#define HELP_SPEC_ATTACH_TYPES \
|
||||
" ATTACH_TYPE := { ingress | egress | sock_create |\n" \
|
||||
" sock_ops | device | bind4 | bind6 |\n" \
|
||||
" post_bind4 | post_bind6 | connect4 |\n" \
|
||||
" connect6 | getpeername4 | getpeername6 |\n" \
|
||||
" getsockname4 | getsockname6 | sendmsg4 |\n" \
|
||||
" sendmsg6 | recvmsg4 | recvmsg6 |\n" \
|
||||
" sysctl | getsockopt | setsockopt |\n" \
|
||||
" sock_release }"
|
||||
" ATTACH_TYPE := { cgroup_inet_ingress | cgroup_inet_egress |\n" \
|
||||
" cgroup_inet_sock_create | cgroup_sock_ops |\n" \
|
||||
" cgroup_device | cgroup_inet4_bind |\n" \
|
||||
" cgroup_inet6_bind | cgroup_inet4_post_bind |\n" \
|
||||
" cgroup_inet6_post_bind | cgroup_inet4_connect |\n" \
|
||||
" cgroup_inet6_connect | cgroup_inet4_getpeername |\n" \
|
||||
" cgroup_inet6_getpeername | cgroup_inet4_getsockname |\n" \
|
||||
" cgroup_inet6_getsockname | cgroup_udp4_sendmsg |\n" \
|
||||
" cgroup_udp6_sendmsg | cgroup_udp4_recvmsg |\n" \
|
||||
" cgroup_udp6_recvmsg | cgroup_sysctl |\n" \
|
||||
" cgroup_getsockopt | cgroup_setsockopt |\n" \
|
||||
" cgroup_inet_sock_release }"
|
||||
|
||||
static unsigned int query_flags;
|
||||
|
||||
static enum bpf_attach_type parse_attach_type(const char *str)
|
||||
{
|
||||
const char *attach_type_str;
|
||||
enum bpf_attach_type type;
|
||||
|
||||
for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
|
||||
if (attach_type_name[type] &&
|
||||
is_prefix(str, attach_type_name[type]))
|
||||
for (type = 0; ; type++) {
|
||||
attach_type_str = libbpf_bpf_attach_type_str(type);
|
||||
if (!attach_type_str)
|
||||
break;
|
||||
if (!strcmp(str, attach_type_str))
|
||||
return type;
|
||||
}
|
||||
|
||||
/* Also check traditionally used attach type strings. For these we keep
|
||||
* allowing prefixed usage.
|
||||
*/
|
||||
for (type = 0; ; type++) {
|
||||
attach_type_str = bpf_attach_type_input_str(type);
|
||||
if (!attach_type_str)
|
||||
break;
|
||||
if (is_prefix(str, attach_type_str))
|
||||
return type;
|
||||
}
|
||||
|
||||
@ -52,6 +70,7 @@ static int show_bpf_prog(int id, enum bpf_attach_type attach_type,
|
||||
{
|
||||
char prog_name[MAX_PROG_FULL_NAME];
|
||||
struct bpf_prog_info info = {};
|
||||
const char *attach_type_str;
|
||||
__u32 info_len = sizeof(info);
|
||||
int prog_fd;
|
||||
|
||||
@ -64,13 +83,13 @@ static int show_bpf_prog(int id, enum bpf_attach_type attach_type,
|
||||
return -1;
|
||||
}
|
||||
|
||||
attach_type_str = libbpf_bpf_attach_type_str(attach_type);
|
||||
get_prog_full_name(&info, prog_fd, prog_name, sizeof(prog_name));
|
||||
if (json_output) {
|
||||
jsonw_start_object(json_wtr);
|
||||
jsonw_uint_field(json_wtr, "id", info.id);
|
||||
if (attach_type < ARRAY_SIZE(attach_type_name))
|
||||
jsonw_string_field(json_wtr, "attach_type",
|
||||
attach_type_name[attach_type]);
|
||||
if (attach_type_str)
|
||||
jsonw_string_field(json_wtr, "attach_type", attach_type_str);
|
||||
else
|
||||
jsonw_uint_field(json_wtr, "attach_type", attach_type);
|
||||
jsonw_string_field(json_wtr, "attach_flags",
|
||||
@ -79,8 +98,8 @@ static int show_bpf_prog(int id, enum bpf_attach_type attach_type,
|
||||
jsonw_end_object(json_wtr);
|
||||
} else {
|
||||
printf("%s%-8u ", level ? " " : "", info.id);
|
||||
if (attach_type < ARRAY_SIZE(attach_type_name))
|
||||
printf("%-15s", attach_type_name[attach_type]);
|
||||
if (attach_type_str)
|
||||
printf("%-15s", attach_type_str);
|
||||
else
|
||||
printf("type %-10u", attach_type);
|
||||
printf(" %-15s %-15s\n", attach_flags_str, prog_name);
|
||||
|
@ -17,6 +17,7 @@
|
||||
#include <linux/magic.h>
|
||||
#include <net/if.h>
|
||||
#include <sys/mount.h>
|
||||
#include <sys/resource.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/vfs.h>
|
||||
|
||||
@ -31,52 +32,6 @@
|
||||
#define BPF_FS_MAGIC 0xcafe4a11
|
||||
#endif
|
||||
|
||||
const char * const attach_type_name[__MAX_BPF_ATTACH_TYPE] = {
|
||||
[BPF_CGROUP_INET_INGRESS] = "ingress",
|
||||
[BPF_CGROUP_INET_EGRESS] = "egress",
|
||||
[BPF_CGROUP_INET_SOCK_CREATE] = "sock_create",
|
||||
[BPF_CGROUP_INET_SOCK_RELEASE] = "sock_release",
|
||||
[BPF_CGROUP_SOCK_OPS] = "sock_ops",
|
||||
[BPF_CGROUP_DEVICE] = "device",
|
||||
[BPF_CGROUP_INET4_BIND] = "bind4",
|
||||
[BPF_CGROUP_INET6_BIND] = "bind6",
|
||||
[BPF_CGROUP_INET4_CONNECT] = "connect4",
|
||||
[BPF_CGROUP_INET6_CONNECT] = "connect6",
|
||||
[BPF_CGROUP_INET4_POST_BIND] = "post_bind4",
|
||||
[BPF_CGROUP_INET6_POST_BIND] = "post_bind6",
|
||||
[BPF_CGROUP_INET4_GETPEERNAME] = "getpeername4",
|
||||
[BPF_CGROUP_INET6_GETPEERNAME] = "getpeername6",
|
||||
[BPF_CGROUP_INET4_GETSOCKNAME] = "getsockname4",
|
||||
[BPF_CGROUP_INET6_GETSOCKNAME] = "getsockname6",
|
||||
[BPF_CGROUP_UDP4_SENDMSG] = "sendmsg4",
|
||||
[BPF_CGROUP_UDP6_SENDMSG] = "sendmsg6",
|
||||
[BPF_CGROUP_SYSCTL] = "sysctl",
|
||||
[BPF_CGROUP_UDP4_RECVMSG] = "recvmsg4",
|
||||
[BPF_CGROUP_UDP6_RECVMSG] = "recvmsg6",
|
||||
[BPF_CGROUP_GETSOCKOPT] = "getsockopt",
|
||||
[BPF_CGROUP_SETSOCKOPT] = "setsockopt",
|
||||
[BPF_SK_SKB_STREAM_PARSER] = "sk_skb_stream_parser",
|
||||
[BPF_SK_SKB_STREAM_VERDICT] = "sk_skb_stream_verdict",
|
||||
[BPF_SK_SKB_VERDICT] = "sk_skb_verdict",
|
||||
[BPF_SK_MSG_VERDICT] = "sk_msg_verdict",
|
||||
[BPF_LIRC_MODE2] = "lirc_mode2",
|
||||
[BPF_FLOW_DISSECTOR] = "flow_dissector",
|
||||
[BPF_TRACE_RAW_TP] = "raw_tp",
|
||||
[BPF_TRACE_FENTRY] = "fentry",
|
||||
[BPF_TRACE_FEXIT] = "fexit",
|
||||
[BPF_MODIFY_RETURN] = "mod_ret",
|
||||
[BPF_LSM_MAC] = "lsm_mac",
|
||||
[BPF_SK_LOOKUP] = "sk_lookup",
|
||||
[BPF_TRACE_ITER] = "trace_iter",
|
||||
[BPF_XDP_DEVMAP] = "xdp_devmap",
|
||||
[BPF_XDP_CPUMAP] = "xdp_cpumap",
|
||||
[BPF_XDP] = "xdp",
|
||||
[BPF_SK_REUSEPORT_SELECT] = "sk_skb_reuseport_select",
|
||||
[BPF_SK_REUSEPORT_SELECT_OR_MIGRATE] = "sk_skb_reuseport_select_or_migrate",
|
||||
[BPF_PERF_EVENT] = "perf_event",
|
||||
[BPF_TRACE_KPROBE_MULTI] = "trace_kprobe_multi",
|
||||
};
|
||||
|
||||
void p_err(const char *fmt, ...)
|
||||
{
|
||||
va_list ap;
|
||||
@ -118,6 +73,13 @@ static bool is_bpffs(char *path)
|
||||
return (unsigned long)st_fs.f_type == BPF_FS_MAGIC;
|
||||
}
|
||||
|
||||
void set_max_rlimit(void)
|
||||
{
|
||||
struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
|
||||
|
||||
setrlimit(RLIMIT_MEMLOCK, &rinf);
|
||||
}
|
||||
|
||||
static int
|
||||
mnt_fs(const char *target, const char *type, char *buff, size_t bufflen)
|
||||
{
|
||||
@ -1009,3 +971,39 @@ bool equal_fn_for_key_as_id(const void *k1, const void *k2, void *ctx)
|
||||
{
|
||||
return k1 == k2;
|
||||
}
|
||||
|
||||
const char *bpf_attach_type_input_str(enum bpf_attach_type t)
|
||||
{
|
||||
switch (t) {
|
||||
case BPF_CGROUP_INET_INGRESS: return "ingress";
|
||||
case BPF_CGROUP_INET_EGRESS: return "egress";
|
||||
case BPF_CGROUP_INET_SOCK_CREATE: return "sock_create";
|
||||
case BPF_CGROUP_INET_SOCK_RELEASE: return "sock_release";
|
||||
case BPF_CGROUP_SOCK_OPS: return "sock_ops";
|
||||
case BPF_CGROUP_DEVICE: return "device";
|
||||
case BPF_CGROUP_INET4_BIND: return "bind4";
|
||||
case BPF_CGROUP_INET6_BIND: return "bind6";
|
||||
case BPF_CGROUP_INET4_CONNECT: return "connect4";
|
||||
case BPF_CGROUP_INET6_CONNECT: return "connect6";
|
||||
case BPF_CGROUP_INET4_POST_BIND: return "post_bind4";
|
||||
case BPF_CGROUP_INET6_POST_BIND: return "post_bind6";
|
||||
case BPF_CGROUP_INET4_GETPEERNAME: return "getpeername4";
|
||||
case BPF_CGROUP_INET6_GETPEERNAME: return "getpeername6";
|
||||
case BPF_CGROUP_INET4_GETSOCKNAME: return "getsockname4";
|
||||
case BPF_CGROUP_INET6_GETSOCKNAME: return "getsockname6";
|
||||
case BPF_CGROUP_UDP4_SENDMSG: return "sendmsg4";
|
||||
case BPF_CGROUP_UDP6_SENDMSG: return "sendmsg6";
|
||||
case BPF_CGROUP_SYSCTL: return "sysctl";
|
||||
case BPF_CGROUP_UDP4_RECVMSG: return "recvmsg4";
|
||||
case BPF_CGROUP_UDP6_RECVMSG: return "recvmsg6";
|
||||
case BPF_CGROUP_GETSOCKOPT: return "getsockopt";
|
||||
case BPF_CGROUP_SETSOCKOPT: return "setsockopt";
|
||||
case BPF_TRACE_RAW_TP: return "raw_tp";
|
||||
case BPF_TRACE_FENTRY: return "fentry";
|
||||
case BPF_TRACE_FEXIT: return "fexit";
|
||||
case BPF_MODIFY_RETURN: return "mod_ret";
|
||||
case BPF_SK_REUSEPORT_SELECT: return "sk_skb_reuseport_select";
|
||||
case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE: return "sk_skb_reuseport_select_or_migrate";
|
||||
default: return libbpf_bpf_attach_type_str(t);
|
||||
}
|
||||
}
|
||||
|
@ -548,8 +548,8 @@ static bool probe_prog_type_ifindex(enum bpf_prog_type prog_type, __u32 ifindex)
|
||||
}
|
||||
|
||||
static void
|
||||
probe_prog_type(enum bpf_prog_type prog_type, bool *supported_types,
|
||||
const char *define_prefix, __u32 ifindex)
|
||||
probe_prog_type(enum bpf_prog_type prog_type, const char *prog_type_str,
|
||||
bool *supported_types, const char *define_prefix, __u32 ifindex)
|
||||
{
|
||||
char feat_name[128], plain_desc[128], define_name[128];
|
||||
const char *plain_comment = "eBPF program_type ";
|
||||
@ -580,20 +580,16 @@ probe_prog_type(enum bpf_prog_type prog_type, bool *supported_types,
|
||||
|
||||
supported_types[prog_type] |= res;
|
||||
|
||||
if (!prog_type_name[prog_type]) {
|
||||
p_info("program type name not found (type %d)", prog_type);
|
||||
return;
|
||||
}
|
||||
maxlen = sizeof(plain_desc) - strlen(plain_comment) - 1;
|
||||
if (strlen(prog_type_name[prog_type]) > maxlen) {
|
||||
if (strlen(prog_type_str) > maxlen) {
|
||||
p_info("program type name too long");
|
||||
return;
|
||||
}
|
||||
|
||||
sprintf(feat_name, "have_%s_prog_type", prog_type_name[prog_type]);
|
||||
sprintf(define_name, "%s_prog_type", prog_type_name[prog_type]);
|
||||
sprintf(feat_name, "have_%s_prog_type", prog_type_str);
|
||||
sprintf(define_name, "%s_prog_type", prog_type_str);
|
||||
uppercase(define_name, sizeof(define_name));
|
||||
sprintf(plain_desc, "%s%s", plain_comment, prog_type_name[prog_type]);
|
||||
sprintf(plain_desc, "%s%s", plain_comment, prog_type_str);
|
||||
print_bool_feature(feat_name, plain_desc, define_name, res,
|
||||
define_prefix);
|
||||
}
|
||||
@ -619,8 +615,8 @@ static bool probe_map_type_ifindex(enum bpf_map_type map_type, __u32 ifindex)
|
||||
}
|
||||
|
||||
static void
|
||||
probe_map_type(enum bpf_map_type map_type, const char *define_prefix,
|
||||
__u32 ifindex)
|
||||
probe_map_type(enum bpf_map_type map_type, char const *map_type_str,
|
||||
const char *define_prefix, __u32 ifindex)
|
||||
{
|
||||
char feat_name[128], plain_desc[128], define_name[128];
|
||||
const char *plain_comment = "eBPF map_type ";
|
||||
@ -645,20 +641,16 @@ probe_map_type(enum bpf_map_type map_type, const char *define_prefix,
|
||||
* check required for unprivileged users
|
||||
*/
|
||||
|
||||
if (!map_type_name[map_type]) {
|
||||
p_info("map type name not found (type %d)", map_type);
|
||||
return;
|
||||
}
|
||||
maxlen = sizeof(plain_desc) - strlen(plain_comment) - 1;
|
||||
if (strlen(map_type_name[map_type]) > maxlen) {
|
||||
if (strlen(map_type_str) > maxlen) {
|
||||
p_info("map type name too long");
|
||||
return;
|
||||
}
|
||||
|
||||
sprintf(feat_name, "have_%s_map_type", map_type_name[map_type]);
|
||||
sprintf(define_name, "%s_map_type", map_type_name[map_type]);
|
||||
sprintf(feat_name, "have_%s_map_type", map_type_str);
|
||||
sprintf(define_name, "%s_map_type", map_type_str);
|
||||
uppercase(define_name, sizeof(define_name));
|
||||
sprintf(plain_desc, "%s%s", plain_comment, map_type_name[map_type]);
|
||||
sprintf(plain_desc, "%s%s", plain_comment, map_type_str);
|
||||
print_bool_feature(feat_name, plain_desc, define_name, res,
|
||||
define_prefix);
|
||||
}
|
||||
@ -728,10 +720,10 @@ probe_helper_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
|
||||
}
|
||||
|
||||
static void
|
||||
probe_helpers_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
|
||||
probe_helpers_for_progtype(enum bpf_prog_type prog_type,
|
||||
const char *prog_type_str, bool supported_type,
|
||||
const char *define_prefix, __u32 ifindex)
|
||||
{
|
||||
const char *ptype_name = prog_type_name[prog_type];
|
||||
char feat_name[128];
|
||||
unsigned int id;
|
||||
bool probe_res = false;
|
||||
@ -747,12 +739,12 @@ probe_helpers_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
|
||||
}
|
||||
|
||||
if (json_output) {
|
||||
sprintf(feat_name, "%s_available_helpers", ptype_name);
|
||||
sprintf(feat_name, "%s_available_helpers", prog_type_str);
|
||||
jsonw_name(json_wtr, feat_name);
|
||||
jsonw_start_array(json_wtr);
|
||||
} else if (!define_prefix) {
|
||||
printf("eBPF helpers supported for program type %s:",
|
||||
ptype_name);
|
||||
prog_type_str);
|
||||
}
|
||||
|
||||
for (id = 1; id < ARRAY_SIZE(helper_name); id++) {
|
||||
@ -768,7 +760,7 @@ probe_helpers_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
|
||||
/* fallthrough */
|
||||
default:
|
||||
probe_res |= probe_helper_for_progtype(prog_type, supported_type,
|
||||
define_prefix, id, ptype_name,
|
||||
define_prefix, id, prog_type_str,
|
||||
ifindex);
|
||||
}
|
||||
}
|
||||
@ -943,30 +935,47 @@ static void
|
||||
section_program_types(bool *supported_types, const char *define_prefix,
|
||||
__u32 ifindex)
|
||||
{
|
||||
unsigned int i;
|
||||
unsigned int prog_type = BPF_PROG_TYPE_UNSPEC;
|
||||
const char *prog_type_str;
|
||||
|
||||
print_start_section("program_types",
|
||||
"Scanning eBPF program types...",
|
||||
"/*** eBPF program types ***/",
|
||||
define_prefix);
|
||||
|
||||
for (i = BPF_PROG_TYPE_UNSPEC + 1; i < prog_type_name_size; i++)
|
||||
probe_prog_type(i, supported_types, define_prefix, ifindex);
|
||||
while (true) {
|
||||
prog_type++;
|
||||
prog_type_str = libbpf_bpf_prog_type_str(prog_type);
|
||||
/* libbpf will return NULL for variants unknown to it. */
|
||||
if (!prog_type_str)
|
||||
break;
|
||||
|
||||
probe_prog_type(prog_type, prog_type_str, supported_types, define_prefix,
|
||||
ifindex);
|
||||
}
|
||||
|
||||
print_end_section();
|
||||
}
|
||||
|
||||
static void section_map_types(const char *define_prefix, __u32 ifindex)
|
||||
{
|
||||
unsigned int i;
|
||||
unsigned int map_type = BPF_MAP_TYPE_UNSPEC;
|
||||
const char *map_type_str;
|
||||
|
||||
print_start_section("map_types",
|
||||
"Scanning eBPF map types...",
|
||||
"/*** eBPF map types ***/",
|
||||
define_prefix);
|
||||
|
||||
for (i = BPF_MAP_TYPE_UNSPEC + 1; i < map_type_name_size; i++)
|
||||
probe_map_type(i, define_prefix, ifindex);
|
||||
while (true) {
|
||||
map_type++;
|
||||
map_type_str = libbpf_bpf_map_type_str(map_type);
|
||||
/* libbpf will return NULL for variants unknown to it. */
|
||||
if (!map_type_str)
|
||||
break;
|
||||
|
||||
probe_map_type(map_type, map_type_str, define_prefix, ifindex);
|
||||
}
|
||||
|
||||
print_end_section();
|
||||
}
|
||||
@ -974,7 +983,8 @@ static void section_map_types(const char *define_prefix, __u32 ifindex)
|
||||
static void
|
||||
section_helpers(bool *supported_types, const char *define_prefix, __u32 ifindex)
|
||||
{
|
||||
unsigned int i;
|
||||
unsigned int prog_type = BPF_PROG_TYPE_UNSPEC;
|
||||
const char *prog_type_str;
|
||||
|
||||
print_start_section("helpers",
|
||||
"Scanning eBPF helper functions...",
|
||||
@ -996,9 +1006,18 @@ section_helpers(bool *supported_types, const char *define_prefix, __u32 ifindex)
|
||||
" %sBPF__PROG_TYPE_ ## prog_type ## __HELPER_ ## helper\n",
|
||||
define_prefix, define_prefix, define_prefix,
|
||||
define_prefix);
|
||||
for (i = BPF_PROG_TYPE_UNSPEC + 1; i < prog_type_name_size; i++)
|
||||
probe_helpers_for_progtype(i, supported_types[i], define_prefix,
|
||||
while (true) {
|
||||
prog_type++;
|
||||
prog_type_str = libbpf_bpf_prog_type_str(prog_type);
|
||||
/* libbpf will return NULL for variants unknown to it. */
|
||||
if (!prog_type_str)
|
||||
break;
|
||||
|
||||
probe_helpers_for_progtype(prog_type, prog_type_str,
|
||||
supported_types[prog_type],
|
||||
define_prefix,
|
||||
ifindex);
|
||||
}
|
||||
|
||||
print_end_section();
|
||||
}
|
||||
@ -1148,6 +1167,8 @@ static int do_probe(int argc, char **argv)
|
||||
__u32 ifindex = 0;
|
||||
char *ifname;
|
||||
|
||||
set_max_rlimit();
|
||||
|
||||
while (argc) {
|
||||
if (is_prefix(*argv, "kernel")) {
|
||||
if (target != COMPONENT_UNSPEC) {
|
||||
|
@ -474,6 +474,9 @@ static void codegen_asserts(struct bpf_object *obj, const char *obj_name)
|
||||
const struct btf_type *sec;
|
||||
char map_ident[256], var_ident[256];
|
||||
|
||||
if (!btf)
|
||||
return;
|
||||
|
||||
codegen("\
|
||||
\n\
|
||||
__attribute__((unused)) static void \n\
|
||||
@ -1747,6 +1750,7 @@ btfgen_mark_type(struct btfgen_info *info, unsigned int type_id, bool follow_poi
|
||||
case BTF_KIND_INT:
|
||||
case BTF_KIND_FLOAT:
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_ENUM64:
|
||||
case BTF_KIND_STRUCT:
|
||||
case BTF_KIND_UNION:
|
||||
break;
|
||||
|
@ -13,19 +13,6 @@
|
||||
#include "json_writer.h"
|
||||
#include "main.h"
|
||||
|
||||
static const char * const link_type_name[] = {
|
||||
[BPF_LINK_TYPE_UNSPEC] = "unspec",
|
||||
[BPF_LINK_TYPE_RAW_TRACEPOINT] = "raw_tracepoint",
|
||||
[BPF_LINK_TYPE_TRACING] = "tracing",
|
||||
[BPF_LINK_TYPE_CGROUP] = "cgroup",
|
||||
[BPF_LINK_TYPE_ITER] = "iter",
|
||||
[BPF_LINK_TYPE_NETNS] = "netns",
|
||||
[BPF_LINK_TYPE_XDP] = "xdp",
|
||||
[BPF_LINK_TYPE_PERF_EVENT] = "perf_event",
|
||||
[BPF_LINK_TYPE_KPROBE_MULTI] = "kprobe_multi",
|
||||
[BPF_LINK_TYPE_STRUCT_OPS] = "struct_ops",
|
||||
};
|
||||
|
||||
static struct hashmap *link_table;
|
||||
|
||||
static int link_parse_fd(int *argc, char ***argv)
|
||||
@ -67,9 +54,12 @@ static int link_parse_fd(int *argc, char ***argv)
|
||||
static void
|
||||
show_link_header_json(struct bpf_link_info *info, json_writer_t *wtr)
|
||||
{
|
||||
const char *link_type_str;
|
||||
|
||||
jsonw_uint_field(wtr, "id", info->id);
|
||||
if (info->type < ARRAY_SIZE(link_type_name))
|
||||
jsonw_string_field(wtr, "type", link_type_name[info->type]);
|
||||
link_type_str = libbpf_bpf_link_type_str(info->type);
|
||||
if (link_type_str)
|
||||
jsonw_string_field(wtr, "type", link_type_str);
|
||||
else
|
||||
jsonw_uint_field(wtr, "type", info->type);
|
||||
|
||||
@ -78,9 +68,11 @@ show_link_header_json(struct bpf_link_info *info, json_writer_t *wtr)
|
||||
|
||||
static void show_link_attach_type_json(__u32 attach_type, json_writer_t *wtr)
|
||||
{
|
||||
if (attach_type < ARRAY_SIZE(attach_type_name))
|
||||
jsonw_string_field(wtr, "attach_type",
|
||||
attach_type_name[attach_type]);
|
||||
const char *attach_type_str;
|
||||
|
||||
attach_type_str = libbpf_bpf_attach_type_str(attach_type);
|
||||
if (attach_type_str)
|
||||
jsonw_string_field(wtr, "attach_type", attach_type_str);
|
||||
else
|
||||
jsonw_uint_field(wtr, "attach_type", attach_type);
|
||||
}
|
||||
@ -121,6 +113,7 @@ static int get_prog_info(int prog_id, struct bpf_prog_info *info)
|
||||
static int show_link_close_json(int fd, struct bpf_link_info *info)
|
||||
{
|
||||
struct bpf_prog_info prog_info;
|
||||
const char *prog_type_str;
|
||||
int err;
|
||||
|
||||
jsonw_start_object(json_wtr);
|
||||
@ -137,12 +130,12 @@ static int show_link_close_json(int fd, struct bpf_link_info *info)
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (prog_info.type < prog_type_name_size)
|
||||
jsonw_string_field(json_wtr, "prog_type",
|
||||
prog_type_name[prog_info.type]);
|
||||
prog_type_str = libbpf_bpf_prog_type_str(prog_info.type);
|
||||
/* libbpf will return NULL for variants unknown to it. */
|
||||
if (prog_type_str)
|
||||
jsonw_string_field(json_wtr, "prog_type", prog_type_str);
|
||||
else
|
||||
jsonw_uint_field(json_wtr, "prog_type",
|
||||
prog_info.type);
|
||||
jsonw_uint_field(json_wtr, "prog_type", prog_info.type);
|
||||
|
||||
show_link_attach_type_json(info->tracing.attach_type,
|
||||
json_wtr);
|
||||
@ -184,9 +177,12 @@ static int show_link_close_json(int fd, struct bpf_link_info *info)
|
||||
|
||||
static void show_link_header_plain(struct bpf_link_info *info)
|
||||
{
|
||||
const char *link_type_str;
|
||||
|
||||
printf("%u: ", info->id);
|
||||
if (info->type < ARRAY_SIZE(link_type_name))
|
||||
printf("%s ", link_type_name[info->type]);
|
||||
link_type_str = libbpf_bpf_link_type_str(info->type);
|
||||
if (link_type_str)
|
||||
printf("%s ", link_type_str);
|
||||
else
|
||||
printf("type %u ", info->type);
|
||||
|
||||
@ -195,8 +191,11 @@ static void show_link_header_plain(struct bpf_link_info *info)
|
||||
|
||||
static void show_link_attach_type_plain(__u32 attach_type)
|
||||
{
|
||||
if (attach_type < ARRAY_SIZE(attach_type_name))
|
||||
printf("attach_type %s ", attach_type_name[attach_type]);
|
||||
const char *attach_type_str;
|
||||
|
||||
attach_type_str = libbpf_bpf_attach_type_str(attach_type);
|
||||
if (attach_type_str)
|
||||
printf("attach_type %s ", attach_type_str);
|
||||
else
|
||||
printf("attach_type %u ", attach_type);
|
||||
}
|
||||
@ -214,6 +213,7 @@ static void show_iter_plain(struct bpf_link_info *info)
|
||||
static int show_link_close_plain(int fd, struct bpf_link_info *info)
|
||||
{
|
||||
struct bpf_prog_info prog_info;
|
||||
const char *prog_type_str;
|
||||
int err;
|
||||
|
||||
show_link_header_plain(info);
|
||||
@ -228,9 +228,10 @@ static int show_link_close_plain(int fd, struct bpf_link_info *info)
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (prog_info.type < prog_type_name_size)
|
||||
printf("\n\tprog_type %s ",
|
||||
prog_type_name[prog_info.type]);
|
||||
prog_type_str = libbpf_bpf_prog_type_str(prog_info.type);
|
||||
/* libbpf will return NULL for variants unknown to it. */
|
||||
if (prog_type_str)
|
||||
printf("\n\tprog_type %s ", prog_type_str);
|
||||
else
|
||||
printf("\n\tprog_type %u ", prog_info.type);
|
||||
|
||||
|
@ -508,8 +508,6 @@ int main(int argc, char **argv)
|
||||
* mode for loading generated skeleton.
|
||||
*/
|
||||
libbpf_set_strict_mode(LIBBPF_STRICT_ALL & ~LIBBPF_STRICT_MAP_DEFINITIONS);
|
||||
} else {
|
||||
libbpf_set_strict_mode(LIBBPF_STRICT_AUTO_RLIMIT_MEMLOCK);
|
||||
}
|
||||
|
||||
argc -= optind;
|
||||
|
@ -63,14 +63,8 @@ static inline void *u64_to_ptr(__u64 ptr)
|
||||
#define HELP_SPEC_LINK \
|
||||
"LINK := { id LINK_ID | pinned FILE }"
|
||||
|
||||
extern const char * const prog_type_name[];
|
||||
extern const size_t prog_type_name_size;
|
||||
|
||||
extern const char * const attach_type_name[__MAX_BPF_ATTACH_TYPE];
|
||||
|
||||
extern const char * const map_type_name[];
|
||||
extern const size_t map_type_name_size;
|
||||
|
||||
/* keep in sync with the definition in skeleton/pid_iter.bpf.c */
|
||||
enum bpf_obj_type {
|
||||
BPF_OBJ_UNKNOWN,
|
||||
@ -102,6 +96,8 @@ int detect_common_prefix(const char *arg, ...);
|
||||
void fprint_hex(FILE *f, void *arg, unsigned int n, const char *sep);
|
||||
void usage(void) __noreturn;
|
||||
|
||||
void set_max_rlimit(void);
|
||||
|
||||
int mount_tracefs(const char *target);
|
||||
|
||||
struct obj_ref {
|
||||
@ -249,6 +245,20 @@ int print_all_levels(__maybe_unused enum libbpf_print_level level,
|
||||
size_t hash_fn_for_key_as_id(const void *key, void *ctx);
|
||||
bool equal_fn_for_key_as_id(const void *k1, const void *k2, void *ctx);
|
||||
|
||||
/* bpf_attach_type_input_str - convert the provided attach type value into a
|
||||
* textual representation that we accept for input purposes.
|
||||
*
|
||||
* This function is similar in nature to libbpf_bpf_attach_type_str, but
|
||||
* recognizes some attach type names that have been used by the program in the
|
||||
* past and which do not follow the string inference scheme that libbpf uses.
|
||||
* These textual representations should only be used for user input.
|
||||
*
|
||||
* @t: The attach type
|
||||
* Returns a pointer to a static string identifying the attach type. NULL is
|
||||
* returned for unknown bpf_attach_type values.
|
||||
*/
|
||||
const char *bpf_attach_type_input_str(enum bpf_attach_type t);
|
||||
|
||||
static inline void *u32_as_hash_field(__u32 x)
|
||||
{
|
||||
return (void *)(uintptr_t)x;
|
||||
|
@ -22,42 +22,6 @@
|
||||
#include "json_writer.h"
|
||||
#include "main.h"
|
||||
|
||||
const char * const map_type_name[] = {
|
||||
[BPF_MAP_TYPE_UNSPEC] = "unspec",
|
||||
[BPF_MAP_TYPE_HASH] = "hash",
|
||||
[BPF_MAP_TYPE_ARRAY] = "array",
|
||||
[BPF_MAP_TYPE_PROG_ARRAY] = "prog_array",
|
||||
[BPF_MAP_TYPE_PERF_EVENT_ARRAY] = "perf_event_array",
|
||||
[BPF_MAP_TYPE_PERCPU_HASH] = "percpu_hash",
|
||||
[BPF_MAP_TYPE_PERCPU_ARRAY] = "percpu_array",
|
||||
[BPF_MAP_TYPE_STACK_TRACE] = "stack_trace",
|
||||
[BPF_MAP_TYPE_CGROUP_ARRAY] = "cgroup_array",
|
||||
[BPF_MAP_TYPE_LRU_HASH] = "lru_hash",
|
||||
[BPF_MAP_TYPE_LRU_PERCPU_HASH] = "lru_percpu_hash",
|
||||
[BPF_MAP_TYPE_LPM_TRIE] = "lpm_trie",
|
||||
[BPF_MAP_TYPE_ARRAY_OF_MAPS] = "array_of_maps",
|
||||
[BPF_MAP_TYPE_HASH_OF_MAPS] = "hash_of_maps",
|
||||
[BPF_MAP_TYPE_DEVMAP] = "devmap",
|
||||
[BPF_MAP_TYPE_DEVMAP_HASH] = "devmap_hash",
|
||||
[BPF_MAP_TYPE_SOCKMAP] = "sockmap",
|
||||
[BPF_MAP_TYPE_CPUMAP] = "cpumap",
|
||||
[BPF_MAP_TYPE_XSKMAP] = "xskmap",
|
||||
[BPF_MAP_TYPE_SOCKHASH] = "sockhash",
|
||||
[BPF_MAP_TYPE_CGROUP_STORAGE] = "cgroup_storage",
|
||||
[BPF_MAP_TYPE_REUSEPORT_SOCKARRAY] = "reuseport_sockarray",
|
||||
[BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE] = "percpu_cgroup_storage",
|
||||
[BPF_MAP_TYPE_QUEUE] = "queue",
|
||||
[BPF_MAP_TYPE_STACK] = "stack",
|
||||
[BPF_MAP_TYPE_SK_STORAGE] = "sk_storage",
|
||||
[BPF_MAP_TYPE_STRUCT_OPS] = "struct_ops",
|
||||
[BPF_MAP_TYPE_RINGBUF] = "ringbuf",
|
||||
[BPF_MAP_TYPE_INODE_STORAGE] = "inode_storage",
|
||||
[BPF_MAP_TYPE_TASK_STORAGE] = "task_storage",
|
||||
[BPF_MAP_TYPE_BLOOM_FILTER] = "bloom_filter",
|
||||
};
|
||||
|
||||
const size_t map_type_name_size = ARRAY_SIZE(map_type_name);
|
||||
|
||||
static struct hashmap *map_table;
|
||||
|
||||
static bool map_is_per_cpu(__u32 type)
|
||||
@ -81,12 +45,18 @@ static bool map_is_map_of_progs(__u32 type)
|
||||
|
||||
static int map_type_from_str(const char *type)
|
||||
{
|
||||
const char *map_type_str;
|
||||
unsigned int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(map_type_name); i++)
|
||||
for (i = 0; ; i++) {
|
||||
map_type_str = libbpf_bpf_map_type_str(i);
|
||||
if (!map_type_str)
|
||||
break;
|
||||
|
||||
/* Don't allow prefixing in case of possible future shadowing */
|
||||
if (map_type_name[i] && !strcmp(map_type_name[i], type))
|
||||
if (!strcmp(map_type_str, type))
|
||||
return i;
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
@ -472,9 +442,12 @@ static int parse_elem(char **argv, struct bpf_map_info *info,
|
||||
|
||||
static void show_map_header_json(struct bpf_map_info *info, json_writer_t *wtr)
|
||||
{
|
||||
const char *map_type_str;
|
||||
|
||||
jsonw_uint_field(wtr, "id", info->id);
|
||||
if (info->type < ARRAY_SIZE(map_type_name))
|
||||
jsonw_string_field(wtr, "type", map_type_name[info->type]);
|
||||
map_type_str = libbpf_bpf_map_type_str(info->type);
|
||||
if (map_type_str)
|
||||
jsonw_string_field(wtr, "type", map_type_str);
|
||||
else
|
||||
jsonw_uint_field(wtr, "type", info->type);
|
||||
|
||||
@ -513,10 +486,12 @@ static int show_map_close_json(int fd, struct bpf_map_info *info)
|
||||
|
||||
if (owner_prog_type) {
|
||||
unsigned int prog_type = atoi(owner_prog_type);
|
||||
const char *prog_type_str;
|
||||
|
||||
if (prog_type < prog_type_name_size)
|
||||
prog_type_str = libbpf_bpf_prog_type_str(prog_type);
|
||||
if (prog_type_str)
|
||||
jsonw_string_field(json_wtr, "owner_prog_type",
|
||||
prog_type_name[prog_type]);
|
||||
prog_type_str);
|
||||
else
|
||||
jsonw_uint_field(json_wtr, "owner_prog_type",
|
||||
prog_type);
|
||||
@ -559,9 +534,13 @@ static int show_map_close_json(int fd, struct bpf_map_info *info)
|
||||
|
||||
static void show_map_header_plain(struct bpf_map_info *info)
|
||||
{
|
||||
const char *map_type_str;
|
||||
|
||||
printf("%u: ", info->id);
|
||||
if (info->type < ARRAY_SIZE(map_type_name))
|
||||
printf("%s ", map_type_name[info->type]);
|
||||
|
||||
map_type_str = libbpf_bpf_map_type_str(info->type);
|
||||
if (map_type_str)
|
||||
printf("%s ", map_type_str);
|
||||
else
|
||||
printf("type %u ", info->type);
|
||||
|
||||
@ -597,10 +576,11 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info)
|
||||
printf("\n\t");
|
||||
if (owner_prog_type) {
|
||||
unsigned int prog_type = atoi(owner_prog_type);
|
||||
const char *prog_type_str;
|
||||
|
||||
if (prog_type < prog_type_name_size)
|
||||
printf("owner_prog_type %s ",
|
||||
prog_type_name[prog_type]);
|
||||
prog_type_str = libbpf_bpf_prog_type_str(prog_type);
|
||||
if (prog_type_str)
|
||||
printf("owner_prog_type %s ", prog_type_str);
|
||||
else
|
||||
printf("owner_prog_type %d ", prog_type);
|
||||
}
|
||||
@ -876,9 +856,13 @@ map_dump(int fd, struct bpf_map_info *info, json_writer_t *wtr,
|
||||
}
|
||||
|
||||
if (info->type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY &&
|
||||
info->value_size != 8)
|
||||
info->value_size != 8) {
|
||||
const char *map_type_str;
|
||||
|
||||
map_type_str = libbpf_bpf_map_type_str(info->type);
|
||||
p_info("Warning: cannot read values from %s map with value_size != 8",
|
||||
map_type_name[info->type]);
|
||||
map_type_str);
|
||||
}
|
||||
while (true) {
|
||||
err = bpf_map_get_next_key(fd, prev_key, key);
|
||||
if (err) {
|
||||
@ -1342,6 +1326,8 @@ static int do_create(int argc, char **argv)
|
||||
goto exit;
|
||||
}
|
||||
|
||||
set_max_rlimit();
|
||||
|
||||
fd = bpf_map_create(map_type, map_name, key_size, value_size, max_entries, &attr);
|
||||
if (fd < 0) {
|
||||
p_err("map create failed: %s", strerror(errno));
|
||||
|
@ -108,6 +108,7 @@ int build_obj_refs_table(struct hashmap **map, enum bpf_obj_type type)
|
||||
p_err("failed to create hashmap for PID references");
|
||||
return -1;
|
||||
}
|
||||
set_max_rlimit();
|
||||
|
||||
skel = pid_iter_bpf__open();
|
||||
if (!skel) {
|
||||
|
@ -36,54 +36,28 @@
|
||||
#define BPF_METADATA_PREFIX "bpf_metadata_"
|
||||
#define BPF_METADATA_PREFIX_LEN (sizeof(BPF_METADATA_PREFIX) - 1)
|
||||
|
||||
const char * const prog_type_name[] = {
|
||||
[BPF_PROG_TYPE_UNSPEC] = "unspec",
|
||||
[BPF_PROG_TYPE_SOCKET_FILTER] = "socket_filter",
|
||||
[BPF_PROG_TYPE_KPROBE] = "kprobe",
|
||||
[BPF_PROG_TYPE_SCHED_CLS] = "sched_cls",
|
||||
[BPF_PROG_TYPE_SCHED_ACT] = "sched_act",
|
||||
[BPF_PROG_TYPE_TRACEPOINT] = "tracepoint",
|
||||
[BPF_PROG_TYPE_XDP] = "xdp",
|
||||
[BPF_PROG_TYPE_PERF_EVENT] = "perf_event",
|
||||
[BPF_PROG_TYPE_CGROUP_SKB] = "cgroup_skb",
|
||||
[BPF_PROG_TYPE_CGROUP_SOCK] = "cgroup_sock",
|
||||
[BPF_PROG_TYPE_LWT_IN] = "lwt_in",
|
||||
[BPF_PROG_TYPE_LWT_OUT] = "lwt_out",
|
||||
[BPF_PROG_TYPE_LWT_XMIT] = "lwt_xmit",
|
||||
[BPF_PROG_TYPE_SOCK_OPS] = "sock_ops",
|
||||
[BPF_PROG_TYPE_SK_SKB] = "sk_skb",
|
||||
[BPF_PROG_TYPE_CGROUP_DEVICE] = "cgroup_device",
|
||||
[BPF_PROG_TYPE_SK_MSG] = "sk_msg",
|
||||
[BPF_PROG_TYPE_RAW_TRACEPOINT] = "raw_tracepoint",
|
||||
[BPF_PROG_TYPE_CGROUP_SOCK_ADDR] = "cgroup_sock_addr",
|
||||
[BPF_PROG_TYPE_LWT_SEG6LOCAL] = "lwt_seg6local",
|
||||
[BPF_PROG_TYPE_LIRC_MODE2] = "lirc_mode2",
|
||||
[BPF_PROG_TYPE_SK_REUSEPORT] = "sk_reuseport",
|
||||
[BPF_PROG_TYPE_FLOW_DISSECTOR] = "flow_dissector",
|
||||
[BPF_PROG_TYPE_CGROUP_SYSCTL] = "cgroup_sysctl",
|
||||
[BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE] = "raw_tracepoint_writable",
|
||||
[BPF_PROG_TYPE_CGROUP_SOCKOPT] = "cgroup_sockopt",
|
||||
[BPF_PROG_TYPE_TRACING] = "tracing",
|
||||
[BPF_PROG_TYPE_STRUCT_OPS] = "struct_ops",
|
||||
[BPF_PROG_TYPE_EXT] = "ext",
|
||||
[BPF_PROG_TYPE_LSM] = "lsm",
|
||||
[BPF_PROG_TYPE_SK_LOOKUP] = "sk_lookup",
|
||||
[BPF_PROG_TYPE_SYSCALL] = "syscall",
|
||||
};
|
||||
|
||||
const size_t prog_type_name_size = ARRAY_SIZE(prog_type_name);
|
||||
|
||||
enum dump_mode {
|
||||
DUMP_JITED,
|
||||
DUMP_XLATED,
|
||||
};
|
||||
|
||||
static const bool attach_types[] = {
|
||||
[BPF_SK_SKB_STREAM_PARSER] = true,
|
||||
[BPF_SK_SKB_STREAM_VERDICT] = true,
|
||||
[BPF_SK_SKB_VERDICT] = true,
|
||||
[BPF_SK_MSG_VERDICT] = true,
|
||||
[BPF_FLOW_DISSECTOR] = true,
|
||||
[__MAX_BPF_ATTACH_TYPE] = false,
|
||||
};
|
||||
|
||||
/* Textual representations traditionally used by the program and kept around
|
||||
* for the sake of backwards compatibility.
|
||||
*/
|
||||
static const char * const attach_type_strings[] = {
|
||||
[BPF_SK_SKB_STREAM_PARSER] = "stream_parser",
|
||||
[BPF_SK_SKB_STREAM_VERDICT] = "stream_verdict",
|
||||
[BPF_SK_SKB_VERDICT] = "skb_verdict",
|
||||
[BPF_SK_MSG_VERDICT] = "msg_verdict",
|
||||
[BPF_FLOW_DISSECTOR] = "flow_dissector",
|
||||
[__MAX_BPF_ATTACH_TYPE] = NULL,
|
||||
};
|
||||
|
||||
@ -94,6 +68,14 @@ static enum bpf_attach_type parse_attach_type(const char *str)
|
||||
enum bpf_attach_type type;
|
||||
|
||||
for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
|
||||
if (attach_types[type]) {
|
||||
const char *attach_type_str;
|
||||
|
||||
attach_type_str = libbpf_bpf_attach_type_str(type);
|
||||
if (!strcmp(str, attach_type_str))
|
||||
return type;
|
||||
}
|
||||
|
||||
if (attach_type_strings[type] &&
|
||||
is_prefix(str, attach_type_strings[type]))
|
||||
return type;
|
||||
@ -428,12 +410,14 @@ out_free:
|
||||
|
||||
static void print_prog_header_json(struct bpf_prog_info *info, int fd)
|
||||
{
|
||||
const char *prog_type_str;
|
||||
char prog_name[MAX_PROG_FULL_NAME];
|
||||
|
||||
jsonw_uint_field(json_wtr, "id", info->id);
|
||||
if (info->type < ARRAY_SIZE(prog_type_name))
|
||||
jsonw_string_field(json_wtr, "type",
|
||||
prog_type_name[info->type]);
|
||||
prog_type_str = libbpf_bpf_prog_type_str(info->type);
|
||||
|
||||
if (prog_type_str)
|
||||
jsonw_string_field(json_wtr, "type", prog_type_str);
|
||||
else
|
||||
jsonw_uint_field(json_wtr, "type", info->type);
|
||||
|
||||
@ -515,11 +499,13 @@ static void print_prog_json(struct bpf_prog_info *info, int fd)
|
||||
|
||||
static void print_prog_header_plain(struct bpf_prog_info *info, int fd)
|
||||
{
|
||||
const char *prog_type_str;
|
||||
char prog_name[MAX_PROG_FULL_NAME];
|
||||
|
||||
printf("%u: ", info->id);
|
||||
if (info->type < ARRAY_SIZE(prog_type_name))
|
||||
printf("%s ", prog_type_name[info->type]);
|
||||
prog_type_str = libbpf_bpf_prog_type_str(info->type);
|
||||
if (prog_type_str)
|
||||
printf("%s ", prog_type_str);
|
||||
else
|
||||
printf("type %u ", info->type);
|
||||
|
||||
@ -1604,6 +1590,8 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
|
||||
}
|
||||
}
|
||||
|
||||
set_max_rlimit();
|
||||
|
||||
if (verifier_logs)
|
||||
/* log_level1 + log_level2 + stats, but not stable UAPI */
|
||||
open_opts.kernel_log_level = 1 + 2 + 4;
|
||||
@ -2301,6 +2289,7 @@ static int do_profile(int argc, char **argv)
|
||||
}
|
||||
}
|
||||
|
||||
set_max_rlimit();
|
||||
err = profiler_bpf__load(profile_obj);
|
||||
if (err) {
|
||||
p_err("failed to load profile_obj");
|
||||
@ -2374,8 +2363,8 @@ static int do_help(int argc, char **argv)
|
||||
" cgroup/sendmsg6 | cgroup/recvmsg4 | cgroup/recvmsg6 |\n"
|
||||
" cgroup/getsockopt | cgroup/setsockopt | cgroup/sock_release |\n"
|
||||
" struct_ops | fentry | fexit | freplace | sk_lookup }\n"
|
||||
" ATTACH_TYPE := { msg_verdict | skb_verdict | stream_verdict |\n"
|
||||
" stream_parser | flow_dissector }\n"
|
||||
" ATTACH_TYPE := { sk_msg_verdict | sk_skb_verdict | sk_skb_stream_verdict |\n"
|
||||
" sk_skb_stream_parser | flow_dissector }\n"
|
||||
" METRIC := { cycles | instructions | l1d_loads | llc_misses | itlb_misses | dtlb_misses }\n"
|
||||
" " HELP_SPEC_OPTIONS " |\n"
|
||||
" {-f|--bpffs} | {-m|--mapcompat} | {-n|--nomount} |\n"
|
||||
|
@ -501,6 +501,8 @@ static int do_register(int argc, char **argv)
|
||||
if (libbpf_get_error(obj))
|
||||
return -1;
|
||||
|
||||
set_max_rlimit();
|
||||
|
||||
if (bpf_object__load(obj)) {
|
||||
bpf_object__close(obj);
|
||||
return -1;
|
||||
|
@ -3597,10 +3597,11 @@ union bpf_attr {
|
||||
*
|
||||
* *iph* points to the start of the IPv4 or IPv6 header, while
|
||||
* *iph_len* contains **sizeof**\ (**struct iphdr**) or
|
||||
* **sizeof**\ (**struct ip6hdr**).
|
||||
* **sizeof**\ (**struct ipv6hdr**).
|
||||
*
|
||||
* *th* points to the start of the TCP header, while *th_len*
|
||||
* contains **sizeof**\ (**struct tcphdr**).
|
||||
* contains the length of the TCP header (at least
|
||||
* **sizeof**\ (**struct tcphdr**)).
|
||||
* Return
|
||||
* 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative
|
||||
* error otherwise.
|
||||
@ -3783,10 +3784,11 @@ union bpf_attr {
|
||||
*
|
||||
* *iph* points to the start of the IPv4 or IPv6 header, while
|
||||
* *iph_len* contains **sizeof**\ (**struct iphdr**) or
|
||||
* **sizeof**\ (**struct ip6hdr**).
|
||||
* **sizeof**\ (**struct ipv6hdr**).
|
||||
*
|
||||
* *th* points to the start of the TCP header, while *th_len*
|
||||
* contains the length of the TCP header.
|
||||
* contains the length of the TCP header with options (at least
|
||||
* **sizeof**\ (**struct tcphdr**)).
|
||||
* Return
|
||||
* On success, lower 32 bits hold the generated SYN cookie in
|
||||
* followed by 16 bits which hold the MSS value for that cookie,
|
||||
@ -5249,6 +5251,80 @@ union bpf_attr {
|
||||
* Pointer to the underlying dynptr data, NULL if the dynptr is
|
||||
* read-only, if the dynptr is invalid, or if the offset and length
|
||||
* is out of bounds.
|
||||
*
|
||||
* s64 bpf_tcp_raw_gen_syncookie_ipv4(struct iphdr *iph, struct tcphdr *th, u32 th_len)
|
||||
* Description
|
||||
* Try to issue a SYN cookie for the packet with corresponding
|
||||
* IPv4/TCP headers, *iph* and *th*, without depending on a
|
||||
* listening socket.
|
||||
*
|
||||
* *iph* points to the IPv4 header.
|
||||
*
|
||||
* *th* points to the start of the TCP header, while *th_len*
|
||||
* contains the length of the TCP header (at least
|
||||
* **sizeof**\ (**struct tcphdr**)).
|
||||
* Return
|
||||
* On success, lower 32 bits hold the generated SYN cookie in
|
||||
* followed by 16 bits which hold the MSS value for that cookie,
|
||||
* and the top 16 bits are unused.
|
||||
*
|
||||
* On failure, the returned value is one of the following:
|
||||
*
|
||||
* **-EINVAL** if *th_len* is invalid.
|
||||
*
|
||||
* s64 bpf_tcp_raw_gen_syncookie_ipv6(struct ipv6hdr *iph, struct tcphdr *th, u32 th_len)
|
||||
* Description
|
||||
* Try to issue a SYN cookie for the packet with corresponding
|
||||
* IPv6/TCP headers, *iph* and *th*, without depending on a
|
||||
* listening socket.
|
||||
*
|
||||
* *iph* points to the IPv6 header.
|
||||
*
|
||||
* *th* points to the start of the TCP header, while *th_len*
|
||||
* contains the length of the TCP header (at least
|
||||
* **sizeof**\ (**struct tcphdr**)).
|
||||
* Return
|
||||
* On success, lower 32 bits hold the generated SYN cookie in
|
||||
* followed by 16 bits which hold the MSS value for that cookie,
|
||||
* and the top 16 bits are unused.
|
||||
*
|
||||
* On failure, the returned value is one of the following:
|
||||
*
|
||||
* **-EINVAL** if *th_len* is invalid.
|
||||
*
|
||||
* **-EPROTONOSUPPORT** if CONFIG_IPV6 is not builtin.
|
||||
*
|
||||
* long bpf_tcp_raw_check_syncookie_ipv4(struct iphdr *iph, struct tcphdr *th)
|
||||
* Description
|
||||
* Check whether *iph* and *th* contain a valid SYN cookie ACK
|
||||
* without depending on a listening socket.
|
||||
*
|
||||
* *iph* points to the IPv4 header.
|
||||
*
|
||||
* *th* points to the TCP header.
|
||||
* Return
|
||||
* 0 if *iph* and *th* are a valid SYN cookie ACK.
|
||||
*
|
||||
* On failure, the returned value is one of the following:
|
||||
*
|
||||
* **-EACCES** if the SYN cookie is not valid.
|
||||
*
|
||||
* long bpf_tcp_raw_check_syncookie_ipv6(struct ipv6hdr *iph, struct tcphdr *th)
|
||||
* Description
|
||||
* Check whether *iph* and *th* contain a valid SYN cookie ACK
|
||||
* without depending on a listening socket.
|
||||
*
|
||||
* *iph* points to the IPv6 header.
|
||||
*
|
||||
* *th* points to the TCP header.
|
||||
* Return
|
||||
* 0 if *iph* and *th* are a valid SYN cookie ACK.
|
||||
*
|
||||
* On failure, the returned value is one of the following:
|
||||
*
|
||||
* **-EACCES** if the SYN cookie is not valid.
|
||||
*
|
||||
* **-EPROTONOSUPPORT** if CONFIG_IPV6 is not builtin.
|
||||
*/
|
||||
#define __BPF_FUNC_MAPPER(FN) \
|
||||
FN(unspec), \
|
||||
@ -5455,6 +5531,10 @@ union bpf_attr {
|
||||
FN(dynptr_read), \
|
||||
FN(dynptr_write), \
|
||||
FN(dynptr_data), \
|
||||
FN(tcp_raw_gen_syncookie_ipv4), \
|
||||
FN(tcp_raw_gen_syncookie_ipv6), \
|
||||
FN(tcp_raw_check_syncookie_ipv4), \
|
||||
FN(tcp_raw_check_syncookie_ipv6), \
|
||||
/* */
|
||||
|
||||
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
|
||||
|
@ -36,10 +36,10 @@ struct btf_type {
|
||||
* bits 24-28: kind (e.g. int, ptr, array...etc)
|
||||
* bits 29-30: unused
|
||||
* bit 31: kind_flag, currently used by
|
||||
* struct, union and fwd
|
||||
* struct, union, enum, fwd and enum64
|
||||
*/
|
||||
__u32 info;
|
||||
/* "size" is used by INT, ENUM, STRUCT, UNION and DATASEC.
|
||||
/* "size" is used by INT, ENUM, STRUCT, UNION, DATASEC and ENUM64.
|
||||
* "size" tells the size of the type it is describing.
|
||||
*
|
||||
* "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
|
||||
@ -63,7 +63,7 @@ enum {
|
||||
BTF_KIND_ARRAY = 3, /* Array */
|
||||
BTF_KIND_STRUCT = 4, /* Struct */
|
||||
BTF_KIND_UNION = 5, /* Union */
|
||||
BTF_KIND_ENUM = 6, /* Enumeration */
|
||||
BTF_KIND_ENUM = 6, /* Enumeration up to 32-bit values */
|
||||
BTF_KIND_FWD = 7, /* Forward */
|
||||
BTF_KIND_TYPEDEF = 8, /* Typedef */
|
||||
BTF_KIND_VOLATILE = 9, /* Volatile */
|
||||
@ -76,6 +76,7 @@ enum {
|
||||
BTF_KIND_FLOAT = 16, /* Floating point */
|
||||
BTF_KIND_DECL_TAG = 17, /* Decl Tag */
|
||||
BTF_KIND_TYPE_TAG = 18, /* Type Tag */
|
||||
BTF_KIND_ENUM64 = 19, /* Enumeration up to 64-bit values */
|
||||
|
||||
NR_BTF_KINDS,
|
||||
BTF_KIND_MAX = NR_BTF_KINDS - 1,
|
||||
@ -186,4 +187,14 @@ struct btf_decl_tag {
|
||||
__s32 component_idx;
|
||||
};
|
||||
|
||||
/* BTF_KIND_ENUM64 is followed by multiple "struct btf_enum64".
|
||||
* The exact number of btf_enum64 is stored in the vlen (of the
|
||||
* info in "struct btf_type").
|
||||
*/
|
||||
struct btf_enum64 {
|
||||
__u32 name_off;
|
||||
__u32 val_lo32;
|
||||
__u32 val_hi32;
|
||||
};
|
||||
|
||||
#endif /* _UAPI__LINUX_BTF_H__ */
|
||||
|
@ -130,7 +130,7 @@ static inline __u64 ptr_to_u64(const void *ptr)
|
||||
|
||||
/* Ensure given dynamically allocated memory region pointed to by *data* with
|
||||
* capacity of *cap_cnt* elements each taking *elem_sz* bytes has enough
|
||||
* memory to accomodate *add_cnt* new elements, assuming *cur_cnt* elements
|
||||
* memory to accommodate *add_cnt* new elements, assuming *cur_cnt* elements
|
||||
* are already used. At most *max_cnt* elements can be ever allocated.
|
||||
* If necessary, memory is reallocated and all existing data is copied over,
|
||||
* new pointer to the memory region is stored at *data, new memory region
|
||||
@ -305,6 +305,8 @@ static int btf_type_size(const struct btf_type *t)
|
||||
return base_size + sizeof(__u32);
|
||||
case BTF_KIND_ENUM:
|
||||
return base_size + vlen * sizeof(struct btf_enum);
|
||||
case BTF_KIND_ENUM64:
|
||||
return base_size + vlen * sizeof(struct btf_enum64);
|
||||
case BTF_KIND_ARRAY:
|
||||
return base_size + sizeof(struct btf_array);
|
||||
case BTF_KIND_STRUCT:
|
||||
@ -334,6 +336,7 @@ static void btf_bswap_type_base(struct btf_type *t)
|
||||
static int btf_bswap_type_rest(struct btf_type *t)
|
||||
{
|
||||
struct btf_var_secinfo *v;
|
||||
struct btf_enum64 *e64;
|
||||
struct btf_member *m;
|
||||
struct btf_array *a;
|
||||
struct btf_param *p;
|
||||
@ -361,6 +364,13 @@ static int btf_bswap_type_rest(struct btf_type *t)
|
||||
e->val = bswap_32(e->val);
|
||||
}
|
||||
return 0;
|
||||
case BTF_KIND_ENUM64:
|
||||
for (i = 0, e64 = btf_enum64(t); i < vlen; i++, e64++) {
|
||||
e64->name_off = bswap_32(e64->name_off);
|
||||
e64->val_lo32 = bswap_32(e64->val_lo32);
|
||||
e64->val_hi32 = bswap_32(e64->val_hi32);
|
||||
}
|
||||
return 0;
|
||||
case BTF_KIND_ARRAY:
|
||||
a = btf_array(t);
|
||||
a->type = bswap_32(a->type);
|
||||
@ -472,9 +482,22 @@ const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
|
||||
|
||||
static int determine_ptr_size(const struct btf *btf)
|
||||
{
|
||||
static const char * const long_aliases[] = {
|
||||
"long",
|
||||
"long int",
|
||||
"int long",
|
||||
"unsigned long",
|
||||
"long unsigned",
|
||||
"unsigned long int",
|
||||
"unsigned int long",
|
||||
"long unsigned int",
|
||||
"long int unsigned",
|
||||
"int unsigned long",
|
||||
"int long unsigned",
|
||||
};
|
||||
const struct btf_type *t;
|
||||
const char *name;
|
||||
int i, n;
|
||||
int i, j, n;
|
||||
|
||||
if (btf->base_btf && btf->base_btf->ptr_sz > 0)
|
||||
return btf->base_btf->ptr_sz;
|
||||
@ -485,14 +508,15 @@ static int determine_ptr_size(const struct btf *btf)
|
||||
if (!btf_is_int(t))
|
||||
continue;
|
||||
|
||||
if (t->size != 4 && t->size != 8)
|
||||
continue;
|
||||
|
||||
name = btf__name_by_offset(btf, t->name_off);
|
||||
if (!name)
|
||||
continue;
|
||||
|
||||
if (strcmp(name, "long int") == 0 ||
|
||||
strcmp(name, "long unsigned int") == 0) {
|
||||
if (t->size != 4 && t->size != 8)
|
||||
continue;
|
||||
for (j = 0; j < ARRAY_SIZE(long_aliases); j++) {
|
||||
if (strcmp(name, long_aliases[j]) == 0)
|
||||
return t->size;
|
||||
}
|
||||
}
|
||||
@ -597,6 +621,7 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
|
||||
case BTF_KIND_STRUCT:
|
||||
case BTF_KIND_UNION:
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_ENUM64:
|
||||
case BTF_KIND_DATASEC:
|
||||
case BTF_KIND_FLOAT:
|
||||
size = t->size;
|
||||
@ -644,6 +669,7 @@ int btf__align_of(const struct btf *btf, __u32 id)
|
||||
switch (kind) {
|
||||
case BTF_KIND_INT:
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_ENUM64:
|
||||
case BTF_KIND_FLOAT:
|
||||
return min(btf_ptr_sz(btf), (size_t)t->size);
|
||||
case BTF_KIND_PTR:
|
||||
@ -2115,20 +2141,8 @@ int btf__add_field(struct btf *btf, const char *name, int type_id,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Append new BTF_KIND_ENUM type with:
|
||||
* - *name* - name of the enum, can be NULL or empty for anonymous enums;
|
||||
* - *byte_sz* - size of the enum, in bytes.
|
||||
*
|
||||
* Enum initially has no enum values in it (and corresponds to enum forward
|
||||
* declaration). Enumerator values can be added by btf__add_enum_value()
|
||||
* immediately after btf__add_enum() succeeds.
|
||||
*
|
||||
* Returns:
|
||||
* - >0, type ID of newly added BTF type;
|
||||
* - <0, on error.
|
||||
*/
|
||||
int btf__add_enum(struct btf *btf, const char *name, __u32 byte_sz)
|
||||
static int btf_add_enum_common(struct btf *btf, const char *name, __u32 byte_sz,
|
||||
bool is_signed, __u8 kind)
|
||||
{
|
||||
struct btf_type *t;
|
||||
int sz, name_off = 0;
|
||||
@ -2153,12 +2167,34 @@ int btf__add_enum(struct btf *btf, const char *name, __u32 byte_sz)
|
||||
|
||||
/* start out with vlen=0; it will be adjusted when adding enum values */
|
||||
t->name_off = name_off;
|
||||
t->info = btf_type_info(BTF_KIND_ENUM, 0, 0);
|
||||
t->info = btf_type_info(kind, 0, is_signed);
|
||||
t->size = byte_sz;
|
||||
|
||||
return btf_commit_type(btf, sz);
|
||||
}
|
||||
|
||||
/*
|
||||
* Append new BTF_KIND_ENUM type with:
|
||||
* - *name* - name of the enum, can be NULL or empty for anonymous enums;
|
||||
* - *byte_sz* - size of the enum, in bytes.
|
||||
*
|
||||
* Enum initially has no enum values in it (and corresponds to enum forward
|
||||
* declaration). Enumerator values can be added by btf__add_enum_value()
|
||||
* immediately after btf__add_enum() succeeds.
|
||||
*
|
||||
* Returns:
|
||||
* - >0, type ID of newly added BTF type;
|
||||
* - <0, on error.
|
||||
*/
|
||||
int btf__add_enum(struct btf *btf, const char *name, __u32 byte_sz)
|
||||
{
|
||||
/*
|
||||
* set the signedness to be unsigned, it will change to signed
|
||||
* if any later enumerator is negative.
|
||||
*/
|
||||
return btf_add_enum_common(btf, name, byte_sz, false, BTF_KIND_ENUM);
|
||||
}
|
||||
|
||||
/*
|
||||
* Append new enum value for the current ENUM type with:
|
||||
* - *name* - name of the enumerator value, can't be NULL or empty;
|
||||
@ -2206,6 +2242,82 @@ int btf__add_enum_value(struct btf *btf, const char *name, __s64 value)
|
||||
t = btf_last_type(btf);
|
||||
btf_type_inc_vlen(t);
|
||||
|
||||
/* if negative value, set signedness to signed */
|
||||
if (value < 0)
|
||||
t->info = btf_type_info(btf_kind(t), btf_vlen(t), true);
|
||||
|
||||
btf->hdr->type_len += sz;
|
||||
btf->hdr->str_off += sz;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Append new BTF_KIND_ENUM64 type with:
|
||||
* - *name* - name of the enum, can be NULL or empty for anonymous enums;
|
||||
* - *byte_sz* - size of the enum, in bytes.
|
||||
* - *is_signed* - whether the enum values are signed or not;
|
||||
*
|
||||
* Enum initially has no enum values in it (and corresponds to enum forward
|
||||
* declaration). Enumerator values can be added by btf__add_enum64_value()
|
||||
* immediately after btf__add_enum64() succeeds.
|
||||
*
|
||||
* Returns:
|
||||
* - >0, type ID of newly added BTF type;
|
||||
* - <0, on error.
|
||||
*/
|
||||
int btf__add_enum64(struct btf *btf, const char *name, __u32 byte_sz,
|
||||
bool is_signed)
|
||||
{
|
||||
return btf_add_enum_common(btf, name, byte_sz, is_signed,
|
||||
BTF_KIND_ENUM64);
|
||||
}
|
||||
|
||||
/*
|
||||
* Append new enum value for the current ENUM64 type with:
|
||||
* - *name* - name of the enumerator value, can't be NULL or empty;
|
||||
* - *value* - integer value corresponding to enum value *name*;
|
||||
* Returns:
|
||||
* - 0, on success;
|
||||
* - <0, on error.
|
||||
*/
|
||||
int btf__add_enum64_value(struct btf *btf, const char *name, __u64 value)
|
||||
{
|
||||
struct btf_enum64 *v;
|
||||
struct btf_type *t;
|
||||
int sz, name_off;
|
||||
|
||||
/* last type should be BTF_KIND_ENUM64 */
|
||||
if (btf->nr_types == 0)
|
||||
return libbpf_err(-EINVAL);
|
||||
t = btf_last_type(btf);
|
||||
if (!btf_is_enum64(t))
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
/* non-empty name */
|
||||
if (!name || !name[0])
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
/* decompose and invalidate raw data */
|
||||
if (btf_ensure_modifiable(btf))
|
||||
return libbpf_err(-ENOMEM);
|
||||
|
||||
sz = sizeof(struct btf_enum64);
|
||||
v = btf_add_type_mem(btf, sz);
|
||||
if (!v)
|
||||
return libbpf_err(-ENOMEM);
|
||||
|
||||
name_off = btf__add_str(btf, name);
|
||||
if (name_off < 0)
|
||||
return name_off;
|
||||
|
||||
v->name_off = name_off;
|
||||
v->val_lo32 = (__u32)value;
|
||||
v->val_hi32 = value >> 32;
|
||||
|
||||
/* update parent type's vlen */
|
||||
t = btf_last_type(btf);
|
||||
btf_type_inc_vlen(t);
|
||||
|
||||
btf->hdr->type_len += sz;
|
||||
btf->hdr->str_off += sz;
|
||||
return 0;
|
||||
@ -3470,7 +3582,7 @@ static bool btf_equal_int_tag(struct btf_type *t1, struct btf_type *t2)
|
||||
return info1 == info2;
|
||||
}
|
||||
|
||||
/* Calculate type signature hash of ENUM. */
|
||||
/* Calculate type signature hash of ENUM/ENUM64. */
|
||||
static long btf_hash_enum(struct btf_type *t)
|
||||
{
|
||||
long h;
|
||||
@ -3504,9 +3616,31 @@ static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool btf_equal_enum64(struct btf_type *t1, struct btf_type *t2)
|
||||
{
|
||||
const struct btf_enum64 *m1, *m2;
|
||||
__u16 vlen;
|
||||
int i;
|
||||
|
||||
if (!btf_equal_common(t1, t2))
|
||||
return false;
|
||||
|
||||
vlen = btf_vlen(t1);
|
||||
m1 = btf_enum64(t1);
|
||||
m2 = btf_enum64(t2);
|
||||
for (i = 0; i < vlen; i++) {
|
||||
if (m1->name_off != m2->name_off || m1->val_lo32 != m2->val_lo32 ||
|
||||
m1->val_hi32 != m2->val_hi32)
|
||||
return false;
|
||||
m1++;
|
||||
m2++;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline bool btf_is_enum_fwd(struct btf_type *t)
|
||||
{
|
||||
return btf_is_enum(t) && btf_vlen(t) == 0;
|
||||
return btf_is_any_enum(t) && btf_vlen(t) == 0;
|
||||
}
|
||||
|
||||
static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
|
||||
@ -3519,6 +3653,17 @@ static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
|
||||
t1->size == t2->size;
|
||||
}
|
||||
|
||||
static bool btf_compat_enum64(struct btf_type *t1, struct btf_type *t2)
|
||||
{
|
||||
if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2))
|
||||
return btf_equal_enum64(t1, t2);
|
||||
|
||||
/* ignore vlen when comparing */
|
||||
return t1->name_off == t2->name_off &&
|
||||
(t1->info & ~0xffff) == (t2->info & ~0xffff) &&
|
||||
t1->size == t2->size;
|
||||
}
|
||||
|
||||
/*
|
||||
* Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs,
|
||||
* as referenced type IDs equivalence is established separately during type
|
||||
@ -3731,6 +3876,7 @@ static int btf_dedup_prep(struct btf_dedup *d)
|
||||
h = btf_hash_int_decl_tag(t);
|
||||
break;
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_ENUM64:
|
||||
h = btf_hash_enum(t);
|
||||
break;
|
||||
case BTF_KIND_STRUCT:
|
||||
@ -3820,6 +3966,27 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
|
||||
}
|
||||
break;
|
||||
|
||||
case BTF_KIND_ENUM64:
|
||||
h = btf_hash_enum(t);
|
||||
for_each_dedup_cand(d, hash_entry, h) {
|
||||
cand_id = (__u32)(long)hash_entry->value;
|
||||
cand = btf_type_by_id(d->btf, cand_id);
|
||||
if (btf_equal_enum64(t, cand)) {
|
||||
new_id = cand_id;
|
||||
break;
|
||||
}
|
||||
if (btf_compat_enum64(t, cand)) {
|
||||
if (btf_is_enum_fwd(t)) {
|
||||
/* resolve fwd to full enum */
|
||||
new_id = cand_id;
|
||||
break;
|
||||
}
|
||||
/* resolve canonical enum fwd to full enum */
|
||||
d->map[cand_id] = type_id;
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
||||
case BTF_KIND_FWD:
|
||||
case BTF_KIND_FLOAT:
|
||||
h = btf_hash_common(t);
|
||||
@ -4115,6 +4282,9 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
|
||||
case BTF_KIND_ENUM:
|
||||
return btf_compat_enum(cand_type, canon_type);
|
||||
|
||||
case BTF_KIND_ENUM64:
|
||||
return btf_compat_enum64(cand_type, canon_type);
|
||||
|
||||
case BTF_KIND_FWD:
|
||||
case BTF_KIND_FLOAT:
|
||||
return btf_equal_common(cand_type, canon_type);
|
||||
@ -4717,6 +4887,7 @@ int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ct
|
||||
case BTF_KIND_INT:
|
||||
case BTF_KIND_FLOAT:
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_ENUM64:
|
||||
return 0;
|
||||
|
||||
case BTF_KIND_FWD:
|
||||
@ -4811,6 +4982,16 @@ int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ct
|
||||
}
|
||||
break;
|
||||
}
|
||||
case BTF_KIND_ENUM64: {
|
||||
struct btf_enum64 *m = btf_enum64(t);
|
||||
|
||||
for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
|
||||
err = visit(&m->name_off, ctx);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case BTF_KIND_FUNC_PROTO: {
|
||||
struct btf_param *m = btf_params(t);
|
||||
|
||||
|
@ -215,6 +215,8 @@ LIBBPF_API int btf__add_field(struct btf *btf, const char *name, int field_type_
|
||||
/* enum construction APIs */
|
||||
LIBBPF_API int btf__add_enum(struct btf *btf, const char *name, __u32 bytes_sz);
|
||||
LIBBPF_API int btf__add_enum_value(struct btf *btf, const char *name, __s64 value);
|
||||
LIBBPF_API int btf__add_enum64(struct btf *btf, const char *name, __u32 bytes_sz, bool is_signed);
|
||||
LIBBPF_API int btf__add_enum64_value(struct btf *btf, const char *name, __u64 value);
|
||||
|
||||
enum btf_fwd_kind {
|
||||
BTF_FWD_STRUCT = 0,
|
||||
@ -393,9 +395,10 @@ btf_dump__dump_type_data(struct btf_dump *d, __u32 id,
|
||||
#ifndef BTF_KIND_FLOAT
|
||||
#define BTF_KIND_FLOAT 16 /* Floating point */
|
||||
#endif
|
||||
/* The kernel header switched to enums, so these two were never #defined */
|
||||
/* The kernel header switched to enums, so the following were never #defined */
|
||||
#define BTF_KIND_DECL_TAG 17 /* Decl Tag */
|
||||
#define BTF_KIND_TYPE_TAG 18 /* Type Tag */
|
||||
#define BTF_KIND_ENUM64 19 /* Enum for up-to 64bit values */
|
||||
|
||||
static inline __u16 btf_kind(const struct btf_type *t)
|
||||
{
|
||||
@ -454,6 +457,11 @@ static inline bool btf_is_enum(const struct btf_type *t)
|
||||
return btf_kind(t) == BTF_KIND_ENUM;
|
||||
}
|
||||
|
||||
static inline bool btf_is_enum64(const struct btf_type *t)
|
||||
{
|
||||
return btf_kind(t) == BTF_KIND_ENUM64;
|
||||
}
|
||||
|
||||
static inline bool btf_is_fwd(const struct btf_type *t)
|
||||
{
|
||||
return btf_kind(t) == BTF_KIND_FWD;
|
||||
@ -524,6 +532,18 @@ static inline bool btf_is_type_tag(const struct btf_type *t)
|
||||
return btf_kind(t) == BTF_KIND_TYPE_TAG;
|
||||
}
|
||||
|
||||
static inline bool btf_is_any_enum(const struct btf_type *t)
|
||||
{
|
||||
return btf_is_enum(t) || btf_is_enum64(t);
|
||||
}
|
||||
|
||||
static inline bool btf_kind_core_compat(const struct btf_type *t1,
|
||||
const struct btf_type *t2)
|
||||
{
|
||||
return btf_kind(t1) == btf_kind(t2) ||
|
||||
(btf_is_any_enum(t1) && btf_is_any_enum(t2));
|
||||
}
|
||||
|
||||
static inline __u8 btf_int_encoding(const struct btf_type *t)
|
||||
{
|
||||
return BTF_INT_ENCODING(*(__u32 *)(t + 1));
|
||||
@ -549,6 +569,16 @@ static inline struct btf_enum *btf_enum(const struct btf_type *t)
|
||||
return (struct btf_enum *)(t + 1);
|
||||
}
|
||||
|
||||
static inline struct btf_enum64 *btf_enum64(const struct btf_type *t)
|
||||
{
|
||||
return (struct btf_enum64 *)(t + 1);
|
||||
}
|
||||
|
||||
static inline __u64 btf_enum64_value(const struct btf_enum64 *e)
|
||||
{
|
||||
return ((__u64)e->val_hi32 << 32) | e->val_lo32;
|
||||
}
|
||||
|
||||
static inline struct btf_member *btf_members(const struct btf_type *t)
|
||||
{
|
||||
return (struct btf_member *)(t + 1);
|
||||
|
@ -318,6 +318,7 @@ static int btf_dump_mark_referenced(struct btf_dump *d)
|
||||
switch (btf_kind(t)) {
|
||||
case BTF_KIND_INT:
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_ENUM64:
|
||||
case BTF_KIND_FWD:
|
||||
case BTF_KIND_FLOAT:
|
||||
break;
|
||||
@ -538,6 +539,7 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
|
||||
return 1;
|
||||
}
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_ENUM64:
|
||||
case BTF_KIND_FWD:
|
||||
/*
|
||||
* non-anonymous or non-referenced enums are top-level
|
||||
@ -739,6 +741,7 @@ static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id)
|
||||
tstate->emit_state = EMITTED;
|
||||
break;
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_ENUM64:
|
||||
if (top_level_def) {
|
||||
btf_dump_emit_enum_def(d, id, t, 0);
|
||||
btf_dump_printf(d, ";\n\n");
|
||||
@ -989,39 +992,82 @@ static void btf_dump_emit_enum_fwd(struct btf_dump *d, __u32 id,
|
||||
btf_dump_printf(d, "enum %s", btf_dump_type_name(d, id));
|
||||
}
|
||||
|
||||
static void btf_dump_emit_enum_def(struct btf_dump *d, __u32 id,
|
||||
static void btf_dump_emit_enum32_val(struct btf_dump *d,
|
||||
const struct btf_type *t,
|
||||
int lvl)
|
||||
int lvl, __u16 vlen)
|
||||
{
|
||||
const struct btf_enum *v = btf_enum(t);
|
||||
__u16 vlen = btf_vlen(t);
|
||||
bool is_signed = btf_kflag(t);
|
||||
const char *fmt_str;
|
||||
const char *name;
|
||||
size_t dup_cnt;
|
||||
int i;
|
||||
|
||||
btf_dump_printf(d, "enum%s%s",
|
||||
t->name_off ? " " : "",
|
||||
btf_dump_type_name(d, id));
|
||||
|
||||
if (vlen) {
|
||||
btf_dump_printf(d, " {");
|
||||
for (i = 0; i < vlen; i++, v++) {
|
||||
name = btf_name_of(d, v->name_off);
|
||||
/* enumerators share namespace with typedef idents */
|
||||
dup_cnt = btf_dump_name_dups(d, d->ident_names, name);
|
||||
if (dup_cnt > 1) {
|
||||
btf_dump_printf(d, "\n%s%s___%zu = %u,",
|
||||
pfx(lvl + 1), name, dup_cnt,
|
||||
(__u32)v->val);
|
||||
fmt_str = is_signed ? "\n%s%s___%zd = %d," : "\n%s%s___%zd = %u,";
|
||||
btf_dump_printf(d, fmt_str, pfx(lvl + 1), name, dup_cnt, v->val);
|
||||
} else {
|
||||
btf_dump_printf(d, "\n%s%s = %u,",
|
||||
fmt_str = is_signed ? "\n%s%s = %d," : "\n%s%s = %u,";
|
||||
btf_dump_printf(d, fmt_str, pfx(lvl + 1), name, v->val);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void btf_dump_emit_enum64_val(struct btf_dump *d,
|
||||
const struct btf_type *t,
|
||||
int lvl, __u16 vlen)
|
||||
{
|
||||
const struct btf_enum64 *v = btf_enum64(t);
|
||||
bool is_signed = btf_kflag(t);
|
||||
const char *fmt_str;
|
||||
const char *name;
|
||||
size_t dup_cnt;
|
||||
__u64 val;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < vlen; i++, v++) {
|
||||
name = btf_name_of(d, v->name_off);
|
||||
dup_cnt = btf_dump_name_dups(d, d->ident_names, name);
|
||||
val = btf_enum64_value(v);
|
||||
if (dup_cnt > 1) {
|
||||
fmt_str = is_signed ? "\n%s%s___%zd = %lldLL,"
|
||||
: "\n%s%s___%zd = %lluULL,";
|
||||
btf_dump_printf(d, fmt_str,
|
||||
pfx(lvl + 1), name, dup_cnt,
|
||||
(unsigned long long)val);
|
||||
} else {
|
||||
fmt_str = is_signed ? "\n%s%s = %lldLL,"
|
||||
: "\n%s%s = %lluULL,";
|
||||
btf_dump_printf(d, fmt_str,
|
||||
pfx(lvl + 1), name,
|
||||
(__u32)v->val);
|
||||
(unsigned long long)val);
|
||||
}
|
||||
}
|
||||
}
|
||||
static void btf_dump_emit_enum_def(struct btf_dump *d, __u32 id,
|
||||
const struct btf_type *t,
|
||||
int lvl)
|
||||
{
|
||||
__u16 vlen = btf_vlen(t);
|
||||
|
||||
btf_dump_printf(d, "enum%s%s",
|
||||
t->name_off ? " " : "",
|
||||
btf_dump_type_name(d, id));
|
||||
|
||||
if (!vlen)
|
||||
return;
|
||||
|
||||
btf_dump_printf(d, " {");
|
||||
if (btf_is_enum(t))
|
||||
btf_dump_emit_enum32_val(d, t, lvl, vlen);
|
||||
else
|
||||
btf_dump_emit_enum64_val(d, t, lvl, vlen);
|
||||
btf_dump_printf(d, "\n%s}", pfx(lvl));
|
||||
}
|
||||
}
|
||||
|
||||
static void btf_dump_emit_fwd_def(struct btf_dump *d, __u32 id,
|
||||
const struct btf_type *t)
|
||||
@ -1178,6 +1224,7 @@ skip_mod:
|
||||
break;
|
||||
case BTF_KIND_INT:
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_ENUM64:
|
||||
case BTF_KIND_FWD:
|
||||
case BTF_KIND_STRUCT:
|
||||
case BTF_KIND_UNION:
|
||||
@ -1312,6 +1359,7 @@ static void btf_dump_emit_type_chain(struct btf_dump *d,
|
||||
btf_dump_emit_struct_fwd(d, id, t);
|
||||
break;
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_ENUM64:
|
||||
btf_dump_emit_mods(d, decls);
|
||||
/* inline anonymous enum */
|
||||
if (t->name_off == 0 && !d->skip_anon_defs)
|
||||
@ -1988,7 +2036,8 @@ static int btf_dump_get_enum_value(struct btf_dump *d,
|
||||
__u32 id,
|
||||
__s64 *value)
|
||||
{
|
||||
/* handle unaligned enum value */
|
||||
bool is_signed = btf_kflag(t);
|
||||
|
||||
if (!ptr_is_aligned(d->btf, id, data)) {
|
||||
__u64 val;
|
||||
int err;
|
||||
@ -2005,13 +2054,13 @@ static int btf_dump_get_enum_value(struct btf_dump *d,
|
||||
*value = *(__s64 *)data;
|
||||
return 0;
|
||||
case 4:
|
||||
*value = *(__s32 *)data;
|
||||
*value = is_signed ? *(__s32 *)data : *(__u32 *)data;
|
||||
return 0;
|
||||
case 2:
|
||||
*value = *(__s16 *)data;
|
||||
*value = is_signed ? *(__s16 *)data : *(__u16 *)data;
|
||||
return 0;
|
||||
case 1:
|
||||
*value = *(__s8 *)data;
|
||||
*value = is_signed ? *(__s8 *)data : *(__u8 *)data;
|
||||
return 0;
|
||||
default:
|
||||
pr_warn("unexpected size %d for enum, id:[%u]\n", t->size, id);
|
||||
@ -2024,7 +2073,7 @@ static int btf_dump_enum_data(struct btf_dump *d,
|
||||
__u32 id,
|
||||
const void *data)
|
||||
{
|
||||
const struct btf_enum *e;
|
||||
bool is_signed;
|
||||
__s64 value;
|
||||
int i, err;
|
||||
|
||||
@ -2032,6 +2081,10 @@ static int btf_dump_enum_data(struct btf_dump *d,
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
is_signed = btf_kflag(t);
|
||||
if (btf_is_enum(t)) {
|
||||
const struct btf_enum *e;
|
||||
|
||||
for (i = 0, e = btf_enum(t); i < btf_vlen(t); i++, e++) {
|
||||
if (value != e->val)
|
||||
continue;
|
||||
@ -2039,7 +2092,20 @@ static int btf_dump_enum_data(struct btf_dump *d,
|
||||
return 0;
|
||||
}
|
||||
|
||||
btf_dump_type_values(d, "%d", value);
|
||||
btf_dump_type_values(d, is_signed ? "%d" : "%u", value);
|
||||
} else {
|
||||
const struct btf_enum64 *e;
|
||||
|
||||
for (i = 0, e = btf_enum64(t); i < btf_vlen(t); i++, e++) {
|
||||
if (value != btf_enum64_value(e))
|
||||
continue;
|
||||
btf_dump_type_values(d, "%s", btf_name_of(d, e->name_off));
|
||||
return 0;
|
||||
}
|
||||
|
||||
btf_dump_type_values(d, is_signed ? "%lldLL" : "%lluULL",
|
||||
(unsigned long long)value);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -2099,6 +2165,7 @@ static int btf_dump_type_data_check_overflow(struct btf_dump *d,
|
||||
case BTF_KIND_FLOAT:
|
||||
case BTF_KIND_PTR:
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_ENUM64:
|
||||
if (data + bits_offset / 8 + size > d->typed_dump->data_end)
|
||||
return -E2BIG;
|
||||
break;
|
||||
@ -2203,6 +2270,7 @@ static int btf_dump_type_data_check_zero(struct btf_dump *d,
|
||||
return -ENODATA;
|
||||
}
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_ENUM64:
|
||||
err = btf_dump_get_enum_value(d, t, data, id, &value);
|
||||
if (err)
|
||||
return err;
|
||||
@ -2275,6 +2343,7 @@ static int btf_dump_dump_type_data(struct btf_dump *d,
|
||||
err = btf_dump_struct_data(d, t, id, data);
|
||||
break;
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_ENUM64:
|
||||
/* handle bitfield and int enum values */
|
||||
if (bit_sz) {
|
||||
__u64 print_num;
|
||||
|
@ -72,6 +72,134 @@
|
||||
static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
|
||||
static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog);
|
||||
|
||||
static const char * const attach_type_name[] = {
|
||||
[BPF_CGROUP_INET_INGRESS] = "cgroup_inet_ingress",
|
||||
[BPF_CGROUP_INET_EGRESS] = "cgroup_inet_egress",
|
||||
[BPF_CGROUP_INET_SOCK_CREATE] = "cgroup_inet_sock_create",
|
||||
[BPF_CGROUP_INET_SOCK_RELEASE] = "cgroup_inet_sock_release",
|
||||
[BPF_CGROUP_SOCK_OPS] = "cgroup_sock_ops",
|
||||
[BPF_CGROUP_DEVICE] = "cgroup_device",
|
||||
[BPF_CGROUP_INET4_BIND] = "cgroup_inet4_bind",
|
||||
[BPF_CGROUP_INET6_BIND] = "cgroup_inet6_bind",
|
||||
[BPF_CGROUP_INET4_CONNECT] = "cgroup_inet4_connect",
|
||||
[BPF_CGROUP_INET6_CONNECT] = "cgroup_inet6_connect",
|
||||
[BPF_CGROUP_INET4_POST_BIND] = "cgroup_inet4_post_bind",
|
||||
[BPF_CGROUP_INET6_POST_BIND] = "cgroup_inet6_post_bind",
|
||||
[BPF_CGROUP_INET4_GETPEERNAME] = "cgroup_inet4_getpeername",
|
||||
[BPF_CGROUP_INET6_GETPEERNAME] = "cgroup_inet6_getpeername",
|
||||
[BPF_CGROUP_INET4_GETSOCKNAME] = "cgroup_inet4_getsockname",
|
||||
[BPF_CGROUP_INET6_GETSOCKNAME] = "cgroup_inet6_getsockname",
|
||||
[BPF_CGROUP_UDP4_SENDMSG] = "cgroup_udp4_sendmsg",
|
||||
[BPF_CGROUP_UDP6_SENDMSG] = "cgroup_udp6_sendmsg",
|
||||
[BPF_CGROUP_SYSCTL] = "cgroup_sysctl",
|
||||
[BPF_CGROUP_UDP4_RECVMSG] = "cgroup_udp4_recvmsg",
|
||||
[BPF_CGROUP_UDP6_RECVMSG] = "cgroup_udp6_recvmsg",
|
||||
[BPF_CGROUP_GETSOCKOPT] = "cgroup_getsockopt",
|
||||
[BPF_CGROUP_SETSOCKOPT] = "cgroup_setsockopt",
|
||||
[BPF_SK_SKB_STREAM_PARSER] = "sk_skb_stream_parser",
|
||||
[BPF_SK_SKB_STREAM_VERDICT] = "sk_skb_stream_verdict",
|
||||
[BPF_SK_SKB_VERDICT] = "sk_skb_verdict",
|
||||
[BPF_SK_MSG_VERDICT] = "sk_msg_verdict",
|
||||
[BPF_LIRC_MODE2] = "lirc_mode2",
|
||||
[BPF_FLOW_DISSECTOR] = "flow_dissector",
|
||||
[BPF_TRACE_RAW_TP] = "trace_raw_tp",
|
||||
[BPF_TRACE_FENTRY] = "trace_fentry",
|
||||
[BPF_TRACE_FEXIT] = "trace_fexit",
|
||||
[BPF_MODIFY_RETURN] = "modify_return",
|
||||
[BPF_LSM_MAC] = "lsm_mac",
|
||||
[BPF_SK_LOOKUP] = "sk_lookup",
|
||||
[BPF_TRACE_ITER] = "trace_iter",
|
||||
[BPF_XDP_DEVMAP] = "xdp_devmap",
|
||||
[BPF_XDP_CPUMAP] = "xdp_cpumap",
|
||||
[BPF_XDP] = "xdp",
|
||||
[BPF_SK_REUSEPORT_SELECT] = "sk_reuseport_select",
|
||||
[BPF_SK_REUSEPORT_SELECT_OR_MIGRATE] = "sk_reuseport_select_or_migrate",
|
||||
[BPF_PERF_EVENT] = "perf_event",
|
||||
[BPF_TRACE_KPROBE_MULTI] = "trace_kprobe_multi",
|
||||
};
|
||||
|
||||
static const char * const link_type_name[] = {
|
||||
[BPF_LINK_TYPE_UNSPEC] = "unspec",
|
||||
[BPF_LINK_TYPE_RAW_TRACEPOINT] = "raw_tracepoint",
|
||||
[BPF_LINK_TYPE_TRACING] = "tracing",
|
||||
[BPF_LINK_TYPE_CGROUP] = "cgroup",
|
||||
[BPF_LINK_TYPE_ITER] = "iter",
|
||||
[BPF_LINK_TYPE_NETNS] = "netns",
|
||||
[BPF_LINK_TYPE_XDP] = "xdp",
|
||||
[BPF_LINK_TYPE_PERF_EVENT] = "perf_event",
|
||||
[BPF_LINK_TYPE_KPROBE_MULTI] = "kprobe_multi",
|
||||
[BPF_LINK_TYPE_STRUCT_OPS] = "struct_ops",
|
||||
};
|
||||
|
||||
static const char * const map_type_name[] = {
|
||||
[BPF_MAP_TYPE_UNSPEC] = "unspec",
|
||||
[BPF_MAP_TYPE_HASH] = "hash",
|
||||
[BPF_MAP_TYPE_ARRAY] = "array",
|
||||
[BPF_MAP_TYPE_PROG_ARRAY] = "prog_array",
|
||||
[BPF_MAP_TYPE_PERF_EVENT_ARRAY] = "perf_event_array",
|
||||
[BPF_MAP_TYPE_PERCPU_HASH] = "percpu_hash",
|
||||
[BPF_MAP_TYPE_PERCPU_ARRAY] = "percpu_array",
|
||||
[BPF_MAP_TYPE_STACK_TRACE] = "stack_trace",
|
||||
[BPF_MAP_TYPE_CGROUP_ARRAY] = "cgroup_array",
|
||||
[BPF_MAP_TYPE_LRU_HASH] = "lru_hash",
|
||||
[BPF_MAP_TYPE_LRU_PERCPU_HASH] = "lru_percpu_hash",
|
||||
[BPF_MAP_TYPE_LPM_TRIE] = "lpm_trie",
|
||||
[BPF_MAP_TYPE_ARRAY_OF_MAPS] = "array_of_maps",
|
||||
[BPF_MAP_TYPE_HASH_OF_MAPS] = "hash_of_maps",
|
||||
[BPF_MAP_TYPE_DEVMAP] = "devmap",
|
||||
[BPF_MAP_TYPE_DEVMAP_HASH] = "devmap_hash",
|
||||
[BPF_MAP_TYPE_SOCKMAP] = "sockmap",
|
||||
[BPF_MAP_TYPE_CPUMAP] = "cpumap",
|
||||
[BPF_MAP_TYPE_XSKMAP] = "xskmap",
|
||||
[BPF_MAP_TYPE_SOCKHASH] = "sockhash",
|
||||
[BPF_MAP_TYPE_CGROUP_STORAGE] = "cgroup_storage",
|
||||
[BPF_MAP_TYPE_REUSEPORT_SOCKARRAY] = "reuseport_sockarray",
|
||||
[BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE] = "percpu_cgroup_storage",
|
||||
[BPF_MAP_TYPE_QUEUE] = "queue",
|
||||
[BPF_MAP_TYPE_STACK] = "stack",
|
||||
[BPF_MAP_TYPE_SK_STORAGE] = "sk_storage",
|
||||
[BPF_MAP_TYPE_STRUCT_OPS] = "struct_ops",
|
||||
[BPF_MAP_TYPE_RINGBUF] = "ringbuf",
|
||||
[BPF_MAP_TYPE_INODE_STORAGE] = "inode_storage",
|
||||
[BPF_MAP_TYPE_TASK_STORAGE] = "task_storage",
|
||||
[BPF_MAP_TYPE_BLOOM_FILTER] = "bloom_filter",
|
||||
};
|
||||
|
||||
static const char * const prog_type_name[] = {
|
||||
[BPF_PROG_TYPE_UNSPEC] = "unspec",
|
||||
[BPF_PROG_TYPE_SOCKET_FILTER] = "socket_filter",
|
||||
[BPF_PROG_TYPE_KPROBE] = "kprobe",
|
||||
[BPF_PROG_TYPE_SCHED_CLS] = "sched_cls",
|
||||
[BPF_PROG_TYPE_SCHED_ACT] = "sched_act",
|
||||
[BPF_PROG_TYPE_TRACEPOINT] = "tracepoint",
|
||||
[BPF_PROG_TYPE_XDP] = "xdp",
|
||||
[BPF_PROG_TYPE_PERF_EVENT] = "perf_event",
|
||||
[BPF_PROG_TYPE_CGROUP_SKB] = "cgroup_skb",
|
||||
[BPF_PROG_TYPE_CGROUP_SOCK] = "cgroup_sock",
|
||||
[BPF_PROG_TYPE_LWT_IN] = "lwt_in",
|
||||
[BPF_PROG_TYPE_LWT_OUT] = "lwt_out",
|
||||
[BPF_PROG_TYPE_LWT_XMIT] = "lwt_xmit",
|
||||
[BPF_PROG_TYPE_SOCK_OPS] = "sock_ops",
|
||||
[BPF_PROG_TYPE_SK_SKB] = "sk_skb",
|
||||
[BPF_PROG_TYPE_CGROUP_DEVICE] = "cgroup_device",
|
||||
[BPF_PROG_TYPE_SK_MSG] = "sk_msg",
|
||||
[BPF_PROG_TYPE_RAW_TRACEPOINT] = "raw_tracepoint",
|
||||
[BPF_PROG_TYPE_CGROUP_SOCK_ADDR] = "cgroup_sock_addr",
|
||||
[BPF_PROG_TYPE_LWT_SEG6LOCAL] = "lwt_seg6local",
|
||||
[BPF_PROG_TYPE_LIRC_MODE2] = "lirc_mode2",
|
||||
[BPF_PROG_TYPE_SK_REUSEPORT] = "sk_reuseport",
|
||||
[BPF_PROG_TYPE_FLOW_DISSECTOR] = "flow_dissector",
|
||||
[BPF_PROG_TYPE_CGROUP_SYSCTL] = "cgroup_sysctl",
|
||||
[BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE] = "raw_tracepoint_writable",
|
||||
[BPF_PROG_TYPE_CGROUP_SOCKOPT] = "cgroup_sockopt",
|
||||
[BPF_PROG_TYPE_TRACING] = "tracing",
|
||||
[BPF_PROG_TYPE_STRUCT_OPS] = "struct_ops",
|
||||
[BPF_PROG_TYPE_EXT] = "ext",
|
||||
[BPF_PROG_TYPE_LSM] = "lsm",
|
||||
[BPF_PROG_TYPE_SK_LOOKUP] = "sk_lookup",
|
||||
[BPF_PROG_TYPE_SYSCALL] = "syscall",
|
||||
};
|
||||
|
||||
static int __base_pr(enum libbpf_print_level level, const char *format,
|
||||
va_list args)
|
||||
{
|
||||
@ -2114,6 +2242,7 @@ static const char *__btf_kind_str(__u16 kind)
|
||||
case BTF_KIND_FLOAT: return "float";
|
||||
case BTF_KIND_DECL_TAG: return "decl_tag";
|
||||
case BTF_KIND_TYPE_TAG: return "type_tag";
|
||||
case BTF_KIND_ENUM64: return "enum64";
|
||||
default: return "unknown";
|
||||
}
|
||||
}
|
||||
@ -2642,12 +2771,13 @@ static bool btf_needs_sanitization(struct bpf_object *obj)
|
||||
bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
|
||||
bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
|
||||
bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
|
||||
bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64);
|
||||
|
||||
return !has_func || !has_datasec || !has_func_global || !has_float ||
|
||||
!has_decl_tag || !has_type_tag;
|
||||
!has_decl_tag || !has_type_tag || !has_enum64;
|
||||
}
|
||||
|
||||
static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
|
||||
static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
|
||||
{
|
||||
bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
|
||||
bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
|
||||
@ -2655,6 +2785,8 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
|
||||
bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
|
||||
bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
|
||||
bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
|
||||
bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64);
|
||||
int enum64_placeholder_id = 0;
|
||||
struct btf_type *t;
|
||||
int i, j, vlen;
|
||||
|
||||
@ -2717,10 +2849,34 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
|
||||
/* replace TYPE_TAG with a CONST */
|
||||
t->name_off = 0;
|
||||
t->info = BTF_INFO_ENC(BTF_KIND_CONST, 0, 0);
|
||||
} else if (!has_enum64 && btf_is_enum(t)) {
|
||||
/* clear the kflag */
|
||||
t->info = btf_type_info(btf_kind(t), btf_vlen(t), false);
|
||||
} else if (!has_enum64 && btf_is_enum64(t)) {
|
||||
/* replace ENUM64 with a union */
|
||||
struct btf_member *m;
|
||||
|
||||
if (enum64_placeholder_id == 0) {
|
||||
enum64_placeholder_id = btf__add_int(btf, "enum64_placeholder", 1, 0);
|
||||
if (enum64_placeholder_id < 0)
|
||||
return enum64_placeholder_id;
|
||||
|
||||
t = (struct btf_type *)btf__type_by_id(btf, i);
|
||||
}
|
||||
|
||||
m = btf_members(t);
|
||||
vlen = btf_vlen(t);
|
||||
t->info = BTF_INFO_ENC(BTF_KIND_UNION, 0, vlen);
|
||||
for (j = 0; j < vlen; j++, m++) {
|
||||
m->type = enum64_placeholder_id;
|
||||
m->offset = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool libbpf_needs_btf(const struct bpf_object *obj)
|
||||
{
|
||||
return obj->efile.btf_maps_shndx >= 0 ||
|
||||
@ -3056,7 +3212,9 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
|
||||
|
||||
/* enforce 8-byte pointers for BPF-targeted BTFs */
|
||||
btf__set_pointer_size(obj->btf, 8);
|
||||
bpf_object__sanitize_btf(obj, kern_btf);
|
||||
err = bpf_object__sanitize_btf(obj, kern_btf);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
if (obj->gen_loader) {
|
||||
@ -3563,6 +3721,10 @@ static enum kcfg_type find_kcfg_type(const struct btf *btf, int id,
|
||||
if (strcmp(name, "libbpf_tristate"))
|
||||
return KCFG_UNKNOWN;
|
||||
return KCFG_TRISTATE;
|
||||
case BTF_KIND_ENUM64:
|
||||
if (strcmp(name, "libbpf_tristate"))
|
||||
return KCFG_UNKNOWN;
|
||||
return KCFG_TRISTATE;
|
||||
case BTF_KIND_ARRAY:
|
||||
if (btf_array(t)->nelems == 0)
|
||||
return KCFG_UNKNOWN;
|
||||
@ -4746,6 +4908,17 @@ static int probe_kern_bpf_cookie(void)
|
||||
return probe_fd(ret);
|
||||
}
|
||||
|
||||
static int probe_kern_btf_enum64(void)
|
||||
{
|
||||
static const char strs[] = "\0enum64";
|
||||
__u32 types[] = {
|
||||
BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8),
|
||||
};
|
||||
|
||||
return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
|
||||
strs, sizeof(strs)));
|
||||
}
|
||||
|
||||
enum kern_feature_result {
|
||||
FEAT_UNKNOWN = 0,
|
||||
FEAT_SUPPORTED = 1,
|
||||
@ -4811,6 +4984,9 @@ static struct kern_feature_desc {
|
||||
[FEAT_BPF_COOKIE] = {
|
||||
"BPF cookie support", probe_kern_bpf_cookie,
|
||||
},
|
||||
[FEAT_BTF_ENUM64] = {
|
||||
"BTF_KIND_ENUM64 support", probe_kern_btf_enum64,
|
||||
},
|
||||
};
|
||||
|
||||
bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
|
||||
@ -4943,11 +5119,6 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
|
||||
|
||||
static void bpf_map__destroy(struct bpf_map *map);
|
||||
|
||||
static bool is_pow_of_2(size_t x)
|
||||
{
|
||||
return x && (x & (x - 1));
|
||||
}
|
||||
|
||||
static size_t adjust_ringbuf_sz(size_t sz)
|
||||
{
|
||||
__u32 page_sz = sysconf(_SC_PAGE_SIZE);
|
||||
@ -5353,7 +5524,7 @@ int bpf_core_add_cands(struct bpf_core_cand *local_cand,
|
||||
n = btf__type_cnt(targ_btf);
|
||||
for (i = targ_start_id; i < n; i++) {
|
||||
t = btf__type_by_id(targ_btf, i);
|
||||
if (btf_kind(t) != btf_kind(local_t))
|
||||
if (!btf_kind_core_compat(t, local_t))
|
||||
continue;
|
||||
|
||||
targ_name = btf__name_by_offset(targ_btf, t->name_off);
|
||||
@ -5567,7 +5738,7 @@ int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
|
||||
/* caller made sure that names match (ignoring flavor suffix) */
|
||||
local_type = btf__type_by_id(local_btf, local_id);
|
||||
targ_type = btf__type_by_id(targ_btf, targ_id);
|
||||
if (btf_kind(local_type) != btf_kind(targ_type))
|
||||
if (!btf_kind_core_compat(local_type, targ_type))
|
||||
return 0;
|
||||
|
||||
recur:
|
||||
@ -5580,7 +5751,7 @@ recur:
|
||||
if (!local_type || !targ_type)
|
||||
return -EINVAL;
|
||||
|
||||
if (btf_kind(local_type) != btf_kind(targ_type))
|
||||
if (!btf_kind_core_compat(local_type, targ_type))
|
||||
return 0;
|
||||
|
||||
switch (btf_kind(local_type)) {
|
||||
@ -5588,6 +5759,7 @@ recur:
|
||||
case BTF_KIND_STRUCT:
|
||||
case BTF_KIND_UNION:
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_ENUM64:
|
||||
case BTF_KIND_FWD:
|
||||
return 1;
|
||||
case BTF_KIND_INT:
|
||||
@ -9005,8 +9177,10 @@ static const struct bpf_sec_def section_defs[] = {
|
||||
SEC_DEF("sk_reuseport", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
|
||||
SEC_DEF("kprobe+", KPROBE, 0, SEC_NONE, attach_kprobe),
|
||||
SEC_DEF("uprobe+", KPROBE, 0, SEC_NONE, attach_uprobe),
|
||||
SEC_DEF("uprobe.s+", KPROBE, 0, SEC_SLEEPABLE, attach_uprobe),
|
||||
SEC_DEF("kretprobe+", KPROBE, 0, SEC_NONE, attach_kprobe),
|
||||
SEC_DEF("uretprobe+", KPROBE, 0, SEC_NONE, attach_uprobe),
|
||||
SEC_DEF("uretprobe.s+", KPROBE, 0, SEC_SLEEPABLE, attach_uprobe),
|
||||
SEC_DEF("kprobe.multi+", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
|
||||
SEC_DEF("kretprobe.multi+", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
|
||||
SEC_DEF("usdt+", KPROBE, 0, SEC_NONE, attach_usdt),
|
||||
@ -9300,6 +9474,38 @@ int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
|
||||
return libbpf_err(-ESRCH);
|
||||
}
|
||||
|
||||
const char *libbpf_bpf_attach_type_str(enum bpf_attach_type t)
|
||||
{
|
||||
if (t < 0 || t >= ARRAY_SIZE(attach_type_name))
|
||||
return NULL;
|
||||
|
||||
return attach_type_name[t];
|
||||
}
|
||||
|
||||
const char *libbpf_bpf_link_type_str(enum bpf_link_type t)
|
||||
{
|
||||
if (t < 0 || t >= ARRAY_SIZE(link_type_name))
|
||||
return NULL;
|
||||
|
||||
return link_type_name[t];
|
||||
}
|
||||
|
||||
const char *libbpf_bpf_map_type_str(enum bpf_map_type t)
|
||||
{
|
||||
if (t < 0 || t >= ARRAY_SIZE(map_type_name))
|
||||
return NULL;
|
||||
|
||||
return map_type_name[t];
|
||||
}
|
||||
|
||||
const char *libbpf_bpf_prog_type_str(enum bpf_prog_type t)
|
||||
{
|
||||
if (t < 0 || t >= ARRAY_SIZE(prog_type_name))
|
||||
return NULL;
|
||||
|
||||
return prog_type_name[t];
|
||||
}
|
||||
|
||||
static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
|
||||
size_t offset)
|
||||
{
|
||||
@ -10988,43 +11194,6 @@ static int perf_event_uprobe_open_legacy(const char *probe_name, bool retprobe,
|
||||
return pfd;
|
||||
}
|
||||
|
||||
/* uprobes deal in relative offsets; subtract the base address associated with
|
||||
* the mapped binary. See Documentation/trace/uprobetracer.rst for more
|
||||
* details.
|
||||
*/
|
||||
static long elf_find_relative_offset(const char *filename, Elf *elf, long addr)
|
||||
{
|
||||
size_t n;
|
||||
int i;
|
||||
|
||||
if (elf_getphdrnum(elf, &n)) {
|
||||
pr_warn("elf: failed to find program headers for '%s': %s\n", filename,
|
||||
elf_errmsg(-1));
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
for (i = 0; i < n; i++) {
|
||||
int seg_start, seg_end, seg_offset;
|
||||
GElf_Phdr phdr;
|
||||
|
||||
if (!gelf_getphdr(elf, i, &phdr)) {
|
||||
pr_warn("elf: failed to get program header %d from '%s': %s\n", i, filename,
|
||||
elf_errmsg(-1));
|
||||
return -ENOENT;
|
||||
}
|
||||
if (phdr.p_type != PT_LOAD || !(phdr.p_flags & PF_X))
|
||||
continue;
|
||||
|
||||
seg_start = phdr.p_vaddr;
|
||||
seg_end = seg_start + phdr.p_memsz;
|
||||
seg_offset = phdr.p_offset;
|
||||
if (addr >= seg_start && addr < seg_end)
|
||||
return addr - seg_start + seg_offset;
|
||||
}
|
||||
pr_warn("elf: failed to find prog header containing 0x%lx in '%s'\n", addr, filename);
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
/* Return next ELF section of sh_type after scn, or first of that type if scn is NULL. */
|
||||
static Elf_Scn *elf_find_next_scn_by_type(Elf *elf, int sh_type, Elf_Scn *scn)
|
||||
{
|
||||
@ -11111,6 +11280,8 @@ static long elf_find_func_offset(const char *binary_path, const char *name)
|
||||
for (idx = 0; idx < nr_syms; idx++) {
|
||||
int curr_bind;
|
||||
GElf_Sym sym;
|
||||
Elf_Scn *sym_scn;
|
||||
GElf_Shdr sym_sh;
|
||||
|
||||
if (!gelf_getsym(symbols, idx, &sym))
|
||||
continue;
|
||||
@ -11148,12 +11319,28 @@ static long elf_find_func_offset(const char *binary_path, const char *name)
|
||||
continue;
|
||||
}
|
||||
}
|
||||
ret = sym.st_value;
|
||||
|
||||
/* Transform symbol's virtual address (absolute for
|
||||
* binaries and relative for shared libs) into file
|
||||
* offset, which is what kernel is expecting for
|
||||
* uprobe/uretprobe attachment.
|
||||
* See Documentation/trace/uprobetracer.rst for more
|
||||
* details.
|
||||
* This is done by looking up symbol's containing
|
||||
* section's header and using it's virtual address
|
||||
* (sh_addr) and corresponding file offset (sh_offset)
|
||||
* to transform sym.st_value (virtual address) into
|
||||
* desired final file offset.
|
||||
*/
|
||||
sym_scn = elf_getscn(elf, sym.st_shndx);
|
||||
if (!sym_scn)
|
||||
continue;
|
||||
if (!gelf_getshdr(sym_scn, &sym_sh))
|
||||
continue;
|
||||
|
||||
ret = sym.st_value - sym_sh.sh_addr + sym_sh.sh_offset;
|
||||
last_bind = curr_bind;
|
||||
}
|
||||
/* For binaries that are not shared libraries, we need relative offset */
|
||||
if (ret > 0 && !is_shared_lib)
|
||||
ret = elf_find_relative_offset(binary_path, elf, ret);
|
||||
if (ret > 0)
|
||||
break;
|
||||
}
|
||||
@ -11386,7 +11573,8 @@ static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf
|
||||
break;
|
||||
case 3:
|
||||
case 4:
|
||||
opts.retprobe = strcmp(probe_type, "uretprobe") == 0;
|
||||
opts.retprobe = strcmp(probe_type, "uretprobe") == 0 ||
|
||||
strcmp(probe_type, "uretprobe.s") == 0;
|
||||
if (opts.retprobe && offset != 0) {
|
||||
pr_warn("prog '%s': uretprobes do not support offset specification\n",
|
||||
prog->name);
|
||||
|
@ -51,6 +51,42 @@ enum libbpf_errno {
|
||||
|
||||
LIBBPF_API int libbpf_strerror(int err, char *buf, size_t size);
|
||||
|
||||
/**
|
||||
* @brief **libbpf_bpf_attach_type_str()** converts the provided attach type
|
||||
* value into a textual representation.
|
||||
* @param t The attach type.
|
||||
* @return Pointer to a static string identifying the attach type. NULL is
|
||||
* returned for unknown **bpf_attach_type** values.
|
||||
*/
|
||||
LIBBPF_API const char *libbpf_bpf_attach_type_str(enum bpf_attach_type t);
|
||||
|
||||
/**
|
||||
* @brief **libbpf_bpf_link_type_str()** converts the provided link type value
|
||||
* into a textual representation.
|
||||
* @param t The link type.
|
||||
* @return Pointer to a static string identifying the link type. NULL is
|
||||
* returned for unknown **bpf_link_type** values.
|
||||
*/
|
||||
LIBBPF_API const char *libbpf_bpf_link_type_str(enum bpf_link_type t);
|
||||
|
||||
/**
|
||||
* @brief **libbpf_bpf_map_type_str()** converts the provided map type value
|
||||
* into a textual representation.
|
||||
* @param t The map type.
|
||||
* @return Pointer to a static string identifying the map type. NULL is
|
||||
* returned for unknown **bpf_map_type** values.
|
||||
*/
|
||||
LIBBPF_API const char *libbpf_bpf_map_type_str(enum bpf_map_type t);
|
||||
|
||||
/**
|
||||
* @brief **libbpf_bpf_prog_type_str()** converts the provided program type
|
||||
* value into a textual representation.
|
||||
* @param t The program type.
|
||||
* @return Pointer to a static string identifying the program type. NULL is
|
||||
* returned for unknown **bpf_prog_type** values.
|
||||
*/
|
||||
LIBBPF_API const char *libbpf_bpf_prog_type_str(enum bpf_prog_type t);
|
||||
|
||||
enum libbpf_print_level {
|
||||
LIBBPF_WARN,
|
||||
LIBBPF_INFO,
|
||||
@ -71,7 +107,7 @@ struct bpf_object_open_attr {
|
||||
};
|
||||
|
||||
struct bpf_object_open_opts {
|
||||
/* size of this struct, for forward/backward compatiblity */
|
||||
/* size of this struct, for forward/backward compatibility */
|
||||
size_t sz;
|
||||
/* object name override, if provided:
|
||||
* - for object open from file, this will override setting object
|
||||
|
@ -461,5 +461,13 @@ LIBBPF_0.8.0 {
|
||||
} LIBBPF_0.7.0;
|
||||
|
||||
LIBBPF_1.0.0 {
|
||||
global:
|
||||
btf__add_enum64;
|
||||
btf__add_enum64_value;
|
||||
libbpf_bpf_attach_type_str;
|
||||
libbpf_bpf_link_type_str;
|
||||
libbpf_bpf_map_type_str;
|
||||
libbpf_bpf_prog_type_str;
|
||||
|
||||
local: *;
|
||||
};
|
||||
|
@ -351,6 +351,8 @@ enum kern_feature_id {
|
||||
FEAT_MEMCG_ACCOUNT,
|
||||
/* BPF cookie (bpf_get_attach_cookie() BPF helper) support */
|
||||
FEAT_BPF_COOKIE,
|
||||
/* BTF_KIND_ENUM64 support and BTF_KIND_ENUM kflag support */
|
||||
FEAT_BTF_ENUM64,
|
||||
__FEAT_CNT,
|
||||
};
|
||||
|
||||
@ -580,4 +582,9 @@ struct bpf_link * usdt_manager_attach_usdt(struct usdt_manager *man,
|
||||
const char *usdt_provider, const char *usdt_name,
|
||||
__u64 usdt_cookie);
|
||||
|
||||
static inline bool is_pow_of_2(size_t x)
|
||||
{
|
||||
return x && (x & (x - 1)) == 0;
|
||||
}
|
||||
|
||||
#endif /* __LIBBPF_LIBBPF_INTERNAL_H */
|
||||
|
@ -697,11 +697,6 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
|
||||
return err;
|
||||
}
|
||||
|
||||
static bool is_pow_of_2(size_t x)
|
||||
{
|
||||
return x && (x & (x - 1)) == 0;
|
||||
}
|
||||
|
||||
static int linker_sanity_check_elf(struct src_obj *obj)
|
||||
{
|
||||
struct src_sec *sec;
|
||||
@ -1340,6 +1335,7 @@ recur:
|
||||
case BTF_KIND_STRUCT:
|
||||
case BTF_KIND_UNION:
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_ENUM64:
|
||||
case BTF_KIND_FWD:
|
||||
case BTF_KIND_FUNC:
|
||||
case BTF_KIND_VAR:
|
||||
@ -1362,6 +1358,7 @@ recur:
|
||||
case BTF_KIND_INT:
|
||||
case BTF_KIND_FLOAT:
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_ENUM64:
|
||||
/* ignore encoding for int and enum values for enum */
|
||||
if (t1->size != t2->size) {
|
||||
pr_warn("global '%s': incompatible %s '%s' size %u and %u\n",
|
||||
|
@ -167,7 +167,7 @@ static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind)
|
||||
* just a parsed access string representation): [0, 1, 2, 3].
|
||||
*
|
||||
* High-level spec will capture only 3 points:
|
||||
* - intial zero-index access by pointer (&s->... is the same as &s[0]...);
|
||||
* - initial zero-index access by pointer (&s->... is the same as &s[0]...);
|
||||
* - field 'a' access (corresponds to '2' in low-level spec);
|
||||
* - array element #3 access (corresponds to '3' in low-level spec).
|
||||
*
|
||||
@ -186,7 +186,7 @@ int bpf_core_parse_spec(const char *prog_name, const struct btf *btf,
|
||||
struct bpf_core_accessor *acc;
|
||||
const struct btf_type *t;
|
||||
const char *name, *spec_str;
|
||||
__u32 id;
|
||||
__u32 id, name_off;
|
||||
__s64 sz;
|
||||
|
||||
spec_str = btf__name_by_offset(btf, relo->access_str_off);
|
||||
@ -231,11 +231,13 @@ int bpf_core_parse_spec(const char *prog_name, const struct btf *btf,
|
||||
spec->len++;
|
||||
|
||||
if (core_relo_is_enumval_based(relo->kind)) {
|
||||
if (!btf_is_enum(t) || spec->raw_len > 1 || access_idx >= btf_vlen(t))
|
||||
if (!btf_is_any_enum(t) || spec->raw_len > 1 || access_idx >= btf_vlen(t))
|
||||
return -EINVAL;
|
||||
|
||||
/* record enumerator name in a first accessor */
|
||||
acc->name = btf__name_by_offset(btf, btf_enum(t)[access_idx].name_off);
|
||||
name_off = btf_is_enum(t) ? btf_enum(t)[access_idx].name_off
|
||||
: btf_enum64(t)[access_idx].name_off;
|
||||
acc->name = btf__name_by_offset(btf, name_off);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -340,7 +342,7 @@ recur:
|
||||
|
||||
if (btf_is_composite(local_type) && btf_is_composite(targ_type))
|
||||
return 1;
|
||||
if (btf_kind(local_type) != btf_kind(targ_type))
|
||||
if (!btf_kind_core_compat(local_type, targ_type))
|
||||
return 0;
|
||||
|
||||
switch (btf_kind(local_type)) {
|
||||
@ -348,6 +350,7 @@ recur:
|
||||
case BTF_KIND_FLOAT:
|
||||
return 1;
|
||||
case BTF_KIND_FWD:
|
||||
case BTF_KIND_ENUM64:
|
||||
case BTF_KIND_ENUM: {
|
||||
const char *local_name, *targ_name;
|
||||
size_t local_len, targ_len;
|
||||
@ -477,6 +480,7 @@ static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
|
||||
const struct bpf_core_accessor *local_acc;
|
||||
struct bpf_core_accessor *targ_acc;
|
||||
int i, sz, matched;
|
||||
__u32 name_off;
|
||||
|
||||
memset(targ_spec, 0, sizeof(*targ_spec));
|
||||
targ_spec->btf = targ_btf;
|
||||
@ -494,18 +498,22 @@ static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
|
||||
|
||||
if (core_relo_is_enumval_based(local_spec->relo_kind)) {
|
||||
size_t local_essent_len, targ_essent_len;
|
||||
const struct btf_enum *e;
|
||||
const char *targ_name;
|
||||
|
||||
/* has to resolve to an enum */
|
||||
targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, &targ_id);
|
||||
if (!btf_is_enum(targ_type))
|
||||
if (!btf_is_any_enum(targ_type))
|
||||
return 0;
|
||||
|
||||
local_essent_len = bpf_core_essential_name_len(local_acc->name);
|
||||
|
||||
for (i = 0, e = btf_enum(targ_type); i < btf_vlen(targ_type); i++, e++) {
|
||||
targ_name = btf__name_by_offset(targ_spec->btf, e->name_off);
|
||||
for (i = 0; i < btf_vlen(targ_type); i++) {
|
||||
if (btf_is_enum(targ_type))
|
||||
name_off = btf_enum(targ_type)[i].name_off;
|
||||
else
|
||||
name_off = btf_enum64(targ_type)[i].name_off;
|
||||
|
||||
targ_name = btf__name_by_offset(targ_spec->btf, name_off);
|
||||
targ_essent_len = bpf_core_essential_name_len(targ_name);
|
||||
if (targ_essent_len != local_essent_len)
|
||||
continue;
|
||||
@ -583,7 +591,7 @@ static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
|
||||
static int bpf_core_calc_field_relo(const char *prog_name,
|
||||
const struct bpf_core_relo *relo,
|
||||
const struct bpf_core_spec *spec,
|
||||
__u32 *val, __u32 *field_sz, __u32 *type_id,
|
||||
__u64 *val, __u32 *field_sz, __u32 *type_id,
|
||||
bool *validate)
|
||||
{
|
||||
const struct bpf_core_accessor *acc;
|
||||
@ -680,8 +688,7 @@ static int bpf_core_calc_field_relo(const char *prog_name,
|
||||
*val = byte_sz;
|
||||
break;
|
||||
case BPF_CORE_FIELD_SIGNED:
|
||||
/* enums will be assumed unsigned */
|
||||
*val = btf_is_enum(mt) ||
|
||||
*val = (btf_is_any_enum(mt) && BTF_INFO_KFLAG(mt->info)) ||
|
||||
(btf_int_encoding(mt) & BTF_INT_SIGNED);
|
||||
if (validate)
|
||||
*validate = true; /* signedness is never ambiguous */
|
||||
@ -708,7 +715,7 @@ static int bpf_core_calc_field_relo(const char *prog_name,
|
||||
|
||||
static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo,
|
||||
const struct bpf_core_spec *spec,
|
||||
__u32 *val, bool *validate)
|
||||
__u64 *val, bool *validate)
|
||||
{
|
||||
__s64 sz;
|
||||
|
||||
@ -751,10 +758,9 @@ static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo,
|
||||
|
||||
static int bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo,
|
||||
const struct bpf_core_spec *spec,
|
||||
__u32 *val)
|
||||
__u64 *val)
|
||||
{
|
||||
const struct btf_type *t;
|
||||
const struct btf_enum *e;
|
||||
|
||||
switch (relo->kind) {
|
||||
case BPF_CORE_ENUMVAL_EXISTS:
|
||||
@ -764,8 +770,10 @@ static int bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo,
|
||||
if (!spec)
|
||||
return -EUCLEAN; /* request instruction poisoning */
|
||||
t = btf_type_by_id(spec->btf, spec->spec[0].type_id);
|
||||
e = btf_enum(t) + spec->spec[0].idx;
|
||||
*val = e->val;
|
||||
if (btf_is_enum(t))
|
||||
*val = btf_enum(t)[spec->spec[0].idx].val;
|
||||
else
|
||||
*val = btf_enum64_value(btf_enum64(t) + spec->spec[0].idx);
|
||||
break;
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
@ -929,7 +937,7 @@ int bpf_core_patch_insn(const char *prog_name, struct bpf_insn *insn,
|
||||
int insn_idx, const struct bpf_core_relo *relo,
|
||||
int relo_idx, const struct bpf_core_relo_res *res)
|
||||
{
|
||||
__u32 orig_val, new_val;
|
||||
__u64 orig_val, new_val;
|
||||
__u8 class;
|
||||
|
||||
class = BPF_CLASS(insn->code);
|
||||
@ -954,28 +962,30 @@ poison:
|
||||
if (BPF_SRC(insn->code) != BPF_K)
|
||||
return -EINVAL;
|
||||
if (res->validate && insn->imm != orig_val) {
|
||||
pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %u -> %u\n",
|
||||
pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %llu -> %llu\n",
|
||||
prog_name, relo_idx,
|
||||
insn_idx, insn->imm, orig_val, new_val);
|
||||
insn_idx, insn->imm, (unsigned long long)orig_val,
|
||||
(unsigned long long)new_val);
|
||||
return -EINVAL;
|
||||
}
|
||||
orig_val = insn->imm;
|
||||
insn->imm = new_val;
|
||||
pr_debug("prog '%s': relo #%d: patched insn #%d (ALU/ALU64) imm %u -> %u\n",
|
||||
pr_debug("prog '%s': relo #%d: patched insn #%d (ALU/ALU64) imm %llu -> %llu\n",
|
||||
prog_name, relo_idx, insn_idx,
|
||||
orig_val, new_val);
|
||||
(unsigned long long)orig_val, (unsigned long long)new_val);
|
||||
break;
|
||||
case BPF_LDX:
|
||||
case BPF_ST:
|
||||
case BPF_STX:
|
||||
if (res->validate && insn->off != orig_val) {
|
||||
pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDX/ST/STX) value: got %u, exp %u -> %u\n",
|
||||
prog_name, relo_idx, insn_idx, insn->off, orig_val, new_val);
|
||||
pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDX/ST/STX) value: got %u, exp %llu -> %llu\n",
|
||||
prog_name, relo_idx, insn_idx, insn->off, (unsigned long long)orig_val,
|
||||
(unsigned long long)new_val);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (new_val > SHRT_MAX) {
|
||||
pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) value too big: %u\n",
|
||||
prog_name, relo_idx, insn_idx, new_val);
|
||||
pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) value too big: %llu\n",
|
||||
prog_name, relo_idx, insn_idx, (unsigned long long)new_val);
|
||||
return -ERANGE;
|
||||
}
|
||||
if (res->fail_memsz_adjust) {
|
||||
@ -987,8 +997,9 @@ poison:
|
||||
|
||||
orig_val = insn->off;
|
||||
insn->off = new_val;
|
||||
pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %u -> %u\n",
|
||||
prog_name, relo_idx, insn_idx, orig_val, new_val);
|
||||
pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %llu -> %llu\n",
|
||||
prog_name, relo_idx, insn_idx, (unsigned long long)orig_val,
|
||||
(unsigned long long)new_val);
|
||||
|
||||
if (res->new_sz != res->orig_sz) {
|
||||
int insn_bytes_sz, insn_bpf_sz;
|
||||
@ -1024,20 +1035,20 @@ poison:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
imm = insn[0].imm + ((__u64)insn[1].imm << 32);
|
||||
imm = (__u32)insn[0].imm | ((__u64)insn[1].imm << 32);
|
||||
if (res->validate && imm != orig_val) {
|
||||
pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDIMM64) value: got %llu, exp %u -> %u\n",
|
||||
pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDIMM64) value: got %llu, exp %llu -> %llu\n",
|
||||
prog_name, relo_idx,
|
||||
insn_idx, (unsigned long long)imm,
|
||||
orig_val, new_val);
|
||||
(unsigned long long)orig_val, (unsigned long long)new_val);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
insn[0].imm = new_val;
|
||||
insn[1].imm = 0; /* currently only 32-bit values are supported */
|
||||
pr_debug("prog '%s': relo #%d: patched insn #%d (LDIMM64) imm64 %llu -> %u\n",
|
||||
insn[1].imm = new_val >> 32;
|
||||
pr_debug("prog '%s': relo #%d: patched insn #%d (LDIMM64) imm64 %llu -> %llu\n",
|
||||
prog_name, relo_idx, insn_idx,
|
||||
(unsigned long long)imm, new_val);
|
||||
(unsigned long long)imm, (unsigned long long)new_val);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
@ -1057,7 +1068,6 @@ poison:
|
||||
int bpf_core_format_spec(char *buf, size_t buf_sz, const struct bpf_core_spec *spec)
|
||||
{
|
||||
const struct btf_type *t;
|
||||
const struct btf_enum *e;
|
||||
const char *s;
|
||||
__u32 type_id;
|
||||
int i, len = 0;
|
||||
@ -1086,10 +1096,23 @@ int bpf_core_format_spec(char *buf, size_t buf_sz, const struct bpf_core_spec *s
|
||||
|
||||
if (core_relo_is_enumval_based(spec->relo_kind)) {
|
||||
t = skip_mods_and_typedefs(spec->btf, type_id, NULL);
|
||||
if (btf_is_enum(t)) {
|
||||
const struct btf_enum *e;
|
||||
const char *fmt_str;
|
||||
|
||||
e = btf_enum(t) + spec->raw_spec[0];
|
||||
s = btf__name_by_offset(spec->btf, e->name_off);
|
||||
fmt_str = BTF_INFO_KFLAG(t->info) ? "::%s = %d" : "::%s = %u";
|
||||
append_buf(fmt_str, s, e->val);
|
||||
} else {
|
||||
const struct btf_enum64 *e;
|
||||
const char *fmt_str;
|
||||
|
||||
append_buf("::%s = %u", s, e->val);
|
||||
e = btf_enum64(t) + spec->raw_spec[0];
|
||||
s = btf__name_by_offset(spec->btf, e->name_off);
|
||||
fmt_str = BTF_INFO_KFLAG(t->info) ? "::%s = %lld" : "::%s = %llu";
|
||||
append_buf(fmt_str, s, (unsigned long long)btf_enum64_value(e));
|
||||
}
|
||||
return len;
|
||||
}
|
||||
|
||||
@ -1148,11 +1171,11 @@ int bpf_core_format_spec(char *buf, size_t buf_sz, const struct bpf_core_spec *s
|
||||
* 3. It is supported and expected that there might be multiple flavors
|
||||
* matching the spec. As long as all the specs resolve to the same set of
|
||||
* offsets across all candidates, there is no error. If there is any
|
||||
* ambiguity, CO-RE relocation will fail. This is necessary to accomodate
|
||||
* imprefection of BTF deduplication, which can cause slight duplication of
|
||||
* ambiguity, CO-RE relocation will fail. This is necessary to accommodate
|
||||
* imperfection of BTF deduplication, which can cause slight duplication of
|
||||
* the same BTF type, if some directly or indirectly referenced (by
|
||||
* pointer) type gets resolved to different actual types in different
|
||||
* object files. If such situation occurs, deduplicated BTF will end up
|
||||
* object files. If such a situation occurs, deduplicated BTF will end up
|
||||
* with two (or more) structurally identical types, which differ only in
|
||||
* types they refer to through pointer. This should be OK in most cases and
|
||||
* is not an error.
|
||||
@ -1261,10 +1284,12 @@ int bpf_core_calc_relo_insn(const char *prog_name,
|
||||
* decision and value, otherwise it's dangerous to
|
||||
* proceed due to ambiguity
|
||||
*/
|
||||
pr_warn("prog '%s': relo #%d: relocation decision ambiguity: %s %u != %s %u\n",
|
||||
pr_warn("prog '%s': relo #%d: relocation decision ambiguity: %s %llu != %s %llu\n",
|
||||
prog_name, relo_idx,
|
||||
cand_res.poison ? "failure" : "success", cand_res.new_val,
|
||||
targ_res->poison ? "failure" : "success", targ_res->new_val);
|
||||
cand_res.poison ? "failure" : "success",
|
||||
(unsigned long long)cand_res.new_val,
|
||||
targ_res->poison ? "failure" : "success",
|
||||
(unsigned long long)targ_res->new_val);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -46,9 +46,9 @@ struct bpf_core_spec {
|
||||
|
||||
struct bpf_core_relo_res {
|
||||
/* expected value in the instruction, unless validate == false */
|
||||
__u32 orig_val;
|
||||
__u64 orig_val;
|
||||
/* new value that needs to be patched up to */
|
||||
__u32 new_val;
|
||||
__u64 new_val;
|
||||
/* relocation unsuccessful, poison instruction, but don't fail load */
|
||||
bool poison;
|
||||
/* some relocations can't be validated against orig_val */
|
||||
|
@ -441,7 +441,7 @@ static int parse_elf_segs(Elf *elf, const char *path, struct elf_seg **segs, siz
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int parse_lib_segs(int pid, const char *lib_path, struct elf_seg **segs, size_t *seg_cnt)
|
||||
static int parse_vma_segs(int pid, const char *lib_path, struct elf_seg **segs, size_t *seg_cnt)
|
||||
{
|
||||
char path[PATH_MAX], line[PATH_MAX], mode[16];
|
||||
size_t seg_start, seg_end, seg_off;
|
||||
@ -531,35 +531,40 @@ err_out:
|
||||
return err;
|
||||
}
|
||||
|
||||
static struct elf_seg *find_elf_seg(struct elf_seg *segs, size_t seg_cnt, long addr, bool relative)
|
||||
static struct elf_seg *find_elf_seg(struct elf_seg *segs, size_t seg_cnt, long virtaddr)
|
||||
{
|
||||
struct elf_seg *seg;
|
||||
int i;
|
||||
|
||||
if (relative) {
|
||||
/* for shared libraries, address is relative offset and thus
|
||||
* should be fall within logical offset-based range of
|
||||
* [offset_start, offset_end)
|
||||
/* for ELF binaries (both executables and shared libraries), we are
|
||||
* given virtual address (absolute for executables, relative for
|
||||
* libraries) which should match address range of [seg_start, seg_end)
|
||||
*/
|
||||
for (i = 0, seg = segs; i < seg_cnt; i++, seg++) {
|
||||
if (seg->offset <= addr && addr < seg->offset + (seg->end - seg->start))
|
||||
if (seg->start <= virtaddr && virtaddr < seg->end)
|
||||
return seg;
|
||||
}
|
||||
} else {
|
||||
/* for binaries, address is absolute and thus should be within
|
||||
* absolute address range of [seg_start, seg_end)
|
||||
*/
|
||||
for (i = 0, seg = segs; i < seg_cnt; i++, seg++) {
|
||||
if (seg->start <= addr && addr < seg->end)
|
||||
return seg;
|
||||
}
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int parse_usdt_note(Elf *elf, const char *path, long base_addr,
|
||||
GElf_Nhdr *nhdr, const char *data, size_t name_off, size_t desc_off,
|
||||
static struct elf_seg *find_vma_seg(struct elf_seg *segs, size_t seg_cnt, long offset)
|
||||
{
|
||||
struct elf_seg *seg;
|
||||
int i;
|
||||
|
||||
/* for VMA segments from /proc/<pid>/maps file, provided "address" is
|
||||
* actually a file offset, so should be fall within logical
|
||||
* offset-based range of [offset_start, offset_end)
|
||||
*/
|
||||
for (i = 0, seg = segs; i < seg_cnt; i++, seg++) {
|
||||
if (seg->offset <= offset && offset < seg->offset + (seg->end - seg->start))
|
||||
return seg;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int parse_usdt_note(Elf *elf, const char *path, GElf_Nhdr *nhdr,
|
||||
const char *data, size_t name_off, size_t desc_off,
|
||||
struct usdt_note *usdt_note);
|
||||
|
||||
static int parse_usdt_spec(struct usdt_spec *spec, const struct usdt_note *note, __u64 usdt_cookie);
|
||||
@ -568,8 +573,8 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *
|
||||
const char *usdt_provider, const char *usdt_name, __u64 usdt_cookie,
|
||||
struct usdt_target **out_targets, size_t *out_target_cnt)
|
||||
{
|
||||
size_t off, name_off, desc_off, seg_cnt = 0, lib_seg_cnt = 0, target_cnt = 0;
|
||||
struct elf_seg *segs = NULL, *lib_segs = NULL;
|
||||
size_t off, name_off, desc_off, seg_cnt = 0, vma_seg_cnt = 0, target_cnt = 0;
|
||||
struct elf_seg *segs = NULL, *vma_segs = NULL;
|
||||
struct usdt_target *targets = NULL, *target;
|
||||
long base_addr = 0;
|
||||
Elf_Scn *notes_scn, *base_scn;
|
||||
@ -613,8 +618,7 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *
|
||||
struct elf_seg *seg = NULL;
|
||||
void *tmp;
|
||||
|
||||
err = parse_usdt_note(elf, path, base_addr, &nhdr,
|
||||
data->d_buf, name_off, desc_off, ¬e);
|
||||
err = parse_usdt_note(elf, path, &nhdr, data->d_buf, name_off, desc_off, ¬e);
|
||||
if (err)
|
||||
goto err_out;
|
||||
|
||||
@ -654,14 +658,12 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *
|
||||
usdt_rel_ip += base_addr - note.base_addr;
|
||||
}
|
||||
|
||||
if (ehdr.e_type == ET_EXEC) {
|
||||
/* When attaching uprobes (which what USDTs basically
|
||||
* are) kernel expects a relative IP to be specified,
|
||||
* so if we are attaching to an executable ELF binary
|
||||
* (i.e., not a shared library), we need to calculate
|
||||
* proper relative IP based on ELF's load address
|
||||
/* When attaching uprobes (which is what USDTs basically are)
|
||||
* kernel expects file offset to be specified, not a relative
|
||||
* virtual address, so we need to translate virtual address to
|
||||
* file offset, for both ET_EXEC and ET_DYN binaries.
|
||||
*/
|
||||
seg = find_elf_seg(segs, seg_cnt, usdt_abs_ip, false /* relative */);
|
||||
seg = find_elf_seg(segs, seg_cnt, usdt_abs_ip);
|
||||
if (!seg) {
|
||||
err = -ESRCH;
|
||||
pr_warn("usdt: failed to find ELF program segment for '%s:%s' in '%s' at IP 0x%lx\n",
|
||||
@ -675,9 +677,10 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *
|
||||
usdt_abs_ip);
|
||||
goto err_out;
|
||||
}
|
||||
/* translate from virtual address to file offset */
|
||||
usdt_rel_ip = usdt_abs_ip - seg->start + seg->offset;
|
||||
|
||||
usdt_rel_ip = usdt_abs_ip - (seg->start - seg->offset);
|
||||
} else if (!man->has_bpf_cookie) { /* ehdr.e_type == ET_DYN */
|
||||
if (ehdr.e_type == ET_DYN && !man->has_bpf_cookie) {
|
||||
/* If we don't have BPF cookie support but need to
|
||||
* attach to a shared library, we'll need to know and
|
||||
* record absolute addresses of attach points due to
|
||||
@ -697,9 +700,9 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
/* lib_segs are lazily initialized only if necessary */
|
||||
if (lib_seg_cnt == 0) {
|
||||
err = parse_lib_segs(pid, path, &lib_segs, &lib_seg_cnt);
|
||||
/* vma_segs are lazily initialized only if necessary */
|
||||
if (vma_seg_cnt == 0) {
|
||||
err = parse_vma_segs(pid, path, &vma_segs, &vma_seg_cnt);
|
||||
if (err) {
|
||||
pr_warn("usdt: failed to get memory segments in PID %d for shared library '%s': %d\n",
|
||||
pid, path, err);
|
||||
@ -707,7 +710,7 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *
|
||||
}
|
||||
}
|
||||
|
||||
seg = find_elf_seg(lib_segs, lib_seg_cnt, usdt_rel_ip, true /* relative */);
|
||||
seg = find_vma_seg(vma_segs, vma_seg_cnt, usdt_rel_ip);
|
||||
if (!seg) {
|
||||
err = -ESRCH;
|
||||
pr_warn("usdt: failed to find shared lib memory segment for '%s:%s' in '%s' at relative IP 0x%lx\n",
|
||||
@ -715,7 +718,7 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
usdt_abs_ip = seg->start + (usdt_rel_ip - seg->offset);
|
||||
usdt_abs_ip = seg->start - seg->offset + usdt_rel_ip;
|
||||
}
|
||||
|
||||
pr_debug("usdt: probe for '%s:%s' in %s '%s': addr 0x%lx base 0x%lx (resolved abs_ip 0x%lx rel_ip 0x%lx) args '%s' in segment [0x%lx, 0x%lx) at offset 0x%lx\n",
|
||||
@ -723,7 +726,7 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *
|
||||
note.loc_addr, note.base_addr, usdt_abs_ip, usdt_rel_ip, note.args,
|
||||
seg ? seg->start : 0, seg ? seg->end : 0, seg ? seg->offset : 0);
|
||||
|
||||
/* Adjust semaphore address to be a relative offset */
|
||||
/* Adjust semaphore address to be a file offset */
|
||||
if (note.sema_addr) {
|
||||
if (!man->has_sema_refcnt) {
|
||||
pr_warn("usdt: kernel doesn't support USDT semaphore refcounting for '%s:%s' in '%s'\n",
|
||||
@ -732,7 +735,7 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
seg = find_elf_seg(segs, seg_cnt, note.sema_addr, false /* relative */);
|
||||
seg = find_elf_seg(segs, seg_cnt, note.sema_addr);
|
||||
if (!seg) {
|
||||
err = -ESRCH;
|
||||
pr_warn("usdt: failed to find ELF loadable segment with semaphore of '%s:%s' in '%s' at 0x%lx\n",
|
||||
@ -747,7 +750,7 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
usdt_sema_off = note.sema_addr - (seg->start - seg->offset);
|
||||
usdt_sema_off = note.sema_addr - seg->start + seg->offset;
|
||||
|
||||
pr_debug("usdt: sema for '%s:%s' in %s '%s': addr 0x%lx base 0x%lx (resolved 0x%lx) in segment [0x%lx, 0x%lx] at offset 0x%lx\n",
|
||||
usdt_provider, usdt_name, ehdr.e_type == ET_EXEC ? "exec" : "lib ",
|
||||
@ -770,7 +773,7 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *
|
||||
target->rel_ip = usdt_rel_ip;
|
||||
target->sema_off = usdt_sema_off;
|
||||
|
||||
/* notes->args references strings from Elf itself, so they can
|
||||
/* notes.args references strings from Elf itself, so they can
|
||||
* be referenced safely until elf_end() call
|
||||
*/
|
||||
target->spec_str = note.args;
|
||||
@ -788,7 +791,7 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *
|
||||
|
||||
err_out:
|
||||
free(segs);
|
||||
free(lib_segs);
|
||||
free(vma_segs);
|
||||
if (err < 0)
|
||||
free(targets);
|
||||
return err;
|
||||
@ -1089,8 +1092,8 @@ err_out:
|
||||
/* Parse out USDT ELF note from '.note.stapsdt' section.
|
||||
* Logic inspired by perf's code.
|
||||
*/
|
||||
static int parse_usdt_note(Elf *elf, const char *path, long base_addr,
|
||||
GElf_Nhdr *nhdr, const char *data, size_t name_off, size_t desc_off,
|
||||
static int parse_usdt_note(Elf *elf, const char *path, GElf_Nhdr *nhdr,
|
||||
const char *data, size_t name_off, size_t desc_off,
|
||||
struct usdt_note *note)
|
||||
{
|
||||
const char *provider, *name, *args;
|
||||
|
1
tools/testing/selftests/bpf/.gitignore
vendored
1
tools/testing/selftests/bpf/.gitignore
vendored
@ -43,3 +43,4 @@ test_cpp
|
||||
*.tmp
|
||||
xdpxceiver
|
||||
xdp_redirect_multi
|
||||
xdp_synproxy
|
||||
|
@ -82,7 +82,7 @@ TEST_PROGS_EXTENDED := with_addr.sh \
|
||||
TEST_GEN_PROGS_EXTENDED = test_sock_addr test_skb_cgroup_id_user \
|
||||
flow_dissector_load test_flow_dissector test_tcp_check_syncookie_user \
|
||||
test_lirc_mode2_user xdping test_cpp runqslower bench bpf_testmod.ko \
|
||||
xdpxceiver xdp_redirect_multi
|
||||
xdpxceiver xdp_redirect_multi xdp_synproxy
|
||||
|
||||
TEST_CUSTOM_PROGS = $(OUTPUT)/urandom_read
|
||||
|
||||
@ -168,16 +168,25 @@ $(OUTPUT)/%:%.c
|
||||
$(call msg,BINARY,,$@)
|
||||
$(Q)$(LINK.c) $^ $(LDLIBS) -o $@
|
||||
|
||||
# LLVM's ld.lld doesn't support all the architectures, so use it only on x86
|
||||
ifeq ($(SRCARCH),x86)
|
||||
LLD := lld
|
||||
else
|
||||
LLD := ld
|
||||
endif
|
||||
|
||||
# Filter out -static for liburandom_read.so and its dependent targets so that static builds
|
||||
# do not fail. Static builds leave urandom_read relying on system-wide shared libraries.
|
||||
$(OUTPUT)/liburandom_read.so: urandom_read_lib1.c urandom_read_lib2.c
|
||||
$(call msg,LIB,,$@)
|
||||
$(Q)$(CC) $(filter-out -static,$(CFLAGS) $(LDFLAGS)) $^ $(LDLIBS) -fPIC -shared -o $@
|
||||
$(Q)$(CLANG) $(filter-out -static,$(CFLAGS) $(LDFLAGS)) $^ $(LDLIBS) \
|
||||
-fuse-ld=$(LLD) -Wl,-znoseparate-code -fPIC -shared -o $@
|
||||
|
||||
$(OUTPUT)/urandom_read: urandom_read.c urandom_read_aux.c $(OUTPUT)/liburandom_read.so
|
||||
$(call msg,BINARY,,$@)
|
||||
$(Q)$(CC) $(filter-out -static,$(CFLAGS) $(LDFLAGS)) $(filter %.c,$^) \
|
||||
$(Q)$(CLANG) $(filter-out -static,$(CFLAGS) $(LDFLAGS)) $(filter %.c,$^) \
|
||||
liburandom_read.so $(LDLIBS) \
|
||||
-fuse-ld=$(LLD) -Wl,-znoseparate-code \
|
||||
-Wl,-rpath=. -Wl,--build-id=sha1 -o $@
|
||||
|
||||
$(OUTPUT)/bpf_testmod.ko: $(VMLINUX_BTF) $(wildcard bpf_testmod/Makefile bpf_testmod/*.[ch])
|
||||
@ -502,6 +511,7 @@ TRUNNER_EXTRA_SOURCES := test_progs.c cgroup_helpers.c trace_helpers.c \
|
||||
cap_helpers.c
|
||||
TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko \
|
||||
$(OUTPUT)/liburandom_read.so \
|
||||
$(OUTPUT)/xdp_synproxy \
|
||||
ima_setup.sh \
|
||||
$(wildcard progs/btf_dump_test_case_*.c)
|
||||
TRUNNER_BPF_BUILD_RULE := CLANG_BPF_BUILD_RULE
|
||||
@ -560,6 +570,7 @@ $(OUTPUT)/bench_ringbufs.o: $(OUTPUT)/ringbuf_bench.skel.h \
|
||||
$(OUTPUT)/bench_bloom_filter_map.o: $(OUTPUT)/bloom_filter_bench.skel.h
|
||||
$(OUTPUT)/bench_bpf_loop.o: $(OUTPUT)/bpf_loop_bench.skel.h
|
||||
$(OUTPUT)/bench_strncmp.o: $(OUTPUT)/strncmp_bench.skel.h
|
||||
$(OUTPUT)/bench_bpf_hashmap_full_update.o: $(OUTPUT)/bpf_hashmap_full_update_bench.skel.h
|
||||
$(OUTPUT)/bench.o: bench.h testing_helpers.h $(BPFOBJ)
|
||||
$(OUTPUT)/bench: LDLIBS += -lm
|
||||
$(OUTPUT)/bench: $(OUTPUT)/bench.o \
|
||||
@ -571,13 +582,16 @@ $(OUTPUT)/bench: $(OUTPUT)/bench.o \
|
||||
$(OUTPUT)/bench_ringbufs.o \
|
||||
$(OUTPUT)/bench_bloom_filter_map.o \
|
||||
$(OUTPUT)/bench_bpf_loop.o \
|
||||
$(OUTPUT)/bench_strncmp.o
|
||||
$(OUTPUT)/bench_strncmp.o \
|
||||
$(OUTPUT)/bench_bpf_hashmap_full_update.o
|
||||
$(call msg,BINARY,,$@)
|
||||
$(Q)$(CC) $(CFLAGS) $(LDFLAGS) $(filter %.a %.o,$^) $(LDLIBS) -o $@
|
||||
|
||||
EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \
|
||||
prog_tests/tests.h map_tests/tests.h verifier/tests.h \
|
||||
feature bpftool \
|
||||
$(addprefix $(OUTPUT)/,*.o *.skel.h *.lskel.h *.subskel.h no_alu32 bpf_gcc bpf_testmod.ko)
|
||||
$(addprefix $(OUTPUT)/,*.o *.skel.h *.lskel.h *.subskel.h \
|
||||
no_alu32 bpf_gcc bpf_testmod.ko \
|
||||
liburandom_read.so)
|
||||
|
||||
.PHONY: docs docs-clean
|
||||
|
@ -396,6 +396,7 @@ extern const struct bench bench_hashmap_with_bloom;
|
||||
extern const struct bench bench_bpf_loop;
|
||||
extern const struct bench bench_strncmp_no_helper;
|
||||
extern const struct bench bench_strncmp_helper;
|
||||
extern const struct bench bench_bpf_hashmap_full_update;
|
||||
|
||||
static const struct bench *benchs[] = {
|
||||
&bench_count_global,
|
||||
@ -430,6 +431,7 @@ static const struct bench *benchs[] = {
|
||||
&bench_bpf_loop,
|
||||
&bench_strncmp_no_helper,
|
||||
&bench_strncmp_helper,
|
||||
&bench_bpf_hashmap_full_update,
|
||||
};
|
||||
|
||||
static void setup_benchmark()
|
||||
|
@ -0,0 +1,96 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright (c) 2022 Bytedance */
|
||||
|
||||
#include <argp.h>
|
||||
#include "bench.h"
|
||||
#include "bpf_hashmap_full_update_bench.skel.h"
|
||||
#include "bpf_util.h"
|
||||
|
||||
/* BPF triggering benchmarks */
|
||||
static struct ctx {
|
||||
struct bpf_hashmap_full_update_bench *skel;
|
||||
} ctx;
|
||||
|
||||
#define MAX_LOOP_NUM 10000
|
||||
|
||||
static void validate(void)
|
||||
{
|
||||
if (env.consumer_cnt != 1) {
|
||||
fprintf(stderr, "benchmark doesn't support multi-consumer!\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
static void *producer(void *input)
|
||||
{
|
||||
while (true) {
|
||||
/* trigger the bpf program */
|
||||
syscall(__NR_getpgid);
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void *consumer(void *input)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void measure(struct bench_res *res)
|
||||
{
|
||||
}
|
||||
|
||||
static void setup(void)
|
||||
{
|
||||
struct bpf_link *link;
|
||||
int map_fd, i, max_entries;
|
||||
|
||||
setup_libbpf();
|
||||
|
||||
ctx.skel = bpf_hashmap_full_update_bench__open_and_load();
|
||||
if (!ctx.skel) {
|
||||
fprintf(stderr, "failed to open skeleton\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
ctx.skel->bss->nr_loops = MAX_LOOP_NUM;
|
||||
|
||||
link = bpf_program__attach(ctx.skel->progs.benchmark);
|
||||
if (!link) {
|
||||
fprintf(stderr, "failed to attach program!\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* fill hash_map */
|
||||
map_fd = bpf_map__fd(ctx.skel->maps.hash_map_bench);
|
||||
max_entries = bpf_map__max_entries(ctx.skel->maps.hash_map_bench);
|
||||
for (i = 0; i < max_entries; i++)
|
||||
bpf_map_update_elem(map_fd, &i, &i, BPF_ANY);
|
||||
}
|
||||
|
||||
void hashmap_report_final(struct bench_res res[], int res_cnt)
|
||||
{
|
||||
unsigned int nr_cpus = bpf_num_possible_cpus();
|
||||
int i;
|
||||
|
||||
for (i = 0; i < nr_cpus; i++) {
|
||||
u64 time = ctx.skel->bss->percpu_time[i];
|
||||
|
||||
if (!time)
|
||||
continue;
|
||||
|
||||
printf("%d:hash_map_full_perf %lld events per sec\n",
|
||||
i, ctx.skel->bss->nr_loops * 1000000000ll / time);
|
||||
}
|
||||
}
|
||||
|
||||
const struct bench bench_bpf_hashmap_full_update = {
|
||||
.name = "bpf-hashmap-ful-update",
|
||||
.validate = validate,
|
||||
.setup = setup,
|
||||
.producer_thread = producer,
|
||||
.consumer_thread = consumer,
|
||||
.measure = measure,
|
||||
.report_progress = NULL,
|
||||
.report_final = hashmap_report_final,
|
||||
};
|
11
tools/testing/selftests/bpf/benchs/run_bench_bpf_hashmap_full_update.sh
Executable file
11
tools/testing/selftests/bpf/benchs/run_bench_bpf_hashmap_full_update.sh
Executable file
@ -0,0 +1,11 @@
|
||||
#!/bin/bash
|
||||
# SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
source ./benchs/run_common.sh
|
||||
|
||||
set -eufo pipefail
|
||||
|
||||
nr_threads=`expr $(cat /proc/cpuinfo | grep "processor"| wc -l) - 1`
|
||||
summary=$($RUN_BENCH -p $nr_threads bpf-hashmap-ful-update)
|
||||
printf "$summary"
|
||||
printf "\n"
|
@ -26,11 +26,12 @@ static const char * const btf_kind_str_mapping[] = {
|
||||
[BTF_KIND_FLOAT] = "FLOAT",
|
||||
[BTF_KIND_DECL_TAG] = "DECL_TAG",
|
||||
[BTF_KIND_TYPE_TAG] = "TYPE_TAG",
|
||||
[BTF_KIND_ENUM64] = "ENUM64",
|
||||
};
|
||||
|
||||
static const char *btf_kind_str(__u16 kind)
|
||||
{
|
||||
if (kind > BTF_KIND_TYPE_TAG)
|
||||
if (kind > BTF_KIND_ENUM64)
|
||||
return "UNKNOWN";
|
||||
return btf_kind_str_mapping[kind];
|
||||
}
|
||||
@ -139,14 +140,32 @@ int fprintf_btf_type_raw(FILE *out, const struct btf *btf, __u32 id)
|
||||
}
|
||||
case BTF_KIND_ENUM: {
|
||||
const struct btf_enum *v = btf_enum(t);
|
||||
const char *fmt_str;
|
||||
|
||||
fprintf(out, " size=%u vlen=%u", t->size, vlen);
|
||||
fmt_str = btf_kflag(t) ? "\n\t'%s' val=%d" : "\n\t'%s' val=%u";
|
||||
fprintf(out, " encoding=%s size=%u vlen=%u",
|
||||
btf_kflag(t) ? "SIGNED" : "UNSIGNED", t->size, vlen);
|
||||
for (i = 0; i < vlen; i++, v++) {
|
||||
fprintf(out, "\n\t'%s' val=%u",
|
||||
fprintf(out, fmt_str,
|
||||
btf_str(btf, v->name_off), v->val);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case BTF_KIND_ENUM64: {
|
||||
const struct btf_enum64 *v = btf_enum64(t);
|
||||
const char *fmt_str;
|
||||
|
||||
fmt_str = btf_kflag(t) ? "\n\t'%s' val=%lld" : "\n\t'%s' val=%llu";
|
||||
|
||||
fprintf(out, " encoding=%s size=%u vlen=%u",
|
||||
btf_kflag(t) ? "SIGNED" : "UNSIGNED", t->size, vlen);
|
||||
for (i = 0; i < vlen; i++, v++) {
|
||||
fprintf(out, fmt_str,
|
||||
btf_str(btf, v->name_off),
|
||||
((__u64)v->val_hi32 << 32) | v->val_lo32);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case BTF_KIND_FWD:
|
||||
fprintf(out, " fwd_kind=%s", btf_kflag(t) ? "union" : "struct");
|
||||
break;
|
||||
|
@ -17,6 +17,14 @@ static void trigger_func2(void)
|
||||
asm volatile ("");
|
||||
}
|
||||
|
||||
/* attach point for byname sleepable uprobe */
|
||||
static void trigger_func3(void)
|
||||
{
|
||||
asm volatile ("");
|
||||
}
|
||||
|
||||
static char test_data[] = "test_data";
|
||||
|
||||
void test_attach_probe(void)
|
||||
{
|
||||
DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
|
||||
@ -49,9 +57,17 @@ void test_attach_probe(void)
|
||||
if (!ASSERT_GE(ref_ctr_offset, 0, "ref_ctr_offset"))
|
||||
return;
|
||||
|
||||
skel = test_attach_probe__open_and_load();
|
||||
skel = test_attach_probe__open();
|
||||
if (!ASSERT_OK_PTR(skel, "skel_open"))
|
||||
return;
|
||||
|
||||
/* sleepable kprobe test case needs flags set before loading */
|
||||
if (!ASSERT_OK(bpf_program__set_flags(skel->progs.handle_kprobe_sleepable,
|
||||
BPF_F_SLEEPABLE), "kprobe_sleepable_flags"))
|
||||
goto cleanup;
|
||||
|
||||
if (!ASSERT_OK(test_attach_probe__load(skel), "skel_load"))
|
||||
goto cleanup;
|
||||
if (!ASSERT_OK_PTR(skel->bss, "check_bss"))
|
||||
goto cleanup;
|
||||
|
||||
@ -151,6 +167,30 @@ void test_attach_probe(void)
|
||||
if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname2, "attach_uretprobe_byname2"))
|
||||
goto cleanup;
|
||||
|
||||
/* sleepable kprobes should not attach successfully */
|
||||
skel->links.handle_kprobe_sleepable = bpf_program__attach(skel->progs.handle_kprobe_sleepable);
|
||||
if (!ASSERT_ERR_PTR(skel->links.handle_kprobe_sleepable, "attach_kprobe_sleepable"))
|
||||
goto cleanup;
|
||||
|
||||
/* test sleepable uprobe and uretprobe variants */
|
||||
skel->links.handle_uprobe_byname3_sleepable = bpf_program__attach(skel->progs.handle_uprobe_byname3_sleepable);
|
||||
if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname3_sleepable, "attach_uprobe_byname3_sleepable"))
|
||||
goto cleanup;
|
||||
|
||||
skel->links.handle_uprobe_byname3 = bpf_program__attach(skel->progs.handle_uprobe_byname3);
|
||||
if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname3, "attach_uprobe_byname3"))
|
||||
goto cleanup;
|
||||
|
||||
skel->links.handle_uretprobe_byname3_sleepable = bpf_program__attach(skel->progs.handle_uretprobe_byname3_sleepable);
|
||||
if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname3_sleepable, "attach_uretprobe_byname3_sleepable"))
|
||||
goto cleanup;
|
||||
|
||||
skel->links.handle_uretprobe_byname3 = bpf_program__attach(skel->progs.handle_uretprobe_byname3);
|
||||
if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname3, "attach_uretprobe_byname3"))
|
||||
goto cleanup;
|
||||
|
||||
skel->bss->user_ptr = test_data;
|
||||
|
||||
/* trigger & validate kprobe && kretprobe */
|
||||
usleep(1);
|
||||
|
||||
@ -164,6 +204,9 @@ void test_attach_probe(void)
|
||||
/* trigger & validate uprobe attached by name */
|
||||
trigger_func2();
|
||||
|
||||
/* trigger & validate sleepable uprobe attached by name */
|
||||
trigger_func3();
|
||||
|
||||
ASSERT_EQ(skel->bss->kprobe_res, 1, "check_kprobe_res");
|
||||
ASSERT_EQ(skel->bss->kprobe2_res, 11, "check_kprobe_auto_res");
|
||||
ASSERT_EQ(skel->bss->kretprobe_res, 2, "check_kretprobe_res");
|
||||
@ -174,6 +217,10 @@ void test_attach_probe(void)
|
||||
ASSERT_EQ(skel->bss->uretprobe_byname_res, 6, "check_uretprobe_byname_res");
|
||||
ASSERT_EQ(skel->bss->uprobe_byname2_res, 7, "check_uprobe_byname2_res");
|
||||
ASSERT_EQ(skel->bss->uretprobe_byname2_res, 8, "check_uretprobe_byname2_res");
|
||||
ASSERT_EQ(skel->bss->uprobe_byname3_sleepable_res, 9, "check_uprobe_byname3_sleepable_res");
|
||||
ASSERT_EQ(skel->bss->uprobe_byname3_res, 10, "check_uprobe_byname3_res");
|
||||
ASSERT_EQ(skel->bss->uretprobe_byname3_sleepable_res, 11, "check_uretprobe_byname3_sleepable_res");
|
||||
ASSERT_EQ(skel->bss->uretprobe_byname3_res, 12, "check_uretprobe_byname3_res");
|
||||
|
||||
cleanup:
|
||||
test_attach_probe__destroy(skel);
|
||||
|
@ -2896,26 +2896,6 @@ static struct btf_raw_test raw_tests[] = {
|
||||
.err_str = "Invalid btf_info kind_flag",
|
||||
},
|
||||
|
||||
{
|
||||
.descr = "invalid enum kind_flag",
|
||||
.raw_types = {
|
||||
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
|
||||
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ENUM, 1, 1), 4), /* [2] */
|
||||
BTF_ENUM_ENC(NAME_TBD, 0),
|
||||
BTF_END_RAW,
|
||||
},
|
||||
BTF_STR_SEC("\0A"),
|
||||
.map_type = BPF_MAP_TYPE_ARRAY,
|
||||
.map_name = "enum_type_check_btf",
|
||||
.key_size = sizeof(int),
|
||||
.value_size = sizeof(int),
|
||||
.key_type_id = 1,
|
||||
.value_type_id = 1,
|
||||
.max_entries = 4,
|
||||
.btf_load_err = true,
|
||||
.err_str = "Invalid btf_info kind_flag",
|
||||
},
|
||||
|
||||
{
|
||||
.descr = "valid fwd kind_flag",
|
||||
.raw_types = {
|
||||
@ -4072,6 +4052,42 @@ static struct btf_raw_test raw_tests[] = {
|
||||
.btf_load_err = true,
|
||||
.err_str = "Type tags don't precede modifiers",
|
||||
},
|
||||
{
|
||||
.descr = "enum64 test #1, unsigned, size 8",
|
||||
.raw_types = {
|
||||
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
|
||||
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 2), 8), /* [2] */
|
||||
BTF_ENUM64_ENC(NAME_TBD, 0, 0),
|
||||
BTF_ENUM64_ENC(NAME_TBD, 1, 1),
|
||||
BTF_END_RAW,
|
||||
},
|
||||
BTF_STR_SEC("\0a\0b\0c"),
|
||||
.map_type = BPF_MAP_TYPE_ARRAY,
|
||||
.map_name = "tag_type_check_btf",
|
||||
.key_size = sizeof(int),
|
||||
.value_size = 8,
|
||||
.key_type_id = 1,
|
||||
.value_type_id = 2,
|
||||
.max_entries = 1,
|
||||
},
|
||||
{
|
||||
.descr = "enum64 test #2, signed, size 4",
|
||||
.raw_types = {
|
||||
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
|
||||
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM64, 1, 2), 4), /* [2] */
|
||||
BTF_ENUM64_ENC(NAME_TBD, -1, 0),
|
||||
BTF_ENUM64_ENC(NAME_TBD, 1, 0),
|
||||
BTF_END_RAW,
|
||||
},
|
||||
BTF_STR_SEC("\0a\0b\0c"),
|
||||
.map_type = BPF_MAP_TYPE_ARRAY,
|
||||
.map_name = "tag_type_check_btf",
|
||||
.key_size = sizeof(int),
|
||||
.value_size = 4,
|
||||
.key_type_id = 1,
|
||||
.value_type_id = 2,
|
||||
.max_entries = 1,
|
||||
},
|
||||
|
||||
}; /* struct btf_raw_test raw_tests[] */
|
||||
|
||||
@ -7000,9 +7016,12 @@ static struct btf_dedup_test dedup_tests[] = {
|
||||
BTF_DECL_TAG_ENC(NAME_TBD, 13, 1), /* [16] decl_tag */
|
||||
BTF_DECL_TAG_ENC(NAME_TBD, 7, -1), /* [17] decl_tag */
|
||||
BTF_TYPE_TAG_ENC(NAME_TBD, 8), /* [18] type_tag */
|
||||
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 2), 8), /* [19] enum64 */
|
||||
BTF_ENUM64_ENC(NAME_TBD, 0, 0),
|
||||
BTF_ENUM64_ENC(NAME_TBD, 1, 1),
|
||||
BTF_END_RAW,
|
||||
},
|
||||
BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P\0Q\0R"),
|
||||
BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P\0Q\0R\0S\0T\0U"),
|
||||
},
|
||||
.expect = {
|
||||
.raw_types = {
|
||||
@ -7030,9 +7049,12 @@ static struct btf_dedup_test dedup_tests[] = {
|
||||
BTF_DECL_TAG_ENC(NAME_TBD, 13, 1), /* [16] decl_tag */
|
||||
BTF_DECL_TAG_ENC(NAME_TBD, 7, -1), /* [17] decl_tag */
|
||||
BTF_TYPE_TAG_ENC(NAME_TBD, 8), /* [18] type_tag */
|
||||
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 2), 8), /* [19] enum64 */
|
||||
BTF_ENUM64_ENC(NAME_TBD, 0, 0),
|
||||
BTF_ENUM64_ENC(NAME_TBD, 1, 1),
|
||||
BTF_END_RAW,
|
||||
},
|
||||
BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P\0Q\0R"),
|
||||
BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P\0Q\0R\0S\0T\0U"),
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -7493,6 +7515,91 @@ static struct btf_dedup_test dedup_tests[] = {
|
||||
BTF_STR_SEC("\0tag1\0t\0m"),
|
||||
},
|
||||
},
|
||||
{
|
||||
.descr = "dedup: enum64, standalone",
|
||||
.input = {
|
||||
.raw_types = {
|
||||
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
|
||||
BTF_ENUM64_ENC(NAME_NTH(2), 1, 123),
|
||||
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
|
||||
BTF_ENUM64_ENC(NAME_NTH(2), 1, 123),
|
||||
BTF_END_RAW,
|
||||
},
|
||||
BTF_STR_SEC("\0e1\0e1_val"),
|
||||
},
|
||||
.expect = {
|
||||
.raw_types = {
|
||||
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
|
||||
BTF_ENUM64_ENC(NAME_NTH(2), 1, 123),
|
||||
BTF_END_RAW,
|
||||
},
|
||||
BTF_STR_SEC("\0e1\0e1_val"),
|
||||
},
|
||||
},
|
||||
{
|
||||
.descr = "dedup: enum64, fwd resolution",
|
||||
.input = {
|
||||
.raw_types = {
|
||||
/* [1] fwd enum64 'e1' before full enum */
|
||||
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8),
|
||||
/* [2] full enum64 'e1' after fwd */
|
||||
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
|
||||
BTF_ENUM64_ENC(NAME_NTH(2), 1, 123),
|
||||
/* [3] full enum64 'e2' before fwd */
|
||||
BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
|
||||
BTF_ENUM64_ENC(NAME_NTH(4), 0, 456),
|
||||
/* [4] fwd enum64 'e2' after full enum */
|
||||
BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8),
|
||||
/* [5] incompatible full enum64 with different value */
|
||||
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
|
||||
BTF_ENUM64_ENC(NAME_NTH(2), 0, 321),
|
||||
BTF_END_RAW,
|
||||
},
|
||||
BTF_STR_SEC("\0e1\0e1_val\0e2\0e2_val"),
|
||||
},
|
||||
.expect = {
|
||||
.raw_types = {
|
||||
/* [1] full enum64 'e1' */
|
||||
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
|
||||
BTF_ENUM64_ENC(NAME_NTH(2), 1, 123),
|
||||
/* [2] full enum64 'e2' */
|
||||
BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
|
||||
BTF_ENUM64_ENC(NAME_NTH(4), 0, 456),
|
||||
/* [3] incompatible full enum64 with different value */
|
||||
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
|
||||
BTF_ENUM64_ENC(NAME_NTH(2), 0, 321),
|
||||
BTF_END_RAW,
|
||||
},
|
||||
BTF_STR_SEC("\0e1\0e1_val\0e2\0e2_val"),
|
||||
},
|
||||
},
|
||||
{
|
||||
.descr = "dedup: enum and enum64, no dedup",
|
||||
.input = {
|
||||
.raw_types = {
|
||||
/* [1] enum 'e1' */
|
||||
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
|
||||
BTF_ENUM_ENC(NAME_NTH(2), 1),
|
||||
/* [2] enum64 'e1' */
|
||||
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 4),
|
||||
BTF_ENUM64_ENC(NAME_NTH(2), 1, 0),
|
||||
BTF_END_RAW,
|
||||
},
|
||||
BTF_STR_SEC("\0e1\0e1_val"),
|
||||
},
|
||||
.expect = {
|
||||
.raw_types = {
|
||||
/* [1] enum 'e1' */
|
||||
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
|
||||
BTF_ENUM_ENC(NAME_NTH(2), 1),
|
||||
/* [2] enum64 'e1' */
|
||||
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 4),
|
||||
BTF_ENUM64_ENC(NAME_NTH(2), 1, 0),
|
||||
BTF_END_RAW,
|
||||
},
|
||||
BTF_STR_SEC("\0e1\0e1_val"),
|
||||
},
|
||||
},
|
||||
|
||||
};
|
||||
|
||||
@ -7517,6 +7624,8 @@ static int btf_type_size(const struct btf_type *t)
|
||||
return base_size + sizeof(__u32);
|
||||
case BTF_KIND_ENUM:
|
||||
return base_size + vlen * sizeof(struct btf_enum);
|
||||
case BTF_KIND_ENUM64:
|
||||
return base_size + vlen * sizeof(struct btf_enum64);
|
||||
case BTF_KIND_ARRAY:
|
||||
return base_size + sizeof(struct btf_array);
|
||||
case BTF_KIND_STRUCT:
|
||||
|
@ -9,6 +9,7 @@ static void gen_btf(struct btf *btf)
|
||||
const struct btf_var_secinfo *vi;
|
||||
const struct btf_type *t;
|
||||
const struct btf_member *m;
|
||||
const struct btf_enum64 *v64;
|
||||
const struct btf_enum *v;
|
||||
const struct btf_param *p;
|
||||
int id, err, str_off;
|
||||
@ -171,7 +172,7 @@ static void gen_btf(struct btf *btf)
|
||||
ASSERT_STREQ(btf__str_by_offset(btf, v->name_off), "v2", "v2_name");
|
||||
ASSERT_EQ(v->val, 2, "v2_val");
|
||||
ASSERT_STREQ(btf_type_raw_dump(btf, 9),
|
||||
"[9] ENUM 'e1' size=4 vlen=2\n"
|
||||
"[9] ENUM 'e1' encoding=UNSIGNED size=4 vlen=2\n"
|
||||
"\t'v1' val=1\n"
|
||||
"\t'v2' val=2", "raw_dump");
|
||||
|
||||
@ -202,7 +203,7 @@ static void gen_btf(struct btf *btf)
|
||||
ASSERT_EQ(btf_vlen(t), 0, "enum_fwd_kind");
|
||||
ASSERT_EQ(t->size, 4, "enum_fwd_sz");
|
||||
ASSERT_STREQ(btf_type_raw_dump(btf, 12),
|
||||
"[12] ENUM 'enum_fwd' size=4 vlen=0", "raw_dump");
|
||||
"[12] ENUM 'enum_fwd' encoding=UNSIGNED size=4 vlen=0", "raw_dump");
|
||||
|
||||
/* TYPEDEF */
|
||||
id = btf__add_typedef(btf, "typedef1", 1);
|
||||
@ -307,6 +308,48 @@ static void gen_btf(struct btf *btf)
|
||||
ASSERT_EQ(t->type, 1, "tag_type");
|
||||
ASSERT_STREQ(btf_type_raw_dump(btf, 20),
|
||||
"[20] TYPE_TAG 'tag1' type_id=1", "raw_dump");
|
||||
|
||||
/* ENUM64 */
|
||||
id = btf__add_enum64(btf, "e1", 8, true);
|
||||
ASSERT_EQ(id, 21, "enum64_id");
|
||||
err = btf__add_enum64_value(btf, "v1", -1);
|
||||
ASSERT_OK(err, "v1_res");
|
||||
err = btf__add_enum64_value(btf, "v2", 0x123456789); /* 4886718345 */
|
||||
ASSERT_OK(err, "v2_res");
|
||||
t = btf__type_by_id(btf, 21);
|
||||
ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "e1", "enum64_name");
|
||||
ASSERT_EQ(btf_kind(t), BTF_KIND_ENUM64, "enum64_kind");
|
||||
ASSERT_EQ(btf_vlen(t), 2, "enum64_vlen");
|
||||
ASSERT_EQ(t->size, 8, "enum64_sz");
|
||||
v64 = btf_enum64(t) + 0;
|
||||
ASSERT_STREQ(btf__str_by_offset(btf, v64->name_off), "v1", "v1_name");
|
||||
ASSERT_EQ(v64->val_hi32, 0xffffffff, "v1_val");
|
||||
ASSERT_EQ(v64->val_lo32, 0xffffffff, "v1_val");
|
||||
v64 = btf_enum64(t) + 1;
|
||||
ASSERT_STREQ(btf__str_by_offset(btf, v64->name_off), "v2", "v2_name");
|
||||
ASSERT_EQ(v64->val_hi32, 0x1, "v2_val");
|
||||
ASSERT_EQ(v64->val_lo32, 0x23456789, "v2_val");
|
||||
ASSERT_STREQ(btf_type_raw_dump(btf, 21),
|
||||
"[21] ENUM64 'e1' encoding=SIGNED size=8 vlen=2\n"
|
||||
"\t'v1' val=-1\n"
|
||||
"\t'v2' val=4886718345", "raw_dump");
|
||||
|
||||
id = btf__add_enum64(btf, "e1", 8, false);
|
||||
ASSERT_EQ(id, 22, "enum64_id");
|
||||
err = btf__add_enum64_value(btf, "v1", 0xffffffffFFFFFFFF); /* 18446744073709551615 */
|
||||
ASSERT_OK(err, "v1_res");
|
||||
t = btf__type_by_id(btf, 22);
|
||||
ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "e1", "enum64_name");
|
||||
ASSERT_EQ(btf_kind(t), BTF_KIND_ENUM64, "enum64_kind");
|
||||
ASSERT_EQ(btf_vlen(t), 1, "enum64_vlen");
|
||||
ASSERT_EQ(t->size, 8, "enum64_sz");
|
||||
v64 = btf_enum64(t) + 0;
|
||||
ASSERT_STREQ(btf__str_by_offset(btf, v64->name_off), "v1", "v1_name");
|
||||
ASSERT_EQ(v64->val_hi32, 0xffffffff, "v1_val");
|
||||
ASSERT_EQ(v64->val_lo32, 0xffffffff, "v1_val");
|
||||
ASSERT_STREQ(btf_type_raw_dump(btf, 22),
|
||||
"[22] ENUM64 'e1' encoding=UNSIGNED size=8 vlen=1\n"
|
||||
"\t'v1' val=18446744073709551615", "raw_dump");
|
||||
}
|
||||
|
||||
static void test_btf_add()
|
||||
@ -332,12 +375,12 @@ static void test_btf_add()
|
||||
"\t'f2' type_id=1 bits_offset=32 bitfield_size=16",
|
||||
"[8] UNION 'u1' size=8 vlen=1\n"
|
||||
"\t'f1' type_id=1 bits_offset=0 bitfield_size=16",
|
||||
"[9] ENUM 'e1' size=4 vlen=2\n"
|
||||
"[9] ENUM 'e1' encoding=UNSIGNED size=4 vlen=2\n"
|
||||
"\t'v1' val=1\n"
|
||||
"\t'v2' val=2",
|
||||
"[10] FWD 'struct_fwd' fwd_kind=struct",
|
||||
"[11] FWD 'union_fwd' fwd_kind=union",
|
||||
"[12] ENUM 'enum_fwd' size=4 vlen=0",
|
||||
"[12] ENUM 'enum_fwd' encoding=UNSIGNED size=4 vlen=0",
|
||||
"[13] TYPEDEF 'typedef1' type_id=1",
|
||||
"[14] FUNC 'func1' type_id=15 linkage=global",
|
||||
"[15] FUNC_PROTO '(anon)' ret_type_id=1 vlen=2\n"
|
||||
@ -348,7 +391,12 @@ static void test_btf_add()
|
||||
"\ttype_id=1 offset=4 size=8",
|
||||
"[18] DECL_TAG 'tag1' type_id=16 component_idx=-1",
|
||||
"[19] DECL_TAG 'tag2' type_id=14 component_idx=1",
|
||||
"[20] TYPE_TAG 'tag1' type_id=1");
|
||||
"[20] TYPE_TAG 'tag1' type_id=1",
|
||||
"[21] ENUM64 'e1' encoding=SIGNED size=8 vlen=2\n"
|
||||
"\t'v1' val=-1\n"
|
||||
"\t'v2' val=4886718345",
|
||||
"[22] ENUM64 'e1' encoding=UNSIGNED size=8 vlen=1\n"
|
||||
"\t'v1' val=18446744073709551615");
|
||||
|
||||
btf__free(btf);
|
||||
}
|
||||
@ -370,7 +418,7 @@ static void test_btf_add_btf()
|
||||
gen_btf(btf2);
|
||||
|
||||
id = btf__add_btf(btf1, btf2);
|
||||
if (!ASSERT_EQ(id, 21, "id"))
|
||||
if (!ASSERT_EQ(id, 23, "id"))
|
||||
goto cleanup;
|
||||
|
||||
VALIDATE_RAW_BTF(
|
||||
@ -386,12 +434,12 @@ static void test_btf_add_btf()
|
||||
"\t'f2' type_id=1 bits_offset=32 bitfield_size=16",
|
||||
"[8] UNION 'u1' size=8 vlen=1\n"
|
||||
"\t'f1' type_id=1 bits_offset=0 bitfield_size=16",
|
||||
"[9] ENUM 'e1' size=4 vlen=2\n"
|
||||
"[9] ENUM 'e1' encoding=UNSIGNED size=4 vlen=2\n"
|
||||
"\t'v1' val=1\n"
|
||||
"\t'v2' val=2",
|
||||
"[10] FWD 'struct_fwd' fwd_kind=struct",
|
||||
"[11] FWD 'union_fwd' fwd_kind=union",
|
||||
"[12] ENUM 'enum_fwd' size=4 vlen=0",
|
||||
"[12] ENUM 'enum_fwd' encoding=UNSIGNED size=4 vlen=0",
|
||||
"[13] TYPEDEF 'typedef1' type_id=1",
|
||||
"[14] FUNC 'func1' type_id=15 linkage=global",
|
||||
"[15] FUNC_PROTO '(anon)' ret_type_id=1 vlen=2\n"
|
||||
@ -403,36 +451,46 @@ static void test_btf_add_btf()
|
||||
"[18] DECL_TAG 'tag1' type_id=16 component_idx=-1",
|
||||
"[19] DECL_TAG 'tag2' type_id=14 component_idx=1",
|
||||
"[20] TYPE_TAG 'tag1' type_id=1",
|
||||
"[21] ENUM64 'e1' encoding=SIGNED size=8 vlen=2\n"
|
||||
"\t'v1' val=-1\n"
|
||||
"\t'v2' val=4886718345",
|
||||
"[22] ENUM64 'e1' encoding=UNSIGNED size=8 vlen=1\n"
|
||||
"\t'v1' val=18446744073709551615",
|
||||
|
||||
/* types appended from the second BTF */
|
||||
"[21] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
|
||||
"[22] PTR '(anon)' type_id=21",
|
||||
"[23] CONST '(anon)' type_id=25",
|
||||
"[24] VOLATILE '(anon)' type_id=23",
|
||||
"[25] RESTRICT '(anon)' type_id=24",
|
||||
"[26] ARRAY '(anon)' type_id=22 index_type_id=21 nr_elems=10",
|
||||
"[27] STRUCT 's1' size=8 vlen=2\n"
|
||||
"\t'f1' type_id=21 bits_offset=0\n"
|
||||
"\t'f2' type_id=21 bits_offset=32 bitfield_size=16",
|
||||
"[28] UNION 'u1' size=8 vlen=1\n"
|
||||
"\t'f1' type_id=21 bits_offset=0 bitfield_size=16",
|
||||
"[29] ENUM 'e1' size=4 vlen=2\n"
|
||||
"[23] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
|
||||
"[24] PTR '(anon)' type_id=23",
|
||||
"[25] CONST '(anon)' type_id=27",
|
||||
"[26] VOLATILE '(anon)' type_id=25",
|
||||
"[27] RESTRICT '(anon)' type_id=26",
|
||||
"[28] ARRAY '(anon)' type_id=24 index_type_id=23 nr_elems=10",
|
||||
"[29] STRUCT 's1' size=8 vlen=2\n"
|
||||
"\t'f1' type_id=23 bits_offset=0\n"
|
||||
"\t'f2' type_id=23 bits_offset=32 bitfield_size=16",
|
||||
"[30] UNION 'u1' size=8 vlen=1\n"
|
||||
"\t'f1' type_id=23 bits_offset=0 bitfield_size=16",
|
||||
"[31] ENUM 'e1' encoding=UNSIGNED size=4 vlen=2\n"
|
||||
"\t'v1' val=1\n"
|
||||
"\t'v2' val=2",
|
||||
"[30] FWD 'struct_fwd' fwd_kind=struct",
|
||||
"[31] FWD 'union_fwd' fwd_kind=union",
|
||||
"[32] ENUM 'enum_fwd' size=4 vlen=0",
|
||||
"[33] TYPEDEF 'typedef1' type_id=21",
|
||||
"[34] FUNC 'func1' type_id=35 linkage=global",
|
||||
"[35] FUNC_PROTO '(anon)' ret_type_id=21 vlen=2\n"
|
||||
"\t'p1' type_id=21\n"
|
||||
"\t'p2' type_id=22",
|
||||
"[36] VAR 'var1' type_id=21, linkage=global-alloc",
|
||||
"[37] DATASEC 'datasec1' size=12 vlen=1\n"
|
||||
"\ttype_id=21 offset=4 size=8",
|
||||
"[38] DECL_TAG 'tag1' type_id=36 component_idx=-1",
|
||||
"[39] DECL_TAG 'tag2' type_id=34 component_idx=1",
|
||||
"[40] TYPE_TAG 'tag1' type_id=21");
|
||||
"[32] FWD 'struct_fwd' fwd_kind=struct",
|
||||
"[33] FWD 'union_fwd' fwd_kind=union",
|
||||
"[34] ENUM 'enum_fwd' encoding=UNSIGNED size=4 vlen=0",
|
||||
"[35] TYPEDEF 'typedef1' type_id=23",
|
||||
"[36] FUNC 'func1' type_id=37 linkage=global",
|
||||
"[37] FUNC_PROTO '(anon)' ret_type_id=23 vlen=2\n"
|
||||
"\t'p1' type_id=23\n"
|
||||
"\t'p2' type_id=24",
|
||||
"[38] VAR 'var1' type_id=23, linkage=global-alloc",
|
||||
"[39] DATASEC 'datasec1' size=12 vlen=1\n"
|
||||
"\ttype_id=23 offset=4 size=8",
|
||||
"[40] DECL_TAG 'tag1' type_id=38 component_idx=-1",
|
||||
"[41] DECL_TAG 'tag2' type_id=36 component_idx=1",
|
||||
"[42] TYPE_TAG 'tag1' type_id=23",
|
||||
"[43] ENUM64 'e1' encoding=SIGNED size=8 vlen=2\n"
|
||||
"\t'v1' val=-1\n"
|
||||
"\t'v2' val=4886718345",
|
||||
"[44] ENUM64 'e1' encoding=UNSIGNED size=8 vlen=1\n"
|
||||
"\t'v1' val=18446744073709551615");
|
||||
|
||||
cleanup:
|
||||
btf__free(btf1);
|
||||
|
@ -84,6 +84,7 @@ static int duration = 0;
|
||||
#define NESTING_ERR_CASE(name) { \
|
||||
NESTING_CASE_COMMON(name), \
|
||||
.fails = true, \
|
||||
.run_btfgen_fails = true, \
|
||||
}
|
||||
|
||||
#define ARRAYS_DATA(struct_name) STRUCT_TO_CHAR_PTR(struct_name) { \
|
||||
@ -258,12 +259,14 @@ static int duration = 0;
|
||||
BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.o", \
|
||||
"probed:", name), \
|
||||
.fails = true, \
|
||||
.run_btfgen_fails = true, \
|
||||
.raw_tp_name = "sys_enter", \
|
||||
.prog_name = "test_core_bitfields", \
|
||||
}, { \
|
||||
BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.o", \
|
||||
"direct:", name), \
|
||||
.fails = true, \
|
||||
.run_btfgen_fails = true, \
|
||||
.prog_name = "test_core_bitfields_direct", \
|
||||
}
|
||||
|
||||
@ -304,6 +307,7 @@ static int duration = 0;
|
||||
#define SIZE_ERR_CASE(name) { \
|
||||
SIZE_CASE_COMMON(name), \
|
||||
.fails = true, \
|
||||
.run_btfgen_fails = true, \
|
||||
}
|
||||
|
||||
#define TYPE_BASED_CASE_COMMON(name) \
|
||||
@ -363,6 +367,25 @@ static int duration = 0;
|
||||
.fails = true, \
|
||||
}
|
||||
|
||||
#define ENUM64VAL_CASE_COMMON(name) \
|
||||
.case_name = #name, \
|
||||
.bpf_obj_file = "test_core_reloc_enum64val.o", \
|
||||
.btf_src_file = "btf__core_reloc_" #name ".o", \
|
||||
.raw_tp_name = "sys_enter", \
|
||||
.prog_name = "test_core_enum64val"
|
||||
|
||||
#define ENUM64VAL_CASE(name, ...) { \
|
||||
ENUM64VAL_CASE_COMMON(name), \
|
||||
.output = STRUCT_TO_CHAR_PTR(core_reloc_enum64val_output) \
|
||||
__VA_ARGS__, \
|
||||
.output_len = sizeof(struct core_reloc_enum64val_output), \
|
||||
}
|
||||
|
||||
#define ENUM64VAL_ERR_CASE(name) { \
|
||||
ENUM64VAL_CASE_COMMON(name), \
|
||||
.fails = true, \
|
||||
}
|
||||
|
||||
struct core_reloc_test_case;
|
||||
|
||||
typedef int (*setup_test_fn)(struct core_reloc_test_case *test);
|
||||
@ -377,6 +400,7 @@ struct core_reloc_test_case {
|
||||
const char *output;
|
||||
int output_len;
|
||||
bool fails;
|
||||
bool run_btfgen_fails;
|
||||
bool needs_testmod;
|
||||
bool relaxed_core_relocs;
|
||||
const char *prog_name;
|
||||
@ -831,6 +855,45 @@ static const struct core_reloc_test_case test_cases[] = {
|
||||
.anon_val2 = 0x222,
|
||||
}),
|
||||
ENUMVAL_ERR_CASE(enumval___err_missing),
|
||||
|
||||
/* 64bit enumerator value existence and value relocations */
|
||||
ENUM64VAL_CASE(enum64val, {
|
||||
.unsigned_val1_exists = true,
|
||||
.unsigned_val2_exists = true,
|
||||
.unsigned_val3_exists = true,
|
||||
.signed_val1_exists = true,
|
||||
.signed_val2_exists = true,
|
||||
.signed_val3_exists = true,
|
||||
.unsigned_val1 = 0x1ffffffffULL,
|
||||
.unsigned_val2 = 0x2,
|
||||
.signed_val1 = 0x1ffffffffLL,
|
||||
.signed_val2 = -2,
|
||||
}),
|
||||
ENUM64VAL_CASE(enum64val___diff, {
|
||||
.unsigned_val1_exists = true,
|
||||
.unsigned_val2_exists = true,
|
||||
.unsigned_val3_exists = true,
|
||||
.signed_val1_exists = true,
|
||||
.signed_val2_exists = true,
|
||||
.signed_val3_exists = true,
|
||||
.unsigned_val1 = 0x101ffffffffULL,
|
||||
.unsigned_val2 = 0x202ffffffffULL,
|
||||
.signed_val1 = -101,
|
||||
.signed_val2 = -202,
|
||||
}),
|
||||
ENUM64VAL_CASE(enum64val___val3_missing, {
|
||||
.unsigned_val1_exists = true,
|
||||
.unsigned_val2_exists = true,
|
||||
.unsigned_val3_exists = false,
|
||||
.signed_val1_exists = true,
|
||||
.signed_val2_exists = true,
|
||||
.signed_val3_exists = false,
|
||||
.unsigned_val1 = 0x111ffffffffULL,
|
||||
.unsigned_val2 = 0x222,
|
||||
.signed_val1 = 0x111ffffffffLL,
|
||||
.signed_val2 = -222,
|
||||
}),
|
||||
ENUM64VAL_ERR_CASE(enum64val___err_missing),
|
||||
};
|
||||
|
||||
struct data {
|
||||
@ -894,7 +957,7 @@ static void run_core_reloc_tests(bool use_btfgen)
|
||||
/* generate a "minimal" BTF file and use it as source */
|
||||
if (use_btfgen) {
|
||||
|
||||
if (!test_case->btf_src_file || test_case->fails) {
|
||||
if (!test_case->btf_src_file || test_case->run_btfgen_fails) {
|
||||
test__skip();
|
||||
continue;
|
||||
}
|
||||
|
@ -7,11 +7,9 @@
|
||||
|
||||
void serial_test_fexit_stress(void)
|
||||
{
|
||||
char test_skb[128] = {};
|
||||
int fexit_fd[CNT] = {};
|
||||
int link_fd[CNT] = {};
|
||||
char error[4096];
|
||||
int err, i, filter_fd;
|
||||
int err, i;
|
||||
|
||||
const struct bpf_insn trace_program[] = {
|
||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
||||
@ -20,25 +18,9 @@ void serial_test_fexit_stress(void)
|
||||
|
||||
LIBBPF_OPTS(bpf_prog_load_opts, trace_opts,
|
||||
.expected_attach_type = BPF_TRACE_FEXIT,
|
||||
.log_buf = error,
|
||||
.log_size = sizeof(error),
|
||||
);
|
||||
|
||||
const struct bpf_insn skb_program[] = {
|
||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
||||
BPF_EXIT_INSN(),
|
||||
};
|
||||
|
||||
LIBBPF_OPTS(bpf_prog_load_opts, skb_opts,
|
||||
.log_buf = error,
|
||||
.log_size = sizeof(error),
|
||||
);
|
||||
|
||||
LIBBPF_OPTS(bpf_test_run_opts, topts,
|
||||
.data_in = test_skb,
|
||||
.data_size_in = sizeof(test_skb),
|
||||
.repeat = 1,
|
||||
);
|
||||
LIBBPF_OPTS(bpf_test_run_opts, topts);
|
||||
|
||||
err = libbpf_find_vmlinux_btf_id("bpf_fentry_test1",
|
||||
trace_opts.expected_attach_type);
|
||||
@ -58,15 +40,9 @@ void serial_test_fexit_stress(void)
|
||||
goto out;
|
||||
}
|
||||
|
||||
filter_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL",
|
||||
skb_program, sizeof(skb_program) / sizeof(struct bpf_insn),
|
||||
&skb_opts);
|
||||
if (!ASSERT_GE(filter_fd, 0, "test_program_loaded"))
|
||||
goto out;
|
||||
err = bpf_prog_test_run_opts(fexit_fd[0], &topts);
|
||||
ASSERT_OK(err, "bpf_prog_test_run_opts");
|
||||
|
||||
err = bpf_prog_test_run_opts(filter_fd, &topts);
|
||||
close(filter_fd);
|
||||
CHECK_FAIL(err);
|
||||
out:
|
||||
for (i = 0; i < CNT; i++) {
|
||||
if (link_fd[i])
|
||||
|
207
tools/testing/selftests/bpf/prog_tests/libbpf_str.c
Normal file
207
tools/testing/selftests/bpf/prog_tests/libbpf_str.c
Normal file
@ -0,0 +1,207 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
|
||||
|
||||
#include <ctype.h>
|
||||
#include <test_progs.h>
|
||||
#include <bpf/btf.h>
|
||||
|
||||
/*
|
||||
* Utility function uppercasing an entire string.
|
||||
*/
|
||||
static void uppercase(char *s)
|
||||
{
|
||||
for (; *s != '\0'; s++)
|
||||
*s = toupper(*s);
|
||||
}
|
||||
|
||||
/*
|
||||
* Test case to check that all bpf_attach_type variants are covered by
|
||||
* libbpf_bpf_attach_type_str.
|
||||
*/
|
||||
static void test_libbpf_bpf_attach_type_str(void)
|
||||
{
|
||||
struct btf *btf;
|
||||
const struct btf_type *t;
|
||||
const struct btf_enum *e;
|
||||
int i, n, id;
|
||||
|
||||
btf = btf__parse("/sys/kernel/btf/vmlinux", NULL);
|
||||
if (!ASSERT_OK_PTR(btf, "btf_parse"))
|
||||
return;
|
||||
|
||||
/* find enum bpf_attach_type and enumerate each value */
|
||||
id = btf__find_by_name_kind(btf, "bpf_attach_type", BTF_KIND_ENUM);
|
||||
if (!ASSERT_GT(id, 0, "bpf_attach_type_id"))
|
||||
goto cleanup;
|
||||
t = btf__type_by_id(btf, id);
|
||||
e = btf_enum(t);
|
||||
n = btf_vlen(t);
|
||||
for (i = 0; i < n; e++, i++) {
|
||||
enum bpf_attach_type attach_type = (enum bpf_attach_type)e->val;
|
||||
const char *attach_type_name;
|
||||
const char *attach_type_str;
|
||||
char buf[256];
|
||||
|
||||
if (attach_type == __MAX_BPF_ATTACH_TYPE)
|
||||
continue;
|
||||
|
||||
attach_type_name = btf__str_by_offset(btf, e->name_off);
|
||||
attach_type_str = libbpf_bpf_attach_type_str(attach_type);
|
||||
ASSERT_OK_PTR(attach_type_str, attach_type_name);
|
||||
|
||||
snprintf(buf, sizeof(buf), "BPF_%s", attach_type_str);
|
||||
uppercase(buf);
|
||||
|
||||
ASSERT_STREQ(buf, attach_type_name, "exp_str_value");
|
||||
}
|
||||
|
||||
cleanup:
|
||||
btf__free(btf);
|
||||
}
|
||||
|
||||
/*
|
||||
* Test case to check that all bpf_link_type variants are covered by
|
||||
* libbpf_bpf_link_type_str.
|
||||
*/
|
||||
static void test_libbpf_bpf_link_type_str(void)
|
||||
{
|
||||
struct btf *btf;
|
||||
const struct btf_type *t;
|
||||
const struct btf_enum *e;
|
||||
int i, n, id;
|
||||
|
||||
btf = btf__parse("/sys/kernel/btf/vmlinux", NULL);
|
||||
if (!ASSERT_OK_PTR(btf, "btf_parse"))
|
||||
return;
|
||||
|
||||
/* find enum bpf_link_type and enumerate each value */
|
||||
id = btf__find_by_name_kind(btf, "bpf_link_type", BTF_KIND_ENUM);
|
||||
if (!ASSERT_GT(id, 0, "bpf_link_type_id"))
|
||||
goto cleanup;
|
||||
t = btf__type_by_id(btf, id);
|
||||
e = btf_enum(t);
|
||||
n = btf_vlen(t);
|
||||
for (i = 0; i < n; e++, i++) {
|
||||
enum bpf_link_type link_type = (enum bpf_link_type)e->val;
|
||||
const char *link_type_name;
|
||||
const char *link_type_str;
|
||||
char buf[256];
|
||||
|
||||
if (link_type == MAX_BPF_LINK_TYPE)
|
||||
continue;
|
||||
|
||||
link_type_name = btf__str_by_offset(btf, e->name_off);
|
||||
link_type_str = libbpf_bpf_link_type_str(link_type);
|
||||
ASSERT_OK_PTR(link_type_str, link_type_name);
|
||||
|
||||
snprintf(buf, sizeof(buf), "BPF_LINK_TYPE_%s", link_type_str);
|
||||
uppercase(buf);
|
||||
|
||||
ASSERT_STREQ(buf, link_type_name, "exp_str_value");
|
||||
}
|
||||
|
||||
cleanup:
|
||||
btf__free(btf);
|
||||
}
|
||||
|
||||
/*
|
||||
* Test case to check that all bpf_map_type variants are covered by
|
||||
* libbpf_bpf_map_type_str.
|
||||
*/
|
||||
static void test_libbpf_bpf_map_type_str(void)
|
||||
{
|
||||
struct btf *btf;
|
||||
const struct btf_type *t;
|
||||
const struct btf_enum *e;
|
||||
int i, n, id;
|
||||
|
||||
btf = btf__parse("/sys/kernel/btf/vmlinux", NULL);
|
||||
if (!ASSERT_OK_PTR(btf, "btf_parse"))
|
||||
return;
|
||||
|
||||
/* find enum bpf_map_type and enumerate each value */
|
||||
id = btf__find_by_name_kind(btf, "bpf_map_type", BTF_KIND_ENUM);
|
||||
if (!ASSERT_GT(id, 0, "bpf_map_type_id"))
|
||||
goto cleanup;
|
||||
t = btf__type_by_id(btf, id);
|
||||
e = btf_enum(t);
|
||||
n = btf_vlen(t);
|
||||
for (i = 0; i < n; e++, i++) {
|
||||
enum bpf_map_type map_type = (enum bpf_map_type)e->val;
|
||||
const char *map_type_name;
|
||||
const char *map_type_str;
|
||||
char buf[256];
|
||||
|
||||
map_type_name = btf__str_by_offset(btf, e->name_off);
|
||||
map_type_str = libbpf_bpf_map_type_str(map_type);
|
||||
ASSERT_OK_PTR(map_type_str, map_type_name);
|
||||
|
||||
snprintf(buf, sizeof(buf), "BPF_MAP_TYPE_%s", map_type_str);
|
||||
uppercase(buf);
|
||||
|
||||
ASSERT_STREQ(buf, map_type_name, "exp_str_value");
|
||||
}
|
||||
|
||||
cleanup:
|
||||
btf__free(btf);
|
||||
}
|
||||
|
||||
/*
|
||||
* Test case to check that all bpf_prog_type variants are covered by
|
||||
* libbpf_bpf_prog_type_str.
|
||||
*/
|
||||
static void test_libbpf_bpf_prog_type_str(void)
|
||||
{
|
||||
struct btf *btf;
|
||||
const struct btf_type *t;
|
||||
const struct btf_enum *e;
|
||||
int i, n, id;
|
||||
|
||||
btf = btf__parse("/sys/kernel/btf/vmlinux", NULL);
|
||||
if (!ASSERT_OK_PTR(btf, "btf_parse"))
|
||||
return;
|
||||
|
||||
/* find enum bpf_prog_type and enumerate each value */
|
||||
id = btf__find_by_name_kind(btf, "bpf_prog_type", BTF_KIND_ENUM);
|
||||
if (!ASSERT_GT(id, 0, "bpf_prog_type_id"))
|
||||
goto cleanup;
|
||||
t = btf__type_by_id(btf, id);
|
||||
e = btf_enum(t);
|
||||
n = btf_vlen(t);
|
||||
for (i = 0; i < n; e++, i++) {
|
||||
enum bpf_prog_type prog_type = (enum bpf_prog_type)e->val;
|
||||
const char *prog_type_name;
|
||||
const char *prog_type_str;
|
||||
char buf[256];
|
||||
|
||||
prog_type_name = btf__str_by_offset(btf, e->name_off);
|
||||
prog_type_str = libbpf_bpf_prog_type_str(prog_type);
|
||||
ASSERT_OK_PTR(prog_type_str, prog_type_name);
|
||||
|
||||
snprintf(buf, sizeof(buf), "BPF_PROG_TYPE_%s", prog_type_str);
|
||||
uppercase(buf);
|
||||
|
||||
ASSERT_STREQ(buf, prog_type_name, "exp_str_value");
|
||||
}
|
||||
|
||||
cleanup:
|
||||
btf__free(btf);
|
||||
}
|
||||
|
||||
/*
|
||||
* Run all libbpf str conversion tests.
|
||||
*/
|
||||
void test_libbpf_str(void)
|
||||
{
|
||||
if (test__start_subtest("bpf_attach_type_str"))
|
||||
test_libbpf_bpf_attach_type_str();
|
||||
|
||||
if (test__start_subtest("bpf_link_type_str"))
|
||||
test_libbpf_bpf_link_type_str();
|
||||
|
||||
if (test__start_subtest("bpf_map_type_str"))
|
||||
test_libbpf_bpf_map_type_str();
|
||||
|
||||
if (test__start_subtest("bpf_prog_type_str"))
|
||||
test_libbpf_bpf_prog_type_str();
|
||||
}
|
@ -646,7 +646,7 @@ static void test_tcp_clear_dtime(struct test_tc_dtime *skel)
|
||||
__u32 *errs = skel->bss->errs[t];
|
||||
|
||||
skel->bss->test = t;
|
||||
test_inet_dtime(AF_INET6, SOCK_STREAM, IP6_DST, 0);
|
||||
test_inet_dtime(AF_INET6, SOCK_STREAM, IP6_DST, 50000 + t);
|
||||
|
||||
ASSERT_EQ(dtimes[INGRESS_FWDNS_P100], 0,
|
||||
dtime_cnt_str(t, INGRESS_FWDNS_P100));
|
||||
@ -683,7 +683,7 @@ static void test_tcp_dtime(struct test_tc_dtime *skel, int family, bool bpf_fwd)
|
||||
errs = skel->bss->errs[t];
|
||||
|
||||
skel->bss->test = t;
|
||||
test_inet_dtime(family, SOCK_STREAM, addr, 0);
|
||||
test_inet_dtime(family, SOCK_STREAM, addr, 50000 + t);
|
||||
|
||||
/* fwdns_prio100 prog does not read delivery_time_type, so
|
||||
* kernel puts the (rcv) timetamp in __sk_buff->tstamp
|
||||
@ -715,13 +715,13 @@ static void test_udp_dtime(struct test_tc_dtime *skel, int family, bool bpf_fwd)
|
||||
errs = skel->bss->errs[t];
|
||||
|
||||
skel->bss->test = t;
|
||||
test_inet_dtime(family, SOCK_DGRAM, addr, 0);
|
||||
test_inet_dtime(family, SOCK_DGRAM, addr, 50000 + t);
|
||||
|
||||
ASSERT_EQ(dtimes[INGRESS_FWDNS_P100], 0,
|
||||
dtime_cnt_str(t, INGRESS_FWDNS_P100));
|
||||
/* non mono delivery time is not forwarded */
|
||||
ASSERT_EQ(dtimes[INGRESS_FWDNS_P101], 0,
|
||||
dtime_cnt_str(t, INGRESS_FWDNS_P100));
|
||||
dtime_cnt_str(t, INGRESS_FWDNS_P101));
|
||||
for (i = EGRESS_FWDNS_P100; i < SET_DTIME; i++)
|
||||
ASSERT_GT(dtimes[i], 0, dtime_cnt_str(t, i));
|
||||
|
||||
|
183
tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c
Normal file
183
tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c
Normal file
@ -0,0 +1,183 @@
|
||||
// SPDX-License-Identifier: LGPL-2.1 OR BSD-2-Clause
|
||||
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
|
||||
|
||||
#define _GNU_SOURCE
|
||||
#include <test_progs.h>
|
||||
#include <network_helpers.h>
|
||||
#include <ctype.h>
|
||||
|
||||
#define CMD_OUT_BUF_SIZE 1023
|
||||
|
||||
#define SYS(cmd) ({ \
|
||||
if (!ASSERT_OK(system(cmd), (cmd))) \
|
||||
goto out; \
|
||||
})
|
||||
|
||||
#define SYS_OUT(cmd, ...) ({ \
|
||||
char buf[1024]; \
|
||||
snprintf(buf, sizeof(buf), (cmd), ##__VA_ARGS__); \
|
||||
FILE *f = popen(buf, "r"); \
|
||||
if (!ASSERT_OK_PTR(f, buf)) \
|
||||
goto out; \
|
||||
f; \
|
||||
})
|
||||
|
||||
/* out must be at least `size * 4 + 1` bytes long */
|
||||
static void escape_str(char *out, const char *in, size_t size)
|
||||
{
|
||||
static const char *hex = "0123456789ABCDEF";
|
||||
size_t i;
|
||||
|
||||
for (i = 0; i < size; i++) {
|
||||
if (isprint(in[i]) && in[i] != '\\' && in[i] != '\'') {
|
||||
*out++ = in[i];
|
||||
} else {
|
||||
*out++ = '\\';
|
||||
*out++ = 'x';
|
||||
*out++ = hex[(in[i] >> 4) & 0xf];
|
||||
*out++ = hex[in[i] & 0xf];
|
||||
}
|
||||
}
|
||||
*out++ = '\0';
|
||||
}
|
||||
|
||||
static bool expect_str(char *buf, size_t size, const char *str, const char *name)
|
||||
{
|
||||
static char escbuf_expected[CMD_OUT_BUF_SIZE * 4];
|
||||
static char escbuf_actual[CMD_OUT_BUF_SIZE * 4];
|
||||
static int duration = 0;
|
||||
bool ok;
|
||||
|
||||
ok = size == strlen(str) && !memcmp(buf, str, size);
|
||||
|
||||
if (!ok) {
|
||||
escape_str(escbuf_expected, str, strlen(str));
|
||||
escape_str(escbuf_actual, buf, size);
|
||||
}
|
||||
CHECK(!ok, name, "unexpected %s: actual '%s' != expected '%s'\n",
|
||||
name, escbuf_actual, escbuf_expected);
|
||||
|
||||
return ok;
|
||||
}
|
||||
|
||||
static void test_synproxy(bool xdp)
|
||||
{
|
||||
int server_fd = -1, client_fd = -1, accept_fd = -1;
|
||||
char *prog_id, *prog_id_end;
|
||||
struct nstoken *ns = NULL;
|
||||
FILE *ctrl_file = NULL;
|
||||
char buf[CMD_OUT_BUF_SIZE];
|
||||
size_t size;
|
||||
|
||||
SYS("ip netns add synproxy");
|
||||
|
||||
SYS("ip link add tmp0 type veth peer name tmp1");
|
||||
SYS("ip link set tmp1 netns synproxy");
|
||||
SYS("ip link set tmp0 up");
|
||||
SYS("ip addr replace 198.18.0.1/24 dev tmp0");
|
||||
|
||||
/* When checksum offload is enabled, the XDP program sees wrong
|
||||
* checksums and drops packets.
|
||||
*/
|
||||
SYS("ethtool -K tmp0 tx off");
|
||||
if (xdp)
|
||||
/* Workaround required for veth. */
|
||||
SYS("ip link set tmp0 xdp object xdp_dummy.o section xdp 2> /dev/null");
|
||||
|
||||
ns = open_netns("synproxy");
|
||||
if (!ASSERT_OK_PTR(ns, "setns"))
|
||||
goto out;
|
||||
|
||||
SYS("ip link set lo up");
|
||||
SYS("ip link set tmp1 up");
|
||||
SYS("ip addr replace 198.18.0.2/24 dev tmp1");
|
||||
SYS("sysctl -w net.ipv4.tcp_syncookies=2");
|
||||
SYS("sysctl -w net.ipv4.tcp_timestamps=1");
|
||||
SYS("sysctl -w net.netfilter.nf_conntrack_tcp_loose=0");
|
||||
SYS("iptables -t raw -I PREROUTING \
|
||||
-i tmp1 -p tcp -m tcp --syn --dport 8080 -j CT --notrack");
|
||||
SYS("iptables -t filter -A INPUT \
|
||||
-i tmp1 -p tcp -m tcp --dport 8080 -m state --state INVALID,UNTRACKED \
|
||||
-j SYNPROXY --sack-perm --timestamp --wscale 7 --mss 1460");
|
||||
SYS("iptables -t filter -A INPUT \
|
||||
-i tmp1 -m state --state INVALID -j DROP");
|
||||
|
||||
ctrl_file = SYS_OUT("./xdp_synproxy --iface tmp1 --ports 8080 \
|
||||
--single --mss4 1460 --mss6 1440 \
|
||||
--wscale 7 --ttl 64%s", xdp ? "" : " --tc");
|
||||
size = fread(buf, 1, sizeof(buf), ctrl_file);
|
||||
pclose(ctrl_file);
|
||||
if (!expect_str(buf, size, "Total SYNACKs generated: 0\n",
|
||||
"initial SYNACKs"))
|
||||
goto out;
|
||||
|
||||
if (!xdp) {
|
||||
ctrl_file = SYS_OUT("tc filter show dev tmp1 ingress");
|
||||
size = fread(buf, 1, sizeof(buf), ctrl_file);
|
||||
pclose(ctrl_file);
|
||||
prog_id = memmem(buf, size, " id ", 4);
|
||||
if (!ASSERT_OK_PTR(prog_id, "find prog id"))
|
||||
goto out;
|
||||
prog_id += 4;
|
||||
if (!ASSERT_LT(prog_id, buf + size, "find prog id begin"))
|
||||
goto out;
|
||||
prog_id_end = prog_id;
|
||||
while (prog_id_end < buf + size && *prog_id_end >= '0' &&
|
||||
*prog_id_end <= '9')
|
||||
prog_id_end++;
|
||||
if (!ASSERT_LT(prog_id_end, buf + size, "find prog id end"))
|
||||
goto out;
|
||||
*prog_id_end = '\0';
|
||||
}
|
||||
|
||||
server_fd = start_server(AF_INET, SOCK_STREAM, "198.18.0.2", 8080, 0);
|
||||
if (!ASSERT_GE(server_fd, 0, "start_server"))
|
||||
goto out;
|
||||
|
||||
close_netns(ns);
|
||||
ns = NULL;
|
||||
|
||||
client_fd = connect_to_fd(server_fd, 10000);
|
||||
if (!ASSERT_GE(client_fd, 0, "connect_to_fd"))
|
||||
goto out;
|
||||
|
||||
accept_fd = accept(server_fd, NULL, NULL);
|
||||
if (!ASSERT_GE(accept_fd, 0, "accept"))
|
||||
goto out;
|
||||
|
||||
ns = open_netns("synproxy");
|
||||
if (!ASSERT_OK_PTR(ns, "setns"))
|
||||
goto out;
|
||||
|
||||
if (xdp)
|
||||
ctrl_file = SYS_OUT("./xdp_synproxy --iface tmp1 --single");
|
||||
else
|
||||
ctrl_file = SYS_OUT("./xdp_synproxy --prog %s --single",
|
||||
prog_id);
|
||||
size = fread(buf, 1, sizeof(buf), ctrl_file);
|
||||
pclose(ctrl_file);
|
||||
if (!expect_str(buf, size, "Total SYNACKs generated: 1\n",
|
||||
"SYNACKs after connection"))
|
||||
goto out;
|
||||
|
||||
out:
|
||||
if (accept_fd >= 0)
|
||||
close(accept_fd);
|
||||
if (client_fd >= 0)
|
||||
close(client_fd);
|
||||
if (server_fd >= 0)
|
||||
close(server_fd);
|
||||
if (ns)
|
||||
close_netns(ns);
|
||||
|
||||
system("ip link del tmp0");
|
||||
system("ip netns del synproxy");
|
||||
}
|
||||
|
||||
void test_xdp_synproxy(void)
|
||||
{
|
||||
if (test__start_subtest("xdp"))
|
||||
test_synproxy(true);
|
||||
if (test__start_subtest("tc"))
|
||||
test_synproxy(false);
|
||||
}
|
@ -0,0 +1,40 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright (c) 2022 Bytedance */
|
||||
|
||||
#include "vmlinux.h"
|
||||
#include <bpf/bpf_helpers.h>
|
||||
#include "bpf_misc.h"
|
||||
|
||||
char _license[] SEC("license") = "GPL";
|
||||
|
||||
#define MAX_ENTRIES 1000
|
||||
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_HASH);
|
||||
__type(key, u32);
|
||||
__type(value, u64);
|
||||
__uint(max_entries, MAX_ENTRIES);
|
||||
} hash_map_bench SEC(".maps");
|
||||
|
||||
u64 __attribute__((__aligned__(256))) percpu_time[256];
|
||||
u64 nr_loops;
|
||||
|
||||
static int loop_update_callback(__u32 index, u32 *key)
|
||||
{
|
||||
u64 init_val = 1;
|
||||
|
||||
bpf_map_update_elem(&hash_map_bench, key, &init_val, BPF_ANY);
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("fentry/" SYS_PREFIX "sys_getpgid")
|
||||
int benchmark(void *ctx)
|
||||
{
|
||||
u32 cpu = bpf_get_smp_processor_id();
|
||||
u32 key = cpu + MAX_ENTRIES;
|
||||
u64 start_time = bpf_ktime_get_ns();
|
||||
|
||||
bpf_loop(nr_loops, loop_update_callback, &key, 0);
|
||||
percpu_time[cpu & 255] = bpf_ktime_get_ns() - start_time;
|
||||
return 0;
|
||||
}
|
@ -0,0 +1,3 @@
|
||||
#include "core_reloc_types.h"
|
||||
|
||||
void f(struct core_reloc_enum64val x) {}
|
@ -0,0 +1,3 @@
|
||||
#include "core_reloc_types.h"
|
||||
|
||||
void f(struct core_reloc_enum64val___diff x) {}
|
@ -0,0 +1,3 @@
|
||||
#include "core_reloc_types.h"
|
||||
|
||||
void f(struct core_reloc_enum64val___err_missing x) {}
|
@ -0,0 +1,3 @@
|
||||
#include "core_reloc_types.h"
|
||||
|
||||
void f(struct core_reloc_enum64val___val3_missing x) {}
|
@ -1117,6 +1117,20 @@ struct core_reloc_enumval_output {
|
||||
int anon_val2;
|
||||
};
|
||||
|
||||
struct core_reloc_enum64val_output {
|
||||
bool unsigned_val1_exists;
|
||||
bool unsigned_val2_exists;
|
||||
bool unsigned_val3_exists;
|
||||
bool signed_val1_exists;
|
||||
bool signed_val2_exists;
|
||||
bool signed_val3_exists;
|
||||
|
||||
long unsigned_val1;
|
||||
long unsigned_val2;
|
||||
long signed_val1;
|
||||
long signed_val2;
|
||||
};
|
||||
|
||||
enum named_enum {
|
||||
NAMED_ENUM_VAL1 = 1,
|
||||
NAMED_ENUM_VAL2 = 2,
|
||||
@ -1134,6 +1148,23 @@ struct core_reloc_enumval {
|
||||
anon_enum f2;
|
||||
};
|
||||
|
||||
enum named_unsigned_enum64 {
|
||||
UNSIGNED_ENUM64_VAL1 = 0x1ffffffffULL,
|
||||
UNSIGNED_ENUM64_VAL2 = 0x2,
|
||||
UNSIGNED_ENUM64_VAL3 = 0x3ffffffffULL,
|
||||
};
|
||||
|
||||
enum named_signed_enum64 {
|
||||
SIGNED_ENUM64_VAL1 = 0x1ffffffffLL,
|
||||
SIGNED_ENUM64_VAL2 = -2,
|
||||
SIGNED_ENUM64_VAL3 = 0x3ffffffffLL,
|
||||
};
|
||||
|
||||
struct core_reloc_enum64val {
|
||||
enum named_unsigned_enum64 f1;
|
||||
enum named_signed_enum64 f2;
|
||||
};
|
||||
|
||||
/* differing enumerator values */
|
||||
enum named_enum___diff {
|
||||
NAMED_ENUM_VAL1___diff = 101,
|
||||
@ -1152,6 +1183,23 @@ struct core_reloc_enumval___diff {
|
||||
anon_enum___diff f2;
|
||||
};
|
||||
|
||||
enum named_unsigned_enum64___diff {
|
||||
UNSIGNED_ENUM64_VAL1___diff = 0x101ffffffffULL,
|
||||
UNSIGNED_ENUM64_VAL2___diff = 0x202ffffffffULL,
|
||||
UNSIGNED_ENUM64_VAL3___diff = 0x303ffffffffULL,
|
||||
};
|
||||
|
||||
enum named_signed_enum64___diff {
|
||||
SIGNED_ENUM64_VAL1___diff = -101,
|
||||
SIGNED_ENUM64_VAL2___diff = -202,
|
||||
SIGNED_ENUM64_VAL3___diff = -303,
|
||||
};
|
||||
|
||||
struct core_reloc_enum64val___diff {
|
||||
enum named_unsigned_enum64___diff f1;
|
||||
enum named_signed_enum64___diff f2;
|
||||
};
|
||||
|
||||
/* missing (optional) third enum value */
|
||||
enum named_enum___val3_missing {
|
||||
NAMED_ENUM_VAL1___val3_missing = 111,
|
||||
@ -1168,6 +1216,21 @@ struct core_reloc_enumval___val3_missing {
|
||||
anon_enum___val3_missing f2;
|
||||
};
|
||||
|
||||
enum named_unsigned_enum64___val3_missing {
|
||||
UNSIGNED_ENUM64_VAL1___val3_missing = 0x111ffffffffULL,
|
||||
UNSIGNED_ENUM64_VAL2___val3_missing = 0x222,
|
||||
};
|
||||
|
||||
enum named_signed_enum64___val3_missing {
|
||||
SIGNED_ENUM64_VAL1___val3_missing = 0x111ffffffffLL,
|
||||
SIGNED_ENUM64_VAL2___val3_missing = -222,
|
||||
};
|
||||
|
||||
struct core_reloc_enum64val___val3_missing {
|
||||
enum named_unsigned_enum64___val3_missing f1;
|
||||
enum named_signed_enum64___val3_missing f2;
|
||||
};
|
||||
|
||||
/* missing (mandatory) second enum value, should fail */
|
||||
enum named_enum___err_missing {
|
||||
NAMED_ENUM_VAL1___err_missing = 1,
|
||||
@ -1183,3 +1246,18 @@ struct core_reloc_enumval___err_missing {
|
||||
enum named_enum___err_missing f1;
|
||||
anon_enum___err_missing f2;
|
||||
};
|
||||
|
||||
enum named_unsigned_enum64___err_missing {
|
||||
UNSIGNED_ENUM64_VAL1___err_missing = 0x1ffffffffULL,
|
||||
UNSIGNED_ENUM64_VAL3___err_missing = 0x3ffffffffULL,
|
||||
};
|
||||
|
||||
enum named_signed_enum64___err_missing {
|
||||
SIGNED_ENUM64_VAL1___err_missing = 0x1ffffffffLL,
|
||||
SIGNED_ENUM64_VAL3___err_missing = -3,
|
||||
};
|
||||
|
||||
struct core_reloc_enum64val___err_missing {
|
||||
enum named_unsigned_enum64___err_missing f1;
|
||||
enum named_signed_enum64___err_missing f2;
|
||||
};
|
||||
|
@ -5,6 +5,7 @@
|
||||
#include <linux/bpf.h>
|
||||
#include <bpf/bpf_helpers.h>
|
||||
#include <bpf/bpf_tracing.h>
|
||||
#include <stdbool.h>
|
||||
#include "bpf_misc.h"
|
||||
|
||||
int kprobe_res = 0;
|
||||
@ -17,6 +18,11 @@ int uprobe_byname_res = 0;
|
||||
int uretprobe_byname_res = 0;
|
||||
int uprobe_byname2_res = 0;
|
||||
int uretprobe_byname2_res = 0;
|
||||
int uprobe_byname3_sleepable_res = 0;
|
||||
int uprobe_byname3_res = 0;
|
||||
int uretprobe_byname3_sleepable_res = 0;
|
||||
int uretprobe_byname3_res = 0;
|
||||
void *user_ptr = 0;
|
||||
|
||||
SEC("kprobe")
|
||||
int handle_kprobe(struct pt_regs *ctx)
|
||||
@ -32,6 +38,17 @@ int BPF_KPROBE(handle_kprobe_auto)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* This program will be manually made sleepable on the userspace side
|
||||
* and should thus be unattachable.
|
||||
*/
|
||||
SEC("kprobe/" SYS_PREFIX "sys_nanosleep")
|
||||
int handle_kprobe_sleepable(struct pt_regs *ctx)
|
||||
{
|
||||
kprobe_res = 2;
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("kretprobe")
|
||||
int handle_kretprobe(struct pt_regs *ctx)
|
||||
{
|
||||
@ -93,4 +110,47 @@ int handle_uretprobe_byname2(struct pt_regs *ctx)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static __always_inline bool verify_sleepable_user_copy(void)
|
||||
{
|
||||
char data[9];
|
||||
|
||||
bpf_copy_from_user(data, sizeof(data), user_ptr);
|
||||
return bpf_strncmp(data, sizeof(data), "test_data") == 0;
|
||||
}
|
||||
|
||||
SEC("uprobe.s//proc/self/exe:trigger_func3")
|
||||
int handle_uprobe_byname3_sleepable(struct pt_regs *ctx)
|
||||
{
|
||||
if (verify_sleepable_user_copy())
|
||||
uprobe_byname3_sleepable_res = 9;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* same target as the uprobe.s above to force sleepable and non-sleepable
|
||||
* programs in the same bpf_prog_array
|
||||
*/
|
||||
SEC("uprobe//proc/self/exe:trigger_func3")
|
||||
int handle_uprobe_byname3(struct pt_regs *ctx)
|
||||
{
|
||||
uprobe_byname3_res = 10;
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("uretprobe.s//proc/self/exe:trigger_func3")
|
||||
int handle_uretprobe_byname3_sleepable(struct pt_regs *ctx)
|
||||
{
|
||||
if (verify_sleepable_user_copy())
|
||||
uretprobe_byname3_sleepable_res = 11;
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("uretprobe//proc/self/exe:trigger_func3")
|
||||
int handle_uretprobe_byname3(struct pt_regs *ctx)
|
||||
{
|
||||
uretprobe_byname3_res = 12;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
char _license[] SEC("license") = "GPL";
|
||||
|
@ -0,0 +1,70 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
|
||||
|
||||
#include <linux/bpf.h>
|
||||
#include <stdint.h>
|
||||
#include <stdbool.h>
|
||||
#include <bpf/bpf_helpers.h>
|
||||
#include <bpf/bpf_core_read.h>
|
||||
|
||||
char _license[] SEC("license") = "GPL";
|
||||
|
||||
struct {
|
||||
char in[256];
|
||||
char out[256];
|
||||
bool skip;
|
||||
} data = {};
|
||||
|
||||
enum named_unsigned_enum64 {
|
||||
UNSIGNED_ENUM64_VAL1 = 0x1ffffffffULL,
|
||||
UNSIGNED_ENUM64_VAL2 = 0x2ffffffffULL,
|
||||
UNSIGNED_ENUM64_VAL3 = 0x3ffffffffULL,
|
||||
};
|
||||
|
||||
enum named_signed_enum64 {
|
||||
SIGNED_ENUM64_VAL1 = 0x1ffffffffLL,
|
||||
SIGNED_ENUM64_VAL2 = -2,
|
||||
SIGNED_ENUM64_VAL3 = 0x3ffffffffLL,
|
||||
};
|
||||
|
||||
struct core_reloc_enum64val_output {
|
||||
bool unsigned_val1_exists;
|
||||
bool unsigned_val2_exists;
|
||||
bool unsigned_val3_exists;
|
||||
bool signed_val1_exists;
|
||||
bool signed_val2_exists;
|
||||
bool signed_val3_exists;
|
||||
|
||||
long unsigned_val1;
|
||||
long unsigned_val2;
|
||||
long signed_val1;
|
||||
long signed_val2;
|
||||
};
|
||||
|
||||
SEC("raw_tracepoint/sys_enter")
|
||||
int test_core_enum64val(void *ctx)
|
||||
{
|
||||
#if __clang_major__ >= 15
|
||||
struct core_reloc_enum64val_output *out = (void *)&data.out;
|
||||
enum named_unsigned_enum64 named_unsigned = 0;
|
||||
enum named_signed_enum64 named_signed = 0;
|
||||
|
||||
out->unsigned_val1_exists = bpf_core_enum_value_exists(named_unsigned, UNSIGNED_ENUM64_VAL1);
|
||||
out->unsigned_val2_exists = bpf_core_enum_value_exists(enum named_unsigned_enum64, UNSIGNED_ENUM64_VAL2);
|
||||
out->unsigned_val3_exists = bpf_core_enum_value_exists(enum named_unsigned_enum64, UNSIGNED_ENUM64_VAL3);
|
||||
out->signed_val1_exists = bpf_core_enum_value_exists(named_signed, SIGNED_ENUM64_VAL1);
|
||||
out->signed_val2_exists = bpf_core_enum_value_exists(enum named_signed_enum64, SIGNED_ENUM64_VAL2);
|
||||
out->signed_val3_exists = bpf_core_enum_value_exists(enum named_signed_enum64, SIGNED_ENUM64_VAL3);
|
||||
|
||||
out->unsigned_val1 = bpf_core_enum_value(named_unsigned, UNSIGNED_ENUM64_VAL1);
|
||||
out->unsigned_val2 = bpf_core_enum_value(named_unsigned, UNSIGNED_ENUM64_VAL2);
|
||||
out->signed_val1 = bpf_core_enum_value(named_signed, SIGNED_ENUM64_VAL1);
|
||||
out->signed_val2 = bpf_core_enum_value(named_signed, SIGNED_ENUM64_VAL2);
|
||||
/* NAMED_ENUM64_VAL3 value is optional */
|
||||
|
||||
#else
|
||||
data.skip = true;
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
@ -11,6 +11,8 @@
|
||||
#include <linux/in.h>
|
||||
#include <linux/ip.h>
|
||||
#include <linux/ipv6.h>
|
||||
#include <linux/tcp.h>
|
||||
#include <linux/udp.h>
|
||||
#include <bpf/bpf_helpers.h>
|
||||
#include <bpf/bpf_endian.h>
|
||||
#include <sys/socket.h>
|
||||
@ -115,6 +117,19 @@ static bool bpf_fwd(void)
|
||||
return test < TCP_IP4_RT_FWD;
|
||||
}
|
||||
|
||||
static __u8 get_proto(void)
|
||||
{
|
||||
switch (test) {
|
||||
case UDP_IP4:
|
||||
case UDP_IP6:
|
||||
case UDP_IP4_RT_FWD:
|
||||
case UDP_IP6_RT_FWD:
|
||||
return IPPROTO_UDP;
|
||||
default:
|
||||
return IPPROTO_TCP;
|
||||
}
|
||||
}
|
||||
|
||||
/* -1: parse error: TC_ACT_SHOT
|
||||
* 0: not testing traffic: TC_ACT_OK
|
||||
* >0: first byte is the inet_proto, second byte has the netns
|
||||
@ -122,11 +137,16 @@ static bool bpf_fwd(void)
|
||||
*/
|
||||
static int skb_get_type(struct __sk_buff *skb)
|
||||
{
|
||||
__u16 dst_ns_port = __bpf_htons(50000 + test);
|
||||
void *data_end = ctx_ptr(skb->data_end);
|
||||
void *data = ctx_ptr(skb->data);
|
||||
__u8 inet_proto = 0, ns = 0;
|
||||
struct ipv6hdr *ip6h;
|
||||
__u16 sport, dport;
|
||||
struct iphdr *iph;
|
||||
struct tcphdr *th;
|
||||
struct udphdr *uh;
|
||||
void *trans;
|
||||
|
||||
switch (skb->protocol) {
|
||||
case __bpf_htons(ETH_P_IP):
|
||||
@ -138,6 +158,7 @@ static int skb_get_type(struct __sk_buff *skb)
|
||||
else if (iph->saddr == ip4_dst)
|
||||
ns = DST_NS;
|
||||
inet_proto = iph->protocol;
|
||||
trans = iph + 1;
|
||||
break;
|
||||
case __bpf_htons(ETH_P_IPV6):
|
||||
ip6h = data + sizeof(struct ethhdr);
|
||||
@ -148,15 +169,43 @@ static int skb_get_type(struct __sk_buff *skb)
|
||||
else if (v6_equal(ip6h->saddr, (struct in6_addr)ip6_dst))
|
||||
ns = DST_NS;
|
||||
inet_proto = ip6h->nexthdr;
|
||||
trans = ip6h + 1;
|
||||
break;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
|
||||
if ((inet_proto != IPPROTO_TCP && inet_proto != IPPROTO_UDP) || !ns)
|
||||
/* skb is not from src_ns or dst_ns.
|
||||
* skb is not the testing IPPROTO.
|
||||
*/
|
||||
if (!ns || inet_proto != get_proto())
|
||||
return 0;
|
||||
|
||||
switch (inet_proto) {
|
||||
case IPPROTO_TCP:
|
||||
th = trans;
|
||||
if (th + 1 > data_end)
|
||||
return -1;
|
||||
sport = th->source;
|
||||
dport = th->dest;
|
||||
break;
|
||||
case IPPROTO_UDP:
|
||||
uh = trans;
|
||||
if (uh + 1 > data_end)
|
||||
return -1;
|
||||
sport = uh->source;
|
||||
dport = uh->dest;
|
||||
break;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* The skb is the testing traffic */
|
||||
if ((ns == SRC_NS && dport == dst_ns_port) ||
|
||||
(ns == DST_NS && sport == dst_ns_port))
|
||||
return (ns << 8 | inet_proto);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* format: direction@iface@netns
|
||||
|
@ -41,20 +41,20 @@ int handler64_unsigned(void *regs)
|
||||
{
|
||||
int pid = bpf_get_current_pid_tgid() >> 32;
|
||||
void *payload = payload1;
|
||||
u64 len;
|
||||
long len;
|
||||
|
||||
/* ignore irrelevant invocations */
|
||||
if (test_pid != pid || !capture)
|
||||
return 0;
|
||||
|
||||
len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in1[0]);
|
||||
if (len <= MAX_LEN) {
|
||||
if (len >= 0) {
|
||||
payload += len;
|
||||
payload1_len1 = len;
|
||||
}
|
||||
|
||||
len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in2[0]);
|
||||
if (len <= MAX_LEN) {
|
||||
if (len >= 0) {
|
||||
payload += len;
|
||||
payload1_len2 = len;
|
||||
}
|
||||
@ -123,7 +123,7 @@ int handler32_signed(void *regs)
|
||||
{
|
||||
int pid = bpf_get_current_pid_tgid() >> 32;
|
||||
void *payload = payload4;
|
||||
int len;
|
||||
long len;
|
||||
|
||||
/* ignore irrelevant invocations */
|
||||
if (test_pid != pid || !capture)
|
||||
|
833
tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
Normal file
833
tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
Normal file
@ -0,0 +1,833 @@
|
||||
// SPDX-License-Identifier: LGPL-2.1 OR BSD-2-Clause
|
||||
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
|
||||
|
||||
#include "vmlinux.h"
|
||||
|
||||
#include <bpf/bpf_helpers.h>
|
||||
#include <bpf/bpf_endian.h>
|
||||
#include <asm/errno.h>
|
||||
|
||||
#define TC_ACT_OK 0
|
||||
#define TC_ACT_SHOT 2
|
||||
|
||||
#define NSEC_PER_SEC 1000000000L
|
||||
|
||||
#define ETH_ALEN 6
|
||||
#define ETH_P_IP 0x0800
|
||||
#define ETH_P_IPV6 0x86DD
|
||||
|
||||
#define tcp_flag_word(tp) (((union tcp_word_hdr *)(tp))->words[3])
|
||||
|
||||
#define IP_DF 0x4000
|
||||
#define IP_MF 0x2000
|
||||
#define IP_OFFSET 0x1fff
|
||||
|
||||
#define NEXTHDR_TCP 6
|
||||
|
||||
#define TCPOPT_NOP 1
|
||||
#define TCPOPT_EOL 0
|
||||
#define TCPOPT_MSS 2
|
||||
#define TCPOPT_WINDOW 3
|
||||
#define TCPOPT_SACK_PERM 4
|
||||
#define TCPOPT_TIMESTAMP 8
|
||||
|
||||
#define TCPOLEN_MSS 4
|
||||
#define TCPOLEN_WINDOW 3
|
||||
#define TCPOLEN_SACK_PERM 2
|
||||
#define TCPOLEN_TIMESTAMP 10
|
||||
|
||||
#define TCP_TS_HZ 1000
|
||||
#define TS_OPT_WSCALE_MASK 0xf
|
||||
#define TS_OPT_SACK (1 << 4)
|
||||
#define TS_OPT_ECN (1 << 5)
|
||||
#define TSBITS 6
|
||||
#define TSMASK (((__u32)1 << TSBITS) - 1)
|
||||
#define TCP_MAX_WSCALE 14U
|
||||
|
||||
#define IPV4_MAXLEN 60
|
||||
#define TCP_MAXLEN 60
|
||||
|
||||
#define DEFAULT_MSS4 1460
|
||||
#define DEFAULT_MSS6 1440
|
||||
#define DEFAULT_WSCALE 7
|
||||
#define DEFAULT_TTL 64
|
||||
#define MAX_ALLOWED_PORTS 8
|
||||
|
||||
#define swap(a, b) \
|
||||
do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
|
||||
|
||||
#define __get_unaligned_t(type, ptr) ({ \
|
||||
const struct { type x; } __attribute__((__packed__)) *__pptr = (typeof(__pptr))(ptr); \
|
||||
__pptr->x; \
|
||||
})
|
||||
|
||||
#define get_unaligned(ptr) __get_unaligned_t(typeof(*(ptr)), (ptr))
|
||||
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_ARRAY);
|
||||
__type(key, __u32);
|
||||
__type(value, __u64);
|
||||
__uint(max_entries, 2);
|
||||
} values SEC(".maps");
|
||||
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_ARRAY);
|
||||
__type(key, __u32);
|
||||
__type(value, __u16);
|
||||
__uint(max_entries, MAX_ALLOWED_PORTS);
|
||||
} allowed_ports SEC(".maps");
|
||||
|
||||
extern struct nf_conn *bpf_xdp_ct_lookup(struct xdp_md *xdp_ctx,
|
||||
struct bpf_sock_tuple *bpf_tuple,
|
||||
__u32 len_tuple,
|
||||
struct bpf_ct_opts *opts,
|
||||
__u32 len_opts) __ksym;
|
||||
|
||||
extern struct nf_conn *bpf_skb_ct_lookup(struct __sk_buff *skb_ctx,
|
||||
struct bpf_sock_tuple *bpf_tuple,
|
||||
u32 len_tuple,
|
||||
struct bpf_ct_opts *opts,
|
||||
u32 len_opts) __ksym;
|
||||
|
||||
extern void bpf_ct_release(struct nf_conn *ct) __ksym;
|
||||
|
||||
static __always_inline void swap_eth_addr(__u8 *a, __u8 *b)
|
||||
{
|
||||
__u8 tmp[ETH_ALEN];
|
||||
|
||||
__builtin_memcpy(tmp, a, ETH_ALEN);
|
||||
__builtin_memcpy(a, b, ETH_ALEN);
|
||||
__builtin_memcpy(b, tmp, ETH_ALEN);
|
||||
}
|
||||
|
||||
static __always_inline __u16 csum_fold(__u32 csum)
|
||||
{
|
||||
csum = (csum & 0xffff) + (csum >> 16);
|
||||
csum = (csum & 0xffff) + (csum >> 16);
|
||||
return (__u16)~csum;
|
||||
}
|
||||
|
||||
static __always_inline __u16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
|
||||
__u32 len, __u8 proto,
|
||||
__u32 csum)
|
||||
{
|
||||
__u64 s = csum;
|
||||
|
||||
s += (__u32)saddr;
|
||||
s += (__u32)daddr;
|
||||
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
|
||||
s += proto + len;
|
||||
#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
|
||||
s += (proto + len) << 8;
|
||||
#else
|
||||
#error Unknown endian
|
||||
#endif
|
||||
s = (s & 0xffffffff) + (s >> 32);
|
||||
s = (s & 0xffffffff) + (s >> 32);
|
||||
|
||||
return csum_fold((__u32)s);
|
||||
}
|
||||
|
||||
/* Compute the final TCP checksum over the IPv6 pseudo-header plus a partial
 * payload sum @csum. Mirrors the kernel's csum_ipv6_magic().
 */
static __always_inline __u16 csum_ipv6_magic(const struct in6_addr *saddr,
                                             const struct in6_addr *daddr,
                                             __u32 len, __u8 proto, __u32 csum)
{
        __u64 sum = csum;
        int i;

/* Unrolled so the verifier sees bounded, constant-index accesses. */
#pragma unroll
        for (i = 0; i < 4; i++)
                sum += (__u32)saddr->in6_u.u6_addr32[i];

#pragma unroll
        for (i = 0; i < 4; i++)
                sum += (__u32)daddr->in6_u.u6_addr32[i];

        /* Don't combine additions to avoid 32-bit overflow. */
        sum += bpf_htonl(len);
        sum += bpf_htonl(proto);

        /* Fold 64 -> 32 bits; the second round absorbs the carry. */
        sum = (sum & 0xffffffff) + (sum >> 32);
        sum = (sum & 0xffffffff) + (sum >> 32);

        return csum_fold((__u32)sum);
}
|
||||
|
||||
/* Monotonic clock in nanoseconds, analogous to the kernel's tcp_clock_ns(). */
static __always_inline __u64 tcp_clock_ns(void)
{
        return bpf_ktime_get_ns();
}

/* Convert nanoseconds to TCP timestamp ticks (TCP_TS_HZ resolution). */
static __always_inline __u32 tcp_ns_to_ts(__u64 ns)
{
        return ns / (NSEC_PER_SEC / TCP_TS_HZ);
}

/* Current TCP timestamp clock value, used as the tsval base for cookies. */
static __always_inline __u32 tcp_time_stamp_raw(void)
{
        return tcp_ns_to_ts(tcp_clock_ns());
}
|
||||
|
||||
/* Cursor state threaded through tscookie_tcpopt_parse() while walking the TCP
 * option list of a client SYN.
 */
struct tcpopt_context {
        __u8 *ptr;              /* current position in the option bytes */
        __u8 *end;              /* logical end of options (from tcp_len) */
        void *data_end;         /* end of packet buffer, for verifier bounds */
        __be32 *tsecr;          /* out: client tsval, echoed as our tsecr */
        __u8 wscale;            /* parsed window scale; starts at the "none" mask */
        bool option_timestamp;  /* SYN carried a timestamp option */
        bool option_sack;       /* SYN carried SACK-permitted */
};
|
||||
|
||||
/* Parse one TCP option at ctx->ptr, advancing the cursor.
 *
 * Returns 0 to continue parsing, 1 to stop (end of options, truncation, or a
 * malformed length). Every access is double-checked against both the logical
 * option end (ctx->end) and the packet buffer end (ctx->data_end) — the
 * latter is what the BPF verifier requires to prove in-bounds access.
 */
static int tscookie_tcpopt_parse(struct tcpopt_context *ctx)
{
        __u8 opcode, opsize;

        if (ctx->ptr >= ctx->end)
                return 1;
        if (ctx->ptr >= ctx->data_end)
                return 1;

        opcode = ctx->ptr[0];

        if (opcode == TCPOPT_EOL)
                return 1;
        if (opcode == TCPOPT_NOP) {
                /* Single-byte padding option: skip just one byte. */
                ++ctx->ptr;
                return 0;
        }

        /* All remaining options have a length byte. */
        if (ctx->ptr + 1 >= ctx->end)
                return 1;
        if (ctx->ptr + 1 >= ctx->data_end)
                return 1;
        opsize = ctx->ptr[1];
        if (opsize < 2)
                return 1;

        if (ctx->ptr + opsize > ctx->end)
                return 1;

        switch (opcode) {
        case TCPOPT_WINDOW:
                if (opsize == TCPOLEN_WINDOW && ctx->ptr + TCPOLEN_WINDOW <= ctx->data_end)
                        /* Clamp to the maximum scale the cookie can encode. */
                        ctx->wscale = ctx->ptr[2] < TCP_MAX_WSCALE ? ctx->ptr[2] : TCP_MAX_WSCALE;
                break;
        case TCPOPT_TIMESTAMP:
                if (opsize == TCPOLEN_TIMESTAMP && ctx->ptr + TCPOLEN_TIMESTAMP <= ctx->data_end) {
                        ctx->option_timestamp = true;
                        /* Client's tsval becomes our tsecr. */
                        *ctx->tsecr = get_unaligned((__be32 *)(ctx->ptr + 2));
                }
                break;
        case TCPOPT_SACK_PERM:
                if (opsize == TCPOLEN_SACK_PERM)
                        ctx->option_sack = true;
                break;
        }

        ctx->ptr += opsize;

        return 0;
}
|
||||
|
||||
/* bpf_loop() callback: parse up to 7 options per iteration. Combined with the
 * 6 outer iterations in tscookie_init() this covers 42 options — more than
 * the 40 bytes a TCP header can carry even with single-byte options.
 * Returns nonzero to terminate the bpf_loop early.
 */
static int tscookie_tcpopt_parse_batch(__u32 index, void *context)
{
        int i;

        for (i = 0; i < 7; i++)
                if (tscookie_tcpopt_parse(context))
                        return 1;
        return 0;
}
|
||||
|
||||
/* Parse the SYN's TCP options and, if a timestamp option is present, encode
 * the negotiated parameters into *tsval (our timestamp cookie) and echo the
 * client's tsval via *tsecr.
 *
 * Returns false when the client sent no timestamp option (no cookie can be
 * encoded in timestamps then), true otherwise.
 */
static __always_inline bool tscookie_init(struct tcphdr *tcp_header,
                                          __u16 tcp_len, __be32 *tsval,
                                          __be32 *tsecr, void *data_end)
{
        struct tcpopt_context loop_ctx = {
                .ptr = (__u8 *)(tcp_header + 1),        /* options follow the fixed header */
                .end = (__u8 *)tcp_header + tcp_len,
                .data_end = data_end,
                .tsecr = tsecr,
                /* "All ones" wscale mask doubles as the "no wscale" marker. */
                .wscale = TS_OPT_WSCALE_MASK,
                .option_timestamp = false,
                .option_sack = false,
        };
        u32 cookie;

        /* 6 batches x 7 options per batch >= max possible option count. */
        bpf_loop(6, tscookie_tcpopt_parse_batch, &loop_ctx, 0);

        if (!loop_ctx.option_timestamp)
                return false;

        /* Timestamp cookie layout: high bits are the timestamp clock, the low
         * bits encode wscale and the SACK/ECN flags.
         */
        cookie = tcp_time_stamp_raw() & ~TSMASK;
        cookie |= loop_ctx.wscale & TS_OPT_WSCALE_MASK;
        if (loop_ctx.option_sack)
                cookie |= TS_OPT_SACK;
        if (tcp_header->ece && tcp_header->cwr)
                cookie |= TS_OPT_ECN;
        *tsval = bpf_htonl(cookie);

        return true;
}
|
||||
|
||||
/* Fetch the TCP/IP option values configured by user space (map "values",
 * key 0), falling back to defaults when unset.
 *
 * Packed __u64 layout (as read below):
 *   bits  0-15: IPv4 MSS
 *   bits 16-19: window scale
 *   bits 24-31: TTL / hop limit
 *   bits 32-47: IPv6 MSS
 */
static __always_inline void values_get_tcpipopts(__u16 *mss, __u8 *wscale,
                                                 __u8 *ttl, bool ipv6)
{
        __u32 key = 0;
        __u64 *value;

        value = bpf_map_lookup_elem(&values, &key);
        if (value && *value != 0) {
                if (ipv6)
                        *mss = (*value >> 32) & 0xffff;
                else
                        *mss = *value & 0xffff;
                *wscale = (*value >> 16) & 0xf;
                *ttl = (*value >> 24) & 0xff;
                return;
        }

        /* Value 0 (or a missing entry) means "use defaults". */
        *mss = ipv6 ? DEFAULT_MSS6 : DEFAULT_MSS4;
        *wscale = DEFAULT_WSCALE;
        *ttl = DEFAULT_TTL;
}
|
||||
|
||||
static __always_inline void values_inc_synacks(void)
|
||||
{
|
||||
__u32 key = 1;
|
||||
__u32 *value;
|
||||
|
||||
value = bpf_map_lookup_elem(&values, &key);
|
||||
if (value)
|
||||
__sync_fetch_and_add(value, 1);
|
||||
}
|
||||
|
||||
/* Return true if @port (host byte order) is listed in the allowed_ports map.
 * The list ends at the first missing entry or at a 0 entry; 0 is checked as a
 * terminator before any comparison so a forbidden port of 0 can never match.
 */
static __always_inline bool check_port_allowed(__u16 port)
{
        __u32 idx;

        for (idx = 0; idx < MAX_ALLOWED_PORTS; idx++) {
                __u32 key = idx;
                __u16 *allowed = bpf_map_lookup_elem(&allowed_ports, &key);

                if (!allowed || *allowed == 0)
                        return false;
                if (*allowed == port)
                        return true;
        }

        return false;
}
|
||||
|
||||
/* Parsed header locations inside the packet buffer, filled by tcp_dissect().
 * Exactly one of ipv4/ipv6 is non-NULL for a dissected TCP packet.
 */
struct header_pointers {
        struct ethhdr *eth;
        struct iphdr *ipv4;
        struct ipv6hdr *ipv6;
        struct tcphdr *tcp;
        __u16 tcp_len;  /* TCP header length in bytes (doff * 4) */
};
|
||||
|
||||
/* Locate the Ethernet/IP/TCP headers in [data, data_end) and fill *hdr.
 *
 * Returns XDP_TX when a well-formed TCP packet was dissected, XDP_PASS for
 * non-TCP traffic the proxy doesn't handle, XDP_DROP for malformed packets.
 * Each "ptr + 1 > data_end" comparison is a verifier-mandated bounds check.
 */
static __always_inline int tcp_dissect(void *data, void *data_end,
                                       struct header_pointers *hdr)
{
        hdr->eth = data;
        if (hdr->eth + 1 > data_end)
                return XDP_DROP;

        switch (bpf_ntohs(hdr->eth->h_proto)) {
        case ETH_P_IP:
                hdr->ipv6 = NULL;

                hdr->ipv4 = (void *)hdr->eth + sizeof(*hdr->eth);
                if (hdr->ipv4 + 1 > data_end)
                        return XDP_DROP;
                /* IHL must cover at least the fixed IPv4 header. */
                if (hdr->ipv4->ihl * 4 < sizeof(*hdr->ipv4))
                        return XDP_DROP;
                if (hdr->ipv4->version != 4)
                        return XDP_DROP;

                if (hdr->ipv4->protocol != IPPROTO_TCP)
                        return XDP_PASS;

                hdr->tcp = (void *)hdr->ipv4 + hdr->ipv4->ihl * 4;
                break;
        case ETH_P_IPV6:
                hdr->ipv4 = NULL;

                hdr->ipv6 = (void *)hdr->eth + sizeof(*hdr->eth);
                if (hdr->ipv6 + 1 > data_end)
                        return XDP_DROP;
                if (hdr->ipv6->version != 6)
                        return XDP_DROP;

                /* XXX: Extension headers are not supported and could circumvent
                 * XDP SYN flood protection.
                 */
                if (hdr->ipv6->nexthdr != NEXTHDR_TCP)
                        return XDP_PASS;

                hdr->tcp = (void *)hdr->ipv6 + sizeof(*hdr->ipv6);
                break;
        default:
                /* XXX: VLANs will circumvent XDP SYN flood protection. */
                return XDP_PASS;
        }

        if (hdr->tcp + 1 > data_end)
                return XDP_DROP;
        hdr->tcp_len = hdr->tcp->doff * 4;
        if (hdr->tcp_len < sizeof(*hdr->tcp))
                return XDP_DROP;

        return XDP_TX;
}
|
||||
|
||||
/* Look the packet's 4-tuple up in conntrack.
 *
 * Returns XDP_PASS for packets belonging to a confirmed connection (they must
 * reach the stack), XDP_TX for packets the SYN proxy should handle itself,
 * XDP_DROP for fragments, XDP_ABORTED on internal errors.
 */
static __always_inline int tcp_lookup(void *ctx, struct header_pointers *hdr, bool xdp)
{
        struct bpf_ct_opts ct_lookup_opts = {
                .netns_id = BPF_F_CURRENT_NETNS,
                .l4proto = IPPROTO_TCP,
        };
        struct bpf_sock_tuple tup = {};
        struct nf_conn *ct;
        __u32 tup_size;

        if (hdr->ipv4) {
                /* TCP doesn't normally use fragments, and XDP can't reassemble
                 * them.
                 */
                if ((hdr->ipv4->frag_off & bpf_htons(IP_DF | IP_MF | IP_OFFSET)) != bpf_htons(IP_DF))
                        return XDP_DROP;

                tup.ipv4.saddr = hdr->ipv4->saddr;
                tup.ipv4.daddr = hdr->ipv4->daddr;
                tup.ipv4.sport = hdr->tcp->source;
                tup.ipv4.dport = hdr->tcp->dest;
                tup_size = sizeof(tup.ipv4);
        } else if (hdr->ipv6) {
                __builtin_memcpy(tup.ipv6.saddr, &hdr->ipv6->saddr, sizeof(tup.ipv6.saddr));
                __builtin_memcpy(tup.ipv6.daddr, &hdr->ipv6->daddr, sizeof(tup.ipv6.daddr));
                tup.ipv6.sport = hdr->tcp->source;
                tup.ipv6.dport = hdr->tcp->dest;
                tup_size = sizeof(tup.ipv6);
        } else {
                /* The verifier can't track that either ipv4 or ipv6 is not
                 * NULL.
                 */
                return XDP_ABORTED;
        }
        if (xdp)
                ct = bpf_xdp_ct_lookup(ctx, &tup, tup_size, &ct_lookup_opts, sizeof(ct_lookup_opts));
        else
                ct = bpf_skb_ct_lookup(ctx, &tup, tup_size, &ct_lookup_opts, sizeof(ct_lookup_opts));
        if (ct) {
                unsigned long status = ct->status;

                bpf_ct_release(ct);
                /* IPS_CONFIRMED_BIT is a bit *number* (not a mask): testing
                 * "status & IPS_CONFIRMED_BIT" would actually test the
                 * IPS_EXPECTED/IPS_SEEN_REPLY bits. Build the mask explicitly.
                 */
                if (status & (1 << IPS_CONFIRMED_BIT))
                        return XDP_PASS;
        } else if (ct_lookup_opts.error != -ENOENT) {
                return XDP_ABORTED;
        }

        /* error == -ENOENT || !(status & (1 << IPS_CONFIRMED_BIT)) */
        return XDP_TX;
}
|
||||
|
||||
/* Write the SYNACK's TCP options into @buf: MSS, then (if @tsopt is set)
 * SACK-permitted/NOP padding + timestamps, then window scale.
 * Returns the number of 32-bit words written (to be added to doff).
 *
 * tsopt[0] is the cookie tsval built by tscookie_init(): bit 4 appears to
 * carry the client's SACK-permitted flag and the low nibble the window scale
 * (0xf meaning "no wscale") — matches how tscookie_init() sets TS_OPT_SACK
 * and TS_OPT_WSCALE_MASK; confirm against the header defining those macros.
 */
static __always_inline __u8 tcp_mkoptions(__be32 *buf, __be32 *tsopt, __u16 mss,
                                          __u8 wscale)
{
        __be32 *start = buf;

        *buf++ = bpf_htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);

        if (!tsopt)
                return buf - start;

        /* Echo SACK-permitted only if the client offered it; otherwise pad
         * with NOPs so the timestamp option stays 32-bit aligned.
         */
        if (tsopt[0] & bpf_htonl(1 << 4))
                *buf++ = bpf_htonl((TCPOPT_SACK_PERM << 24) |
                                   (TCPOLEN_SACK_PERM << 16) |
                                   (TCPOPT_TIMESTAMP << 8) |
                                   TCPOLEN_TIMESTAMP);
        else
                *buf++ = bpf_htonl((TCPOPT_NOP << 24) |
                                   (TCPOPT_NOP << 16) |
                                   (TCPOPT_TIMESTAMP << 8) |
                                   TCPOLEN_TIMESTAMP);
        *buf++ = tsopt[0];
        *buf++ = tsopt[1];

        /* A low nibble of all-ones means the client sent no window scale. */
        if ((tsopt[0] & bpf_htonl(0xf)) != bpf_htonl(0xf))
                *buf++ = bpf_htonl((TCPOPT_NOP << 24) |
                                   (TCPOPT_WINDOW << 16) |
                                   (TCPOLEN_WINDOW << 8) |
                                   wscale);

        return buf - start;
}
|
||||
|
||||
/* Turn the received SYN's TCP header in place into our SYNACK: swap ports,
 * install the cookie as our sequence number, and append options.
 */
static __always_inline void tcp_gen_synack(struct tcphdr *tcp_header,
                                           __u32 cookie, __be32 *tsopt,
                                           __u16 mss, __u8 wscale)
{
        void *tcp_options;

        tcp_flag_word(tcp_header) = TCP_FLAG_SYN | TCP_FLAG_ACK;
        /* Bit 5 of the cookie tsval is set by tscookie_init() when the client
         * signalled ECN (ece && cwr); echo ECE in that case.
         */
        if (tsopt && (tsopt[0] & bpf_htonl(1 << 5)))
                tcp_flag_word(tcp_header) |= TCP_FLAG_ECE;
        tcp_header->doff = 5; /* doff is part of tcp_flag_word. */
        swap(tcp_header->source, tcp_header->dest);
        tcp_header->ack_seq = bpf_htonl(bpf_ntohl(tcp_header->seq) + 1);
        /* The SYN cookie is our initial sequence number. */
        tcp_header->seq = bpf_htonl(cookie);
        tcp_header->window = 0;
        tcp_header->urg_ptr = 0;
        tcp_header->check = 0; /* Calculate checksum later. */

        tcp_options = (void *)(tcp_header + 1);
        tcp_header->doff += tcp_mkoptions(tcp_options, tsopt, mss, wscale);
}
|
||||
|
||||
/* Rewrite the packet in place into an IPv4 SYNACK headed back to the client:
 * swap L2/L3 addresses, reset IP fields, then build the TCP SYNACK.
 */
static __always_inline void tcpv4_gen_synack(struct header_pointers *hdr,
                                             __u32 cookie, __be32 *tsopt)
{
        __u8 wscale;
        __u16 mss;
        __u8 ttl;

        values_get_tcpipopts(&mss, &wscale, &ttl, false);

        swap_eth_addr(hdr->eth->h_source, hdr->eth->h_dest);

        swap(hdr->ipv4->saddr, hdr->ipv4->daddr);
        hdr->ipv4->check = 0; /* Calculate checksum later. */
        hdr->ipv4->tos = 0;
        hdr->ipv4->id = 0;
        hdr->ipv4->ttl = ttl;

        tcp_gen_synack(hdr->tcp, cookie, tsopt, mss, wscale);

        /* tcp_gen_synack() changed doff; refresh the cached length and the
         * IP total length accordingly.
         */
        hdr->tcp_len = hdr->tcp->doff * 4;
        hdr->ipv4->tot_len = bpf_htons(sizeof(*hdr->ipv4) + hdr->tcp_len);
}
|
||||
|
||||
/* IPv6 counterpart of tcpv4_gen_synack(): rewrite the packet in place into a
 * SYNACK headed back to the client.
 */
static __always_inline void tcpv6_gen_synack(struct header_pointers *hdr,
                                             __u32 cookie, __be32 *tsopt)
{
        __u8 wscale;
        __u16 mss;
        __u8 ttl;

        values_get_tcpipopts(&mss, &wscale, &ttl, true);

        swap_eth_addr(hdr->eth->h_source, hdr->eth->h_dest);

        swap(hdr->ipv6->saddr, hdr->ipv6->daddr);
        /* Reset version(6)/traffic class/flow label in one 32-bit store. */
        *(__be32 *)hdr->ipv6 = bpf_htonl(0x60000000);
        hdr->ipv6->hop_limit = ttl;

        tcp_gen_synack(hdr->tcp, cookie, tsopt, mss, wscale);

        /* doff changed in tcp_gen_synack(); refresh length fields. */
        hdr->tcp_len = hdr->tcp->doff * 4;
        hdr->ipv6->payload_len = bpf_htons(hdr->tcp_len);
}
|
||||
|
||||
/* Handle a client SYN: verify checksums, generate a SYN cookie, rewrite the
 * packet into a SYNACK, fix up checksums and packet size.
 *
 * Returns XDP_TX to bounce the SYNACK back out the ingress interface,
 * XDP_DROP for invalid/blocked SYNs, XDP_ABORTED on internal errors.
 */
static __always_inline int syncookie_handle_syn(struct header_pointers *hdr,
                                                void *ctx,
                                                void *data, void *data_end,
                                                bool xdp)
{
        __u32 old_pkt_size, new_pkt_size;
        /* Unlike clang 10, clang 11 and 12 generate code that doesn't pass the
         * BPF verifier if tsopt is not volatile. Volatile forces it to store
         * the pointer value and use it directly, otherwise tcp_mkoptions is
         * (mis)compiled like this:
         *   if (!tsopt)
         *       return buf - start;
         *   reg = stored_return_value_of_tscookie_init;
         *   if (reg)
         *       tsopt = tsopt_buf;
         *   else
         *       tsopt = NULL;
         *   ...
         *   *buf++ = tsopt[1];
         * It creates a dead branch where tsopt is assigned NULL, but the
         * verifier can't prove it's dead and blocks the program.
         */
        __be32 * volatile tsopt = NULL;
        __be32 tsopt_buf[2] = {};
        __u16 ip_len;
        __u32 cookie;
        __s64 value;

        /* Checksum is not yet verified, but both checksum failure and TCP
         * header checks return XDP_DROP, so the order doesn't matter.
         */
        if (hdr->tcp->fin || hdr->tcp->rst)
                return XDP_DROP;

        /* Issue SYN cookies on allowed ports, drop SYN packets on blocked
         * ports.
         */
        if (!check_port_allowed(bpf_ntohs(hdr->tcp->dest)))
                return XDP_DROP;

        if (hdr->ipv4) {
                /* Check the IPv4 and TCP checksums before creating a SYNACK. */
                value = bpf_csum_diff(0, 0, (void *)hdr->ipv4, hdr->ipv4->ihl * 4, 0);
                if (value < 0)
                        return XDP_ABORTED;
                if (csum_fold(value) != 0)
                        return XDP_DROP; /* Bad IPv4 checksum. */

                value = bpf_csum_diff(0, 0, (void *)hdr->tcp, hdr->tcp_len, 0);
                if (value < 0)
                        return XDP_ABORTED;
                if (csum_tcpudp_magic(hdr->ipv4->saddr, hdr->ipv4->daddr,
                                      hdr->tcp_len, IPPROTO_TCP, value) != 0)
                        return XDP_DROP; /* Bad TCP checksum. */

                ip_len = sizeof(*hdr->ipv4);

                /* Kernel helper: derive the SYN cookie from the headers. */
                value = bpf_tcp_raw_gen_syncookie_ipv4(hdr->ipv4, hdr->tcp,
                                                       hdr->tcp_len);
        } else if (hdr->ipv6) {
                /* Check the TCP checksum before creating a SYNACK. */
                value = bpf_csum_diff(0, 0, (void *)hdr->tcp, hdr->tcp_len, 0);
                if (value < 0)
                        return XDP_ABORTED;
                if (csum_ipv6_magic(&hdr->ipv6->saddr, &hdr->ipv6->daddr,
                                    hdr->tcp_len, IPPROTO_TCP, value) != 0)
                        return XDP_DROP; /* Bad TCP checksum. */

                ip_len = sizeof(*hdr->ipv6);

                value = bpf_tcp_raw_gen_syncookie_ipv6(hdr->ipv6, hdr->tcp,
                                                       hdr->tcp_len);
        } else {
                return XDP_ABORTED;
        }

        if (value < 0)
                return XDP_ABORTED;
        cookie = (__u32)value;

        /* Encode the timestamp part of the cookie if the client sent
         * timestamps; tsopt stays NULL otherwise.
         */
        if (tscookie_init((void *)hdr->tcp, hdr->tcp_len,
                          &tsopt_buf[0], &tsopt_buf[1], data_end))
                tsopt = tsopt_buf;

        /* Check that there is enough space for a SYNACK. It also covers
         * the check that the destination of the __builtin_memmove below
         * doesn't overflow.
         */
        if (data + sizeof(*hdr->eth) + ip_len + TCP_MAXLEN > data_end)
                return XDP_ABORTED;

        if (hdr->ipv4) {
                if (hdr->ipv4->ihl * 4 > sizeof(*hdr->ipv4)) {
                        /* Strip IPv4 options: move the TCP header up against
                         * the minimal IPv4 header.
                         */
                        struct tcphdr *new_tcp_header;

                        new_tcp_header = data + sizeof(*hdr->eth) + sizeof(*hdr->ipv4);
                        __builtin_memmove(new_tcp_header, hdr->tcp, sizeof(*hdr->tcp));
                        hdr->tcp = new_tcp_header;

                        hdr->ipv4->ihl = sizeof(*hdr->ipv4) / 4;
                }

                tcpv4_gen_synack(hdr, cookie, tsopt);
        } else if (hdr->ipv6) {
                tcpv6_gen_synack(hdr, cookie, tsopt);
        } else {
                return XDP_ABORTED;
        }

        /* Recalculate checksums. */
        hdr->tcp->check = 0;
        value = bpf_csum_diff(0, 0, (void *)hdr->tcp, hdr->tcp_len, 0);
        if (value < 0)
                return XDP_ABORTED;
        if (hdr->ipv4) {
                hdr->tcp->check = csum_tcpudp_magic(hdr->ipv4->saddr,
                                                    hdr->ipv4->daddr,
                                                    hdr->tcp_len,
                                                    IPPROTO_TCP,
                                                    value);

                hdr->ipv4->check = 0;
                value = bpf_csum_diff(0, 0, (void *)hdr->ipv4, sizeof(*hdr->ipv4), 0);
                if (value < 0)
                        return XDP_ABORTED;
                hdr->ipv4->check = csum_fold(value);
        } else if (hdr->ipv6) {
                hdr->tcp->check = csum_ipv6_magic(&hdr->ipv6->saddr,
                                                  &hdr->ipv6->daddr,
                                                  hdr->tcp_len,
                                                  IPPROTO_TCP,
                                                  value);
        } else {
                return XDP_ABORTED;
        }

        /* Set the new packet size. */
        old_pkt_size = data_end - data;
        new_pkt_size = sizeof(*hdr->eth) + ip_len + hdr->tcp->doff * 4;
        if (xdp) {
                if (bpf_xdp_adjust_tail(ctx, new_pkt_size - old_pkt_size))
                        return XDP_ABORTED;
        } else {
                if (bpf_skb_change_tail(ctx, new_pkt_size, 0))
                        return XDP_ABORTED;
        }

        values_inc_synacks();

        return XDP_TX;
}
|
||||
|
||||
static __always_inline int syncookie_handle_ack(struct header_pointers *hdr)
|
||||
{
|
||||
int err;
|
||||
|
||||
if (hdr->tcp->rst)
|
||||
return XDP_DROP;
|
||||
|
||||
if (hdr->ipv4)
|
||||
err = bpf_tcp_raw_check_syncookie_ipv4(hdr->ipv4, hdr->tcp);
|
||||
else if (hdr->ipv6)
|
||||
err = bpf_tcp_raw_check_syncookie_ipv6(hdr->ipv6, hdr->tcp);
|
||||
else
|
||||
return XDP_ABORTED;
|
||||
if (err)
|
||||
return XDP_DROP;
|
||||
|
||||
return XDP_PASS;
|
||||
}
|
||||
|
||||
/* First stage shared by the XDP and TC entry points: dissect the packet,
 * filter out established connections via conntrack, and grow the buffer so
 * the second stage can build a maximal SYNACK.
 *
 * Returns XDP_TX when part2 should run; any other verdict is final.
 * NOTE: on XDP_TX the packet may have been resized, so the caller must
 * reload data/data_end before calling syncookie_part2().
 */
static __always_inline int syncookie_part1(void *ctx, void *data, void *data_end,
                                           struct header_pointers *hdr, bool xdp)
{
        struct bpf_ct_opts ct_lookup_opts = {
                .netns_id = BPF_F_CURRENT_NETNS,
                .l4proto = IPPROTO_TCP,
        };
        int ret;

        ret = tcp_dissect(data, data_end, hdr);
        if (ret != XDP_TX)
                return ret;

        ret = tcp_lookup(ctx, hdr, xdp);
        if (ret != XDP_TX)
                return ret;

        /* Packet is TCP and doesn't belong to an established connection. */

        /* Only pure SYN or pure ACK are part of the cookie handshake. */
        if ((hdr->tcp->syn ^ hdr->tcp->ack) != 1)
                return XDP_DROP;

        /* Grow the TCP header to TCP_MAXLEN to be able to pass any hdr->tcp_len
         * to bpf_tcp_raw_gen_syncookie_ipv{4,6} and pass the verifier.
         */
        if (xdp) {
                if (bpf_xdp_adjust_tail(ctx, TCP_MAXLEN - hdr->tcp_len))
                        return XDP_ABORTED;
        } else {
                /* Without volatile the verifier throws this error:
                 * R9 32-bit pointer arithmetic prohibited
                 */
                volatile u64 old_len = data_end - data;

                if (bpf_skb_change_tail(ctx, old_len + TCP_MAXLEN - hdr->tcp_len, 0))
                        return XDP_ABORTED;
        }

        return XDP_TX;
}
|
||||
|
||||
/* Second stage: the buffer was resized by part1, so all header pointers must
 * be re-derived from the fresh data/data_end before dispatching to the SYN or
 * ACK handler. The generous bounds checks (IPV4_MAXLEN, TCP_MAXLEN) exist to
 * satisfy the verifier for the checksum and cookie helpers.
 */
static __always_inline int syncookie_part2(void *ctx, void *data, void *data_end,
                                           struct header_pointers *hdr, bool xdp)
{
        if (hdr->ipv4) {
                hdr->eth = data;
                hdr->ipv4 = (void *)hdr->eth + sizeof(*hdr->eth);
                /* IPV4_MAXLEN is needed when calculating checksum.
                 * At least sizeof(struct iphdr) is needed here to access ihl.
                 */
                if ((void *)hdr->ipv4 + IPV4_MAXLEN > data_end)
                        return XDP_ABORTED;
                hdr->tcp = (void *)hdr->ipv4 + hdr->ipv4->ihl * 4;
        } else if (hdr->ipv6) {
                hdr->eth = data;
                hdr->ipv6 = (void *)hdr->eth + sizeof(*hdr->eth);
                hdr->tcp = (void *)hdr->ipv6 + sizeof(*hdr->ipv6);
        } else {
                return XDP_ABORTED;
        }

        if ((void *)hdr->tcp + TCP_MAXLEN > data_end)
                return XDP_ABORTED;

        /* We run out of registers, tcp_len gets spilled to the stack, and the
         * verifier forgets its min and max values checked above in tcp_dissect.
         */
        hdr->tcp_len = hdr->tcp->doff * 4;
        if (hdr->tcp_len < sizeof(*hdr->tcp))
                return XDP_ABORTED;

        return hdr->tcp->syn ? syncookie_handle_syn(hdr, ctx, data, data_end, xdp) :
                               syncookie_handle_ack(hdr);
}
|
||||
|
||||
/* XDP entry point: run both SYN-proxy stages, reloading the packet pointers
 * in between because part1 may resize the buffer.
 */
SEC("xdp")
int syncookie_xdp(struct xdp_md *ctx)
{
        struct header_pointers hdr;
        int verdict;

        verdict = syncookie_part1(ctx, (void *)(long)ctx->data,
                                  (void *)(long)ctx->data_end, &hdr, true);
        if (verdict != XDP_TX)
                return verdict;

        /* part1 grew the packet; data/data_end must be re-read. */
        return syncookie_part2(ctx, (void *)(long)ctx->data,
                               (void *)(long)ctx->data_end, &hdr, true);
}
|
||||
|
||||
/* TC entry point: same two-stage flow as syncookie_xdp(), with XDP verdicts
 * translated to TC actions (XDP_TX becomes a redirect out the same netdev).
 */
SEC("tc")
int syncookie_tc(struct __sk_buff *skb)
{
        void *data_end = (void *)(long)skb->data_end;
        void *data = (void *)(long)skb->data;
        struct header_pointers hdr;
        int verdict;

        verdict = syncookie_part1(skb, data, data_end, &hdr, false);
        if (verdict != XDP_TX)
                return verdict == XDP_PASS ? TC_ACT_OK : TC_ACT_SHOT;

        /* part1 grew the packet; re-read the data pointers. */
        data_end = (void *)(long)skb->data_end;
        data = (void *)(long)skb->data;

        verdict = syncookie_part2(skb, data, data_end, &hdr, false);
        if (verdict == XDP_PASS)
                return TC_ACT_OK;
        if (verdict == XDP_TX)
                return bpf_redirect(skb->ifindex, 0);
        return TC_ACT_SHOT;
}
|
||||
|
||||
char _license[] SEC("license") = "GPL";
|
@ -58,7 +58,7 @@ class BlockParser(object):
|
||||
|
||||
class ArrayParser(BlockParser):
|
||||
"""
|
||||
A parser for extracting dicionaries of values from some BPF-related arrays.
|
||||
A parser for extracting a set of values from some BPF-related arrays.
|
||||
@reader: a pointer to the open file to parse
|
||||
@array_name: name of the array to parse
|
||||
"""
|
||||
@ -66,7 +66,7 @@ class ArrayParser(BlockParser):
|
||||
|
||||
def __init__(self, reader, array_name):
|
||||
self.array_name = array_name
|
||||
self.start_marker = re.compile(f'(static )?const char \* const {self.array_name}\[.*\] = {{\n')
|
||||
self.start_marker = re.compile(f'(static )?const bool {self.array_name}\[.*\] = {{\n')
|
||||
super().__init__(reader)
|
||||
|
||||
def search_block(self):
|
||||
@ -80,15 +80,15 @@ class ArrayParser(BlockParser):
|
||||
Parse a block and return data as a dictionary. Items to extract must be
|
||||
on separate lines in the file.
|
||||
"""
|
||||
pattern = re.compile('\[(BPF_\w*)\]\s*= "(.*)",?$')
|
||||
entries = {}
|
||||
pattern = re.compile('\[(BPF_\w*)\]\s*= (true|false),?$')
|
||||
entries = set()
|
||||
while True:
|
||||
line = self.reader.readline()
|
||||
if line == '' or re.match(self.end_marker, line):
|
||||
break
|
||||
capture = pattern.search(line)
|
||||
if capture:
|
||||
entries[capture.group(1)] = capture.group(2)
|
||||
entries |= {capture.group(1)}
|
||||
return entries
|
||||
|
||||
class InlineListParser(BlockParser):
|
||||
@ -115,7 +115,7 @@ class InlineListParser(BlockParser):
|
||||
class FileExtractor(object):
|
||||
"""
|
||||
A generic reader for extracting data from a given file. This class contains
|
||||
several helper methods that wrap arround parser objects to extract values
|
||||
several helper methods that wrap around parser objects to extract values
|
||||
from different structures.
|
||||
This class does not offer a way to set a filename, which is expected to be
|
||||
defined in children classes.
|
||||
@ -139,21 +139,19 @@ class FileExtractor(object):
|
||||
|
||||
def get_types_from_array(self, array_name):
|
||||
"""
|
||||
Search for and parse an array associating names to BPF_* enum members,
|
||||
for example:
|
||||
Search for and parse a list of allowed BPF_* enum members, for example:
|
||||
|
||||
const char * const prog_type_name[] = {
|
||||
[BPF_PROG_TYPE_UNSPEC] = "unspec",
|
||||
[BPF_PROG_TYPE_SOCKET_FILTER] = "socket_filter",
|
||||
[BPF_PROG_TYPE_KPROBE] = "kprobe",
|
||||
const bool prog_type_name[] = {
|
||||
[BPF_PROG_TYPE_UNSPEC] = true,
|
||||
[BPF_PROG_TYPE_SOCKET_FILTER] = true,
|
||||
[BPF_PROG_TYPE_KPROBE] = true,
|
||||
};
|
||||
|
||||
Return a dictionary with the enum member names as keys and the
|
||||
associated names as values, for example:
|
||||
Return a set of the enum members, for example:
|
||||
|
||||
{'BPF_PROG_TYPE_UNSPEC': 'unspec',
|
||||
'BPF_PROG_TYPE_SOCKET_FILTER': 'socket_filter',
|
||||
'BPF_PROG_TYPE_KPROBE': 'kprobe'}
|
||||
{'BPF_PROG_TYPE_UNSPEC',
|
||||
'BPF_PROG_TYPE_SOCKET_FILTER',
|
||||
'BPF_PROG_TYPE_KPROBE'}
|
||||
|
||||
@array_name: name of the array to parse
|
||||
"""
|
||||
@ -186,6 +184,27 @@ class FileExtractor(object):
|
||||
parser.search_block(start_marker)
|
||||
return parser.parse(pattern, end_marker)
|
||||
|
||||
def make_enum_map(self, names, enum_prefix):
|
||||
"""
|
||||
Search for and parse an enum containing BPF_* members, just as get_enum
|
||||
does. However, instead of just returning a set of the variant names,
|
||||
also generate a textual representation from them by (assuming and)
|
||||
removing a provided prefix and lowercasing the remainder. Then return a
|
||||
dict mapping from name to textual representation.
|
||||
|
||||
@enum_values: a set of enum values; e.g., as retrieved by get_enum
|
||||
@enum_prefix: the prefix to remove from each of the variants to infer
|
||||
textual representation
|
||||
"""
|
||||
mapping = {}
|
||||
for name in names:
|
||||
if not name.startswith(enum_prefix):
|
||||
raise Exception(f"enum variant {name} does not start with {enum_prefix}")
|
||||
text = name[len(enum_prefix):].lower()
|
||||
mapping[name] = text
|
||||
|
||||
return mapping
|
||||
|
||||
def __get_description_list(self, start_marker, pattern, end_marker):
|
||||
parser = InlineListParser(self.reader)
|
||||
parser.search_block(start_marker)
|
||||
@ -333,11 +352,9 @@ class ProgFileExtractor(SourceFileExtractor):
|
||||
"""
|
||||
filename = os.path.join(BPFTOOL_DIR, 'prog.c')
|
||||
|
||||
def get_prog_types(self):
|
||||
return self.get_types_from_array('prog_type_name')
|
||||
|
||||
def get_attach_types(self):
|
||||
return self.get_types_from_array('attach_type_strings')
|
||||
types = self.get_types_from_array('attach_types')
|
||||
return self.make_enum_map(types, 'BPF_')
|
||||
|
||||
def get_prog_attach_help(self):
|
||||
return self.get_help_list('ATTACH_TYPE')
|
||||
@ -348,9 +365,6 @@ class MapFileExtractor(SourceFileExtractor):
|
||||
"""
|
||||
filename = os.path.join(BPFTOOL_DIR, 'map.c')
|
||||
|
||||
def get_map_types(self):
|
||||
return self.get_types_from_array('map_type_name')
|
||||
|
||||
def get_map_help(self):
|
||||
return self.get_help_list('TYPE')
|
||||
|
||||
@ -363,30 +377,6 @@ class CgroupFileExtractor(SourceFileExtractor):
|
||||
def get_prog_attach_help(self):
|
||||
return self.get_help_list('ATTACH_TYPE')
|
||||
|
||||
class CommonFileExtractor(SourceFileExtractor):
|
||||
"""
|
||||
An extractor for bpftool's common.c.
|
||||
"""
|
||||
filename = os.path.join(BPFTOOL_DIR, 'common.c')
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.attach_types = {}
|
||||
|
||||
def get_attach_types(self):
|
||||
if not self.attach_types:
|
||||
self.attach_types = self.get_types_from_array('attach_type_name')
|
||||
return self.attach_types
|
||||
|
||||
def get_cgroup_attach_types(self):
|
||||
if not self.attach_types:
|
||||
self.get_attach_types()
|
||||
cgroup_types = {}
|
||||
for (key, value) in self.attach_types.items():
|
||||
if key.find('BPF_CGROUP') != -1:
|
||||
cgroup_types[key] = value
|
||||
return cgroup_types
|
||||
|
||||
class GenericSourceExtractor(SourceFileExtractor):
|
||||
"""
|
||||
An extractor for generic source code files.
|
||||
@ -403,14 +393,28 @@ class BpfHeaderExtractor(FileExtractor):
|
||||
"""
|
||||
filename = os.path.join(INCLUDE_DIR, 'uapi/linux/bpf.h')
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.attach_types = {}
|
||||
|
||||
def get_prog_types(self):
|
||||
return self.get_enum('bpf_prog_type')
|
||||
|
||||
def get_map_types(self):
|
||||
return self.get_enum('bpf_map_type')
|
||||
def get_map_type_map(self):
|
||||
names = self.get_enum('bpf_map_type')
|
||||
return self.make_enum_map(names, 'BPF_MAP_TYPE_')
|
||||
|
||||
def get_attach_types(self):
|
||||
return self.get_enum('bpf_attach_type')
|
||||
def get_attach_type_map(self):
|
||||
if not self.attach_types:
|
||||
names = self.get_enum('bpf_attach_type')
|
||||
self.attach_types = self.make_enum_map(names, 'BPF_')
|
||||
return self.attach_types
|
||||
|
||||
def get_cgroup_attach_type_map(self):
|
||||
if not self.attach_types:
|
||||
self.get_attach_type_map()
|
||||
return {name: text for name, text in self.attach_types.items()
|
||||
if name.startswith('BPF_CGROUP')}
|
||||
|
||||
class ManPageExtractor(FileExtractor):
|
||||
"""
|
||||
@ -495,21 +499,12 @@ def main():
|
||||
""")
|
||||
args = argParser.parse_args()
|
||||
|
||||
# Map types (enum)
|
||||
|
||||
bpf_info = BpfHeaderExtractor()
|
||||
ref = bpf_info.get_map_types()
|
||||
|
||||
map_info = MapFileExtractor()
|
||||
source_map_items = map_info.get_map_types()
|
||||
map_types_enum = set(source_map_items.keys())
|
||||
|
||||
verify(ref, map_types_enum,
|
||||
f'Comparing BPF header (enum bpf_map_type) and {MapFileExtractor.filename} (map_type_name):')
|
||||
|
||||
# Map types (names)
|
||||
|
||||
source_map_types = set(source_map_items.values())
|
||||
map_info = MapFileExtractor()
|
||||
source_map_types = set(bpf_info.get_map_type_map().values())
|
||||
source_map_types.discard('unspec')
|
||||
|
||||
help_map_types = map_info.get_map_help()
|
||||
@ -525,37 +520,17 @@ def main():
|
||||
bashcomp_map_types = bashcomp_info.get_map_types()
|
||||
|
||||
verify(source_map_types, help_map_types,
|
||||
f'Comparing {MapFileExtractor.filename} (map_type_name) and {MapFileExtractor.filename} (do_help() TYPE):')
|
||||
f'Comparing {BpfHeaderExtractor.filename} (bpf_map_type) and {MapFileExtractor.filename} (do_help() TYPE):')
|
||||
verify(source_map_types, man_map_types,
|
||||
f'Comparing {MapFileExtractor.filename} (map_type_name) and {ManMapExtractor.filename} (TYPE):')
|
||||
f'Comparing {BpfHeaderExtractor.filename} (bpf_map_type) and {ManMapExtractor.filename} (TYPE):')
|
||||
verify(help_map_options, man_map_options,
|
||||
f'Comparing {MapFileExtractor.filename} (do_help() OPTIONS) and {ManMapExtractor.filename} (OPTIONS):')
|
||||
verify(source_map_types, bashcomp_map_types,
|
||||
f'Comparing {MapFileExtractor.filename} (map_type_name) and {BashcompExtractor.filename} (BPFTOOL_MAP_CREATE_TYPES):')
|
||||
|
||||
# Program types (enum)
|
||||
|
||||
ref = bpf_info.get_prog_types()
|
||||
|
||||
prog_info = ProgFileExtractor()
|
||||
prog_types = set(prog_info.get_prog_types().keys())
|
||||
|
||||
verify(ref, prog_types,
|
||||
f'Comparing BPF header (enum bpf_prog_type) and {ProgFileExtractor.filename} (prog_type_name):')
|
||||
|
||||
# Attach types (enum)
|
||||
|
||||
ref = bpf_info.get_attach_types()
|
||||
bpf_info.close()
|
||||
|
||||
common_info = CommonFileExtractor()
|
||||
attach_types = common_info.get_attach_types()
|
||||
|
||||
verify(ref, attach_types,
|
||||
f'Comparing BPF header (enum bpf_attach_type) and {CommonFileExtractor.filename} (attach_type_name):')
|
||||
f'Comparing {BpfHeaderExtractor.filename} (bpf_map_type) and {BashcompExtractor.filename} (BPFTOOL_MAP_CREATE_TYPES):')
|
||||
|
||||
# Attach types (names)
|
||||
|
||||
prog_info = ProgFileExtractor()
|
||||
source_prog_attach_types = set(prog_info.get_attach_types().values())
|
||||
|
||||
help_prog_attach_types = prog_info.get_prog_attach_help()
|
||||
@ -571,18 +546,17 @@ def main():
|
||||
bashcomp_prog_attach_types = bashcomp_info.get_prog_attach_types()
|
||||
|
||||
verify(source_prog_attach_types, help_prog_attach_types,
|
||||
f'Comparing {ProgFileExtractor.filename} (attach_type_strings) and {ProgFileExtractor.filename} (do_help() ATTACH_TYPE):')
|
||||
f'Comparing {ProgFileExtractor.filename} (bpf_attach_type) and {ProgFileExtractor.filename} (do_help() ATTACH_TYPE):')
|
||||
verify(source_prog_attach_types, man_prog_attach_types,
|
||||
f'Comparing {ProgFileExtractor.filename} (attach_type_strings) and {ManProgExtractor.filename} (ATTACH_TYPE):')
|
||||
f'Comparing {ProgFileExtractor.filename} (bpf_attach_type) and {ManProgExtractor.filename} (ATTACH_TYPE):')
|
||||
verify(help_prog_options, man_prog_options,
|
||||
f'Comparing {ProgFileExtractor.filename} (do_help() OPTIONS) and {ManProgExtractor.filename} (OPTIONS):')
|
||||
verify(source_prog_attach_types, bashcomp_prog_attach_types,
|
||||
f'Comparing {ProgFileExtractor.filename} (attach_type_strings) and {BashcompExtractor.filename} (BPFTOOL_PROG_ATTACH_TYPES):')
|
||||
f'Comparing {ProgFileExtractor.filename} (bpf_attach_type) and {BashcompExtractor.filename} (BPFTOOL_PROG_ATTACH_TYPES):')
|
||||
|
||||
# Cgroup attach types
|
||||
|
||||
source_cgroup_attach_types = set(common_info.get_cgroup_attach_types().values())
|
||||
common_info.close()
|
||||
source_cgroup_attach_types = set(bpf_info.get_cgroup_attach_type_map().values())
|
||||
bpf_info.close()
|
||||
|
||||
cgroup_info = CgroupFileExtractor()
|
||||
help_cgroup_attach_types = cgroup_info.get_prog_attach_help()
|
||||
@ -598,13 +572,13 @@ def main():
|
||||
bashcomp_info.close()
|
||||
|
||||
verify(source_cgroup_attach_types, help_cgroup_attach_types,
|
||||
f'Comparing {CommonFileExtractor.filename} (attach_type_strings) and {CgroupFileExtractor.filename} (do_help() ATTACH_TYPE):')
|
||||
f'Comparing {BpfHeaderExtractor.filename} (bpf_attach_type) and {CgroupFileExtractor.filename} (do_help() ATTACH_TYPE):')
|
||||
verify(source_cgroup_attach_types, man_cgroup_attach_types,
|
||||
f'Comparing {CommonFileExtractor.filename} (attach_type_strings) and {ManCgroupExtractor.filename} (ATTACH_TYPE):')
|
||||
f'Comparing {BpfHeaderExtractor.filename} (bpf_attach_type) and {ManCgroupExtractor.filename} (ATTACH_TYPE):')
|
||||
verify(help_cgroup_options, man_cgroup_options,
|
||||
f'Comparing {CgroupFileExtractor.filename} (do_help() OPTIONS) and {ManCgroupExtractor.filename} (OPTIONS):')
|
||||
verify(source_cgroup_attach_types, bashcomp_cgroup_attach_types,
|
||||
f'Comparing {CommonFileExtractor.filename} (attach_type_strings) and {BashcompExtractor.filename} (BPFTOOL_CGROUP_ATTACH_TYPES):')
|
||||
f'Comparing {BpfHeaderExtractor.filename} (bpf_attach_type) and {BashcompExtractor.filename} (BPFTOOL_CGROUP_ATTACH_TYPES):')
|
||||
|
||||
# Options for remaining commands
|
||||
|
||||
|
@ -39,6 +39,7 @@
|
||||
#define BTF_MEMBER_ENC(name, type, bits_offset) \
|
||||
(name), (type), (bits_offset)
|
||||
#define BTF_ENUM_ENC(name, val) (name), (val)
|
||||
#define BTF_ENUM64_ENC(name, val_lo32, val_hi32) (name), (val_lo32), (val_hi32)
|
||||
#define BTF_MEMBER_OFFSET(bitfield_size, bits_offset) \
|
||||
((bitfield_size) << 24 | (bits_offset))
|
||||
|
||||
|
@ -95,5 +95,9 @@ for server_args in "" "-I veth0 -s -S" ; do
|
||||
test "$client_args" "$server_args"
|
||||
done
|
||||
|
||||
# Test drv mode
|
||||
test "-I veth1 -N" "-I veth0 -s -N"
|
||||
test "-I veth1 -N -c 10" "-I veth0 -s -N"
|
||||
|
||||
echo "OK. All tests passed"
|
||||
exit 0
|
||||
|
466
tools/testing/selftests/bpf/xdp_synproxy.c
Normal file
466
tools/testing/selftests/bpf/xdp_synproxy.c
Normal file
@ -0,0 +1,466 @@
|
||||
// SPDX-License-Identifier: LGPL-2.1 OR BSD-2-Clause
|
||||
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
|
||||
|
||||
#include <stdnoreturn.h>
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <errno.h>
|
||||
#include <unistd.h>
|
||||
#include <getopt.h>
|
||||
#include <signal.h>
|
||||
#include <sys/types.h>
|
||||
#include <bpf/bpf.h>
|
||||
#include <bpf/libbpf.h>
|
||||
#include <net/if.h>
|
||||
#include <linux/if_link.h>
|
||||
#include <linux/limits.h>
|
||||
|
||||
static unsigned int ifindex;
|
||||
static __u32 attached_prog_id;
|
||||
static bool attached_tc;
|
||||
|
||||
static void noreturn cleanup(int sig)
|
||||
{
|
||||
LIBBPF_OPTS(bpf_xdp_attach_opts, opts);
|
||||
int prog_fd;
|
||||
int err;
|
||||
|
||||
if (attached_prog_id == 0)
|
||||
exit(0);
|
||||
|
||||
if (attached_tc) {
|
||||
LIBBPF_OPTS(bpf_tc_hook, hook,
|
||||
.ifindex = ifindex,
|
||||
.attach_point = BPF_TC_INGRESS);
|
||||
|
||||
err = bpf_tc_hook_destroy(&hook);
|
||||
if (err < 0) {
|
||||
fprintf(stderr, "Error: bpf_tc_hook_destroy: %s\n", strerror(-err));
|
||||
fprintf(stderr, "Failed to destroy the TC hook\n");
|
||||
exit(1);
|
||||
}
|
||||
exit(0);
|
||||
}
|
||||
|
||||
prog_fd = bpf_prog_get_fd_by_id(attached_prog_id);
|
||||
if (prog_fd < 0) {
|
||||
fprintf(stderr, "Error: bpf_prog_get_fd_by_id: %s\n", strerror(-prog_fd));
|
||||
err = bpf_xdp_attach(ifindex, -1, 0, NULL);
|
||||
if (err < 0) {
|
||||
fprintf(stderr, "Error: bpf_set_link_xdp_fd: %s\n", strerror(-err));
|
||||
fprintf(stderr, "Failed to detach XDP program\n");
|
||||
exit(1);
|
||||
}
|
||||
} else {
|
||||
opts.old_prog_fd = prog_fd;
|
||||
err = bpf_xdp_attach(ifindex, -1, XDP_FLAGS_REPLACE, &opts);
|
||||
close(prog_fd);
|
||||
if (err < 0) {
|
||||
fprintf(stderr, "Error: bpf_set_link_xdp_fd_opts: %s\n", strerror(-err));
|
||||
/* Not an error if already replaced by someone else. */
|
||||
if (err != -EEXIST) {
|
||||
fprintf(stderr, "Failed to detach XDP program\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
exit(0);
|
||||
}
|
||||
|
||||
/* Print the command-line synopsis to stderr and exit with status 1. */
static noreturn void usage(const char *progname)
{
	static const char *fmt =
		"Usage: %s [--iface <iface>|--prog <prog_id>] [--mss4 <mss ipv4> --mss6 <mss ipv6> --wscale <wscale> --ttl <ttl>] [--ports <port1>,<port2>,...] [--single] [--tc]\n";

	fprintf(stderr, fmt, progname);
	exit(1);
}
|
||||
|
||||
/* Parse a base-10 unsigned long from arg. Exits via usage() when the
 * string is empty, contains trailing garbage, overflows, or exceeds limit.
 */
static unsigned long parse_arg_ul(const char *progname, const char *arg, unsigned long limit)
{
	unsigned long value;
	char *end;

	errno = 0;
	value = strtoul(arg, &end, 10);
	if (arg[0] == '\0' || *end != '\0' || errno != 0 || value > limit)
		usage(progname);

	return value;
}
|
||||
|
||||
static void parse_options(int argc, char *argv[], unsigned int *ifindex, __u32 *prog_id,
|
||||
__u64 *tcpipopts, char **ports, bool *single, bool *tc)
|
||||
{
|
||||
static struct option long_options[] = {
|
||||
{ "help", no_argument, NULL, 'h' },
|
||||
{ "iface", required_argument, NULL, 'i' },
|
||||
{ "prog", required_argument, NULL, 'x' },
|
||||
{ "mss4", required_argument, NULL, 4 },
|
||||
{ "mss6", required_argument, NULL, 6 },
|
||||
{ "wscale", required_argument, NULL, 'w' },
|
||||
{ "ttl", required_argument, NULL, 't' },
|
||||
{ "ports", required_argument, NULL, 'p' },
|
||||
{ "single", no_argument, NULL, 's' },
|
||||
{ "tc", no_argument, NULL, 'c' },
|
||||
{ NULL, 0, NULL, 0 },
|
||||
};
|
||||
unsigned long mss4, mss6, wscale, ttl;
|
||||
unsigned int tcpipopts_mask = 0;
|
||||
|
||||
if (argc < 2)
|
||||
usage(argv[0]);
|
||||
|
||||
*ifindex = 0;
|
||||
*prog_id = 0;
|
||||
*tcpipopts = 0;
|
||||
*ports = NULL;
|
||||
*single = false;
|
||||
|
||||
while (true) {
|
||||
int opt;
|
||||
|
||||
opt = getopt_long(argc, argv, "", long_options, NULL);
|
||||
if (opt == -1)
|
||||
break;
|
||||
|
||||
switch (opt) {
|
||||
case 'h':
|
||||
usage(argv[0]);
|
||||
break;
|
||||
case 'i':
|
||||
*ifindex = if_nametoindex(optarg);
|
||||
if (*ifindex == 0)
|
||||
usage(argv[0]);
|
||||
break;
|
||||
case 'x':
|
||||
*prog_id = parse_arg_ul(argv[0], optarg, UINT32_MAX);
|
||||
if (*prog_id == 0)
|
||||
usage(argv[0]);
|
||||
break;
|
||||
case 4:
|
||||
mss4 = parse_arg_ul(argv[0], optarg, UINT16_MAX);
|
||||
tcpipopts_mask |= 1 << 0;
|
||||
break;
|
||||
case 6:
|
||||
mss6 = parse_arg_ul(argv[0], optarg, UINT16_MAX);
|
||||
tcpipopts_mask |= 1 << 1;
|
||||
break;
|
||||
case 'w':
|
||||
wscale = parse_arg_ul(argv[0], optarg, 14);
|
||||
tcpipopts_mask |= 1 << 2;
|
||||
break;
|
||||
case 't':
|
||||
ttl = parse_arg_ul(argv[0], optarg, UINT8_MAX);
|
||||
tcpipopts_mask |= 1 << 3;
|
||||
break;
|
||||
case 'p':
|
||||
*ports = optarg;
|
||||
break;
|
||||
case 's':
|
||||
*single = true;
|
||||
break;
|
||||
case 'c':
|
||||
*tc = true;
|
||||
break;
|
||||
default:
|
||||
usage(argv[0]);
|
||||
}
|
||||
}
|
||||
if (optind < argc)
|
||||
usage(argv[0]);
|
||||
|
||||
if (tcpipopts_mask == 0xf) {
|
||||
if (mss4 == 0 || mss6 == 0 || wscale == 0 || ttl == 0)
|
||||
usage(argv[0]);
|
||||
*tcpipopts = (mss6 << 32) | (ttl << 24) | (wscale << 16) | mss4;
|
||||
} else if (tcpipopts_mask != 0) {
|
||||
usage(argv[0]);
|
||||
}
|
||||
|
||||
if (*ifindex != 0 && *prog_id != 0)
|
||||
usage(argv[0]);
|
||||
if (*ifindex == 0 && *prog_id == 0)
|
||||
usage(argv[0]);
|
||||
}
|
||||
|
||||
static int syncookie_attach(const char *argv0, unsigned int ifindex, bool tc)
|
||||
{
|
||||
struct bpf_prog_info info = {};
|
||||
__u32 info_len = sizeof(info);
|
||||
char xdp_filename[PATH_MAX];
|
||||
struct bpf_program *prog;
|
||||
struct bpf_object *obj;
|
||||
int prog_fd;
|
||||
int err;
|
||||
|
||||
snprintf(xdp_filename, sizeof(xdp_filename), "%s_kern.o", argv0);
|
||||
obj = bpf_object__open_file(xdp_filename, NULL);
|
||||
err = libbpf_get_error(obj);
|
||||
if (err < 0) {
|
||||
fprintf(stderr, "Error: bpf_object__open_file: %s\n", strerror(-err));
|
||||
return err;
|
||||
}
|
||||
|
||||
err = bpf_object__load(obj);
|
||||
if (err < 0) {
|
||||
fprintf(stderr, "Error: bpf_object__open_file: %s\n", strerror(-err));
|
||||
return err;
|
||||
}
|
||||
|
||||
prog = bpf_object__find_program_by_name(obj, tc ? "syncookie_tc" : "syncookie_xdp");
|
||||
if (!prog) {
|
||||
fprintf(stderr, "Error: bpf_object__find_program_by_name: program was not found\n");
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
prog_fd = bpf_program__fd(prog);
|
||||
|
||||
err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
|
||||
if (err < 0) {
|
||||
fprintf(stderr, "Error: bpf_obj_get_info_by_fd: %s\n", strerror(-err));
|
||||
goto out;
|
||||
}
|
||||
attached_tc = tc;
|
||||
attached_prog_id = info.id;
|
||||
signal(SIGINT, cleanup);
|
||||
signal(SIGTERM, cleanup);
|
||||
if (tc) {
|
||||
LIBBPF_OPTS(bpf_tc_hook, hook,
|
||||
.ifindex = ifindex,
|
||||
.attach_point = BPF_TC_INGRESS);
|
||||
LIBBPF_OPTS(bpf_tc_opts, opts,
|
||||
.handle = 1,
|
||||
.priority = 1,
|
||||
.prog_fd = prog_fd);
|
||||
|
||||
err = bpf_tc_hook_create(&hook);
|
||||
if (err < 0) {
|
||||
fprintf(stderr, "Error: bpf_tc_hook_create: %s\n",
|
||||
strerror(-err));
|
||||
goto fail;
|
||||
}
|
||||
err = bpf_tc_attach(&hook, &opts);
|
||||
if (err < 0) {
|
||||
fprintf(stderr, "Error: bpf_tc_attach: %s\n",
|
||||
strerror(-err));
|
||||
goto fail;
|
||||
}
|
||||
|
||||
} else {
|
||||
err = bpf_xdp_attach(ifindex, prog_fd,
|
||||
XDP_FLAGS_UPDATE_IF_NOEXIST, NULL);
|
||||
if (err < 0) {
|
||||
fprintf(stderr, "Error: bpf_set_link_xdp_fd: %s\n",
|
||||
strerror(-err));
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
err = 0;
|
||||
out:
|
||||
bpf_object__close(obj);
|
||||
return err;
|
||||
fail:
|
||||
signal(SIGINT, SIG_DFL);
|
||||
signal(SIGTERM, SIG_DFL);
|
||||
attached_prog_id = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Locate the "values" and "allowed_ports" maps belonging to prog_id and
 * return open FDs for them through the out-parameters. On success both FDs
 * are valid and 0 is returned; on failure both are reset to -1 and a
 * negative errno-style code is returned.
 */
static int syncookie_open_bpf_maps(__u32 prog_id, int *values_map_fd, int *ports_map_fd)
{
	struct bpf_prog_info prog_info;
	__u32 map_ids[8];
	__u32 nr_map_ids;
	__u32 info_len;
	int prog_fd;
	int err;
	int i;

	*values_map_fd = -1;
	*ports_map_fd = -1;

	prog_fd = bpf_prog_get_fd_by_id(prog_id);
	if (prog_fd < 0) {
		fprintf(stderr, "Error: bpf_prog_get_fd_by_id: %s\n", strerror(-prog_fd));
		return prog_fd;
	}

	prog_info = (struct bpf_prog_info) {
		.nr_map_ids = 8,
		/* BUGFIX: cast the pointer through unsigned long so 32-bit
		 * builds don't warn/misbehave converting it to __u64. */
		.map_ids = (__u64)(unsigned long)map_ids,
	};
	info_len = sizeof(prog_info);

	err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
	if (err != 0) {
		fprintf(stderr, "Error: bpf_obj_get_info_by_fd: %s\n", strerror(-err));
		goto out;
	}

	if (prog_info.nr_map_ids < 2) {
		fprintf(stderr, "Error: Found %u BPF maps, expected at least 2\n",
			prog_info.nr_map_ids);
		err = -ENOENT;
		goto out;
	}

	/* BUGFIX: the kernel reports the program's total map count in
	 * nr_map_ids even when it exceeds the buffer we supplied, so clamp
	 * to the buffer size to avoid reading past map_ids[]. */
	nr_map_ids = prog_info.nr_map_ids;
	if (nr_map_ids > sizeof(map_ids) / sizeof(map_ids[0]))
		nr_map_ids = sizeof(map_ids) / sizeof(map_ids[0]);

	for (i = 0; i < (int)nr_map_ids; i++) {
		struct bpf_map_info map_info = {};
		int map_fd;

		err = bpf_map_get_fd_by_id(map_ids[i]);
		if (err < 0) {
			fprintf(stderr, "Error: bpf_map_get_fd_by_id: %s\n", strerror(-err));
			goto err_close_map_fds;
		}
		map_fd = err;

		info_len = sizeof(map_info);
		err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len);
		if (err != 0) {
			fprintf(stderr, "Error: bpf_obj_get_info_by_fd: %s\n", strerror(-err));
			close(map_fd);
			goto err_close_map_fds;
		}
		if (strcmp(map_info.name, "values") == 0) {
			*values_map_fd = map_fd;
			continue;
		}
		if (strcmp(map_info.name, "allowed_ports") == 0) {
			*ports_map_fd = map_fd;
			continue;
		}
		/* Not a map we care about. */
		close(map_fd);
	}

	if (*values_map_fd != -1 && *ports_map_fd != -1) {
		err = 0;
		goto out;
	}

	err = -ENOENT;

err_close_map_fds:
	if (*values_map_fd != -1)
		close(*values_map_fd);
	if (*ports_map_fd != -1)
		close(*ports_map_fd);
	*values_map_fd = -1;
	*ports_map_fd = -1;

out:
	close(prog_fd);
	return err;
}
|
||||
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
int values_map_fd, ports_map_fd;
|
||||
__u64 tcpipopts;
|
||||
bool firstiter;
|
||||
__u64 prevcnt;
|
||||
__u32 prog_id;
|
||||
char *ports;
|
||||
bool single;
|
||||
int err = 0;
|
||||
bool tc;
|
||||
|
||||
parse_options(argc, argv, &ifindex, &prog_id, &tcpipopts, &ports,
|
||||
&single, &tc);
|
||||
|
||||
if (prog_id == 0) {
|
||||
if (!tc) {
|
||||
err = bpf_xdp_query_id(ifindex, 0, &prog_id);
|
||||
if (err < 0) {
|
||||
fprintf(stderr, "Error: bpf_get_link_xdp_id: %s\n",
|
||||
strerror(-err));
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
if (prog_id == 0) {
|
||||
err = syncookie_attach(argv[0], ifindex, tc);
|
||||
if (err < 0)
|
||||
goto out;
|
||||
prog_id = attached_prog_id;
|
||||
}
|
||||
}
|
||||
|
||||
err = syncookie_open_bpf_maps(prog_id, &values_map_fd, &ports_map_fd);
|
||||
if (err < 0)
|
||||
goto out;
|
||||
|
||||
if (ports) {
|
||||
__u16 port_last = 0;
|
||||
__u32 port_idx = 0;
|
||||
char *p = ports;
|
||||
|
||||
fprintf(stderr, "Replacing allowed ports\n");
|
||||
|
||||
while (p && *p != '\0') {
|
||||
char *token = strsep(&p, ",");
|
||||
__u16 port;
|
||||
|
||||
port = parse_arg_ul(argv[0], token, UINT16_MAX);
|
||||
err = bpf_map_update_elem(ports_map_fd, &port_idx, &port, BPF_ANY);
|
||||
if (err != 0) {
|
||||
fprintf(stderr, "Error: bpf_map_update_elem: %s\n", strerror(-err));
|
||||
fprintf(stderr, "Failed to add port %u (index %u)\n",
|
||||
port, port_idx);
|
||||
goto out_close_maps;
|
||||
}
|
||||
fprintf(stderr, "Added port %u\n", port);
|
||||
port_idx++;
|
||||
}
|
||||
err = bpf_map_update_elem(ports_map_fd, &port_idx, &port_last, BPF_ANY);
|
||||
if (err != 0) {
|
||||
fprintf(stderr, "Error: bpf_map_update_elem: %s\n", strerror(-err));
|
||||
fprintf(stderr, "Failed to add the terminator value 0 (index %u)\n",
|
||||
port_idx);
|
||||
goto out_close_maps;
|
||||
}
|
||||
}
|
||||
|
||||
if (tcpipopts) {
|
||||
__u32 key = 0;
|
||||
|
||||
fprintf(stderr, "Replacing TCP/IP options\n");
|
||||
|
||||
err = bpf_map_update_elem(values_map_fd, &key, &tcpipopts, BPF_ANY);
|
||||
if (err != 0) {
|
||||
fprintf(stderr, "Error: bpf_map_update_elem: %s\n", strerror(-err));
|
||||
goto out_close_maps;
|
||||
}
|
||||
}
|
||||
|
||||
if ((ports || tcpipopts) && attached_prog_id == 0 && !single)
|
||||
goto out_close_maps;
|
||||
|
||||
prevcnt = 0;
|
||||
firstiter = true;
|
||||
while (true) {
|
||||
__u32 key = 1;
|
||||
__u64 value;
|
||||
|
||||
err = bpf_map_lookup_elem(values_map_fd, &key, &value);
|
||||
if (err != 0) {
|
||||
fprintf(stderr, "Error: bpf_map_lookup_elem: %s\n", strerror(-err));
|
||||
goto out_close_maps;
|
||||
}
|
||||
if (firstiter) {
|
||||
prevcnt = value;
|
||||
firstiter = false;
|
||||
}
|
||||
if (single) {
|
||||
printf("Total SYNACKs generated: %llu\n", value);
|
||||
break;
|
||||
}
|
||||
printf("SYNACKs generated: %llu (total %llu)\n", value - prevcnt, value);
|
||||
prevcnt = value;
|
||||
sleep(1);
|
||||
}
|
||||
|
||||
out_close_maps:
|
||||
close(values_map_fd);
|
||||
close(ports_map_fd);
|
||||
out:
|
||||
return err == 0 ? 0 : 1;
|
||||
}
|
Loading…
Reference in New Issue
Block a user