From 9efc7794496d531593751a3fb008030d5f9ba87c Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 22 May 2019 10:51:28 -0700 Subject: [PATCH 01/83] libbpf: emit diff of mismatched public API, if any It's easy to have a mismatch of "intended to be public" vs really exposed API functions. While Makefile does check for this mismatch, if it actually occurs it's not trivial to determine which functions are accidentally exposed. This patch dumps out a diff showing what's not supposed to be exposed facilitating easier fixing. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov --- tools/lib/bpf/Makefile | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile index f91639bf5650..a2aceadf68db 100644 --- a/tools/lib/bpf/Makefile +++ b/tools/lib/bpf/Makefile @@ -204,6 +204,16 @@ check_abi: $(OUTPUT)libbpf.so "versioned symbols in $^ ($(VERSIONED_SYM_COUNT))." \ "Please make sure all LIBBPF_API symbols are" \ "versioned in $(VERSION_SCRIPT)." >&2; \ + readelf -s --wide $(OUTPUT)libbpf-in.o | \ + awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$8}'| \ + sort -u > $(OUTPUT)libbpf_global_syms.tmp; \ + readelf -s --wide $(OUTPUT)libbpf.so | \ + grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | \ + sort -u > $(OUTPUT)libbpf_versioned_syms.tmp; \ + diff -u $(OUTPUT)libbpf_global_syms.tmp \ + $(OUTPUT)libbpf_versioned_syms.tmp; \ + rm $(OUTPUT)libbpf_global_syms.tmp \ + $(OUTPUT)libbpf_versioned_syms.tmp; \ exit 1; \ fi From b285fcb760da7aa87d6d31e6c6a4907d82d9299c Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Tue, 21 May 2019 20:14:19 -0700 Subject: [PATCH 02/83] bpf: bump jmp sequence limit The limit of 1024 subsequent jumps was causing otherwise valid programs to be rejected. Bump it to 8192 and make the error more verbose. Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann --- kernel/bpf/verifier.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 95f9354495ad..3f8b5443cc67 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -176,7 +176,7 @@ struct bpf_verifier_stack_elem { struct bpf_verifier_stack_elem *next; }; -#define BPF_COMPLEXITY_LIMIT_STACK 1024 +#define BPF_COMPLEXITY_LIMIT_JMP_SEQ 8192 #define BPF_COMPLEXITY_LIMIT_STATES 64 #define BPF_MAP_PTR_UNPRIV 1UL @@ -782,8 +782,9 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env, if (err) goto err; elem->st.speculative |= speculative; - if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) { - verbose(env, "BPF program is too complex\n"); + if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) { + verbose(env, "The sequence of %d jumps is too complex.\n", + env->stack_size); goto err; } return &elem->st; From 7c0c6095d48dcd0e67c917aa73cdbb2715aafc36 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Tue, 21 May 2019 20:14:20 -0700 Subject: [PATCH 03/83] selftests/bpf: adjust verifier scale test Adjust scale tests to check for new jmp sequence limit. BPF_JGT had to be changed to BPF_JEQ because the verifier was too smart. It tracked the known safe range of R0 values and pruned the search earlier before hitting exact 8192 limit. bpf_semi_rand_get() was too (un)?lucky. k = 0; was missing in bpf_fill_scale2. It was testing a bit shorter sequence of jumps than intended. 
Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann --- tools/testing/selftests/bpf/test_verifier.c | 31 +++++++++++---------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index ccd896b98cac..6e2fec84c929 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -210,33 +210,35 @@ static void bpf_fill_rand_ld_dw(struct bpf_test *self) self->retval = (uint32_t)res; } -/* test the sequence of 1k jumps */ +#define MAX_JMP_SEQ 8192 + +/* test the sequence of 8k jumps */ static void bpf_fill_scale1(struct bpf_test *self) { struct bpf_insn *insn = self->fill_insns; int i = 0, k = 0; insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1); - /* test to check that the sequence of 1024 jumps is acceptable */ - while (k++ < 1024) { + /* test to check that the long sequence of jumps is acceptable */ + while (k++ < MAX_JMP_SEQ) { insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32); - insn[i++] = BPF_JMP_IMM(BPF_JGT, BPF_REG_0, bpf_semi_rand_get(), 2); + insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2); insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10); insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, -8 * (k % 64 + 1)); } - /* every jump adds 1024 steps to insn_processed, so to stay exactly - * within 1m limit add MAX_TEST_INSNS - 1025 MOVs and 1 EXIT + /* every jump adds 1 step to insn_processed, so to stay exactly + * within 1m limit add MAX_TEST_INSNS - MAX_JMP_SEQ - 1 MOVs and 1 EXIT */ - while (i < MAX_TEST_INSNS - 1025) + while (i < MAX_TEST_INSNS - MAX_JMP_SEQ - 1) insn[i++] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 42); insn[i] = BPF_EXIT_INSN(); self->prog_len = i + 1; self->retval = 42; } -/* test the sequence of 1k jumps in inner most function (function depth 8)*/ +/* test the sequence of 8k jumps in inner most function (function depth 8)*/ static void bpf_fill_scale2(struct bpf_test *self) { struct bpf_insn *insn = self->fill_insns; @@ -248,19 +250,20 @@ static void bpf_fill_scale2(struct bpf_test *self) insn[i++] = BPF_EXIT_INSN(); } insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1); - /* test to check that the sequence of 1024 jumps is acceptable */ - while (k++ < 1024) { + /* test to check that the long sequence of jumps is acceptable */ + k = 0; + while (k++ < MAX_JMP_SEQ) { insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32); - insn[i++] = BPF_JMP_IMM(BPF_JGT, BPF_REG_0, bpf_semi_rand_get(), 2); + insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2); insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10); insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, -8 * (k % (64 - 4 * FUNC_NEST) + 1)); } - /* every jump adds 1024 steps to insn_processed, so to stay exactly - * within 1m limit add MAX_TEST_INSNS - 1025 MOVs and 1 EXIT + /* every jump adds 1 step to insn_processed, so to stay exactly + * within 1m limit add MAX_TEST_INSNS - MAX_JMP_SEQ - 1 MOVs and 1 EXIT */ - while (i < MAX_TEST_INSNS - 1025) + while (i < MAX_TEST_INSNS - MAX_JMP_SEQ - 1) insn[i++] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 42); insn[i] = BPF_EXIT_INSN(); self->prog_len = i + 1; From 7c9441066ab53168093c79477aabd575f7c14129 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Tue, 21 May 2019 20:14:21 -0700 Subject: [PATCH 04/83] selftests/bpf: add pyperf scale test Add a snippet of pyperf bpf program used to collect python stack traces as a scale 
test for the verifier. At 189 loop iterations llvm 9.0 starts ignoring '#pragma unroll' and generates partially unrolled loop instead. Hence use 50, 100, and 180 loop iterations to stress test. Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann --- .../bpf/prog_tests/bpf_verif_scale.c | 31 +- tools/testing/selftests/bpf/progs/pyperf.h | 268 ++++++++++++++++++ tools/testing/selftests/bpf/progs/pyperf100.c | 4 + tools/testing/selftests/bpf/progs/pyperf180.c | 4 + tools/testing/selftests/bpf/progs/pyperf50.c | 4 + 5 files changed, 298 insertions(+), 13 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/pyperf.h create mode 100644 tools/testing/selftests/bpf/progs/pyperf100.c create mode 100644 tools/testing/selftests/bpf/progs/pyperf180.c create mode 100644 tools/testing/selftests/bpf/progs/pyperf50.c diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c index b74e2f6e96d0..ce03289c9077 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c @@ -12,7 +12,7 @@ static int libbpf_debug_print(enum libbpf_print_level level, return vfprintf(stderr, "%s", args); } -static int check_load(const char *file) +static int check_load(const char *file, enum bpf_prog_type type) { struct bpf_prog_load_attr attr; struct bpf_object *obj = NULL; @@ -20,7 +20,7 @@ static int check_load(const char *file) memset(&attr, 0, sizeof(struct bpf_prog_load_attr)); attr.file = file; - attr.prog_type = BPF_PROG_TYPE_SCHED_CLS; + attr.prog_type = type; attr.log_level = 4; err = bpf_prog_load_xattr(&attr, &obj, &prog_fd); bpf_object__close(obj); @@ -31,19 +31,24 @@ static int check_load(const char *file) void test_bpf_verif_scale(void) { - const char *file1 = "./test_verif_scale1.o"; - const char *file2 = "./test_verif_scale2.o"; - const char *file3 = "./test_verif_scale3.o"; - int err; + const char *scale[] = { + "./test_verif_scale1.o", "./test_verif_scale2.o", "./test_verif_scale3.o" + }; + const char *pyperf[] = { + "./pyperf50.o", "./pyperf100.o", "./pyperf180.o" + }; + int err, i; if (verifier_stats) libbpf_set_print(libbpf_debug_print); - err = check_load(file1); - err |= check_load(file2); - err |= check_load(file3); - if (!err) - printf("test_verif_scale:OK\n"); - else - printf("test_verif_scale:FAIL\n"); + for (i = 0; i < ARRAY_SIZE(scale); i++) { + err = check_load(scale[i], BPF_PROG_TYPE_SCHED_CLS); + printf("test_scale:%s:%s\n", scale[i], err ? "FAIL" : "OK"); + } + + for (i = 0; i < ARRAY_SIZE(pyperf); i++) { + err = check_load(pyperf[i], BPF_PROG_TYPE_RAW_TRACEPOINT); + printf("test_scale:%s:%s\n", pyperf[i], err ? 
"FAIL" : "OK"); + } } diff --git a/tools/testing/selftests/bpf/progs/pyperf.h b/tools/testing/selftests/bpf/progs/pyperf.h new file mode 100644 index 000000000000..0cc5e4ee90bd --- /dev/null +++ b/tools/testing/selftests/bpf/progs/pyperf.h @@ -0,0 +1,268 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2019 Facebook +#include +#include +#include +#include +#include +#include +#include "bpf_helpers.h" + +#define FUNCTION_NAME_LEN 64 +#define FILE_NAME_LEN 128 +#define TASK_COMM_LEN 16 + +typedef struct { + int PyThreadState_frame; + int PyThreadState_thread; + int PyFrameObject_back; + int PyFrameObject_code; + int PyFrameObject_lineno; + int PyCodeObject_filename; + int PyCodeObject_name; + int String_data; + int String_size; +} OffsetConfig; + +typedef struct { + uintptr_t current_state_addr; + uintptr_t tls_key_addr; + OffsetConfig offsets; + bool use_tls; +} PidData; + +typedef struct { + uint32_t success; +} Stats; + +typedef struct { + char name[FUNCTION_NAME_LEN]; + char file[FILE_NAME_LEN]; +} Symbol; + +typedef struct { + uint32_t pid; + uint32_t tid; + char comm[TASK_COMM_LEN]; + int32_t kernel_stack_id; + int32_t user_stack_id; + bool thread_current; + bool pthread_match; + bool stack_complete; + int16_t stack_len; + int32_t stack[STACK_MAX_LEN]; + + int has_meta; + int metadata; + char dummy_safeguard; +} Event; + + +struct bpf_elf_map { + __u32 type; + __u32 size_key; + __u32 size_value; + __u32 max_elem; + __u32 flags; +}; + +typedef int pid_t; + +typedef struct { + void* f_back; // PyFrameObject.f_back, previous frame + void* f_code; // PyFrameObject.f_code, pointer to PyCodeObject + void* co_filename; // PyCodeObject.co_filename + void* co_name; // PyCodeObject.co_name +} FrameData; + +static inline __attribute__((__always_inline__)) void* +get_thread_state(void* tls_base, PidData* pidData) +{ + void* thread_state; + int key; + + bpf_probe_read(&key, sizeof(key), (void*)(long)pidData->tls_key_addr); + bpf_probe_read(&thread_state, sizeof(thread_state), + tls_base + 0x310 + key * 0x10 + 0x08); + return thread_state; +} + +static inline __attribute__((__always_inline__)) bool +get_frame_data(void* frame_ptr, PidData* pidData, FrameData* frame, Symbol* symbol) +{ + // read data from PyFrameObject + bpf_probe_read(&frame->f_back, + sizeof(frame->f_back), + frame_ptr + pidData->offsets.PyFrameObject_back); + bpf_probe_read(&frame->f_code, + sizeof(frame->f_code), + frame_ptr + pidData->offsets.PyFrameObject_code); + + // read data from PyCodeObject + if (!frame->f_code) + return false; + bpf_probe_read(&frame->co_filename, + sizeof(frame->co_filename), + frame->f_code + pidData->offsets.PyCodeObject_filename); + bpf_probe_read(&frame->co_name, + sizeof(frame->co_name), + frame->f_code + pidData->offsets.PyCodeObject_name); + // read actual names into symbol + if (frame->co_filename) + bpf_probe_read_str(&symbol->file, + sizeof(symbol->file), + frame->co_filename + pidData->offsets.String_data); + if (frame->co_name) + bpf_probe_read_str(&symbol->name, + sizeof(symbol->name), + frame->co_name + pidData->offsets.String_data); + return true; +} + +struct bpf_elf_map SEC("maps") pidmap = { + .type = BPF_MAP_TYPE_HASH, + .size_key = sizeof(int), + .size_value = sizeof(PidData), + .max_elem = 1, +}; + +struct bpf_elf_map SEC("maps") eventmap = { + .type = BPF_MAP_TYPE_HASH, + .size_key = sizeof(int), + .size_value = sizeof(Event), + .max_elem = 1, +}; + +struct bpf_elf_map SEC("maps") symbolmap = { + .type = BPF_MAP_TYPE_HASH, + .size_key = sizeof(Symbol), + .size_value = 
sizeof(int), + .max_elem = 1, +}; + +struct bpf_elf_map SEC("maps") statsmap = { + .type = BPF_MAP_TYPE_ARRAY, + .size_key = sizeof(Stats), + .size_value = sizeof(int), + .max_elem = 1, +}; + +struct bpf_elf_map SEC("maps") perfmap = { + .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY, + .size_key = sizeof(int), + .size_value = sizeof(int), + .max_elem = 32, +}; + +struct bpf_elf_map SEC("maps") stackmap = { + .type = BPF_MAP_TYPE_STACK_TRACE, + .size_key = sizeof(int), + .size_value = sizeof(long long) * 127, + .max_elem = 1000, +}; + +static inline __attribute__((__always_inline__)) int __on_event(struct pt_regs *ctx) +{ + uint64_t pid_tgid = bpf_get_current_pid_tgid(); + pid_t pid = (pid_t)(pid_tgid >> 32); + PidData* pidData = bpf_map_lookup_elem(&pidmap, &pid); + if (!pidData) + return 0; + + int zero = 0; + Event* event = bpf_map_lookup_elem(&eventmap, &zero); + if (!event) + return 0; + + event->pid = pid; + + event->tid = (pid_t)pid_tgid; + bpf_get_current_comm(&event->comm, sizeof(event->comm)); + + event->user_stack_id = bpf_get_stackid(ctx, &stackmap, BPF_F_USER_STACK); + event->kernel_stack_id = bpf_get_stackid(ctx, &stackmap, 0); + + void* thread_state_current = (void*)0; + bpf_probe_read(&thread_state_current, + sizeof(thread_state_current), + (void*)(long)pidData->current_state_addr); + + struct task_struct* task = (struct task_struct*)bpf_get_current_task(); + void* tls_base = (void*)task; + + void* thread_state = pidData->use_tls ? get_thread_state(tls_base, pidData) + : thread_state_current; + event->thread_current = thread_state == thread_state_current; + + if (pidData->use_tls) { + uint64_t pthread_created; + uint64_t pthread_self; + bpf_probe_read(&pthread_self, sizeof(pthread_self), tls_base + 0x10); + + bpf_probe_read(&pthread_created, + sizeof(pthread_created), + thread_state + pidData->offsets.PyThreadState_thread); + event->pthread_match = pthread_created == pthread_self; + } else { + event->pthread_match = 1; + } + + if (event->pthread_match || !pidData->use_tls) { + void* frame_ptr; + FrameData frame; + Symbol sym = {}; + int cur_cpu = bpf_get_smp_processor_id(); + + bpf_probe_read(&frame_ptr, + sizeof(frame_ptr), + thread_state + pidData->offsets.PyThreadState_frame); + + int32_t* symbol_counter = bpf_map_lookup_elem(&symbolmap, &sym); + if (symbol_counter == NULL) + return 0; +#pragma unroll + /* Unwind python stack */ + for (int i = 0; i < STACK_MAX_LEN; ++i) { + if (frame_ptr && get_frame_data(frame_ptr, pidData, &frame, &sym)) { + int32_t new_symbol_id = *symbol_counter * 64 + cur_cpu; + int32_t *symbol_id = bpf_map_lookup_elem(&symbolmap, &sym); + if (!symbol_id) { + bpf_map_update_elem(&symbolmap, &sym, &zero, 0); + symbol_id = bpf_map_lookup_elem(&symbolmap, &sym); + if (!symbol_id) + return 0; + } + if (*symbol_id == new_symbol_id) + (*symbol_counter)++; + event->stack[i] = *symbol_id; + event->stack_len = i + 1; + frame_ptr = frame.f_back; + } + } + event->stack_complete = frame_ptr == NULL; + } else { + event->stack_complete = 1; + } + + Stats* stats = bpf_map_lookup_elem(&statsmap, &zero); + if (stats) + stats->success++; + + event->has_meta = 0; + bpf_perf_event_output(ctx, &perfmap, 0, event, offsetof(Event, metadata)); + return 0; +} + +SEC("raw_tracepoint/kfree_skb") +int on_event(struct pt_regs* ctx) +{ + int i, ret = 0; + ret |= __on_event(ctx); + ret |= __on_event(ctx); + ret |= __on_event(ctx); + ret |= __on_event(ctx); + ret |= __on_event(ctx); + return ret; +} + +char _license[] SEC("license") = "GPL"; diff --git 
a/tools/testing/selftests/bpf/progs/pyperf100.c b/tools/testing/selftests/bpf/progs/pyperf100.c new file mode 100644 index 000000000000..29786325db54 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/pyperf100.c @@ -0,0 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2019 Facebook +#define STACK_MAX_LEN 100 +#include "pyperf.h" diff --git a/tools/testing/selftests/bpf/progs/pyperf180.c b/tools/testing/selftests/bpf/progs/pyperf180.c new file mode 100644 index 000000000000..c39f559d3100 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/pyperf180.c @@ -0,0 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2019 Facebook +#define STACK_MAX_LEN 180 +#include "pyperf.h" diff --git a/tools/testing/selftests/bpf/progs/pyperf50.c b/tools/testing/selftests/bpf/progs/pyperf50.c new file mode 100644 index 000000000000..ef7ce340a292 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/pyperf50.c @@ -0,0 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2019 Facebook +#define STACK_MAX_LEN 50 +#include "pyperf.h" From 5d839021675a2e1b76653189cc6a90cfd8e30a69 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Tue, 21 May 2019 20:17:05 -0700 Subject: [PATCH 05/83] bpf: cleanup explored_states clean up explored_states to prep for introduction of hashtable No functional changes. Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- kernel/bpf/verifier.c | 29 ++++++++++++++++++++--------- 1 file changed, 20 insertions(+), 9 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 3f8b5443cc67..736b5a0d4848 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -5437,6 +5437,17 @@ enum { }; #define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L) +static struct bpf_verifier_state_list **explored_state( + struct bpf_verifier_env *env, + int idx) +{ + return &env->explored_states[idx]; +} + +static void init_explored_state(struct bpf_verifier_env *env, int idx) +{ + env->explored_states[idx] = STATE_LIST_MARK; +} /* t, w, e - match pseudo-code above: * t - index of current instruction @@ -5462,7 +5473,7 @@ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env) if (e == BRANCH) /* mark branch target for state pruning */ - env->explored_states[w] = STATE_LIST_MARK; + init_explored_state(env, w); if (insn_state[w] == 0) { /* tree-edge */ @@ -5530,9 +5541,9 @@ peek_stack: else if (ret < 0) goto err_free; if (t + 1 < insn_cnt) - env->explored_states[t + 1] = STATE_LIST_MARK; + init_explored_state(env, t + 1); if (insns[t].src_reg == BPF_PSEUDO_CALL) { - env->explored_states[t] = STATE_LIST_MARK; + init_explored_state(env, t); ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env); if (ret == 1) goto peek_stack; @@ -5555,10 +5566,10 @@ peek_stack: * after every call and jump */ if (t + 1 < insn_cnt) - env->explored_states[t + 1] = STATE_LIST_MARK; + init_explored_state(env, t + 1); } else { /* conditional jump with two edges */ - env->explored_states[t] = STATE_LIST_MARK; + init_explored_state(env, t); ret = push_insn(t, t + 1, FALLTHROUGH, env); if (ret == 1) goto peek_stack; @@ -6006,7 +6017,7 @@ static void clean_live_states(struct bpf_verifier_env *env, int insn, struct bpf_verifier_state_list *sl; int i; - sl = env->explored_states[insn]; + sl = *explored_state(env, insn); if (!sl) return; @@ -6365,7 +6376,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) struct bpf_verifier_state *cur = env->cur_state, *new; int i, j, err, states_cnt = 0; - pprev = 
&env->explored_states[insn_idx]; + pprev = explored_state(env, insn_idx); sl = *pprev; if (!sl) @@ -6452,8 +6463,8 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) kfree(new_sl); return err; } - new_sl->next = env->explored_states[insn_idx]; - env->explored_states[insn_idx] = new_sl; + new_sl->next = *explored_state(env, insn_idx); + *explored_state(env, insn_idx) = new_sl; /* connect new state to parentage chain. Current frame needs all * registers connected. Only r6 - r9 of the callers are alive (pushed * to the stack implicitly by JITs) so in callers' frames connect just From a8f500af0ccffc3d2aaf9018537981cb173865a1 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Tue, 21 May 2019 20:17:06 -0700 Subject: [PATCH 06/83] bpf: split explored_states split explored_states into prune_point boolean mark and link list of explored states. This removes STATE_LIST_MARK hack and allows marks to be separate from states. Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- include/linux/bpf_verifier.h | 1 + kernel/bpf/verifier.c | 31 +++++++++++++------------------ 2 files changed, 14 insertions(+), 18 deletions(-) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 1305ccbd8fe6..02bba09a0ea1 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -233,6 +233,7 @@ struct bpf_insn_aux_data { int sanitize_stack_off; /* stack slot to be cleared */ bool seen; /* this insn was processed by the verifier */ u8 alu_state; /* used in combination with alu_limit */ + bool prune_point; unsigned int orig_idx; /* original instruction index */ }; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 736b5a0d4848..6a3e69ba891e 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -5436,7 +5436,6 @@ enum { BRANCH = 2, }; -#define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L) static struct bpf_verifier_state_list **explored_state( struct bpf_verifier_env *env, int idx) @@ -5446,7 +5445,7 @@ static struct bpf_verifier_state_list **explored_state( static void init_explored_state(struct bpf_verifier_env *env, int idx) { - env->explored_states[idx] = STATE_LIST_MARK; + env->insn_aux_data[idx].prune_point = true; } /* t, w, e - match pseudo-code above: @@ -6018,10 +6017,7 @@ static void clean_live_states(struct bpf_verifier_env *env, int insn, int i; sl = *explored_state(env, insn); - if (!sl) - return; - - while (sl != STATE_LIST_MARK) { + while (sl) { if (sl->state.curframe != cur->curframe) goto next; for (i = 0; i <= cur->curframe; i++) @@ -6376,18 +6372,18 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) struct bpf_verifier_state *cur = env->cur_state, *new; int i, j, err, states_cnt = 0; - pprev = explored_state(env, insn_idx); - sl = *pprev; - - if (!sl) + if (!env->insn_aux_data[insn_idx].prune_point) /* this 'insn_idx' instruction wasn't marked, so we will not * be doing state search here */ return 0; + pprev = explored_state(env, insn_idx); + sl = *pprev; + clean_live_states(env, insn_idx, cur); - while (sl != STATE_LIST_MARK) { + while (sl) { if (states_equal(env, &sl->state, cur)) { sl->hit_cnt++; /* reached equivalent register/stack state, @@ -8145,13 +8141,12 @@ static void free_states(struct bpf_verifier_env *env) for (i = 0; i < env->prog->len; i++) { sl = env->explored_states[i]; - if (sl) - while (sl != STATE_LIST_MARK) { - sln = sl->next; - free_verifier_state(&sl->state, false); - kfree(sl); - sl = sln; - } + while (sl) { + sln = sl->next; + 
free_verifier_state(&sl->state, false); + kfree(sl); + sl = sln; + } } kvfree(env->explored_states); From dc2a4ebc0b44a212fcf72242210e56aa17e7317b Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Tue, 21 May 2019 20:17:07 -0700 Subject: [PATCH 07/83] bpf: convert explored_states to hash table All prune points inside a callee bpf function most likely will have different callsites. For example, if function foo() is called from two callsites the half of explored states in all prune points in foo() will be useless for subsequent walking of one of those callsites. Fortunately explored_states pruning heuristics keeps the number of states per prune point small, but walking these states is still a waste of cpu time when the callsite of the current state is different from the callsite of the explored state. To improve pruning logic convert explored_states into hash table and use simple insn_idx ^ callsite hash to select hash bucket. This optimization has no effect on programs without bpf2bpf calls and drastically improves programs with calls. In the later case it reduces total memory consumption in 1M scale tests by almost 3 times (peak_states drops from 5752 to 2016). Care should be taken when comparing the states for equivalency. Since the same hash bucket can now contain states with different indices the insn_idx has to be part of verifier_state and compared. Different hash table sizes and different hash functions were explored, but the results were not significantly better vs this patch. They can be improved in the future. Hit/miss heuristic is not counting index miscompare as a miss. Otherwise verifier stats become unstable when experimenting with different hash functions. Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- include/linux/bpf_verifier.h | 1 + kernel/bpf/verifier.c | 23 ++++++++++++++++++----- 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 02bba09a0ea1..405b502283c5 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -187,6 +187,7 @@ struct bpf_func_state { struct bpf_verifier_state { /* call stack tracking */ struct bpf_func_state *frame[MAX_CALL_FRAMES]; + u32 insn_idx; u32 curframe; u32 active_spin_lock; bool speculative; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 6a3e69ba891e..550091c7a46a 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -5436,11 +5436,19 @@ enum { BRANCH = 2, }; +static u32 state_htab_size(struct bpf_verifier_env *env) +{ + return env->prog->len; +} + static struct bpf_verifier_state_list **explored_state( struct bpf_verifier_env *env, int idx) { - return &env->explored_states[idx]; + struct bpf_verifier_state *cur = env->cur_state; + struct bpf_func_state *state = cur->frame[cur->curframe]; + + return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)]; } static void init_explored_state(struct bpf_verifier_env *env, int idx) @@ -6018,7 +6026,8 @@ static void clean_live_states(struct bpf_verifier_env *env, int insn, sl = *explored_state(env, insn); while (sl) { - if (sl->state.curframe != cur->curframe) + if (sl->state.insn_idx != insn || + sl->state.curframe != cur->curframe) goto next; for (i = 0; i <= cur->curframe; i++) if (sl->state.frame[i]->callsite != cur->frame[i]->callsite) @@ -6384,6 +6393,9 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) clean_live_states(env, insn_idx, cur); while (sl) { + states_cnt++; + if (sl->state.insn_idx != 
insn_idx) + goto next; if (states_equal(env, &sl->state, cur)) { sl->hit_cnt++; /* reached equivalent register/stack state, @@ -6401,7 +6413,6 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) return err; return 1; } - states_cnt++; sl->miss_cnt++; /* heuristic to determine whether this state is beneficial * to keep checking from state equivalence point of view. @@ -6428,6 +6439,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) sl = *pprev; continue; } +next: pprev = &sl->next; sl = *pprev; } @@ -6459,6 +6471,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) kfree(new_sl); return err; } + new->insn_idx = insn_idx; new_sl->next = *explored_state(env, insn_idx); *explored_state(env, insn_idx) = new_sl; /* connect new state to parentage chain. Current frame needs all @@ -8138,7 +8151,7 @@ static void free_states(struct bpf_verifier_env *env) if (!env->explored_states) return; - for (i = 0; i < env->prog->len; i++) { + for (i = 0; i < state_htab_size(env); i++) { sl = env->explored_states[i]; while (sl) { @@ -8246,7 +8259,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, goto skip_full_check; } - env->explored_states = kvcalloc(env->prog->len, + env->explored_states = kvcalloc(state_htab_size(env), sizeof(struct bpf_verifier_state_list *), GFP_USER); ret = -ENOMEM; From 37739d1b4fe744da9c2f342224000ae7fbb5c063 Mon Sep 17 00:00:00 2001 From: Michal Rostecki Date: Thu, 23 May 2019 14:53:54 +0200 Subject: [PATCH 08/83] selftests: bpf: Move bpf_printk to bpf_helpers.h bpf_printk is a macro which is commonly used to print out debug messages in BPF programs and it was copied in many selftests and samples. Since all of them include bpf_helpers.h, this change moves the macro there. Signed-off-by: Michal Rostecki Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/bpf_helpers.h | 8 ++++++++ tools/testing/selftests/bpf/progs/sockmap_parse_prog.c | 7 ------- tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c | 7 ------- tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c | 7 ------- tools/testing/selftests/bpf/progs/test_lwt_seg6local.c | 7 ------- tools/testing/selftests/bpf/progs/test_xdp_noinline.c | 7 ------- tools/testing/selftests/bpf/test_sockmap_kern.h | 7 ------- 7 files changed, 8 insertions(+), 42 deletions(-) diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h index 5f6f9e7aba2a..4b27840b8109 100644 --- a/tools/testing/selftests/bpf/bpf_helpers.h +++ b/tools/testing/selftests/bpf/bpf_helpers.h @@ -8,6 +8,14 @@ */ #define SEC(NAME) __attribute__((section(NAME), used)) +/* helper macro to print out debug messages */ +#define bpf_printk(fmt, ...) \ +({ \ + char ____fmt[] = fmt; \ + bpf_trace_printk(____fmt, sizeof(____fmt), \ + ##__VA_ARGS__); \ +}) + /* helper functions called from eBPF programs written in C */ static void *(*bpf_map_lookup_elem)(void *map, const void *key) = (void *) BPF_FUNC_map_lookup_elem; diff --git a/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c b/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c index 0f92858f6226..ed3e4a551c57 100644 --- a/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c +++ b/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c @@ -5,13 +5,6 @@ int _version SEC("version") = 1; -#define bpf_printk(fmt, ...) 
\ -({ \ - char ____fmt[] = fmt; \ - bpf_trace_printk(____fmt, sizeof(____fmt), \ - ##__VA_ARGS__); \ -}) - SEC("sk_skb1") int bpf_prog1(struct __sk_buff *skb) { diff --git a/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c b/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c index 12a7b5c82ed6..65fbfdb6cd3a 100644 --- a/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c +++ b/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c @@ -5,13 +5,6 @@ int _version SEC("version") = 1; -#define bpf_printk(fmt, ...) \ -({ \ - char ____fmt[] = fmt; \ - bpf_trace_printk(____fmt, sizeof(____fmt), \ - ##__VA_ARGS__); \ -}) - SEC("sk_msg1") int bpf_prog1(struct sk_msg_md *msg) { diff --git a/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c b/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c index 2ce7634a4012..bdc22be46f2e 100644 --- a/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c +++ b/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c @@ -5,13 +5,6 @@ int _version SEC("version") = 1; -#define bpf_printk(fmt, ...) \ -({ \ - char ____fmt[] = fmt; \ - bpf_trace_printk(____fmt, sizeof(____fmt), \ - ##__VA_ARGS__); \ -}) - struct bpf_map_def SEC("maps") sock_map_rx = { .type = BPF_MAP_TYPE_SOCKMAP, .key_size = sizeof(int), diff --git a/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c b/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c index 0575751bc1bc..7c7cb3177463 100644 --- a/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c +++ b/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c @@ -6,13 +6,6 @@ #include "bpf_helpers.h" #include "bpf_endian.h" -#define bpf_printk(fmt, ...) \ -({ \ - char ____fmt[] = fmt; \ - bpf_trace_printk(____fmt, sizeof(____fmt), \ - ##__VA_ARGS__); \ -}) - /* Packet parsing state machine helpers. */ #define cursor_advance(_cursor, _len) \ ({ void *_tmp = _cursor; _cursor += _len; _tmp; }) diff --git a/tools/testing/selftests/bpf/progs/test_xdp_noinline.c b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c index 5e4aac74f9d0..4fe6aaad22a4 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_noinline.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c @@ -15,13 +15,6 @@ #include #include "bpf_helpers.h" -#define bpf_printk(fmt, ...) \ -({ \ - char ____fmt[] = fmt; \ - bpf_trace_printk(____fmt, sizeof(____fmt), \ - ##__VA_ARGS__); \ -}) - static __u32 rol32(__u32 word, unsigned int shift) { return (word << shift) | (word >> ((-shift) & 31)); diff --git a/tools/testing/selftests/bpf/test_sockmap_kern.h b/tools/testing/selftests/bpf/test_sockmap_kern.h index e7639f66a941..4e7d3da21357 100644 --- a/tools/testing/selftests/bpf/test_sockmap_kern.h +++ b/tools/testing/selftests/bpf/test_sockmap_kern.h @@ -28,13 +28,6 @@ * are established and verdicts are decided. */ -#define bpf_printk(fmt, ...) \ -({ \ - char ____fmt[] = fmt; \ - bpf_trace_printk(____fmt, sizeof(____fmt), \ - ##__VA_ARGS__); \ -}) - struct bpf_map_def SEC("maps") sock_map = { .type = TEST_MAP_TYPE, .key_size = sizeof(int), From c87f60a77db2c01770ef9c6ea792a1d39c3594ff Mon Sep 17 00:00:00 2001 From: Michal Rostecki Date: Thu, 23 May 2019 14:53:55 +0200 Subject: [PATCH 09/83] samples: bpf: Do not define bpf_printk macro The bpf_printk macro was moved to bpf_helpers.h which is included in all example programs. 
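For illustration, with the macro centralized in bpf_helpers.h a program only needs that include to use it. A minimal sketch (program name, section and includes are illustrative, not taken from the samples):

  #include <uapi/linux/bpf.h>
  #include "bpf_helpers.h"

  SEC("sockops")
  int bpf_demo(struct bpf_sock_ops *skops)
  {
          /* bpf_printk() expands to a local format buffer plus a
           * bpf_trace_printk() call, as defined in bpf_helpers.h
           */
          bpf_printk("op: %d\n", skops->op);
          return 1;
  }
  char _license[] SEC("license") = "GPL";
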
Signed-off-by: Michal Rostecki Signed-off-by: Alexei Starovoitov --- samples/bpf/hbm_kern.h | 11 ++--------- samples/bpf/tcp_basertt_kern.c | 7 ------- samples/bpf/tcp_bufs_kern.c | 7 ------- samples/bpf/tcp_clamp_kern.c | 7 ------- samples/bpf/tcp_cong_kern.c | 7 ------- samples/bpf/tcp_iw_kern.c | 7 ------- samples/bpf/tcp_rwnd_kern.c | 7 ------- samples/bpf/tcp_synrto_kern.c | 7 ------- samples/bpf/tcp_tos_reflect_kern.c | 7 ------- samples/bpf/xdp_sample_pkts_kern.c | 7 ------- 10 files changed, 2 insertions(+), 72 deletions(-) diff --git a/samples/bpf/hbm_kern.h b/samples/bpf/hbm_kern.h index c5635d924193..41384be233b9 100644 --- a/samples/bpf/hbm_kern.h +++ b/samples/bpf/hbm_kern.h @@ -30,15 +30,8 @@ #define ALLOW_PKT 1 #define TCP_ECN_OK 1 -#define HBM_DEBUG 0 // Set to 1 to enable debugging -#if HBM_DEBUG -#define bpf_printk(fmt, ...) \ -({ \ - char ____fmt[] = fmt; \ - bpf_trace_printk(____fmt, sizeof(____fmt), \ - ##__VA_ARGS__); \ -}) -#else +#ifndef HBM_DEBUG // Define HBM_DEBUG to enable debugging +#undef bpf_printk #define bpf_printk(fmt, ...) #endif diff --git a/samples/bpf/tcp_basertt_kern.c b/samples/bpf/tcp_basertt_kern.c index 6ef1625e8b2c..9dba48c2b920 100644 --- a/samples/bpf/tcp_basertt_kern.c +++ b/samples/bpf/tcp_basertt_kern.c @@ -21,13 +21,6 @@ #define DEBUG 1 -#define bpf_printk(fmt, ...) \ -({ \ - char ____fmt[] = fmt; \ - bpf_trace_printk(____fmt, sizeof(____fmt), \ - ##__VA_ARGS__); \ -}) - SEC("sockops") int bpf_basertt(struct bpf_sock_ops *skops) { diff --git a/samples/bpf/tcp_bufs_kern.c b/samples/bpf/tcp_bufs_kern.c index e03e204739fa..af8486f33771 100644 --- a/samples/bpf/tcp_bufs_kern.c +++ b/samples/bpf/tcp_bufs_kern.c @@ -22,13 +22,6 @@ #define DEBUG 1 -#define bpf_printk(fmt, ...) \ -({ \ - char ____fmt[] = fmt; \ - bpf_trace_printk(____fmt, sizeof(____fmt), \ - ##__VA_ARGS__); \ -}) - SEC("sockops") int bpf_bufs(struct bpf_sock_ops *skops) { diff --git a/samples/bpf/tcp_clamp_kern.c b/samples/bpf/tcp_clamp_kern.c index a0dc2d254aca..26c0fd091f3c 100644 --- a/samples/bpf/tcp_clamp_kern.c +++ b/samples/bpf/tcp_clamp_kern.c @@ -22,13 +22,6 @@ #define DEBUG 1 -#define bpf_printk(fmt, ...) \ -({ \ - char ____fmt[] = fmt; \ - bpf_trace_printk(____fmt, sizeof(____fmt), \ - ##__VA_ARGS__); \ -}) - SEC("sockops") int bpf_clamp(struct bpf_sock_ops *skops) { diff --git a/samples/bpf/tcp_cong_kern.c b/samples/bpf/tcp_cong_kern.c index 4fd3ca979a06..6d4dc4c7dd1e 100644 --- a/samples/bpf/tcp_cong_kern.c +++ b/samples/bpf/tcp_cong_kern.c @@ -21,13 +21,6 @@ #define DEBUG 1 -#define bpf_printk(fmt, ...) \ -({ \ - char ____fmt[] = fmt; \ - bpf_trace_printk(____fmt, sizeof(____fmt), \ - ##__VA_ARGS__); \ -}) - SEC("sockops") int bpf_cong(struct bpf_sock_ops *skops) { diff --git a/samples/bpf/tcp_iw_kern.c b/samples/bpf/tcp_iw_kern.c index 9b139ec69560..da61d53378b3 100644 --- a/samples/bpf/tcp_iw_kern.c +++ b/samples/bpf/tcp_iw_kern.c @@ -22,13 +22,6 @@ #define DEBUG 1 -#define bpf_printk(fmt, ...) \ -({ \ - char ____fmt[] = fmt; \ - bpf_trace_printk(____fmt, sizeof(____fmt), \ - ##__VA_ARGS__); \ -}) - SEC("sockops") int bpf_iw(struct bpf_sock_ops *skops) { diff --git a/samples/bpf/tcp_rwnd_kern.c b/samples/bpf/tcp_rwnd_kern.c index cc71ee96e044..d011e38b80d2 100644 --- a/samples/bpf/tcp_rwnd_kern.c +++ b/samples/bpf/tcp_rwnd_kern.c @@ -21,13 +21,6 @@ #define DEBUG 1 -#define bpf_printk(fmt, ...) 
\ -({ \ - char ____fmt[] = fmt; \ - bpf_trace_printk(____fmt, sizeof(____fmt), \ - ##__VA_ARGS__); \ -}) - SEC("sockops") int bpf_rwnd(struct bpf_sock_ops *skops) { diff --git a/samples/bpf/tcp_synrto_kern.c b/samples/bpf/tcp_synrto_kern.c index ca87ed34f896..720d1950322d 100644 --- a/samples/bpf/tcp_synrto_kern.c +++ b/samples/bpf/tcp_synrto_kern.c @@ -21,13 +21,6 @@ #define DEBUG 1 -#define bpf_printk(fmt, ...) \ -({ \ - char ____fmt[] = fmt; \ - bpf_trace_printk(____fmt, sizeof(____fmt), \ - ##__VA_ARGS__); \ -}) - SEC("sockops") int bpf_synrto(struct bpf_sock_ops *skops) { diff --git a/samples/bpf/tcp_tos_reflect_kern.c b/samples/bpf/tcp_tos_reflect_kern.c index de788be6f862..369faca70a15 100644 --- a/samples/bpf/tcp_tos_reflect_kern.c +++ b/samples/bpf/tcp_tos_reflect_kern.c @@ -20,13 +20,6 @@ #define DEBUG 1 -#define bpf_printk(fmt, ...) \ -({ \ - char ____fmt[] = fmt; \ - bpf_trace_printk(____fmt, sizeof(____fmt), \ - ##__VA_ARGS__); \ -}) - SEC("sockops") int bpf_basertt(struct bpf_sock_ops *skops) { diff --git a/samples/bpf/xdp_sample_pkts_kern.c b/samples/bpf/xdp_sample_pkts_kern.c index f7ca8b850978..6c7c7e0aaeda 100644 --- a/samples/bpf/xdp_sample_pkts_kern.c +++ b/samples/bpf/xdp_sample_pkts_kern.c @@ -7,13 +7,6 @@ #define SAMPLE_SIZE 64ul #define MAX_CPUS 128 -#define bpf_printk(fmt, ...) \ -({ \ - char ____fmt[] = fmt; \ - bpf_trace_printk(____fmt, sizeof(____fmt), \ - ##__VA_ARGS__); \ -}) - struct bpf_map_def SEC("maps") my_map = { .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY, .key_size = sizeof(int), From 1d7a08b3bdaec1e25ba7979ff598272b7e34318f Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 24 May 2019 11:58:56 -0700 Subject: [PATCH 10/83] libbpf: ensure libbpf.h is included along libbpf_internal.h libbpf_internal.h expects a bunch of stuff defined in libbpf.h to be defined. This patch makes sure that libbpf.h is always included. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov --- tools/lib/bpf/libbpf_internal.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h index f3025b4d90e1..850f7bdec5cb 100644 --- a/tools/lib/bpf/libbpf_internal.h +++ b/tools/lib/bpf/libbpf_internal.h @@ -9,6 +9,8 @@ #ifndef __LIBBPF_LIBBPF_INTERNAL_H #define __LIBBPF_LIBBPF_INTERNAL_H +#include "libbpf.h" + #define BTF_INFO_ENC(kind, kind_flag, vlen) \ ((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN)) #define BTF_TYPE_ENC(name, info, size_or_type) (name), (info), (size_or_type) From e6c64855fd7ad565d87b2bd617f4a50d3bdad82f Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 24 May 2019 11:58:57 -0700 Subject: [PATCH 11/83] libbpf: add btf__parse_elf API to load .BTF and .BTF.ext Loading BTF and BTF.ext from ELF file is a common need. Instead of requiring every user to re-implement it, let's provide this API from libbpf itself. It's mostly copy/paste from `bpftool btf dump` implementation, which will be switched to libbpf's version in next patch. btf__parse_elf allows to load BTF and optionally BTF.ext. This is also useful for tests that need to load/work with BTF, loaded from test ELF files. 
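A rough usage sketch of the new API (the wrapper function and object path are made up for illustration):

  #include <linux/err.h>
  #include "btf.h"

  static int parse_btf_sketch(void)
  {
          struct btf_ext *btf_ext = NULL;
          struct btf *btf;

          btf = btf__parse_elf("prog.o", &btf_ext); /* btf_ext arg may be NULL */
          if (IS_ERR(btf))
                  return PTR_ERR(btf);
          /* ... consume btf and, if present, btf_ext ... */
          btf_ext__free(btf_ext); /* left NULL when there is no .BTF.ext section */
          btf__free(btf);
          return 0;
  }
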
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov --- tools/lib/bpf/btf.c | 128 +++++++++++++++++++++++++++++++++++++++ tools/lib/bpf/btf.h | 2 + tools/lib/bpf/libbpf.map | 5 ++ 3 files changed, 135 insertions(+) diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 03348c4d6bd4..6139550810a1 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -4,10 +4,12 @@ #include #include #include +#include #include #include #include #include +#include #include "btf.h" #include "bpf.h" #include "libbpf.h" @@ -417,6 +419,132 @@ done: return btf; } +static bool btf_check_endianness(const GElf_Ehdr *ehdr) +{ +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + return ehdr->e_ident[EI_DATA] == ELFDATA2LSB; +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + return ehdr->e_ident[EI_DATA] == ELFDATA2MSB; +#else +# error "Unrecognized __BYTE_ORDER__" +#endif +} + +struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext) +{ + Elf_Data *btf_data = NULL, *btf_ext_data = NULL; + int err = 0, fd = -1, idx = 0; + struct btf *btf = NULL; + Elf_Scn *scn = NULL; + Elf *elf = NULL; + GElf_Ehdr ehdr; + + if (elf_version(EV_CURRENT) == EV_NONE) { + pr_warning("failed to init libelf for %s\n", path); + return ERR_PTR(-LIBBPF_ERRNO__LIBELF); + } + + fd = open(path, O_RDONLY); + if (fd < 0) { + err = -errno; + pr_warning("failed to open %s: %s\n", path, strerror(errno)); + return ERR_PTR(err); + } + + err = -LIBBPF_ERRNO__FORMAT; + + elf = elf_begin(fd, ELF_C_READ, NULL); + if (!elf) { + pr_warning("failed to open %s as ELF file\n", path); + goto done; + } + if (!gelf_getehdr(elf, &ehdr)) { + pr_warning("failed to get EHDR from %s\n", path); + goto done; + } + if (!btf_check_endianness(&ehdr)) { + pr_warning("non-native ELF endianness is not supported\n"); + goto done; + } + if (!elf_rawdata(elf_getscn(elf, ehdr.e_shstrndx), NULL)) { + pr_warning("failed to get e_shstrndx from %s\n", path); + goto done; + } + + while ((scn = elf_nextscn(elf, scn)) != NULL) { + GElf_Shdr sh; + char *name; + + idx++; + if (gelf_getshdr(scn, &sh) != &sh) { + pr_warning("failed to get section(%d) header from %s\n", + idx, path); + goto done; + } + name = elf_strptr(elf, ehdr.e_shstrndx, sh.sh_name); + if (!name) { + pr_warning("failed to get section(%d) name from %s\n", + idx, path); + goto done; + } + if (strcmp(name, BTF_ELF_SEC) == 0) { + btf_data = elf_getdata(scn, 0); + if (!btf_data) { + pr_warning("failed to get section(%d, %s) data from %s\n", + idx, name, path); + goto done; + } + continue; + } else if (btf_ext && strcmp(name, BTF_EXT_ELF_SEC) == 0) { + btf_ext_data = elf_getdata(scn, 0); + if (!btf_ext_data) { + pr_warning("failed to get section(%d, %s) data from %s\n", + idx, name, path); + goto done; + } + continue; + } + } + + err = 0; + + if (!btf_data) { + err = -ENOENT; + goto done; + } + btf = btf__new(btf_data->d_buf, btf_data->d_size); + if (IS_ERR(btf)) + goto done; + + if (btf_ext && btf_ext_data) { + *btf_ext = btf_ext__new(btf_ext_data->d_buf, + btf_ext_data->d_size); + if (IS_ERR(*btf_ext)) + goto done; + } else if (btf_ext) { + *btf_ext = NULL; + } +done: + if (elf) + elf_end(elf); + close(fd); + + if (err) + return ERR_PTR(err); + /* + * btf is always parsed before btf_ext, so no need to clean up + * btf_ext, if btf loading failed + */ + if (IS_ERR(btf)) + return btf; + if (btf_ext && IS_ERR(*btf_ext)) { + btf__free(btf); + err = PTR_ERR(*btf_ext); + return ERR_PTR(err); + } + return btf; +} + static int compare_vsi_off(const void *_a, const void *_b) { const struct btf_var_secinfo 
*a = _a; diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index c7b399e81fce..bded210df9e8 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -59,6 +59,8 @@ struct btf_ext_header { LIBBPF_API void btf__free(struct btf *btf); LIBBPF_API struct btf *btf__new(__u8 *data, __u32 size); +LIBBPF_API struct btf *btf__parse_elf(const char *path, + struct btf_ext **btf_ext); LIBBPF_API int btf__finalize_data(struct bpf_object *obj, struct btf *btf); LIBBPF_API int btf__load(struct btf *btf); LIBBPF_API __s32 btf__find_by_name(const struct btf *btf, diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 673001787cba..6ea5ce19b9e0 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -164,3 +164,8 @@ LIBBPF_0.0.3 { bpf_map_freeze; btf__finalize_data; } LIBBPF_0.0.2; + +LIBBPF_0.0.4 { + global: + btf__parse_elf; +} LIBBPF_0.0.3; From 58650cc47382af7d87282f53a5b508ac4bf734af Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 24 May 2019 11:58:58 -0700 Subject: [PATCH 12/83] bpftool: use libbpf's btf__parse_elf API Use btf__parse_elf() API, provided by libbpf, instead of implementing ELF parsing by itself. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov --- tools/bpf/bpftool/btf.c | 117 +++------------------------------------- 1 file changed, 8 insertions(+), 109 deletions(-) diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c index 7317438ecd9e..a22ef6587ebe 100644 --- a/tools/bpf/bpftool/btf.c +++ b/tools/bpf/bpftool/btf.c @@ -8,8 +8,8 @@ #include #include #include -#include #include +#include #include #include "btf.h" @@ -340,112 +340,6 @@ static int dump_btf_raw(const struct btf *btf, return 0; } -static bool check_btf_endianness(GElf_Ehdr *ehdr) -{ - static unsigned int const endian = 1; - - switch (ehdr->e_ident[EI_DATA]) { - case ELFDATA2LSB: - return *(unsigned char const *)&endian == 1; - case ELFDATA2MSB: - return *(unsigned char const *)&endian == 0; - default: - return 0; - } -} - -static int btf_load_from_elf(const char *path, struct btf **btf) -{ - int err = -1, fd = -1, idx = 0; - Elf_Data *btf_data = NULL; - Elf_Scn *scn = NULL; - Elf *elf = NULL; - GElf_Ehdr ehdr; - - if (elf_version(EV_CURRENT) == EV_NONE) { - p_err("failed to init libelf for %s", path); - return -1; - } - - fd = open(path, O_RDONLY); - if (fd < 0) { - p_err("failed to open %s: %s", path, strerror(errno)); - return -1; - } - - elf = elf_begin(fd, ELF_C_READ, NULL); - if (!elf) { - p_err("failed to open %s as ELF file", path); - goto done; - } - if (!gelf_getehdr(elf, &ehdr)) { - p_err("failed to get EHDR from %s", path); - goto done; - } - if (!check_btf_endianness(&ehdr)) { - p_err("non-native ELF endianness is not supported"); - goto done; - } - if (!elf_rawdata(elf_getscn(elf, ehdr.e_shstrndx), NULL)) { - p_err("failed to get e_shstrndx from %s\n", path); - goto done; - } - - while ((scn = elf_nextscn(elf, scn)) != NULL) { - GElf_Shdr sh; - char *name; - - idx++; - if (gelf_getshdr(scn, &sh) != &sh) { - p_err("failed to get section(%d) header from %s", - idx, path); - goto done; - } - name = elf_strptr(elf, ehdr.e_shstrndx, sh.sh_name); - if (!name) { - p_err("failed to get section(%d) name from %s", - idx, path); - goto done; - } - if (strcmp(name, BTF_ELF_SEC) == 0) { - btf_data = elf_getdata(scn, 0); - if (!btf_data) { - p_err("failed to get section(%d, %s) data from %s", - idx, name, path); - goto done; - } - break; - } - } - - if (!btf_data) { - p_err("%s ELF section not found in %s", BTF_ELF_SEC, path); - goto done; - } - - 
*btf = btf__new(btf_data->d_buf, btf_data->d_size); - if (IS_ERR(*btf)) { - err = PTR_ERR(*btf); - *btf = NULL; - p_err("failed to load BTF data from %s: %s", - path, strerror(err)); - goto done; - } - - err = 0; -done: - if (err) { - if (*btf) { - btf__free(*btf); - *btf = NULL; - } - } - if (elf) - elf_end(elf); - close(fd); - return err; -} - static int do_dump(int argc, char **argv) { struct btf *btf = NULL; @@ -522,9 +416,14 @@ static int do_dump(int argc, char **argv) } NEXT_ARG(); } else if (is_prefix(src, "file")) { - err = btf_load_from_elf(*argv, &btf); - if (err) + btf = btf__parse_elf(*argv, NULL); + if (IS_ERR(btf)) { + err = PTR_ERR(btf); + btf = NULL; + p_err("failed to load BTF from %s: %s", + *argv, strerror(err)); goto done; + } NEXT_ARG(); } else { err = -1; From 9db324314d29442c8bb8212dd40a3bb26f86c1c9 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 24 May 2019 11:58:59 -0700 Subject: [PATCH 13/83] selftests/bpf: use btf__parse_elf to check presence of BTF/BTF.ext Switch test_btf.c to rely on btf__parse_elf to check presence of BTF and BTF.ext data, instead of implementing its own ELF parsing. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/test_btf.c | 71 +++++--------------------- 1 file changed, 13 insertions(+), 58 deletions(-) diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c index 42c1ce988945..289daf54dec4 100644 --- a/tools/testing/selftests/bpf/test_btf.c +++ b/tools/testing/selftests/bpf/test_btf.c @@ -4025,62 +4025,13 @@ static struct btf_file_test file_tests[] = { }, }; -static int file_has_btf_elf(const char *fn, bool *has_btf_ext) -{ - Elf_Scn *scn = NULL; - GElf_Ehdr ehdr; - int ret = 0; - int elf_fd; - Elf *elf; - - if (CHECK(elf_version(EV_CURRENT) == EV_NONE, - "elf_version(EV_CURRENT) == EV_NONE")) - return -1; - - elf_fd = open(fn, O_RDONLY); - if (CHECK(elf_fd == -1, "open(%s): errno:%d", fn, errno)) - return -1; - - elf = elf_begin(elf_fd, ELF_C_READ, NULL); - if (CHECK(!elf, "elf_begin(%s): %s", fn, elf_errmsg(elf_errno()))) { - ret = -1; - goto done; - } - - if (CHECK(!gelf_getehdr(elf, &ehdr), "!gelf_getehdr(%s)", fn)) { - ret = -1; - goto done; - } - - while ((scn = elf_nextscn(elf, scn))) { - const char *sh_name; - GElf_Shdr sh; - - if (CHECK(gelf_getshdr(scn, &sh) != &sh, - "file:%s gelf_getshdr != &sh", fn)) { - ret = -1; - goto done; - } - - sh_name = elf_strptr(elf, ehdr.e_shstrndx, sh.sh_name); - if (!strcmp(sh_name, BTF_ELF_SEC)) - ret = 1; - if (!strcmp(sh_name, BTF_EXT_ELF_SEC)) - *has_btf_ext = true; - } - -done: - close(elf_fd); - elf_end(elf); - return ret; -} - static int do_test_file(unsigned int test_num) { const struct btf_file_test *test = &file_tests[test_num - 1]; const char *expected_fnames[] = {"_dummy_tracepoint", "test_long_fname_1", "test_long_fname_2"}; + struct btf_ext *btf_ext = NULL; struct bpf_prog_info info = {}; struct bpf_object *obj = NULL; struct bpf_func_info *finfo; @@ -4095,15 +4046,19 @@ static int do_test_file(unsigned int test_num) fprintf(stderr, "BTF libbpf test[%u] (%s): ", test_num, test->file); - err = file_has_btf_elf(test->file, &has_btf_ext); - if (err == -1) - return err; - - if (err == 0) { - fprintf(stderr, "SKIP. No ELF %s found", BTF_ELF_SEC); - skip_cnt++; - return 0; + btf = btf__parse_elf(test->file, &btf_ext); + if (IS_ERR(btf)) { + if (PTR_ERR(btf) == -ENOENT) { + fprintf(stderr, "SKIP. 
No ELF %s found", BTF_ELF_SEC); + skip_cnt++; + return 0; + } + return PTR_ERR(btf); } + btf__free(btf); + + has_btf_ext = btf_ext != NULL; + btf_ext__free(btf_ext); obj = bpf_object__open(test->file); if (CHECK(IS_ERR(obj), "obj: %ld", PTR_ERR(obj))) From e3b924224028c6fc31545e3812eecbe2ddbf35f6 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 24 May 2019 11:59:00 -0700 Subject: [PATCH 14/83] libbpf: add resizable non-thread safe internal hashmap There is a need for fast point lookups inside libbpf for multiple use cases (e.g., name resolution for BTF-to-C conversion, by-name lookups in BTF for upcoming BPF CO-RE relocation support, etc). This patch implements simple resizable non-thread safe hashmap using single linked list chains. Four different insert strategies are supported: - HASHMAP_ADD - only add key/value if key doesn't exist yet; - HASHMAP_SET - add key/value pair if key doesn't exist yet; otherwise, update value; - HASHMAP_UPDATE - update value, if key already exists; otherwise, do nothing and return -ENOENT; - HASHMAP_APPEND - always add key/value pair, even if key already exists. This turns hashmap into a multimap by allowing multiple values to be associated with the same key. Most useful read API for such hashmap is hashmap__for_each_key_entry() iteration. If hashmap__find() is still used, it will return last inserted key/value entry (first in a bucket chain). For HASHMAP_SET and HASHMAP_UPDATE, old key/value pair is returned, so that calling code can handle proper memory management, if necessary. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov --- tools/lib/bpf/Build | 2 +- tools/lib/bpf/hashmap.c | 229 ++++++++++++++++++++++++++++++++++++++++ tools/lib/bpf/hashmap.h | 173 ++++++++++++++++++++++++++++++ 3 files changed, 403 insertions(+), 1 deletion(-) create mode 100644 tools/lib/bpf/hashmap.c create mode 100644 tools/lib/bpf/hashmap.h diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build index ee9d5362f35b..dcf0d36598e0 100644 --- a/tools/lib/bpf/Build +++ b/tools/lib/bpf/Build @@ -1 +1 @@ -libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o netlink.o bpf_prog_linfo.o libbpf_probes.o xsk.o +libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o netlink.o bpf_prog_linfo.o libbpf_probes.o xsk.o hashmap.o diff --git a/tools/lib/bpf/hashmap.c b/tools/lib/bpf/hashmap.c new file mode 100644 index 000000000000..6122272943e6 --- /dev/null +++ b/tools/lib/bpf/hashmap.c @@ -0,0 +1,229 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +/* + * Generic non-thread safe hash map implementation. 
+ * + * Copyright (c) 2019 Facebook + */ +#include +#include +#include +#include +#include +#include "hashmap.h" + +/* start with 4 buckets */ +#define HASHMAP_MIN_CAP_BITS 2 + +static void hashmap_add_entry(struct hashmap_entry **pprev, + struct hashmap_entry *entry) +{ + entry->next = *pprev; + *pprev = entry; +} + +static void hashmap_del_entry(struct hashmap_entry **pprev, + struct hashmap_entry *entry) +{ + *pprev = entry->next; + entry->next = NULL; +} + +void hashmap__init(struct hashmap *map, hashmap_hash_fn hash_fn, + hashmap_equal_fn equal_fn, void *ctx) +{ + map->hash_fn = hash_fn; + map->equal_fn = equal_fn; + map->ctx = ctx; + + map->buckets = NULL; + map->cap = 0; + map->cap_bits = 0; + map->sz = 0; +} + +struct hashmap *hashmap__new(hashmap_hash_fn hash_fn, + hashmap_equal_fn equal_fn, + void *ctx) +{ + struct hashmap *map = malloc(sizeof(struct hashmap)); + + if (!map) + return ERR_PTR(-ENOMEM); + hashmap__init(map, hash_fn, equal_fn, ctx); + return map; +} + +void hashmap__clear(struct hashmap *map) +{ + free(map->buckets); + map->cap = map->cap_bits = map->sz = 0; +} + +void hashmap__free(struct hashmap *map) +{ + if (!map) + return; + + hashmap__clear(map); + free(map); +} + +size_t hashmap__size(const struct hashmap *map) +{ + return map->sz; +} + +size_t hashmap__capacity(const struct hashmap *map) +{ + return map->cap; +} + +static bool hashmap_needs_to_grow(struct hashmap *map) +{ + /* grow if empty or more than 75% filled */ + return (map->cap == 0) || ((map->sz + 1) * 4 / 3 > map->cap); +} + +static int hashmap_grow(struct hashmap *map) +{ + struct hashmap_entry **new_buckets; + struct hashmap_entry *cur, *tmp; + size_t new_cap_bits, new_cap; + size_t h; + int bkt; + + new_cap_bits = map->cap_bits + 1; + if (new_cap_bits < HASHMAP_MIN_CAP_BITS) + new_cap_bits = HASHMAP_MIN_CAP_BITS; + + new_cap = 1UL << new_cap_bits; + new_buckets = calloc(new_cap, sizeof(new_buckets[0])); + if (!new_buckets) + return -ENOMEM; + + hashmap__for_each_entry_safe(map, cur, tmp, bkt) { + h = hash_bits(map->hash_fn(cur->key, map->ctx), new_cap_bits); + hashmap_add_entry(&new_buckets[h], cur); + } + + map->cap = new_cap; + map->cap_bits = new_cap_bits; + free(map->buckets); + map->buckets = new_buckets; + + return 0; +} + +static bool hashmap_find_entry(const struct hashmap *map, + const void *key, size_t hash, + struct hashmap_entry ***pprev, + struct hashmap_entry **entry) +{ + struct hashmap_entry *cur, **prev_ptr; + + if (!map->buckets) + return false; + + for (prev_ptr = &map->buckets[hash], cur = *prev_ptr; + cur; + prev_ptr = &cur->next, cur = cur->next) { + if (map->equal_fn(cur->key, key, map->ctx)) { + if (pprev) + *pprev = prev_ptr; + *entry = cur; + return true; + } + } + + return false; +} + +int hashmap__insert(struct hashmap *map, const void *key, void *value, + enum hashmap_insert_strategy strategy, + const void **old_key, void **old_value) +{ + struct hashmap_entry *entry; + size_t h; + int err; + + if (old_key) + *old_key = NULL; + if (old_value) + *old_value = NULL; + + h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits); + if (strategy != HASHMAP_APPEND && + hashmap_find_entry(map, key, h, NULL, &entry)) { + if (old_key) + *old_key = entry->key; + if (old_value) + *old_value = entry->value; + + if (strategy == HASHMAP_SET || strategy == HASHMAP_UPDATE) { + entry->key = key; + entry->value = value; + return 0; + } else if (strategy == HASHMAP_ADD) { + return -EEXIST; + } + } + + if (strategy == HASHMAP_UPDATE) + return -ENOENT; + + if (hashmap_needs_to_grow(map)) 
{ + err = hashmap_grow(map); + if (err) + return err; + h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits); + } + + entry = malloc(sizeof(struct hashmap_entry)); + if (!entry) + return -ENOMEM; + + entry->key = key; + entry->value = value; + hashmap_add_entry(&map->buckets[h], entry); + map->sz++; + + return 0; +} + +bool hashmap__find(const struct hashmap *map, const void *key, void **value) +{ + struct hashmap_entry *entry; + size_t h; + + h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits); + if (!hashmap_find_entry(map, key, h, NULL, &entry)) + return false; + + if (value) + *value = entry->value; + return true; +} + +bool hashmap__delete(struct hashmap *map, const void *key, + const void **old_key, void **old_value) +{ + struct hashmap_entry **pprev, *entry; + size_t h; + + h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits); + if (!hashmap_find_entry(map, key, h, &pprev, &entry)) + return false; + + if (old_key) + *old_key = entry->key; + if (old_value) + *old_value = entry->value; + + hashmap_del_entry(pprev, entry); + free(entry); + map->sz--; + + return true; +} + diff --git a/tools/lib/bpf/hashmap.h b/tools/lib/bpf/hashmap.h new file mode 100644 index 000000000000..03748a742146 --- /dev/null +++ b/tools/lib/bpf/hashmap.h @@ -0,0 +1,173 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ + +/* + * Generic non-thread safe hash map implementation. + * + * Copyright (c) 2019 Facebook + */ +#ifndef __LIBBPF_HASHMAP_H +#define __LIBBPF_HASHMAP_H + +#include +#include +#include "libbpf_internal.h" + +static inline size_t hash_bits(size_t h, int bits) +{ + /* shuffle bits and return requested number of upper bits */ + return (h * 11400714819323198485llu) >> (__WORDSIZE - bits); +} + +typedef size_t (*hashmap_hash_fn)(const void *key, void *ctx); +typedef bool (*hashmap_equal_fn)(const void *key1, const void *key2, void *ctx); + +struct hashmap_entry { + const void *key; + void *value; + struct hashmap_entry *next; +}; + +struct hashmap { + hashmap_hash_fn hash_fn; + hashmap_equal_fn equal_fn; + void *ctx; + + struct hashmap_entry **buckets; + size_t cap; + size_t cap_bits; + size_t sz; +}; + +#define HASHMAP_INIT(hash_fn, equal_fn, ctx) { \ + .hash_fn = (hash_fn), \ + .equal_fn = (equal_fn), \ + .ctx = (ctx), \ + .buckets = NULL, \ + .cap = 0, \ + .cap_bits = 0, \ + .sz = 0, \ +} + +void hashmap__init(struct hashmap *map, hashmap_hash_fn hash_fn, + hashmap_equal_fn equal_fn, void *ctx); +struct hashmap *hashmap__new(hashmap_hash_fn hash_fn, + hashmap_equal_fn equal_fn, + void *ctx); +void hashmap__clear(struct hashmap *map); +void hashmap__free(struct hashmap *map); + +size_t hashmap__size(const struct hashmap *map); +size_t hashmap__capacity(const struct hashmap *map); + +/* + * Hashmap insertion strategy: + * - HASHMAP_ADD - only add key/value if key doesn't exist yet; + * - HASHMAP_SET - add key/value pair if key doesn't exist yet; otherwise, + * update value; + * - HASHMAP_UPDATE - update value, if key already exists; otherwise, do + * nothing and return -ENOENT; + * - HASHMAP_APPEND - always add key/value pair, even if key already exists. + * This turns hashmap into a multimap by allowing multiple values to be + * associated with the same key. Most useful read API for such hashmap is + * hashmap__for_each_key_entry() iteration. If hashmap__find() is still + * used, it will return last inserted key/value entry (first in a bucket + * chain). 
+ */ +enum hashmap_insert_strategy { + HASHMAP_ADD, + HASHMAP_SET, + HASHMAP_UPDATE, + HASHMAP_APPEND, +}; + +/* + * hashmap__insert() adds key/value entry w/ various semantics, depending on + * provided strategy value. If a given key/value pair replaced already + * existing key/value pair, both old key and old value will be returned + * through old_key and old_value to allow calling code do proper memory + * management. + */ +int hashmap__insert(struct hashmap *map, const void *key, void *value, + enum hashmap_insert_strategy strategy, + const void **old_key, void **old_value); + +static inline int hashmap__add(struct hashmap *map, + const void *key, void *value) +{ + return hashmap__insert(map, key, value, HASHMAP_ADD, NULL, NULL); +} + +static inline int hashmap__set(struct hashmap *map, + const void *key, void *value, + const void **old_key, void **old_value) +{ + return hashmap__insert(map, key, value, HASHMAP_SET, + old_key, old_value); +} + +static inline int hashmap__update(struct hashmap *map, + const void *key, void *value, + const void **old_key, void **old_value) +{ + return hashmap__insert(map, key, value, HASHMAP_UPDATE, + old_key, old_value); +} + +static inline int hashmap__append(struct hashmap *map, + const void *key, void *value) +{ + return hashmap__insert(map, key, value, HASHMAP_APPEND, NULL, NULL); +} + +bool hashmap__delete(struct hashmap *map, const void *key, + const void **old_key, void **old_value); + +bool hashmap__find(const struct hashmap *map, const void *key, void **value); + +/* + * hashmap__for_each_entry - iterate over all entries in hashmap + * @map: hashmap to iterate + * @cur: struct hashmap_entry * used as a loop cursor + * @bkt: integer used as a bucket loop cursor + */ +#define hashmap__for_each_entry(map, cur, bkt) \ + for (bkt = 0; bkt < map->cap; bkt++) \ + for (cur = map->buckets[bkt]; cur; cur = cur->next) + +/* + * hashmap__for_each_entry_safe - iterate over all entries in hashmap, safe + * against removals + * @map: hashmap to iterate + * @cur: struct hashmap_entry * used as a loop cursor + * @tmp: struct hashmap_entry * used as a temporary next cursor storage + * @bkt: integer used as a bucket loop cursor + */ +#define hashmap__for_each_entry_safe(map, cur, tmp, bkt) \ + for (bkt = 0; bkt < map->cap; bkt++) \ + for (cur = map->buckets[bkt]; \ + cur && ({tmp = cur->next; true; }); \ + cur = tmp) + +/* + * hashmap__for_each_key_entry - iterate over entries associated with given key + * @map: hashmap to iterate + * @cur: struct hashmap_entry * used as a loop cursor + * @key: key to iterate entries for + */ +#define hashmap__for_each_key_entry(map, cur, _key) \ + for (cur = ({ size_t bkt = hash_bits(map->hash_fn((_key), map->ctx),\ + map->cap_bits); \ + map->buckets ? map->buckets[bkt] : NULL; }); \ + cur; \ + cur = cur->next) \ + if (map->equal_fn(cur->key, (_key), map->ctx)) + +#define hashmap__for_each_key_entry_safe(map, cur, tmp, _key) \ + for (cur = ({ size_t bkt = hash_bits(map->hash_fn((_key), map->ctx),\ + map->cap_bits); \ + cur = map->buckets ? map->buckets[bkt] : NULL; }); \ + cur && ({ tmp = cur->next; true; }); \ + cur = tmp) \ + if (map->equal_fn(cur->key, (_key), map->ctx)) + +#endif /* __LIBBPF_HASHMAP_H */ From 5d04ec687cf98bc6e8073c646994736a46ed4e24 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 24 May 2019 11:59:01 -0700 Subject: [PATCH 15/83] selftests/bpf: add tests for libbpf's hashmap Test all APIs for internal hashmap implementation. 
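For reference, the API exercised by these tests can be used roughly as
sketched below. This is only an illustrative snippet for this description
(example(), id_hash_fn() and id_equal_fn() are made-up names, and it assumes
<stdio.h> plus the IS_ERR()/PTR_ERR() helpers that hashmap.h pulls in through
libbpf_internal.h):

  #include <stdio.h>
  #include "hashmap.h"

  /* keys are small integers cast to pointers, so hash/compare them directly */
  static size_t id_hash_fn(const void *key, void *ctx)
  {
          return (size_t)key;
  }

  static bool id_equal_fn(const void *k1, const void *k2, void *ctx)
  {
          return k1 == k2;
  }

  static int example(void)
  {
          struct hashmap *map = hashmap__new(id_hash_fn, id_equal_fn, NULL);
          struct hashmap_entry *cur;
          void *value;
          int bkt, err;

          if (IS_ERR(map))
                  return PTR_ERR(map);

          /* HASHMAP_ADD semantics: fails with -EEXIST if key already exists */
          err = hashmap__add(map, (void *)1, (void *)100);
          if (err)
                  goto out;

          if (hashmap__find(map, (void *)1, &value))
                  fprintf(stderr, "1 -> %ld\n", (long)value);

          /* walk every bucket and every entry chained in it */
          hashmap__for_each_entry(map, cur, bkt)
                  fprintf(stderr, "%ld = %ld\n", (long)cur->key, (long)cur->value);

          hashmap__delete(map, (void *)1, NULL, NULL);
  out:
          hashmap__free(map);
          return err;
  }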
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/.gitignore | 1 + tools/testing/selftests/bpf/Makefile | 2 +- tools/testing/selftests/bpf/test_hashmap.c | 382 +++++++++++++++++++++ 3 files changed, 384 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/bpf/test_hashmap.c diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore index dd5d69529382..138b6c063916 100644 --- a/tools/testing/selftests/bpf/.gitignore +++ b/tools/testing/selftests/bpf/.gitignore @@ -35,3 +35,4 @@ test_sysctl alu32 libbpf.pc libbpf.so.* +test_hashmap diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 66f2dca1dee1..ddae06498a00 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -23,7 +23,7 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test test_align test_verifier_log test_dev_cgroup test_tcpbpf_user \ test_sock test_btf test_sockmap test_lirc_mode2_user get_cgroup_id_user \ test_socket_cookie test_cgroup_storage test_select_reuseport test_section_names \ - test_netcnt test_tcpnotify_user test_sock_fields test_sysctl + test_netcnt test_tcpnotify_user test_sock_fields test_sysctl test_hashmap BPF_OBJ_FILES = $(patsubst %.c,%.o, $(notdir $(wildcard progs/*.c))) TEST_GEN_FILES = $(BPF_OBJ_FILES) diff --git a/tools/testing/selftests/bpf/test_hashmap.c b/tools/testing/selftests/bpf/test_hashmap.c new file mode 100644 index 000000000000..b64094c981e3 --- /dev/null +++ b/tools/testing/selftests/bpf/test_hashmap.c @@ -0,0 +1,382 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +/* + * Tests for libbpf's hashmap. + * + * Copyright (c) 2019 Facebook + */ +#include +#include +#include +#include "hashmap.h" + +#define CHECK(condition, format...) 
({ \ + int __ret = !!(condition); \ + if (__ret) { \ + fprintf(stderr, "%s:%d:FAIL ", __func__, __LINE__); \ + fprintf(stderr, format); \ + } \ + __ret; \ +}) + +size_t hash_fn(const void *k, void *ctx) +{ + return (long)k; +} + +bool equal_fn(const void *a, const void *b, void *ctx) +{ + return (long)a == (long)b; +} + +static inline size_t next_pow_2(size_t n) +{ + size_t r = 1; + + while (r < n) + r <<= 1; + return r; +} + +static inline size_t exp_cap(size_t sz) +{ + size_t r = next_pow_2(sz); + + if (sz * 4 / 3 > r) + r <<= 1; + return r; +} + +#define ELEM_CNT 62 + +int test_hashmap_generic(void) +{ + struct hashmap_entry *entry, *tmp; + int err, bkt, found_cnt, i; + long long found_msk; + struct hashmap *map; + + fprintf(stderr, "%s: ", __func__); + + map = hashmap__new(hash_fn, equal_fn, NULL); + if (CHECK(IS_ERR(map), "failed to create map: %ld\n", PTR_ERR(map))) + return 1; + + for (i = 0; i < ELEM_CNT; i++) { + const void *oldk, *k = (const void *)(long)i; + void *oldv, *v = (void *)(long)(1024 + i); + + err = hashmap__update(map, k, v, &oldk, &oldv); + if (CHECK(err != -ENOENT, "unexpected result: %d\n", err)) + return 1; + + if (i % 2) { + err = hashmap__add(map, k, v); + } else { + err = hashmap__set(map, k, v, &oldk, &oldv); + if (CHECK(oldk != NULL || oldv != NULL, + "unexpected k/v: %p=%p\n", oldk, oldv)) + return 1; + } + + if (CHECK(err, "failed to add k/v %ld = %ld: %d\n", + (long)k, (long)v, err)) + return 1; + + if (CHECK(!hashmap__find(map, k, &oldv), + "failed to find key %ld\n", (long)k)) + return 1; + if (CHECK(oldv != v, "found value is wrong: %ld\n", (long)oldv)) + return 1; + } + + if (CHECK(hashmap__size(map) != ELEM_CNT, + "invalid map size: %zu\n", hashmap__size(map))) + return 1; + if (CHECK(hashmap__capacity(map) != exp_cap(hashmap__size(map)), + "unexpected map capacity: %zu\n", hashmap__capacity(map))) + return 1; + + found_msk = 0; + hashmap__for_each_entry(map, entry, bkt) { + long k = (long)entry->key; + long v = (long)entry->value; + + found_msk |= 1ULL << k; + if (CHECK(v - k != 1024, "invalid k/v pair: %ld = %ld\n", k, v)) + return 1; + } + if (CHECK(found_msk != (1ULL << ELEM_CNT) - 1, + "not all keys iterated: %llx\n", found_msk)) + return 1; + + for (i = 0; i < ELEM_CNT; i++) { + const void *oldk, *k = (const void *)(long)i; + void *oldv, *v = (void *)(long)(256 + i); + + err = hashmap__add(map, k, v); + if (CHECK(err != -EEXIST, "unexpected add result: %d\n", err)) + return 1; + + if (i % 2) + err = hashmap__update(map, k, v, &oldk, &oldv); + else + err = hashmap__set(map, k, v, &oldk, &oldv); + + if (CHECK(err, "failed to update k/v %ld = %ld: %d\n", + (long)k, (long)v, err)) + return 1; + if (CHECK(!hashmap__find(map, k, &oldv), + "failed to find key %ld\n", (long)k)) + return 1; + if (CHECK(oldv != v, "found value is wrong: %ld\n", (long)oldv)) + return 1; + } + + if (CHECK(hashmap__size(map) != ELEM_CNT, + "invalid updated map size: %zu\n", hashmap__size(map))) + return 1; + if (CHECK(hashmap__capacity(map) != exp_cap(hashmap__size(map)), + "unexpected map capacity: %zu\n", hashmap__capacity(map))) + return 1; + + found_msk = 0; + hashmap__for_each_entry_safe(map, entry, tmp, bkt) { + long k = (long)entry->key; + long v = (long)entry->value; + + found_msk |= 1ULL << k; + if (CHECK(v - k != 256, + "invalid updated k/v pair: %ld = %ld\n", k, v)) + return 1; + } + if (CHECK(found_msk != (1ULL << ELEM_CNT) - 1, + "not all keys iterated after update: %llx\n", found_msk)) + return 1; + + found_cnt = 0; + hashmap__for_each_key_entry(map, entry, 
(void *)0) { + found_cnt++; + } + if (CHECK(!found_cnt, "didn't find any entries for key 0\n")) + return 1; + + found_msk = 0; + found_cnt = 0; + hashmap__for_each_key_entry_safe(map, entry, tmp, (void *)0) { + const void *oldk, *k; + void *oldv, *v; + + k = entry->key; + v = entry->value; + + found_cnt++; + found_msk |= 1ULL << (long)k; + + if (CHECK(!hashmap__delete(map, k, &oldk, &oldv), + "failed to delete k/v %ld = %ld\n", + (long)k, (long)v)) + return 1; + if (CHECK(oldk != k || oldv != v, + "invalid deleted k/v: expected %ld = %ld, got %ld = %ld\n", + (long)k, (long)v, (long)oldk, (long)oldv)) + return 1; + if (CHECK(hashmap__delete(map, k, &oldk, &oldv), + "unexpectedly deleted k/v %ld = %ld\n", + (long)oldk, (long)oldv)) + return 1; + } + + if (CHECK(!found_cnt || !found_msk, + "didn't delete any key entries\n")) + return 1; + if (CHECK(hashmap__size(map) != ELEM_CNT - found_cnt, + "invalid updated map size (already deleted: %d): %zu\n", + found_cnt, hashmap__size(map))) + return 1; + if (CHECK(hashmap__capacity(map) != exp_cap(hashmap__size(map)), + "unexpected map capacity: %zu\n", hashmap__capacity(map))) + return 1; + + hashmap__for_each_entry_safe(map, entry, tmp, bkt) { + const void *oldk, *k; + void *oldv, *v; + + k = entry->key; + v = entry->value; + + found_cnt++; + found_msk |= 1ULL << (long)k; + + if (CHECK(!hashmap__delete(map, k, &oldk, &oldv), + "failed to delete k/v %ld = %ld\n", + (long)k, (long)v)) + return 1; + if (CHECK(oldk != k || oldv != v, + "invalid old k/v: expect %ld = %ld, got %ld = %ld\n", + (long)k, (long)v, (long)oldk, (long)oldv)) + return 1; + if (CHECK(hashmap__delete(map, k, &oldk, &oldv), + "unexpectedly deleted k/v %ld = %ld\n", + (long)k, (long)v)) + return 1; + } + + if (CHECK(found_cnt != ELEM_CNT || found_msk != (1ULL << ELEM_CNT) - 1, + "not all keys were deleted: found_cnt:%d, found_msk:%llx\n", + found_cnt, found_msk)) + return 1; + if (CHECK(hashmap__size(map) != 0, + "invalid updated map size (already deleted: %d): %zu\n", + found_cnt, hashmap__size(map))) + return 1; + + found_cnt = 0; + hashmap__for_each_entry(map, entry, bkt) { + CHECK(false, "unexpected map entries left: %ld = %ld\n", + (long)entry->key, (long)entry->value); + return 1; + } + + hashmap__free(map); + hashmap__for_each_entry(map, entry, bkt) { + CHECK(false, "unexpected map entries left: %ld = %ld\n", + (long)entry->key, (long)entry->value); + return 1; + } + + fprintf(stderr, "OK\n"); + return 0; +} + +size_t collision_hash_fn(const void *k, void *ctx) +{ + return 0; +} + +int test_hashmap_multimap(void) +{ + void *k1 = (void *)0, *k2 = (void *)1; + struct hashmap_entry *entry; + struct hashmap *map; + long found_msk; + int err, bkt; + + fprintf(stderr, "%s: ", __func__); + + /* force collisions */ + map = hashmap__new(collision_hash_fn, equal_fn, NULL); + if (CHECK(IS_ERR(map), "failed to create map: %ld\n", PTR_ERR(map))) + return 1; + + + /* set up multimap: + * [0] -> 1, 2, 4; + * [1] -> 8, 16, 32; + */ + err = hashmap__append(map, k1, (void *)1); + if (CHECK(err, "failed to add k/v: %d\n", err)) + return 1; + err = hashmap__append(map, k1, (void *)2); + if (CHECK(err, "failed to add k/v: %d\n", err)) + return 1; + err = hashmap__append(map, k1, (void *)4); + if (CHECK(err, "failed to add k/v: %d\n", err)) + return 1; + + err = hashmap__append(map, k2, (void *)8); + if (CHECK(err, "failed to add k/v: %d\n", err)) + return 1; + err = hashmap__append(map, k2, (void *)16); + if (CHECK(err, "failed to add k/v: %d\n", err)) + return 1; + err = hashmap__append(map, k2, 
(void *)32); + if (CHECK(err, "failed to add k/v: %d\n", err)) + return 1; + + if (CHECK(hashmap__size(map) != 6, + "invalid map size: %zu\n", hashmap__size(map))) + return 1; + + /* verify global iteration still works and sees all values */ + found_msk = 0; + hashmap__for_each_entry(map, entry, bkt) { + found_msk |= (long)entry->value; + } + if (CHECK(found_msk != (1 << 6) - 1, + "not all keys iterated: %lx\n", found_msk)) + return 1; + + /* iterate values for key 1 */ + found_msk = 0; + hashmap__for_each_key_entry(map, entry, k1) { + found_msk |= (long)entry->value; + } + if (CHECK(found_msk != (1 | 2 | 4), + "invalid k1 values: %lx\n", found_msk)) + return 1; + + /* iterate values for key 2 */ + found_msk = 0; + hashmap__for_each_key_entry(map, entry, k2) { + found_msk |= (long)entry->value; + } + if (CHECK(found_msk != (8 | 16 | 32), + "invalid k2 values: %lx\n", found_msk)) + return 1; + + fprintf(stderr, "OK\n"); + return 0; +} + +int test_hashmap_empty() +{ + struct hashmap_entry *entry; + int bkt; + struct hashmap *map; + void *k = (void *)0; + + fprintf(stderr, "%s: ", __func__); + + /* force collisions */ + map = hashmap__new(hash_fn, equal_fn, NULL); + if (CHECK(IS_ERR(map), "failed to create map: %ld\n", PTR_ERR(map))) + return 1; + + if (CHECK(hashmap__size(map) != 0, + "invalid map size: %zu\n", hashmap__size(map))) + return 1; + if (CHECK(hashmap__capacity(map) != 0, + "invalid map capacity: %zu\n", hashmap__capacity(map))) + return 1; + if (CHECK(hashmap__find(map, k, NULL), "unexpected find\n")) + return 1; + if (CHECK(hashmap__delete(map, k, NULL, NULL), "unexpected delete\n")) + return 1; + + hashmap__for_each_entry(map, entry, bkt) { + CHECK(false, "unexpected iterated entry\n"); + return 1; + } + hashmap__for_each_key_entry(map, entry, k) { + CHECK(false, "unexpected key entry\n"); + return 1; + } + + fprintf(stderr, "OK\n"); + return 0; +} + +int main(int argc, char **argv) +{ + bool failed = false; + + if (test_hashmap_generic()) + failed = true; + if (test_hashmap_multimap()) + failed = true; + if (test_hashmap_empty()) + failed = true; + + return failed; +} From 2fc3fc0bcdcc5429947d1e5ed8f6f0a77ba80997 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 24 May 2019 11:59:02 -0700 Subject: [PATCH 16/83] libbpf: switch btf_dedup() to hashmap for dedup table Utilize libbpf's hashmap as a multimap fof dedup_table implementation. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov --- tools/lib/bpf/btf.c | 203 +++++++++++++++++++------------------------- 1 file changed, 86 insertions(+), 117 deletions(-) diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 6139550810a1..b2478e98c367 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -14,6 +14,7 @@ #include "bpf.h" #include "libbpf.h" #include "libbpf_internal.h" +#include "hashmap.h" #define max(a, b) ((a) > (b) ? (a) : (b)) #define min(a, b) ((a) < (b) ? (a) : (b)) @@ -1293,16 +1294,9 @@ done: return err; } -#define BTF_DEDUP_TABLE_DEFAULT_SIZE (1 << 14) -#define BTF_DEDUP_TABLE_MAX_SIZE_LOG 31 #define BTF_UNPROCESSED_ID ((__u32)-1) #define BTF_IN_PROGRESS_ID ((__u32)-2) -struct btf_dedup_node { - struct btf_dedup_node *next; - __u32 type_id; -}; - struct btf_dedup { /* .BTF section to be deduped in-place */ struct btf *btf; @@ -1318,7 +1312,7 @@ struct btf_dedup { * candidates, which is fine because we rely on subsequent * btf_xxx_equal() checks to authoritatively verify type equality. 
*/ - struct btf_dedup_node **dedup_table; + struct hashmap *dedup_table; /* Canonical types map */ __u32 *map; /* Hypothetical mapping, used during type graph equivalence checks */ @@ -1343,30 +1337,18 @@ struct btf_str_ptrs { __u32 cap; }; -static inline __u32 hash_combine(__u32 h, __u32 value) +static long hash_combine(long h, long value) { -/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */ -#define GOLDEN_RATIO_PRIME 0x9e370001UL - return h * 37 + value * GOLDEN_RATIO_PRIME; -#undef GOLDEN_RATIO_PRIME + return h * 31 + value; } -#define for_each_dedup_cand(d, hash, node) \ - for (node = d->dedup_table[hash & (d->opts.dedup_table_size - 1)]; \ - node; \ - node = node->next) +#define for_each_dedup_cand(d, node, hash) \ + hashmap__for_each_key_entry(d->dedup_table, node, (void *)hash) -static int btf_dedup_table_add(struct btf_dedup *d, __u32 hash, __u32 type_id) +static int btf_dedup_table_add(struct btf_dedup *d, long hash, __u32 type_id) { - struct btf_dedup_node *node = malloc(sizeof(struct btf_dedup_node)); - int bucket = hash & (d->opts.dedup_table_size - 1); - - if (!node) - return -ENOMEM; - node->type_id = type_id; - node->next = d->dedup_table[bucket]; - d->dedup_table[bucket] = node; - return 0; + return hashmap__append(d->dedup_table, + (void *)hash, (void *)(long)type_id); } static int btf_dedup_hypot_map_add(struct btf_dedup *d, @@ -1395,36 +1377,10 @@ static void btf_dedup_clear_hypot_map(struct btf_dedup *d) d->hypot_cnt = 0; } -static void btf_dedup_table_free(struct btf_dedup *d) -{ - struct btf_dedup_node *head, *tmp; - int i; - - if (!d->dedup_table) - return; - - for (i = 0; i < d->opts.dedup_table_size; i++) { - while (d->dedup_table[i]) { - tmp = d->dedup_table[i]; - d->dedup_table[i] = tmp->next; - free(tmp); - } - - head = d->dedup_table[i]; - while (head) { - tmp = head; - head = head->next; - free(tmp); - } - } - - free(d->dedup_table); - d->dedup_table = NULL; -} - static void btf_dedup_free(struct btf_dedup *d) { - btf_dedup_table_free(d); + hashmap__free(d->dedup_table); + d->dedup_table = NULL; free(d->map); d->map = NULL; @@ -1438,40 +1394,43 @@ static void btf_dedup_free(struct btf_dedup *d) free(d); } -/* Find closest power of two >= to size, capped at 2^max_size_log */ -static __u32 roundup_pow2_max(__u32 size, int max_size_log) +static size_t btf_dedup_identity_hash_fn(const void *key, void *ctx) { - int i; - - for (i = 0; i < max_size_log && (1U << i) < size; i++) - ; - return 1U << i; + return (size_t)key; } +static size_t btf_dedup_collision_hash_fn(const void *key, void *ctx) +{ + return 0; +} + +static bool btf_dedup_equal_fn(const void *k1, const void *k2, void *ctx) +{ + return k1 == k2; +} static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext, const struct btf_dedup_opts *opts) { struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup)); + hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn; int i, err = 0; - __u32 sz; if (!d) return ERR_PTR(-ENOMEM); d->opts.dont_resolve_fwds = opts && opts->dont_resolve_fwds; - sz = opts && opts->dedup_table_size ? 
opts->dedup_table_size - : BTF_DEDUP_TABLE_DEFAULT_SIZE; - sz = roundup_pow2_max(sz, BTF_DEDUP_TABLE_MAX_SIZE_LOG); - d->opts.dedup_table_size = sz; + /* dedup_table_size is now used only to force collisions in tests */ + if (opts && opts->dedup_table_size == 1) + hash_fn = btf_dedup_collision_hash_fn; d->btf = btf; d->btf_ext = btf_ext; - d->dedup_table = calloc(d->opts.dedup_table_size, - sizeof(struct btf_dedup_node *)); - if (!d->dedup_table) { - err = -ENOMEM; + d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL); + if (IS_ERR(d->dedup_table)) { + err = PTR_ERR(d->dedup_table); + d->dedup_table = NULL; goto done; } @@ -1790,9 +1749,9 @@ done: return err; } -static __u32 btf_hash_common(struct btf_type *t) +static long btf_hash_common(struct btf_type *t) { - __u32 h; + long h; h = hash_combine(0, t->name_off); h = hash_combine(h, t->info); @@ -1808,10 +1767,10 @@ static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2) } /* Calculate type signature hash of INT. */ -static __u32 btf_hash_int(struct btf_type *t) +static long btf_hash_int(struct btf_type *t) { __u32 info = *(__u32 *)(t + 1); - __u32 h; + long h; h = btf_hash_common(t); h = hash_combine(h, info); @@ -1831,9 +1790,9 @@ static bool btf_equal_int(struct btf_type *t1, struct btf_type *t2) } /* Calculate type signature hash of ENUM. */ -static __u32 btf_hash_enum(struct btf_type *t) +static long btf_hash_enum(struct btf_type *t) { - __u32 h; + long h; /* don't hash vlen and enum members to support enum fwd resolving */ h = hash_combine(0, t->name_off); @@ -1885,11 +1844,11 @@ static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2) * as referenced type IDs equivalence is established separately during type * graph equivalence check algorithm. */ -static __u32 btf_hash_struct(struct btf_type *t) +static long btf_hash_struct(struct btf_type *t) { struct btf_member *member = (struct btf_member *)(t + 1); __u32 vlen = BTF_INFO_VLEN(t->info); - __u32 h = btf_hash_common(t); + long h = btf_hash_common(t); int i; for (i = 0; i < vlen; i++) { @@ -1932,10 +1891,10 @@ static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2) * under assumption that they were already resolved to canonical type IDs and * are not going to change. */ -static __u32 btf_hash_array(struct btf_type *t) +static long btf_hash_array(struct btf_type *t) { struct btf_array *info = (struct btf_array *)(t + 1); - __u32 h = btf_hash_common(t); + long h = btf_hash_common(t); h = hash_combine(h, info->type); h = hash_combine(h, info->index_type); @@ -1986,11 +1945,11 @@ static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2) * under assumption that they were already resolved to canonical type IDs and * are not going to change. */ -static inline __u32 btf_hash_fnproto(struct btf_type *t) +static long btf_hash_fnproto(struct btf_type *t) { struct btf_param *member = (struct btf_param *)(t + 1); __u16 vlen = BTF_INFO_VLEN(t->info); - __u32 h = btf_hash_common(t); + long h = btf_hash_common(t); int i; for (i = 0; i < vlen; i++) { @@ -2008,7 +1967,7 @@ static inline __u32 btf_hash_fnproto(struct btf_type *t) * This function is called during reference types deduplication to compare * FUNC_PROTO to potential canonical representative. 
*/ -static inline bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2) +static bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2) { struct btf_param *m1, *m2; __u16 vlen; @@ -2034,7 +1993,7 @@ static inline bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2) * IDs. This check is performed during type graph equivalence check and * referenced types equivalence is checked separately. */ -static inline bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2) +static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2) { struct btf_param *m1, *m2; __u16 vlen; @@ -2065,11 +2024,12 @@ static inline bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2) static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id) { struct btf_type *t = d->btf->types[type_id]; + struct hashmap_entry *hash_entry; struct btf_type *cand; - struct btf_dedup_node *cand_node; /* if we don't find equivalent type, then we are canonical */ __u32 new_id = type_id; - __u32 h; + __u32 cand_id; + long h; switch (BTF_INFO_KIND(t->info)) { case BTF_KIND_CONST: @@ -2088,10 +2048,11 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id) case BTF_KIND_INT: h = btf_hash_int(t); - for_each_dedup_cand(d, h, cand_node) { - cand = d->btf->types[cand_node->type_id]; + for_each_dedup_cand(d, hash_entry, h) { + cand_id = (__u32)(long)hash_entry->value; + cand = d->btf->types[cand_id]; if (btf_equal_int(t, cand)) { - new_id = cand_node->type_id; + new_id = cand_id; break; } } @@ -2099,10 +2060,11 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id) case BTF_KIND_ENUM: h = btf_hash_enum(t); - for_each_dedup_cand(d, h, cand_node) { - cand = d->btf->types[cand_node->type_id]; + for_each_dedup_cand(d, hash_entry, h) { + cand_id = (__u32)(long)hash_entry->value; + cand = d->btf->types[cand_id]; if (btf_equal_enum(t, cand)) { - new_id = cand_node->type_id; + new_id = cand_id; break; } if (d->opts.dont_resolve_fwds) @@ -2110,21 +2072,22 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id) if (btf_compat_enum(t, cand)) { if (btf_is_enum_fwd(t)) { /* resolve fwd to full enum */ - new_id = cand_node->type_id; + new_id = cand_id; break; } /* resolve canonical enum fwd to full enum */ - d->map[cand_node->type_id] = type_id; + d->map[cand_id] = type_id; } } break; case BTF_KIND_FWD: h = btf_hash_common(t); - for_each_dedup_cand(d, h, cand_node) { - cand = d->btf->types[cand_node->type_id]; + for_each_dedup_cand(d, hash_entry, h) { + cand_id = (__u32)(long)hash_entry->value; + cand = d->btf->types[cand_id]; if (btf_equal_common(t, cand)) { - new_id = cand_node->type_id; + new_id = cand_id; break; } } @@ -2525,12 +2488,12 @@ static void btf_dedup_merge_hypot_map(struct btf_dedup *d) */ static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id) { - struct btf_dedup_node *cand_node; struct btf_type *cand_type, *t; + struct hashmap_entry *hash_entry; /* if we don't find equivalent type, then we are canonical */ __u32 new_id = type_id; __u16 kind; - __u32 h; + long h; /* already deduped or is in process of deduping (loop detected) */ if (d->map[type_id] <= BTF_MAX_NR_TYPES) @@ -2543,7 +2506,8 @@ static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id) return 0; h = btf_hash_struct(t); - for_each_dedup_cand(d, h, cand_node) { + for_each_dedup_cand(d, hash_entry, h) { + __u32 cand_id = (__u32)(long)hash_entry->value; int eq; /* @@ -2556,17 +2520,17 @@ static int btf_dedup_struct_type(struct btf_dedup *d, __u32 
type_id) * creating a loop (FWD -> STRUCT and STRUCT -> FWD), because * FWD and compatible STRUCT/UNION are considered equivalent. */ - cand_type = d->btf->types[cand_node->type_id]; + cand_type = d->btf->types[cand_id]; if (!btf_shallow_equal_struct(t, cand_type)) continue; btf_dedup_clear_hypot_map(d); - eq = btf_dedup_is_equiv(d, type_id, cand_node->type_id); + eq = btf_dedup_is_equiv(d, type_id, cand_id); if (eq < 0) return eq; if (!eq) continue; - new_id = cand_node->type_id; + new_id = cand_id; btf_dedup_merge_hypot_map(d); break; } @@ -2616,12 +2580,12 @@ static int btf_dedup_struct_types(struct btf_dedup *d) */ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id) { - struct btf_dedup_node *cand_node; + struct hashmap_entry *hash_entry; + __u32 new_id = type_id, cand_id; struct btf_type *t, *cand; /* if we don't find equivalent type, then we are representative type */ - __u32 new_id = type_id; int ref_type_id; - __u32 h; + long h; if (d->map[type_id] == BTF_IN_PROGRESS_ID) return -ELOOP; @@ -2644,10 +2608,11 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id) t->type = ref_type_id; h = btf_hash_common(t); - for_each_dedup_cand(d, h, cand_node) { - cand = d->btf->types[cand_node->type_id]; + for_each_dedup_cand(d, hash_entry, h) { + cand_id = (__u32)(long)hash_entry->value; + cand = d->btf->types[cand_id]; if (btf_equal_common(t, cand)) { - new_id = cand_node->type_id; + new_id = cand_id; break; } } @@ -2667,10 +2632,11 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id) info->index_type = ref_type_id; h = btf_hash_array(t); - for_each_dedup_cand(d, h, cand_node) { - cand = d->btf->types[cand_node->type_id]; + for_each_dedup_cand(d, hash_entry, h) { + cand_id = (__u32)(long)hash_entry->value; + cand = d->btf->types[cand_id]; if (btf_equal_array(t, cand)) { - new_id = cand_node->type_id; + new_id = cand_id; break; } } @@ -2698,10 +2664,11 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id) } h = btf_hash_fnproto(t); - for_each_dedup_cand(d, h, cand_node) { - cand = d->btf->types[cand_node->type_id]; + for_each_dedup_cand(d, hash_entry, h) { + cand_id = (__u32)(long)hash_entry->value; + cand = d->btf->types[cand_id]; if (btf_equal_fnproto(t, cand)) { - new_id = cand_node->type_id; + new_id = cand_id; break; } } @@ -2728,7 +2695,9 @@ static int btf_dedup_ref_types(struct btf_dedup *d) if (err < 0) return err; } - btf_dedup_table_free(d); + /* we won't need d->dedup_table anymore */ + hashmap__free(d->dedup_table); + d->dedup_table = NULL; return 0; } From 351131b51c7a27daf0fbdce80b619b8d130374c6 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 24 May 2019 11:59:03 -0700 Subject: [PATCH 17/83] libbpf: add btf_dump API for BTF-to-C conversion BTF contains enough type information to allow generating valid compilable C header w/ correct layout of structs/unions and all the typedef/enum definitions. This patch adds a new "object" - btf_dump to facilitate dumping BTF as valid C. btf_dump__dump_type() is the main API which takes care of dumping out (through user-provided printf-like callback function) C definitions for given type ID and it's required dependencies. This allows for not just dumping out entirety of BTF types, but also selective filtering based on user-provided criterias w/ minimal set of dependent types. 
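To make the intended usage concrete, here is a minimal sketch of the call
sequence (dump_all_types() and print_to_stderr() are illustrative names, not
part of this patch; availability of the IS_ERR()/PTR_ERR() error-pointer
helpers to the caller is assumed):

  #include <stdarg.h>
  #include <stdio.h>
  #include "btf.h"

  /* vprintf-style callback: forward generated C text to stderr */
  static void print_to_stderr(void *ctx, const char *fmt, va_list args)
  {
          vfprintf(stderr, fmt, args);
  }

  static int dump_all_types(const struct btf *btf)
  {
          struct btf_dump_opts opts = { .ctx = NULL };
          struct btf_dump *d;
          int i, err = 0;

          d = btf_dump__new(btf, NULL, &opts, print_to_stderr);
          if (IS_ERR(d))
                  return PTR_ERR(d);

          /* request every type ID; dependencies that were already emitted
           * as part of an earlier btf_dump__dump_type() call are skipped
           */
          for (i = 1; i <= btf__get_nr_types(btf); i++) {
                  err = btf_dump__dump_type(d, i);
                  if (err)
                          break;
          }

          btf_dump__free(d);
          return err;
  }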
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov --- tools/lib/bpf/Build | 4 +- tools/lib/bpf/btf.h | 17 + tools/lib/bpf/btf_dump.c | 1336 ++++++++++++++++++++++++++++++++++++++ tools/lib/bpf/libbpf.map | 3 + 4 files changed, 1359 insertions(+), 1 deletion(-) create mode 100644 tools/lib/bpf/btf_dump.c diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build index dcf0d36598e0..e3962cfbc9a6 100644 --- a/tools/lib/bpf/Build +++ b/tools/lib/bpf/Build @@ -1 +1,3 @@ -libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o netlink.o bpf_prog_linfo.o libbpf_probes.o xsk.o hashmap.o +libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o \ + netlink.o bpf_prog_linfo.o libbpf_probes.o xsk.o hashmap.o \ + btf_dump.o diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index bded210df9e8..ba4ffa831aa4 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -4,6 +4,7 @@ #ifndef __LIBBPF_BTF_H #define __LIBBPF_BTF_H +#include #include #ifdef __cplusplus @@ -102,6 +103,22 @@ struct btf_dedup_opts { LIBBPF_API int btf__dedup(struct btf *btf, struct btf_ext *btf_ext, const struct btf_dedup_opts *opts); +struct btf_dump; + +struct btf_dump_opts { + void *ctx; +}; + +typedef void (*btf_dump_printf_fn_t)(void *ctx, const char *fmt, va_list args); + +LIBBPF_API struct btf_dump *btf_dump__new(const struct btf *btf, + const struct btf_ext *btf_ext, + const struct btf_dump_opts *opts, + btf_dump_printf_fn_t printf_fn); +LIBBPF_API void btf_dump__free(struct btf_dump *d); + +LIBBPF_API int btf_dump__dump_type(struct btf_dump *d, __u32 id); + #ifdef __cplusplus } /* extern "C" */ #endif diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c new file mode 100644 index 000000000000..4b22db77e2cc --- /dev/null +++ b/tools/lib/bpf/btf_dump.c @@ -0,0 +1,1336 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +/* + * BTF-to-C type converter. + * + * Copyright (c) 2019 Facebook + */ + +#include +#include +#include +#include +#include +#include +#include +#include "btf.h" +#include "hashmap.h" +#include "libbpf.h" +#include "libbpf_internal.h" + +#define min(x, y) ((x) < (y) ? (x) : (y)) +#define max(x, y) ((x) < (y) ? (y) : (x)) + +static const char PREFIXES[] = "\t\t\t\t\t\t\t\t\t\t\t\t\t"; +static const size_t PREFIX_CNT = sizeof(PREFIXES) - 1; + +static const char *pfx(int lvl) +{ + return lvl >= PREFIX_CNT ? 
PREFIXES : &PREFIXES[PREFIX_CNT - lvl]; +} + +enum btf_dump_type_order_state { + NOT_ORDERED, + ORDERING, + ORDERED, +}; + +enum btf_dump_type_emit_state { + NOT_EMITTED, + EMITTING, + EMITTED, +}; + +/* per-type auxiliary state */ +struct btf_dump_type_aux_state { + /* topological sorting state */ + enum btf_dump_type_order_state order_state: 2; + /* emitting state used to determine the need for forward declaration */ + enum btf_dump_type_emit_state emit_state: 2; + /* whether forward declaration was already emitted */ + __u8 fwd_emitted: 1; + /* whether unique non-duplicate name was already assigned */ + __u8 name_resolved: 1; +}; + +struct btf_dump { + const struct btf *btf; + const struct btf_ext *btf_ext; + btf_dump_printf_fn_t printf_fn; + struct btf_dump_opts opts; + + /* per-type auxiliary state */ + struct btf_dump_type_aux_state *type_states; + /* per-type optional cached unique name, must be freed, if present */ + const char **cached_names; + + /* topo-sorted list of dependent type definitions */ + __u32 *emit_queue; + int emit_queue_cap; + int emit_queue_cnt; + + /* + * stack of type declarations (e.g., chain of modifiers, arrays, + * funcs, etc) + */ + __u32 *decl_stack; + int decl_stack_cap; + int decl_stack_cnt; + + /* maps struct/union/enum name to a number of name occurrences */ + struct hashmap *type_names; + /* + * maps typedef identifiers and enum value names to a number of such + * name occurrences + */ + struct hashmap *ident_names; +}; + +static size_t str_hash_fn(const void *key, void *ctx) +{ + const char *s = key; + size_t h = 0; + + while (*s) { + h = h * 31 + *s; + s++; + } + return h; +} + +static bool str_equal_fn(const void *a, const void *b, void *ctx) +{ + return strcmp(a, b) == 0; +} + +static __u16 btf_kind_of(const struct btf_type *t) +{ + return BTF_INFO_KIND(t->info); +} + +static __u16 btf_vlen_of(const struct btf_type *t) +{ + return BTF_INFO_VLEN(t->info); +} + +static bool btf_kflag_of(const struct btf_type *t) +{ + return BTF_INFO_KFLAG(t->info); +} + +static const char *btf_name_of(const struct btf_dump *d, __u32 name_off) +{ + return btf__name_by_offset(d->btf, name_off); +} + +static void btf_dump_printf(const struct btf_dump *d, const char *fmt, ...) +{ + va_list args; + + va_start(args, fmt); + d->printf_fn(d->opts.ctx, fmt, args); + va_end(args); +} + +struct btf_dump *btf_dump__new(const struct btf *btf, + const struct btf_ext *btf_ext, + const struct btf_dump_opts *opts, + btf_dump_printf_fn_t printf_fn) +{ + struct btf_dump *d; + int err; + + d = calloc(1, sizeof(struct btf_dump)); + if (!d) + return ERR_PTR(-ENOMEM); + + d->btf = btf; + d->btf_ext = btf_ext; + d->printf_fn = printf_fn; + d->opts.ctx = opts ? 
opts->ctx : NULL; + + d->type_names = hashmap__new(str_hash_fn, str_equal_fn, NULL); + if (IS_ERR(d->type_names)) { + err = PTR_ERR(d->type_names); + d->type_names = NULL; + btf_dump__free(d); + return ERR_PTR(err); + } + d->ident_names = hashmap__new(str_hash_fn, str_equal_fn, NULL); + if (IS_ERR(d->ident_names)) { + err = PTR_ERR(d->ident_names); + d->ident_names = NULL; + btf_dump__free(d); + return ERR_PTR(err); + } + + return d; +} + +void btf_dump__free(struct btf_dump *d) +{ + int i, cnt; + + if (!d) + return; + + free(d->type_states); + if (d->cached_names) { + /* any set cached name is owned by us and should be freed */ + for (i = 0, cnt = btf__get_nr_types(d->btf); i <= cnt; i++) { + if (d->cached_names[i]) + free((void *)d->cached_names[i]); + } + } + free(d->cached_names); + free(d->emit_queue); + free(d->decl_stack); + hashmap__free(d->type_names); + hashmap__free(d->ident_names); + + free(d); +} + +static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr); +static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id); + +/* + * Dump BTF type in a compilable C syntax, including all the necessary + * dependent types, necessary for compilation. If some of the dependent types + * were already emitted as part of previous btf_dump__dump_type() invocation + * for another type, they won't be emitted again. This API allows callers to + * filter out BTF types according to user-defined criterias and emitted only + * minimal subset of types, necessary to compile everything. Full struct/union + * definitions will still be emitted, even if the only usage is through + * pointer and could be satisfied with just a forward declaration. + * + * Dumping is done in two high-level passes: + * 1. Topologically sort type definitions to satisfy C rules of compilation. + * 2. Emit type definitions in C syntax. + * + * Returns 0 on success; <0, otherwise. + */ +int btf_dump__dump_type(struct btf_dump *d, __u32 id) +{ + int err, i; + + if (id > btf__get_nr_types(d->btf)) + return -EINVAL; + + /* type states are lazily allocated, as they might not be needed */ + if (!d->type_states) { + d->type_states = calloc(1 + btf__get_nr_types(d->btf), + sizeof(d->type_states[0])); + if (!d->type_states) + return -ENOMEM; + d->cached_names = calloc(1 + btf__get_nr_types(d->btf), + sizeof(d->cached_names[0])); + if (!d->cached_names) + return -ENOMEM; + + /* VOID is special */ + d->type_states[0].order_state = ORDERED; + d->type_states[0].emit_state = EMITTED; + } + + d->emit_queue_cnt = 0; + err = btf_dump_order_type(d, id, false); + if (err < 0) + return err; + + for (i = 0; i < d->emit_queue_cnt; i++) + btf_dump_emit_type(d, d->emit_queue[i], 0 /*top-level*/); + + return 0; +} + +static int btf_dump_add_emit_queue_id(struct btf_dump *d, __u32 id) +{ + __u32 *new_queue; + size_t new_cap; + + if (d->emit_queue_cnt >= d->emit_queue_cap) { + new_cap = max(16, d->emit_queue_cap * 3 / 2); + new_queue = realloc(d->emit_queue, + new_cap * sizeof(new_queue[0])); + if (!new_queue) + return -ENOMEM; + d->emit_queue = new_queue; + d->emit_queue_cap = new_cap; + } + + d->emit_queue[d->emit_queue_cnt++] = id; + return 0; +} + +/* + * Determine order of emitting dependent types and specified type to satisfy + * C compilation rules. This is done through topological sorting with an + * additional complication which comes from C rules. The main idea for C is + * that if some type is "embedded" into a struct/union, it's size needs to be + * known at the time of definition of containing type. 
E.g., for: + * + * struct A {}; + * struct B { struct A x; } + * + * struct A *HAS* to be defined before struct B, because it's "embedded", + * i.e., it is part of struct B layout. But in the following case: + * + * struct A; + * struct B { struct A *x; } + * struct A {}; + * + * it's enough to just have a forward declaration of struct A at the time of + * struct B definition, as struct B has a pointer to struct A, so the size of + * field x is known without knowing struct A size: it's sizeof(void *). + * + * Unfortunately, there are some trickier cases we need to handle, e.g.: + * + * struct A {}; // if this was forward-declaration: compilation error + * struct B { + * struct { // anonymous struct + * struct A y; + * } *x; + * }; + * + * In this case, struct B's field x is a pointer, so it's size is known + * regardless of the size of (anonymous) struct it points to. But because this + * struct is anonymous and thus defined inline inside struct B, *and* it + * embeds struct A, compiler requires full definition of struct A to be known + * before struct B can be defined. This creates a transitive dependency + * between struct A and struct B. If struct A was forward-declared before + * struct B definition and fully defined after struct B definition, that would + * trigger compilation error. + * + * All this means that while we are doing topological sorting on BTF type + * graph, we need to determine relationships between different types (graph + * nodes): + * - weak link (relationship) between X and Y, if Y *CAN* be + * forward-declared at the point of X definition; + * - strong link, if Y *HAS* to be fully-defined before X can be defined. + * + * The rule is as follows. Given a chain of BTF types from X to Y, if there is + * BTF_KIND_PTR type in the chain and at least one non-anonymous type + * Z (excluding X, including Y), then link is weak. Otherwise, it's strong. + * Weak/strong relationship is determined recursively during DFS traversal and + * is returned as a result from btf_dump_order_type(). + * + * btf_dump_order_type() is trying to avoid unnecessary forward declarations, + * but it is not guaranteeing that no extraneous forward declarations will be + * emitted. + * + * To avoid extra work, algorithm marks some of BTF types as ORDERED, when + * it's done with them, but not for all (e.g., VOLATILE, CONST, RESTRICT, + * ARRAY, FUNC_PROTO), as weak/strong semantics for those depends on the + * entire graph path, so depending where from one came to that BTF type, it + * might cause weak or strong ordering. For types like STRUCT/UNION/INT/ENUM, + * once they are processed, there is no need to do it again, so they are + * marked as ORDERED. We can mark PTR as ORDERED as well, as it semi-forces + * weak link, unless subsequent referenced STRUCT/UNION/ENUM is anonymous. But + * in any case, once those are processed, no need to do it again, as the + * result won't change. + * + * Returns: + * - 1, if type is part of strong link (so there is strong topological + * ordering requirements); + * - 0, if type is part of weak link (so can be satisfied through forward + * declaration); + * - <0, on error (e.g., unsatisfiable type loop detected). + */ +static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr) +{ + /* + * Order state is used to detect strong link cycles, but only for BTF + * kinds that are or could be an independent definition (i.e., + * stand-alone fwd decl, enum, typedef, struct, union). 
Ptrs, arrays, + * func_protos, modifiers are just means to get to these definitions. + * Int/void don't need definitions, they are assumed to be always + * properly defined. We also ignore datasec, var, and funcs for now. + * So for all non-defining kinds, we never even set ordering state, + * for defining kinds we set ORDERING and subsequently ORDERED if it + * forms a strong link. + */ + struct btf_dump_type_aux_state *tstate = &d->type_states[id]; + const struct btf_type *t; + __u16 kind, vlen; + int err, i; + + /* return true, letting typedefs know that it's ok to be emitted */ + if (tstate->order_state == ORDERED) + return 1; + + t = btf__type_by_id(d->btf, id); + kind = btf_kind_of(t); + + if (tstate->order_state == ORDERING) { + /* type loop, but resolvable through fwd declaration */ + if ((kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION) && + through_ptr && t->name_off != 0) + return 0; + pr_warning("unsatisfiable type cycle, id:[%u]\n", id); + return -ELOOP; + } + + switch (kind) { + case BTF_KIND_INT: + tstate->order_state = ORDERED; + return 0; + + case BTF_KIND_PTR: + err = btf_dump_order_type(d, t->type, true); + tstate->order_state = ORDERED; + return err; + + case BTF_KIND_ARRAY: { + const struct btf_array *a = (void *)(t + 1); + + return btf_dump_order_type(d, a->type, through_ptr); + } + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: { + const struct btf_member *m = (void *)(t + 1); + /* + * struct/union is part of strong link, only if it's embedded + * (so no ptr in a path) or it's anonymous (so has to be + * defined inline, even if declared through ptr) + */ + if (through_ptr && t->name_off != 0) + return 0; + + tstate->order_state = ORDERING; + + vlen = btf_vlen_of(t); + for (i = 0; i < vlen; i++, m++) { + err = btf_dump_order_type(d, m->type, false); + if (err < 0) + return err; + } + + if (t->name_off != 0) { + err = btf_dump_add_emit_queue_id(d, id); + if (err < 0) + return err; + } + + tstate->order_state = ORDERED; + return 1; + } + case BTF_KIND_ENUM: + case BTF_KIND_FWD: + if (t->name_off != 0) { + err = btf_dump_add_emit_queue_id(d, id); + if (err) + return err; + } + tstate->order_state = ORDERED; + return 1; + + case BTF_KIND_TYPEDEF: { + int is_strong; + + is_strong = btf_dump_order_type(d, t->type, through_ptr); + if (is_strong < 0) + return is_strong; + + /* typedef is similar to struct/union w.r.t. 
fwd-decls */ + if (through_ptr && !is_strong) + return 0; + + /* typedef is always a named definition */ + err = btf_dump_add_emit_queue_id(d, id); + if (err) + return err; + + d->type_states[id].order_state = ORDERED; + return 1; + } + case BTF_KIND_VOLATILE: + case BTF_KIND_CONST: + case BTF_KIND_RESTRICT: + return btf_dump_order_type(d, t->type, through_ptr); + + case BTF_KIND_FUNC_PROTO: { + const struct btf_param *p = (void *)(t + 1); + bool is_strong; + + err = btf_dump_order_type(d, t->type, through_ptr); + if (err < 0) + return err; + is_strong = err > 0; + + vlen = btf_vlen_of(t); + for (i = 0; i < vlen; i++, p++) { + err = btf_dump_order_type(d, p->type, through_ptr); + if (err < 0) + return err; + if (err > 0) + is_strong = true; + } + return is_strong; + } + case BTF_KIND_FUNC: + case BTF_KIND_VAR: + case BTF_KIND_DATASEC: + d->type_states[id].order_state = ORDERED; + return 0; + + default: + return -EINVAL; + } +} + +static void btf_dump_emit_struct_fwd(struct btf_dump *d, __u32 id, + const struct btf_type *t); +static void btf_dump_emit_struct_def(struct btf_dump *d, __u32 id, + const struct btf_type *t, int lvl); + +static void btf_dump_emit_enum_fwd(struct btf_dump *d, __u32 id, + const struct btf_type *t); +static void btf_dump_emit_enum_def(struct btf_dump *d, __u32 id, + const struct btf_type *t, int lvl); + +static void btf_dump_emit_fwd_def(struct btf_dump *d, __u32 id, + const struct btf_type *t); + +static void btf_dump_emit_typedef_def(struct btf_dump *d, __u32 id, + const struct btf_type *t, int lvl); + +/* a local view into a shared stack */ +struct id_stack { + const __u32 *ids; + int cnt; +}; + +static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id, + const char *fname, int lvl); +static void btf_dump_emit_type_chain(struct btf_dump *d, + struct id_stack *decl_stack, + const char *fname, int lvl); + +static const char *btf_dump_type_name(struct btf_dump *d, __u32 id); +static const char *btf_dump_ident_name(struct btf_dump *d, __u32 id); +static size_t btf_dump_name_dups(struct btf_dump *d, struct hashmap *name_map, + const char *orig_name); + +static bool btf_dump_is_blacklisted(struct btf_dump *d, __u32 id) +{ + const struct btf_type *t = btf__type_by_id(d->btf, id); + + /* __builtin_va_list is a compiler built-in, which causes compilation + * errors, when compiling w/ different compiler, then used to compile + * original code (e.g., GCC to compile kernel, Clang to use generated + * C header from BTF). As it is built-in, it should be already defined + * properly internally in compiler. + */ + if (t->name_off == 0) + return false; + return strcmp(btf_name_of(d, t->name_off), "__builtin_va_list") == 0; +} + +/* + * Emit C-syntax definitions of types from chains of BTF types. + * + * High-level handling of determining necessary forward declarations are handled + * by btf_dump_emit_type() itself, but all nitty-gritty details of emitting type + * declarations/definitions in C syntax are handled by a combo of + * btf_dump_emit_type_decl()/btf_dump_emit_type_chain() w/ delegation to + * corresponding btf_dump_emit_*_{def,fwd}() functions. + * + * We also keep track of "containing struct/union type ID" to determine when + * we reference it from inside and thus can avoid emitting unnecessary forward + * declaration. 
+ * + * This algorithm is designed in such a way, that even if some error occurs + * (either technical, e.g., out of memory, or logical, i.e., malformed BTF + * that doesn't comply to C rules completely), algorithm will try to proceed + * and produce as much meaningful output as possible. + */ +static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id) +{ + struct btf_dump_type_aux_state *tstate = &d->type_states[id]; + bool top_level_def = cont_id == 0; + const struct btf_type *t; + __u16 kind; + + if (tstate->emit_state == EMITTED) + return; + + t = btf__type_by_id(d->btf, id); + kind = btf_kind_of(t); + + if (top_level_def && t->name_off == 0) { + pr_warning("unexpected nameless definition, id:[%u]\n", id); + return; + } + + if (tstate->emit_state == EMITTING) { + if (tstate->fwd_emitted) + return; + + switch (kind) { + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + /* + * if we are referencing a struct/union that we are + * part of - then no need for fwd declaration + */ + if (id == cont_id) + return; + if (t->name_off == 0) { + pr_warning("anonymous struct/union loop, id:[%u]\n", + id); + return; + } + btf_dump_emit_struct_fwd(d, id, t); + btf_dump_printf(d, ";\n\n"); + tstate->fwd_emitted = 1; + break; + case BTF_KIND_TYPEDEF: + /* + * for typedef fwd_emitted means typedef definition + * was emitted, but it can be used only for "weak" + * references through pointer only, not for embedding + */ + if (!btf_dump_is_blacklisted(d, id)) { + btf_dump_emit_typedef_def(d, id, t, 0); + btf_dump_printf(d, ";\n\n"); + }; + tstate->fwd_emitted = 1; + break; + default: + break; + } + + return; + } + + switch (kind) { + case BTF_KIND_INT: + tstate->emit_state = EMITTED; + break; + case BTF_KIND_ENUM: + if (top_level_def) { + btf_dump_emit_enum_def(d, id, t, 0); + btf_dump_printf(d, ";\n\n"); + } + tstate->emit_state = EMITTED; + break; + case BTF_KIND_PTR: + case BTF_KIND_VOLATILE: + case BTF_KIND_CONST: + case BTF_KIND_RESTRICT: + btf_dump_emit_type(d, t->type, cont_id); + break; + case BTF_KIND_ARRAY: { + const struct btf_array *a = (void *)(t + 1); + + btf_dump_emit_type(d, a->type, cont_id); + break; + } + case BTF_KIND_FWD: + btf_dump_emit_fwd_def(d, id, t); + btf_dump_printf(d, ";\n\n"); + tstate->emit_state = EMITTED; + break; + case BTF_KIND_TYPEDEF: + tstate->emit_state = EMITTING; + btf_dump_emit_type(d, t->type, id); + /* + * typedef can server as both definition and forward + * declaration; at this stage someone depends on + * typedef as a forward declaration (refers to it + * through pointer), so unless we already did it, + * emit typedef as a forward declaration + */ + if (!tstate->fwd_emitted && !btf_dump_is_blacklisted(d, id)) { + btf_dump_emit_typedef_def(d, id, t, 0); + btf_dump_printf(d, ";\n\n"); + } + tstate->emit_state = EMITTED; + break; + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + tstate->emit_state = EMITTING; + /* if it's a top-level struct/union definition or struct/union + * is anonymous, then in C we'll be emitting all fields and + * their types (as opposed to just `struct X`), so we need to + * make sure that all types, referenced from struct/union + * members have necessary forward-declarations, where + * applicable + */ + if (top_level_def || t->name_off == 0) { + const struct btf_member *m = (void *)(t + 1); + __u16 vlen = btf_vlen_of(t); + int i, new_cont_id; + + new_cont_id = t->name_off == 0 ? 
cont_id : id; + for (i = 0; i < vlen; i++, m++) + btf_dump_emit_type(d, m->type, new_cont_id); + } else if (!tstate->fwd_emitted && id != cont_id) { + btf_dump_emit_struct_fwd(d, id, t); + btf_dump_printf(d, ";\n\n"); + tstate->fwd_emitted = 1; + } + + if (top_level_def) { + btf_dump_emit_struct_def(d, id, t, 0); + btf_dump_printf(d, ";\n\n"); + tstate->emit_state = EMITTED; + } else { + tstate->emit_state = NOT_EMITTED; + } + break; + case BTF_KIND_FUNC_PROTO: { + const struct btf_param *p = (void *)(t + 1); + __u16 vlen = btf_vlen_of(t); + int i; + + btf_dump_emit_type(d, t->type, cont_id); + for (i = 0; i < vlen; i++, p++) + btf_dump_emit_type(d, p->type, cont_id); + + break; + } + default: + break; + } +} + +static int btf_align_of(const struct btf *btf, __u32 id) +{ + const struct btf_type *t = btf__type_by_id(btf, id); + __u16 kind = btf_kind_of(t); + + switch (kind) { + case BTF_KIND_INT: + case BTF_KIND_ENUM: + return min(sizeof(void *), t->size); + case BTF_KIND_PTR: + return sizeof(void *); + case BTF_KIND_TYPEDEF: + case BTF_KIND_VOLATILE: + case BTF_KIND_CONST: + case BTF_KIND_RESTRICT: + return btf_align_of(btf, t->type); + case BTF_KIND_ARRAY: { + const struct btf_array *a = (void *)(t + 1); + + return btf_align_of(btf, a->type); + } + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: { + const struct btf_member *m = (void *)(t + 1); + __u16 vlen = btf_vlen_of(t); + int i, align = 1; + + for (i = 0; i < vlen; i++, m++) + align = max(align, btf_align_of(btf, m->type)); + + return align; + } + default: + pr_warning("unsupported BTF_KIND:%u\n", btf_kind_of(t)); + return 1; + } +} + +static bool btf_is_struct_packed(const struct btf *btf, __u32 id, + const struct btf_type *t) +{ + const struct btf_member *m; + int align, i, bit_sz; + __u16 vlen; + bool kflag; + + align = btf_align_of(btf, id); + /* size of a non-packed struct has to be a multiple of its alignment*/ + if (t->size % align) + return true; + + m = (void *)(t + 1); + kflag = btf_kflag_of(t); + vlen = btf_vlen_of(t); + /* all non-bitfield fields have to be naturally aligned */ + for (i = 0; i < vlen; i++, m++) { + align = btf_align_of(btf, m->type); + bit_sz = kflag ? BTF_MEMBER_BITFIELD_SIZE(m->offset) : 0; + if (bit_sz == 0 && m->offset % (8 * align) != 0) + return true; + } + + /* + * if original struct was marked as packed, but its layout is + * naturally aligned, we'll detect that it's not packed + */ + return false; +} + +static int chip_away_bits(int total, int at_most) +{ + return total % at_most ? 
: at_most; +} + +static void btf_dump_emit_bit_padding(const struct btf_dump *d, + int cur_off, int m_off, int m_bit_sz, + int align, int lvl) +{ + int off_diff = m_off - cur_off; + int ptr_bits = sizeof(void *) * 8; + + if (off_diff <= 0) + /* no gap */ + return; + if (m_bit_sz == 0 && off_diff < align * 8) + /* natural padding will take care of a gap */ + return; + + while (off_diff > 0) { + const char *pad_type; + int pad_bits; + + if (ptr_bits > 32 && off_diff > 32) { + pad_type = "long"; + pad_bits = chip_away_bits(off_diff, ptr_bits); + } else if (off_diff > 16) { + pad_type = "int"; + pad_bits = chip_away_bits(off_diff, 32); + } else if (off_diff > 8) { + pad_type = "short"; + pad_bits = chip_away_bits(off_diff, 16); + } else { + pad_type = "char"; + pad_bits = chip_away_bits(off_diff, 8); + } + btf_dump_printf(d, "\n%s%s: %d;", pfx(lvl), pad_type, pad_bits); + off_diff -= pad_bits; + } +} + +static void btf_dump_emit_struct_fwd(struct btf_dump *d, __u32 id, + const struct btf_type *t) +{ + btf_dump_printf(d, "%s %s", + btf_kind_of(t) == BTF_KIND_STRUCT ? "struct" : "union", + btf_dump_type_name(d, id)); +} + +static void btf_dump_emit_struct_def(struct btf_dump *d, + __u32 id, + const struct btf_type *t, + int lvl) +{ + const struct btf_member *m = (void *)(t + 1); + bool kflag = btf_kflag_of(t), is_struct; + int align, i, packed, off = 0; + __u16 vlen = btf_vlen_of(t); + + is_struct = btf_kind_of(t) == BTF_KIND_STRUCT; + packed = is_struct ? btf_is_struct_packed(d->btf, id, t) : 0; + align = packed ? 1 : btf_align_of(d->btf, id); + + btf_dump_printf(d, "%s%s%s {", + is_struct ? "struct" : "union", + t->name_off ? " " : "", + btf_dump_type_name(d, id)); + + for (i = 0; i < vlen; i++, m++) { + const char *fname; + int m_off, m_sz; + + fname = btf_name_of(d, m->name_off); + m_sz = kflag ? BTF_MEMBER_BITFIELD_SIZE(m->offset) : 0; + m_off = kflag ? BTF_MEMBER_BIT_OFFSET(m->offset) : m->offset; + align = packed ? 1 : btf_align_of(d->btf, m->type); + + btf_dump_emit_bit_padding(d, off, m_off, m_sz, align, lvl + 1); + btf_dump_printf(d, "\n%s", pfx(lvl + 1)); + btf_dump_emit_type_decl(d, m->type, fname, lvl + 1); + + if (m_sz) { + btf_dump_printf(d, ": %d", m_sz); + off = m_off + m_sz; + } else { + m_sz = max(0, btf__resolve_size(d->btf, m->type)); + off = m_off + m_sz * 8; + } + btf_dump_printf(d, ";"); + } + + if (vlen) + btf_dump_printf(d, "\n"); + btf_dump_printf(d, "%s}", pfx(lvl)); + if (packed) + btf_dump_printf(d, " __attribute__((packed))"); +} + +static void btf_dump_emit_enum_fwd(struct btf_dump *d, __u32 id, + const struct btf_type *t) +{ + btf_dump_printf(d, "enum %s", btf_dump_type_name(d, id)); +} + +static void btf_dump_emit_enum_def(struct btf_dump *d, __u32 id, + const struct btf_type *t, + int lvl) +{ + const struct btf_enum *v = (void *)(t+1); + __u16 vlen = btf_vlen_of(t); + const char *name; + size_t dup_cnt; + int i; + + btf_dump_printf(d, "enum%s%s", + t->name_off ? 
" " : "", + btf_dump_type_name(d, id)); + + if (vlen) { + btf_dump_printf(d, " {"); + for (i = 0; i < vlen; i++, v++) { + name = btf_name_of(d, v->name_off); + /* enumerators share namespace with typedef idents */ + dup_cnt = btf_dump_name_dups(d, d->ident_names, name); + if (dup_cnt > 1) { + btf_dump_printf(d, "\n%s%s___%zu = %d,", + pfx(lvl + 1), name, dup_cnt, + (__s32)v->val); + } else { + btf_dump_printf(d, "\n%s%s = %d,", + pfx(lvl + 1), name, + (__s32)v->val); + } + } + btf_dump_printf(d, "\n%s}", pfx(lvl)); + } +} + +static void btf_dump_emit_fwd_def(struct btf_dump *d, __u32 id, + const struct btf_type *t) +{ + const char *name = btf_dump_type_name(d, id); + + if (btf_kflag_of(t)) + btf_dump_printf(d, "union %s", name); + else + btf_dump_printf(d, "struct %s", name); +} + +static void btf_dump_emit_typedef_def(struct btf_dump *d, __u32 id, + const struct btf_type *t, int lvl) +{ + const char *name = btf_dump_ident_name(d, id); + + btf_dump_printf(d, "typedef "); + btf_dump_emit_type_decl(d, t->type, name, lvl); +} + +static int btf_dump_push_decl_stack_id(struct btf_dump *d, __u32 id) +{ + __u32 *new_stack; + size_t new_cap; + + if (d->decl_stack_cnt >= d->decl_stack_cap) { + new_cap = max(16, d->decl_stack_cap * 3 / 2); + new_stack = realloc(d->decl_stack, + new_cap * sizeof(new_stack[0])); + if (!new_stack) + return -ENOMEM; + d->decl_stack = new_stack; + d->decl_stack_cap = new_cap; + } + + d->decl_stack[d->decl_stack_cnt++] = id; + + return 0; +} + +/* + * Emit type declaration (e.g., field type declaration in a struct or argument + * declaration in function prototype) in correct C syntax. + * + * For most types it's trivial, but there are few quirky type declaration + * cases worth mentioning: + * - function prototypes (especially nesting of function prototypes); + * - arrays; + * - const/volatile/restrict for pointers vs other types. + * + * For a good discussion of *PARSING* C syntax (as a human), see + * Peter van der Linden's "Expert C Programming: Deep C Secrets", + * Ch.3 "Unscrambling Declarations in C". + * + * It won't help with BTF to C conversion much, though, as it's an opposite + * problem. So we came up with this algorithm in reverse to van der Linden's + * parsing algorithm. It goes from structured BTF representation of type + * declaration to a valid compilable C syntax. + * + * For instance, consider this C typedef: + * typedef const int * const * arr[10] arr_t; + * It will be represented in BTF with this chain of BTF types: + * [typedef] -> [array] -> [ptr] -> [const] -> [ptr] -> [const] -> [int] + * + * Notice how [const] modifier always goes before type it modifies in BTF type + * graph, but in C syntax, const/volatile/restrict modifiers are written to + * the right of pointers, but to the left of other types. There are also other + * quirks, like function pointers, arrays of them, functions returning other + * functions, etc. + * + * We handle that by pushing all the types to a stack, until we hit "terminal" + * type (int/enum/struct/union/fwd). Then depending on the kind of a type on + * top of a stack, modifiers are handled differently. Array/function pointers + * have also wildly different syntax and how nesting of them are done. See + * code for authoritative definition. + * + * To avoid allocating new stack for each independent chain of BTF types, we + * share one bigger stack, with each chain working only on its own local view + * of a stack frame. Some care is required to "pop" stack frames after + * processing type declaration chain. 
+ */ +static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id, + const char *fname, int lvl) +{ + struct id_stack decl_stack; + const struct btf_type *t; + int err, stack_start; + __u16 kind; + + stack_start = d->decl_stack_cnt; + for (;;) { + err = btf_dump_push_decl_stack_id(d, id); + if (err < 0) { + /* + * if we don't have enough memory for entire type decl + * chain, restore stack, emit warning, and try to + * proceed nevertheless + */ + pr_warning("not enough memory for decl stack:%d", err); + d->decl_stack_cnt = stack_start; + return; + } + + /* VOID */ + if (id == 0) + break; + + t = btf__type_by_id(d->btf, id); + kind = btf_kind_of(t); + switch (kind) { + case BTF_KIND_PTR: + case BTF_KIND_VOLATILE: + case BTF_KIND_CONST: + case BTF_KIND_RESTRICT: + case BTF_KIND_FUNC_PROTO: + id = t->type; + break; + case BTF_KIND_ARRAY: { + const struct btf_array *a = (void *)(t + 1); + + id = a->type; + break; + } + case BTF_KIND_INT: + case BTF_KIND_ENUM: + case BTF_KIND_FWD: + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + case BTF_KIND_TYPEDEF: + goto done; + default: + pr_warning("unexpected type in decl chain, kind:%u, id:[%u]\n", + kind, id); + goto done; + } + } +done: + /* + * We might be inside a chain of declarations (e.g., array of function + * pointers returning anonymous (so inlined) structs, having another + * array field). Each of those needs its own "stack frame" to handle + * emitting of declarations. Those stack frames are non-overlapping + * portions of shared btf_dump->decl_stack. To make it a bit nicer to + * handle this set of nested stacks, we create a view corresponding to + * our own "stack frame" and work with it as an independent stack. + * We'll need to clean up after emit_type_chain() returns, though. + */ + decl_stack.ids = d->decl_stack + stack_start; + decl_stack.cnt = d->decl_stack_cnt - stack_start; + btf_dump_emit_type_chain(d, &decl_stack, fname, lvl); + /* + * emit_type_chain() guarantees that it will pop its entire decl_stack + * frame before returning. But it works with a read-only view into + * decl_stack, so it doesn't actually pop anything from the + * perspective of shared btf_dump->decl_stack, per se. We need to + * reset decl_stack state to how it was before us to avoid it growing + * all the time. + */ + d->decl_stack_cnt = stack_start; +} + +static void btf_dump_emit_mods(struct btf_dump *d, struct id_stack *decl_stack) +{ + const struct btf_type *t; + __u32 id; + + while (decl_stack->cnt) { + id = decl_stack->ids[decl_stack->cnt - 1]; + t = btf__type_by_id(d->btf, id); + + switch (btf_kind_of(t)) { + case BTF_KIND_VOLATILE: + btf_dump_printf(d, "volatile "); + break; + case BTF_KIND_CONST: + btf_dump_printf(d, "const "); + break; + case BTF_KIND_RESTRICT: + btf_dump_printf(d, "restrict "); + break; + default: + return; + } + decl_stack->cnt--; + } +} + +static bool btf_is_mod_kind(const struct btf *btf, __u32 id) +{ + const struct btf_type *t = btf__type_by_id(btf, id); + + switch (btf_kind_of(t)) { + case BTF_KIND_VOLATILE: + case BTF_KIND_CONST: + case BTF_KIND_RESTRICT: + return true; + default: + return false; + } +} + +static void btf_dump_emit_name(const struct btf_dump *d, + const char *name, bool last_was_ptr) +{ + bool separate = name[0] && !last_was_ptr; + + btf_dump_printf(d, "%s%s", separate ? 
" " : "", name); +} + +static void btf_dump_emit_type_chain(struct btf_dump *d, + struct id_stack *decls, + const char *fname, int lvl) +{ + /* + * last_was_ptr is used to determine if we need to separate pointer + * asterisk (*) from previous part of type signature with space, so + * that we get `int ***`, instead of `int * * *`. We default to true + * for cases where we have single pointer in a chain. E.g., in ptr -> + * func_proto case. func_proto will start a new emit_type_chain call + * with just ptr, which should be emitted as (*) or (*), so we + * don't want to prepend space for that last pointer. + */ + bool last_was_ptr = true; + const struct btf_type *t; + const char *name; + __u16 kind; + __u32 id; + + while (decls->cnt) { + id = decls->ids[--decls->cnt]; + if (id == 0) { + /* VOID is a special snowflake */ + btf_dump_emit_mods(d, decls); + btf_dump_printf(d, "void"); + last_was_ptr = false; + continue; + } + + t = btf__type_by_id(d->btf, id); + kind = btf_kind_of(t); + + switch (kind) { + case BTF_KIND_INT: + btf_dump_emit_mods(d, decls); + name = btf_name_of(d, t->name_off); + btf_dump_printf(d, "%s", name); + break; + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + btf_dump_emit_mods(d, decls); + /* inline anonymous struct/union */ + if (t->name_off == 0) + btf_dump_emit_struct_def(d, id, t, lvl); + else + btf_dump_emit_struct_fwd(d, id, t); + break; + case BTF_KIND_ENUM: + btf_dump_emit_mods(d, decls); + /* inline anonymous enum */ + if (t->name_off == 0) + btf_dump_emit_enum_def(d, id, t, lvl); + else + btf_dump_emit_enum_fwd(d, id, t); + break; + case BTF_KIND_FWD: + btf_dump_emit_mods(d, decls); + btf_dump_emit_fwd_def(d, id, t); + break; + case BTF_KIND_TYPEDEF: + btf_dump_emit_mods(d, decls); + btf_dump_printf(d, "%s", btf_dump_ident_name(d, id)); + break; + case BTF_KIND_PTR: + btf_dump_printf(d, "%s", last_was_ptr ? "*" : " *"); + break; + case BTF_KIND_VOLATILE: + btf_dump_printf(d, " volatile"); + break; + case BTF_KIND_CONST: + btf_dump_printf(d, " const"); + break; + case BTF_KIND_RESTRICT: + btf_dump_printf(d, " restrict"); + break; + case BTF_KIND_ARRAY: { + const struct btf_array *a = (void *)(t + 1); + const struct btf_type *next_t; + __u32 next_id; + bool multidim; + /* + * GCC has a bug + * (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=8354) + * which causes it to emit extra const/volatile + * modifiers for an array, if array's element type has + * const/volatile modifiers. Clang doesn't do that. + * In general, it doesn't seem very meaningful to have + * a const/volatile modifier for array, so we are + * going to silently skip them here. 
+ */ + while (decls->cnt) { + next_id = decls->ids[decls->cnt - 1]; + if (btf_is_mod_kind(d->btf, next_id)) + decls->cnt--; + else + break; + } + + if (decls->cnt == 0) { + btf_dump_emit_name(d, fname, last_was_ptr); + btf_dump_printf(d, "[%u]", a->nelems); + return; + } + + next_t = btf__type_by_id(d->btf, next_id); + multidim = btf_kind_of(next_t) == BTF_KIND_ARRAY; + /* we need space if we have named non-pointer */ + if (fname[0] && !last_was_ptr) + btf_dump_printf(d, " "); + /* no parentheses for multi-dimensional array */ + if (!multidim) + btf_dump_printf(d, "("); + btf_dump_emit_type_chain(d, decls, fname, lvl); + if (!multidim) + btf_dump_printf(d, ")"); + btf_dump_printf(d, "[%u]", a->nelems); + return; + } + case BTF_KIND_FUNC_PROTO: { + const struct btf_param *p = (void *)(t + 1); + __u16 vlen = btf_vlen_of(t); + int i; + + btf_dump_emit_mods(d, decls); + if (decls->cnt) { + btf_dump_printf(d, " ("); + btf_dump_emit_type_chain(d, decls, fname, lvl); + btf_dump_printf(d, ")"); + } else { + btf_dump_emit_name(d, fname, last_was_ptr); + } + btf_dump_printf(d, "("); + /* + * Clang for BPF target generates func_proto with no + * args as a func_proto with a single void arg (e.g., + * `int (*f)(void)` vs just `int (*f)()`). We are + * going to pretend there are no args for such case. + */ + if (vlen == 1 && p->type == 0) { + btf_dump_printf(d, ")"); + return; + } + + for (i = 0; i < vlen; i++, p++) { + if (i > 0) + btf_dump_printf(d, ", "); + + /* last arg of type void is vararg */ + if (i == vlen - 1 && p->type == 0) { + btf_dump_printf(d, "..."); + break; + } + + name = btf_name_of(d, p->name_off); + btf_dump_emit_type_decl(d, p->type, name, lvl); + } + + btf_dump_printf(d, ")"); + return; + } + default: + pr_warning("unexpected type in decl chain, kind:%u, id:[%u]\n", + kind, id); + return; + } + + last_was_ptr = kind == BTF_KIND_PTR; + } + + btf_dump_emit_name(d, fname, last_was_ptr); +} + +/* return number of duplicates (occurrences) of a given name */ +static size_t btf_dump_name_dups(struct btf_dump *d, struct hashmap *name_map, + const char *orig_name) +{ + size_t dup_cnt = 0; + + hashmap__find(name_map, orig_name, (void **)&dup_cnt); + dup_cnt++; + hashmap__set(name_map, orig_name, (void *)dup_cnt, NULL, NULL); + + return dup_cnt; +} + +static const char *btf_dump_resolve_name(struct btf_dump *d, __u32 id, + struct hashmap *name_map) +{ + struct btf_dump_type_aux_state *s = &d->type_states[id]; + const struct btf_type *t = btf__type_by_id(d->btf, id); + const char *orig_name = btf_name_of(d, t->name_off); + const char **cached_name = &d->cached_names[id]; + size_t dup_cnt; + + if (t->name_off == 0) + return ""; + + if (s->name_resolved) + return *cached_name ? *cached_name : orig_name; + + dup_cnt = btf_dump_name_dups(d, name_map, orig_name); + if (dup_cnt > 1) { + const size_t max_len = 256; + char new_name[max_len]; + + snprintf(new_name, max_len, "%s___%zu", orig_name, dup_cnt); + *cached_name = strdup(new_name); + } + + s->name_resolved = 1; + return *cached_name ? 
*cached_name : orig_name; +} + +static const char *btf_dump_type_name(struct btf_dump *d, __u32 id) +{ + return btf_dump_resolve_name(d, id, d->type_names); +} + +static const char *btf_dump_ident_name(struct btf_dump *d, __u32 id) +{ + return btf_dump_resolve_name(d, id, d->ident_names); +} diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 6ea5ce19b9e0..8bf51d0a6072 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -167,5 +167,8 @@ LIBBPF_0.0.3 { LIBBPF_0.0.4 { global: + btf_dump__dump_type; + btf_dump__free; + btf_dump__new; btf__parse_elf; } LIBBPF_0.0.3; From 2d2a3ad872f884d618f11e1a2028cff862503dcd Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 24 May 2019 11:59:04 -0700 Subject: [PATCH 18/83] selftests/bpf: add btf_dump BTF-to-C conversion tests Add new test_btf_dump set of tests, validating BTF-to-C conversion correctness. Tests rely on clang to generate BTF from provided C test cases. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/.gitignore | 1 + tools/testing/selftests/bpf/Makefile | 3 +- .../bpf/progs/btf_dump_test_case_bitfields.c | 92 +++++++ .../bpf/progs/btf_dump_test_case_multidim.c | 35 +++ .../progs/btf_dump_test_case_namespacing.c | 73 ++++++ .../bpf/progs/btf_dump_test_case_ordering.c | 63 +++++ .../bpf/progs/btf_dump_test_case_packing.c | 75 ++++++ .../bpf/progs/btf_dump_test_case_padding.c | 111 +++++++++ .../bpf/progs/btf_dump_test_case_syntax.c | 229 ++++++++++++++++++ tools/testing/selftests/bpf/test_btf_dump.c | 143 +++++++++++ 10 files changed, 824 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c create mode 100644 tools/testing/selftests/bpf/progs/btf_dump_test_case_multidim.c create mode 100644 tools/testing/selftests/bpf/progs/btf_dump_test_case_namespacing.c create mode 100644 tools/testing/selftests/bpf/progs/btf_dump_test_case_ordering.c create mode 100644 tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c create mode 100644 tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c create mode 100644 tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c create mode 100644 tools/testing/selftests/bpf/test_btf_dump.c diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore index 138b6c063916..b3da2ffdc158 100644 --- a/tools/testing/selftests/bpf/.gitignore +++ b/tools/testing/selftests/bpf/.gitignore @@ -36,3 +36,4 @@ alu32 libbpf.pc libbpf.so.* test_hashmap +test_btf_dump diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index ddae06498a00..cd23758e8b7a 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -23,7 +23,8 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test test_align test_verifier_log test_dev_cgroup test_tcpbpf_user \ test_sock test_btf test_sockmap test_lirc_mode2_user get_cgroup_id_user \ test_socket_cookie test_cgroup_storage test_select_reuseport test_section_names \ - test_netcnt test_tcpnotify_user test_sock_fields test_sysctl test_hashmap + test_netcnt test_tcpnotify_user test_sock_fields test_sysctl test_hashmap \ + test_btf_dump BPF_OBJ_FILES = $(patsubst %.c,%.o, $(notdir $(wildcard progs/*.c))) TEST_GEN_FILES = $(BPF_OBJ_FILES) diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c new file mode 100644 index 
000000000000..8f44767a75fa --- /dev/null +++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +/* + * BTF-to-C dumper tests for bitfield. + * + * Copyright (c) 2019 Facebook + */ +#include + +/* ----- START-EXPECTED-OUTPUT ----- */ +/* + *struct bitfields_only_mixed_types { + * int a: 3; + * long int b: 2; + * _Bool c: 1; + * enum { + * A = 0, + * B = 1, + * } d: 1; + * short e: 5; + * int: 20; + * unsigned int f: 30; + *}; + * + */ +/* ------ END-EXPECTED-OUTPUT ------ */ + +struct bitfields_only_mixed_types { + int a: 3; + long int b: 2; + bool c: 1; /* it's really a _Bool type */ + enum { + A, /* A = 0, dumper is very explicit */ + B, /* B = 1, same */ + } d: 1; + short e: 5; + /* 20-bit padding here */ + unsigned f: 30; /* this gets aligned on 4-byte boundary */ +}; + +/* ----- START-EXPECTED-OUTPUT ----- */ +/* + *struct bitfield_mixed_with_others { + * char: 4; + * int a: 4; + * short b; + * long int c; + * long int d: 8; + * int e; + * int f; + *}; + * + */ +/* ------ END-EXPECTED-OUTPUT ------ */ +struct bitfield_mixed_with_others { + long: 4; /* char is enough as a backing field */ + int a: 4; + /* 8-bit implicit padding */ + short b; /* combined with previous bitfield */ + /* 4 more bytes of implicit padding */ + long c; + long d: 8; + /* 24 bits implicit padding */ + int e; /* combined with previous bitfield */ + int f; + /* 4 bytes of padding */ +}; + +/* ----- START-EXPECTED-OUTPUT ----- */ +/* + *struct bitfield_flushed { + * int a: 4; + * long: 60; + * long int b: 16; + *}; + * + */ +/* ------ END-EXPECTED-OUTPUT ------ */ +struct bitfield_flushed { + int a: 4; + long: 0; /* flush until next natural alignment boundary */ + long b: 16; +}; + +int f(struct { + struct bitfields_only_mixed_types _1; + struct bitfield_mixed_with_others _2; + struct bitfield_flushed _3; +} *_) +{ + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_multidim.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_multidim.c new file mode 100644 index 000000000000..ba97165bdb28 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_multidim.c @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +/* + * BTF-to-C dumper test for multi-dimensional array output. + * + * Copyright (c) 2019 Facebook + */ +/* ----- START-EXPECTED-OUTPUT ----- */ +typedef int arr_t[2]; + +typedef int multiarr_t[3][4][5]; + +typedef int *ptr_arr_t[6]; + +typedef int *ptr_multiarr_t[7][8][9][10]; + +typedef int * (*fn_ptr_arr_t[11])(); + +typedef int * (*fn_ptr_multiarr_t[12][13])(); + +struct root_struct { + arr_t _1; + multiarr_t _2; + ptr_arr_t _3; + ptr_multiarr_t _4; + fn_ptr_arr_t _5; + fn_ptr_multiarr_t _6; +}; + +/* ------ END-EXPECTED-OUTPUT ------ */ + +int f(struct root_struct *s) +{ + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_namespacing.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_namespacing.c new file mode 100644 index 000000000000..92a4ad428710 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_namespacing.c @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +/* + * BTF-to-C dumper test validating no name versioning happens between + * independent C namespaces (struct/union/enum vs typedef/enum values). 
+ * + * Copyright (c) 2019 Facebook + */ +/* ----- START-EXPECTED-OUTPUT ----- */ +struct S { + int S; + int U; +}; + +typedef struct S S; + +union U { + int S; + int U; +}; + +typedef union U U; + +enum E { + V = 0, +}; + +typedef enum E E; + +struct A {}; + +union B {}; + +enum C { + A = 1, + B = 2, + C = 3, +}; + +struct X {}; + +union Y {}; + +enum Z; + +typedef int X; + +typedef int Y; + +typedef int Z; + +/*------ END-EXPECTED-OUTPUT ------ */ + +int f(struct { + struct S _1; + S _2; + union U _3; + U _4; + enum E _5; + E _6; + struct A a; + union B b; + enum C c; + struct X x; + union Y y; + enum Z *z; + X xx; + Y yy; + Z zz; +} *_) +{ + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_ordering.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_ordering.c new file mode 100644 index 000000000000..7c95702ee4cb --- /dev/null +++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_ordering.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +/* + * BTF-to-C dumper test for topological sorting of dependent structs. + * + * Copyright (c) 2019 Facebook + */ +/* ----- START-EXPECTED-OUTPUT ----- */ +struct s1 {}; + +struct s3; + +struct s4; + +struct s2 { + struct s2 *s2; + struct s3 *s3; + struct s4 *s4; +}; + +struct s3 { + struct s1 s1; + struct s2 s2; +}; + +struct s4 { + struct s1 s1; + struct s3 s3; +}; + +struct list_head { + struct list_head *next; + struct list_head *prev; +}; + +struct hlist_node { + struct hlist_node *next; + struct hlist_node **pprev; +}; + +struct hlist_head { + struct hlist_node *first; +}; + +struct callback_head { + struct callback_head *next; + void (*func)(struct callback_head *); +}; + +struct root_struct { + struct s4 s4; + struct list_head l; + struct hlist_node n; + struct hlist_head h; + struct callback_head cb; +}; + +/*------ END-EXPECTED-OUTPUT ------ */ + +int f(struct root_struct *root) +{ + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c new file mode 100644 index 000000000000..1cef3bec1dc7 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +/* + * BTF-to-C dumper tests for struct packing determination. 
+ * + * Copyright (c) 2019 Facebook + */ +/* ----- START-EXPECTED-OUTPUT ----- */ +struct packed_trailing_space { + int a; + short b; +} __attribute__((packed)); + +struct non_packed_trailing_space { + int a; + short b; +}; + +struct packed_fields { + short a; + int b; +} __attribute__((packed)); + +struct non_packed_fields { + short a; + int b; +}; + +struct nested_packed { + char: 4; + int a: 4; + long int b; + struct { + char c; + int d; + } __attribute__((packed)) e; +} __attribute__((packed)); + +union union_is_never_packed { + int a: 4; + char b; + char c: 1; +}; + +union union_does_not_need_packing { + struct { + long int a; + int b; + } __attribute__((packed)); + int c; +}; + +union jump_code_union { + char code[5]; + struct { + char jump; + int offset; + } __attribute__((packed)); +}; + +/*------ END-EXPECTED-OUTPUT ------ */ + +int f(struct { + struct packed_trailing_space _1; + struct non_packed_trailing_space _2; + struct packed_fields _3; + struct non_packed_fields _4; + struct nested_packed _5; + union union_is_never_packed _6; + union union_does_not_need_packing _7; + union jump_code_union _8; +} *_) +{ + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c new file mode 100644 index 000000000000..3a62119c7498 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +/* + * BTF-to-C dumper tests for implicit and explicit padding between fields and + * at the end of a struct. + * + * Copyright (c) 2019 Facebook + */ +/* ----- START-EXPECTED-OUTPUT ----- */ +struct padded_implicitly { + int a; + long int b; + char c; +}; + +/* ------ END-EXPECTED-OUTPUT ------ */ + +/* ----- START-EXPECTED-OUTPUT ----- */ +/* + *struct padded_explicitly { + * int a; + * int: 32; + * int b; + *}; + * + */ +/* ------ END-EXPECTED-OUTPUT ------ */ + +struct padded_explicitly { + int a; + int: 1; /* algo will explicitly pad with full 32 bits here */ + int b; +}; + +/* ----- START-EXPECTED-OUTPUT ----- */ +/* + *struct padded_a_lot { + * int a; + * long: 32; + * long: 64; + * long: 64; + * int b; + *}; + * + */ +/* ------ END-EXPECTED-OUTPUT ------ */ + +struct padded_a_lot { + int a; + /* 32 bit of implicit padding here, which algo will make explicit */ + long: 64; + long: 64; + int b; +}; + +/* ----- START-EXPECTED-OUTPUT ----- */ +/* + *struct padded_cache_line { + * int a; + * long: 32; + * long: 64; + * long: 64; + * long: 64; + * int b; + *}; + * + */ +/* ------ END-EXPECTED-OUTPUT ------ */ + +struct padded_cache_line { + int a; + int b __attribute__((aligned(32))); +}; + +/* ----- START-EXPECTED-OUTPUT ----- */ +/* + *struct zone_padding { + * char x[0]; + *}; + * + *struct zone { + * int a; + * short b; + * short: 16; + * struct zone_padding __pad__; + *}; + * + */ +/* ------ END-EXPECTED-OUTPUT ------ */ + +struct zone_padding { + char x[0]; +} __attribute__((__aligned__(8))); + +struct zone { + int a; + short b; + short: 16; + struct zone_padding __pad__; +}; + +int f(struct { + struct padded_implicitly _1; + struct padded_explicitly _2; + struct padded_a_lot _3; + struct padded_cache_line _4; + struct zone _5; +} *_) +{ + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c new file mode 100644 index 000000000000..d4a02fe44a12 --- /dev/null +++ 
b/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c @@ -0,0 +1,229 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +/* + * BTF-to-C dumper test for majority of C syntax quirks. + * + * Copyright (c) 2019 Facebook + */ +/* ----- START-EXPECTED-OUTPUT ----- */ +enum e1 { + A = 0, + B = 1, +}; + +enum e2 { + C = 100, + D = -100, + E = 0, +}; + +typedef enum e2 e2_t; + +typedef enum { + F = 0, + G = 1, + H = 2, +} e3_t; + +typedef int int_t; + +typedef volatile const int * volatile const crazy_ptr_t; + +typedef int *****we_need_to_go_deeper_ptr_t; + +typedef volatile const we_need_to_go_deeper_ptr_t * restrict * volatile * const * restrict volatile * restrict const * volatile const * restrict volatile const how_about_this_ptr_t; + +typedef int *ptr_arr_t[10]; + +typedef void (*fn_ptr1_t)(int); + +typedef void (*printf_fn_t)(const char *, ...); + +/* ------ END-EXPECTED-OUTPUT ------ */ +/* + * While previous function pointers are pretty trivial (C-syntax-level + * trivial), the following are deciphered here for future generations: + * + * - `fn_ptr2_t`: function, taking anonymous struct as a first arg and pointer + * to a function, that takes int and returns int, as a second arg; returning + * a pointer to a const pointer to a char. Equivalent to: + * typedef struct { int a; } s_t; + * typedef int (*fn_t)(int); + * typedef char * const * (*fn_ptr2_t)(s_t, fn_t); + * + * - `fn_complext_t`: pointer to a function returning struct and accepting + * union and struct. All structs and enum are anonymous and defined inline. + * + * - `signal_t: pointer to a function accepting a pointer to a function as an + * argument and returning pointer to a function as a result. Sane equivalent: + * typedef void (*signal_handler_t)(int); + * typedef signal_handler_t (*signal_ptr_t)(int, signal_handler_t); + * + * - fn_ptr_arr1_t: array of pointers to a function accepting pointer to + * a pointer to an int and returning pointer to a char. Easy. + * + * - fn_ptr_arr2_t: array of const pointers to a function taking no arguments + * and returning a const pointer to a function, that takes pointer to a + * `int -> char *` function and returns pointer to a char. 
Equivalent: + * typedef char * (*fn_input_t)(int); + * typedef char * (*fn_output_outer_t)(fn_input_t); + * typedef const fn_output_outer_t (* fn_output_inner_t)(); + * typedef const fn_output_inner_t fn_ptr_arr2_t[5]; + */ +/* ----- START-EXPECTED-OUTPUT ----- */ +typedef char * const * (*fn_ptr2_t)(struct { + int a; +}, int (*)(int)); + +typedef struct { + int a; + void (*b)(int, struct { + int c; + }, union { + char d; + int e[5]; + }); +} (*fn_complex_t)(union { + void *f; + char g[16]; +}, struct { + int h; +}); + +typedef void (* (*signal_t)(int, void (*)(int)))(int); + +typedef char * (*fn_ptr_arr1_t[10])(int **); + +typedef char * (* const (* const fn_ptr_arr2_t[5])())(char * (*)(int)); + +struct struct_w_typedefs { + int_t a; + crazy_ptr_t b; + we_need_to_go_deeper_ptr_t c; + how_about_this_ptr_t d; + ptr_arr_t e; + fn_ptr1_t f; + printf_fn_t g; + fn_ptr2_t h; + fn_complex_t i; + signal_t j; + fn_ptr_arr1_t k; + fn_ptr_arr2_t l; +}; + +typedef struct { + int x; + int y; + int z; +} anon_struct_t; + +struct struct_fwd; + +typedef struct struct_fwd struct_fwd_t; + +typedef struct struct_fwd *struct_fwd_ptr_t; + +union union_fwd; + +typedef union union_fwd union_fwd_t; + +typedef union union_fwd *union_fwd_ptr_t; + +struct struct_empty {}; + +struct struct_simple { + int a; + char b; + const int_t *p; + struct struct_empty s; + enum e2 e; + enum { + ANON_VAL1 = 1, + ANON_VAL2 = 2, + } f; + int arr1[13]; + enum e2 arr2[5]; +}; + +union union_empty {}; + +union union_simple { + void *ptr; + int num; + int_t num2; + union union_empty u; +}; + +struct struct_in_struct { + struct struct_simple simple; + union union_simple also_simple; + struct { + int a; + } not_so_hard_as_well; + union { + int b; + int c; + } anon_union_is_good; + struct { + int d; + int e; + }; + union { + int f; + int g; + }; +}; + +struct struct_with_embedded_stuff { + int a; + struct { + int b; + struct { + struct struct_with_embedded_stuff *c; + const char *d; + } e; + union { + volatile long int f; + void * restrict g; + }; + }; + union { + const int_t *h; + void (*i)(char, int, void *); + } j; + enum { + K = 100, + L = 200, + } m; + char n[16]; + struct { + char o; + int p; + void (*q)(int); + } r[5]; + struct struct_in_struct s[10]; + int t[11]; +}; + +struct root_struct { + enum e1 _1; + enum e2 _2; + e2_t _2_1; + e3_t _2_2; + struct struct_w_typedefs _3; + anon_struct_t _7; + struct struct_fwd *_8; + struct_fwd_t *_9; + struct_fwd_ptr_t _10; + union union_fwd *_11; + union_fwd_t *_12; + union_fwd_ptr_t _13; + struct struct_with_embedded_stuff _14; +}; + +/* ------ END-EXPECTED-OUTPUT ------ */ + +int f(struct root_struct *s) +{ + return 0; +} diff --git a/tools/testing/selftests/bpf/test_btf_dump.c b/tools/testing/selftests/bpf/test_btf_dump.c new file mode 100644 index 000000000000..8f850823d35f --- /dev/null +++ b/tools/testing/selftests/bpf/test_btf_dump.c @@ -0,0 +1,143 @@ +#include +#include +#include +#include +#include +#include +#include + +#define CHECK(condition, format...) 
({ \ + int __ret = !!(condition); \ + if (__ret) { \ + fprintf(stderr, "%s:%d:FAIL ", __func__, __LINE__); \ + fprintf(stderr, format); \ + } \ + __ret; \ +}) + +void btf_dump_printf(void *ctx, const char *fmt, va_list args) +{ + vfprintf(ctx, fmt, args); +} + +struct btf_dump_test_case { + const char *name; + struct btf_dump_opts opts; +} btf_dump_test_cases[] = { + {.name = "btf_dump_test_case_syntax", .opts = {}}, + {.name = "btf_dump_test_case_ordering", .opts = {}}, + {.name = "btf_dump_test_case_padding", .opts = {}}, + {.name = "btf_dump_test_case_packing", .opts = {}}, + {.name = "btf_dump_test_case_bitfields", .opts = {}}, + {.name = "btf_dump_test_case_multidim", .opts = {}}, + {.name = "btf_dump_test_case_namespacing", .opts = {}}, +}; + +static int btf_dump_all_types(const struct btf *btf, + const struct btf_dump_opts *opts) +{ + size_t type_cnt = btf__get_nr_types(btf); + struct btf_dump *d; + int err = 0, id; + + d = btf_dump__new(btf, NULL, opts, btf_dump_printf); + if (IS_ERR(d)) + return PTR_ERR(d); + + for (id = 1; id <= type_cnt; id++) { + err = btf_dump__dump_type(d, id); + if (err) + goto done; + } + +done: + btf_dump__free(d); + return err; +} + +int test_btf_dump_case(int n, struct btf_dump_test_case *test_case) +{ + char test_file[256], out_file[256], diff_cmd[1024]; + struct btf *btf = NULL; + int err = 0, fd = -1; + FILE *f = NULL; + + fprintf(stderr, "Test case #%d (%s): ", n, test_case->name); + + snprintf(test_file, sizeof(test_file), "%s.o", test_case->name); + + btf = btf__parse_elf(test_file, NULL); + if (CHECK(IS_ERR(btf), + "failed to load test BTF: %ld\n", PTR_ERR(btf))) { + err = -PTR_ERR(btf); + btf = NULL; + goto done; + } + + snprintf(out_file, sizeof(out_file), + "/tmp/%s.output.XXXXXX", test_case->name); + fd = mkstemp(out_file); + if (CHECK(fd < 0, "failed to create temp output file: %d\n", fd)) { + err = fd; + goto done; + } + f = fdopen(fd, "w"); + if (CHECK(f == NULL, "failed to open temp output file: %s(%d)\n", + strerror(errno), errno)) { + close(fd); + goto done; + } + + test_case->opts.ctx = f; + err = btf_dump_all_types(btf, &test_case->opts); + fclose(f); + close(fd); + if (CHECK(err, "failure during C dumping: %d\n", err)) { + goto done; + } + + snprintf(test_file, sizeof(test_file), "progs/%s.c", test_case->name); + /* + * Diff test output and expected test output, contained between + * START-EXPECTED-OUTPUT and END-EXPECTED-OUTPUT lines in test case. + * For expected output lines, everything before '*' is stripped out. + * Also lines containing comment start and comment end markers are + * ignored. 
+ */ + snprintf(diff_cmd, sizeof(diff_cmd), + "awk '/START-EXPECTED-OUTPUT/{out=1;next} " + "/END-EXPECTED-OUTPUT/{out=0} " + "/\\/\\*|\\*\\//{next} " /* ignore comment start/end lines */ + "out {sub(/^[ \\t]*\\*/, \"\"); print}' '%s' | diff -u - '%s'", + test_file, out_file); + err = system(diff_cmd); + if (CHECK(err, + "differing test output, output=%s, err=%d, diff cmd:\n%s\n", + out_file, err, diff_cmd)) + goto done; + + remove(out_file); + fprintf(stderr, "OK\n"); + +done: + btf__free(btf); + return err; +} + +int main() { + int test_case_cnt, i, err, failed = 0; + + test_case_cnt = sizeof(btf_dump_test_cases) / + sizeof(btf_dump_test_cases[0]); + + for (i = 0; i < test_case_cnt; i++) { + err = test_btf_dump_case(i, &btf_dump_test_cases[i]); + if (err) + failed++; + } + + fprintf(stderr, "%d tests succeeded, %d tests failed.\n", + test_case_cnt - failed, failed); + + return failed; +} From 2119f2189df1836d28909fe5a90b3d93638ecf65 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 24 May 2019 11:59:05 -0700 Subject: [PATCH 19/83] bpftool: add C output format option to btf dump subcommand Utilize new libbpf's btf_dump API to emit BTF as a C definitions. Acked-by: Jakub Kicinski Signed-off-by: Andrii Nakryiko Reviewed-by: Quentin Monnet Signed-off-by: Alexei Starovoitov --- tools/bpf/bpftool/btf.c | 75 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 73 insertions(+), 2 deletions(-) diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c index a22ef6587ebe..1b8ec91899e6 100644 --- a/tools/bpf/bpftool/btf.c +++ b/tools/bpf/bpftool/btf.c @@ -340,11 +340,49 @@ static int dump_btf_raw(const struct btf *btf, return 0; } +static void __printf(2, 0) btf_dump_printf(void *ctx, + const char *fmt, va_list args) +{ + vfprintf(stdout, fmt, args); +} + +static int dump_btf_c(const struct btf *btf, + __u32 *root_type_ids, int root_type_cnt) +{ + struct btf_dump *d; + int err = 0, i; + + d = btf_dump__new(btf, NULL, NULL, btf_dump_printf); + if (IS_ERR(d)) + return PTR_ERR(d); + + if (root_type_cnt) { + for (i = 0; i < root_type_cnt; i++) { + err = btf_dump__dump_type(d, root_type_ids[i]); + if (err) + goto done; + } + } else { + int cnt = btf__get_nr_types(btf); + + for (i = 1; i <= cnt; i++) { + err = btf_dump__dump_type(d, i); + if (err) + goto done; + } + } + +done: + btf_dump__free(d); + return err; +} + static int do_dump(int argc, char **argv) { struct btf *btf = NULL; __u32 root_type_ids[2]; int root_type_cnt = 0; + bool dump_c = false; __u32 btf_id = -1; const char *src; int fd = -1; @@ -431,6 +469,29 @@ static int do_dump(int argc, char **argv) goto done; } + while (argc) { + if (is_prefix(*argv, "format")) { + NEXT_ARG(); + if (argc < 1) { + p_err("expecting value for 'format' option\n"); + goto done; + } + if (strcmp(*argv, "c") == 0) { + dump_c = true; + } else if (strcmp(*argv, "raw") == 0) { + dump_c = false; + } else { + p_err("unrecognized format specifier: '%s', possible values: raw, c", + *argv); + goto done; + } + NEXT_ARG(); + } else { + p_err("unrecognized option: '%s'", *argv); + goto done; + } + } + if (!btf) { err = btf__get_from_id(btf_id, &btf); if (err) { @@ -444,7 +505,16 @@ static int do_dump(int argc, char **argv) } } - dump_btf_raw(btf, root_type_ids, root_type_cnt); + if (dump_c) { + if (json_output) { + p_err("JSON output for C-syntax dump is not supported"); + err = -ENOTSUP; + goto done; + } + err = dump_btf_c(btf, root_type_ids, root_type_cnt); + } else { + err = dump_btf_raw(btf, root_type_ids, root_type_cnt); + } done: close(fd); @@ -460,10 
+530,11 @@ static int do_help(int argc, char **argv) } fprintf(stderr, - "Usage: %s btf dump BTF_SRC\n" + "Usage: %s btf dump BTF_SRC [format FORMAT]\n" " %s btf help\n" "\n" " BTF_SRC := { id BTF_ID | prog PROG | map MAP [{key | value | kv | all}] | file FILE }\n" + " FORMAT := { raw | c }\n" " " HELP_SPEC_MAP "\n" " " HELP_SPEC_PROGRAM "\n" " " HELP_SPEC_OPTIONS "\n" From 220ba451124e275ff548bec33b0dc01e40895b06 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 24 May 2019 11:59:06 -0700 Subject: [PATCH 20/83] bpftool/docs: add description of btf dump C option Document optional **c** option for btf dump subcommand. Cc: Quentin Monnet Signed-off-by: Andrii Nakryiko Reviewed-by: Quentin Monnet Signed-off-by: Alexei Starovoitov --- .../bpf/bpftool/Documentation/bpftool-btf.rst | 35 +++++++++++-------- 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/tools/bpf/bpftool/Documentation/bpftool-btf.rst b/tools/bpf/bpftool/Documentation/bpftool-btf.rst index 2dbc1413fabd..3daed9eba766 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-btf.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-btf.rst @@ -19,10 +19,11 @@ SYNOPSIS BTF COMMANDS ============= -| **bpftool** **btf dump** *BTF_SRC* +| **bpftool** **btf dump** *BTF_SRC* [**format** *FORMAT*] | **bpftool** **btf help** | | *BTF_SRC* := { **id** *BTF_ID* | **prog** *PROG* | **map** *MAP* [{**key** | **value** | **kv** | **all**}] | **file** *FILE* } +| *FORMAT* := { **raw** | **c** } | *MAP* := { **id** *MAP_ID* | **pinned** *FILE* } | *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* } @@ -31,23 +32,27 @@ DESCRIPTION **bpftool btf dump** *BTF_SRC* Dump BTF entries from a given *BTF_SRC*. - When **id** is specified, BTF object with that ID will be - loaded and all its BTF types emitted. + When **id** is specified, BTF object with that ID will be + loaded and all its BTF types emitted. - When **map** is provided, it's expected that map has - associated BTF object with BTF types describing key and - value. It's possible to select whether to dump only BTF - type(s) associated with key (**key**), value (**value**), - both key and value (**kv**), or all BTF types present in - associated BTF object (**all**). If not specified, **kv** - is assumed. + When **map** is provided, it's expected that map has + associated BTF object with BTF types describing key and + value. It's possible to select whether to dump only BTF + type(s) associated with key (**key**), value (**value**), + both key and value (**kv**), or all BTF types present in + associated BTF object (**all**). If not specified, **kv** + is assumed. - When **prog** is provided, it's expected that program has - associated BTF object with BTF types. + When **prog** is provided, it's expected that program has + associated BTF object with BTF types. - When specifying *FILE*, an ELF file is expected, containing - .BTF section with well-defined BTF binary format data, - typically produced by clang or pahole. + When specifying *FILE*, an ELF file is expected, containing + .BTF section with well-defined BTF binary format data, + typically produced by clang or pahole. + + **format** option can be used to override default (raw) + output format. Raw (**raw**) or C-syntax (**c**) output + formats are supported. **bpftool btf help** Print short help message. 
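For readers wanting to drive the BTF-to-C conversion outside of bpftool, the btf_dump API added earlier in this series is consumed roughly as sketched below. This is a hedged, minimal example modeled on the selftest above and on bpftool's dump_btf_c(); the ELF path and function name are placeholders, the include paths assume an installed libbpf, and error handling is abbreviated -- it is not code from any patch in the series.

	#include <stdio.h>
	#include <stdarg.h>
	#include <bpf/btf.h>	/* btf__parse_elf, btf_dump__* */
	#include <bpf/libbpf.h>	/* libbpf_get_error */

	/* btf_dump emits C text through a caller-provided vprintf-style
	 * callback; ctx is whatever was stored in btf_dump_opts.ctx
	 * (a FILE * in this sketch).
	 */
	static void print_cb(void *ctx, const char *fmt, va_list args)
	{
		vfprintf(ctx, fmt, args);
	}

	static int dump_elf_btf_as_c(const char *elf_path)
	{
		struct btf_dump_opts opts = { .ctx = stdout };
		__u32 i, nr_types;
		struct btf_dump *d;
		struct btf *btf;
		int err = 0;

		btf = btf__parse_elf(elf_path, NULL);	/* load .BTF section */
		if (libbpf_get_error(btf))
			return -1;

		d = btf_dump__new(btf, NULL, &opts, print_cb);
		if (libbpf_get_error(d)) {
			btf__free(btf);
			return -1;
		}

		/* Type IDs are 1-based; each call emits the type plus any
		 * not-yet-emitted dependencies exactly once, so walking all
		 * IDs prints every type definition.
		 */
		nr_types = btf__get_nr_types(btf);
		for (i = 1; i <= nr_types; i++) {
			err = btf_dump__dump_type(d, i);
			if (err)
				break;
		}

		btf_dump__free(d);
		btf__free(btf);
		return err;
	}

With the bpftool patches above applied, the equivalent command line is simply "bpftool btf dump file <ELF> format c".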
From 90eea4086d5ed31936889a44d536bf77afa4ca8a Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 24 May 2019 11:59:07 -0700 Subject: [PATCH 21/83] bpftool: update bash-completion w/ new c option for btf dump Add bash completion for new C btf dump option. Cc: Quentin Monnet Signed-off-by: Andrii Nakryiko Reviewed-by: Quentin Monnet Signed-off-by: Alexei Starovoitov --- tools/bpf/bpftool/bash-completion/bpftool | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool index 50e402a5a9c8..75c01eafd3a1 100644 --- a/tools/bpf/bpftool/bash-completion/bpftool +++ b/tools/bpf/bpftool/bash-completion/bpftool @@ -638,11 +638,24 @@ _bpftool() esac return 0 ;; + format) + COMPREPLY=( $( compgen -W "c raw" -- "$cur" ) ) + ;; *) - if [[ $cword == 6 ]] && [[ ${words[3]} == "map" ]]; then - COMPREPLY+=( $( compgen -W 'key value kv all' -- \ - "$cur" ) ) - fi + # emit extra options + case ${words[3]} in + id|file) + _bpftool_once_attr 'format' + ;; + map|prog) + if [[ ${words[3]} == "map" ]] && [[ $cword == 6 ]]; then + COMPREPLY+=( $( compgen -W "key value kv all" -- "$cur" ) ) + fi + _bpftool_once_attr 'format' + ;; + *) + ;; + esac return 0 ;; esac From 8b401f9ed2441ad9e219953927a842d24ed051fc Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 23 May 2019 14:47:45 -0700 Subject: [PATCH 22/83] bpf: implement bpf_send_signal() helper This patch tries to solve the following specific use case. Currently, bpf program can already collect stack traces through kernel function get_perf_callchain() when certain events happens (e.g., cache miss counter or cpu clock counter overflows). But such stack traces are not enough for jitted programs, e.g., hhvm (jited php). To get real stack trace, jit engine internal data structures need to be traversed in order to get the real user functions. bpf program itself may not be the best place to traverse the jit engine as the traversing logic could be complex and it is not a stable interface either. Instead, hhvm implements a signal handler, e.g. for SIGALARM, and a set of program locations which it can dump stack traces. When it receives a signal, it will dump the stack in next such program location. Such a mechanism can be implemented in the following way: . a perf ring buffer is created between bpf program and tracing app. . once a particular event happens, bpf program writes to the ring buffer and the tracing app gets notified. . the tracing app sends a signal SIGALARM to the hhvm. But this method could have large delays and causing profiling results skewed. This patch implements bpf_send_signal() helper to send a signal to hhvm in real time, resulting in intended stack traces. Acked-by: Andrii Nakryiko Signed-off-by: Yonghong Song Signed-off-by: Daniel Borkmann --- include/uapi/linux/bpf.h | 17 +++++++++- kernel/trace/bpf_trace.c | 72 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 88 insertions(+), 1 deletion(-) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 63e0cf66f01a..68d4470523a0 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -2672,6 +2672,20 @@ union bpf_attr { * 0 on success. * * **-ENOENT** if the bpf-local-storage cannot be found. + * + * int bpf_send_signal(u32 sig) + * Description + * Send signal *sig* to the current task. + * Return + * 0 on success or successfully queued. + * + * **-EBUSY** if work queue under nmi is full. + * + * **-EINVAL** if *sig* is invalid. 
+ * + * **-EPERM** if no permission to send the *sig*. + * + * **-EAGAIN** if bpf program can try again. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -2782,7 +2796,8 @@ union bpf_attr { FN(strtol), \ FN(strtoul), \ FN(sk_storage_get), \ - FN(sk_storage_delete), + FN(sk_storage_delete), \ + FN(send_signal), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index f92d6ad5e080..70029eafc71f 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -567,6 +567,63 @@ static const struct bpf_func_proto bpf_probe_read_str_proto = { .arg3_type = ARG_ANYTHING, }; +struct send_signal_irq_work { + struct irq_work irq_work; + struct task_struct *task; + u32 sig; +}; + +static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work); + +static void do_bpf_send_signal(struct irq_work *entry) +{ + struct send_signal_irq_work *work; + + work = container_of(entry, struct send_signal_irq_work, irq_work); + group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, PIDTYPE_TGID); +} + +BPF_CALL_1(bpf_send_signal, u32, sig) +{ + struct send_signal_irq_work *work = NULL; + + /* Similar to bpf_probe_write_user, task needs to be + * in a sound condition and kernel memory access be + * permitted in order to send signal to the current + * task. + */ + if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING))) + return -EPERM; + if (unlikely(uaccess_kernel())) + return -EPERM; + if (unlikely(!nmi_uaccess_okay())) + return -EPERM; + + if (in_nmi()) { + work = this_cpu_ptr(&send_signal_work); + if (work->irq_work.flags & IRQ_WORK_BUSY) + return -EBUSY; + + /* Add the current task, which is the target of sending signal, + * to the irq_work. The current task may change when queued + * irq works get executed. + */ + work->task = current; + work->sig = sig; + irq_work_queue(&work->irq_work); + return 0; + } + + return group_send_sig_info(sig, SEND_SIG_PRIV, current, PIDTYPE_TGID); +} + +static const struct bpf_func_proto bpf_send_signal_proto = { + .func = bpf_send_signal, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_ANYTHING, +}; + static const struct bpf_func_proto * tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { @@ -617,6 +674,8 @@ tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) case BPF_FUNC_get_current_cgroup_id: return &bpf_get_current_cgroup_id_proto; #endif + case BPF_FUNC_send_signal: + return &bpf_send_signal_proto; default: return NULL; } @@ -1343,5 +1402,18 @@ static int __init bpf_event_init(void) return 0; } +static int __init send_signal_irq_work_init(void) +{ + int cpu; + struct send_signal_irq_work *work; + + for_each_possible_cpu(cpu) { + work = per_cpu_ptr(&send_signal_work, cpu); + init_irq_work(&work->irq_work, do_bpf_send_signal); + } + return 0; +} + fs_initcall(bpf_event_init); +subsys_initcall(send_signal_irq_work_init); #endif /* CONFIG_MODULES */ From edaccf8985305967c22903a78283c8c837ea48dd Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 23 May 2019 14:47:46 -0700 Subject: [PATCH 23/83] tools/bpf: sync bpf uapi header bpf.h to tools directory The bpf uapi header include/uapi/linux/bpf.h is sync'ed to tools/include/uapi/linux/bpf.h. 
Acked-by: Andrii Nakryiko Signed-off-by: Yonghong Song Signed-off-by: Daniel Borkmann --- tools/include/uapi/linux/bpf.h | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 63e0cf66f01a..68d4470523a0 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -2672,6 +2672,20 @@ union bpf_attr { * 0 on success. * * **-ENOENT** if the bpf-local-storage cannot be found. + * + * int bpf_send_signal(u32 sig) + * Description + * Send signal *sig* to the current task. + * Return + * 0 on success or successfully queued. + * + * **-EBUSY** if work queue under nmi is full. + * + * **-EINVAL** if *sig* is invalid. + * + * **-EPERM** if no permission to send the *sig*. + * + * **-EAGAIN** if bpf program can try again. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -2782,7 +2796,8 @@ union bpf_attr { FN(strtol), \ FN(strtoul), \ FN(sk_storage_get), \ - FN(sk_storage_delete), + FN(sk_storage_delete), \ + FN(send_signal), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call From 16f0efc3b46352018c297bbdb2c405e7d8a63095 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 23 May 2019 14:47:47 -0700 Subject: [PATCH 24/83] tools/bpf: add selftest in test_progs for bpf_send_signal() helper The test covered both nmi and tracepoint perf events. $ ./test_progs ... test_send_signal_tracepoint:PASS:tracepoint 0 nsec ... test_send_signal_common:PASS:tracepoint 0 nsec ... test_send_signal_common:PASS:perf_event 0 nsec ... test_send_signal:OK Acked-by: Andrii Nakryiko Signed-off-by: Yonghong Song Signed-off-by: Daniel Borkmann --- tools/testing/selftests/bpf/bpf_helpers.h | 1 + .../selftests/bpf/prog_tests/send_signal.c | 198 ++++++++++++++++++ .../bpf/progs/test_send_signal_kern.c | 51 +++++ 3 files changed, 250 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/send_signal.c create mode 100644 tools/testing/selftests/bpf/progs/test_send_signal_kern.c diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h index 4b27840b8109..e6d243b7cd74 100644 --- a/tools/testing/selftests/bpf/bpf_helpers.h +++ b/tools/testing/selftests/bpf/bpf_helpers.h @@ -224,6 +224,7 @@ static void *(*bpf_sk_storage_get)(void *map, struct bpf_sock *sk, (void *) BPF_FUNC_sk_storage_get; static int (*bpf_sk_storage_delete)(void *map, struct bpf_sock *sk) = (void *)BPF_FUNC_sk_storage_delete; +static int (*bpf_send_signal)(unsigned sig) = (void *)BPF_FUNC_send_signal; /* llvm builtin functions that eBPF C program may use to * emit BPF_LD_ABS and BPF_LD_IND instructions diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal.c b/tools/testing/selftests/bpf/prog_tests/send_signal.c new file mode 100644 index 000000000000..67cea1686305 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/send_signal.c @@ -0,0 +1,198 @@ +// SPDX-License-Identifier: GPL-2.0 +#include + +static volatile int sigusr1_received = 0; + +static void sigusr1_handler(int signum) +{ + sigusr1_received++; +} + +static int test_send_signal_common(struct perf_event_attr *attr, + int prog_type, + const char *test_name) +{ + int err = -1, pmu_fd, prog_fd, info_map_fd, status_map_fd; + const char *file = "./test_send_signal_kern.o"; + struct bpf_object *obj = NULL; + int pipe_c2p[2], pipe_p2c[2]; + __u32 key = 0, duration = 0; + char buf[256]; + pid_t pid; + __u64 val; + + if (CHECK(pipe(pipe_c2p), test_name, + 
"pipe pipe_c2p error: %s\n", strerror(errno))) + goto no_fork_done; + + if (CHECK(pipe(pipe_p2c), test_name, + "pipe pipe_p2c error: %s\n", strerror(errno))) { + close(pipe_c2p[0]); + close(pipe_c2p[1]); + goto no_fork_done; + } + + pid = fork(); + if (CHECK(pid < 0, test_name, "fork error: %s\n", strerror(errno))) { + close(pipe_c2p[0]); + close(pipe_c2p[1]); + close(pipe_p2c[0]); + close(pipe_p2c[1]); + goto no_fork_done; + } + + if (pid == 0) { + /* install signal handler and notify parent */ + signal(SIGUSR1, sigusr1_handler); + + close(pipe_c2p[0]); /* close read */ + close(pipe_p2c[1]); /* close write */ + + /* notify parent signal handler is installed */ + write(pipe_c2p[1], buf, 1); + + /* make sure parent enabled bpf program to send_signal */ + read(pipe_p2c[0], buf, 1); + + /* wait a little for signal handler */ + sleep(1); + + if (sigusr1_received) + write(pipe_c2p[1], "2", 1); + else + write(pipe_c2p[1], "0", 1); + + /* wait for parent notification and exit */ + read(pipe_p2c[0], buf, 1); + + close(pipe_c2p[1]); + close(pipe_p2c[0]); + exit(0); + } + + close(pipe_c2p[1]); /* close write */ + close(pipe_p2c[0]); /* close read */ + + err = bpf_prog_load(file, prog_type, &obj, &prog_fd); + if (CHECK(err < 0, test_name, "bpf_prog_load error: %s\n", + strerror(errno))) + goto prog_load_failure; + + pmu_fd = syscall(__NR_perf_event_open, attr, pid, -1, + -1 /* group id */, 0 /* flags */); + if (CHECK(pmu_fd < 0, test_name, "perf_event_open error: %s\n", + strerror(errno))) { + err = -1; + goto close_prog; + } + + err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0); + if (CHECK(err < 0, test_name, "ioctl perf_event_ioc_enable error: %s\n", + strerror(errno))) + goto disable_pmu; + + err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd); + if (CHECK(err < 0, test_name, "ioctl perf_event_ioc_set_bpf error: %s\n", + strerror(errno))) + goto disable_pmu; + + err = -1; + info_map_fd = bpf_object__find_map_fd_by_name(obj, "info_map"); + if (CHECK(info_map_fd < 0, test_name, "find map %s error\n", "info_map")) + goto disable_pmu; + + status_map_fd = bpf_object__find_map_fd_by_name(obj, "status_map"); + if (CHECK(status_map_fd < 0, test_name, "find map %s error\n", "status_map")) + goto disable_pmu; + + /* wait until child signal handler installed */ + read(pipe_c2p[0], buf, 1); + + /* trigger the bpf send_signal */ + key = 0; + val = (((__u64)(SIGUSR1)) << 32) | pid; + bpf_map_update_elem(info_map_fd, &key, &val, 0); + + /* notify child that bpf program can send_signal now */ + write(pipe_p2c[1], buf, 1); + + /* wait for result */ + err = read(pipe_c2p[0], buf, 1); + if (CHECK(err < 0, test_name, "reading pipe error: %s\n", strerror(errno))) + goto disable_pmu; + if (CHECK(err == 0, test_name, "reading pipe error: size 0\n")) { + err = -1; + goto disable_pmu; + } + + err = CHECK(buf[0] != '2', test_name, "incorrect result\n"); + + /* notify child safe to exit */ + write(pipe_p2c[1], buf, 1); + +disable_pmu: + close(pmu_fd); +close_prog: + bpf_object__close(obj); +prog_load_failure: + close(pipe_c2p[0]); + close(pipe_p2c[1]); + wait(NULL); +no_fork_done: + return err; +} + +static int test_send_signal_tracepoint(void) +{ + const char *id_path = "/sys/kernel/debug/tracing/events/syscalls/sys_enter_nanosleep/id"; + struct perf_event_attr attr = { + .type = PERF_TYPE_TRACEPOINT, + .sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN, + .sample_period = 1, + .wakeup_events = 1, + }; + __u32 duration = 0; + int bytes, efd; + char buf[256]; + + efd = open(id_path, O_RDONLY, 0); + if (CHECK(efd < 0, 
"tracepoint", + "open syscalls/sys_enter_nanosleep/id failure: %s\n", + strerror(errno))) + return -1; + + bytes = read(efd, buf, sizeof(buf)); + close(efd); + if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "tracepoint", + "read syscalls/sys_enter_nanosleep/id failure: %s\n", + strerror(errno))) + return -1; + + attr.config = strtol(buf, NULL, 0); + + return test_send_signal_common(&attr, BPF_PROG_TYPE_TRACEPOINT, "tracepoint"); +} + +static int test_send_signal_nmi(void) +{ + struct perf_event_attr attr = { + .sample_freq = 50, + .freq = 1, + .type = PERF_TYPE_HARDWARE, + .config = PERF_COUNT_HW_CPU_CYCLES, + }; + + return test_send_signal_common(&attr, BPF_PROG_TYPE_PERF_EVENT, "perf_event"); +} + +void test_send_signal(void) +{ + int ret = 0; + + ret |= test_send_signal_tracepoint(); + ret |= test_send_signal_nmi(); + if (!ret) + printf("test_send_signal:OK\n"); + else + printf("test_send_signal:FAIL\n"); +} diff --git a/tools/testing/selftests/bpf/progs/test_send_signal_kern.c b/tools/testing/selftests/bpf/progs/test_send_signal_kern.c new file mode 100644 index 000000000000..45a1a1a2c345 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_send_signal_kern.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2019 Facebook +#include +#include +#include "bpf_helpers.h" + +struct bpf_map_def SEC("maps") info_map = { + .type = BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(__u32), + .value_size = sizeof(__u64), + .max_entries = 1, +}; + +BPF_ANNOTATE_KV_PAIR(info_map, __u32, __u64); + +struct bpf_map_def SEC("maps") status_map = { + .type = BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(__u32), + .value_size = sizeof(__u64), + .max_entries = 1, +}; + +BPF_ANNOTATE_KV_PAIR(status_map, __u32, __u64); + +SEC("send_signal_demo") +int bpf_send_signal_test(void *ctx) +{ + __u64 *info_val, *status_val; + __u32 key = 0, pid, sig; + int ret; + + status_val = bpf_map_lookup_elem(&status_map, &key); + if (!status_val || *status_val != 0) + return 0; + + info_val = bpf_map_lookup_elem(&info_map, &key); + if (!info_val || *info_val == 0) + return 0; + + sig = *info_val >> 32; + pid = *info_val & 0xffffFFFF; + + if ((bpf_get_current_pid_tgid() >> 32) == pid) { + ret = bpf_send_signal(sig); + if (ret == 0) + *status_val = 1; + } + + return 0; +} +char __license[] SEC("license") = "GPL"; From 5327ed3d44b754f5cc51d5b3f18e442eaebacff5 Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Fri, 24 May 2019 23:25:12 +0100 Subject: [PATCH 25/83] bpf: verifier: mark verified-insn with sub-register zext flag eBPF ISA specification requires high 32-bit cleared when low 32-bit sub-register is written. This applies to destination register of ALU32 etc. JIT back-ends must guarantee this semantic when doing code-gen. x86_64 and AArch64 ISA has the same semantics, so the corresponding JIT back-end doesn't need to do extra work. However, 32-bit arches (arm, x86, nfp etc.) and some other 64-bit arches (PowerPC, SPARC etc) need to do explicit zero extension to meet this requirement, otherwise code like the following will fail. u64_value = (u64) u32_value ... other uses of u64_value This is because compiler could exploit the semantic described above and save those zero extensions for extending u32_value to u64_value, these JIT back-ends are expected to guarantee this through inserting extra zero extensions which however could be a significant increase on the code size. Some benchmarks show there could be ~40% sub-register writes out of total insns, meaning at least ~40% extra code-gen. 
One observation is these extra zero extensions are not always necessary. Take the above code snippet for example: it is possible u32_value will never be cast into a u64, in which case the high 32 bits of u32_value can be ignored and the extra zero extension eliminated. This patch implements this idea: insns defining sub-registers are marked when the high 32 bits of the defined sub-register matter. For those unmarked insns, it is safe to eliminate the high 32-bit clearance for them. Algo: - Split read flags into READ32 and READ64. - Record index of insn that does sub-register write. Keep the index inside reg state and update it during verifier insn walking. - A full register read on a sub-register marks its definition insn as needing zero extension on dst register. A new sub-register write overrides the old one. - When propagating read64 during path pruning, also mark any insn defining a sub-register that is read in the pruned path as full-register. Reviewed-by: Jakub Kicinski Signed-off-by: Jiong Wang Signed-off-by: Alexei Starovoitov --- include/linux/bpf_verifier.h | 14 ++- kernel/bpf/verifier.c | 173 ++++++++++++++++++++++++++++++++--- 2 files changed, 171 insertions(+), 16 deletions(-) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 405b502283c5..704ed7971472 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -36,9 +36,11 @@ */ enum bpf_reg_liveness { REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */ - REG_LIVE_READ, /* reg was read, so we're sensitive to initial value */ - REG_LIVE_WRITTEN, /* reg was written first, screening off later reads */ - REG_LIVE_DONE = 4, /* liveness won't be updating this register anymore */ + REG_LIVE_READ32 = 0x1, /* reg was read, so we're sensitive to initial value */ + REG_LIVE_READ64 = 0x2, /* likewise, but full 64-bit content matters */ + REG_LIVE_READ = REG_LIVE_READ32 | REG_LIVE_READ64, + REG_LIVE_WRITTEN = 0x4, /* reg was written first, screening off later reads */ + REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */ }; struct bpf_reg_state { @@ -131,6 +133,11 @@ struct bpf_reg_state { * pointing to bpf_func_state. */ u32 frameno; + /* Tracks subreg definition. The stored value is the insn_idx of the + * writing insn. This is safe because subreg_def is used before any insn + * patching which only happens after main verification finished.
+ */ + s32 subreg_def; enum bpf_reg_liveness live; }; @@ -233,6 +240,7 @@ struct bpf_insn_aux_data { int ctx_field_size; /* the ctx field size for load insn, maybe 0 */ int sanitize_stack_off; /* stack slot to be cleared */ bool seen; /* this insn was processed by the verifier */ + bool zext_dst; /* this insn zero extends dst reg */ u8 alu_state; /* used in combination with alu_limit */ bool prune_point; unsigned int orig_idx; /* original instruction index */ diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 550091c7a46a..f6b4c7148c3e 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -982,6 +982,7 @@ static void mark_reg_not_init(struct bpf_verifier_env *env, __mark_reg_not_init(regs + regno); } +#define DEF_NOT_SUBREG (0) static void init_reg_state(struct bpf_verifier_env *env, struct bpf_func_state *state) { @@ -992,6 +993,7 @@ static void init_reg_state(struct bpf_verifier_env *env, mark_reg_not_init(env, regs, i); regs[i].live = REG_LIVE_NONE; regs[i].parent = NULL; + regs[i].subreg_def = DEF_NOT_SUBREG; } /* frame pointer */ @@ -1137,7 +1139,7 @@ next: */ static int mark_reg_read(struct bpf_verifier_env *env, const struct bpf_reg_state *state, - struct bpf_reg_state *parent) + struct bpf_reg_state *parent, u8 flag) { bool writes = parent == state->parent; /* Observe write marks */ int cnt = 0; @@ -1152,17 +1154,26 @@ static int mark_reg_read(struct bpf_verifier_env *env, parent->var_off.value, parent->off); return -EFAULT; } - if (parent->live & REG_LIVE_READ) + /* The first condition is more likely to be true than the + * second, checked it first. + */ + if ((parent->live & REG_LIVE_READ) == flag || + parent->live & REG_LIVE_READ64) /* The parentage chain never changes and * this parent was already marked as LIVE_READ. * There is no need to keep walking the chain again and * keep re-marking all parents as LIVE_READ. * This case happens when the same register is read * multiple times without writes into it in-between. + * Also, if parent has the stronger REG_LIVE_READ64 set, + * then no need to set the weak REG_LIVE_READ32. */ break; /* ... then we depend on parent's value */ - parent->live |= REG_LIVE_READ; + parent->live |= flag; + /* REG_LIVE_READ64 overrides REG_LIVE_READ32. */ + if (flag == REG_LIVE_READ64) + parent->live &= ~REG_LIVE_READ32; state = parent; parent = state->parent; writes = true; @@ -1174,12 +1185,111 @@ static int mark_reg_read(struct bpf_verifier_env *env, return 0; } +/* This function is supposed to be used by the following 32-bit optimization + * code only. It returns TRUE if the source or destination register operates + * on 64-bit, otherwise return FALSE. + */ +static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn, + u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t) +{ + u8 code, class, op; + + code = insn->code; + class = BPF_CLASS(code); + op = BPF_OP(code); + if (class == BPF_JMP) { + /* BPF_EXIT for "main" will reach here. Return TRUE + * conservatively. + */ + if (op == BPF_EXIT) + return true; + if (op == BPF_CALL) { + /* BPF to BPF call will reach here because of marking + * caller saved clobber with DST_OP_NO_MARK for which we + * don't care the register def because they are anyway + * marked as NOT_INIT already. + */ + if (insn->src_reg == BPF_PSEUDO_CALL) + return false; + /* Helper call will reach here because of arg type + * check, conservatively return TRUE. 
+ */ + if (t == SRC_OP) + return true; + + return false; + } + } + + if (class == BPF_ALU64 || class == BPF_JMP || + /* BPF_END always use BPF_ALU class. */ + (class == BPF_ALU && op == BPF_END && insn->imm == 64)) + return true; + + if (class == BPF_ALU || class == BPF_JMP32) + return false; + + if (class == BPF_LDX) { + if (t != SRC_OP) + return BPF_SIZE(code) == BPF_DW; + /* LDX source must be ptr. */ + return true; + } + + if (class == BPF_STX) { + if (reg->type != SCALAR_VALUE) + return true; + return BPF_SIZE(code) == BPF_DW; + } + + if (class == BPF_LD) { + u8 mode = BPF_MODE(code); + + /* LD_IMM64 */ + if (mode == BPF_IMM) + return true; + + /* Both LD_IND and LD_ABS return 32-bit data. */ + if (t != SRC_OP) + return false; + + /* Implicit ctx ptr. */ + if (regno == BPF_REG_6) + return true; + + /* Explicit source could be any width. */ + return true; + } + + if (class == BPF_ST) + /* The only source register for BPF_ST is a ptr. */ + return true; + + /* Conservatively return true at default. */ + return true; +} + +static void mark_insn_zext(struct bpf_verifier_env *env, + struct bpf_reg_state *reg) +{ + s32 def_idx = reg->subreg_def; + + if (def_idx == DEF_NOT_SUBREG) + return; + + env->insn_aux_data[def_idx - 1].zext_dst = true; + /* The dst will be zero extended, so won't be sub-register anymore. */ + reg->subreg_def = DEF_NOT_SUBREG; +} + static int check_reg_arg(struct bpf_verifier_env *env, u32 regno, enum reg_arg_type t) { struct bpf_verifier_state *vstate = env->cur_state; struct bpf_func_state *state = vstate->frame[vstate->curframe]; + struct bpf_insn *insn = env->prog->insnsi + env->insn_idx; struct bpf_reg_state *reg, *regs = state->regs; + bool rw64; if (regno >= MAX_BPF_REG) { verbose(env, "R%d is invalid\n", regno); @@ -1187,6 +1297,7 @@ static int check_reg_arg(struct bpf_verifier_env *env, u32 regno, } reg = ®s[regno]; + rw64 = is_reg64(env, insn, regno, reg, t); if (t == SRC_OP) { /* check whether register used as source operand can be read */ if (reg->type == NOT_INIT) { @@ -1197,7 +1308,11 @@ static int check_reg_arg(struct bpf_verifier_env *env, u32 regno, if (regno == BPF_REG_FP) return 0; - return mark_reg_read(env, reg, reg->parent); + if (rw64) + mark_insn_zext(env, reg); + + return mark_reg_read(env, reg, reg->parent, + rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32); } else { /* check whether register used as dest operand can be written to */ if (regno == BPF_REG_FP) { @@ -1205,6 +1320,7 @@ static int check_reg_arg(struct bpf_verifier_env *env, u32 regno, return -EACCES; } reg->live |= REG_LIVE_WRITTEN; + reg->subreg_def = rw64 ? 
DEF_NOT_SUBREG : env->insn_idx + 1; if (t == DST_OP) mark_reg_unknown(env, regs, regno); } @@ -1384,7 +1500,8 @@ static int check_stack_read(struct bpf_verifier_env *env, state->regs[value_regno].live |= REG_LIVE_WRITTEN; } mark_reg_read(env, ®_state->stack[spi].spilled_ptr, - reg_state->stack[spi].spilled_ptr.parent); + reg_state->stack[spi].spilled_ptr.parent, + REG_LIVE_READ64); return 0; } else { int zeros = 0; @@ -1401,7 +1518,8 @@ static int check_stack_read(struct bpf_verifier_env *env, return -EACCES; } mark_reg_read(env, ®_state->stack[spi].spilled_ptr, - reg_state->stack[spi].spilled_ptr.parent); + reg_state->stack[spi].spilled_ptr.parent, + REG_LIVE_READ64); if (value_regno >= 0) { if (zeros == size) { /* any size read into register is zero extended, @@ -2110,6 +2228,12 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn value_regno); if (reg_type_may_be_null(reg_type)) regs[value_regno].id = ++env->id_gen; + /* A load of ctx field could have different + * actual load size with the one encoded in the + * insn. When the dst is PTR, it is for sure not + * a sub-register. + */ + regs[value_regno].subreg_def = DEF_NOT_SUBREG; } regs[value_regno].type = reg_type; } @@ -2369,7 +2493,8 @@ mark: * the whole slot to be marked as 'read' */ mark_reg_read(env, &state->stack[spi].spilled_ptr, - state->stack[spi].spilled_ptr.parent); + state->stack[spi].spilled_ptr.parent, + REG_LIVE_READ64); } return update_stack_depth(env, state, min_off); } @@ -3333,6 +3458,9 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); } + /* helper call returns 64-bit value. */ + regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; + /* update return register (already marked as written above) */ if (fn->ret_type == RET_INTEGER) { /* sets type to SCALAR_VALUE */ @@ -4264,6 +4392,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) */ *dst_reg = *src_reg; dst_reg->live |= REG_LIVE_WRITTEN; + dst_reg->subreg_def = DEF_NOT_SUBREG; } else { /* R1 = (u32) R2 */ if (is_pointer_value(env, insn->src_reg)) { @@ -4274,6 +4403,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) } else if (src_reg->type == SCALAR_VALUE) { *dst_reg = *src_reg; dst_reg->live |= REG_LIVE_WRITTEN; + dst_reg->subreg_def = env->insn_idx + 1; } else { mark_reg_unknown(env, regs, insn->dst_reg); @@ -5353,6 +5483,8 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) * Already marked as written above. */ mark_reg_unknown(env, regs, BPF_REG_0); + /* ld_abs load up to 32-bit skb data. */ + regs[BPF_REG_0].subreg_def = env->insn_idx + 1; return 0; } @@ -6309,20 +6441,33 @@ static bool states_equal(struct bpf_verifier_env *env, return true; } +/* Return 0 if no propagation happened. Return negative error code if error + * happened. Otherwise, return the propagated bit. + */ static int propagate_liveness_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, struct bpf_reg_state *parent_reg) { + u8 parent_flag = parent_reg->live & REG_LIVE_READ; + u8 flag = reg->live & REG_LIVE_READ; int err; - if (parent_reg->live & REG_LIVE_READ || !(reg->live & REG_LIVE_READ)) + /* When comes here, read flags of PARENT_REG or REG could be any of + * REG_LIVE_READ64, REG_LIVE_READ32, REG_LIVE_NONE. There is no need + * of propagation if PARENT_REG has strongest REG_LIVE_READ64. + */ + if (parent_flag == REG_LIVE_READ64 || + /* Or if there is no read flag from REG. 
*/ + !flag || + /* Or if the read flag from REG is the same as PARENT_REG. */ + parent_flag == flag) return 0; - err = mark_reg_read(env, reg, parent_reg); + err = mark_reg_read(env, reg, parent_reg, flag); if (err) return err; - return 0; + return flag; } /* A write screens off any subsequent reads; but write marks come from the @@ -6356,8 +6501,10 @@ static int propagate_liveness(struct bpf_verifier_env *env, for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) { err = propagate_liveness_reg(env, &state_reg[i], &parent_reg[i]); - if (err) + if (err < 0) return err; + if (err == REG_LIVE_READ64) + mark_insn_zext(env, &parent_reg[i]); } /* Propagate stack slots. */ @@ -6367,11 +6514,11 @@ static int propagate_liveness(struct bpf_verifier_env *env, state_reg = &state->stack[i].spilled_ptr; err = propagate_liveness_reg(env, state_reg, parent_reg); - if (err) + if (err < 0) return err; } } - return err; + return 0; } static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) From b325fbca4b136886885e51f4d36e2adab76596e3 Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Fri, 24 May 2019 23:25:13 +0100 Subject: [PATCH 26/83] bpf: verifier: mark patched-insn with sub-register zext flag Patched insns do not go through generic verification, therefore they don't have zero extension information collected during insn walking. We don't bother to analyze them at the moment; for any sub-register def that comes from them, just conservatively mark it as needing zero extension. Signed-off-by: Jiong Wang Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 37 +++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 33 insertions(+), 4 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index f6b4c7148c3e..a6af3166acae 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1269,6 +1269,24 @@ static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn, return true; } +/* Return TRUE if INSN doesn't have explicit value define. */ +static bool insn_no_def(struct bpf_insn *insn) +{ + u8 class = BPF_CLASS(insn->code); + + return (class == BPF_JMP || class == BPF_JMP32 || + class == BPF_STX || class == BPF_ST); +} + +/* Return TRUE if INSN has defined any 32-bit value explicitly. */ +static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn) +{ + if (insn_no_def(insn)) + return false; + + return !is_reg64(env, insn, insn->dst_reg, NULL, DST_OP); +} + static void mark_insn_zext(struct bpf_verifier_env *env, struct bpf_reg_state *reg) { @@ -7298,14 +7316,23 @@ static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env) * insni[off, off + cnt). Adjust corresponding insn_aux_data by copying * [0, off) and [off, end) to new locations, so the patched range stays zero -static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len, - u32 off, u32 cnt) +static int adjust_insn_aux_data(struct bpf_verifier_env *env, + struct bpf_prog *new_prog, u32 off, u32 cnt) { struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data; + struct bpf_insn *insn = new_prog->insnsi; + u32 prog_len; int i; + /* aux info at OFF always needs adjustment, no matter fast path + * (cnt == 1) is taken or not. There is no guarantee INSN at OFF is the + * original insn at old prog.
+ */ + old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1); + if (cnt == 1) return 0; + prog_len = new_prog->len; new_data = vzalloc(array_size(prog_len, sizeof(struct bpf_insn_aux_data))); if (!new_data) @@ -7313,8 +7340,10 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len, memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); memcpy(new_data + off + cnt - 1, old_data + off, sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); - for (i = off; i < off + cnt - 1; i++) + for (i = off; i < off + cnt - 1; i++) { new_data[i].seen = true; + new_data[i].zext_dst = insn_has_def32(env, insn + i); + } env->insn_aux_data = new_data; vfree(old_data); return 0; @@ -7347,7 +7376,7 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of env->insn_aux_data[off].orig_idx); return NULL; } - if (adjust_insn_aux_data(env, new_prog->len, off, len)) + if (adjust_insn_aux_data(env, new_prog, off, len)) return NULL; adjust_subprog_starts(env, off, len); return new_prog; From 7d134041a89610ae552501fc88652805addcdee4 Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Fri, 24 May 2019 23:25:14 +0100 Subject: [PATCH 27/83] bpf: introduce new mov32 variant for doing explicit zero extension The encoding for this new variant is based on BPF_X format. "imm" field was 0 only, now it could be 1 which means doing zero extension unconditionally .code = BPF_ALU | BPF_MOV | BPF_X .dst_reg = DST .src_reg = SRC .imm = 1 We use this new form for doing zero extension for which verifier will guarantee SRC == DST. Implications on JIT back-ends when doing code-gen for BPF_ALU | BPF_MOV | BPF_X: 1. No change if hardware already does zero extension unconditionally for sub-register write. 2. Otherwise, when seeing imm == 1, just generate insns to clear high 32-bit. No need to generate insns for the move because when imm == 1, dst_reg is the same as src_reg at the moment. Interpreter doesn't need change as well. It is doing unconditionally zero extension for mov32 already. One helper macro BPF_ZEXT_REG is added to help creating zero extension insn using this new mov32 variant. One helper function insn_is_zext is added for checking one insn is an zero extension on dst. This will be widely used by a few JIT back-ends in later patches in this set. Signed-off-by: Jiong Wang Signed-off-by: Alexei Starovoitov --- include/linux/filter.h | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/include/linux/filter.h b/include/linux/filter.h index 7148bab96943..bb10ffb88452 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -160,6 +160,20 @@ struct ctl_table_header; .off = 0, \ .imm = IMM }) +/* Special form of mov32, used for doing explicit zero extension on dst. */ +#define BPF_ZEXT_REG(DST) \ + ((struct bpf_insn) { \ + .code = BPF_ALU | BPF_MOV | BPF_X, \ + .dst_reg = DST, \ + .src_reg = DST, \ + .off = 0, \ + .imm = 1 }) + +static inline bool insn_is_zext(const struct bpf_insn *insn) +{ + return insn->code == (BPF_ALU | BPF_MOV | BPF_X) && insn->imm == 1; +} + /* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */ #define BPF_LD_IMM64(DST, IMM) \ BPF_LD_IMM64_RAW(DST, 0, IMM) From a4b1d3c1ddf6cb441187b6c130a473c16a05a356 Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Fri, 24 May 2019 23:25:15 +0100 Subject: [PATCH 28/83] bpf: verifier: insert zero extension according to analysis result After previous patches, verifier will mark a insn if it really needs zero extension on dst_reg. 
It is then for back-ends to decide how to use such information to eliminate unnecessary zero extension code-gen during JIT compilation. One approach is verifier insert explicit zero extension for those insns that need zero extension in a generic way, JIT back-ends then do not generate zero extension for sub-register write at default. However, only those back-ends which do not have hardware zero extension want this optimization. Back-ends like x86_64 and AArch64 have hardware zero extension support that the insertion should be disabled. This patch introduces new target hook "bpf_jit_needs_zext" which returns false at default, meaning verifier zero extension insertion is disabled at default. A back-end could override this hook to return true if it doesn't have hardware support and want verifier insert zero extension explicitly. Offload targets do not use this native target hook, instead, they could get the optimization results using bpf_prog_offload_ops.finalize. NOTE: arches could have diversified features, it is possible for one arch to have hardware zero extension support for some sub-register write insns but not for all. For example, PowerPC, SPARC have zero extended loads, but not for alu32. So when verifier zero extension insertion enabled, these JIT back-ends need to peephole insns to remove those zero extension inserted for insn that actually has hardware zero extension support. The peephole could be as simple as looking the next insn, if it is a special zero extension insn then it is safe to eliminate it if the current insn has hardware zero extension support. Reviewed-by: Jakub Kicinski Signed-off-by: Jiong Wang Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 1 + include/linux/filter.h | 1 + kernel/bpf/core.c | 9 +++++++++ kernel/bpf/verifier.c | 41 +++++++++++++++++++++++++++++++++++++++++ 4 files changed, 52 insertions(+) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 4fb3aa2dc975..d98141edb74b 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -370,6 +370,7 @@ struct bpf_prog_aux { u32 id; u32 func_cnt; /* used by non-func prog as the number of func progs */ u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */ + bool verifier_zext; /* Zero extensions has been inserted by verifier. */ bool offload_requested; struct bpf_prog **func; void *jit_data; /* JIT specific data. arch dependent */ diff --git a/include/linux/filter.h b/include/linux/filter.h index bb10ffb88452..ba8b65270e0d 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -825,6 +825,7 @@ u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); void bpf_jit_compile(struct bpf_prog *prog); +bool bpf_jit_needs_zext(void); bool bpf_helper_changes_pkt_data(void *func); static inline bool bpf_dump_raw_ok(void) diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 242a643af82f..3675b19ecb90 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -2090,6 +2090,15 @@ bool __weak bpf_helper_changes_pkt_data(void *func) return false; } +/* Return TRUE if the JIT backend wants verifier to enable sub-register usage + * analysis code and wants explicit zero extension inserted by verifier. + * Otherwise, return FALSE. + */ +bool __weak bpf_jit_needs_zext(void) +{ + return false; +} + /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call * skb_copy_bits(), so provide a weak definition of it for NET-less config. 
*/ diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index a6af3166acae..d4394a84b9eb 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -7640,6 +7640,38 @@ static int opt_remove_nops(struct bpf_verifier_env *env) return 0; } +static int opt_subreg_zext_lo32(struct bpf_verifier_env *env) +{ + struct bpf_insn_aux_data *aux = env->insn_aux_data; + struct bpf_insn *insns = env->prog->insnsi; + int i, delta = 0, len = env->prog->len; + struct bpf_insn zext_patch[2]; + struct bpf_prog *new_prog; + + zext_patch[1] = BPF_ZEXT_REG(0); + for (i = 0; i < len; i++) { + int adj_idx = i + delta; + struct bpf_insn insn; + + if (!aux[adj_idx].zext_dst) + continue; + + insn = insns[adj_idx]; + zext_patch[0] = insn; + zext_patch[1].dst_reg = insn.dst_reg; + zext_patch[1].src_reg = insn.dst_reg; + new_prog = bpf_patch_insn_data(env, adj_idx, zext_patch, 2); + if (!new_prog) + return -ENOMEM; + env->prog = new_prog; + insns = new_prog->insnsi; + aux = env->insn_aux_data; + delta += 2; + } + + return 0; +} + /* convert load instructions that access fields of a context type into a * sequence of instructions that access fields of the underlying structure: * struct __sk_buff -> struct sk_buff @@ -8490,6 +8522,15 @@ skip_full_check: if (ret == 0) ret = fixup_bpf_calls(env); + /* do 32-bit optimization after insn patching has done so those patched + * insns could be handled correctly. + */ + if (ret == 0 && bpf_jit_needs_zext() && + !bpf_prog_is_dev_bound(env->prog->aux)) { + ret = opt_subreg_zext_lo32(env); + env->prog->aux->verifier_zext = !ret; + } + if (ret == 0) ret = fixup_call_args(env); From c240eff63a1cf1c4edc768e0cfc374811c02f069 Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Fri, 24 May 2019 23:25:16 +0100 Subject: [PATCH 29/83] bpf: introduce new bpf prog load flags "BPF_F_TEST_RND_HI32" x86_64 and AArch64 perhaps are two arches that running bpf testsuite frequently, however the zero extension insertion pass is not enabled for them because of their hardware support. It is critical to guarantee the pass correction as it is supposed to be enabled at default for a couple of other arches, for example PowerPC, SPARC, arm, NFP etc. Therefore, it would be very useful if there is a way to test this pass on for example x86_64. The test methodology employed by this set is "poisoning" useless bits. High 32-bit of a definition is randomized if it is identified as not used by any later insn. Such randomization is only enabled under testing mode which is gated by the new bpf prog load flags "BPF_F_TEST_RND_HI32". Suggested-by: Alexei Starovoitov Signed-off-by: Jiong Wang Signed-off-by: Alexei Starovoitov --- include/uapi/linux/bpf.h | 18 ++++++++++++++++++ kernel/bpf/syscall.c | 4 +++- 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 68d4470523a0..7c6aef253173 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -260,6 +260,24 @@ enum bpf_attach_type { */ #define BPF_F_ANY_ALIGNMENT (1U << 1) +/* BPF_F_TEST_RND_HI32 is used in BPF_PROG_LOAD command for testing purpose. + * Verifier does sub-register def/use analysis and identifies instructions whose + * def only matters for low 32-bit, high 32-bit is never referenced later + * through implicit zero extension. Therefore verifier notifies JIT back-ends + * that it is safe to ignore clearing high 32-bit for these instructions. This + * saves some back-ends a lot of code-gen. 
However such optimization is not + * necessary on some arches, for example x86_64, arm64 etc, whose JIT back-ends + * hence hasn't used verifier's analysis result. But, we really want to have a + * way to be able to verify the correctness of the described optimization on + * x86_64 on which testsuites are frequently exercised. + * + * So, this flag is introduced. Once it is set, verifier will randomize high + * 32-bit for those instructions who has been identified as safe to ignore them. + * Then, if verifier is not doing correct analysis, such randomization will + * regress tests to expose bugs. + */ +#define BPF_F_TEST_RND_HI32 (1U << 2) + /* When BPF ldimm64's insn[0].src_reg != 0 then this can have * two extensions: * diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index cb5440b02e82..3d546b6f4646 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -1604,7 +1604,9 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr) if (CHECK_ATTR(BPF_PROG_LOAD)) return -EINVAL; - if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT | BPF_F_ANY_ALIGNMENT)) + if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT | + BPF_F_ANY_ALIGNMENT | + BPF_F_TEST_RND_HI32)) return -EINVAL; if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && From 9ce33e337facc678a9578f256752e39643dedf21 Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Fri, 24 May 2019 23:25:17 +0100 Subject: [PATCH 30/83] tools: bpf: sync uapi header bpf.h Sync new bpf prog load flag "BPF_F_TEST_RND_HI32" to tools/. Signed-off-by: Jiong Wang Signed-off-by: Alexei Starovoitov --- tools/include/uapi/linux/bpf.h | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 68d4470523a0..7c6aef253173 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -260,6 +260,24 @@ enum bpf_attach_type { */ #define BPF_F_ANY_ALIGNMENT (1U << 1) +/* BPF_F_TEST_RND_HI32 is used in BPF_PROG_LOAD command for testing purpose. + * Verifier does sub-register def/use analysis and identifies instructions whose + * def only matters for low 32-bit, high 32-bit is never referenced later + * through implicit zero extension. Therefore verifier notifies JIT back-ends + * that it is safe to ignore clearing high 32-bit for these instructions. This + * saves some back-ends a lot of code-gen. However such optimization is not + * necessary on some arches, for example x86_64, arm64 etc, whose JIT back-ends + * hence hasn't used verifier's analysis result. But, we really want to have a + * way to be able to verify the correctness of the described optimization on + * x86_64 on which testsuites are frequently exercised. + * + * So, this flag is introduced. Once it is set, verifier will randomize high + * 32-bit for those instructions who has been identified as safe to ignore them. + * Then, if verifier is not doing correct analysis, such randomization will + * regress tests to expose bugs. + */ +#define BPF_F_TEST_RND_HI32 (1U << 2) + /* When BPF ldimm64's insn[0].src_reg != 0 then this can have * two extensions: * From d6c2308c742a655f4598364ab331959639aae166 Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Fri, 24 May 2019 23:25:18 +0100 Subject: [PATCH 31/83] bpf: verifier: randomize high 32-bit when BPF_F_TEST_RND_HI32 is set This patch randomizes high 32-bit of a definition when BPF_F_TEST_RND_HI32 is set. 
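For a definition whose high 32 bits were classified as dead, the pass appends a short poisoning sequence right after the defining insn; roughly, a sketch of the rnd_hi32_patch buffer built in the diff below, with R_dst standing for the defining insn's dst_reg and imm_rnd for the random value:

    <defining insn>                                /* e.g. an ALU32 write to R_dst */
    BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd),   /* AX = random 32-bit immediate */
    BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32),        /* AX <<= 32                    */
    BPF_ALU64_REG(BPF_OR, R_dst, BPF_REG_AX),      /* poison high half of R_dst    */

If the analysis wrongly marked a live high half as dead, the poisoned bits change the program's observable result and the selftests fail.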
Suggested-by: Alexei Starovoitov Signed-off-by: Jiong Wang Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 68 ++++++++++++++++++++++++++++++++++++------- 1 file changed, 57 insertions(+), 11 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index d4394a84b9eb..2778417e6e0c 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -7640,33 +7640,79 @@ static int opt_remove_nops(struct bpf_verifier_env *env) return 0; } -static int opt_subreg_zext_lo32(struct bpf_verifier_env *env) +static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env, + const union bpf_attr *attr) { + struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4]; struct bpf_insn_aux_data *aux = env->insn_aux_data; + int i, patch_len, delta = 0, len = env->prog->len; struct bpf_insn *insns = env->prog->insnsi; - int i, delta = 0, len = env->prog->len; - struct bpf_insn zext_patch[2]; struct bpf_prog *new_prog; + bool rnd_hi32; + rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32; zext_patch[1] = BPF_ZEXT_REG(0); + rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0); + rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32); + rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX); for (i = 0; i < len; i++) { int adj_idx = i + delta; struct bpf_insn insn; - if (!aux[adj_idx].zext_dst) + insn = insns[adj_idx]; + if (!aux[adj_idx].zext_dst) { + u8 code, class; + u32 imm_rnd; + + if (!rnd_hi32) + continue; + + code = insn.code; + class = BPF_CLASS(code); + if (insn_no_def(&insn)) + continue; + + /* NOTE: arg "reg" (the fourth one) is only used for + * BPF_STX which has been ruled out in above + * check, it is safe to pass NULL here. + */ + if (is_reg64(env, &insn, insn.dst_reg, NULL, DST_OP)) { + if (class == BPF_LD && + BPF_MODE(code) == BPF_IMM) + i++; + continue; + } + + /* ctx load could be transformed into wider load. */ + if (class == BPF_LDX && + aux[adj_idx].ptr_type == PTR_TO_CTX) + continue; + + imm_rnd = get_random_int(); + rnd_hi32_patch[0] = insn; + rnd_hi32_patch[1].imm = imm_rnd; + rnd_hi32_patch[3].dst_reg = insn.dst_reg; + patch = rnd_hi32_patch; + patch_len = 4; + goto apply_patch_buffer; + } + + if (!bpf_jit_needs_zext()) continue; - insn = insns[adj_idx]; zext_patch[0] = insn; zext_patch[1].dst_reg = insn.dst_reg; zext_patch[1].src_reg = insn.dst_reg; - new_prog = bpf_patch_insn_data(env, adj_idx, zext_patch, 2); + patch = zext_patch; + patch_len = 2; +apply_patch_buffer: + new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len); if (!new_prog) return -ENOMEM; env->prog = new_prog; insns = new_prog->insnsi; aux = env->insn_aux_data; - delta += 2; + delta += patch_len - 1; } return 0; @@ -8525,10 +8571,10 @@ skip_full_check: /* do 32-bit optimization after insn patching has done so those patched * insns could be handled correctly. */ - if (ret == 0 && bpf_jit_needs_zext() && - !bpf_prog_is_dev_bound(env->prog->aux)) { - ret = opt_subreg_zext_lo32(env); - env->prog->aux->verifier_zext = !ret; + if (ret == 0 && !bpf_prog_is_dev_bound(env->prog->aux)) { + ret = opt_subreg_zext_lo32_rnd_hi32(env, attr); + env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? 
!ret + : false; } if (ret == 0) From 046561981b948e07df096a8402f9efc80bc784d9 Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Fri, 24 May 2019 23:25:19 +0100 Subject: [PATCH 32/83] libbpf: add "prog_flags" to bpf_program/bpf_prog_load_attr/bpf_load_program_attr libbpf doesn't allow passing "prog_flags" during bpf program load in a couple of load related APIs, "bpf_load_program_xattr", "load_program" and "bpf_prog_load_xattr". It makes sense to allow passing "prog_flags" which is useful for customizing program loading. Reviewed-by: Jakub Kicinski Signed-off-by: Jiong Wang Signed-off-by: Alexei Starovoitov --- tools/lib/bpf/bpf.c | 1 + tools/lib/bpf/bpf.h | 1 + tools/lib/bpf/libbpf.c | 3 +++ tools/lib/bpf/libbpf.h | 1 + 4 files changed, 6 insertions(+) diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index c4a48086dc9a..0d4b4fe10a84 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -256,6 +256,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr, if (load_attr->name) memcpy(attr.prog_name, load_attr->name, min(strlen(load_attr->name), BPF_OBJ_NAME_LEN - 1)); + attr.prog_flags = load_attr->prog_flags; fd = sys_bpf_prog_load(&attr, sizeof(attr)); if (fd >= 0) diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 9593fec75652..ff42ca043dc8 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -87,6 +87,7 @@ struct bpf_load_program_attr { const void *line_info; __u32 line_info_cnt; __u32 log_level; + __u32 prog_flags; }; /* Flags to direct loading requirements */ diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 197b574406b3..ff149372b3c0 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -188,6 +188,7 @@ struct bpf_program { void *line_info; __u32 line_info_rec_size; __u32 line_info_cnt; + __u32 prog_flags; }; enum libbpf_map_type { @@ -2076,6 +2077,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt, load_attr.line_info_rec_size = prog->line_info_rec_size; load_attr.line_info_cnt = prog->line_info_cnt; load_attr.log_level = prog->log_level; + load_attr.prog_flags = prog->prog_flags; if (!load_attr.insns || !load_attr.insns_cnt) return -EINVAL; @@ -3521,6 +3523,7 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr, expected_attach_type); prog->log_level = attr->log_level; + prog->prog_flags = attr->prog_flags; if (!first_prog) first_prog = prog; } diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index c5ff00515ce7..5abc2375defd 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -320,6 +320,7 @@ struct bpf_prog_load_attr { enum bpf_attach_type expected_attach_type; int ifindex; int log_level; + int prog_flags; }; LIBBPF_API int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr, From f3b55abb6d5a522228e136c3bc4a9a716d5d8a54 Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Fri, 24 May 2019 23:25:20 +0100 Subject: [PATCH 33/83] selftests: bpf: adjust several test_verifier helpers for insn insertion - bpf_fill_ld_abs_vlan_push_pop: Prevent zext happens inside PUSH_CNT loop. This could happen because of BPF_LD_ABS (32-bit def) + BPF_JMP (64-bit use), or BPF_LD_ABS + EXIT (64-bit use of R0). So, change BPF_JMP to BPF_JMP32 and redefine R0 at exit path to cut off the data-flow from inside the loop. - bpf_fill_jump_around_ld_abs: Jump range is limited to 16 bit. 
Every ld_abs is replaced by 6 insns, but on arches like arm, ppc etc, there will be one BPF_ZEXT inserted to extend the error value of the inlined ld_abs sequence, which then contains 7 insns. So, set the dividend to 7 so the testcase can work on all arches. - bpf_fill_scale1/bpf_fill_scale2: Both contain ~1M BPF_ALU32_IMM insns, which will trigger ~1M insn patcher calls because of the hi32 randomization later when BPF_F_TEST_RND_HI32 is set for bpf selftests. The insn patcher is not efficient; ~1M calls to it will hang the computer. So, change to BPF_ALU64_IMM to avoid hi32 randomization. Signed-off-by: Jiong Wang Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/test_verifier.c | 29 ++++++++++++++------- 1 file changed, 19 insertions(+), 10 deletions(-) diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 6e2fec84c929..fa9b5bfe5d9f 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -138,32 +138,36 @@ static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self) loop: for (j = 0; j < PUSH_CNT; j++) { insn[i++] = BPF_LD_ABS(BPF_B, 0); - insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2); + /* jump to error label */ + insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3); i++; insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6); insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1); insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2); insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_vlan_push), - insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2); + insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3); i++; } for (j = 0; j < PUSH_CNT; j++) { insn[i++] = BPF_LD_ABS(BPF_B, 0); - insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2); + insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3); i++; insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6); insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_vlan_pop), - insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2); + insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3); i++; } if (++k < 5) goto loop; - for (; i < len - 1; i++) - insn[i] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 0xbef); + for (; i < len - 3; i++) + insn[i] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0xbef); + insn[len - 3] = BPF_JMP_A(1); + /* error label */ + insn[len - 2] = BPF_MOV32_IMM(BPF_REG_0, 0); insn[len - 1] = BPF_EXIT_INSN(); self->prog_len = len; } @@ -171,8 +175,13 @@ loop: static void bpf_fill_jump_around_ld_abs(struct bpf_test *self) { struct bpf_insn *insn = self->fill_insns; - /* jump range is limited to 16 bit. every ld_abs is replaced by 6 insns */ - unsigned int len = (1 << 15) / 6; + /* jump range is limited to 16 bit. every ld_abs is replaced by 6 insns, + * but on arches like arm, ppc etc, there will be one BPF_ZEXT inserted + * to extend the error value of the inlined ld_abs sequence which then + * contains 7 insns. so, set the dividend to 7 so the testcase could + * work on all arches.
+ */ + unsigned int len = (1 << 15) / 7; int i = 0; insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1); @@ -232,7 +241,7 @@ static void bpf_fill_scale1(struct bpf_test *self) * within 1m limit add MAX_TEST_INSNS - MAX_JMP_SEQ - 1 MOVs and 1 EXIT */ while (i < MAX_TEST_INSNS - MAX_JMP_SEQ - 1) - insn[i++] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 42); + insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42); insn[i] = BPF_EXIT_INSN(); self->prog_len = i + 1; self->retval = 42; @@ -264,7 +273,7 @@ static void bpf_fill_scale2(struct bpf_test *self) * within 1m limit add MAX_TEST_INSNS - MAX_JMP_SEQ - 1 MOVs and 1 EXIT */ while (i < MAX_TEST_INSNS - MAX_JMP_SEQ - 1) - insn[i++] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 42); + insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42); insn[i] = BPF_EXIT_INSN(); self->prog_len = i + 1; self->retval = 42; From 9d120b4127e8df04ee665241916056b0156b37c7 Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Fri, 24 May 2019 23:25:21 +0100 Subject: [PATCH 34/83] selftests: bpf: enable hi32 randomization for all tests The previous libbpf patch allows users to specify "prog_flags" to bpf program load APIs. To enable high 32-bit randomization for a test, we need to set BPF_F_TEST_RND_HI32 in "prog_flags". To enable such randomization for all tests, we need to make sure all places are passing BPF_F_TEST_RND_HI32. Changing them one by one is not convenient; also, it would be better if a test could be switched to "normal" running mode without code change. Given the program load APIs used across bpf selftests are mostly: bpf_prog_load: load from file bpf_load_program: load from raw insns A test_stub.c is implemented for bpf selftests; it offers two functions for testing purposes: bpf_prog_test_load bpf_test_load_program They are the same as "bpf_prog_load" and "bpf_load_program", except they also set BPF_F_TEST_RND_HI32. Given *_xattr functions are the APIs to customize any "prog_flags", it makes little sense to put these two functions into libbpf. Then, the following CFLAGS are passed to compilations for host programs: -Dbpf_prog_load=bpf_prog_test_load -Dbpf_load_program=bpf_test_load_program They migrate the used load APIs to the test version, hence enabling high 32-bit randomization for these tests without changing source code. Besides all these, several testcases use "bpf_prog_load_attr" directly; their call sites are updated to pass BPF_F_TEST_RND_HI32.
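As a hedged sketch of the effect of those -D flags (the call site below is hypothetical; the redirected helper is the test_stub.c one added in this patch), an unmodified test that calls

    err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);

is compiled as a call to bpf_prog_test_load(), which fills a struct bpf_prog_load_attr with .prog_flags = BPF_F_TEST_RND_HI32 and forwards to bpf_prog_load_xattr(), so the test gets hi32 randomization without any source change.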
Signed-off-by: Jiong Wang Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/Makefile | 10 +++-- .../bpf/prog_tests/bpf_verif_scale.c | 1 + tools/testing/selftests/bpf/test_sock_addr.c | 1 + .../testing/selftests/bpf/test_sock_fields.c | 1 + .../selftests/bpf/test_socket_cookie.c | 1 + tools/testing/selftests/bpf/test_stub.c | 40 +++++++++++++++++++ tools/testing/selftests/bpf/test_verifier.c | 2 +- 7 files changed, 51 insertions(+), 5 deletions(-) create mode 100644 tools/testing/selftests/bpf/test_stub.c diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index cd23758e8b7a..fa002da36d0d 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -15,7 +15,9 @@ LLC ?= llc LLVM_OBJCOPY ?= llvm-objcopy LLVM_READELF ?= llvm-readelf BTF_PAHOLE ?= pahole -CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(BPFDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include +CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(BPFDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include \ + -Dbpf_prog_load=bpf_prog_test_load \ + -Dbpf_load_program=bpf_test_load_program LDLIBS += -lcap -lelf -lrt -lpthread # Order correspond to 'make run_tests' order @@ -79,9 +81,9 @@ $(OUTPUT)/test_maps: map_tests/*.c BPFOBJ := $(OUTPUT)/libbpf.a -$(TEST_GEN_PROGS): $(BPFOBJ) +$(TEST_GEN_PROGS): test_stub.o $(BPFOBJ) -$(TEST_GEN_PROGS_EXTENDED): $(OUTPUT)/libbpf.a +$(TEST_GEN_PROGS_EXTENDED): test_stub.o $(OUTPUT)/libbpf.a $(OUTPUT)/test_dev_cgroup: cgroup_helpers.c $(OUTPUT)/test_skb_cgroup_id_user: cgroup_helpers.c @@ -177,7 +179,7 @@ $(ALU32_BUILD_DIR)/test_progs_32: test_progs.c $(OUTPUT)/libbpf.a\ $(ALU32_BUILD_DIR)/urandom_read $(CC) $(TEST_PROGS_CFLAGS) $(CFLAGS) \ -o $(ALU32_BUILD_DIR)/test_progs_32 \ - test_progs.c trace_helpers.c prog_tests/*.c \ + test_progs.c test_stub.c trace_helpers.c prog_tests/*.c \ $(OUTPUT)/libbpf.a $(LDLIBS) $(ALU32_BUILD_DIR)/test_progs_32: $(PROG_TESTS_H) diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c index ce03289c9077..c0091137074b 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c @@ -22,6 +22,7 @@ static int check_load(const char *file, enum bpf_prog_type type) attr.file = file; attr.prog_type = type; attr.log_level = 4; + attr.prog_flags = BPF_F_TEST_RND_HI32; err = bpf_prog_load_xattr(&attr, &obj, &prog_fd); bpf_object__close(obj); if (err) diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c index 3f110eaaf29c..5d0c4f0baeff 100644 --- a/tools/testing/selftests/bpf/test_sock_addr.c +++ b/tools/testing/selftests/bpf/test_sock_addr.c @@ -745,6 +745,7 @@ static int load_path(const struct sock_addr_test *test, const char *path) attr.file = path; attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR; attr.expected_attach_type = test->expected_attach_type; + attr.prog_flags = BPF_F_TEST_RND_HI32; if (bpf_prog_load_xattr(&attr, &obj, &prog_fd)) { if (test->expected_result != LOAD_REJECT) diff --git a/tools/testing/selftests/bpf/test_sock_fields.c b/tools/testing/selftests/bpf/test_sock_fields.c index e089477fa0a3..f0fc103261a4 100644 --- a/tools/testing/selftests/bpf/test_sock_fields.c +++ b/tools/testing/selftests/bpf/test_sock_fields.c @@ -414,6 +414,7 @@ int main(int argc, char **argv) struct bpf_prog_load_attr attr = { .file = "test_sock_fields_kern.o", .prog_type = BPF_PROG_TYPE_CGROUP_SKB, + .prog_flags = 
BPF_F_TEST_RND_HI32, }; int cgroup_fd, egress_fd, ingress_fd, err; struct bpf_program *ingress_prog; diff --git a/tools/testing/selftests/bpf/test_socket_cookie.c b/tools/testing/selftests/bpf/test_socket_cookie.c index e51d63786ff8..cac8ee57a013 100644 --- a/tools/testing/selftests/bpf/test_socket_cookie.c +++ b/tools/testing/selftests/bpf/test_socket_cookie.c @@ -148,6 +148,7 @@ static int run_test(int cgfd) memset(&attr, 0, sizeof(attr)); attr.file = SOCKET_COOKIE_PROG; attr.prog_type = BPF_PROG_TYPE_UNSPEC; + attr.prog_flags = BPF_F_TEST_RND_HI32; err = bpf_prog_load_xattr(&attr, &pobj, &prog_fd); if (err) { diff --git a/tools/testing/selftests/bpf/test_stub.c b/tools/testing/selftests/bpf/test_stub.c new file mode 100644 index 000000000000..84e81a89e2f9 --- /dev/null +++ b/tools/testing/selftests/bpf/test_stub.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2019 Netronome Systems, Inc. */ + +#include +#include +#include + +int bpf_prog_test_load(const char *file, enum bpf_prog_type type, + struct bpf_object **pobj, int *prog_fd) +{ + struct bpf_prog_load_attr attr; + + memset(&attr, 0, sizeof(struct bpf_prog_load_attr)); + attr.file = file; + attr.prog_type = type; + attr.expected_attach_type = 0; + attr.prog_flags = BPF_F_TEST_RND_HI32; + + return bpf_prog_load_xattr(&attr, pobj, prog_fd); +} + +int bpf_test_load_program(enum bpf_prog_type type, const struct bpf_insn *insns, + size_t insns_cnt, const char *license, + __u32 kern_version, char *log_buf, + size_t log_buf_sz) +{ + struct bpf_load_program_attr load_attr; + + memset(&load_attr, 0, sizeof(struct bpf_load_program_attr)); + load_attr.prog_type = type; + load_attr.expected_attach_type = 0; + load_attr.name = NULL; + load_attr.insns = insns; + load_attr.insns_cnt = insns_cnt; + load_attr.license = license; + load_attr.kern_version = kern_version; + load_attr.prog_flags = BPF_F_TEST_RND_HI32; + + return bpf_load_program_xattr(&load_attr, log_buf, log_buf_sz); +} diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index fa9b5bfe5d9f..cd0248c54e25 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -882,7 +882,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv, if (fixup_skips != skips) return; - pflags = 0; + pflags = BPF_F_TEST_RND_HI32; if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT) pflags |= BPF_F_STRICT_ALIGNMENT; if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) From 163541e6ba3426b21884af5a2f498c9d2ce77f00 Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Fri, 24 May 2019 23:25:22 +0100 Subject: [PATCH 35/83] arm: bpf: eliminate zero extension code-gen Cc: Shubham Bansal Signed-off-by: Jiong Wang Signed-off-by: Alexei Starovoitov --- arch/arm/net/bpf_jit_32.c | 42 +++++++++++++++++++++++++++++---------- 1 file changed, 31 insertions(+), 11 deletions(-) diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index c8bfbbfdfcc3..97a6b4b2a115 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -736,7 +736,8 @@ static inline void emit_a32_alu_r64(const bool is64, const s8 dst[], /* ALU operation */ emit_alu_r(rd[1], rs, true, false, op, ctx); - emit_a32_mov_i(rd[0], 0, ctx); + if (!ctx->prog->aux->verifier_zext) + emit_a32_mov_i(rd[0], 0, ctx); } arm_bpf_put_reg64(dst, rd, ctx); @@ -758,8 +759,9 @@ static inline void emit_a32_mov_r64(const bool is64, const s8 dst[], struct jit_ctx *ctx) { if (!is64) { 
emit_a32_mov_r(dst_lo, src_lo, ctx); - /* Zero out high 4 bytes */ - emit_a32_mov_i(dst_hi, 0, ctx); + if (!ctx->prog->aux->verifier_zext) + /* Zero out high 4 bytes */ + emit_a32_mov_i(dst_hi, 0, ctx); } else if (__LINUX_ARM_ARCH__ < 6 && ctx->cpu_architecture < CPU_ARCH_ARMv5TE) { /* complete 8 byte move */ @@ -1060,17 +1062,20 @@ static inline void emit_ldx_r(const s8 dst[], const s8 src, case BPF_B: /* Load a Byte */ emit(ARM_LDRB_I(rd[1], rm, off), ctx); - emit_a32_mov_i(rd[0], 0, ctx); + if (!ctx->prog->aux->verifier_zext) + emit_a32_mov_i(rd[0], 0, ctx); break; case BPF_H: /* Load a HalfWord */ emit(ARM_LDRH_I(rd[1], rm, off), ctx); - emit_a32_mov_i(rd[0], 0, ctx); + if (!ctx->prog->aux->verifier_zext) + emit_a32_mov_i(rd[0], 0, ctx); break; case BPF_W: /* Load a Word */ emit(ARM_LDR_I(rd[1], rm, off), ctx); - emit_a32_mov_i(rd[0], 0, ctx); + if (!ctx->prog->aux->verifier_zext) + emit_a32_mov_i(rd[0], 0, ctx); break; case BPF_DW: /* Load a Double Word */ @@ -1359,6 +1364,11 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) case BPF_ALU64 | BPF_MOV | BPF_X: switch (BPF_SRC(code)) { case BPF_X: + if (imm == 1) { + /* Special mov32 for zext */ + emit_a32_mov_i(dst_hi, 0, ctx); + break; + } emit_a32_mov_r64(is64, dst, src, ctx); break; case BPF_K: @@ -1438,7 +1448,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) } emit_udivmod(rd_lo, rd_lo, rt, ctx, BPF_OP(code)); arm_bpf_put_reg32(dst_lo, rd_lo, ctx); - emit_a32_mov_i(dst_hi, 0, ctx); + if (!ctx->prog->aux->verifier_zext) + emit_a32_mov_i(dst_hi, 0, ctx); break; case BPF_ALU64 | BPF_DIV | BPF_K: case BPF_ALU64 | BPF_DIV | BPF_X: @@ -1453,7 +1464,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) return -EINVAL; if (imm) emit_a32_alu_i(dst_lo, imm, ctx, BPF_OP(code)); - emit_a32_mov_i(dst_hi, 0, ctx); + if (!ctx->prog->aux->verifier_zext) + emit_a32_mov_i(dst_hi, 0, ctx); break; /* dst = dst << imm */ case BPF_ALU64 | BPF_LSH | BPF_K: @@ -1488,7 +1500,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) /* dst = ~dst */ case BPF_ALU | BPF_NEG: emit_a32_alu_i(dst_lo, 0, ctx, BPF_OP(code)); - emit_a32_mov_i(dst_hi, 0, ctx); + if (!ctx->prog->aux->verifier_zext) + emit_a32_mov_i(dst_hi, 0, ctx); break; /* dst = ~dst (64 bit) */ case BPF_ALU64 | BPF_NEG: @@ -1544,11 +1557,13 @@ emit_bswap_uxt: #else /* ARMv6+ */ emit(ARM_UXTH(rd[1], rd[1]), ctx); #endif - emit(ARM_EOR_R(rd[0], rd[0], rd[0]), ctx); + if (!ctx->prog->aux->verifier_zext) + emit(ARM_EOR_R(rd[0], rd[0], rd[0]), ctx); break; case 32: /* zero-extend 32 bits into 64 bits */ - emit(ARM_EOR_R(rd[0], rd[0], rd[0]), ctx); + if (!ctx->prog->aux->verifier_zext) + emit(ARM_EOR_R(rd[0], rd[0], rd[0]), ctx); break; case 64: /* nop */ @@ -1838,6 +1853,11 @@ void bpf_jit_compile(struct bpf_prog *prog) /* Nothing to do here. We support Internal BPF. */ } +bool bpf_jit_needs_zext(void) +{ + return true; +} + struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) { struct bpf_prog *tmp, *orig_prog = prog; From a4c927733e74bb39f9964d8f451e717f8cf6d6cf Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Fri, 24 May 2019 23:25:23 +0100 Subject: [PATCH 36/83] powerpc: bpf: eliminate zero extension code-gen Cc: Naveen N. 
Rao Cc: Sandipan Das Signed-off-by: Jiong Wang Signed-off-by: Alexei Starovoitov --- arch/powerpc/net/bpf_jit_comp64.c | 36 ++++++++++++++++++++++++++++--- 1 file changed, 33 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index 21a1dcd4b156..0ebd946f178b 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -504,6 +504,9 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */ /* slw clears top 32 bits */ PPC_SLW(dst_reg, dst_reg, src_reg); + /* skip zero extension move, but set address map. */ + if (insn_is_zext(&insn[i + 1])) + addrs[++i] = ctx->idx * 4; break; case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */ PPC_SLD(dst_reg, dst_reg, src_reg); @@ -511,6 +514,8 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<== (u32) imm */ /* with imm 0, we still need to clear top 32 bits */ PPC_SLWI(dst_reg, dst_reg, imm); + if (insn_is_zext(&insn[i + 1])) + addrs[++i] = ctx->idx * 4; break; case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<== imm */ if (imm != 0) @@ -518,12 +523,16 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, break; case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */ PPC_SRW(dst_reg, dst_reg, src_reg); + if (insn_is_zext(&insn[i + 1])) + addrs[++i] = ctx->idx * 4; break; case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */ PPC_SRD(dst_reg, dst_reg, src_reg); break; case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */ PPC_SRWI(dst_reg, dst_reg, imm); + if (insn_is_zext(&insn[i + 1])) + addrs[++i] = ctx->idx * 4; break; case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */ if (imm != 0) @@ -548,6 +557,11 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, */ case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */ case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */ + if (imm == 1) { + /* special mov32 for zext */ + PPC_RLWINM(dst_reg, dst_reg, 0, 0, 31); + break; + } PPC_MR(dst_reg, src_reg); goto bpf_alu32_trunc; case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */ @@ -555,11 +569,13 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, PPC_LI32(dst_reg, imm); if (imm < 0) goto bpf_alu32_trunc; + else if (insn_is_zext(&insn[i + 1])) + addrs[++i] = ctx->idx * 4; break; bpf_alu32_trunc: /* Truncate to 32-bits */ - if (BPF_CLASS(code) == BPF_ALU) + if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext) PPC_RLWINM(dst_reg, dst_reg, 0, 0, 31); break; @@ -618,10 +634,13 @@ emit_clear: case 16: /* zero-extend 16 bits into 64 bits */ PPC_RLDICL(dst_reg, dst_reg, 0, 48); + if (insn_is_zext(&insn[i + 1])) + addrs[++i] = ctx->idx * 4; break; case 32: - /* zero-extend 32 bits into 64 bits */ - PPC_RLDICL(dst_reg, dst_reg, 0, 32); + if (!fp->aux->verifier_zext) + /* zero-extend 32 bits into 64 bits */ + PPC_RLDICL(dst_reg, dst_reg, 0, 32); break; case 64: /* nop */ @@ -698,14 +717,20 @@ emit_clear: /* dst = *(u8 *)(ul) (src + off) */ case BPF_LDX | BPF_MEM | BPF_B: PPC_LBZ(dst_reg, src_reg, off); + if (insn_is_zext(&insn[i + 1])) + addrs[++i] = ctx->idx * 4; break; /* dst = *(u16 *)(ul) (src + off) */ case BPF_LDX | BPF_MEM | BPF_H: PPC_LHZ(dst_reg, src_reg, off); + if (insn_is_zext(&insn[i + 1])) + addrs[++i] = ctx->idx * 4; break; /* dst = *(u32 *)(ul) (src + off) */ case BPF_LDX | BPF_MEM | BPF_W: PPC_LWZ(dst_reg, src_reg, off); + if (insn_is_zext(&insn[i + 1])) + addrs[++i] = ctx->idx * 4; break; /* 
dst = *(u64 *)(ul) (src + off) */ case BPF_LDX | BPF_MEM | BPF_DW: @@ -1046,6 +1071,11 @@ struct powerpc64_jit_data { struct codegen_context ctx; }; +bool bpf_jit_needs_zext(void) +{ + return true; +} + struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) { u32 proglen; From 591006b9e754ca723e3deabeccdf56eb531f0b94 Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Fri, 24 May 2019 23:25:24 +0100 Subject: [PATCH 37/83] s390: bpf: eliminate zero extension code-gen Cc: Martin Schwidefsky Cc: Heiko Carstens Signed-off-by: Jiong Wang Signed-off-by: Alexei Starovoitov --- arch/s390/net/bpf_jit_comp.c | 41 ++++++++++++++++++++++++++++++------ 1 file changed, 34 insertions(+), 7 deletions(-) diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 5e7c63033159..e636728ab452 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -299,9 +299,11 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) #define EMIT_ZERO(b1) \ ({ \ - /* llgfr %dst,%dst (zero extend to 64 bit) */ \ - EMIT4(0xb9160000, b1, b1); \ - REG_SET_SEEN(b1); \ + if (!fp->aux->verifier_zext) { \ + /* llgfr %dst,%dst (zero extend to 64 bit) */ \ + EMIT4(0xb9160000, b1, b1); \ + REG_SET_SEEN(b1); \ + } \ }) /* @@ -520,6 +522,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i case BPF_ALU | BPF_MOV | BPF_X: /* dst = (u32) src */ /* llgfr %dst,%src */ EMIT4(0xb9160000, dst_reg, src_reg); + if (insn_is_zext(&insn[1])) + insn_count = 2; break; case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */ /* lgr %dst,%src */ @@ -528,6 +532,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i case BPF_ALU | BPF_MOV | BPF_K: /* dst = (u32) imm */ /* llilf %dst,imm */ EMIT6_IMM(0xc00f0000, dst_reg, imm); + if (insn_is_zext(&insn[1])) + insn_count = 2; break; case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = imm */ /* lgfi %dst,imm */ @@ -639,6 +645,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i EMIT4(0xb9970000, REG_W0, src_reg); /* llgfr %dst,%rc */ EMIT4(0xb9160000, dst_reg, rc_reg); + if (insn_is_zext(&insn[1])) + insn_count = 2; break; } case BPF_ALU64 | BPF_DIV | BPF_X: /* dst = dst / src */ @@ -676,6 +684,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i EMIT_CONST_U32(imm)); /* llgfr %dst,%rc */ EMIT4(0xb9160000, dst_reg, rc_reg); + if (insn_is_zext(&insn[1])) + insn_count = 2; break; } case BPF_ALU64 | BPF_DIV | BPF_K: /* dst = dst / imm */ @@ -864,10 +874,13 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i case 16: /* dst = (u16) cpu_to_be16(dst) */ /* llghr %dst,%dst */ EMIT4(0xb9850000, dst_reg, dst_reg); + if (insn_is_zext(&insn[1])) + insn_count = 2; break; case 32: /* dst = (u32) cpu_to_be32(dst) */ - /* llgfr %dst,%dst */ - EMIT4(0xb9160000, dst_reg, dst_reg); + if (!fp->aux->verifier_zext) + /* llgfr %dst,%dst */ + EMIT4(0xb9160000, dst_reg, dst_reg); break; case 64: /* dst = (u64) cpu_to_be64(dst) */ break; @@ -882,12 +895,15 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i EMIT4_DISP(0x88000000, dst_reg, REG_0, 16); /* llghr %dst,%dst */ EMIT4(0xb9850000, dst_reg, dst_reg); + if (insn_is_zext(&insn[1])) + insn_count = 2; break; case 32: /* dst = (u32) cpu_to_le32(dst) */ /* lrvr %dst,%dst */ EMIT4(0xb91f0000, dst_reg, dst_reg); - /* llgfr %dst,%dst */ - EMIT4(0xb9160000, dst_reg, dst_reg); + if (!fp->aux->verifier_zext) + /* llgfr %dst,%dst */ + EMIT4(0xb9160000, dst_reg, 
dst_reg); break; case 64: /* dst = (u64) cpu_to_le64(dst) */ /* lrvgr %dst,%dst */ @@ -968,16 +984,22 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i /* llgc %dst,0(off,%src) */ EMIT6_DISP_LH(0xe3000000, 0x0090, dst_reg, src_reg, REG_0, off); jit->seen |= SEEN_MEM; + if (insn_is_zext(&insn[1])) + insn_count = 2; break; case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */ /* llgh %dst,0(off,%src) */ EMIT6_DISP_LH(0xe3000000, 0x0091, dst_reg, src_reg, REG_0, off); jit->seen |= SEEN_MEM; + if (insn_is_zext(&insn[1])) + insn_count = 2; break; case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */ /* llgf %dst,off(%src) */ jit->seen |= SEEN_MEM; EMIT6_DISP_LH(0xe3000000, 0x0016, dst_reg, src_reg, REG_0, off); + if (insn_is_zext(&insn[1])) + insn_count = 2; break; case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */ /* lg %dst,0(off,%src) */ @@ -1282,6 +1304,11 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp) return 0; } +bool bpf_jit_needs_zext(void) +{ + return true; +} + /* * Compile eBPF program "fp" */ From 3e2a33cf7e68b95c3afe11e967769341f6dc987d Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Fri, 24 May 2019 23:25:25 +0100 Subject: [PATCH 38/83] sparc: bpf: eliminate zero extension code-gen Cc: David S. Miller Signed-off-by: Jiong Wang Signed-off-by: Alexei Starovoitov --- arch/sparc/net/bpf_jit_comp_64.c | 29 +++++++++++++++++++++++++++-- 1 file changed, 27 insertions(+), 2 deletions(-) diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c index 65428e79b2f3..3364e2a00989 100644 --- a/arch/sparc/net/bpf_jit_comp_64.c +++ b/arch/sparc/net/bpf_jit_comp_64.c @@ -908,6 +908,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) /* dst = src */ case BPF_ALU | BPF_MOV | BPF_X: emit_alu3_K(SRL, src, 0, dst, ctx); + if (insn_is_zext(&insn[1])) + return 1; break; case BPF_ALU64 | BPF_MOV | BPF_X: emit_reg_move(src, dst, ctx); @@ -942,6 +944,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) case BPF_ALU | BPF_DIV | BPF_X: emit_write_y(G0, ctx); emit_alu(DIV, src, dst, ctx); + if (insn_is_zext(&insn[1])) + return 1; break; case BPF_ALU64 | BPF_DIV | BPF_X: emit_alu(UDIVX, src, dst, ctx); @@ -975,6 +979,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) break; case BPF_ALU | BPF_RSH | BPF_X: emit_alu(SRL, src, dst, ctx); + if (insn_is_zext(&insn[1])) + return 1; break; case BPF_ALU64 | BPF_RSH | BPF_X: emit_alu(SRLX, src, dst, ctx); @@ -997,9 +1003,12 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) case 16: emit_alu_K(SLL, dst, 16, ctx); emit_alu_K(SRL, dst, 16, ctx); + if (insn_is_zext(&insn[1])) + return 1; break; case 32: - emit_alu_K(SRL, dst, 0, ctx); + if (!ctx->prog->aux->verifier_zext) + emit_alu_K(SRL, dst, 0, ctx); break; case 64: /* nop */ @@ -1021,6 +1030,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) emit_alu3_K(AND, dst, 0xff, dst, ctx); emit_alu3_K(SLL, tmp, 8, tmp, ctx); emit_alu(OR, tmp, dst, ctx); + if (insn_is_zext(&insn[1])) + return 1; break; case 32: @@ -1037,6 +1048,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) emit_alu3_K(AND, dst, 0xff, dst, ctx); /* dst = dst & 0xff */ emit_alu3_K(SLL, dst, 24, dst, ctx); /* dst = dst << 24 */ emit_alu(OR, tmp, dst, ctx); /* dst = dst | tmp */ + if (insn_is_zext(&insn[1])) + return 1; break; case 64: @@ -1050,6 +1063,8 @@ static int build_insn(const 
struct bpf_insn *insn, struct jit_ctx *ctx) /* dst = imm */ case BPF_ALU | BPF_MOV | BPF_K: emit_loadimm32(imm, dst, ctx); + if (insn_is_zext(&insn[1])) + return 1; break; case BPF_ALU64 | BPF_MOV | BPF_K: emit_loadimm_sext(imm, dst, ctx); @@ -1132,6 +1147,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) break; case BPF_ALU | BPF_RSH | BPF_K: emit_alu_K(SRL, dst, imm, ctx); + if (insn_is_zext(&insn[1])) + return 1; break; case BPF_ALU64 | BPF_RSH | BPF_K: emit_alu_K(SRLX, dst, imm, ctx); @@ -1144,7 +1161,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) break; do_alu32_trunc: - if (BPF_CLASS(code) == BPF_ALU) + if (BPF_CLASS(code) == BPF_ALU && + !ctx->prog->aux->verifier_zext) emit_alu_K(SRL, dst, 0, ctx); break; @@ -1265,6 +1283,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) rs2 = RS2(tmp); } emit(opcode | RS1(src) | rs2 | RD(dst), ctx); + if (opcode != LD64 && insn_is_zext(&insn[1])) + return 1; break; } /* ST: *(size *)(dst + off) = imm */ @@ -1432,6 +1452,11 @@ static void jit_fill_hole(void *area, unsigned int size) *ptr++ = 0x91d02005; /* ta 5 */ } +bool bpf_jit_needs_zext(void) +{ + return true; +} + struct sparc64_jit_data { struct bpf_binary_header *header; u8 *image; From 836256bf5f379c298ffa99f62e329d9f40f060ac Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Fri, 24 May 2019 23:25:26 +0100 Subject: [PATCH 39/83] x32: bpf: eliminate zero extension code-gen Cc: Wang YanQing Tested-by: Wang YanQing Signed-off-by: Jiong Wang Signed-off-by: Alexei Starovoitov --- arch/x86/net/bpf_jit_comp32.c | 83 +++++++++++++++++++++++------------ 1 file changed, 56 insertions(+), 27 deletions(-) diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c index b29e82f190c7..133433d181ba 100644 --- a/arch/x86/net/bpf_jit_comp32.c +++ b/arch/x86/net/bpf_jit_comp32.c @@ -253,13 +253,14 @@ static inline void emit_ia32_mov_r(const u8 dst, const u8 src, bool dstk, /* dst = src */ static inline void emit_ia32_mov_r64(const bool is64, const u8 dst[], const u8 src[], bool dstk, - bool sstk, u8 **pprog) + bool sstk, u8 **pprog, + const struct bpf_prog_aux *aux) { emit_ia32_mov_r(dst_lo, src_lo, dstk, sstk, pprog); if (is64) /* complete 8 byte move */ emit_ia32_mov_r(dst_hi, src_hi, dstk, sstk, pprog); - else + else if (!aux->verifier_zext) /* zero out high 4 bytes */ emit_ia32_mov_i(dst_hi, 0, dstk, pprog); } @@ -313,7 +314,8 @@ static inline void emit_ia32_mul_r(const u8 dst, const u8 src, bool dstk, } static inline void emit_ia32_to_le_r64(const u8 dst[], s32 val, - bool dstk, u8 **pprog) + bool dstk, u8 **pprog, + const struct bpf_prog_aux *aux) { u8 *prog = *pprog; int cnt = 0; @@ -334,12 +336,14 @@ static inline void emit_ia32_to_le_r64(const u8 dst[], s32 val, */ EMIT2(0x0F, 0xB7); EMIT1(add_2reg(0xC0, dreg_lo, dreg_lo)); - /* xor dreg_hi,dreg_hi */ - EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi)); + if (!aux->verifier_zext) + /* xor dreg_hi,dreg_hi */ + EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi)); break; case 32: - /* xor dreg_hi,dreg_hi */ - EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi)); + if (!aux->verifier_zext) + /* xor dreg_hi,dreg_hi */ + EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi)); break; case 64: /* nop */ @@ -358,7 +362,8 @@ static inline void emit_ia32_to_le_r64(const u8 dst[], s32 val, } static inline void emit_ia32_to_be_r64(const u8 dst[], s32 val, - bool dstk, u8 **pprog) + bool dstk, u8 **pprog, + const struct bpf_prog_aux *aux) { u8 *prog = *pprog; int cnt = 0; @@ -380,16 +385,18 @@ 
static inline void emit_ia32_to_be_r64(const u8 dst[], s32 val, EMIT2(0x0F, 0xB7); EMIT1(add_2reg(0xC0, dreg_lo, dreg_lo)); - /* xor dreg_hi,dreg_hi */ - EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi)); + if (!aux->verifier_zext) + /* xor dreg_hi,dreg_hi */ + EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi)); break; case 32: /* Emit 'bswap eax' to swap lower 4 bytes */ EMIT1(0x0F); EMIT1(add_1reg(0xC8, dreg_lo)); - /* xor dreg_hi,dreg_hi */ - EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi)); + if (!aux->verifier_zext) + /* xor dreg_hi,dreg_hi */ + EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi)); break; case 64: /* Emit 'bswap eax' to swap lower 4 bytes */ @@ -569,7 +576,7 @@ static inline void emit_ia32_alu_r(const bool is64, const bool hi, const u8 op, static inline void emit_ia32_alu_r64(const bool is64, const u8 op, const u8 dst[], const u8 src[], bool dstk, bool sstk, - u8 **pprog) + u8 **pprog, const struct bpf_prog_aux *aux) { u8 *prog = *pprog; @@ -577,7 +584,7 @@ static inline void emit_ia32_alu_r64(const bool is64, const u8 op, if (is64) emit_ia32_alu_r(is64, true, op, dst_hi, src_hi, dstk, sstk, &prog); - else + else if (!aux->verifier_zext) emit_ia32_mov_i(dst_hi, 0, dstk, &prog); *pprog = prog; } @@ -668,7 +675,8 @@ static inline void emit_ia32_alu_i(const bool is64, const bool hi, const u8 op, /* ALU operation (64 bit) */ static inline void emit_ia32_alu_i64(const bool is64, const u8 op, const u8 dst[], const u32 val, - bool dstk, u8 **pprog) + bool dstk, u8 **pprog, + const struct bpf_prog_aux *aux) { u8 *prog = *pprog; u32 hi = 0; @@ -679,7 +687,7 @@ static inline void emit_ia32_alu_i64(const bool is64, const u8 op, emit_ia32_alu_i(is64, false, op, dst_lo, val, dstk, &prog); if (is64) emit_ia32_alu_i(is64, true, op, dst_hi, hi, dstk, &prog); - else + else if (!aux->verifier_zext) emit_ia32_mov_i(dst_hi, 0, dstk, &prog); *pprog = prog; @@ -1713,8 +1721,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, case BPF_ALU64 | BPF_MOV | BPF_X: switch (BPF_SRC(code)) { case BPF_X: - emit_ia32_mov_r64(is64, dst, src, dstk, - sstk, &prog); + if (imm32 == 1) { + /* Special mov32 for zext. 
*/ + emit_ia32_mov_i(dst_hi, 0, dstk, &prog); + break; + } + emit_ia32_mov_r64(is64, dst, src, dstk, sstk, + &prog, bpf_prog->aux); break; case BPF_K: /* Sign-extend immediate value to dst reg */ @@ -1754,11 +1767,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, switch (BPF_SRC(code)) { case BPF_X: emit_ia32_alu_r64(is64, BPF_OP(code), dst, - src, dstk, sstk, &prog); + src, dstk, sstk, &prog, + bpf_prog->aux); break; case BPF_K: emit_ia32_alu_i64(is64, BPF_OP(code), dst, - imm32, dstk, &prog); + imm32, dstk, &prog, + bpf_prog->aux); break; } break; @@ -1777,7 +1792,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, false, &prog); break; } - emit_ia32_mov_i(dst_hi, 0, dstk, &prog); + if (!bpf_prog->aux->verifier_zext) + emit_ia32_mov_i(dst_hi, 0, dstk, &prog); break; case BPF_ALU | BPF_LSH | BPF_X: case BPF_ALU | BPF_RSH | BPF_X: @@ -1797,7 +1813,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, &prog); break; } - emit_ia32_mov_i(dst_hi, 0, dstk, &prog); + if (!bpf_prog->aux->verifier_zext) + emit_ia32_mov_i(dst_hi, 0, dstk, &prog); break; /* dst = dst / src(imm) */ /* dst = dst % src(imm) */ @@ -1819,7 +1836,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, &prog); break; } - emit_ia32_mov_i(dst_hi, 0, dstk, &prog); + if (!bpf_prog->aux->verifier_zext) + emit_ia32_mov_i(dst_hi, 0, dstk, &prog); break; case BPF_ALU64 | BPF_DIV | BPF_K: case BPF_ALU64 | BPF_DIV | BPF_X: @@ -1836,7 +1854,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX), imm32); emit_ia32_shift_r(BPF_OP(code), dst_lo, IA32_ECX, dstk, false, &prog); - emit_ia32_mov_i(dst_hi, 0, dstk, &prog); + if (!bpf_prog->aux->verifier_zext) + emit_ia32_mov_i(dst_hi, 0, dstk, &prog); break; /* dst = dst << imm */ case BPF_ALU64 | BPF_LSH | BPF_K: @@ -1872,7 +1891,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, case BPF_ALU | BPF_NEG: emit_ia32_alu_i(is64, false, BPF_OP(code), dst_lo, 0, dstk, &prog); - emit_ia32_mov_i(dst_hi, 0, dstk, &prog); + if (!bpf_prog->aux->verifier_zext) + emit_ia32_mov_i(dst_hi, 0, dstk, &prog); break; /* dst = ~dst (64 bit) */ case BPF_ALU64 | BPF_NEG: @@ -1892,11 +1912,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, break; /* dst = htole(dst) */ case BPF_ALU | BPF_END | BPF_FROM_LE: - emit_ia32_to_le_r64(dst, imm32, dstk, &prog); + emit_ia32_to_le_r64(dst, imm32, dstk, &prog, + bpf_prog->aux); break; /* dst = htobe(dst) */ case BPF_ALU | BPF_END | BPF_FROM_BE: - emit_ia32_to_be_r64(dst, imm32, dstk, &prog); + emit_ia32_to_be_r64(dst, imm32, dstk, &prog, + bpf_prog->aux); break; /* dst = imm64 */ case BPF_LD | BPF_IMM | BPF_DW: { @@ -2051,6 +2073,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, case BPF_B: case BPF_H: case BPF_W: + if (!bpf_prog->aux->verifier_zext) + break; if (dstk) { EMIT3(0xC7, add_1reg(0x40, IA32_EBP), STACK_VAR(dst_hi)); @@ -2475,6 +2499,11 @@ notyet: return proglen; } +bool bpf_jit_needs_zext(void) +{ + return true; +} + struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) { struct bpf_binary_header *header = NULL; From 66d0d5a854a6625974e7de4b874e7934988b0ef8 Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Fri, 24 May 2019 23:25:27 +0100 Subject: [PATCH 40/83] riscv: bpf: eliminate zero extension code-gen MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Cc: Björn Töpel Acked-by: Björn Töpel Tested-by: Björn Töpel 
Signed-off-by: Jiong Wang Signed-off-by: Alexei Starovoitov --- arch/riscv/net/bpf_jit_comp.c | 43 ++++++++++++++++++++++++----------- 1 file changed, 30 insertions(+), 13 deletions(-) diff --git a/arch/riscv/net/bpf_jit_comp.c b/arch/riscv/net/bpf_jit_comp.c index 80b12aa5e10d..c4c836e3d318 100644 --- a/arch/riscv/net/bpf_jit_comp.c +++ b/arch/riscv/net/bpf_jit_comp.c @@ -731,6 +731,7 @@ static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, { bool is64 = BPF_CLASS(insn->code) == BPF_ALU64 || BPF_CLASS(insn->code) == BPF_JMP; + struct bpf_prog_aux *aux = ctx->prog->aux; int rvoff, i = insn - ctx->prog->insnsi; u8 rd = -1, rs = -1, code = insn->code; s16 off = insn->off; @@ -742,8 +743,13 @@ static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, /* dst = src */ case BPF_ALU | BPF_MOV | BPF_X: case BPF_ALU64 | BPF_MOV | BPF_X: + if (imm == 1) { + /* Special mov32 for zext */ + emit_zext_32(rd, ctx); + break; + } emit(is64 ? rv_addi(rd, rs, 0) : rv_addiw(rd, rs, 0), ctx); - if (!is64) + if (!is64 && !aux->verifier_zext) emit_zext_32(rd, ctx); break; @@ -771,19 +777,19 @@ static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, case BPF_ALU | BPF_MUL | BPF_X: case BPF_ALU64 | BPF_MUL | BPF_X: emit(is64 ? rv_mul(rd, rd, rs) : rv_mulw(rd, rd, rs), ctx); - if (!is64) + if (!is64 && !aux->verifier_zext) emit_zext_32(rd, ctx); break; case BPF_ALU | BPF_DIV | BPF_X: case BPF_ALU64 | BPF_DIV | BPF_X: emit(is64 ? rv_divu(rd, rd, rs) : rv_divuw(rd, rd, rs), ctx); - if (!is64) + if (!is64 && !aux->verifier_zext) emit_zext_32(rd, ctx); break; case BPF_ALU | BPF_MOD | BPF_X: case BPF_ALU64 | BPF_MOD | BPF_X: emit(is64 ? rv_remu(rd, rd, rs) : rv_remuw(rd, rd, rs), ctx); - if (!is64) + if (!is64 && !aux->verifier_zext) emit_zext_32(rd, ctx); break; case BPF_ALU | BPF_LSH | BPF_X: @@ -867,7 +873,7 @@ out_be: case BPF_ALU | BPF_MOV | BPF_K: case BPF_ALU64 | BPF_MOV | BPF_K: emit_imm(rd, imm, ctx); - if (!is64) + if (!is64 && !aux->verifier_zext) emit_zext_32(rd, ctx); break; @@ -882,7 +888,7 @@ out_be: emit(is64 ? rv_add(rd, rd, RV_REG_T1) : rv_addw(rd, rd, RV_REG_T1), ctx); } - if (!is64) + if (!is64 && !aux->verifier_zext) emit_zext_32(rd, ctx); break; case BPF_ALU | BPF_SUB | BPF_K: @@ -895,7 +901,7 @@ out_be: emit(is64 ? rv_sub(rd, rd, RV_REG_T1) : rv_subw(rd, rd, RV_REG_T1), ctx); } - if (!is64) + if (!is64 && !aux->verifier_zext) emit_zext_32(rd, ctx); break; case BPF_ALU | BPF_AND | BPF_K: @@ -906,7 +912,7 @@ out_be: emit_imm(RV_REG_T1, imm, ctx); emit(rv_and(rd, rd, RV_REG_T1), ctx); } - if (!is64) + if (!is64 && !aux->verifier_zext) emit_zext_32(rd, ctx); break; case BPF_ALU | BPF_OR | BPF_K: @@ -917,7 +923,7 @@ out_be: emit_imm(RV_REG_T1, imm, ctx); emit(rv_or(rd, rd, RV_REG_T1), ctx); } - if (!is64) + if (!is64 && !aux->verifier_zext) emit_zext_32(rd, ctx); break; case BPF_ALU | BPF_XOR | BPF_K: @@ -928,7 +934,7 @@ out_be: emit_imm(RV_REG_T1, imm, ctx); emit(rv_xor(rd, rd, RV_REG_T1), ctx); } - if (!is64) + if (!is64 && !aux->verifier_zext) emit_zext_32(rd, ctx); break; case BPF_ALU | BPF_MUL | BPF_K: @@ -936,7 +942,7 @@ out_be: emit_imm(RV_REG_T1, imm, ctx); emit(is64 ? rv_mul(rd, rd, RV_REG_T1) : rv_mulw(rd, rd, RV_REG_T1), ctx); - if (!is64) + if (!is64 && !aux->verifier_zext) emit_zext_32(rd, ctx); break; case BPF_ALU | BPF_DIV | BPF_K: @@ -944,7 +950,7 @@ out_be: emit_imm(RV_REG_T1, imm, ctx); emit(is64 ? 
rv_divu(rd, rd, RV_REG_T1) : rv_divuw(rd, rd, RV_REG_T1), ctx); - if (!is64) + if (!is64 && !aux->verifier_zext) emit_zext_32(rd, ctx); break; case BPF_ALU | BPF_MOD | BPF_K: @@ -952,7 +958,7 @@ out_be: emit_imm(RV_REG_T1, imm, ctx); emit(is64 ? rv_remu(rd, rd, RV_REG_T1) : rv_remuw(rd, rd, RV_REG_T1), ctx); - if (!is64) + if (!is64 && !aux->verifier_zext) emit_zext_32(rd, ctx); break; case BPF_ALU | BPF_LSH | BPF_K: @@ -1239,6 +1245,8 @@ out_be: emit_imm(RV_REG_T1, off, ctx); emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx); emit(rv_lbu(rd, 0, RV_REG_T1), ctx); + if (insn_is_zext(&insn[1])) + return 1; break; case BPF_LDX | BPF_MEM | BPF_H: if (is_12b_int(off)) { @@ -1249,6 +1257,8 @@ out_be: emit_imm(RV_REG_T1, off, ctx); emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx); emit(rv_lhu(rd, 0, RV_REG_T1), ctx); + if (insn_is_zext(&insn[1])) + return 1; break; case BPF_LDX | BPF_MEM | BPF_W: if (is_12b_int(off)) { @@ -1259,6 +1269,8 @@ out_be: emit_imm(RV_REG_T1, off, ctx); emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx); emit(rv_lwu(rd, 0, RV_REG_T1), ctx); + if (insn_is_zext(&insn[1])) + return 1; break; case BPF_LDX | BPF_MEM | BPF_DW: if (is_12b_int(off)) { @@ -1503,6 +1515,11 @@ static void bpf_flush_icache(void *start, void *end) flush_icache_range((unsigned long)start, (unsigned long)end); } +bool bpf_jit_needs_zext(void) +{ + return true; +} + struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) { bool tmp_blinded = false, extra_pass = false; From 0b4de1ff19bf878eb38f4f668ee15c9b9eed4240 Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Fri, 24 May 2019 23:25:28 +0100 Subject: [PATCH 41/83] nfp: bpf: eliminate zero extension code-gen This patch eliminate zero extension code-gen for instructions including both alu and load/store. The only exception is for ctx load, because offload target doesn't go through host ctx convert logic so we do customized load and ignores zext flag set by verifier. 
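For comparison with the flag-based approach used for this offload JIT, the host JITs earlier in the series all follow roughly the pattern below; this is a minimal sketch with illustrative names (jit_ctx and emit_mov32 stand in for the arch-specific pieces), showing how a JIT claims the zero-extension insn the verifier placed right after a 32-bit operation:

/* sketch only: returns how many extra insns were consumed */
static int jit_mov32(struct jit_ctx *ctx, const struct bpf_insn *insn,
		     u8 dst, u8 src)
{
	emit_mov32(ctx, dst, src);	/* native 32-bit op already zero-extends dst */
	/* If the next insn is the verifier-inserted zext, it is redundant
	 * on this architecture and can be skipped.
	 */
	if (insn_is_zext(&insn[1]))
		return 1;
	return 0;
}
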
Cc: Jakub Kicinski Reviewed-by: Jakub Kicinski Signed-off-by: Jiong Wang Signed-off-by: Alexei Starovoitov --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 115 ++++++++++-------- drivers/net/ethernet/netronome/nfp/bpf/main.h | 2 + .../net/ethernet/netronome/nfp/bpf/verifier.c | 12 ++ 3 files changed, 81 insertions(+), 48 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index d4bf0e694541..4054b70d7719 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -622,6 +622,13 @@ static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm) } } +static void +wrp_zext(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, u8 dst) +{ + if (meta->flags & FLAG_INSN_DO_ZEXT) + wrp_immed(nfp_prog, reg_both(dst + 1), 0); +} + static void wrp_immed_relo(struct nfp_prog *nfp_prog, swreg dst, u32 imm, enum nfp_relo_type relo) @@ -858,7 +865,8 @@ static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) } static int -data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size) +data_ld(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, swreg offset, + u8 dst_gpr, int size) { unsigned int i; u16 shift, sz; @@ -881,14 +889,15 @@ data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size) wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i)); if (i < 2) - wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0); + wrp_zext(nfp_prog, meta, dst_gpr); return 0; } static int -data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, - swreg lreg, swreg rreg, int size, enum cmd_mode mode) +data_ld_host_order(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, + u8 dst_gpr, swreg lreg, swreg rreg, int size, + enum cmd_mode mode) { unsigned int i; u8 mask, sz; @@ -911,33 +920,34 @@ data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i)); if (i < 2) - wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0); + wrp_zext(nfp_prog, meta, dst_gpr); return 0; } static int -data_ld_host_order_addr32(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset, - u8 dst_gpr, u8 size) +data_ld_host_order_addr32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, + u8 src_gpr, swreg offset, u8 dst_gpr, u8 size) { - return data_ld_host_order(nfp_prog, dst_gpr, reg_a(src_gpr), offset, - size, CMD_MODE_32b); + return data_ld_host_order(nfp_prog, meta, dst_gpr, reg_a(src_gpr), + offset, size, CMD_MODE_32b); } static int -data_ld_host_order_addr40(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset, - u8 dst_gpr, u8 size) +data_ld_host_order_addr40(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, + u8 src_gpr, swreg offset, u8 dst_gpr, u8 size) { swreg rega, regb; addr40_offset(nfp_prog, src_gpr, offset, ®a, ®b); - return data_ld_host_order(nfp_prog, dst_gpr, rega, regb, + return data_ld_host_order(nfp_prog, meta, dst_gpr, rega, regb, size, CMD_MODE_40b_BA); } static int -construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size) +construct_data_ind_ld(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, + u16 offset, u16 src, u8 size) { swreg tmp_reg; @@ -953,10 +963,12 @@ construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size) emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT); /* Load data */ - return data_ld(nfp_prog, imm_b(nfp_prog), 0, size); + return data_ld(nfp_prog, meta, imm_b(nfp_prog), 0, size); } -static int construct_data_ld(struct nfp_prog 
*nfp_prog, u16 offset, u8 size) +static int +construct_data_ld(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, + u16 offset, u8 size) { swreg tmp_reg; @@ -967,7 +979,7 @@ static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size) /* Load data */ tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog)); - return data_ld(nfp_prog, tmp_reg, 0, size); + return data_ld(nfp_prog, meta, tmp_reg, 0, size); } static int @@ -1204,7 +1216,7 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, } if (clr_gpr && size < 8) - wrp_immed(nfp_prog, reg_both(gpr + 1), 0); + wrp_zext(nfp_prog, meta, gpr); while (size) { u32 slice_end; @@ -1305,9 +1317,10 @@ wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, enum alu_op alu_op) { const struct bpf_insn *insn = &meta->insn; + u8 dst = insn->dst_reg * 2; - wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm); - wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); + wrp_alu_imm(nfp_prog, dst, alu_op, insn->imm); + wrp_zext(nfp_prog, meta, dst); return 0; } @@ -1319,7 +1332,7 @@ wrp_alu32_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2; emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src)); - wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); + wrp_zext(nfp_prog, meta, dst); return 0; } @@ -2396,12 +2409,14 @@ static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) u8 dst = meta->insn.dst_reg * 2; emit_alu(nfp_prog, reg_both(dst), reg_imm(0), ALU_OP_SUB, reg_b(dst)); - wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); + wrp_zext(nfp_prog, meta, dst); return 0; } -static int __ashr_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) +static int +__ashr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, u8 dst, + u8 shift_amt) { if (shift_amt) { /* Set signedness bit (MSB of result). 
*/ @@ -2410,7 +2425,7 @@ static int __ashr_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, reg_b(dst), SHF_SC_R_SHF, shift_amt); } - wrp_immed(nfp_prog, reg_both(dst + 1), 0); + wrp_zext(nfp_prog, meta, dst); return 0; } @@ -2425,7 +2440,7 @@ static int ashr_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) umin = meta->umin_src; umax = meta->umax_src; if (umin == umax) - return __ashr_imm(nfp_prog, dst, umin); + return __ashr_imm(nfp_prog, meta, dst, umin); src = insn->src_reg * 2; /* NOTE: the first insn will set both indirect shift amount (source A) @@ -2434,7 +2449,7 @@ static int ashr_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst)); emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, reg_b(dst), SHF_SC_R_SHF); - wrp_immed(nfp_prog, reg_both(dst + 1), 0); + wrp_zext(nfp_prog, meta, dst); return 0; } @@ -2444,15 +2459,17 @@ static int ashr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) const struct bpf_insn *insn = &meta->insn; u8 dst = insn->dst_reg * 2; - return __ashr_imm(nfp_prog, dst, insn->imm); + return __ashr_imm(nfp_prog, meta, dst, insn->imm); } -static int __shr_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) +static int +__shr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, u8 dst, + u8 shift_amt) { if (shift_amt) emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, reg_b(dst), SHF_SC_R_SHF, shift_amt); - wrp_immed(nfp_prog, reg_both(dst + 1), 0); + wrp_zext(nfp_prog, meta, dst); return 0; } @@ -2461,7 +2478,7 @@ static int shr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) const struct bpf_insn *insn = &meta->insn; u8 dst = insn->dst_reg * 2; - return __shr_imm(nfp_prog, dst, insn->imm); + return __shr_imm(nfp_prog, meta, dst, insn->imm); } static int shr_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) @@ -2474,22 +2491,24 @@ static int shr_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) umin = meta->umin_src; umax = meta->umax_src; if (umin == umax) - return __shr_imm(nfp_prog, dst, umin); + return __shr_imm(nfp_prog, meta, dst, umin); src = insn->src_reg * 2; emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, reg_b(dst), SHF_SC_R_SHF); - wrp_immed(nfp_prog, reg_both(dst + 1), 0); + wrp_zext(nfp_prog, meta, dst); return 0; } -static int __shl_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) +static int +__shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, u8 dst, + u8 shift_amt) { if (shift_amt) emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, reg_b(dst), SHF_SC_L_SHF, shift_amt); - wrp_immed(nfp_prog, reg_both(dst + 1), 0); + wrp_zext(nfp_prog, meta, dst); return 0; } @@ -2498,7 +2517,7 @@ static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) const struct bpf_insn *insn = &meta->insn; u8 dst = insn->dst_reg * 2; - return __shl_imm(nfp_prog, dst, insn->imm); + return __shl_imm(nfp_prog, meta, dst, insn->imm); } static int shl_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) @@ -2511,11 +2530,11 @@ static int shl_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) umin = meta->umin_src; umax = meta->umax_src; if (umin == umax) - return __shl_imm(nfp_prog, dst, umin); + return __shl_imm(nfp_prog, meta, dst, umin); src = insn->src_reg * 2; shl_reg64_lt32_low(nfp_prog, dst, src); - wrp_immed(nfp_prog, 
reg_both(dst + 1), 0); + wrp_zext(nfp_prog, meta, dst); return 0; } @@ -2577,34 +2596,34 @@ static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int data_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - return construct_data_ld(nfp_prog, meta->insn.imm, 1); + return construct_data_ld(nfp_prog, meta, meta->insn.imm, 1); } static int data_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - return construct_data_ld(nfp_prog, meta->insn.imm, 2); + return construct_data_ld(nfp_prog, meta, meta->insn.imm, 2); } static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - return construct_data_ld(nfp_prog, meta->insn.imm, 4); + return construct_data_ld(nfp_prog, meta, meta->insn.imm, 4); } static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - return construct_data_ind_ld(nfp_prog, meta->insn.imm, + return construct_data_ind_ld(nfp_prog, meta, meta->insn.imm, meta->insn.src_reg * 2, 1); } static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - return construct_data_ind_ld(nfp_prog, meta->insn.imm, + return construct_data_ind_ld(nfp_prog, meta, meta->insn.imm, meta->insn.src_reg * 2, 2); } static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - return construct_data_ind_ld(nfp_prog, meta->insn.imm, + return construct_data_ind_ld(nfp_prog, meta, meta->insn.imm, meta->insn.src_reg * 2, 4); } @@ -2682,7 +2701,7 @@ mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); - return data_ld_host_order_addr32(nfp_prog, meta->insn.src_reg * 2, + return data_ld_host_order_addr32(nfp_prog, meta, meta->insn.src_reg * 2, tmp_reg, meta->insn.dst_reg * 2, size); } @@ -2694,7 +2713,7 @@ mem_ldx_emem(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); - return data_ld_host_order_addr40(nfp_prog, meta->insn.src_reg * 2, + return data_ld_host_order_addr40(nfp_prog, meta, meta->insn.src_reg * 2, tmp_reg, meta->insn.dst_reg * 2, size); } @@ -2755,7 +2774,7 @@ mem_ldx_data_from_pktcache_unaligned(struct nfp_prog *nfp_prog, wrp_reg_subpart(nfp_prog, dst_lo, src_lo, len_lo, off); if (!len_mid) { - wrp_immed(nfp_prog, dst_hi, 0); + wrp_zext(nfp_prog, meta, dst_gpr); return 0; } @@ -2763,7 +2782,7 @@ mem_ldx_data_from_pktcache_unaligned(struct nfp_prog *nfp_prog, if (size <= REG_WIDTH) { wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid, len_mid, len_lo); - wrp_immed(nfp_prog, dst_hi, 0); + wrp_zext(nfp_prog, meta, dst_gpr); } else { swreg src_hi = reg_xfer(idx + 2); @@ -2794,10 +2813,10 @@ mem_ldx_data_from_pktcache_aligned(struct nfp_prog *nfp_prog, if (size < REG_WIDTH) { wrp_reg_subpart(nfp_prog, dst_lo, src_lo, size, 0); - wrp_immed(nfp_prog, dst_hi, 0); + wrp_zext(nfp_prog, meta, dst_gpr); } else if (size == REG_WIDTH) { wrp_mov(nfp_prog, dst_lo, src_lo); - wrp_immed(nfp_prog, dst_hi, 0); + wrp_zext(nfp_prog, meta, dst_gpr); } else { swreg src_hi = reg_xfer(idx + 1); diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index e54d1ac84df2..57d6ff51e980 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -238,6 +238,8 @@ struct nfp_bpf_reg_state { #define FLAG_INSN_SKIP_PREC_DEPENDENT BIT(4) /* Instruction is optimized by the verifier */ #define FLAG_INSN_SKIP_VERIFIER_OPT BIT(5) +/* Instruction needs to zero 
extend to high 32-bit */ +#define FLAG_INSN_DO_ZEXT BIT(6) #define FLAG_INSN_SKIP_MASK (FLAG_INSN_SKIP_NOOP | \ FLAG_INSN_SKIP_PREC_DEPENDENT | \ diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c index 36f56eb4cbe2..e92ee510fd52 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c @@ -744,6 +744,17 @@ continue_subprog: goto continue_subprog; } +static void nfp_bpf_insn_flag_zext(struct nfp_prog *nfp_prog, + struct bpf_insn_aux_data *aux) +{ + struct nfp_insn_meta *meta; + + list_for_each_entry(meta, &nfp_prog->insns, l) { + if (aux[meta->n].zext_dst) + meta->flags |= FLAG_INSN_DO_ZEXT; + } +} + int nfp_bpf_finalize(struct bpf_verifier_env *env) { struct bpf_subprog_info *info; @@ -784,6 +795,7 @@ int nfp_bpf_finalize(struct bpf_verifier_env *env) return -EOPNOTSUPP; } + nfp_bpf_insn_flag_zext(nfp_prog, env->insn_aux_data); return 0; } From d9a6f413f8c7bd56295098f8b1bf8331af923490 Mon Sep 17 00:00:00 2001 From: Matteo Croce Date: Fri, 24 May 2019 21:59:12 +0200 Subject: [PATCH 42/83] samples: bpf: add ibumad sample to .gitignore This commit adds ibumad to .gitignore which is currently ommited from the ignore file. Signed-off-by: Matteo Croce Signed-off-by: Alexei Starovoitov --- samples/bpf/.gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/samples/bpf/.gitignore b/samples/bpf/.gitignore index c7498457595a..74d31fd3c99c 100644 --- a/samples/bpf/.gitignore +++ b/samples/bpf/.gitignore @@ -1,6 +1,7 @@ cpustat fds_example hbm +ibumad lathist lwt_len_hist map_perf_test From 0d97dacc46d9af2daba1af224747d452bd988365 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Sat, 25 May 2019 17:01:01 -0700 Subject: [PATCH 43/83] bpftool: auto-complete BTF IDs for btf dump Auto-complete BTF IDs for `btf dump id` sub-command. List of possible BTF IDs is scavenged from loaded BPF programs that have associated BTFs, as there is currently no API in libbpf to fetch list of all BTFs in the system. Suggested-by: Quentin Monnet Signed-off-by: Andrii Nakryiko Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- tools/bpf/bpftool/bash-completion/bpftool | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool index 75c01eafd3a1..fbbad0ed8e82 100644 --- a/tools/bpf/bpftool/bash-completion/bpftool +++ b/tools/bpf/bpftool/bash-completion/bpftool @@ -71,6 +71,12 @@ _bpftool_get_prog_tags() command sed -n 's/.*"tag": "\(.*\)",$/\1/p' )" -- "$cur" ) ) } +_bpftool_get_btf_ids() +{ + COMPREPLY+=( $( compgen -W "$( bpftool -jp prog 2>&1 | \ + command sed -n 's/.*"btf_id": \(.*\),\?$/\1/p' )" -- "$cur" ) ) +} + _bpftool_get_obj_map_names() { local obj @@ -635,6 +641,9 @@ _bpftool() map) _bpftool_get_map_ids ;; + dump) + _bpftool_get_btf_ids + ;; esac return 0 ;; From e1afb70252a8614e1ef7aec05ff1b84fd324b782 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Sat, 25 May 2019 11:57:53 -0700 Subject: [PATCH 44/83] bpf: check signal validity in nmi for bpf_send_signal() helper Commit 8b401f9ed244 ("bpf: implement bpf_send_signal() helper") introduced bpf_send_signal() helper. If the context is nmi, the sending signal work needs to be deferred to irq_work. If the signal is invalid, the error will appear in irq_work and it won't be propagated to user. This patch did an early check in the helper itself to notify user invalid signal, as suggested by Daniel. 
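To illustrate the user-visible effect, here is a minimal sketch of a tracing program that now sees the error synchronously (assuming a bpf_helpers.h that declares bpf_send_signal(); signal number 65 is chosen to be out of range on common configurations):

#include <linux/bpf.h>
#include "bpf_helpers.h"

SEC("perf_event")
int send_sig_check(void *ctx)
{
	/* perf_event programs may run in NMI context; with this patch an
	 * invalid signal fails fast with -EINVAL instead of the error
	 * being lost in the deferred irq_work.
	 */
	if (bpf_send_signal(65))
		return 1;
	return 0;
}

char _license[] SEC("license") = "GPL";
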
Suggested-by: Daniel Borkmann Signed-off-by: Yonghong Song Signed-off-by: Daniel Borkmann --- kernel/trace/bpf_trace.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 70029eafc71f..fe73926a07cd 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -600,6 +600,12 @@ BPF_CALL_1(bpf_send_signal, u32, sig) return -EPERM; if (in_nmi()) { + /* Do an early check on signal validity. Otherwise, + * the error is lost in deferred irq_work. + */ + if (unlikely(!valid_signal(sig))) + return -EINVAL; + work = this_cpu_ptr(&send_signal_work); if (work->irq_work.flags & IRQ_WORK_BUSY) return -EBUSY; From 92bd6820f2b11c2decdd92e1f9020e804dbc2ed1 Mon Sep 17 00:00:00 2001 From: Chang-Hsien Tsai Date: Sun, 26 May 2019 10:32:11 +0000 Subject: [PATCH 45/83] bpf: style fix in while(!feof()) loop Use fgets() as the while loop condition. Signed-off-by: Chang-Hsien Tsai Acked-by: Yonghong Song Signed-off-by: Daniel Borkmann --- tools/bpf/bpftool/xlated_dumper.c | 4 +--- tools/testing/selftests/bpf/trace_helpers.c | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/tools/bpf/bpftool/xlated_dumper.c b/tools/bpf/bpftool/xlated_dumper.c index 0bb17bf88b18..494d7ae3614d 100644 --- a/tools/bpf/bpftool/xlated_dumper.c +++ b/tools/bpf/bpftool/xlated_dumper.c @@ -31,9 +31,7 @@ void kernel_syms_load(struct dump_data *dd) if (!fp) return; - while (!feof(fp)) { - if (!fgets(buff, sizeof(buff), fp)) - break; + while (fgets(buff, sizeof(buff), fp)) { tmp = reallocarray(dd->sym_mapping, dd->sym_count + 1, sizeof(*dd->sym_mapping)); if (!tmp) { diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c index 9a9fc6c9b70b..b47f205f0310 100644 --- a/tools/testing/selftests/bpf/trace_helpers.c +++ b/tools/testing/selftests/bpf/trace_helpers.c @@ -30,9 +30,7 @@ int load_kallsyms(void) if (!f) return -ENOENT; - while (!feof(f)) { - if (!fgets(buf, sizeof(buf), f)) - break; + while (fgets(buf, sizeof(buf), f)) { if (sscanf(buf, "%p %c %s", &addr, &symbol, func) != 3) break; if (!addr) From d98363b510ce9938a19b07fe3c02357d8c65d52a Mon Sep 17 00:00:00 2001 From: Hariprasad Kelam Date: Sat, 25 May 2019 14:32:57 +0530 Subject: [PATCH 46/83] libbpf: fix warning that PTR_ERR_OR_ZERO can be used Fix below warning reported by coccicheck: /tools/lib/bpf/libbpf.c:3461:1-3: WARNING: PTR_ERR_OR_ZERO can be used Signed-off-by: Hariprasad Kelam Acked-by: Yonghong Song Signed-off-by: Daniel Borkmann --- tools/lib/bpf/libbpf.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index ff149372b3c0..fd5905ac2ec3 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -3460,9 +3460,7 @@ bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset) long libbpf_get_error(const void *ptr) { - if (IS_ERR(ptr)) - return PTR_ERR(ptr); - return 0; + return PTR_ERR_OR_ZERO(ptr); } int bpf_prog_load(const char *file, enum bpf_prog_type type, From 775bc8ada89b376b4bbbce31aba47f4117fe1d9c Mon Sep 17 00:00:00 2001 From: Quentin Monnet Date: Fri, 24 May 2019 11:36:46 +0100 Subject: [PATCH 47/83] tools: bpftool: add -d option to get debug output from libbpf libbpf has three levels of priority for output messages: warn, info, debug. By default, debug output is not printed to the console. Add a new "--debug" (short name: "-d") option to bpftool to print libbpf logs for all three levels. 
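Any application linked against libbpf can opt in to the same behaviour through the print-callback API; a minimal sketch (the include path may differ depending on how libbpf is installed):

#include <stdio.h>
#include <stdarg.h>
#include <libbpf.h>

/* forward every message, whatever its level (warn, info, debug) */
static int print_all(enum libbpf_print_level level,
		     const char *format, va_list args)
{
	return vfprintf(stderr, format, args);
}

int main(int argc, char **argv)
{
	libbpf_set_print(print_all);	/* e.g. only when -d/--debug is set */
	/* ... open and load BPF objects as usual ... */
	return 0;
}
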
Internally, we simply use the function provided by libbpf to replace the default printing function by one that prints logs regardless of their level. v2: - Remove the possibility to select the log-levels to use (v1 offered a combination of "warn", "info" and "debug"). - Rename option and offer a short name: -d|--debug. - Add option description to all bpftool manual pages (instead of bpftool-prog.rst only), as all commands use libbpf. Signed-off-by: Quentin Monnet Reviewed-by: Jakub Kicinski Signed-off-by: Daniel Borkmann --- tools/bpf/bpftool/Documentation/bpftool-btf.rst | 4 ++++ tools/bpf/bpftool/Documentation/bpftool-cgroup.rst | 4 ++++ .../bpf/bpftool/Documentation/bpftool-feature.rst | 4 ++++ tools/bpf/bpftool/Documentation/bpftool-map.rst | 4 ++++ tools/bpf/bpftool/Documentation/bpftool-net.rst | 4 ++++ tools/bpf/bpftool/Documentation/bpftool-perf.rst | 4 ++++ tools/bpf/bpftool/Documentation/bpftool-prog.rst | 4 ++++ tools/bpf/bpftool/Documentation/bpftool.rst | 3 +++ tools/bpf/bpftool/bash-completion/bpftool | 2 +- tools/bpf/bpftool/main.c | 14 +++++++++++++- 10 files changed, 45 insertions(+), 2 deletions(-) diff --git a/tools/bpf/bpftool/Documentation/bpftool-btf.rst b/tools/bpf/bpftool/Documentation/bpftool-btf.rst index 3daed9eba766..6694a0fc8f99 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-btf.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-btf.rst @@ -72,6 +72,10 @@ OPTIONS -p, --pretty Generate human-readable JSON output. Implies **-j**. + -d, --debug + Print all logs available from libbpf, including debug-level + information. + EXAMPLES ======== **# bpftool btf dump id 1226** diff --git a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst index ac26876389c2..36807735e2a5 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst @@ -113,6 +113,10 @@ OPTIONS -f, --bpffs Show file names of pinned programs. + -d, --debug + Print all logs available from libbpf, including debug-level + information. + EXAMPLES ======== | diff --git a/tools/bpf/bpftool/Documentation/bpftool-feature.rst b/tools/bpf/bpftool/Documentation/bpftool-feature.rst index 14180e887082..4d08f35034a2 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-feature.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-feature.rst @@ -73,6 +73,10 @@ OPTIONS -p, --pretty Generate human-readable JSON output. Implies **-j**. + -d, --debug + Print all logs available from libbpf, including debug-level + information. + SEE ALSO ======== **bpf**\ (2), diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst index 13ef27b39f20..490b4501cb6e 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-map.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst @@ -152,6 +152,10 @@ OPTIONS Do not automatically attempt to mount any virtual file system (such as tracefs or BPF virtual file system) when necessary. + -d, --debug + Print all logs available from libbpf, including debug-level + information. + EXAMPLES ======== **# bpftool map show** diff --git a/tools/bpf/bpftool/Documentation/bpftool-net.rst b/tools/bpf/bpftool/Documentation/bpftool-net.rst index 934580850f42..d8e5237a2085 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-net.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-net.rst @@ -65,6 +65,10 @@ OPTIONS -p, --pretty Generate human-readable JSON output. Implies **-j**. 
+ -d, --debug + Print all logs available from libbpf, including debug-level + information. + EXAMPLES ======== diff --git a/tools/bpf/bpftool/Documentation/bpftool-perf.rst b/tools/bpf/bpftool/Documentation/bpftool-perf.rst index 0c7576523a21..e252bd0bc434 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-perf.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-perf.rst @@ -53,6 +53,10 @@ OPTIONS -p, --pretty Generate human-readable JSON output. Implies **-j**. + -d, --debug + Print all logs available from libbpf, including debug-level + information. + EXAMPLES ======== diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst index e8118544d118..9a92614569e6 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst @@ -174,6 +174,10 @@ OPTIONS Do not automatically attempt to mount any virtual file system (such as tracefs or BPF virtual file system) when necessary. + -d, --debug + Print all logs available from libbpf, including debug-level + information. + EXAMPLES ======== **# bpftool prog show** diff --git a/tools/bpf/bpftool/Documentation/bpftool.rst b/tools/bpf/bpftool/Documentation/bpftool.rst index 3e562d7fd56f..43dba0717953 100644 --- a/tools/bpf/bpftool/Documentation/bpftool.rst +++ b/tools/bpf/bpftool/Documentation/bpftool.rst @@ -66,6 +66,9 @@ OPTIONS Do not automatically attempt to mount any virtual file system (such as tracefs or BPF virtual file system) when necessary. + -d, --debug + Print all logs available from libbpf, including debug-level + information. SEE ALSO ======== diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool index fbbad0ed8e82..2725e27dfa42 100644 --- a/tools/bpf/bpftool/bash-completion/bpftool +++ b/tools/bpf/bpftool/bash-completion/bpftool @@ -187,7 +187,7 @@ _bpftool() # Deal with options if [[ ${words[cword]} == -* ]]; then - local c='--version --json --pretty --bpffs --mapcompat' + local c='--version --json --pretty --bpffs --mapcompat --debug' COMPREPLY=( $( compgen -W "$c" -- "$cur" ) ) return 0 fi diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c index 1ac1fc520e6a..d74293938a05 100644 --- a/tools/bpf/bpftool/main.c +++ b/tools/bpf/bpftool/main.c @@ -10,6 +10,7 @@ #include #include +#include #include "main.h" @@ -77,6 +78,13 @@ static int do_version(int argc, char **argv) return 0; } +static int __printf(2, 0) +print_all_levels(__maybe_unused enum libbpf_print_level level, + const char *format, va_list args) +{ + return vfprintf(stderr, format, args); +} + int cmd_select(const struct cmd *cmds, int argc, char **argv, int (*help)(int argc, char **argv)) { @@ -317,6 +325,7 @@ int main(int argc, char **argv) { "bpffs", no_argument, NULL, 'f' }, { "mapcompat", no_argument, NULL, 'm' }, { "nomount", no_argument, NULL, 'n' }, + { "debug", no_argument, NULL, 'd' }, { 0 } }; int opt, ret; @@ -332,7 +341,7 @@ int main(int argc, char **argv) hash_init(map_table.table); opterr = 0; - while ((opt = getopt_long(argc, argv, "Vhpjfmn", + while ((opt = getopt_long(argc, argv, "Vhpjfmnd", options, NULL)) >= 0) { switch (opt) { case 'V': @@ -362,6 +371,9 @@ int main(int argc, char **argv) case 'n': block_mount = true; break; + case 'd': + libbpf_set_print(print_all_levels); + break; default: p_err("unrecognized option '%s'", argv[optind - 1]); if (json_output) From 60276f9849988d3d3a54943c9ec27222c5819dae Mon Sep 17 00:00:00 2001 From: Quentin Monnet Date: Fri, 24 May 2019 11:36:47 +0100 
Subject: [PATCH 48/83] libbpf: add bpf_object__load_xattr() API function to pass log_level libbpf was recently made aware of the log_level attribute for programs, used to specify the level of information expected to be dumped by the verifier. Function bpf_prog_load_xattr() got support for this log_level parameter. But some applications using libbpf rely on another function to load programs, bpf_object__load(), which does accept any parameter for log level. Create an API function based on bpf_object__load(), but accepting an "attr" object as a parameter. Then add a log_level field to that object, so that applications calling the new bpf_object__load_xattr() can pick the desired log level. v3: - Rewrite commit log. v2: - We are in a new cycle, bump libbpf extraversion number. Signed-off-by: Quentin Monnet Reviewed-by: Jakub Kicinski Signed-off-by: Daniel Borkmann --- tools/lib/bpf/Makefile | 2 +- tools/lib/bpf/libbpf.c | 20 +++++++++++++++++--- tools/lib/bpf/libbpf.h | 6 ++++++ tools/lib/bpf/libbpf.map | 1 + 4 files changed, 25 insertions(+), 4 deletions(-) diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile index a2aceadf68db..9312066a1ae3 100644 --- a/tools/lib/bpf/Makefile +++ b/tools/lib/bpf/Makefile @@ -3,7 +3,7 @@ BPF_VERSION = 0 BPF_PATCHLEVEL = 0 -BPF_EXTRAVERSION = 3 +BPF_EXTRAVERSION = 4 MAKEFLAGS += --no-print-directory diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index fd5905ac2ec3..ca4432f5b067 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -2224,7 +2224,7 @@ static bool bpf_program__is_function_storage(struct bpf_program *prog, } static int -bpf_object__load_progs(struct bpf_object *obj) +bpf_object__load_progs(struct bpf_object *obj, int log_level) { size_t i; int err; @@ -2232,6 +2232,7 @@ bpf_object__load_progs(struct bpf_object *obj) for (i = 0; i < obj->nr_programs; i++) { if (bpf_program__is_function_storage(&obj->programs[i], obj)) continue; + obj->programs[i].log_level = log_level; err = bpf_program__load(&obj->programs[i], obj->license, obj->kern_version); @@ -2383,10 +2384,14 @@ int bpf_object__unload(struct bpf_object *obj) return 0; } -int bpf_object__load(struct bpf_object *obj) +int bpf_object__load_xattr(struct bpf_object_load_attr *attr) { + struct bpf_object *obj; int err; + if (!attr) + return -EINVAL; + obj = attr->obj; if (!obj) return -EINVAL; @@ -2399,7 +2404,7 @@ int bpf_object__load(struct bpf_object *obj) CHECK_ERR(bpf_object__create_maps(obj), err, out); CHECK_ERR(bpf_object__relocate(obj), err, out); - CHECK_ERR(bpf_object__load_progs(obj), err, out); + CHECK_ERR(bpf_object__load_progs(obj, attr->log_level), err, out); return 0; out: @@ -2408,6 +2413,15 @@ out: return err; } +int bpf_object__load(struct bpf_object *obj) +{ + struct bpf_object_load_attr attr = { + .obj = obj, + }; + + return bpf_object__load_xattr(&attr); +} + static int check_path(const char *path) { char *cp, errmsg[STRERR_BUFSIZE]; diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 5abc2375defd..1af0d48178c8 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -89,8 +89,14 @@ LIBBPF_API int bpf_object__unpin_programs(struct bpf_object *obj, LIBBPF_API int bpf_object__pin(struct bpf_object *object, const char *path); LIBBPF_API void bpf_object__close(struct bpf_object *object); +struct bpf_object_load_attr { + struct bpf_object *obj; + int log_level; +}; + /* Load/unload object into/from kernel */ LIBBPF_API int bpf_object__load(struct bpf_object *obj); +LIBBPF_API int bpf_object__load_xattr(struct 
bpf_object_load_attr *attr); LIBBPF_API int bpf_object__unload(struct bpf_object *obj); LIBBPF_API const char *bpf_object__name(struct bpf_object *obj); LIBBPF_API unsigned int bpf_object__kversion(struct bpf_object *obj); diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 8bf51d0a6072..46dcda89df21 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -171,4 +171,5 @@ LIBBPF_0.0.4 { btf_dump__free; btf_dump__new; btf__parse_elf; + bpf_object__load_xattr; } LIBBPF_0.0.3; From 55d778076b0354b088a9a16d9ff584c887e17f42 Mon Sep 17 00:00:00 2001 From: Quentin Monnet Date: Fri, 24 May 2019 11:36:48 +0100 Subject: [PATCH 49/83] tools: bpftool: make -d option print debug output from verifier The "-d" option is used to require all logs available for bpftool. So far it meant telling libbpf to print even debug-level information. But there is another source of info that can be made more verbose: when we attemt to load programs with bpftool, we can pass a log_level parameter to the verifier in order to control the amount of information that is printed to the console. Reuse the "-d" option to print all information the verifier can tell. At this time, this means logs related to BPF_LOG_LEVEL1, BPF_LOG_LEVEL2 and BPF_LOG_STATS. As mentioned in the discussion on the first version of this set, these macros are internal to the kernel (include/linux/bpf_verifier.h) and are not meant to be part of the stable user API, therefore we simply use the related constants to print whatever we can at this time, without trying to tell users what is log_level1 or what is statistics. Verifier logs are only used when loading programs for now (In the future: for loading BTF objects with bpftool? Although libbpf does not currently offer to print verifier info at debug level if no error occurred when loading BTF objects), so bpftool.rst and bpftool-prog.rst are the only man pages to get the update. v3: - Add details on log level and BTF loading at the end of commit log. v2: - Remove the possibility to select the log levels to use (v1 offered a combination of "log_level1", "log_level2" and "stats"). - The macros from kernel header bpf_verifier.h are not used (and therefore not moved to UAPI header). - In v1 this was a distinct option, but is now merged in the only "-d" switch to activate libbpf and verifier debug-level logs all at the same time. Signed-off-by: Quentin Monnet Reviewed-by: Jakub Kicinski Signed-off-by: Daniel Borkmann --- .../bpftool/Documentation/bpftool-prog.rst | 5 ++-- tools/bpf/bpftool/Documentation/bpftool.rst | 5 ++-- tools/bpf/bpftool/main.c | 2 ++ tools/bpf/bpftool/main.h | 1 + tools/bpf/bpftool/prog.c | 27 ++++++++++++------- 5 files changed, 26 insertions(+), 14 deletions(-) diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst index 9a92614569e6..228a5c863cc7 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst @@ -175,8 +175,9 @@ OPTIONS (such as tracefs or BPF virtual file system) when necessary. -d, --debug - Print all logs available from libbpf, including debug-level - information. + Print all logs available, even debug-level information. This + includes logs from libbpf as well as from the verifier, when + attempting to load programs. 
EXAMPLES ======== diff --git a/tools/bpf/bpftool/Documentation/bpftool.rst b/tools/bpf/bpftool/Documentation/bpftool.rst index 43dba0717953..6a9c52ef84a9 100644 --- a/tools/bpf/bpftool/Documentation/bpftool.rst +++ b/tools/bpf/bpftool/Documentation/bpftool.rst @@ -67,8 +67,9 @@ OPTIONS (such as tracefs or BPF virtual file system) when necessary. -d, --debug - Print all logs available from libbpf, including debug-level - information. + Print all logs available, even debug-level information. This + includes logs from libbpf as well as from the verifier, when + attempting to load programs. SEE ALSO ======== diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c index d74293938a05..4879f6395c7e 100644 --- a/tools/bpf/bpftool/main.c +++ b/tools/bpf/bpftool/main.c @@ -26,6 +26,7 @@ bool pretty_output; bool json_output; bool show_pinned; bool block_mount; +bool verifier_logs; int bpf_flags; struct pinned_obj_table prog_table; struct pinned_obj_table map_table; @@ -373,6 +374,7 @@ int main(int argc, char **argv) break; case 'd': libbpf_set_print(print_all_levels); + verifier_logs = true; break; default: p_err("unrecognized option '%s'", argv[optind - 1]); diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h index 3d63feb7f852..28a2a5857e14 100644 --- a/tools/bpf/bpftool/main.h +++ b/tools/bpf/bpftool/main.h @@ -91,6 +91,7 @@ extern json_writer_t *json_wtr; extern bool json_output; extern bool show_pinned; extern bool block_mount; +extern bool verifier_logs; extern int bpf_flags; extern struct pinned_obj_table prog_table; extern struct pinned_obj_table map_table; diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index 26336bad0442..1f209c80d906 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -750,10 +750,11 @@ static int do_detach(int argc, char **argv) static int load_with_options(int argc, char **argv, bool first_prog_only) { - enum bpf_attach_type expected_attach_type; - struct bpf_object_open_attr attr = { - .prog_type = BPF_PROG_TYPE_UNSPEC, + struct bpf_object_load_attr load_attr = { 0 }; + struct bpf_object_open_attr open_attr = { + .prog_type = BPF_PROG_TYPE_UNSPEC, }; + enum bpf_attach_type expected_attach_type; struct map_replace *map_replace = NULL; struct bpf_program *prog = NULL, *pos; unsigned int old_map_fds = 0; @@ -767,7 +768,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only) if (!REQ_ARGS(2)) return -1; - attr.file = GET_ARG(); + open_attr.file = GET_ARG(); pinfile = GET_ARG(); while (argc) { @@ -776,7 +777,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only) NEXT_ARG(); - if (attr.prog_type != BPF_PROG_TYPE_UNSPEC) { + if (open_attr.prog_type != BPF_PROG_TYPE_UNSPEC) { p_err("program type already specified"); goto err_free_reuse_maps; } @@ -793,7 +794,8 @@ static int load_with_options(int argc, char **argv, bool first_prog_only) strcat(type, *argv); strcat(type, "/"); - err = libbpf_prog_type_by_name(type, &attr.prog_type, + err = libbpf_prog_type_by_name(type, + &open_attr.prog_type, &expected_attach_type); free(type); if (err < 0) @@ -881,16 +883,16 @@ static int load_with_options(int argc, char **argv, bool first_prog_only) set_max_rlimit(); - obj = __bpf_object__open_xattr(&attr, bpf_flags); + obj = __bpf_object__open_xattr(&open_attr, bpf_flags); if (IS_ERR_OR_NULL(obj)) { p_err("failed to open object file"); goto err_free_reuse_maps; } bpf_object__for_each_program(pos, obj) { - enum bpf_prog_type prog_type = attr.prog_type; + enum bpf_prog_type prog_type = 
open_attr.prog_type; - if (attr.prog_type == BPF_PROG_TYPE_UNSPEC) { + if (open_attr.prog_type == BPF_PROG_TYPE_UNSPEC) { const char *sec_name = bpf_program__title(pos, false); err = libbpf_prog_type_by_name(sec_name, &prog_type, @@ -960,7 +962,12 @@ static int load_with_options(int argc, char **argv, bool first_prog_only) goto err_close_obj; } - err = bpf_object__load(obj); + load_attr.obj = obj; + if (verifier_logs) + /* log_level1 + log_level2 + stats, but not stable UAPI */ + load_attr.log_level = 1 + 2 + 4; + + err = bpf_object__load_xattr(&load_attr); if (err) { p_err("failed to load object file"); goto err_close_obj; From 486d3f22c0cab818ba6b4324fd8eda643d5f16b2 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Fri, 24 May 2019 15:28:56 -0700 Subject: [PATCH 50/83] selftests/bpf: fail test_tunnel.sh if subtests fail Right now test_tunnel.sh always exits with success even if some of the subtests fail. Since the output is very verbose, it's hard to spot the issues with subtests. Let's fail the script if any subtest fails. Signed-off-by: Stanislav Fomichev Acked-by: Yonghong Song Signed-off-by: Daniel Borkmann --- tools/testing/selftests/bpf/test_tunnel.sh | 32 ++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/tools/testing/selftests/bpf/test_tunnel.sh b/tools/testing/selftests/bpf/test_tunnel.sh index 546aee3e9fb4..bd12ec97a44d 100755 --- a/tools/testing/selftests/bpf/test_tunnel.sh +++ b/tools/testing/selftests/bpf/test_tunnel.sh @@ -696,30 +696,57 @@ check_err() bpf_tunnel_test() { + local errors=0 + echo "Testing GRE tunnel..." test_gre + errors=$(( $errors + $? )) + echo "Testing IP6GRE tunnel..." test_ip6gre + errors=$(( $errors + $? )) + echo "Testing IP6GRETAP tunnel..." test_ip6gretap + errors=$(( $errors + $? )) + echo "Testing ERSPAN tunnel..." test_erspan v2 + errors=$(( $errors + $? )) + echo "Testing IP6ERSPAN tunnel..." test_ip6erspan v2 + errors=$(( $errors + $? )) + echo "Testing VXLAN tunnel..." test_vxlan + errors=$(( $errors + $? )) + echo "Testing IP6VXLAN tunnel..." test_ip6vxlan + errors=$(( $errors + $? )) + echo "Testing GENEVE tunnel..." test_geneve + errors=$(( $errors + $? )) + echo "Testing IP6GENEVE tunnel..." test_ip6geneve + errors=$(( $errors + $? )) + echo "Testing IPIP tunnel..." test_ipip + errors=$(( $errors + $? )) + echo "Testing IPIP6 tunnel..." test_ipip6 + errors=$(( $errors + $? )) + echo "Testing IPSec tunnel..." test_xfrm_tunnel + errors=$(( $errors + $? )) + + return $errors } trap cleanup 0 3 6 @@ -728,4 +755,9 @@ trap cleanup_exit 2 9 cleanup bpf_tunnel_test +if [ $? -ne 0 ]; then + echo -e "$(basename $0): ${RED}FAIL${NC}" + exit 1 +fi +echo -e "$(basename $0): ${GREEN}PASS${NC}" exit 0 From 37b54aed123faa19eb21d7ef2534756c5a152a7c Mon Sep 17 00:00:00 2001 From: "Daniel T. Lee" Date: Thu, 23 May 2019 16:24:48 +0900 Subject: [PATCH 51/83] samples/bpf: fix a couple of style issues in bpf_load This commit fixes a few style problems in samples/bpf/bpf_load.c: - Magic string use of 'DEBUGFS' - Useless zero initialization of a global variable - Minor style fix with whitespace Signed-off-by: Daniel T. 
Lee Acked-by: Yonghong Song Signed-off-by: Daniel Borkmann --- samples/bpf/bpf_load.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c index eae7b635343d..1734ade04f7f 100644 --- a/samples/bpf/bpf_load.c +++ b/samples/bpf/bpf_load.c @@ -40,7 +40,7 @@ int prog_cnt; int prog_array_fd = -1; struct bpf_map_data map_data[MAX_MAPS]; -int map_data_count = 0; +int map_data_count; static int populate_prog_array(const char *event, int prog_fd) { @@ -65,7 +65,7 @@ static int write_kprobe_events(const char *val) else flags = O_WRONLY | O_APPEND; - fd = open("/sys/kernel/debug/tracing/kprobe_events", flags); + fd = open(DEBUGFS "kprobe_events", flags); ret = write(fd, val, strlen(val)); close(fd); @@ -490,8 +490,8 @@ static int load_elf_maps_section(struct bpf_map_data *maps, int maps_shndx, /* Verify no newer features were requested */ if (validate_zero) { - addr = (unsigned char*) def + map_sz_copy; - end = (unsigned char*) def + map_sz_elf; + addr = (unsigned char *) def + map_sz_copy; + end = (unsigned char *) def + map_sz_elf; for (; addr < end; addr++) { if (*addr != 0) { free(sym); From 4bfc0bb2c60e2f4cc8eb60f03cf8dfa72336272a Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Sat, 25 May 2019 09:37:39 -0700 Subject: [PATCH 52/83] bpf: decouple the lifetime of cgroup_bpf from cgroup itself Currently the lifetime of bpf programs attached to a cgroup is bound to the lifetime of the cgroup itself. It means that if a user forgets (or intentionally avoids) to detach a bpf program before removing the cgroup, it will stay attached up to the release of the cgroup. Since the cgroup can stay in the dying state (the state between being rmdir()'ed and being released) for a very long time, it leads to a waste of memory. Also, it blocks a possibility to implement the memcg-based memory accounting for bpf objects, because a circular reference dependency will occur. Charged memory pages are pinning the corresponding memory cgroup, and if the memory cgroup is pinning the attached bpf program, nothing will be ever released. A dying cgroup can not contain any processes, so the only chance for an attached bpf program to be executed is a live socket associated with the cgroup. So in order to release all bpf data early, let's count associated sockets using a new percpu refcounter. On cgroup removal the counter is transitioned to the atomic mode, and as soon as it reaches 0, all bpf programs are detached. Because cgroup_bpf_release() can block, it can't be called from the percpu ref counter callback directly, so instead an asynchronous work is scheduled. The reference counter is not socket specific, and can be used for any other types of programs, which can be executed from a cgroup-bpf hook outside of the process context, had such a need arise in the future. 
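As a condensed sketch of the resulting lifecycle (paraphrased from the hunks below, with error paths and locking omitted, so not the literal patch code):

    /* cgroup_bpf_inherit(): set up the percpu reference counter */
    ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
                          GFP_KERNEL);

    /* cgroup_sk_alloc()/cgroup_sk_free(): each socket associated with the
     * cgroup takes/drops one reference via cgroup_bpf_get()/cgroup_bpf_put()
     */

    /* cgroup_destroy_locked() -> cgroup_bpf_offline(): pin the cgroup and
     * switch the counter to atomic mode; it reaches zero once the last
     * socket reference is dropped
     */
    cgroup_get(cgrp);
    percpu_ref_kill(&cgrp->bpf.refcnt);

    /* cgroup_bpf_release_fn(): runs from the percpu_ref callback, which
     * must not block, so it only schedules the real cleanup
     */
    INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release);
    queue_work(system_wq, &cgrp->bpf.release_work);

    /* cgroup_bpf_release(): detach programs, free the effective arrays,
     * percpu_ref_exit(), then drop the cgroup reference taken in offline
     */
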
Signed-off-by: Roman Gushchin Cc: jolsa@redhat.com Signed-off-by: Alexei Starovoitov --- include/linux/bpf-cgroup.h | 11 ++++++++-- include/linux/cgroup.h | 18 +++++++++++++++ kernel/bpf/cgroup.c | 45 +++++++++++++++++++++++++++++++++----- kernel/cgroup/cgroup.c | 11 +++++++--- 4 files changed, 74 insertions(+), 11 deletions(-) diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index cb3c6b3b89c8..9f100fc422c3 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -6,6 +6,7 @@ #include #include #include +#include #include #include @@ -72,10 +73,16 @@ struct cgroup_bpf { /* temp storage for effective prog array used by prog_attach/detach */ struct bpf_prog_array __rcu *inactive; + + /* reference counter used to detach bpf programs after cgroup removal */ + struct percpu_ref refcnt; + + /* cgroup_bpf is released using a work queue */ + struct work_struct release_work; }; -void cgroup_bpf_put(struct cgroup *cgrp); int cgroup_bpf_inherit(struct cgroup *cgrp); +void cgroup_bpf_offline(struct cgroup *cgrp); int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, enum bpf_attach_type type, u32 flags); @@ -283,8 +290,8 @@ int cgroup_bpf_prog_query(const union bpf_attr *attr, struct bpf_prog; struct cgroup_bpf {}; -static inline void cgroup_bpf_put(struct cgroup *cgrp) {} static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; } +static inline void cgroup_bpf_offline(struct cgroup *cgrp) {} static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr, enum bpf_prog_type ptype, diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index c0077adeea83..49e8facf7c4a 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -924,4 +924,22 @@ static inline bool cgroup_task_frozen(struct task_struct *task) #endif /* !CONFIG_CGROUPS */ +#ifdef CONFIG_CGROUP_BPF +static inline void cgroup_bpf_get(struct cgroup *cgrp) +{ + percpu_ref_get(&cgrp->bpf.refcnt); +} + +static inline void cgroup_bpf_put(struct cgroup *cgrp) +{ + percpu_ref_put(&cgrp->bpf.refcnt); +} + +#else /* CONFIG_CGROUP_BPF */ + +static inline void cgroup_bpf_get(struct cgroup *cgrp) {} +static inline void cgroup_bpf_put(struct cgroup *cgrp) {} + +#endif /* CONFIG_CGROUP_BPF */ + #endif /* _LINUX_CGROUP_H */ diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index fcde0f7b2585..d995edbe816d 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -22,12 +22,21 @@ DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key); EXPORT_SYMBOL(cgroup_bpf_enabled_key); -/** - * cgroup_bpf_put() - put references of all bpf programs - * @cgrp: the cgroup to modify - */ -void cgroup_bpf_put(struct cgroup *cgrp) +void cgroup_bpf_offline(struct cgroup *cgrp) { + cgroup_get(cgrp); + percpu_ref_kill(&cgrp->bpf.refcnt); +} + +/** + * cgroup_bpf_release() - put references of all bpf programs and + * release all cgroup bpf data + * @work: work structure embedded into the cgroup to modify + */ +static void cgroup_bpf_release(struct work_struct *work) +{ + struct cgroup *cgrp = container_of(work, struct cgroup, + bpf.release_work); enum bpf_cgroup_storage_type stype; unsigned int type; @@ -47,6 +56,22 @@ void cgroup_bpf_put(struct cgroup *cgrp) } bpf_prog_array_free(cgrp->bpf.effective[type]); } + + percpu_ref_exit(&cgrp->bpf.refcnt); + cgroup_put(cgrp); +} + +/** + * cgroup_bpf_release_fn() - callback used to schedule releasing + * of bpf cgroup data + * @ref: percpu ref counter structure + */ +static void cgroup_bpf_release_fn(struct percpu_ref *ref) +{ + struct cgroup *cgrp = 
container_of(ref, struct cgroup, bpf.refcnt); + + INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release); + queue_work(system_wq, &cgrp->bpf.release_work); } /* count number of elements in the list. @@ -167,7 +192,12 @@ int cgroup_bpf_inherit(struct cgroup *cgrp) */ #define NR ARRAY_SIZE(cgrp->bpf.effective) struct bpf_prog_array __rcu *arrays[NR] = {}; - int i; + int ret, i; + + ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0, + GFP_KERNEL); + if (ret) + return ret; for (i = 0; i < NR; i++) INIT_LIST_HEAD(&cgrp->bpf.progs[i]); @@ -183,6 +213,9 @@ int cgroup_bpf_inherit(struct cgroup *cgrp) cleanup: for (i = 0; i < NR; i++) bpf_prog_array_free(arrays[i]); + + percpu_ref_exit(&cgrp->bpf.refcnt); + return -ENOMEM; } diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 217cec4e22c6..ef9cfbfc82a9 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -4955,8 +4955,6 @@ static void css_release_work_fn(struct work_struct *work) if (cgrp->kn) RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv, NULL); - - cgroup_bpf_put(cgrp); } mutex_unlock(&cgroup_mutex); @@ -5482,6 +5480,8 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) cgroup1_check_for_release(parent); + cgroup_bpf_offline(cgrp); + /* put the base reference */ percpu_ref_kill(&cgrp->self.refcnt); @@ -6221,6 +6221,7 @@ void cgroup_sk_alloc(struct sock_cgroup_data *skcd) * Don't use cgroup_get_live(). */ cgroup_get(sock_cgroup_ptr(skcd)); + cgroup_bpf_get(sock_cgroup_ptr(skcd)); return; } @@ -6232,6 +6233,7 @@ void cgroup_sk_alloc(struct sock_cgroup_data *skcd) cset = task_css_set(current); if (likely(cgroup_tryget(cset->dfl_cgrp))) { skcd->val = (unsigned long)cset->dfl_cgrp; + cgroup_bpf_get(cset->dfl_cgrp); break; } cpu_relax(); @@ -6242,7 +6244,10 @@ void cgroup_sk_alloc(struct sock_cgroup_data *skcd) void cgroup_sk_free(struct sock_cgroup_data *skcd) { - cgroup_put(sock_cgroup_ptr(skcd)); + struct cgroup *cgrp = sock_cgroup_ptr(skcd); + + cgroup_bpf_put(cgrp); + cgroup_put(cgrp); } #endif /* CONFIG_SOCK_CGROUP_DATA */ From ba0c0cc05dda2c56e23c88e4610ce2ac65ff86ec Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Sat, 25 May 2019 09:37:40 -0700 Subject: [PATCH 53/83] selftests/bpf: convert test_cgrp2_attach2 example into kselftest Convert test_cgrp2_attach2 example into a proper test_cgroup_attach kselftest. It's better because we do run kselftest on a constant basis, so there are better chances to spot a potential regression. Also make it slightly less verbose to conform kselftests output style. 
Output example: $ ./test_cgroup_attach #override:PASS #multi:PASS test_cgroup_attach:PASS Signed-off-by: Roman Gushchin Acked-by: Yonghong Song Signed-off-by: Alexei Starovoitov --- samples/bpf/Makefile | 2 - tools/testing/selftests/bpf/.gitignore | 1 + tools/testing/selftests/bpf/Makefile | 3 +- .../selftests/bpf/test_cgroup_attach.c | 50 ++++++++++++------- 4 files changed, 36 insertions(+), 20 deletions(-) rename samples/bpf/test_cgrp2_attach2.c => tools/testing/selftests/bpf/test_cgroup_attach.c (91%) diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 4f0a1cdbfe7c..253e5a2856be 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -26,7 +26,6 @@ hostprogs-y += map_perf_test hostprogs-y += test_overhead hostprogs-y += test_cgrp2_array_pin hostprogs-y += test_cgrp2_attach -hostprogs-y += test_cgrp2_attach2 hostprogs-y += test_cgrp2_sock hostprogs-y += test_cgrp2_sock2 hostprogs-y += xdp1 @@ -81,7 +80,6 @@ map_perf_test-objs := bpf_load.o map_perf_test_user.o test_overhead-objs := bpf_load.o test_overhead_user.o test_cgrp2_array_pin-objs := test_cgrp2_array_pin.o test_cgrp2_attach-objs := test_cgrp2_attach.o -test_cgrp2_attach2-objs := test_cgrp2_attach2.o $(CGROUP_HELPERS) test_cgrp2_sock-objs := test_cgrp2_sock.o test_cgrp2_sock2-objs := bpf_load.o test_cgrp2_sock2.o xdp1-objs := xdp1_user.o diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore index b3da2ffdc158..b2a9902f11c5 100644 --- a/tools/testing/selftests/bpf/.gitignore +++ b/tools/testing/selftests/bpf/.gitignore @@ -22,6 +22,7 @@ test_lirc_mode2_user get_cgroup_id_user test_skb_cgroup_id_user test_socket_cookie +test_cgroup_attach test_cgroup_storage test_select_reuseport test_flow_dissector diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index fa002da36d0d..9b21391c4966 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -26,7 +26,7 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test test_sock test_btf test_sockmap test_lirc_mode2_user get_cgroup_id_user \ test_socket_cookie test_cgroup_storage test_select_reuseport test_section_names \ test_netcnt test_tcpnotify_user test_sock_fields test_sysctl test_hashmap \ - test_btf_dump + test_btf_dump test_cgroup_attach BPF_OBJ_FILES = $(patsubst %.c,%.o, $(notdir $(wildcard progs/*.c))) TEST_GEN_FILES = $(BPF_OBJ_FILES) @@ -99,6 +99,7 @@ $(OUTPUT)/test_cgroup_storage: cgroup_helpers.c $(OUTPUT)/test_netcnt: cgroup_helpers.c $(OUTPUT)/test_sock_fields: cgroup_helpers.c $(OUTPUT)/test_sysctl: cgroup_helpers.c +$(OUTPUT)/test_cgroup_attach: cgroup_helpers.c .PHONY: force diff --git a/samples/bpf/test_cgrp2_attach2.c b/tools/testing/selftests/bpf/test_cgroup_attach.c similarity index 91% rename from samples/bpf/test_cgrp2_attach2.c rename to tools/testing/selftests/bpf/test_cgroup_attach.c index 0bb6507256b7..2d6d57f50e10 100644 --- a/samples/bpf/test_cgrp2_attach2.c +++ b/tools/testing/selftests/bpf/test_cgroup_attach.c @@ -1,3 +1,5 @@ +// SPDX-License-Identifier: GPL-2.0 + /* eBPF example program: * * - Creates arraymap in kernel with 4 bytes keys and 8 byte values @@ -25,20 +27,27 @@ #include #include #include +#include #include #include -#include "bpf_insn.h" +#include "bpf_util.h" #include "bpf_rlimit.h" #include "cgroup_helpers.h" #define FOO "/foo" #define BAR "/foo/bar/" -#define PING_CMD "ping -c1 -w1 127.0.0.1 > /dev/null" +#define PING_CMD "ping -q -c1 -w1 127.0.0.1 > /dev/null" char 
bpf_log_buf[BPF_LOG_BUF_SIZE]; +#ifdef DEBUG +#define debug(args...) printf(args) +#else +#define debug(args...) +#endif + static int prog_load(int verdict) { int ret; @@ -89,7 +98,7 @@ static int test_foo_bar(void) goto err; } - printf("Attached DROP prog. This ping in cgroup /foo should fail...\n"); + debug("Attached DROP prog. This ping in cgroup /foo should fail...\n"); assert(system(PING_CMD) != 0); /* Create cgroup /foo/bar, get fd, and join it */ @@ -100,7 +109,7 @@ static int test_foo_bar(void) if (join_cgroup(BAR)) goto err; - printf("Attached DROP prog. This ping in cgroup /foo/bar should fail...\n"); + debug("Attached DROP prog. This ping in cgroup /foo/bar should fail...\n"); assert(system(PING_CMD) != 0); if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, @@ -109,7 +118,7 @@ static int test_foo_bar(void) goto err; } - printf("Attached PASS prog. This ping in cgroup /foo/bar should pass...\n"); + debug("Attached PASS prog. This ping in cgroup /foo/bar should pass...\n"); assert(system(PING_CMD) == 0); if (bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS)) { @@ -117,7 +126,7 @@ static int test_foo_bar(void) goto err; } - printf("Detached PASS from /foo/bar while DROP is attached to /foo.\n" + debug("Detached PASS from /foo/bar while DROP is attached to /foo.\n" "This ping in cgroup /foo/bar should fail...\n"); assert(system(PING_CMD) != 0); @@ -132,7 +141,7 @@ static int test_foo_bar(void) goto err; } - printf("Attached PASS from /foo/bar and detached DROP from /foo.\n" + debug("Attached PASS from /foo/bar and detached DROP from /foo.\n" "This ping in cgroup /foo/bar should pass...\n"); assert(system(PING_CMD) == 0); @@ -199,9 +208,9 @@ out: close(bar); cleanup_cgroup_environment(); if (!rc) - printf("### override:PASS\n"); + printf("#override:PASS\n"); else - printf("### override:FAIL\n"); + printf("#override:FAIL\n"); return rc; } @@ -441,19 +450,26 @@ out: close(cg5); cleanup_cgroup_environment(); if (!rc) - printf("### multi:PASS\n"); + printf("#multi:PASS\n"); else - printf("### multi:FAIL\n"); + printf("#multi:FAIL\n"); return rc; } -int main(int argc, char **argv) +int main(void) { - int rc = 0; + int (*tests[])(void) = {test_foo_bar, test_multiprog}; + int errors = 0; + int i; - rc = test_foo_bar(); - if (rc) - return rc; + for (i = 0; i < ARRAY_SIZE(tests); i++) + if (tests[i]()) + errors++; - return test_multiprog(); + if (errors) + printf("test_cgroup_attach:FAIL\n"); + else + printf("test_cgroup_attach:PASS\n"); + + return errors ? EXIT_FAILURE : EXIT_SUCCESS; } From 596092ef8beaa73891da4f406446b283cc566cdb Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Sat, 25 May 2019 09:37:41 -0700 Subject: [PATCH 54/83] selftests/bpf: enable all available cgroup v2 controllers Enable all available cgroup v2 controllers when setting up the environment for the bpf kselftests. It's required to properly test the bpf prog auto-detach feature. Also it will generally increase the code coverage. 
Signed-off-by: Roman Gushchin Acked-by: Yonghong Song Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/cgroup_helpers.c | 57 ++++++++++++++++++++ 1 file changed, 57 insertions(+) diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c index 6692a40a6979..0d89f0396be4 100644 --- a/tools/testing/selftests/bpf/cgroup_helpers.c +++ b/tools/testing/selftests/bpf/cgroup_helpers.c @@ -33,6 +33,60 @@ snprintf(buf, sizeof(buf), "%s%s%s", CGROUP_MOUNT_PATH, \ CGROUP_WORK_DIR, path) +/** + * enable_all_controllers() - Enable all available cgroup v2 controllers + * + * Enable all available cgroup v2 controllers in order to increase + * the code coverage. + * + * If successful, 0 is returned. + */ +int enable_all_controllers(char *cgroup_path) +{ + char path[PATH_MAX + 1]; + char buf[PATH_MAX]; + char *c, *c2; + int fd, cfd; + size_t len; + + snprintf(path, sizeof(path), "%s/cgroup.controllers", cgroup_path); + fd = open(path, O_RDONLY); + if (fd < 0) { + log_err("Opening cgroup.controllers: %s", path); + return 1; + } + + len = read(fd, buf, sizeof(buf) - 1); + if (len < 0) { + close(fd); + log_err("Reading cgroup.controllers: %s", path); + return 1; + } + buf[len] = 0; + close(fd); + + /* No controllers available? We're probably on cgroup v1. */ + if (len == 0) + return 0; + + snprintf(path, sizeof(path), "%s/cgroup.subtree_control", cgroup_path); + cfd = open(path, O_RDWR); + if (cfd < 0) { + log_err("Opening cgroup.subtree_control: %s", path); + return 1; + } + + for (c = strtok_r(buf, " ", &c2); c; c = strtok_r(NULL, " ", &c2)) { + if (dprintf(cfd, "+%s\n", c) <= 0) { + log_err("Enabling controller %s: %s", c, path); + close(cfd); + return 1; + } + } + close(cfd); + return 0; +} + /** * setup_cgroup_environment() - Setup the cgroup environment * @@ -71,6 +125,9 @@ int setup_cgroup_environment(void) return 1; } + if (enable_all_controllers(cgroup_workdir)) + return 1; + return 0; } From d5506591d54b915aaa9d489c9289af2cc88f47a4 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Sat, 25 May 2019 09:37:42 -0700 Subject: [PATCH 55/83] selftests/bpf: add auto-detach test Add a kselftest to cover bpf auto-detachment functionality. The test creates a cgroup, associates some resources with it, attaches a couple of bpf programs and deletes the cgroup. Then it checks that bpf programs are going away in 5 seconds. 
Expected output: $ ./test_cgroup_attach #override:PASS #multi:PASS #autodetach:PASS test_cgroup_attach:PASS On a kernel without auto-detaching: $ ./test_cgroup_attach #override:PASS #multi:PASS #autodetach:FAIL test_cgroup_attach:FAIL Signed-off-by: Roman Gushchin Acked-by: Yonghong Song Signed-off-by: Alexei Starovoitov --- .../selftests/bpf/test_cgroup_attach.c | 98 ++++++++++++++++++- 1 file changed, 97 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/test_cgroup_attach.c b/tools/testing/selftests/bpf/test_cgroup_attach.c index 2d6d57f50e10..7671909ee1cb 100644 --- a/tools/testing/selftests/bpf/test_cgroup_attach.c +++ b/tools/testing/selftests/bpf/test_cgroup_attach.c @@ -456,9 +456,105 @@ out: return rc; } +static int test_autodetach(void) +{ + __u32 prog_cnt = 4, attach_flags; + int allow_prog[2] = {0}; + __u32 prog_ids[2] = {0}; + int cg = 0, i, rc = -1; + void *ptr = NULL; + int attempts; + + for (i = 0; i < ARRAY_SIZE(allow_prog); i++) { + allow_prog[i] = prog_load_cnt(1, 1 << i); + if (!allow_prog[i]) + goto err; + } + + if (setup_cgroup_environment()) + goto err; + + /* create a cgroup, attach two programs and remember their ids */ + cg = create_and_get_cgroup("/cg_autodetach"); + if (cg < 0) + goto err; + + if (join_cgroup("/cg_autodetach")) + goto err; + + for (i = 0; i < ARRAY_SIZE(allow_prog); i++) { + if (bpf_prog_attach(allow_prog[i], cg, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_MULTI)) { + log_err("Attaching prog[%d] to cg:egress", i); + goto err; + } + } + + /* make sure that programs are attached and run some traffic */ + assert(bpf_prog_query(cg, BPF_CGROUP_INET_EGRESS, 0, &attach_flags, + prog_ids, &prog_cnt) == 0); + assert(system(PING_CMD) == 0); + + /* allocate some memory (4Mb) to pin the original cgroup */ + ptr = malloc(4 * (1 << 20)); + if (!ptr) + goto err; + + /* close programs and cgroup fd */ + for (i = 0; i < ARRAY_SIZE(allow_prog); i++) { + close(allow_prog[i]); + allow_prog[i] = 0; + } + + close(cg); + cg = 0; + + /* leave the cgroup and remove it. don't detach programs */ + cleanup_cgroup_environment(); + + /* wait for the asynchronous auto-detachment. + * wait for no more than 5 sec and give up. 
+ */ + for (i = 0; i < ARRAY_SIZE(prog_ids); i++) { + for (attempts = 5; attempts >= 0; attempts--) { + int fd = bpf_prog_get_fd_by_id(prog_ids[i]); + + if (fd < 0) + break; + + /* don't leave the fd open */ + close(fd); + + if (!attempts) + goto err; + + sleep(1); + } + } + + rc = 0; +err: + for (i = 0; i < ARRAY_SIZE(allow_prog); i++) + if (allow_prog[i] > 0) + close(allow_prog[i]); + if (cg) + close(cg); + free(ptr); + cleanup_cgroup_environment(); + if (!rc) + printf("#autodetach:PASS\n"); + else + printf("#autodetach:FAIL\n"); + return rc; +} + int main(void) { - int (*tests[])(void) = {test_foo_bar, test_multiprog}; + int (*tests[])(void) = { + test_foo_bar, + test_multiprog, + test_autodetach, + }; int errors = 0; int i; From fe937ea12ec83da6c6bbbdb54a03062155318d0e Mon Sep 17 00:00:00 2001 From: Alan Maguire Date: Wed, 29 May 2019 10:48:14 +0100 Subject: [PATCH 56/83] selftests/bpf: fix compilation error for flow_dissector.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When building the tools/testing/selftest/bpf subdirectory, (running both a local directory "make" and a "make -C tools/testing/selftests/bpf") I keep hitting the following compilation error: prog_tests/flow_dissector.c: In function ‘create_tap’: prog_tests/flow_dissector.c:150:38: error: ‘IFF_NAPI’ undeclared (first use in this function) .ifr_flags = IFF_TAP | IFF_NO_PI | IFF_NAPI | IFF_NAPI_FRAGS, ^ prog_tests/flow_dissector.c:150:38: note: each undeclared identifier is reported only once for each function it appears in prog_tests/flow_dissector.c:150:49: error: ‘IFF_NAPI_FRAGS’ undeclared Adding include/uapi/linux/if_tun.h to tools/include/uapi/linux resolves the problem and ensures the compilation of the file does not depend on having up-to-date kernel headers locally. Signed-off-by: Alan Maguire Signed-off-by: Daniel Borkmann --- tools/include/uapi/linux/if_tun.h | 114 ++++++++++++++++++++++++++++++ 1 file changed, 114 insertions(+) create mode 100644 tools/include/uapi/linux/if_tun.h diff --git a/tools/include/uapi/linux/if_tun.h b/tools/include/uapi/linux/if_tun.h new file mode 100644 index 000000000000..454ae31b93c7 --- /dev/null +++ b/tools/include/uapi/linux/if_tun.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/* + * Universal TUN/TAP device driver. + * Copyright (C) 1999-2000 Maxim Krasnyansky + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _UAPI__IF_TUN_H +#define _UAPI__IF_TUN_H + +#include +#include +#include + +/* Read queue size */ +#define TUN_READQ_SIZE 500 +/* TUN device type flags: deprecated. Use IFF_TUN/IFF_TAP instead. 
*/ +#define TUN_TUN_DEV IFF_TUN +#define TUN_TAP_DEV IFF_TAP +#define TUN_TYPE_MASK 0x000f + +/* Ioctl defines */ +#define TUNSETNOCSUM _IOW('T', 200, int) +#define TUNSETDEBUG _IOW('T', 201, int) +#define TUNSETIFF _IOW('T', 202, int) +#define TUNSETPERSIST _IOW('T', 203, int) +#define TUNSETOWNER _IOW('T', 204, int) +#define TUNSETLINK _IOW('T', 205, int) +#define TUNSETGROUP _IOW('T', 206, int) +#define TUNGETFEATURES _IOR('T', 207, unsigned int) +#define TUNSETOFFLOAD _IOW('T', 208, unsigned int) +#define TUNSETTXFILTER _IOW('T', 209, unsigned int) +#define TUNGETIFF _IOR('T', 210, unsigned int) +#define TUNGETSNDBUF _IOR('T', 211, int) +#define TUNSETSNDBUF _IOW('T', 212, int) +#define TUNATTACHFILTER _IOW('T', 213, struct sock_fprog) +#define TUNDETACHFILTER _IOW('T', 214, struct sock_fprog) +#define TUNGETVNETHDRSZ _IOR('T', 215, int) +#define TUNSETVNETHDRSZ _IOW('T', 216, int) +#define TUNSETQUEUE _IOW('T', 217, int) +#define TUNSETIFINDEX _IOW('T', 218, unsigned int) +#define TUNGETFILTER _IOR('T', 219, struct sock_fprog) +#define TUNSETVNETLE _IOW('T', 220, int) +#define TUNGETVNETLE _IOR('T', 221, int) +/* The TUNSETVNETBE and TUNGETVNETBE ioctls are for cross-endian support on + * little-endian hosts. Not all kernel configurations support them, but all + * configurations that support SET also support GET. + */ +#define TUNSETVNETBE _IOW('T', 222, int) +#define TUNGETVNETBE _IOR('T', 223, int) +#define TUNSETSTEERINGEBPF _IOR('T', 224, int) +#define TUNSETFILTEREBPF _IOR('T', 225, int) +#define TUNSETCARRIER _IOW('T', 226, int) +#define TUNGETDEVNETNS _IO('T', 227) + +/* TUNSETIFF ifr flags */ +#define IFF_TUN 0x0001 +#define IFF_TAP 0x0002 +#define IFF_NAPI 0x0010 +#define IFF_NAPI_FRAGS 0x0020 +#define IFF_NO_PI 0x1000 +/* This flag has no real effect */ +#define IFF_ONE_QUEUE 0x2000 +#define IFF_VNET_HDR 0x4000 +#define IFF_TUN_EXCL 0x8000 +#define IFF_MULTI_QUEUE 0x0100 +#define IFF_ATTACH_QUEUE 0x0200 +#define IFF_DETACH_QUEUE 0x0400 +/* read-only flag */ +#define IFF_PERSIST 0x0800 +#define IFF_NOFILTER 0x1000 + +/* Socket options */ +#define TUN_TX_TIMESTAMP 1 + +/* Features for GSO (TUNSETOFFLOAD). */ +#define TUN_F_CSUM 0x01 /* You can hand me unchecksummed packets. */ +#define TUN_F_TSO4 0x02 /* I can handle TSO for IPv4 packets */ +#define TUN_F_TSO6 0x04 /* I can handle TSO for IPv6 packets */ +#define TUN_F_TSO_ECN 0x08 /* I can handle TSO with ECN bits. */ +#define TUN_F_UFO 0x10 /* I can handle UFO packets */ + +/* Protocol info prepended to the packets (when IFF_NO_PI is not set) */ +#define TUN_PKT_STRIP 0x0001 +struct tun_pi { + __u16 flags; + __be16 proto; +}; + +/* + * Filter spec (used for SETXXFILTER ioctls) + * This stuff is applicable only to the TAP (Ethernet) devices. + * If the count is zero the filter is disabled and the driver accepts + * all packets (promisc mode). + * If the filter is enabled in order to accept broadcast packets + * broadcast addr must be explicitly included in the addr list. + */ +#define TUN_FLT_ALLMULTI 0x0001 /* Accept all multicast packets */ +struct tun_filter { + __u16 flags; /* TUN_FLT_ flags see above */ + __u16 count; /* Number of addresses */ + __u8 addr[0][ETH_ALEN]; +}; + +#endif /* _UAPI__IF_TUN_H */ From 54e9c9d4b506b611228890752d1cfa960e0965e1 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Tue, 28 May 2019 14:14:41 -0700 Subject: [PATCH 57/83] bpf: remove __rcu annotations from bpf_prog_array Drop __rcu annotations and rcu read sections from bpf_prog_array helper functions. 
They are not needed since all existing callers call those helpers from the rcu update side while holding a mutex. This guarantees that use-after-free could not happen. In the next patches I'll fix the callers with missing rcu_dereference_protected to make sparse/lockdep happy, the proper way to use these helpers is: struct bpf_prog_array __rcu *progs = ...; struct bpf_prog_array *p; mutex_lock(&mtx); p = rcu_dereference_protected(progs, lockdep_is_held(&mtx)); bpf_prog_array_length(p); bpf_prog_array_copy_to_user(p, ...); bpf_prog_array_delete_safe(p, ...); bpf_prog_array_copy_info(p, ...); bpf_prog_array_copy(p, ...); bpf_prog_array_free(p); mutex_unlock(&mtx); No functional changes! rcu_dereference_protected with lockdep_is_held should catch any cases where we update prog array without a mutex (I've looked at existing call sites and I think we hold a mutex everywhere). Motivation is to fix sparse warnings: kernel/bpf/core.c:1803:9: warning: incorrect type in argument 1 (different address spaces) kernel/bpf/core.c:1803:9: expected struct callback_head *head kernel/bpf/core.c:1803:9: got struct callback_head [noderef] * kernel/bpf/core.c:1877:44: warning: incorrect type in initializer (different address spaces) kernel/bpf/core.c:1877:44: expected struct bpf_prog_array_item *item kernel/bpf/core.c:1877:44: got struct bpf_prog_array_item [noderef] * kernel/bpf/core.c:1901:26: warning: incorrect type in assignment (different address spaces) kernel/bpf/core.c:1901:26: expected struct bpf_prog_array_item *existing kernel/bpf/core.c:1901:26: got struct bpf_prog_array_item [noderef] * kernel/bpf/core.c:1935:26: warning: incorrect type in assignment (different address spaces) kernel/bpf/core.c:1935:26: expected struct bpf_prog_array_item *[assigned] existing kernel/bpf/core.c:1935:26: got struct bpf_prog_array_item [noderef] * v2: * remove comment about potential race; that can't happen because all callers are in rcu-update section Cc: Roman Gushchin Acked-by: Roman Gushchin Signed-off-by: Stanislav Fomichev Signed-off-by: Daniel Borkmann --- include/linux/bpf.h | 12 ++++++------ kernel/bpf/core.c | 37 +++++++++++++------------------------ 2 files changed, 19 insertions(+), 30 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index d98141edb74b..ff3e00ff84d2 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -514,17 +514,17 @@ struct bpf_prog_array { }; struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags); -void bpf_prog_array_free(struct bpf_prog_array __rcu *progs); -int bpf_prog_array_length(struct bpf_prog_array __rcu *progs); -int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs, +void bpf_prog_array_free(struct bpf_prog_array *progs); +int bpf_prog_array_length(struct bpf_prog_array *progs); +int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs, __u32 __user *prog_ids, u32 cnt); -void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs, +void bpf_prog_array_delete_safe(struct bpf_prog_array *progs, struct bpf_prog *old_prog); -int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array, +int bpf_prog_array_copy_info(struct bpf_prog_array *array, u32 *prog_ids, u32 request_cnt, u32 *prog_cnt); -int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array, +int bpf_prog_array_copy(struct bpf_prog_array *old_array, struct bpf_prog *exclude_prog, struct bpf_prog *include_prog, struct bpf_prog_array **new_array); diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 3675b19ecb90..33fb292f2e30 100644 --- 
a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -1795,38 +1795,33 @@ struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags) return &empty_prog_array.hdr; } -void bpf_prog_array_free(struct bpf_prog_array __rcu *progs) +void bpf_prog_array_free(struct bpf_prog_array *progs) { - if (!progs || - progs == (struct bpf_prog_array __rcu *)&empty_prog_array.hdr) + if (!progs || progs == &empty_prog_array.hdr) return; kfree_rcu(progs, rcu); } -int bpf_prog_array_length(struct bpf_prog_array __rcu *array) +int bpf_prog_array_length(struct bpf_prog_array *array) { struct bpf_prog_array_item *item; u32 cnt = 0; - rcu_read_lock(); - item = rcu_dereference(array)->items; - for (; item->prog; item++) + for (item = array->items; item->prog; item++) if (item->prog != &dummy_bpf_prog.prog) cnt++; - rcu_read_unlock(); return cnt; } -static bool bpf_prog_array_copy_core(struct bpf_prog_array __rcu *array, +static bool bpf_prog_array_copy_core(struct bpf_prog_array *array, u32 *prog_ids, u32 request_cnt) { struct bpf_prog_array_item *item; int i = 0; - item = rcu_dereference_check(array, 1)->items; - for (; item->prog; item++) { + for (item = array->items; item->prog; item++) { if (item->prog == &dummy_bpf_prog.prog) continue; prog_ids[i] = item->prog->aux->id; @@ -1839,7 +1834,7 @@ static bool bpf_prog_array_copy_core(struct bpf_prog_array __rcu *array, return !!(item->prog); } -int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *array, +int bpf_prog_array_copy_to_user(struct bpf_prog_array *array, __u32 __user *prog_ids, u32 cnt) { unsigned long err = 0; @@ -1850,18 +1845,12 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *array, * cnt = bpf_prog_array_length(); * if (cnt > 0) * bpf_prog_array_copy_to_user(..., cnt); - * so below kcalloc doesn't need extra cnt > 0 check, but - * bpf_prog_array_length() releases rcu lock and - * prog array could have been swapped with empty or larger array, - * so always copy 'cnt' prog_ids to the user. - * In a rare race the user will see zero prog_ids + * so below kcalloc doesn't need extra cnt > 0 check. 
*/ ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN); if (!ids) return -ENOMEM; - rcu_read_lock(); nospc = bpf_prog_array_copy_core(array, ids, cnt); - rcu_read_unlock(); err = copy_to_user(prog_ids, ids, cnt * sizeof(u32)); kfree(ids); if (err) @@ -1871,19 +1860,19 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *array, return 0; } -void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *array, +void bpf_prog_array_delete_safe(struct bpf_prog_array *array, struct bpf_prog *old_prog) { - struct bpf_prog_array_item *item = array->items; + struct bpf_prog_array_item *item; - for (; item->prog; item++) + for (item = array->items; item->prog; item++) if (item->prog == old_prog) { WRITE_ONCE(item->prog, &dummy_bpf_prog.prog); break; } } -int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array, +int bpf_prog_array_copy(struct bpf_prog_array *old_array, struct bpf_prog *exclude_prog, struct bpf_prog *include_prog, struct bpf_prog_array **new_array) @@ -1947,7 +1936,7 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array, return 0; } -int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array, +int bpf_prog_array_copy_info(struct bpf_prog_array *array, u32 *prog_ids, u32 request_cnt, u32 *prog_cnt) { From 02205d2ed6fe26a8f4fd9e9cec251d1dc7f79316 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Tue, 28 May 2019 14:14:42 -0700 Subject: [PATCH 58/83] bpf: media: properly use bpf_prog_array api Now that we don't have __rcu markers on the bpf_prog_array helpers, let's use proper rcu_dereference_protected to obtain array pointer under mutex. Cc: linux-media@vger.kernel.org Cc: Mauro Carvalho Chehab Cc: Sean Young Signed-off-by: Stanislav Fomichev Signed-off-by: Daniel Borkmann --- drivers/media/rc/bpf-lirc.c | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c index ee657003c1a1..0a0ce620e4a2 100644 --- a/drivers/media/rc/bpf-lirc.c +++ b/drivers/media/rc/bpf-lirc.c @@ -8,6 +8,9 @@ #include #include "rc-core-priv.h" +#define lirc_rcu_dereference(p) \ + rcu_dereference_protected(p, lockdep_is_held(&ir_raw_handler_lock)) + /* * BPF interface for raw IR */ @@ -136,7 +139,7 @@ const struct bpf_verifier_ops lirc_mode2_verifier_ops = { static int lirc_bpf_attach(struct rc_dev *rcdev, struct bpf_prog *prog) { - struct bpf_prog_array __rcu *old_array; + struct bpf_prog_array *old_array; struct bpf_prog_array *new_array; struct ir_raw_event_ctrl *raw; int ret; @@ -154,12 +157,12 @@ static int lirc_bpf_attach(struct rc_dev *rcdev, struct bpf_prog *prog) goto unlock; } - if (raw->progs && bpf_prog_array_length(raw->progs) >= BPF_MAX_PROGS) { + old_array = lirc_rcu_dereference(raw->progs); + if (old_array && bpf_prog_array_length(old_array) >= BPF_MAX_PROGS) { ret = -E2BIG; goto unlock; } - old_array = raw->progs; ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array); if (ret < 0) goto unlock; @@ -174,7 +177,7 @@ unlock: static int lirc_bpf_detach(struct rc_dev *rcdev, struct bpf_prog *prog) { - struct bpf_prog_array __rcu *old_array; + struct bpf_prog_array *old_array; struct bpf_prog_array *new_array; struct ir_raw_event_ctrl *raw; int ret; @@ -192,7 +195,7 @@ static int lirc_bpf_detach(struct rc_dev *rcdev, struct bpf_prog *prog) goto unlock; } - old_array = raw->progs; + old_array = lirc_rcu_dereference(raw->progs); ret = bpf_prog_array_copy(old_array, prog, NULL, &new_array); /* * Do not use bpf_prog_array_delete_safe() as we would end up @@ 
-223,21 +226,22 @@ void lirc_bpf_run(struct rc_dev *rcdev, u32 sample) /* * This should be called once the rc thread has been stopped, so there can be * no concurrent bpf execution. + * + * Should be called with the ir_raw_handler_lock held. */ void lirc_bpf_free(struct rc_dev *rcdev) { struct bpf_prog_array_item *item; + struct bpf_prog_array *array; - if (!rcdev->raw->progs) + array = lirc_rcu_dereference(rcdev->raw->progs); + if (!array) return; - item = rcu_dereference(rcdev->raw->progs)->items; - while (item->prog) { + for (item = array->items; item->prog; item++) bpf_prog_put(item->prog); - item++; - } - bpf_prog_array_free(rcdev->raw->progs); + bpf_prog_array_free(array); } int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog) @@ -290,7 +294,7 @@ int lirc_prog_detach(const union bpf_attr *attr) int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr) { __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids); - struct bpf_prog_array __rcu *progs; + struct bpf_prog_array *progs; struct rc_dev *rcdev; u32 cnt, flags = 0; int ret; @@ -311,7 +315,7 @@ int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr) if (ret) goto put; - progs = rcdev->raw->progs; + progs = lirc_rcu_dereference(rcdev->raw->progs); cnt = progs ? bpf_prog_array_length(progs) : 0; if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt))) { From dbcc1ba26e43bd32cb308e50ac4cb4a29d2f5967 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Tue, 28 May 2019 14:14:43 -0700 Subject: [PATCH 59/83] bpf: cgroup: properly use bpf_prog_array api Now that we don't have __rcu markers on the bpf_prog_array helpers, let's use proper rcu_dereference_protected to obtain array pointer under mutex. We also don't need __rcu annotations on cgroup_bpf.inactive since it's not read/updated concurrently. 
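The two dereference sites touched below use different lockdep conditions, because cgroup_bpf_release() runs without cgroup_mutex (condensed view of the diff, not additional code):

    /* attach/detach/query paths: updates are serialized by cgroup_mutex */
    effective = rcu_dereference_protected(cgrp->bpf.effective[type],
                                          lockdep_is_held(&cgroup_mutex));

    /* cgroup_bpf_release(): cgroup_mutex is not held here; the refcount
     * already being in the dying state is what rules out concurrent updates
     */
    old_array = rcu_dereference_protected(cgrp->bpf.effective[type],
                                          percpu_ref_is_dying(&cgrp->bpf.refcnt));
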
v4: * drop cgroup_rcu_xyz wrappers and use rcu APIs directly; presumably should be more clear to understand which mutex/refcount protects each particular place v3: * amend cgroup_rcu_dereference to include percpu_ref_is_dying; cgroup_bpf is now reference counted and we don't hold cgroup_mutex anymore in cgroup_bpf_release v2: * replace xchg with rcu_swap_protected Cc: Roman Gushchin Signed-off-by: Stanislav Fomichev Acked-by: Roman Gushchin Signed-off-by: Daniel Borkmann --- include/linux/bpf-cgroup.h | 2 +- kernel/bpf/cgroup.c | 28 +++++++++++++++++----------- 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index 9f100fc422c3..b631ee75762d 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -72,7 +72,7 @@ struct cgroup_bpf { u32 flags[MAX_BPF_ATTACH_TYPE]; /* temp storage for effective prog array used by prog_attach/detach */ - struct bpf_prog_array __rcu *inactive; + struct bpf_prog_array *inactive; /* reference counter used to detach bpf programs after cgroup removal */ struct percpu_ref refcnt; diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index d995edbe816d..ff594eb86fd7 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -38,6 +38,7 @@ static void cgroup_bpf_release(struct work_struct *work) struct cgroup *cgrp = container_of(work, struct cgroup, bpf.release_work); enum bpf_cgroup_storage_type stype; + struct bpf_prog_array *old_array; unsigned int type; for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) { @@ -54,7 +55,10 @@ static void cgroup_bpf_release(struct work_struct *work) kfree(pl); static_branch_dec(&cgroup_bpf_enabled_key); } - bpf_prog_array_free(cgrp->bpf.effective[type]); + old_array = rcu_dereference_protected( + cgrp->bpf.effective[type], + percpu_ref_is_dying(&cgrp->bpf.refcnt)); + bpf_prog_array_free(old_array); } percpu_ref_exit(&cgrp->bpf.refcnt); @@ -126,7 +130,7 @@ static bool hierarchy_allows_attach(struct cgroup *cgrp, */ static int compute_effective_progs(struct cgroup *cgrp, enum bpf_attach_type type, - struct bpf_prog_array __rcu **array) + struct bpf_prog_array **array) { enum bpf_cgroup_storage_type stype; struct bpf_prog_array *progs; @@ -164,17 +168,16 @@ static int compute_effective_progs(struct cgroup *cgrp, } } while ((p = cgroup_parent(p))); - rcu_assign_pointer(*array, progs); + *array = progs; return 0; } static void activate_effective_progs(struct cgroup *cgrp, enum bpf_attach_type type, - struct bpf_prog_array __rcu *array) + struct bpf_prog_array *old_array) { - struct bpf_prog_array __rcu *old_array; - - old_array = xchg(&cgrp->bpf.effective[type], array); + rcu_swap_protected(cgrp->bpf.effective[type], old_array, + lockdep_is_held(&cgroup_mutex)); /* free prog array after grace period, since __cgroup_bpf_run_*() * might be still walking the array */ @@ -191,7 +194,7 @@ int cgroup_bpf_inherit(struct cgroup *cgrp) * that array below is variable length */ #define NR ARRAY_SIZE(cgrp->bpf.effective) - struct bpf_prog_array __rcu *arrays[NR] = {}; + struct bpf_prog_array *arrays[NR] = {}; int ret, i; ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0, @@ -477,10 +480,14 @@ int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, enum bpf_attach_type type = attr->query.attach_type; struct list_head *progs = &cgrp->bpf.progs[type]; u32 flags = cgrp->bpf.flags[type]; + struct bpf_prog_array *effective; int cnt, ret = 0, i; + effective = rcu_dereference_protected(cgrp->bpf.effective[type], + 
lockdep_is_held(&cgroup_mutex)); + if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) - cnt = bpf_prog_array_length(cgrp->bpf.effective[type]); + cnt = bpf_prog_array_length(effective); else cnt = prog_list_length(progs); @@ -497,8 +504,7 @@ int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, } if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) { - return bpf_prog_array_copy_to_user(cgrp->bpf.effective[type], - prog_ids, cnt); + return bpf_prog_array_copy_to_user(effective, prog_ids, cnt); } else { struct bpf_prog_list *pl; u32 id; From e672db03ab0e43e41ab6f8b2156a10d6e40f243d Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Tue, 28 May 2019 14:14:44 -0700 Subject: [PATCH 60/83] bpf: tracing: properly use bpf_prog_array api Now that we don't have __rcu markers on the bpf_prog_array helpers, let's use proper rcu_dereference_protected to obtain array pointer under mutex. Cc: Steven Rostedt Cc: Ingo Molnar Signed-off-by: Stanislav Fomichev Signed-off-by: Daniel Borkmann --- kernel/trace/bpf_trace.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index fe73926a07cd..3994a231eb92 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -19,6 +19,9 @@ #include "trace_probe.h" #include "trace.h" +#define bpf_event_rcu_dereference(p) \ + rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex)) + #ifdef CONFIG_MODULES struct bpf_trace_module { struct module *module; @@ -1099,7 +1102,7 @@ static DEFINE_MUTEX(bpf_event_mutex); int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog) { - struct bpf_prog_array __rcu *old_array; + struct bpf_prog_array *old_array; struct bpf_prog_array *new_array; int ret = -EEXIST; @@ -1117,7 +1120,7 @@ int perf_event_attach_bpf_prog(struct perf_event *event, if (event->prog) goto unlock; - old_array = event->tp_event->prog_array; + old_array = bpf_event_rcu_dereference(event->tp_event->prog_array); if (old_array && bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) { ret = -E2BIG; @@ -1140,7 +1143,7 @@ unlock: void perf_event_detach_bpf_prog(struct perf_event *event) { - struct bpf_prog_array __rcu *old_array; + struct bpf_prog_array *old_array; struct bpf_prog_array *new_array; int ret; @@ -1149,7 +1152,7 @@ void perf_event_detach_bpf_prog(struct perf_event *event) if (!event->prog) goto unlock; - old_array = event->tp_event->prog_array; + old_array = bpf_event_rcu_dereference(event->tp_event->prog_array); ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array); if (ret == -ENOENT) goto unlock; @@ -1171,6 +1174,7 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info) { struct perf_event_query_bpf __user *uquery = info; struct perf_event_query_bpf query = {}; + struct bpf_prog_array *progs; u32 *ids, prog_cnt, ids_len; int ret; @@ -1195,10 +1199,8 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info) */ mutex_lock(&bpf_event_mutex); - ret = bpf_prog_array_copy_info(event->tp_event->prog_array, - ids, - ids_len, - &prog_cnt); + progs = bpf_event_rcu_dereference(event->tp_event->prog_array); + ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt); mutex_unlock(&bpf_event_mutex); if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) || From 501b125a29f7cd8983960c5f3c80b337b1359cc3 Mon Sep 17 00:00:00 2001 From: Quentin Monnet Date: Wed, 29 May 2019 15:26:41 +0100 Subject: [PATCH 61/83] libbpf: prevent overwriting of 
log_level in bpf_object__load_progs() There are two functions in libbpf that support passing a log_level parameter for the verifier for loading programs: bpf_object__load_xattr() and bpf_prog_load_xattr(). Both accept an attribute object containing the log_level, and apply it to the programs to load. It turns out that to effectively load the programs, the latter function eventually relies on the former. This was not taken into account when adding support for log_level in bpf_object__load_xattr(), and the log_level passed to bpf_prog_load_xattr() later gets overwritten with a zero value, thus disabling verifier logs for the program in all cases: bpf_prog_load_xattr() // prog->log_level = attr1->log_level; -> bpf_object__load() // attr2->log_level = 0; -> bpf_object__load_xattr() // -> bpf_object__load_progs() // prog->log_level = attr2->log_level; Fix this by OR-ing the log_level in bpf_object__load_progs(), instead of overwriting it. v2: Fix commit log description (confusion on function names in v1). Fixes: 60276f984998 ("libbpf: add bpf_object__load_xattr() API function to pass log_level") Reported-by: Alexei Starovoitov Signed-off-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- tools/lib/bpf/libbpf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index ca4432f5b067..30cb08e2eb75 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -2232,7 +2232,7 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level) for (i = 0; i < obj->nr_programs; i++) { if (bpf_program__is_function_storage(&obj->programs[i], obj)) continue; - obj->programs[i].log_level = log_level; + obj->programs[i].log_level |= log_level; err = bpf_program__load(&obj->programs[i], obj->license, obj->kern_version); From 8ca990ce0d402d5aaf05f7a33ff025fcbcbb5f93 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 29 May 2019 10:36:03 -0700 Subject: [PATCH 62/83] libbpf: fix detection of corrupted BPF instructions section Ensure that size of a section w/ BPF instruction is exactly a multiple of BPF instruction size. 
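For a concrete illustration (sizeof(struct bpf_insn) is 8 bytes, and the 12-byte size is an arbitrary example): a truncated 12-byte section passed the old "size < sizeof(struct bpf_insn)" test, while the stricter check rejects it:

    size_t size = 12;

    if (size == 0 || size % sizeof(struct bpf_insn))
        return -EINVAL;   /* 12 % 8 != 0: section is corrupted/truncated */
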
Signed-off-by: Andrii Nakryiko Acked-by: Song Liu Signed-off-by: Daniel Borkmann --- tools/lib/bpf/libbpf.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 30cb08e2eb75..c985a7916e35 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -349,8 +349,11 @@ static int bpf_program__init(void *data, size_t size, char *section_name, int idx, struct bpf_program *prog) { - if (size < sizeof(struct bpf_insn)) { - pr_warning("corrupted section '%s'\n", section_name); + const size_t bpf_insn_sz = sizeof(struct bpf_insn); + + if (size == 0 || size % bpf_insn_sz) { + pr_warning("corrupted section '%s', size: %zu\n", + section_name, size); return -EINVAL; } @@ -376,9 +379,8 @@ bpf_program__init(void *data, size_t size, char *section_name, int idx, section_name); goto errout; } - prog->insns_cnt = size / sizeof(struct bpf_insn); - memcpy(prog->insns, data, - prog->insns_cnt * sizeof(struct bpf_insn)); + prog->insns_cnt = size / bpf_insn_sz; + memcpy(prog->insns, data, size); prog->idx = idx; prog->instances.fds = NULL; prog->instances.nr = -1; From be5c5d4e9d8cbdd7fcdcbc45e86e34bc0bd1cefd Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 29 May 2019 10:36:04 -0700 Subject: [PATCH 63/83] libbpf: preserve errno before calling into user callback pr_warning ultimately may call into user-provided callback function, which can clobber errno value, so we need to save it before that. Acked-by: Song Liu Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann --- tools/lib/bpf/libbpf.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index c985a7916e35..40690b7e500e 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -565,12 +565,12 @@ static int bpf_object__elf_init(struct bpf_object *obj) } else { obj->efile.fd = open(obj->path, O_RDONLY); if (obj->efile.fd < 0) { - char errmsg[STRERR_BUFSIZE]; - char *cp = libbpf_strerror_r(errno, errmsg, - sizeof(errmsg)); + char errmsg[STRERR_BUFSIZE], *cp; + err = -errno; + cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); pr_warning("failed to open %s: %s\n", obj->path, cp); - return -errno; + return err; } obj->efile.elf = elf_begin(obj->efile.fd, From 12ef5634a855eb642b4d28781526fbd830aa7f7a Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 29 May 2019 10:36:05 -0700 Subject: [PATCH 64/83] libbpf: simplify endianness check Rewrite endianness check to use "more canonical" way, using compiler-defined macros, similar to few other places in libbpf. It also is more obvious and shorter. Acked-by: Song Liu Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann --- tools/lib/bpf/libbpf.c | 35 +++++++++++------------------------ 1 file changed, 11 insertions(+), 24 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 40690b7e500e..fb296eadc3f2 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -607,31 +607,18 @@ errout: return err; } -static int -bpf_object__check_endianness(struct bpf_object *obj) +static int bpf_object__check_endianness(struct bpf_object *obj) { - static unsigned int const endian = 1; - - switch (obj->efile.ehdr.e_ident[EI_DATA]) { - case ELFDATA2LSB: - /* We are big endian, BPF obj is little endian. */ - if (*(unsigned char const *)&endian != 1) - goto mismatch; - break; - - case ELFDATA2MSB: - /* We are little endian, BPF obj is big endian. 
*/ - if (*(unsigned char const *)&endian != 0) - goto mismatch; - break; - default: - return -LIBBPF_ERRNO__ENDIAN; - } - - return 0; - -mismatch: - pr_warning("Error: endianness mismatch.\n"); +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB) + return 0; +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB) + return 0; +#else +# error "Unrecognized __BYTE_ORDER__" +#endif + pr_warning("endianness mismatch.\n"); return -LIBBPF_ERRNO__ENDIAN; } From c51829bb6e921337014d9ac3712b0b7e8cae9f3d Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 29 May 2019 10:36:06 -0700 Subject: [PATCH 65/83] libbpf: check map name retrieved from ELF Validate there was no error retrieving symbol name corresponding to a BPF map. Acked-by: Song Liu Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann --- tools/lib/bpf/libbpf.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index fb296eadc3f2..60ec9694923a 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -920,6 +920,11 @@ bpf_object__init_maps(struct bpf_object *obj, int flags) map_name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, sym.st_name); + if (!map_name) { + pr_warning("failed to get map #%d name sym string for obj %s\n", + map_idx, obj->path); + return -LIBBPF_ERRNO__FORMAT; + } obj->maps[map_idx].libbpf_type = LIBBPF_MAP_UNSPEC; obj->maps[map_idx].offset = sym.st_value; From f102154d3158c8fe9713f03bd3492f9cb8409a5e Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 29 May 2019 10:36:07 -0700 Subject: [PATCH 66/83] libbpf: fix error code returned on corrupted ELF All of libbpf errors are negative, except this one. Fix it. Acked-by: Song Liu Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann --- tools/lib/bpf/libbpf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 60ec9694923a..9f421206d3e7 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -1221,7 +1221,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags) if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) { pr_warning("Corrupted ELF file: index of strtab invalid\n"); - return LIBBPF_ERRNO__FORMAT; + return -LIBBPF_ERRNO__FORMAT; } if (btf_data) { obj->btf = btf__new(btf_data->d_buf, btf_data->d_size); From fba01a0689a98613f5ee3b1569fd9321f3fde7bd Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 29 May 2019 10:36:08 -0700 Subject: [PATCH 67/83] libbpf: use negative fd to specify missing BTF 0 is a valid FD, so it's better to initialize it to -1, as is done in other places. Also, technically, BTF type ID 0 is valid (it's a VOID type), so it's more reliable to check btf_fd, instead of btf_key_type_id, to determine if there is any BTF associated with a map. 
Acked-by: Song Liu Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann --- tools/lib/bpf/libbpf.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 9f421206d3e7..b43913fc8b1b 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -1751,7 +1751,7 @@ bpf_object__create_maps(struct bpf_object *obj) create_attr.key_size = def->key_size; create_attr.value_size = def->value_size; create_attr.max_entries = def->max_entries; - create_attr.btf_fd = 0; + create_attr.btf_fd = -1; create_attr.btf_key_type_id = 0; create_attr.btf_value_type_id = 0; if (bpf_map_type__is_map_in_map(def->type) && @@ -1765,11 +1765,11 @@ bpf_object__create_maps(struct bpf_object *obj) } *pfd = bpf_create_map_xattr(&create_attr); - if (*pfd < 0 && create_attr.btf_key_type_id) { + if (*pfd < 0 && create_attr.btf_fd >= 0) { cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n", map->name, cp, errno); - create_attr.btf_fd = 0; + create_attr.btf_fd = -1; create_attr.btf_key_type_id = 0; create_attr.btf_value_type_id = 0; map->btf_key_type_id = 0; @@ -2053,6 +2053,9 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt, char *log_buf; int ret; + if (!insns || !insns_cnt) + return -EINVAL; + memset(&load_attr, 0, sizeof(struct bpf_load_program_attr)); load_attr.prog_type = prog->type; load_attr.expected_attach_type = prog->expected_attach_type; @@ -2063,7 +2066,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt, load_attr.license = license; load_attr.kern_version = kern_version; load_attr.prog_ifindex = prog->prog_ifindex; - load_attr.prog_btf_fd = prog->btf_fd >= 0 ? prog->btf_fd : 0; + load_attr.prog_btf_fd = prog->btf_fd; load_attr.func_info = prog->func_info; load_attr.func_info_rec_size = prog->func_info_rec_size; load_attr.func_info_cnt = prog->func_info_cnt; @@ -2072,8 +2075,6 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt, load_attr.line_info_cnt = prog->line_info_cnt; load_attr.log_level = prog->log_level; load_attr.prog_flags = prog->prog_flags; - if (!load_attr.insns || !load_attr.insns_cnt) - return -EINVAL; retry_load: log_buf = malloc(log_buf_size); From 7e8c328c4e96a0fc68d9f1264656683f7f1e12c1 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 29 May 2019 10:36:09 -0700 Subject: [PATCH 68/83] libbpf: simplify two pieces of logic Extra check for type is unnecessary in first case. Extra zeroing is unnecessary, as snprintf guarantees that it will zero-terminate string. 
Acked-by: Song Liu Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann --- tools/lib/bpf/libbpf.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index b43913fc8b1b..33cd59bf9e72 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -1430,8 +1430,7 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr, if (maps[map_idx].libbpf_type != type) continue; if (type != LIBBPF_MAP_UNSPEC || - (type == LIBBPF_MAP_UNSPEC && - maps[map_idx].offset == sym.st_value)) { + maps[map_idx].offset == sym.st_value) { pr_debug("relocation: find map %zd (%s) for insn %u\n", map_idx, maps[map_idx].name, insn_idx); break; @@ -2354,7 +2353,6 @@ struct bpf_object *bpf_object__open_buffer(void *obj_buf, snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx", (unsigned long)obj_buf, (unsigned long)obj_buf_sz); - tmp_name[sizeof(tmp_name) - 1] = '\0'; name = tmp_name; } pr_debug("loading object '%s' from buffer\n", From 76e1022b9653b5b5254e2f99ac467354b9c08de6 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 29 May 2019 10:36:10 -0700 Subject: [PATCH 69/83] libbpf: typo and formatting fixes A bunch of typo and formatting fixes. Acked-by: Song Liu Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann --- tools/lib/bpf/libbpf.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 33cd59bf9e72..a889925ecbcb 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -505,7 +505,7 @@ static struct bpf_object *bpf_object__new(const char *path, obj->efile.fd = -1; /* - * Caller of this function should also calls + * Caller of this function should also call * bpf_object__elf_finish() after data collection to return * obj_buf to user. If not, we should duplicate the buffer to * avoid user freeing them before elf finish. @@ -574,8 +574,7 @@ static int bpf_object__elf_init(struct bpf_object *obj) } obj->efile.elf = elf_begin(obj->efile.fd, - LIBBPF_ELF_C_READ_MMAP, - NULL); + LIBBPF_ELF_C_READ_MMAP, NULL); } if (!obj->efile.elf) { @@ -594,9 +593,9 @@ static int bpf_object__elf_init(struct bpf_object *obj) ep = &obj->efile.ehdr; /* Old LLVM set e_machine to EM_NONE */ - if ((ep->e_type != ET_REL) || (ep->e_machine && (ep->e_machine != EM_BPF))) { - pr_warning("%s is not an eBPF object file\n", - obj->path); + if (ep->e_type != ET_REL || + (ep->e_machine && ep->e_machine != EM_BPF)) { + pr_warning("%s is not an eBPF object file\n", obj->path); err = -LIBBPF_ERRNO__FORMAT; goto errout; } @@ -1438,7 +1437,7 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr, } if (map_idx >= nr_maps) { - pr_warning("bpf relocation: map_idx %d large than %d\n", + pr_warning("bpf relocation: map_idx %d larger than %d\n", (int)map_idx, (int)nr_maps - 1); return -LIBBPF_ERRNO__RELOC; } @@ -1797,7 +1796,7 @@ err_out: } } - pr_debug("create map %s: fd=%d\n", map->name, *pfd); + pr_debug("created map %s: fd=%d\n", map->name, *pfd); } return 0; From 399dc65e9ca0c35c8ceea7130b25e7be8f229a52 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 29 May 2019 10:36:11 -0700 Subject: [PATCH 70/83] libbpf: reduce unnecessary line wrapping There are a bunch of lines of code or comments that are unnecessary wrapped into multi-lines. Fix that without violating any code guidelines. 
Acked-by: Song Liu Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann --- tools/lib/bpf/libbpf.c | 52 +++++++++++++----------------------------- 1 file changed, 16 insertions(+), 36 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index a889925ecbcb..ba89d9727137 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -497,8 +497,7 @@ static struct bpf_object *bpf_object__new(const char *path, strcpy(obj->path, path); /* Using basename() GNU version which doesn't modify arg. */ - strncpy(obj->name, basename((void *)path), - sizeof(obj->name) - 1); + strncpy(obj->name, basename((void *)path), sizeof(obj->name) - 1); end = strchr(obj->name, '.'); if (end) *end = 0; @@ -578,15 +577,13 @@ static int bpf_object__elf_init(struct bpf_object *obj) } if (!obj->efile.elf) { - pr_warning("failed to open %s as ELF file\n", - obj->path); + pr_warning("failed to open %s as ELF file\n", obj->path); err = -LIBBPF_ERRNO__LIBELF; goto errout; } if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) { - pr_warning("failed to get EHDR from %s\n", - obj->path); + pr_warning("failed to get EHDR from %s\n", obj->path); err = -LIBBPF_ERRNO__FORMAT; goto errout; } @@ -622,18 +619,15 @@ static int bpf_object__check_endianness(struct bpf_object *obj) } static int -bpf_object__init_license(struct bpf_object *obj, - void *data, size_t size) +bpf_object__init_license(struct bpf_object *obj, void *data, size_t size) { - memcpy(obj->license, data, - min(size, sizeof(obj->license) - 1)); + memcpy(obj->license, data, min(size, sizeof(obj->license) - 1)); pr_debug("license of %s is %s\n", obj->path, obj->license); return 0; } static int -bpf_object__init_kversion(struct bpf_object *obj, - void *data, size_t size) +bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size) { __u32 kver; @@ -643,8 +637,7 @@ bpf_object__init_kversion(struct bpf_object *obj, } memcpy(&kver, data, sizeof(kver)); obj->kern_version = kver; - pr_debug("kernel version of %s is %x\n", obj->path, - obj->kern_version); + pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version); return 0; } @@ -800,8 +793,7 @@ bpf_object__init_internal_map(struct bpf_object *obj, struct bpf_map *map, def->key_size = sizeof(int); def->value_size = data->d_size; def->max_entries = 1; - def->map_flags = type == LIBBPF_MAP_RODATA ? - BPF_F_RDONLY_PROG : 0; + def->map_flags = type == LIBBPF_MAP_RODATA ? BPF_F_RDONLY_PROG : 0; if (data_buff) { *data_buff = malloc(data->d_size); if (!*data_buff) { @@ -816,8 +808,7 @@ bpf_object__init_internal_map(struct bpf_object *obj, struct bpf_map *map, return 0; } -static int -bpf_object__init_maps(struct bpf_object *obj, int flags) +static int bpf_object__init_maps(struct bpf_object *obj, int flags) { int i, map_idx, map_def_sz = 0, nr_syms, nr_maps = 0, nr_maps_glob = 0; bool strict = !(flags & MAPS_RELAX_COMPAT); @@ -1098,8 +1089,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags) /* Elf is corrupted/truncated, avoid calling elf_strptr. 
*/ if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) { - pr_warning("failed to get e_shstrndx from %s\n", - obj->path); + pr_warning("failed to get e_shstrndx from %s\n", obj->path); return -LIBBPF_ERRNO__FORMAT; } @@ -1340,8 +1330,7 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr, size_t nr_maps = obj->nr_maps; int i, nrels; - pr_debug("collecting relocating info for: '%s'\n", - prog->section_name); + pr_debug("collecting relocating info for: '%s'\n", prog->section_name); nrels = shdr->sh_size / shdr->sh_entsize; prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels); @@ -1366,9 +1355,7 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr, return -LIBBPF_ERRNO__FORMAT; } - if (!gelf_getsym(symbols, - GELF_R_SYM(rel.r_info), - &sym)) { + if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) { pr_warning("relocation: symbol %"PRIx64" not found\n", GELF_R_SYM(rel.r_info)); return -LIBBPF_ERRNO__FORMAT; @@ -1817,18 +1804,14 @@ check_btf_ext_reloc_err(struct bpf_program *prog, int err, if (btf_prog_info) { /* * Some info has already been found but has problem - * in the last btf_ext reloc. Must have to error - * out. + * in the last btf_ext reloc. Must have to error out. */ pr_warning("Error in relocating %s for sec %s.\n", info_name, prog->section_name); return err; } - /* - * Have problem loading the very first info. Ignore - * the rest. - */ + /* Have problem loading the very first info. Ignore the rest. */ pr_warning("Cannot find %s for main program sec %s. Ignore all %s.\n", info_name, prog->section_name, info_name); return 0; @@ -2032,9 +2015,7 @@ static int bpf_object__collect_reloc(struct bpf_object *obj) return -LIBBPF_ERRNO__RELOC; } - err = bpf_program__collect_reloc(prog, - shdr, data, - obj); + err = bpf_program__collect_reloc(prog, shdr, data, obj); if (err) return err; } @@ -2354,8 +2335,7 @@ struct bpf_object *bpf_object__open_buffer(void *obj_buf, (unsigned long)obj_buf_sz); name = tmp_name; } - pr_debug("loading object '%s' from buffer\n", - name); + pr_debug("loading object '%s' from buffer\n", name); return __bpf_object__open(name, obj_buf, obj_buf_sz, true, true); } From 1f52f6c0b0e846908e9c1082dab1b3f7088b82ac Mon Sep 17 00:00:00 2001 From: brakmo Date: Tue, 28 May 2019 16:59:35 -0700 Subject: [PATCH 71/83] bpf: Create BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY Create new macro BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY() to be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs so BPF programs can request cwr for TCP packets. Current cgroup skb programs can only return 0 or 1 (0 to drop the packet. This macro changes the behavior so the low order bit indicates whether the packet should be dropped (0) or not (1) and the next bit is used for congestion notification (cn). Hence, new allowed return values of CGROUP EGRESS BPF programs are: 0: drop packet 1: keep packet 2: drop packet and call cwr 3: keep packet and call cwr This macro then converts it to one of NET_XMIT values or -EPERM that has the effect of dropping the packet with no cn. 0: NET_XMIT_SUCCESS skb should be transmitted (no cn) 1: NET_XMIT_DROP skb should be dropped and cwr called 2: NET_XMIT_CN skb should be transmitted and cwr called 3: -EPERM skb should be dropped (no cn) Note that when more than one BPF program is called, the packet is dropped if at least one of programs requests it be dropped, and there is cn if at least one program returns cn. 
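To make the encoding concrete: bit 0 of the program's return value means "keep the packet" and bit 1 means "signal congestion (cn)". The small userspace sketch below mirrors only the final translation step of the macro; the per-program accumulation (drop wins via AND, cn is OR'd in) is omitted, and the NET_XMIT_* constants are redefined locally so it builds outside the kernel:

#include <errno.h>
#include <stdio.h>

/* Values as in include/linux/netdevice.h */
#define NET_XMIT_SUCCESS	0
#define NET_XMIT_DROP		1
#define NET_XMIT_CN		2

/* Mirror of the tail of BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY():
 * bit 0 of the program's return value = keep the packet,
 * bit 1 = congestion notification.
 */
static int egress_verdict(unsigned int prog_ret)
{
	unsigned int keep = prog_ret & 1;
	unsigned int cn = prog_ret & 2;

	if (keep)
		return cn ? NET_XMIT_CN : NET_XMIT_SUCCESS;
	return cn ? NET_XMIT_DROP : -EPERM;
}

int main(void)
{
	for (unsigned int r = 0; r <= 3; r++)
		printf("prog returns %u -> verdict %d\n", r, egress_verdict(r));
	return 0;
}

Running this prints -EPERM, NET_XMIT_SUCCESS, NET_XMIT_DROP and NET_XMIT_CN for return values 0 through 3, matching the table in the commit message.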
Signed-off-by: Lawrence Brakmo Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 50 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index ff3e00ff84d2..2cc58fc0f413 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -552,6 +552,56 @@ _out: \ _ret; \ }) +/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs + * so BPF programs can request cwr for TCP packets. + * + * Current cgroup skb programs can only return 0 or 1 (0 to drop the + * packet. This macro changes the behavior so the low order bit + * indicates whether the packet should be dropped (0) or not (1) + * and the next bit is a congestion notification bit. This could be + * used by TCP to call tcp_enter_cwr() + * + * Hence, new allowed return values of CGROUP EGRESS BPF programs are: + * 0: drop packet + * 1: keep packet + * 2: drop packet and cn + * 3: keep packet and cn + * + * This macro then converts it to one of the NET_XMIT or an error + * code that is then interpreted as drop packet (and no cn): + * 0: NET_XMIT_SUCCESS skb should be transmitted + * 1: NET_XMIT_DROP skb should be dropped and cn + * 2: NET_XMIT_CN skb should be transmitted and cn + * 3: -EPERM skb should be dropped + */ +#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func) \ + ({ \ + struct bpf_prog_array_item *_item; \ + struct bpf_prog *_prog; \ + struct bpf_prog_array *_array; \ + u32 ret; \ + u32 _ret = 1; \ + u32 _cn = 0; \ + preempt_disable(); \ + rcu_read_lock(); \ + _array = rcu_dereference(array); \ + _item = &_array->items[0]; \ + while ((_prog = READ_ONCE(_item->prog))) { \ + bpf_cgroup_storage_set(_item->cgroup_storage); \ + ret = func(_prog, ctx); \ + _ret &= (ret & 1); \ + _cn |= (ret & 2); \ + _item++; \ + } \ + rcu_read_unlock(); \ + preempt_enable(); \ + if (_ret) \ + _ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \ + else \ + _ret = (_cn ? NET_XMIT_DROP : -EPERM); \ + _ret; \ + }) + #define BPF_PROG_RUN_ARRAY(array, ctx, func) \ __BPF_PROG_RUN_ARRAY(array, ctx, func, false) From 5cf1e91456301f8c4f6bbc63ff76cff12f92f31b Mon Sep 17 00:00:00 2001 From: brakmo Date: Tue, 28 May 2019 16:59:36 -0700 Subject: [PATCH 72/83] bpf: cgroup inet skb programs can return 0 to 3 Allows cgroup inet skb programs to return values in the range [0, 3]. The second bit is used to deterine if congestion occurred and higher level protocol should decrease rate. E.g. TCP would call tcp_enter_cwr() The bpf_prog must set expected_attach_type to BPF_CGROUP_INET_EGRESS at load time if it uses the new return values (i.e. 2 or 3). The expected_attach_type is currently not enforced for BPF_PROG_TYPE_CGROUP_SKB. e.g Meaning the current bpf_prog with expected_attach_type setting to BPF_CGROUP_INET_EGRESS can attach to BPF_CGROUP_INET_INGRESS. Blindly enforcing expected_attach_type will break backward compatibility. This patch adds a enforce_expected_attach_type bit to only enforce the expected_attach_type when it uses the new return value. 
Signed-off-by: Lawrence Brakmo Signed-off-by: Martin KaFai Lau Signed-off-by: Alexei Starovoitov --- include/linux/filter.h | 3 ++- kernel/bpf/syscall.c | 12 ++++++++++++ kernel/bpf/verifier.c | 16 +++++++++++++--- 3 files changed, 27 insertions(+), 4 deletions(-) diff --git a/include/linux/filter.h b/include/linux/filter.h index ba8b65270e0d..43b45d6db36d 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -526,7 +526,8 @@ struct bpf_prog { blinded:1, /* Was blinded */ is_func:1, /* program is a bpf function */ kprobe_override:1, /* Do we override a kprobe? */ - has_callchain_buf:1; /* callchain buffer allocated? */ + has_callchain_buf:1, /* callchain buffer allocated? */ + enforce_expected_attach_type:1; /* Enforce expected_attach_type checking at attach time */ enum bpf_prog_type type; /* Type of BPF program */ enum bpf_attach_type expected_attach_type; /* For some prog types */ u32 len; /* Number of filter blocks */ diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 3d546b6f4646..1539774d78c7 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -1585,6 +1585,14 @@ bpf_prog_load_check_attach_type(enum bpf_prog_type prog_type, default: return -EINVAL; } + case BPF_PROG_TYPE_CGROUP_SKB: + switch (expected_attach_type) { + case BPF_CGROUP_INET_INGRESS: + case BPF_CGROUP_INET_EGRESS: + return 0; + default: + return -EINVAL; + } default: return 0; } @@ -1836,6 +1844,10 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, case BPF_PROG_TYPE_CGROUP_SOCK: case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: return attach_type == prog->expected_attach_type ? 0 : -EINVAL; + case BPF_PROG_TYPE_CGROUP_SKB: + return prog->enforce_expected_attach_type && + prog->expected_attach_type != attach_type ? + -EINVAL : 0; default: return 0; } diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 2778417e6e0c..5c2cb5bd84ce 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -5508,11 +5508,16 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) static int check_return_code(struct bpf_verifier_env *env) { + struct tnum enforce_attach_type_range = tnum_unknown; struct bpf_reg_state *reg; struct tnum range = tnum_range(0, 1); switch (env->prog->type) { case BPF_PROG_TYPE_CGROUP_SKB: + if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) { + range = tnum_range(0, 3); + enforce_attach_type_range = tnum_range(2, 3); + } case BPF_PROG_TYPE_CGROUP_SOCK: case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: case BPF_PROG_TYPE_SOCK_OPS: @@ -5531,18 +5536,23 @@ static int check_return_code(struct bpf_verifier_env *env) } if (!tnum_in(range, reg->var_off)) { + char tn_buf[48]; + verbose(env, "At program exit the register R0 "); if (!tnum_is_unknown(reg->var_off)) { - char tn_buf[48]; - tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "has value %s", tn_buf); } else { verbose(env, "has unknown scalar value"); } - verbose(env, " should have been 0 or 1\n"); + tnum_strn(tn_buf, sizeof(tn_buf), range); + verbose(env, " should have been %s\n", tn_buf); return -EINVAL; } + + if (!tnum_is_unknown(enforce_attach_type_range) && + tnum_in(enforce_attach_type_range, reg->var_off)) + env->prog->enforce_expected_attach_type = 1; return 0; } From e7a3160d092aa4dd7fb2ef23335cb3a98400aec6 Mon Sep 17 00:00:00 2001 From: brakmo Date: Tue, 28 May 2019 16:59:37 -0700 Subject: [PATCH 73/83] bpf: Update __cgroup_bpf_run_filter_skb with cn For egress packets, __cgroup_bpf_fun_filter_skb() will now call BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY() 
instead of PROG_CGROUP_RUN_ARRAY() in order to propagate congestion notifications (cn) requests to TCP callers. For egress packets, this function can return: NET_XMIT_SUCCESS (0) - continue with packet output NET_XMIT_DROP (1) - drop packet and notify TCP to call cwr NET_XMIT_CN (2) - continue with packet output and notify TCP to call cwr -EPERM - drop packet For ingress packets, this function will return -EPERM if any attached program was found and if it returned != 1 during execution. Otherwise 0 is returned. Signed-off-by: Lawrence Brakmo Signed-off-by: Alexei Starovoitov --- kernel/bpf/cgroup.c | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index ff594eb86fd7..1b65ab0df457 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -587,8 +587,16 @@ int cgroup_bpf_prog_query(const union bpf_attr *attr, * The program type passed in via @type must be suitable for network * filtering. No further check is performed to assert that. * - * This function will return %-EPERM if any if an attached program was found - * and if it returned != 1 during execution. In all other cases, 0 is returned. + * For egress packets, this function can return: + * NET_XMIT_SUCCESS (0) - continue with packet output + * NET_XMIT_DROP (1) - drop packet and notify TCP to call cwr + * NET_XMIT_CN (2) - continue with packet output and notify TCP + * to call cwr + * -EPERM - drop packet + * + * For ingress packets, this function will return -EPERM if any + * attached program was found and if it returned != 1 during execution. + * Otherwise 0 is returned. */ int __cgroup_bpf_run_filter_skb(struct sock *sk, struct sk_buff *skb, @@ -614,12 +622,19 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk, /* compute pointers for the bpf prog */ bpf_compute_and_save_data_end(skb, &saved_data_end); - ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb, - __bpf_prog_run_save_cb); + if (type == BPF_CGROUP_INET_EGRESS) { + ret = BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY( + cgrp->bpf.effective[type], skb, __bpf_prog_run_save_cb); + } else { + ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb, + __bpf_prog_run_save_cb); + ret = (ret == 1 ? 0 : -EPERM); + } bpf_restore_data_end(skb, saved_data_end); __skb_pull(skb, offset); skb->sk = save_sk; - return ret == 1 ? 0 : -EPERM; + + return ret; } EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb); From 956fe2190820df3a6ee530204e059da508159319 Mon Sep 17 00:00:00 2001 From: brakmo Date: Tue, 28 May 2019 16:59:38 -0700 Subject: [PATCH 74/83] bpf: Update BPF_CGROUP_RUN_PROG_INET_EGRESS calls Update BPF_CGROUP_RUN_PROG_INET_EGRESS() callers to support returning congestion notifications from the BPF programs. 
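The reworked callers rely on the GNU "x ? : y" shorthand, which evaluates to x when x is non-zero and to y otherwise, so the NET_XMIT_CN verdict is only reported upward when the lower layer itself succeeded. A compiler-agnostic userspace sketch of the same dispatch shape (__finish() stands in for __ip_finish_output(); constants redefined locally):

#include <stdio.h>

#define NET_XMIT_SUCCESS	0
#define NET_XMIT_CN		2

/* Stand-in for __ip_finish_output(); returns 0 on success. */
static int __finish(int lower_fails)
{
	return lower_fails ? -1 : 0;
}

/* Same shape as the reworked ip_finish_output(): on NET_XMIT_CN the
 * packet is still transmitted, and the CN verdict is only propagated
 * if the lower layer succeeded; any other non-success verdict means
 * the packet is dropped and the verdict returned as-is.
 */
static int finish_output(int verdict, int lower_fails)
{
	int rc;

	switch (verdict) {
	case NET_XMIT_SUCCESS:
		return __finish(lower_fails);
	case NET_XMIT_CN:
		rc = __finish(lower_fails);
		return rc ? rc : verdict;	/* "rc ? : verdict" with the GNU extension */
	default:
		return verdict;			/* NET_XMIT_DROP or -EPERM: drop */
	}
}

int main(void)
{
	printf("CN, lower ok   -> %d\n", finish_output(NET_XMIT_CN, 0));	/* 2 */
	printf("CN, lower fail -> %d\n", finish_output(NET_XMIT_CN, 1));	/* -1 */
	return 0;
}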
Signed-off-by: Lawrence Brakmo Signed-off-by: Alexei Starovoitov --- net/ipv4/ip_output.c | 34 +++++++++++++++++++++++----------- net/ipv6/ip6_output.c | 26 +++++++++++++++++--------- 2 files changed, 40 insertions(+), 20 deletions(-) diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index bfd0ca554977..1217a53381c2 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -287,16 +287,9 @@ static int ip_finish_output_gso(struct net *net, struct sock *sk, return ret; } -static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb) +static int __ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb) { unsigned int mtu; - int ret; - - ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb); - if (ret) { - kfree_skb(skb); - return ret; - } #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM) /* Policy lookup after SNAT yielded a new policy */ @@ -315,18 +308,37 @@ static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *sk return ip_finish_output2(net, sk, skb); } +static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb) +{ + int ret; + + ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb); + switch (ret) { + case NET_XMIT_SUCCESS: + return __ip_finish_output(net, sk, skb); + case NET_XMIT_CN: + return __ip_finish_output(net, sk, skb) ? : ret; + default: + kfree_skb(skb); + return ret; + } +} + static int ip_mc_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb) { int ret; ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb); - if (ret) { + switch (ret) { + case NET_XMIT_SUCCESS: + return dev_loopback_xmit(net, sk, skb); + case NET_XMIT_CN: + return dev_loopback_xmit(net, sk, skb) ? : ret; + default: kfree_skb(skb); return ret; } - - return dev_loopback_xmit(net, sk, skb); } int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb) diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index adef2236abe2..a75bc21d8c88 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -128,16 +128,8 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff * return -EINVAL; } -static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb) +static int __ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb) { - int ret; - - ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb); - if (ret) { - kfree_skb(skb); - return ret; - } - #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM) /* Policy lookup after SNAT yielded a new policy */ if (skb_dst(skb)->xfrm) { @@ -154,6 +146,22 @@ static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *s return ip6_finish_output2(net, sk, skb); } +static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb) +{ + int ret; + + ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb); + switch (ret) { + case NET_XMIT_SUCCESS: + return __ip6_finish_output(net, sk, skb); + case NET_XMIT_CN: + return __ip6_finish_output(net, sk, skb) ? : ret; + default: + kfree_skb(skb); + return ret; + } +} + int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb) { struct net_device *dev = skb_dst(skb)->dev; From ffd81558d56c611b1e93f856c77f42046a2deab5 Mon Sep 17 00:00:00 2001 From: brakmo Date: Tue, 28 May 2019 16:59:39 -0700 Subject: [PATCH 75/83] bpf: Add cn support to hbm_out_kern.c Update hbm_out_kern.c to support returning cn notifications. Also updates relevant files to allow disabling cn notifications. 
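The idea behind the hbm_out_kern.c change is that an egress limiter no longer has to drop a packet to slow a flow down: it can let the packet through and return 3 so TCP enters CWR. The fragment below is only an illustrative sketch of that pattern, not the actual hbm_out_kern.c logic (the real credit accounting, map layout, and the option to disable cn are more involved), and the BUDGET_BYTES value and BTF-style map definition are assumptions:

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Hypothetical per-interval byte budget for this cgroup. */
#define BUDGET_BYTES	(1 << 20)

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} bytes_sent SEC(".maps");

SEC("cgroup_skb/egress")
int rate_limit_cn(struct __sk_buff *skb)
{
	__u32 key = 0;
	__u64 *sent = bpf_map_lookup_elem(&bytes_sent, &key);

	if (!sent)
		return 1;		/* keep packet, no cn */

	__sync_fetch_and_add(sent, skb->len);
	if (*sent > BUDGET_BYTES)
		return 3;		/* keep packet, signal cn instead of dropping */
	return 1;
}

char _license[] SEC("license") = "GPL";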
Signed-off-by: Lawrence Brakmo Signed-off-by: Alexei Starovoitov --- samples/bpf/do_hbm_test.sh | 10 +++++++--- samples/bpf/hbm.c | 18 +++++++++++++++--- samples/bpf/hbm.h | 3 ++- samples/bpf/hbm_out_kern.c | 26 +++++++++++++++++++++----- 4 files changed, 45 insertions(+), 12 deletions(-) diff --git a/samples/bpf/do_hbm_test.sh b/samples/bpf/do_hbm_test.sh index 56c8b4115c95..e48b047d4646 100755 --- a/samples/bpf/do_hbm_test.sh +++ b/samples/bpf/do_hbm_test.sh @@ -13,10 +13,10 @@ Usage() { echo "egress or ingress bandwidht. It then uses iperf3 or netperf to create" echo "loads. The output is the goodput in Mbps (unless -D was used)." echo "" - echo "USAGE: $name [out] [-b=|--bpf=] [-c=|--cc=] [-D]" - echo " [-d=|--delay=] [--debug] [-E]" + echo "USAGE: $name [out] [-b=|--bpf=] [-c=|--cc=]" + echo " [-D] [-d=|--delay=] [--debug] [-E]" echo " [-f=<#flows>|--flows=<#flows>] [-h] [-i=|--id=]" - echo " [-l] [-N] [-p=|--port=] [-P]" + echo " [-l] [-N] [--no_cn] [-p=|--port=] [-P]" echo " [-q=] [-R] [-s=|--server=|--time=