Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Alexei Starovoitov says:

====================
pull-request: bpf 2018-02-22

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) two urgent fixes for bpf_tail_call logic for x64 and arm64 JITs, from Daniel.

2) cond_resched points in percpu array alloc/free paths, from Eric.

3) lockdep and other minor fixes, from Yonghong, Arnd, Anders, Li.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 2217009443
@@ -250,8 +250,9 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
 	off = offsetof(struct bpf_array, map.max_entries);
 	emit_a64_mov_i64(tmp, off, ctx);
 	emit(A64_LDR32(tmp, r2, tmp), ctx);
+	emit(A64_MOV(0, r3, r3), ctx);
 	emit(A64_CMP(0, r3, tmp), ctx);
-	emit(A64_B_(A64_COND_GE, jmp_offset), ctx);
+	emit(A64_B_(A64_COND_CS, jmp_offset), ctx);
 
 	/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
 	 *     goto out;
@@ -259,7 +260,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
 	 */
 	emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx);
 	emit(A64_CMP(1, tcc, tmp), ctx);
-	emit(A64_B_(A64_COND_GT, jmp_offset), ctx);
+	emit(A64_B_(A64_COND_HI, jmp_offset), ctx);
 	emit(A64_ADD_I(1, tcc, tcc, 1), ctx);
 
 	/* prog = array->ptrs[index];
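The two hunks above are Daniel's arm64 tail-call fix: the index register is explicitly zero-extended (the added A64_MOV) and the branch conditions move from the signed GE/GT to their unsigned counterparts CS/HI, so a negative or wider-than-32-bit index can no longer slip past the bounds check. A self-contained C sketch of the checks the JITed tail call is expected to perform (illustrative only; the struct and function names below are stand-ins, not kernel code):

	#include <stdint.h>
	#include <stddef.h>

	#define MAX_TAIL_CALL_CNT 32			/* same limit the kernel uses here */

	struct fake_prog_array {
		uint32_t max_entries;
		void *ptrs[];
	};

	static void *tail_call_target(struct fake_prog_array *array, uint64_t index,
				      uint32_t *tail_call_cnt)
	{
		uint32_t idx = (uint32_t)index;		/* A64_MOV(0, r3, r3): keep only the low 32 bits */

		if (idx >= array->max_entries)		/* unsigned compare: A64_COND_CS / x86 JBE */
			return NULL;			/* goto out */
		if (*tail_call_cnt > MAX_TAIL_CALL_CNT)	/* unsigned compare: A64_COND_HI / x86 JA */
			return NULL;
		(*tail_call_cnt)++;
		return array->ptrs[idx];		/* prog = array->ptrs[index] */
	}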
@@ -177,4 +177,41 @@ static inline void indirect_branch_prediction_barrier(void)
 }
 
 #endif /* __ASSEMBLY__ */
+
+/*
+ * Below is used in the eBPF JIT compiler and emits the byte sequence
+ * for the following assembly:
+ *
+ * With retpolines configured:
+ *
+ *    callq do_rop
+ *  spec_trap:
+ *    pause
+ *    lfence
+ *    jmp spec_trap
+ *  do_rop:
+ *    mov %rax,(%rsp)
+ *    retq
+ *
+ * Without retpolines configured:
+ *
+ *    jmp *%rax
+ */
+#ifdef CONFIG_RETPOLINE
+# define RETPOLINE_RAX_BPF_JIT_SIZE	17
+# define RETPOLINE_RAX_BPF_JIT()				\
+	EMIT1_off32(0xE8, 7);	 /* callq do_rop */		\
+	/* spec_trap: */					\
+	EMIT2(0xF3, 0x90);       /* pause */			\
+	EMIT3(0x0F, 0xAE, 0xE8); /* lfence */			\
+	EMIT2(0xEB, 0xF9);       /* jmp spec_trap */		\
+	/* do_rop: */						\
+	EMIT4(0x48, 0x89, 0x04, 0x24); /* mov %rax,(%rsp) */	\
+	EMIT1(0xC3);             /* retq */
+#else
+# define RETPOLINE_RAX_BPF_JIT_SIZE	2
+# define RETPOLINE_RAX_BPF_JIT()				\
+	EMIT2(0xFF, 0xE0);	 /* jmp *%rax */
+#endif
+
 #endif /* _ASM_X86_NOSPEC_BRANCH_H_ */
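The size constants follow directly from the x86 instruction encodings the macro emits; a quick standalone tally (not part of the patch) for reference:

	#include <stdio.h>

	int main(void)
	{
		int callq    = 1 + 4;	/* E8 + 32-bit offset -> callq do_rop    */
		int pause    = 2;	/* F3 90              -> pause           */
		int lfence   = 3;	/* 0F AE E8           -> lfence          */
		int jmp_trap = 2;	/* EB F9              -> jmp spec_trap   */
		int mov_rsp  = 4;	/* 48 89 04 24        -> mov %rax,(%rsp) */
		int retq     = 1;	/* C3                  -> retq           */

		/* 5 + 2 + 3 + 2 + 4 + 1 = 17 = RETPOLINE_RAX_BPF_JIT_SIZE */
		printf("retpoline tail-call thunk: %d bytes\n",
		       callq + pause + lfence + jmp_trap + mov_rsp + retq);

		/* Without retpolines, FF E0 (jmp *%rax) is just 2 bytes. */
		return 0;
	}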
@@ -13,6 +13,7 @@
 #include <linux/if_vlan.h>
 #include <asm/cacheflush.h>
 #include <asm/set_memory.h>
+#include <asm/nospec-branch.h>
 #include <linux/bpf.h>
 
 /*
@@ -290,7 +291,7 @@ static void emit_bpf_tail_call(u8 **pprog)
 	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
 	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
 	      offsetof(struct bpf_array, map.max_entries));
-#define OFFSET1 43 /* number of bytes to jump */
+#define OFFSET1 (41 + RETPOLINE_RAX_BPF_JIT_SIZE) /* number of bytes to jump */
 	EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
 	label1 = cnt;
 
@@ -299,7 +300,7 @@ static void emit_bpf_tail_call(u8 **pprog)
 	 */
 	EMIT2_off32(0x8B, 0x85, 36);              /* mov eax, dword ptr [rbp + 36] */
 	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
-#define OFFSET2 32
+#define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE)
 	EMIT2(X86_JA, OFFSET2);                   /* ja out */
 	label2 = cnt;
 	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
@@ -313,7 +314,7 @@ static void emit_bpf_tail_call(u8 **pprog)
 	 *	goto out;
 	 */
 	EMIT3(0x48, 0x85, 0xC0);                  /* test rax,rax */
-#define OFFSET3 10
+#define OFFSET3 (8 + RETPOLINE_RAX_BPF_JIT_SIZE)
 	EMIT2(X86_JE, OFFSET3);                   /* je out */
 	label3 = cnt;
 
@@ -326,7 +327,7 @@ static void emit_bpf_tail_call(u8 **pprog)
 	 * rdi == ctx (1st arg)
 	 * rax == prog->bpf_func + prologue_size
 	 */
-	EMIT2(0xFF, 0xE0);                        /* jmp rax */
+	RETPOLINE_RAX_BPF_JIT();
 
 	/* out: */
 	BUILD_BUG_ON(cnt - label1 != OFFSET1);
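The x86 tail-call hunks swap the 2-byte indirect "jmp rax" for the retpoline thunk, so each hard-coded jump distance is rewritten as (old value - 2) + RETPOLINE_RAX_BPF_JIT_SIZE, and the existing BUILD_BUG_ON(cnt - labelN != OFFSETN) checks keep the byte counts honest at build time. A small arithmetic sketch with values copied from the defines above (illustrative check, not kernel code):

	#include <assert.h>

	/* With CONFIG_RETPOLINE the thunk is 17 bytes; without it the macro
	 * emits the old 2-byte jmp, so the formulas collapse back to the
	 * previous constants. */
	static_assert(41 + 2 == 43, "OFFSET1 without retpolines");
	static_assert(30 + 2 == 32, "OFFSET2 without retpolines");
	static_assert( 8 + 2 == 10, "OFFSET3 without retpolines");
	static_assert(41 + 17 == 43 + 15, "OFFSET1 grows by 17 - 2 = 15 bytes");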
@@ -26,8 +26,10 @@ static void bpf_array_free_percpu(struct bpf_array *array)
 {
 	int i;
 
-	for (i = 0; i < array->map.max_entries; i++)
+	for (i = 0; i < array->map.max_entries; i++) {
 		free_percpu(array->pptrs[i]);
+		cond_resched();
+	}
 }
 
 static int bpf_array_alloc_percpu(struct bpf_array *array)
@@ -43,6 +45,7 @@ static int bpf_array_alloc_percpu(struct bpf_array *array)
 			return -ENOMEM;
 		}
 		array->pptrs[i] = ptr;
+		cond_resched();
 	}
 
 	return 0;
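These are Eric's cond_resched points: a percpu array map can have a very large max_entries, so allocating or freeing one per-cpu region per entry is a long kernel loop, and cond_resched() gives the scheduler a chance to run on every iteration, presumably to avoid hogging the CPU on huge maps. A minimal kernel-style sketch of the pattern (illustrative only, not the patched functions themselves):

	#include <linux/percpu.h>
	#include <linux/sched.h>
	#include <linux/types.h>

	/* Free one per-cpu region per entry, yielding between iterations so a
	 * huge map does not monopolise the CPU. */
	static void free_all_percpu(void __percpu **pptrs, u32 n)
	{
		u32 i;

		for (i = 0; i < n; i++) {
			free_percpu(pptrs[i]);
			cond_resched();
		}
	}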
@@ -569,8 +569,7 @@ static void trie_free(struct bpf_map *map)
 	slot = &trie->root;
 
 	for (;;) {
-		node = rcu_dereference_protected(*slot,
-						 lockdep_is_held(&trie->lock));
+		node = rcu_dereference_protected(*slot, 1);
 		if (!node)
 			goto out;
 
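The lpm_trie hunk is one of the lockdep fixes from the pull message: trie_free() is the map's free callback, so presumably it runs once nothing else can reach the trie and without taking trie->lock, which would make the lockdep_is_held() condition trip a false positive; passing a constant 1 asserts exclusive access instead. An illustrative sketch of the two usage styles of rcu_dereference_protected() (stand-in types, not from the patch):

	#include <linux/rcupdate.h>
	#include <linux/lockdep.h>
	#include <linux/spinlock.h>

	struct item;				/* opaque payload, stand-in type */

	struct holder {
		spinlock_t lock;
		struct item __rcu *ptr;
	};

	/* Normal update path: the lock protects the pointer and lockdep can
	 * verify that it is actually held. */
	static struct item *get_locked(struct holder *h)
	{
		return rcu_dereference_protected(h->ptr, lockdep_is_held(&h->lock));
	}

	/* Teardown path (like trie_free() above): the object is unreachable by
	 * anyone else, so assert exclusive access with a constant-true check. */
	static struct item *get_during_teardown(struct holder *h)
	{
		return rcu_dereference_protected(h->ptr, 1);
	}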
@@ -3381,17 +3381,13 @@ BPF_CALL_2(bpf_sock_ops_cb_flags_set, struct bpf_sock_ops_kern *, bpf_sock,
 	struct sock *sk = bpf_sock->sk;
 	int val = argval & BPF_SOCK_OPS_ALL_CB_FLAGS;
 
-	if (!sk_fullsock(sk))
+	if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk))
 		return -EINVAL;
 
-#ifdef CONFIG_INET
 	if (val)
 		tcp_sk(sk)->bpf_sock_ops_cb_flags = val;
 
 	return argval & (~BPF_SOCK_OPS_ALL_CB_FLAGS);
-#else
-	return -EINVAL;
-#endif
 }
 
 static const struct bpf_func_proto bpf_sock_ops_cb_flags_set_proto = {
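The filter.c change folds the CONFIG_INET check into the existing error path with IS_ENABLED(), which expands to a constant 0 or 1: the guarded code is still parsed and type-checked on !CONFIG_INET builds but is discarded as dead code, and the #ifdef/#else/#endif block goes away. A small sketch of the idiom (hypothetical helper, not from the patch):

	#include <linux/kconfig.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	/* Hypothetical helper showing the IS_ENABLED() idiom: one early return
	 * covers both "feature compiled out" and the runtime precondition. */
	static int example_set_flags(bool sk_is_fullsock, int val)
	{
		if (!IS_ENABLED(CONFIG_INET) || !sk_is_fullsock)
			return -EINVAL;

		/* CONFIG_INET-only work would go here; when CONFIG_INET is off
		 * the compiler proves this branch unreachable and drops it. */
		return val;
	}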
tools/testing/selftests/bpf/.gitignore
@@ -11,3 +11,4 @@ test_progs
 test_tcpbpf_user
 test_verifier_log
 feature
+test_libbpf_open
@@ -126,6 +126,8 @@ static void test_hashmap_sizes(int task, void *data)
 			fd = bpf_create_map(BPF_MAP_TYPE_HASH, i, j,
 					    2, map_flags);
 			if (fd < 0) {
+				if (errno == ENOMEM)
+					return;
 				printf("Failed to create hashmap key=%d value=%d '%s'\n",
 				       i, j, strerror(errno));
 				exit(1);
@@ -5,7 +5,6 @@
 #include <linux/if_ether.h>
 #include <linux/if_packet.h>
 #include <linux/ip.h>
-#include <linux/in6.h>
 #include <linux/types.h>
 #include <linux/socket.h>
 #include <linux/tcp.h>
@@ -2586,6 +2586,32 @@ static struct bpf_test tests[] = {
 		.result_unpriv = REJECT,
 		.result = ACCEPT,
 	},
+	{
+		"runtime/jit: pass negative index to tail_call",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_3, -1),
+			BPF_LD_MAP_FD(BPF_REG_2, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_tail_call),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_prog = { 1 },
+		.result = ACCEPT,
+	},
+	{
+		"runtime/jit: pass > 32bit index to tail_call",
+		.insns = {
+			BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
+			BPF_LD_MAP_FD(BPF_REG_2, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_tail_call),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_prog = { 2 },
+		.result = ACCEPT,
+	},
 	{
 		"stack pointer arithmetic",
 		.insns = {