// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */

#include <linux/bpf.h>

#include "disasm.h"
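
/* Map each BPF_FUNC_* helper id to its "bpf_*" name; __BPF_FUNC_MAPPER()
 * expands over the full UAPI helper list, so e.g. the entry for
 * BPF_FUNC_map_lookup_elem becomes "bpf_map_lookup_elem".
 */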
#define __BPF_FUNC_STR_FN(x) [BPF_FUNC_ ## x] = __stringify(bpf_ ## x)
static const char * const func_id_str[] = {
	__BPF_FUNC_MAPPER(__BPF_FUNC_STR_FN)
};
#undef __BPF_FUNC_STR_FN
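
/* Resolve a printable name for a BPF_CALL target. Plain helper calls
 * (src_reg == 0) come straight from func_id_str[]; failing that, the
 * caller-supplied cb_call callback may provide one (e.g. via kallsyms).
 * As a last resort, BPF-to-BPF calls (BPF_PSEUDO_CALL) are rendered as a
 * relative insn offset, and kfunc calls (BPF_PSEUDO_KFUNC_CALL, whose imm
 * is not a helper id) as the generic "kernel-function".
 */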
static const char *__func_get_name(const struct bpf_insn_cbs *cbs,
				   const struct bpf_insn *insn,
				   char *buff, size_t len)
{
	BUILD_BUG_ON(ARRAY_SIZE(func_id_str) != __BPF_FUNC_MAX_ID);

	if (!insn->src_reg &&
	    insn->imm >= 0 && insn->imm < __BPF_FUNC_MAX_ID &&
	    func_id_str[insn->imm])
		return func_id_str[insn->imm];

	if (cbs && cbs->cb_call) {
		const char *res;

		res = cbs->cb_call(cbs->private_data, insn);
		if (res)
			return res;
	}

	if (insn->src_reg == BPF_PSEUDO_CALL)
		snprintf(buff, len, "%+d", insn->imm);
	else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL)
		snprintf(buff, len, "kernel-function");

	return buff;
}
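
/* Resolve a printable form of a full 64-bit immediate (ldimm64). The
 * cb_imm callback may substitute a symbolic form (e.g. "map[id:2]");
 * otherwise the raw value is printed in hex.
 */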
static const char *__func_imm_name(const struct bpf_insn_cbs *cbs,
				   const struct bpf_insn *insn,
				   u64 full_imm, char *buff, size_t len)
{
	if (cbs && cbs->cb_imm)
		return cbs->cb_imm(cbs->private_data, insn, full_imm);

	snprintf(buff, len, "0x%llx", (unsigned long long)full_imm);
	return buff;
}
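
/* Helper id -> name, with a safe fallback for unknown/out-of-range ids. */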
const char *func_id_name(int id)
{
	if (id >= 0 && id < __BPF_FUNC_MAX_ID && func_id_str[id])
		return func_id_str[id];
	else
		return "unknown";
}
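
/* Mnemonics for the eight instruction classes, indexed by BPF_CLASS(). */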
const char *const bpf_class_string[8] = {
	[BPF_LD]    = "ld",
	[BPF_LDX]   = "ldx",
	[BPF_ST]    = "st",
	[BPF_STX]   = "stx",
	[BPF_ALU]   = "alu",
	[BPF_JMP]   = "jmp",
	[BPF_JMP32] = "jmp32",
	[BPF_ALU64] = "alu64",
};
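
/* ALU op -> C-like operator, indexed by BPF_OP(insn->code) >> 4. */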
const char *const bpf_alu_string[16] = {
	[BPF_ADD >> 4]  = "+=",
	[BPF_SUB >> 4]  = "-=",
	[BPF_MUL >> 4]  = "*=",
	[BPF_DIV >> 4]  = "/=",
	[BPF_OR >> 4]   = "|=",
	[BPF_AND >> 4]  = "&=",
	[BPF_LSH >> 4]  = "<<=",
	[BPF_RSH >> 4]  = ">>=",
	[BPF_NEG >> 4]  = "neg",
	[BPF_MOD >> 4]  = "%=",
	[BPF_XOR >> 4]  = "^=",
	[BPF_MOV >> 4]  = "=",
	[BPF_ARSH >> 4] = "s>>=",
	[BPF_END >> 4]  = "endian",
};
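
/* Signed-division/modulo variants, selected when is_sdiv_smod() holds. */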
static const char *const bpf_alu_sign_string[16] = {
	[BPF_DIV >> 4]  = "s/=",
	[BPF_MOD >> 4]  = "s%=",
};
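
/* Sign-extending move casts; indexed by (insn->off >> 3) - 1, so
 * off == 8/16/32 selects entry 0/1/3 respectively.
 */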
static const char *const bpf_movsx_string[4] = {
	[0] = "(s8)",
	[1] = "(s16)",
	[3] = "(s32)",
};
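
/* ALU op names used when printing atomic (fetch) operations. */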
static const char *const bpf_atomic_alu_string[16] = {
	[BPF_ADD >> 4]  = "add",
	[BPF_AND >> 4]  = "and",
	[BPF_OR >> 4]   = "or",
	[BPF_XOR >> 4]  = "xor",
};
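
/* Access-size names for loads/stores (and their sign-extending
 * counterparts below), indexed by BPF_SIZE(insn->code) >> 3.
 */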
static const char *const bpf_ldst_string[] = {
	[BPF_W >> 3]  = "u32",
	[BPF_H >> 3]  = "u16",
	[BPF_B >> 3]  = "u8",
	[BPF_DW >> 3] = "u64",
};

static const char *const bpf_ldsx_string[] = {
	[BPF_W >> 3]  = "s32",
	[BPF_H >> 3]  = "s16",
	[BPF_B >> 3]  = "s8",
};
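
/* Jump op -> comparison operator (or mnemonic), indexed by BPF_OP() >> 4. */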
static const char *const bpf_jmp_string[16] = {
	[BPF_JA >> 4]   = "jmp",
	[BPF_JEQ >> 4]  = "==",
	[BPF_JGT >> 4]  = ">",
	[BPF_JLT >> 4]  = "<",
	[BPF_JGE >> 4]  = ">=",
	[BPF_JLE >> 4]  = "<=",
	[BPF_JSET >> 4] = "&",
	[BPF_JNE >> 4]  = "!=",
	[BPF_JSGT >> 4] = "s>",
	[BPF_JSLT >> 4] = "s<",
	[BPF_JSGE >> 4] = "s>=",
	[BPF_JSLE >> 4] = "s<=",
	[BPF_CALL >> 4] = "call",
	[BPF_EXIT >> 4] = "exit",
};
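
/* BPF_END within BPF_ALU: endianness conversion ("r1 = be16 r1" style);
 * insn->imm carries the target bit width.
 */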
static void print_bpf_end_insn(bpf_insn_print_t verbose,
			       void *private_data,
			       const struct bpf_insn *insn)
{
	verbose(private_data, "(%02x) r%d = %s%d r%d\n",
		insn->code, insn->dst_reg,
		BPF_SRC(insn->code) == BPF_TO_BE ? "be" : "le",
		insn->imm, insn->dst_reg);
}
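
/* BPF_END within BPF_ALU64: unconditional byte swap ("r1 = bswap16 r1"
 * style), again with the width in insn->imm.
 */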
static void print_bpf_bswap_insn(bpf_insn_print_t verbose,
				 void *private_data,
				 const struct bpf_insn *insn)
{
	verbose(private_data, "(%02x) r%d = bswap%d r%d\n",
		insn->code, insn->dst_reg,
		insn->imm, insn->dst_reg);
}
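
/* off == 1 flags the signed variant of BPF_DIV/BPF_MOD. */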
static bool is_sdiv_smod(const struct bpf_insn *insn)
{
	return (BPF_OP(insn->code) == BPF_DIV || BPF_OP(insn->code) == BPF_MOD) &&
	       insn->off == 1;
}
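
/* BPF_MOV with off == 8/16/32 is a sign-extending move (movsx). */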
static bool is_movsx(const struct bpf_insn *insn)
{
	return BPF_OP(insn->code) == BPF_MOV &&
	       (insn->off == 8 || insn->off == 16 || insn->off == 32);
}
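
/* Print one (xlated) BPF instruction through cbs->cb_print; this is the
 * routine behind dumps such as "bpftool prog dump xlated", e.g.:
 *
 *	0: (b7) r1 = 2
 *	4: (18) r1 = map[id:2]
 *	6: (85) call bpf_map_lookup_elem#73424
 *	7: (15) if r0 == 0x0 goto pc+18
 *
 * (instruction indices are added by the caller). The cbs callbacks allow
 * symbolic substitutions such as map ids for map pointers and helper
 * names for call targets.
 */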
void print_bpf_insn(const struct bpf_insn_cbs *cbs,
		    const struct bpf_insn *insn,
		    bool allow_ptr_leaks)
{
	const bpf_insn_print_t verbose = cbs->cb_print;
	u8 class = BPF_CLASS(insn->code);

	if (class == BPF_ALU || class == BPF_ALU64) {
		if (BPF_OP(insn->code) == BPF_END) {
			if (class == BPF_ALU64)
				print_bpf_bswap_insn(verbose, cbs->private_data, insn);
			else
				print_bpf_end_insn(verbose, cbs->private_data, insn);
		} else if (BPF_OP(insn->code) == BPF_NEG) {
			verbose(cbs->private_data, "(%02x) %c%d = -%c%d\n",
				insn->code, class == BPF_ALU ? 'w' : 'r',
				insn->dst_reg, class == BPF_ALU ? 'w' : 'r',
				insn->dst_reg);
		} else if (BPF_SRC(insn->code) == BPF_X) {
			verbose(cbs->private_data, "(%02x) %c%d %s %s%c%d\n",
				insn->code, class == BPF_ALU ? 'w' : 'r',
				insn->dst_reg,
				is_sdiv_smod(insn) ? bpf_alu_sign_string[BPF_OP(insn->code) >> 4]
						   : bpf_alu_string[BPF_OP(insn->code) >> 4],
				is_movsx(insn) ? bpf_movsx_string[(insn->off >> 3) - 1] : "",
				class == BPF_ALU ? 'w' : 'r',
				insn->src_reg);
		} else {
			verbose(cbs->private_data, "(%02x) %c%d %s %d\n",
				insn->code, class == BPF_ALU ? 'w' : 'r',
				insn->dst_reg,
				is_sdiv_smod(insn) ? bpf_alu_sign_string[BPF_OP(insn->code) >> 4]
						   : bpf_alu_string[BPF_OP(insn->code) >> 4],
				insn->imm);
		}
	} else if (class == BPF_STX) {
		if (BPF_MODE(insn->code) == BPF_MEM)
			verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = r%d\n",
				insn->code,
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->dst_reg,
				insn->off, insn->src_reg);
		else if (BPF_MODE(insn->code) == BPF_ATOMIC &&
			 (insn->imm == BPF_ADD || insn->imm == BPF_AND ||
			  insn->imm == BPF_OR || insn->imm == BPF_XOR)) {
			verbose(cbs->private_data, "(%02x) lock *(%s *)(r%d %+d) %s r%d\n",
				insn->code,
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->dst_reg, insn->off,
				bpf_alu_string[BPF_OP(insn->imm) >> 4],
				insn->src_reg);
		} else if (BPF_MODE(insn->code) == BPF_ATOMIC &&
			   (insn->imm == (BPF_ADD | BPF_FETCH) ||
			    insn->imm == (BPF_AND | BPF_FETCH) ||
			    insn->imm == (BPF_OR | BPF_FETCH) ||
			    insn->imm == (BPF_XOR | BPF_FETCH))) {
			verbose(cbs->private_data, "(%02x) r%d = atomic%s_fetch_%s((%s *)(r%d %+d), r%d)\n",
				insn->code, insn->src_reg,
				BPF_SIZE(insn->code) == BPF_DW ? "64" : "",
				bpf_atomic_alu_string[BPF_OP(insn->imm) >> 4],
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->dst_reg, insn->off, insn->src_reg);
		} else if (BPF_MODE(insn->code) == BPF_ATOMIC &&
			   insn->imm == BPF_CMPXCHG) {
			verbose(cbs->private_data, "(%02x) r0 = atomic%s_cmpxchg((%s *)(r%d %+d), r0, r%d)\n",
				insn->code,
				BPF_SIZE(insn->code) == BPF_DW ? "64" : "",
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->dst_reg, insn->off,
				insn->src_reg);
		} else if (BPF_MODE(insn->code) == BPF_ATOMIC &&
			   insn->imm == BPF_XCHG) {
			verbose(cbs->private_data, "(%02x) r%d = atomic%s_xchg((%s *)(r%d %+d), r%d)\n",
				insn->code, insn->src_reg,
				BPF_SIZE(insn->code) == BPF_DW ? "64" : "",
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->dst_reg, insn->off, insn->src_reg);
		} else {
			verbose(cbs->private_data, "BUG_%02x\n", insn->code);
		}
	} else if (class == BPF_ST) {
		if (BPF_MODE(insn->code) == BPF_MEM) {
			verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = %d\n",
				insn->code,
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->dst_reg,
				insn->off, insn->imm);
		} else if (BPF_MODE(insn->code) == 0xc0 /* BPF_NOSPEC, no UAPI */) {
			verbose(cbs->private_data, "(%02x) nospec\n", insn->code);
		} else {
			verbose(cbs->private_data, "BUG_st_%02x\n", insn->code);
		}
	} else if (class == BPF_LDX) {
		if (BPF_MODE(insn->code) != BPF_MEM && BPF_MODE(insn->code) != BPF_MEMSX) {
			verbose(cbs->private_data, "BUG_ldx_%02x\n", insn->code);
			return;
		}
		verbose(cbs->private_data, "(%02x) r%d = *(%s *)(r%d %+d)\n",
			insn->code, insn->dst_reg,
			BPF_MODE(insn->code) == BPF_MEM ?
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3] :
				bpf_ldsx_string[BPF_SIZE(insn->code) >> 3],
			insn->src_reg, insn->off);
	} else if (class == BPF_LD) {
		if (BPF_MODE(insn->code) == BPF_ABS) {
			verbose(cbs->private_data, "(%02x) r0 = *(%s *)skb[%d]\n",
				insn->code,
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->imm);
		} else if (BPF_MODE(insn->code) == BPF_IND) {
			verbose(cbs->private_data, "(%02x) r0 = *(%s *)skb[r%d + %d]\n",
				insn->code,
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->src_reg, insn->imm);
		} else if (BPF_MODE(insn->code) == BPF_IMM &&
			   BPF_SIZE(insn->code) == BPF_DW) {
			/* At this point, we already made sure that the second
			 * part of the ldimm64 insn is accessible.
			 */
			u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
			bool is_ptr = insn->src_reg == BPF_PSEUDO_MAP_FD ||
				      insn->src_reg == BPF_PSEUDO_MAP_VALUE;
			char tmp[64];
bpf: implement lookup-free direct value access for maps

This generic extension to BPF maps allows for directly loading
an address residing inside a BPF map value as a single BPF
ldimm64 instruction.

The idea is similar to what BPF_PSEUDO_MAP_FD does today, which
is a special src_reg flag for the ldimm64 instruction indicating
that the first part of the double insn's imm field holds a file
descriptor, which the verifier then replaces with the full 64-bit
map address across both imm parts. For the newly added
BPF_PSEUDO_MAP_VALUE src_reg flag, the idea is the following:
the first part of the double insn's imm field is again a file
descriptor corresponding to the map, and the second part of the
imm field is an offset into the value. The verifier then replaces
both imm parts with an address that points into the BPF map value
at the given offset, for maps that support this operation.
Currently, only array maps with a single entry are supported.

It would be possible to support more than a single map element by
reusing both 16-bit off fields of the insns as a map index, so a
full array map lookup could be expressed that way. This has not
been implemented here for lack of a concrete use case, but it
could easily be added later in a compatible way, since both off
fields currently must be 0 and would then correctly denote map
index 0.

BPF_PSEUDO_MAP_VALUE is a distinct flag because with
BPF_PSEUDO_MAP_FD alone we could not distinguish a load of the
map pointer from a load of the map's value at offset 0, and
changing BPF_PSEUDO_MAP_FD's encoding to be off by one in order
to tell regular map pointers and map value pointers apart would
add unnecessary complexity and raise the barrier to
debuggability, making it less suitable. Using the second part of
the imm field as an offset into the value imposes no limitation,
since the maximum possible value size fits into the u32 universe
anyway.

This optimization allows for efficiently retrieving an address to
a map value memory area without having to issue a helper call,
which needs registers prepared according to the calling
convention, without the extra NULL test, and without having to
add the offset to the value base pointer in an additional
instruction. The verifier then treats the destination register as
PTR_TO_MAP_VALUE with a constant reg->off taken from the second
imm field, and guarantees that it is within bounds of the map
value. Any subsequent operations are treated as ordinary map
value accesses, with nothing extra needed on the verification
side.

The two map operations for direct value access have been added to
the array map for now; other types could be supported later
depending on the use case. The main use case for this commit is
to let BPF loaders support global variables that reside in
.data/.rodata/.bss sections, so that their addresses can be
loaded directly with minimal additional infrastructure. Loader
support is added to the libbpf library in subsequent commits.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
2019-04-10 05:20:03 +08:00
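
As a concrete illustration of the encoding described above, a loader
could emit the instruction pair as follows. This is a minimal sketch:
emit_ldimm64_map_value() is a hypothetical helper name, while
BPF_PSEUDO_MAP_VALUE, BPF_LD | BPF_DW | BPF_IMM and struct bpf_insn
are the UAPI definitions from linux/bpf.h referenced by the commit:

#include <linux/bpf.h>

/* Hypothetical loader helper: emit the two-part ldimm64 described
 * above. insn[0].imm carries the map fd, insn[1].imm the byte offset
 * into the map value. Both 16-bit off fields must stay 0 (reserved
 * for a possible future map index).
 */
static void emit_ldimm64_map_value(struct bpf_insn *insn, __u8 dst_reg,
				   int map_fd, __u32 value_off)
{
	insn[0] = (struct bpf_insn) {
		.code    = BPF_LD | BPF_DW | BPF_IMM,
		.dst_reg = dst_reg,
		.src_reg = BPF_PSEUDO_MAP_VALUE, /* not BPF_PSEUDO_MAP_FD */
		.imm     = map_fd,               /* first half of imm64 */
	};
	insn[1] = (struct bpf_insn) {
		.imm     = value_off,            /* second half: value offset */
	};
}

After the verifier rewrites the pair, dst_reg is PTR_TO_MAP_VALUE with
constant reg->off == value_off; this is how a loader can materialize,
in a single instruction, the address of a global variable kept in a
single-entry array map backing .data/.rodata/.bss.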

			/* Never leak raw kernel addresses: scrub the imm
			 * unless the caller is allowed to see pointers.
			 */
			if (is_ptr && !allow_ptr_leaks)
				imm = 0;

			verbose(cbs->private_data, "(%02x) r%d = %s\n",
				insn->code, insn->dst_reg,
				__func_imm_name(cbs, insn, imm,
						tmp, sizeof(tmp)));
		} else {
			/* unrecognized BPF_LD encoding */
			verbose(cbs->private_data, "BUG_ld_%02x\n", insn->code);
			return;
		}
	} else if (class == BPF_JMP32 || class == BPF_JMP) {
		u8 opcode = BPF_OP(insn->code);

		if (opcode == BPF_CALL) {
			char tmp[64];

			if (insn->src_reg == BPF_PSEUDO_CALL) {
				/* bpf-to-bpf call: target is pc-relative */
				verbose(cbs->private_data, "(%02x) call pc%s\n",
					insn->code,
					__func_get_name(cbs, insn,
							tmp, sizeof(tmp)));
			} else {
				/* helper call: resolve the name by func id */
				strcpy(tmp, "unknown");
				verbose(cbs->private_data, "(%02x) call %s#%d\n", insn->code,
					__func_get_name(cbs, insn,
							tmp, sizeof(tmp)),
					insn->imm);
			}
		} else if (insn->code == (BPF_JMP | BPF_JA)) {
			verbose(cbs->private_data, "(%02x) goto pc%+d\n",
				insn->code, insn->off);
		} else if (insn->code == (BPF_JMP32 | BPF_JA)) {
			/* "gotol": long jump whose 32-bit offset lives in
			 * imm instead of the 16-bit off field.
			 */
			verbose(cbs->private_data, "(%02x) gotol pc%+d\n",
				insn->code, insn->imm);
		} else if (insn->code == (BPF_JMP | BPF_EXIT)) {
			verbose(cbs->private_data, "(%02x) exit\n", insn->code);
		} else if (BPF_SRC(insn->code) == BPF_X) {
			verbose(cbs->private_data,
				"(%02x) if %c%d %s %c%d goto pc%+d\n",
				insn->code, class == BPF_JMP32 ? 'w' : 'r',
				insn->dst_reg,
				bpf_jmp_string[BPF_OP(insn->code) >> 4],
				class == BPF_JMP32 ? 'w' : 'r',
				insn->src_reg, insn->off);
		} else {
			verbose(cbs->private_data,
				"(%02x) if %c%d %s 0x%x goto pc%+d\n",
				insn->code, class == BPF_JMP32 ? 'w' : 'r',
				insn->dst_reg,
				bpf_jmp_string[BPF_OP(insn->code) >> 4],
				insn->imm, insn->off);
		}
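		/* Example: for code = BPF_JMP32 | BPF_JSGT | BPF_X (0x6e)
		 * with dst_reg 1, src_reg 2 and off 4, the BPF_X branch
		 * above prints "(6e) if w1 s> w2 goto pc+4"; 'w' denotes a
		 * 32-bit subregister, 'r' the full 64-bit register.
		 */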
	} else {
		verbose(cbs->private_data, "(%02x) %s\n",
			insn->code, bpf_class_string[class]);
	}
}
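
For reference, a minimal sketch of how a caller wires up the callback
interface consumed above. struct bpf_insn_cbs and print_bpf_insn() are
declared in disasm.h; my_print_cb() and dump_insn() are hypothetical
stand-ins (in-tree, the verifier log is the main consumer, and bpftool
carries a userspace copy of this file for "prog dump xlated"):

#include <linux/printk.h>

static void my_print_cb(void *private_data, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vprintk(fmt, args);	/* or vfprintf() in a userspace build */
	va_end(args);
}

static void dump_insn(const struct bpf_insn *insn, bool allow_ptr_leaks)
{
	const struct bpf_insn_cbs cbs = {
		.cb_print	= my_print_cb,
		.cb_call	= NULL,	/* optional call-target naming */
		.cb_imm		= NULL,	/* optional imm64 pretty-printer */
		.private_data	= NULL,
	};

	print_bpf_insn(&cbs, insn, allow_ptr_leaks);
}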