bpf-next-for-netdev
-----BEGIN PGP SIGNATURE-----

iHUEABYIAB0WIQTFp0I1jqZrAX+hPRXbK58LschIgwUCZGKqEAAKCRDbK58LschI
g6LYAQDp1jAszCOkmJ8VUA0ZyC5NAFDv+7y9Nd1toYWYX1btzAEAkf8+5qBJ1qmI
P5M0hjMTbH4MID9Aql10ZbMHheyOBAo=
=NUQM
-----END PGP SIGNATURE-----

Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next

Daniel Borkmann says:

====================
pull-request: bpf-next 2023-05-16

We've added 57 non-merge commits during the last 19 day(s) which contain
a total of 63 files changed, 3293 insertions(+), 690 deletions(-).

The main changes are:

1) Add precision propagation to the verifier for subprogs and callbacks,
   from Andrii Nakryiko.

2) Improve BPF's {g,s}etsockopt() handling of wrong option lengths,
   from Stanislav Fomichev.

3) Utilize pahole v1.25 for the kernel's BTF generation to filter out
   inconsistent function prototypes, from Alan Maguire.

4) Various dyn-pointer verifier improvements to relax restrictions,
   from Daniel Rosenberg.

5) Add a new bpf_task_under_cgroup() kfunc for testing a designated task,
   from Feng Zhou.

6) Unblock tests for arm64 BPF CI now that ftrace supports direct calls,
   from Florent Revest.

7) Add XDP hint kfunc metadata for RX hash/timestamp for igc,
   from Jesper Dangaard Brouer.

8) Add several new dyn-pointer kfuncs to ease their usability,
   from Joanne Koong.

9) Add an in-depth LRU internals description and dot function graph,
   from Joe Stringer.

10) Fix a KCSAN report on bpf_lru_list when accessing node->ref,
    from Martin KaFai Lau.

11) Only dump the unprivileged_bpf_disabled log warning upon write,
    from Kui-Feng Lee.

12) Extend test_progs to allow directly passing an allow/denylist file,
    from Stephen Veiss.

13) Fix a BPF trampoline memleak upon failure attaching to fentry,
    from Yafang Shao.

14) Fix emitting the struct bpf_tcp_sock type in vmlinux BTF,
    from Yonghong Song.

* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (57 commits)
  bpf: Fix memleak due to fentry attach failure
  bpf: Remove bpf trampoline selector
  bpf, arm64: Support struct arguments in the BPF trampoline
  bpftool: JIT limited misreported as negative value on aarch64
  bpf: fix calculation of subseq_idx during precision backtracking
  bpf: Remove anonymous union in bpf_kfunc_call_arg_meta
  bpf: Document EFAULT changes for sockopt
  selftests/bpf: Correctly handle optlen > 4096
  selftests/bpf: Update EFAULT {g,s}etsockopt selftests
  bpf: Don't EFAULT for {g,s}etsockopt with wrong optlen
  libbpf: fix offsetof() and container_of() to work with CO-RE
  bpf: Address KCSAN report on bpf_lru_list
  bpf: Add --skip_encoding_btf_inconsistent_proto, --btf_gen_optimized to pahole flags for v1.25
  selftests/bpf: Accept mem from dynptr in helper funcs
  bpf: verifier: Accept dynptr mem as mem in helpers
  selftests/bpf: Check overflow in optional buffer
  selftests/bpf: Test allowing NULL buffer in dynptr slice
  bpf: Allow NULL buffers in bpf_dynptr_slice(_rw)
  selftests/bpf: Add testcase for bpf_task_under_cgroup
  bpf: Add bpf_task_under_cgroup() kfunc
  ...
====================

Link: https://lore.kernel.org/r/20230515225603.27027-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in commit a0e35a648f.
@@ -100,7 +100,7 @@ Hence, whenever a constant scalar argument is accepted by a kfunc which is not a
 size parameter, and the value of the constant matters for program safety, __k
 suffix should be used.
 
-2.2.2 __uninit Annotation
+2.2.3 __uninit Annotation
 -------------------------
 
 This annotation is used to indicate that the argument will be treated as
@@ -117,6 +117,27 @@ Here, the dynptr will be treated as an uninitialized dynptr. Without this
 annotation, the verifier will reject the program if the dynptr passed in is
 not initialized.
 
+2.2.4 __opt Annotation
+-------------------------
+
+This annotation is used to indicate that the buffer associated with an __sz or
+__szk argument may be NULL. If the function is passed a NULL pointer in place
+of the buffer, the verifier will not check that the length is appropriate for
+the buffer. The kfunc is responsible for checking whether this buffer is NULL
+before using it.
+
+An example is given below::
+
+        __bpf_kfunc void *bpf_dynptr_slice(..., void *buffer__opt, u32 buffer__szk)
+        {
+        ...
+        }
+
+Here, the buffer may be NULL. If it is not NULL, it is at least of size
+buffer__szk. Either way, the returned buffer is either NULL or of size
+buffer__szk. Without this annotation, the verifier will reject the program if
+a NULL pointer is passed in with a nonzero size.
+
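As an editorial illustration (not part of the patch), a minimal sketch of a BPF
program passing a NULL optional buffer; the section name and the 4-byte read
are illustrative assumptions::

        /* With buffer__opt == NULL, bpf_dynptr_slice() only succeeds when it
         * can return a direct pointer into the skb; it returns NULL when the
         * bounce buffer would have been required.
         */
        SEC("tc")
        int sketch(struct __sk_buff *skb)
        {
                struct bpf_dynptr ptr;
                void *data;

                if (bpf_dynptr_from_skb(skb, 0, &ptr))
                        return TC_ACT_OK;

                data = bpf_dynptr_slice(&ptr, 0, NULL, 4);
                if (!data)
                        return TC_ACT_OK; /* the slice needed the buffer */

                /* ... inspect the first 4 bytes via data ... */
                return TC_ACT_OK;
        }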
 
 .. _BPF_kfunc_nodef:
 
 2.3 Using an existing kernel function
@@ -48,7 +48,7 @@ the code with ``llvm-objdump -dr test.o``::
       14:   0f 10 00 00 00 00 00 00  r0 += r1
       15:   95 00 00 00 00 00 00 00  exit
 
-There are four relations in the above for four ``LD_imm64`` instructions.
+There are four relocations in the above for four ``LD_imm64`` instructions.
 The following ``llvm-readelf -r test.o`` shows the binary values of the four
 relocations::
 
@@ -79,14 +79,16 @@ The following is the symbol table with ``llvm-readelf -s test.o``::
 The 6th entry is global variable ``g1`` with value 0.
 
 Similarly, the second relocation is at ``.text`` offset ``0x18``, instruction 3,
-for global variable ``g2`` which has a symbol value 4, the offset
-from the start of ``.data`` section.
+has a type of ``R_BPF_64_64`` and refers to entry 7 in the symbol table.
+The second relocation resolves to global variable ``g2`` which has a symbol
+value 4. The symbol value represents the offset from the start of ``.data``
+section where the initial value of the global variable ``g2`` is stored.
 
-The third and fourth relocations refers to static variables ``l1``
-and ``l2``. From ``.rel.text`` section above, it is not clear
-which symbols they really refers to as they both refers to
+The third and fourth relocations refer to static variables ``l1``
+and ``l2``. From the ``.rel.text`` section above, it is not clear
+to which symbols they really refer as they both refer to
 symbol table entry 4, symbol ``sec``, which has ``STT_SECTION`` type
-and represents a section. So for static variable or function,
+and represents a section. So for a static variable or function,
 the section offset is written to the original insn
 buffer, which is called ``A`` (addend). Looking at
 above insn ``7`` and ``11``, they have section offset ``8`` and ``12``.
@@ -1,5 +1,6 @@
 .. SPDX-License-Identifier: GPL-2.0-only
 .. Copyright (C) 2022 Red Hat, Inc.
+.. Copyright (C) 2022-2023 Isovalent, Inc.
 
 ===============================================
 BPF_MAP_TYPE_HASH, with PERCPU and LRU Variants
@@ -29,7 +30,16 @@ will automatically evict the least recently used entries when the hash
 table reaches capacity. An LRU hash maintains an internal LRU list that
 is used to select elements for eviction. This internal LRU list is
 shared across CPUs but it is possible to request a per CPU LRU list with
-the ``BPF_F_NO_COMMON_LRU`` flag when calling ``bpf_map_create``.
+the ``BPF_F_NO_COMMON_LRU`` flag when calling ``bpf_map_create``. The
+following table outlines the properties of LRU maps depending on the
+map type and the flags used to create the map.
+
+======================== ========================= ================================
+Flag                     ``BPF_MAP_TYPE_LRU_HASH`` ``BPF_MAP_TYPE_LRU_PERCPU_HASH``
+======================== ========================= ================================
+**BPF_F_NO_COMMON_LRU**  Per-CPU LRU, global map   Per-CPU LRU, per-cpu map
+**!BPF_F_NO_COMMON_LRU** Global LRU, global map    Global LRU, per-cpu map
+======================== ========================= ================================
 
 Usage
 =====
@@ -206,3 +216,44 @@ Userspace walking the map elements from the map declared above:
             cur_key = &next_key;
         }
     }
+
+Internals
+=========
+
+This section of the document is targeted at Linux developers and describes
+aspects of the map implementations that are not considered stable ABI. The
+following details are subject to change in future versions of the kernel.
+
+``BPF_MAP_TYPE_LRU_HASH`` and variants
+--------------------------------------
+
+Updating elements in LRU maps may trigger eviction behaviour when the capacity
+of the map is reached. There are various steps that the update algorithm
+attempts in order to enforce the LRU property which have increasing impacts on
+other CPUs involved in the following operation attempts:
+
+- Attempt to use CPU-local state to batch operations
+- Attempt to fetch free nodes from global lists
+- Attempt to pull any node from a global list and remove it from the hashmap
+- Attempt to pull any node from any CPU's list and remove it from the hashmap
+
+This algorithm is described visually in the following diagram. See the
+description in commit 3a08c2fd7634 ("bpf: LRU List") for a full explanation of
+the corresponding operations:
+
+.. kernel-figure:: map_lru_hash_update.dot
+   :alt: Diagram outlining the LRU eviction steps taken during map update.
+
+   LRU hash eviction during map update for ``BPF_MAP_TYPE_LRU_HASH`` and
+   variants. See the dot file source for kernel function name code references.
+
+Map updates start from the oval in the top right "begin ``bpf_map_update()``"
+and progress through the graph towards the bottom where the result may be
+either a successful update or a failure with various error codes. The key in
+the top right provides indicators for which locks may be involved in specific
+operations. This is intended as a visual hint for reasoning about how map
+contention may impact update operations, though the map type and flags may
+impact the actual contention on those locks, based on the logic described in
+the table above. For instance, if the map is created with type
+``BPF_MAP_TYPE_LRU_PERCPU_HASH`` and flags ``BPF_F_NO_COMMON_LRU`` then all map
+properties would be per-cpu.
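As a usage illustration (an editorial sketch, not part of the patch), a minimal
example of declaring an LRU hash with a per-CPU LRU list and updating it from
userspace; the map and variable names are assumptions::

        /* BPF side: standard libbpf map definition. */
        struct {
                __uint(type, BPF_MAP_TYPE_LRU_HASH);
                __uint(map_flags, BPF_F_NO_COMMON_LRU); /* per-CPU LRU, global map */
                __uint(max_entries, 1024);
                __type(key, __u32);
                __type(value, __u64);
        } lru_map SEC(".maps");

        /* Userspace side: updates may fail transiently under contention (see
         * the error codes at the bottom of the diagram), so check the result.
         */
        if (bpf_map_update_elem(map_fd, &key, &value, BPF_ANY))
                fprintf(stderr, "update failed: %s\n", strerror(errno));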
Documentation/bpf/map_lru_hash_update.dot (new file, 172 lines):

@@ -0,0 +1,172 @@
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2022-2023 Isovalent, Inc.
digraph {
  node [colorscheme=accent4,style=filled] # Apply colorscheme to all nodes
  graph [splines=ortho, nodesep=1]

  subgraph cluster_key {
    label = "Key\n(locks held during operation)";
    rankdir = TB;

    remote_lock [shape=rectangle,fillcolor=4,label="remote CPU LRU lock"]
    hash_lock [shape=rectangle,fillcolor=3,label="hashtab lock"]
    lru_lock [shape=rectangle,fillcolor=2,label="LRU lock"]
    local_lock [shape=rectangle,fillcolor=1,label="local CPU LRU lock"]
    no_lock [shape=rectangle,label="no locks held"]
  }

  begin [shape=oval,label="begin\nbpf_map_update()"]

  // Nodes below with an 'fn_' prefix are roughly labeled by the C function
  // names that initiate the corresponding logic in kernel/bpf/bpf_lru_list.c.
  // Number suffixes and errno suffixes handle subsections of the corresponding
  // logic in the function as of the writing of this dot.

  // cf. __local_list_pop_free() / bpf_percpu_lru_pop_free()
  local_freelist_check [shape=diamond,fillcolor=1,
      label="Local freelist\nnode available?"];
  use_local_node [shape=rectangle,
      label="Use node owned\nby this CPU"]

  // cf. bpf_lru_pop_free()
  common_lru_check [shape=diamond,
      label="Map created with\ncommon LRU?\n(!BPF_F_NO_COMMON_LRU)"];

  fn_bpf_lru_list_pop_free_to_local [shape=rectangle,fillcolor=2,
      label="Flush local pending,
Rotate Global list, move
LOCAL_FREE_TARGET
from global -> local"]
  // Also corresponds to:
  // fn__local_list_flush()
  // fn_bpf_lru_list_rotate()
  fn___bpf_lru_node_move_to_free[shape=diamond,fillcolor=2,
      label="Able to free\nLOCAL_FREE_TARGET\nnodes?"]

  fn___bpf_lru_list_shrink_inactive [shape=rectangle,fillcolor=3,
      label="Shrink inactive list
up to remaining
LOCAL_FREE_TARGET
(global LRU -> local)"]
  fn___bpf_lru_list_shrink [shape=diamond,fillcolor=2,
      label="> 0 entries in\nlocal free list?"]
  fn___bpf_lru_list_shrink2 [shape=rectangle,fillcolor=2,
      label="Steal one node from
inactive, or if empty,
from active global list"]
  fn___bpf_lru_list_shrink3 [shape=rectangle,fillcolor=3,
      label="Try to remove\nnode from hashtab"]

  local_freelist_check2 [shape=diamond,label="Htab removal\nsuccessful?"]
  common_lru_check2 [shape=diamond,
      label="Map created with\ncommon LRU?\n(!BPF_F_NO_COMMON_LRU)"];

  subgraph cluster_remote_lock {
    label = "Iterate through CPUs\n(start from current)";
    style = dashed;
    rankdir=LR;

    local_freelist_check5 [shape=diamond,fillcolor=4,
        label="Steal a node from\nper-cpu freelist?"]
    local_freelist_check6 [shape=rectangle,fillcolor=4,
        label="Steal a node from
(1) Unreferenced pending, or
(2) Any pending node"]
    local_freelist_check7 [shape=rectangle,fillcolor=3,
        label="Try to remove\nnode from hashtab"]
    fn_htab_lru_map_update_elem [shape=diamond,
        label="Stole node\nfrom remote\nCPU?"]
    fn_htab_lru_map_update_elem2 [shape=diamond,label="Iterated\nall CPUs?"]
    // Also corresponds to:
    // use_local_node()
    // fn__local_list_pop_pending()
  }

  fn_bpf_lru_list_pop_free_to_local2 [shape=rectangle,
      label="Use node that was\nnot recently referenced"]
  local_freelist_check4 [shape=rectangle,
      label="Use node that was\nactively referenced\nin global list"]
  fn_htab_lru_map_update_elem_ENOMEM [shape=oval,label="return -ENOMEM"]
  fn_htab_lru_map_update_elem3 [shape=rectangle,
      label="Use node that was\nactively referenced\nin (another?) CPU's cache"]
  fn_htab_lru_map_update_elem4 [shape=rectangle,fillcolor=3,
      label="Update hashmap\nwith new element"]
  fn_htab_lru_map_update_elem5 [shape=oval,label="return 0"]
  fn_htab_lru_map_update_elem_EBUSY [shape=oval,label="return -EBUSY"]
  fn_htab_lru_map_update_elem_EEXIST [shape=oval,label="return -EEXIST"]
  fn_htab_lru_map_update_elem_ENOENT [shape=oval,label="return -ENOENT"]

  begin -> local_freelist_check
  local_freelist_check -> use_local_node [xlabel="Y"]
  local_freelist_check -> common_lru_check [xlabel="N"]
  common_lru_check -> fn_bpf_lru_list_pop_free_to_local [xlabel="Y"]
  common_lru_check -> fn___bpf_lru_list_shrink_inactive [xlabel="N"]
  fn_bpf_lru_list_pop_free_to_local -> fn___bpf_lru_node_move_to_free
  fn___bpf_lru_node_move_to_free ->
    fn_bpf_lru_list_pop_free_to_local2 [xlabel="Y"]
  fn___bpf_lru_node_move_to_free ->
    fn___bpf_lru_list_shrink_inactive [xlabel="N"]
  fn___bpf_lru_list_shrink_inactive -> fn___bpf_lru_list_shrink
  fn___bpf_lru_list_shrink -> fn_bpf_lru_list_pop_free_to_local2 [xlabel = "Y"]
  fn___bpf_lru_list_shrink -> fn___bpf_lru_list_shrink2 [xlabel="N"]
  fn___bpf_lru_list_shrink2 -> fn___bpf_lru_list_shrink3
  fn___bpf_lru_list_shrink3 -> local_freelist_check2
  local_freelist_check2 -> local_freelist_check4 [xlabel = "Y"]
  local_freelist_check2 -> common_lru_check2 [xlabel = "N"]
  common_lru_check2 -> local_freelist_check5 [xlabel = "Y"]
  common_lru_check2 -> fn_htab_lru_map_update_elem_ENOMEM [xlabel = "N"]
  local_freelist_check5 -> fn_htab_lru_map_update_elem [xlabel = "Y"]
  local_freelist_check5 -> local_freelist_check6 [xlabel = "N"]
  local_freelist_check6 -> local_freelist_check7
  local_freelist_check7 -> fn_htab_lru_map_update_elem

  fn_htab_lru_map_update_elem -> fn_htab_lru_map_update_elem3 [xlabel = "Y"]
  fn_htab_lru_map_update_elem -> fn_htab_lru_map_update_elem2 [xlabel = "N"]
  fn_htab_lru_map_update_elem2 ->
    fn_htab_lru_map_update_elem_ENOMEM [xlabel = "Y"]
  fn_htab_lru_map_update_elem2 -> local_freelist_check5 [xlabel = "N"]
  fn_htab_lru_map_update_elem3 -> fn_htab_lru_map_update_elem4

  use_local_node -> fn_htab_lru_map_update_elem4
  fn_bpf_lru_list_pop_free_to_local2 -> fn_htab_lru_map_update_elem4
  local_freelist_check4 -> fn_htab_lru_map_update_elem4

  fn_htab_lru_map_update_elem4 -> fn_htab_lru_map_update_elem5 [headlabel="Success"]
  fn_htab_lru_map_update_elem4 ->
    fn_htab_lru_map_update_elem_EBUSY [xlabel="Hashtab lock failed"]
  fn_htab_lru_map_update_elem4 ->
    fn_htab_lru_map_update_elem_EEXIST [xlabel="BPF_EXIST set and\nkey already exists"]
  fn_htab_lru_map_update_elem4 ->
    fn_htab_lru_map_update_elem_ENOENT [headlabel="BPF_NOEXIST set\nand no such entry"]

  // Create invisible pad nodes to line up various nodes
  pad0 [style=invis]
  pad1 [style=invis]
  pad2 [style=invis]
  pad3 [style=invis]
  pad4 [style=invis]

  // Line up the key with the top of the graph
  no_lock -> local_lock [style=invis]
  local_lock -> lru_lock [style=invis]
  lru_lock -> hash_lock [style=invis]
  hash_lock -> remote_lock [style=invis]
  remote_lock -> local_freelist_check5 [style=invis]
  remote_lock -> fn___bpf_lru_list_shrink [style=invis]

  // Line up return code nodes at the bottom of the graph
  fn_htab_lru_map_update_elem -> pad0 [style=invis]
  pad0 -> pad1 [style=invis]
  pad1 -> pad2 [style=invis]
  //pad2-> fn_htab_lru_map_update_elem_ENOMEM [style=invis]
  fn_htab_lru_map_update_elem4 -> pad3 [style=invis]
  pad3 -> fn_htab_lru_map_update_elem5 [style=invis]
  pad3 -> fn_htab_lru_map_update_elem_EBUSY [style=invis]
  pad3 -> fn_htab_lru_map_update_elem_EEXIST [style=invis]
  pad3 -> fn_htab_lru_map_update_elem_ENOENT [style=invis]

  // Reduce diagram width by forcing some nodes to appear above others
  local_freelist_check4 -> fn_htab_lru_map_update_elem3 [style=invis]
  common_lru_check2 -> pad4 [style=invis]
  pad4 -> local_freelist_check5 [style=invis]
}
@@ -98,10 +98,65 @@ can access only the first ``PAGE_SIZE`` of that data. So it has two options:
   indicates that the kernel should use BPF's trimmed ``optval``.
 
 When the BPF program returns with the ``optlen`` greater than
-``PAGE_SIZE``, the userspace will receive ``EFAULT`` errno.
+``PAGE_SIZE``, userspace will receive the original kernel
+buffer without any modifications that the BPF program might have
+applied.
+
+Example
+=======
+
+The recommended way to handle BPF programs is as follows:
+
+.. code-block:: c
+
+  SEC("cgroup/getsockopt")
+  int getsockopt(struct bpf_sockopt *ctx)
+  {
+          /* Custom socket option. */
+          if (ctx->level == MY_SOL && ctx->optname == MY_OPTNAME) {
+                  ctx->retval = 0;
+                  optval[0] = ...;
+                  ctx->optlen = 1;
+                  return 1;
+          }
+
+          /* Modify kernel's socket option. */
+          if (ctx->level == SOL_IP && ctx->optname == IP_FREEBIND) {
+                  ctx->retval = 0;
+                  optval[0] = ...;
+                  ctx->optlen = 1;
+                  return 1;
+          }
+
+          /* optvals larger than PAGE_SIZE use the kernel's buffer. */
+          if (ctx->optlen > PAGE_SIZE)
+                  ctx->optlen = 0;
+
+          return 1;
+  }
+
+  SEC("cgroup/setsockopt")
+  int setsockopt(struct bpf_sockopt *ctx)
+  {
+          /* Custom socket option. */
+          if (ctx->level == MY_SOL && ctx->optname == MY_OPTNAME) {
+                  /* do something */
+                  ctx->optlen = -1;
+                  return 1;
+          }
+
+          /* Modify kernel's socket option. */
+          if (ctx->level == SOL_IP && ctx->optname == IP_FREEBIND) {
+                  optval[0] = ...;
+                  return 1;
+          }
+
+          /* optvals larger than PAGE_SIZE use the kernel's buffer. */
+          if (ctx->optlen > PAGE_SIZE)
+                  ctx->optlen = 0;
+
+          return 1;
+  }
 
 See ``tools/testing/selftests/bpf/progs/sockopt_sk.c`` for an example
 of a BPF program that handles socket options.
@@ -1731,21 +1731,21 @@ static void invoke_bpf_mod_ret(struct jit_ctx *ctx, struct bpf_tramp_links *tl,
         }
 }
 
-static void save_args(struct jit_ctx *ctx, int args_off, int nargs)
+static void save_args(struct jit_ctx *ctx, int args_off, int nregs)
 {
         int i;
 
-        for (i = 0; i < nargs; i++) {
+        for (i = 0; i < nregs; i++) {
                 emit(A64_STR64I(i, A64_SP, args_off), ctx);
                 args_off += 8;
         }
 }
 
-static void restore_args(struct jit_ctx *ctx, int args_off, int nargs)
+static void restore_args(struct jit_ctx *ctx, int args_off, int nregs)
 {
         int i;
 
-        for (i = 0; i < nargs; i++) {
+        for (i = 0; i < nregs; i++) {
                 emit(A64_LDR64I(i, A64_SP, args_off), ctx);
                 args_off += 8;
         }
@@ -1764,7 +1764,7 @@ static void restore_args(struct jit_ctx *ctx, int args_off, int nargs)
  */
 static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
                               struct bpf_tramp_links *tlinks, void *orig_call,
-                              int nargs, u32 flags)
+                              int nregs, u32 flags)
 {
         int i;
         int stack_size;
@@ -1772,7 +1772,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
         int regs_off;
         int retval_off;
         int args_off;
-        int nargs_off;
+        int nregs_off;
         int ip_off;
         int run_ctx_off;
         struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
@@ -1795,11 +1795,11 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
  * SP + retval_off  [ return value      ] BPF_TRAMP_F_CALL_ORIG or
  *                                        BPF_TRAMP_F_RET_FENTRY_RET
  *
- *                  [ argN              ]
+ *                  [ arg reg N         ]
  *                  [ ...               ]
- * SP + args_off    [ arg1              ]
+ * SP + args_off    [ arg reg 1         ]
  *
- * SP + nargs_off   [ args count        ]
+ * SP + nregs_off   [ arg regs count    ]
  *
  * SP + ip_off      [ traced function   ] BPF_TRAMP_F_IP_ARG flag
  *
@@ -1816,13 +1816,13 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
         if (flags & BPF_TRAMP_F_IP_ARG)
                 stack_size += 8;
 
-        nargs_off = stack_size;
+        nregs_off = stack_size;
         /* room for args count */
         stack_size += 8;
 
         args_off = stack_size;
         /* room for args */
-        stack_size += nargs * 8;
+        stack_size += nregs * 8;
 
         /* room for return value */
         retval_off = stack_size;
@@ -1865,12 +1865,12 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
                 emit(A64_STR64I(A64_R(10), A64_SP, ip_off), ctx);
         }
 
-        /* save args count*/
-        emit(A64_MOVZ(1, A64_R(10), nargs, 0), ctx);
-        emit(A64_STR64I(A64_R(10), A64_SP, nargs_off), ctx);
+        /* save arg regs count*/
+        emit(A64_MOVZ(1, A64_R(10), nregs, 0), ctx);
+        emit(A64_STR64I(A64_R(10), A64_SP, nregs_off), ctx);
 
-        /* save args */
-        save_args(ctx, args_off, nargs);
+        /* save arg regs */
+        save_args(ctx, args_off, nregs);
 
         /* save callee saved registers */
         emit(A64_STR64I(A64_R(19), A64_SP, regs_off), ctx);
@@ -1897,7 +1897,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
         }
 
         if (flags & BPF_TRAMP_F_CALL_ORIG) {
-                restore_args(ctx, args_off, nargs);
+                restore_args(ctx, args_off, nregs);
                 /* call original func */
                 emit(A64_LDR64I(A64_R(10), A64_SP, retaddr_off), ctx);
                 emit(A64_ADR(A64_LR, AARCH64_INSN_SIZE * 2), ctx);
@@ -1926,7 +1926,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
         }
 
         if (flags & BPF_TRAMP_F_RESTORE_REGS)
-                restore_args(ctx, args_off, nargs);
+                restore_args(ctx, args_off, nregs);
 
         /* restore callee saved register x19 and x20 */
         emit(A64_LDR64I(A64_R(19), A64_SP, regs_off), ctx);
@@ -1967,24 +1967,25 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
                                 void *orig_call)
 {
         int i, ret;
-        int nargs = m->nr_args;
+        int nregs = m->nr_args;
         int max_insns = ((long)image_end - (long)image) / AARCH64_INSN_SIZE;
         struct jit_ctx ctx = {
                 .image = NULL,
                 .idx = 0,
         };
 
-        /* the first 8 arguments are passed by registers */
-        if (nargs > 8)
-                return -ENOTSUPP;
-
-        /* don't support struct argument */
+        /* extra registers needed for struct argument */
         for (i = 0; i < MAX_BPF_FUNC_ARGS; i++) {
+                /* The arg_size is at most 16 bytes, enforced by the verifier. */
                 if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
-                        return -ENOTSUPP;
+                        nregs += (m->arg_size[i] + 7) / 8 - 1;
         }
 
-        ret = prepare_trampoline(&ctx, im, tlinks, orig_call, nargs, flags);
+        /* the first 8 registers are used for arguments */
+        if (nregs > 8)
+                return -ENOTSUPP;
+
+        ret = prepare_trampoline(&ctx, im, tlinks, orig_call, nregs, flags);
         if (ret < 0)
                 return ret;
 
@@ -1995,7 +1996,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
         ctx.idx = 0;
 
         jit_fill_hole(image, (unsigned int)(image_end - image));
-        ret = prepare_trampoline(&ctx, im, tlinks, orig_call, nargs, flags);
+        ret = prepare_trampoline(&ctx, im, tlinks, orig_call, nregs, flags);
 
         if (ret > 0 && validate_code(&ctx) < 0)
                 ret = -EINVAL;
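For a concrete reading of the new register accounting (an editorial worked
example, not from the patch): a traced function with two scalar arguments and
one 16-byte struct argument starts with nregs = 3, and the struct adds
(16 + 7) / 8 - 1 = 1 more, so 4 of the 8 available argument registers are used.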
@@ -13,6 +13,7 @@
 #include <linux/ptp_clock_kernel.h>
 #include <linux/timecounter.h>
 #include <linux/net_tstamp.h>
+#include <linux/bitfield.h>
 
 #include "igc_hw.h"
 
@@ -311,6 +312,33 @@ extern char igc_driver_name[];
 #define IGC_MRQC_RSS_FIELD_IPV4_UDP     0x00400000
 #define IGC_MRQC_RSS_FIELD_IPV6_UDP     0x00800000
 
+/* RX-desc Write-Back format RSS Type's */
+enum igc_rss_type_num {
+        IGC_RSS_TYPE_NO_HASH            = 0,
+        IGC_RSS_TYPE_HASH_TCP_IPV4      = 1,
+        IGC_RSS_TYPE_HASH_IPV4          = 2,
+        IGC_RSS_TYPE_HASH_TCP_IPV6      = 3,
+        IGC_RSS_TYPE_HASH_IPV6_EX       = 4,
+        IGC_RSS_TYPE_HASH_IPV6          = 5,
+        IGC_RSS_TYPE_HASH_TCP_IPV6_EX   = 6,
+        IGC_RSS_TYPE_HASH_UDP_IPV4      = 7,
+        IGC_RSS_TYPE_HASH_UDP_IPV6      = 8,
+        IGC_RSS_TYPE_HASH_UDP_IPV6_EX   = 9,
+        IGC_RSS_TYPE_MAX                = 10,
+};
+#define IGC_RSS_TYPE_MAX_TABLE  16
+#define IGC_RSS_TYPE_MASK       GENMASK(3,0) /* 4-bits (3:0) = mask 0x0F */
+
+/* igc_rss_type - Rx descriptor RSS type field */
+static inline u32 igc_rss_type(const union igc_adv_rx_desc *rx_desc)
+{
+        /* RSS Type 4-bits (3:0) number: 0-9 (above 9 is reserved)
+         * Accessing the same bits via u16 (wb.lower.lo_dword.hs_rss.pkt_info)
+         * is slightly slower than via u32 (wb.lower.lo_dword.data)
+         */
+        return le32_get_bits(rx_desc->wb.lower.lo_dword.data, IGC_RSS_TYPE_MASK);
+}
+
 /* Interrupt defines */
 #define IGC_START_ITR           648 /* ~6000 ints/sec */
 #define IGC_4K_ITR              980
@@ -471,6 +499,13 @@ struct igc_rx_buffer {
         };
 };
 
+/* context wrapper around xdp_buff to provide access to descriptor metadata */
+struct igc_xdp_buff {
+        struct xdp_buff xdp;
+        union igc_adv_rx_desc *rx_desc;
+        ktime_t rx_ts; /* data indication bit IGC_RXDADV_STAT_TSIP */
+};
+
 struct igc_q_vector {
         struct igc_adapter *adapter; /* backlink */
         void __iomem *itr_register;
@@ -1690,14 +1690,36 @@ static void igc_rx_checksum(struct igc_ring *ring,
                    le32_to_cpu(rx_desc->wb.upper.status_error));
 }
 
+/* Mapping HW RSS Type to enum pkt_hash_types */
+static const enum pkt_hash_types igc_rss_type_table[IGC_RSS_TYPE_MAX_TABLE] = {
+        [IGC_RSS_TYPE_NO_HASH]          = PKT_HASH_TYPE_L2,
+        [IGC_RSS_TYPE_HASH_TCP_IPV4]    = PKT_HASH_TYPE_L4,
+        [IGC_RSS_TYPE_HASH_IPV4]        = PKT_HASH_TYPE_L3,
+        [IGC_RSS_TYPE_HASH_TCP_IPV6]    = PKT_HASH_TYPE_L4,
+        [IGC_RSS_TYPE_HASH_IPV6_EX]     = PKT_HASH_TYPE_L3,
+        [IGC_RSS_TYPE_HASH_IPV6]        = PKT_HASH_TYPE_L3,
+        [IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = PKT_HASH_TYPE_L4,
+        [IGC_RSS_TYPE_HASH_UDP_IPV4]    = PKT_HASH_TYPE_L4,
+        [IGC_RSS_TYPE_HASH_UDP_IPV6]    = PKT_HASH_TYPE_L4,
+        [IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = PKT_HASH_TYPE_L4,
+        [10] = PKT_HASH_TYPE_NONE, /* RSS Type above 9 "Reserved" by HW */
+        [11] = PKT_HASH_TYPE_NONE, /* keep array sized for SW bit-mask */
+        [12] = PKT_HASH_TYPE_NONE, /* to handle future HW revisions */
+        [13] = PKT_HASH_TYPE_NONE,
+        [14] = PKT_HASH_TYPE_NONE,
+        [15] = PKT_HASH_TYPE_NONE,
+};
+
 static inline void igc_rx_hash(struct igc_ring *ring,
                                union igc_adv_rx_desc *rx_desc,
                                struct sk_buff *skb)
 {
-        if (ring->netdev->features & NETIF_F_RXHASH)
-                skb_set_hash(skb,
-                             le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
-                             PKT_HASH_TYPE_L3);
+        if (ring->netdev->features & NETIF_F_RXHASH) {
+                u32 rss_hash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
+                u32 rss_type = igc_rss_type(rx_desc);
+
+                skb_set_hash(skb, rss_hash, igc_rss_type_table[rss_type]);
+        }
 }
 
 static void igc_rx_vlan(struct igc_ring *rx_ring,
@@ -2214,6 +2236,8 @@ static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count)
         if (!count)
                 return ok;
 
+        XSK_CHECK_PRIV_TYPE(struct igc_xdp_buff);
+
         desc = IGC_RX_DESC(ring, i);
         bi = &ring->rx_buffer_info[i];
         i -= ring->count;
@@ -2498,8 +2522,8 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
                 union igc_adv_rx_desc *rx_desc;
                 struct igc_rx_buffer *rx_buffer;
                 unsigned int size, truesize;
+                struct igc_xdp_buff ctx;
                 ktime_t timestamp = 0;
-                struct xdp_buff xdp;
                 int pkt_offset = 0;
                 void *pktbuf;
 
@@ -2528,18 +2552,20 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
                 if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) {
                         timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,
                                                         pktbuf);
+                        ctx.rx_ts = timestamp;
                         pkt_offset = IGC_TS_HDR_LEN;
                         size -= IGC_TS_HDR_LEN;
                 }
 
                 if (!skb) {
-                        xdp_init_buff(&xdp, truesize, &rx_ring->xdp_rxq);
-                        xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring),
+                        xdp_init_buff(&ctx.xdp, truesize, &rx_ring->xdp_rxq);
+                        xdp_prepare_buff(&ctx.xdp, pktbuf - igc_rx_offset(rx_ring),
                                          igc_rx_offset(rx_ring) + pkt_offset,
                                          size, true);
-                        xdp_buff_clear_frags_flag(&xdp);
+                        xdp_buff_clear_frags_flag(&ctx.xdp);
+                        ctx.rx_desc = rx_desc;
 
-                        skb = igc_xdp_run_prog(adapter, &xdp);
+                        skb = igc_xdp_run_prog(adapter, &ctx.xdp);
                 }
 
                 if (IS_ERR(skb)) {
@@ -2561,9 +2587,9 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
                 } else if (skb)
                         igc_add_rx_frag(rx_ring, rx_buffer, skb, size);
                 else if (ring_uses_build_skb(rx_ring))
-                        skb = igc_build_skb(rx_ring, rx_buffer, &xdp);
+                        skb = igc_build_skb(rx_ring, rx_buffer, &ctx.xdp);
                 else
-                        skb = igc_construct_skb(rx_ring, rx_buffer, &xdp,
+                        skb = igc_construct_skb(rx_ring, rx_buffer, &ctx.xdp,
                                                 timestamp);
 
                 /* exit if we failed to retrieve a buffer */
@@ -2664,6 +2690,15 @@ static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector,
         napi_gro_receive(&q_vector->napi, skb);
 }
 
+static struct igc_xdp_buff *xsk_buff_to_igc_ctx(struct xdp_buff *xdp)
+{
+        /* xdp_buff pointer used by ZC code path is alloc as xdp_buff_xsk. The
+         * igc_xdp_buff shares its layout with xdp_buff_xsk and private
+         * igc_xdp_buff fields fall into xdp_buff_xsk->cb
+         */
+        return (struct igc_xdp_buff *)xdp;
+}
+
 static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
 {
         struct igc_adapter *adapter = q_vector->adapter;
@@ -2682,6 +2717,7 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
         while (likely(total_packets < budget)) {
                 union igc_adv_rx_desc *desc;
                 struct igc_rx_buffer *bi;
+                struct igc_xdp_buff *ctx;
                 ktime_t timestamp = 0;
                 unsigned int size;
                 int res;
@@ -2699,9 +2735,13 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
 
                 bi = &ring->rx_buffer_info[ntc];
 
+                ctx = xsk_buff_to_igc_ctx(bi->xdp);
+                ctx->rx_desc = desc;
+
                 if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) {
                         timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,
                                                         bi->xdp->data);
+                        ctx->rx_ts = timestamp;
 
                         bi->xdp->data += IGC_TS_HDR_LEN;
 
@@ -6454,6 +6494,58 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg)
         return value;
 }
 
+/* Mapping HW RSS Type to enum xdp_rss_hash_type */
+static enum xdp_rss_hash_type igc_xdp_rss_type[IGC_RSS_TYPE_MAX_TABLE] = {
+        [IGC_RSS_TYPE_NO_HASH]          = XDP_RSS_TYPE_L2,
+        [IGC_RSS_TYPE_HASH_TCP_IPV4]    = XDP_RSS_TYPE_L4_IPV4_TCP,
+        [IGC_RSS_TYPE_HASH_IPV4]        = XDP_RSS_TYPE_L3_IPV4,
+        [IGC_RSS_TYPE_HASH_TCP_IPV6]    = XDP_RSS_TYPE_L4_IPV6_TCP,
+        [IGC_RSS_TYPE_HASH_IPV6_EX]     = XDP_RSS_TYPE_L3_IPV6_EX,
+        [IGC_RSS_TYPE_HASH_IPV6]        = XDP_RSS_TYPE_L3_IPV6,
+        [IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = XDP_RSS_TYPE_L4_IPV6_TCP_EX,
+        [IGC_RSS_TYPE_HASH_UDP_IPV4]    = XDP_RSS_TYPE_L4_IPV4_UDP,
+        [IGC_RSS_TYPE_HASH_UDP_IPV6]    = XDP_RSS_TYPE_L4_IPV6_UDP,
+        [IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = XDP_RSS_TYPE_L4_IPV6_UDP_EX,
+        [10] = XDP_RSS_TYPE_NONE, /* RSS Type above 9 "Reserved" by HW */
+        [11] = XDP_RSS_TYPE_NONE, /* keep array sized for SW bit-mask */
+        [12] = XDP_RSS_TYPE_NONE, /* to handle future HW revisions */
+        [13] = XDP_RSS_TYPE_NONE,
+        [14] = XDP_RSS_TYPE_NONE,
+        [15] = XDP_RSS_TYPE_NONE,
+};
+
+static int igc_xdp_rx_hash(const struct xdp_md *_ctx, u32 *hash,
+                           enum xdp_rss_hash_type *rss_type)
+{
+        const struct igc_xdp_buff *ctx = (void *)_ctx;
+
+        if (!(ctx->xdp.rxq->dev->features & NETIF_F_RXHASH))
+                return -ENODATA;
+
+        *hash = le32_to_cpu(ctx->rx_desc->wb.lower.hi_dword.rss);
+        *rss_type = igc_xdp_rss_type[igc_rss_type(ctx->rx_desc)];
+
+        return 0;
+}
+
+static int igc_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
+{
+        const struct igc_xdp_buff *ctx = (void *)_ctx;
+
+        if (igc_test_staterr(ctx->rx_desc, IGC_RXDADV_STAT_TSIP)) {
+                *timestamp = ctx->rx_ts;
+
+                return 0;
+        }
+
+        return -ENODATA;
+}
+
+static const struct xdp_metadata_ops igc_xdp_metadata_ops = {
+        .xmo_rx_hash            = igc_xdp_rx_hash,
+        .xmo_rx_timestamp       = igc_xdp_rx_timestamp,
+};
+
 /**
  * igc_probe - Device Initialization Routine
  * @pdev: PCI device information struct
@@ -6527,6 +6619,7 @@ static int igc_probe(struct pci_dev *pdev,
         hw->hw_addr = adapter->io_addr;
 
         netdev->netdev_ops = &igc_netdev_ops;
+        netdev->xdp_metadata_ops = &igc_xdp_metadata_ops;
         igc_ethtool_set_ops(netdev);
         netdev->watchdog_timeo = 5 * HZ;
 
@@ -6554,6 +6647,7 @@ static int igc_probe(struct pci_dev *pdev,
         netdev->features |= NETIF_F_TSO;
         netdev->features |= NETIF_F_TSO6;
         netdev->features |= NETIF_F_TSO_ECN;
+        netdev->features |= NETIF_F_RXHASH;
         netdev->features |= NETIF_F_RXCSUM;
         netdev->features |= NETIF_F_HW_CSUM;
         netdev->features |= NETIF_F_SCTP_CRC;
@@ -1125,7 +1125,6 @@ struct bpf_trampoline {
         int progs_cnt[BPF_TRAMP_MAX];
         /* Executable image of trampoline */
         struct bpf_tramp_image *cur_image;
-        u64 selector;
         struct module *mod;
 };
 
@@ -1197,7 +1196,7 @@ enum bpf_dynptr_type {
 };
 
 int bpf_dynptr_check_size(u32 size);
-u32 bpf_dynptr_get_size(const struct bpf_dynptr_kern *ptr);
+u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr);
 
 #ifdef CONFIG_BPF_JIT
 int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
@@ -18,8 +18,11 @@
  * that converting umax_value to int cannot overflow.
  */
 #define BPF_MAX_VAR_SIZ (1 << 29)
-/* size of type_str_buf in bpf_verifier. */
-#define TYPE_STR_BUF_LEN 128
+/* size of tmp_str_buf in bpf_verifier.
+ * we need at least 306 bytes to fit full stack mask representation
+ * (in the "-8,-16,...,-512" form)
+ */
+#define TMP_STR_BUF_LEN 320
 
 /* Liveness marks, used for registers and spilled-regs (in stack slots).
  * Read marks propagate upwards until they find a write mark; they record that
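As a quick sanity check of the 306-byte figure (an editorial note, not from the
patch): a full stack mask lists 64 slots from -8 to -512; "-8" takes 2
characters, "-16" through "-96" take 3 characters each (11 values), "-104"
through "-512" take 4 characters each (52 values), and 63 commas separate them,
giving 2 + 33 + 208 + 63 = 306.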
@@ -238,6 +241,10 @@ enum bpf_stack_slot_type {
 
 #define BPF_REG_SIZE 8  /* size of eBPF register in bytes */
 
+#define BPF_REGMASK_ARGS ((1 << BPF_REG_1) | (1 << BPF_REG_2) | \
+                          (1 << BPF_REG_3) | (1 << BPF_REG_4) | \
+                          (1 << BPF_REG_5))
+
 #define BPF_DYNPTR_SIZE         sizeof(struct bpf_dynptr_kern)
 #define BPF_DYNPTR_NR_SLOTS     (BPF_DYNPTR_SIZE / BPF_REG_SIZE)
 
@@ -541,6 +548,15 @@ struct bpf_subprog_info {
         bool is_async_cb;
 };
 
+struct bpf_verifier_env;
+
+struct backtrack_state {
+        struct bpf_verifier_env *env;
+        u32 frame;
+        u32 reg_masks[MAX_CALL_FRAMES];
+        u64 stack_masks[MAX_CALL_FRAMES];
+};
+
 /* single container for all structs
  * one verifier_env per bpf_check() call
  */
@@ -578,6 +594,7 @@ struct bpf_verifier_env {
                 int *insn_stack;
                 int cur_stack;
         } cfg;
+        struct backtrack_state bt;
         u32 pass_cnt; /* number of times do_check() was called */
         u32 subprog_cnt;
         /* number of instructions analyzed by the verifier */
@@ -606,8 +623,10 @@ struct bpf_verifier_env {
         /* Same as scratched_regs but for stack slots */
         u64 scratched_stack_slots;
         u64 prev_log_pos, prev_insn_print_pos;
-        /* buffer used in reg_type_str() to generate reg_type string */
-        char type_str_buf[TYPE_STR_BUF_LEN];
+        /* buffer used to generate temporary string representations,
+         * e.g., in reg_type_str() to generate reg_type string
+         */
+        char tmp_str_buf[TMP_STR_BUF_LEN];
 };
 
 __printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
@@ -4014,7 +4014,7 @@ __skb_header_pointer(const struct sk_buff *skb, int offset, int len,
         if (likely(hlen - offset >= len))
                 return (void *)data + offset;
 
-        if (!skb || unlikely(skb_copy_bits(skb, offset, buffer, len) < 0))
+        if (!skb || !buffer || unlikely(skb_copy_bits(skb, offset, buffer, len) < 0))
                 return NULL;
 
         return buffer;
@@ -180,7 +180,7 @@ static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
         if (likely(!cross_pg))
                 return false;
 
-        return pool->dma_pages_cnt &&
+        return pool->dma_pages &&
                !(pool->dma_pages[addr >> PAGE_SHIFT] & XSK_NEXT_PG_CONTIG_MASK);
 }
@@ -41,7 +41,12 @@ static struct list_head *local_pending_list(struct bpf_lru_locallist *loc_l)
 /* bpf_lru_node helpers */
 static bool bpf_lru_node_is_ref(const struct bpf_lru_node *node)
 {
-        return node->ref;
+        return READ_ONCE(node->ref);
+}
+
+static void bpf_lru_node_clear_ref(struct bpf_lru_node *node)
+{
+        WRITE_ONCE(node->ref, 0);
 }
 
 static void bpf_lru_list_count_inc(struct bpf_lru_list *l,
@@ -89,7 +94,7 @@ static void __bpf_lru_node_move_in(struct bpf_lru_list *l,
 
         bpf_lru_list_count_inc(l, tgt_type);
         node->type = tgt_type;
-        node->ref = 0;
+        bpf_lru_node_clear_ref(node);
         list_move(&node->list, &l->lists[tgt_type]);
 }
 
@@ -110,7 +115,7 @@ static void __bpf_lru_node_move(struct bpf_lru_list *l,
                 bpf_lru_list_count_inc(l, tgt_type);
                 node->type = tgt_type;
         }
-        node->ref = 0;
+        bpf_lru_node_clear_ref(node);
 
         /* If the moving node is the next_inactive_rotation candidate,
          * move the next_inactive_rotation pointer also.
@@ -353,7 +358,7 @@ static void __local_list_add_pending(struct bpf_lru *lru,
         *(u32 *)((void *)node + lru->hash_offset) = hash;
         node->cpu = cpu;
         node->type = BPF_LRU_LOCAL_LIST_T_PENDING;
-        node->ref = 0;
+        bpf_lru_node_clear_ref(node);
         list_add(&node->list, local_pending_list(loc_l));
 }
 
@@ -419,7 +424,7 @@ static struct bpf_lru_node *bpf_percpu_lru_pop_free(struct bpf_lru *lru,
         if (!list_empty(free_list)) {
                 node = list_first_entry(free_list, struct bpf_lru_node, list);
                 *(u32 *)((void *)node + lru->hash_offset) = hash;
-                node->ref = 0;
+                bpf_lru_node_clear_ref(node);
                 __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE);
         }
 
@@ -522,7 +527,7 @@ static void bpf_common_lru_push_free(struct bpf_lru *lru,
                 }
 
                 node->type = BPF_LRU_LOCAL_LIST_T_FREE;
-                node->ref = 0;
+                bpf_lru_node_clear_ref(node);
                 list_move(&node->list, local_free_list(loc_l));
 
                 raw_spin_unlock_irqrestore(&loc_l->lock, flags);
@@ -568,7 +573,7 @@ static void bpf_common_lru_populate(struct bpf_lru *lru, void *buf,
 
                 node = (struct bpf_lru_node *)(buf + node_offset);
                 node->type = BPF_LRU_LIST_T_FREE;
-                node->ref = 0;
+                bpf_lru_node_clear_ref(node);
                 list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
                 buf += elem_size;
         }
@@ -594,7 +599,7 @@ again:
                 node = (struct bpf_lru_node *)(buf + node_offset);
                 node->cpu = cpu;
                 node->type = BPF_LRU_LIST_T_FREE;
-                node->ref = 0;
+                bpf_lru_node_clear_ref(node);
                 list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
                 i++;
                 buf += elem_size;
@@ -64,11 +64,8 @@ struct bpf_lru {
 
 static inline void bpf_lru_node_set_ref(struct bpf_lru_node *node)
 {
-        /* ref is an approximation on access frequency. It does not
-         * have to be very accurate. Hence, no protection is used.
-         */
-        if (!node->ref)
-                node->ref = 1;
+        if (!READ_ONCE(node->ref))
+                WRITE_ONCE(node->ref, 1);
 }
 
 int bpf_lru_init(struct bpf_lru *lru, bool percpu, u32 hash_offset,
@@ -1826,6 +1826,12 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
                 ret = 1;
         } else if (ctx.optlen > max_optlen || ctx.optlen < -1) {
                 /* optlen is out of bounds */
+                if (*optlen > PAGE_SIZE && ctx.optlen >= 0) {
+                        pr_info_once("bpf setsockopt: ignoring program buffer with optlen=%d (max_optlen=%d)\n",
+                                     ctx.optlen, max_optlen);
+                        ret = 0;
+                        goto out;
+                }
                 ret = -EFAULT;
         } else {
                 /* optlen within bounds, run kernel handler */
@@ -1881,8 +1887,10 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
                 .optname = optname,
                 .current_task = current,
         };
+        int orig_optlen;
         int ret;
 
+        orig_optlen = max_optlen;
         ctx.optlen = max_optlen;
         max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
         if (max_optlen < 0)
@@ -1905,6 +1913,7 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
                         ret = -EFAULT;
                         goto out;
                 }
+                orig_optlen = ctx.optlen;
 
                 if (copy_from_user(ctx.optval, optval,
                                    min(ctx.optlen, max_optlen)) != 0) {
@@ -1922,6 +1931,12 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
                 goto out;
 
         if (optval && (ctx.optlen > max_optlen || ctx.optlen < 0)) {
+                if (orig_optlen > PAGE_SIZE && ctx.optlen >= 0) {
+                        pr_info_once("bpf getsockopt: ignoring program buffer with optlen=%d (max_optlen=%d)\n",
+                                     ctx.optlen, max_optlen);
+                        ret = retval;
+                        goto out;
+                }
                 ret = -EFAULT;
                 goto out;
         }
@@ -1423,7 +1423,7 @@ static const struct bpf_func_proto bpf_kptr_xchg_proto = {
 #define DYNPTR_SIZE_MASK        0xFFFFFF
 #define DYNPTR_RDONLY_BIT       BIT(31)
 
-static bool bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
+static bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
 {
         return ptr->size & DYNPTR_RDONLY_BIT;
 }
@@ -1443,11 +1443,18 @@ static enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *pt
         return (ptr->size & ~(DYNPTR_RDONLY_BIT)) >> DYNPTR_TYPE_SHIFT;
 }
 
-u32 bpf_dynptr_get_size(const struct bpf_dynptr_kern *ptr)
+u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr)
 {
         return ptr->size & DYNPTR_SIZE_MASK;
 }
 
+static void bpf_dynptr_set_size(struct bpf_dynptr_kern *ptr, u32 new_size)
+{
+        u32 metadata = ptr->size & ~DYNPTR_SIZE_MASK;
+
+        ptr->size = new_size | metadata;
+}
+
 int bpf_dynptr_check_size(u32 size)
 {
         return size > DYNPTR_MAX_SIZE ? -E2BIG : 0;
@@ -1469,7 +1476,7 @@ void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr)
 
 static int bpf_dynptr_check_off_len(const struct bpf_dynptr_kern *ptr, u32 offset, u32 len)
 {
-        u32 size = bpf_dynptr_get_size(ptr);
+        u32 size = __bpf_dynptr_size(ptr);
 
         if (len > size || offset > size - len)
                 return -E2BIG;
@@ -1563,7 +1570,7 @@ BPF_CALL_5(bpf_dynptr_write, const struct bpf_dynptr_kern *, dst, u32, offset, v
         enum bpf_dynptr_type type;
         int err;
 
-        if (!dst->data || bpf_dynptr_is_rdonly(dst))
+        if (!dst->data || __bpf_dynptr_is_rdonly(dst))
                 return -EINVAL;
 
         err = bpf_dynptr_check_off_len(dst, offset, len);
@@ -1619,7 +1626,7 @@ BPF_CALL_3(bpf_dynptr_data, const struct bpf_dynptr_kern *, ptr, u32, offset, u3
         if (err)
                 return 0;
 
-        if (bpf_dynptr_is_rdonly(ptr))
+        if (__bpf_dynptr_is_rdonly(ptr))
                 return 0;
 
         type = bpf_dynptr_get_type(ptr);
@@ -2142,6 +2149,22 @@ __bpf_kfunc struct cgroup *bpf_cgroup_from_id(u64 cgid)
                 return NULL;
         return cgrp;
 }
+
+/**
+ * bpf_task_under_cgroup - wrap task_under_cgroup_hierarchy() as a kfunc, test
+ * task's membership of cgroup ancestry.
+ * @task: the task to be tested
+ * @ancestor: possible ancestor of @task's cgroup
+ *
+ * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor.
+ * It follows all the same rules as cgroup_is_descendant, and only applies
+ * to the default hierarchy.
+ */
+__bpf_kfunc long bpf_task_under_cgroup(struct task_struct *task,
+                                       struct cgroup *ancestor)
+{
+        return task_under_cgroup_hierarchy(task, ancestor);
+}
 #endif /* CONFIG_CGROUPS */
 
 /**
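A minimal usage sketch for the new kfunc (an editorial illustration, not part
of the patch; the extern declarations, attach point, and cgroup-id global are
assumptions mirroring the selftest):

        extern long bpf_task_under_cgroup(struct task_struct *task,
                                          struct cgroup *ancestor) __ksym;
        extern struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;
        extern void bpf_cgroup_release(struct cgroup *cgrp) __ksym;

        u64 cgid;       /* set by the loader before attach */

        SEC("tp_btf/task_newtask")
        int BPF_PROG(check_ancestry, struct task_struct *task, u64 clone_flags)
        {
                struct cgroup *ancestor = bpf_cgroup_from_id(cgid);
                long under;

                if (!ancestor)
                        return 0;
                under = bpf_task_under_cgroup(task, ancestor);
                bpf_cgroup_release(ancestor);
                /* 'under' is nonzero when task sits below the ancestor */
                return 0;
        }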
@@ -2167,13 +2190,15 @@ __bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid)
  * bpf_dynptr_slice() - Obtain a read-only pointer to the dynptr data.
  * @ptr: The dynptr whose data slice to retrieve
  * @offset: Offset into the dynptr
- * @buffer: User-provided buffer to copy contents into
- * @buffer__szk: Size (in bytes) of the buffer. This is the length of the
- *               requested slice. This must be a constant.
+ * @buffer__opt: User-provided buffer to copy contents into. May be NULL
+ * @buffer__szk: Size (in bytes) of the buffer if present. This is the
+ *               length of the requested slice. This must be a constant.
  *
  * For non-skb and non-xdp type dynptrs, there is no difference between
  * bpf_dynptr_slice and bpf_dynptr_data.
  *
+ * If buffer__opt is NULL, the call will fail if the buffer was needed.
+ *
  * If the intention is to write to the data slice, please use
  * bpf_dynptr_slice_rdwr.
  *
@@ -2190,7 +2215,7 @@ __bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid)
  *          direct pointer)
  */
 __bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr_kern *ptr, u32 offset,
-                                   void *buffer, u32 buffer__szk)
+                                   void *buffer__opt, u32 buffer__szk)
 {
         enum bpf_dynptr_type type;
         u32 len = buffer__szk;
@@ -2210,15 +2235,17 @@ __bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr_kern *ptr, u32 offset
         case BPF_DYNPTR_TYPE_RINGBUF:
                 return ptr->data + ptr->offset + offset;
         case BPF_DYNPTR_TYPE_SKB:
-                return skb_header_pointer(ptr->data, ptr->offset + offset, len, buffer);
+                return skb_header_pointer(ptr->data, ptr->offset + offset, len, buffer__opt);
         case BPF_DYNPTR_TYPE_XDP:
         {
                 void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset + offset, len);
                 if (xdp_ptr)
                         return xdp_ptr;
 
-                bpf_xdp_copy_buf(ptr->data, ptr->offset + offset, buffer, len, false);
-                return buffer;
+                if (!buffer__opt)
+                        return NULL;
+                bpf_xdp_copy_buf(ptr->data, ptr->offset + offset, buffer__opt, len, false);
+                return buffer__opt;
         }
         default:
                 WARN_ONCE(true, "unknown dynptr type %d\n", type);
@@ -2230,13 +2257,15 @@ __bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr_kern *ptr, u32 offset
  * bpf_dynptr_slice_rdwr() - Obtain a writable pointer to the dynptr data.
  * @ptr: The dynptr whose data slice to retrieve
  * @offset: Offset into the dynptr
- * @buffer: User-provided buffer to copy contents into
- * @buffer__szk: Size (in bytes) of the buffer. This is the length of the
- *               requested slice. This must be a constant.
+ * @buffer__opt: User-provided buffer to copy contents into. May be NULL
+ * @buffer__szk: Size (in bytes) of the buffer if present. This is the
+ *               length of the requested slice. This must be a constant.
  *
  * For non-skb and non-xdp type dynptrs, there is no difference between
  * bpf_dynptr_slice and bpf_dynptr_data.
  *
+ * If buffer__opt is NULL, the call will fail if the buffer was needed.
+ *
  * The returned pointer is writable and may point to either directly the dynptr
  * data at the requested offset or to the buffer if unable to obtain a direct
  * data pointer to (example: the requested slice is to the paged area of an skb
@@ -2267,9 +2296,9 @@ __bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr_kern *ptr, u32 offset
  *          direct pointer)
  */
 __bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr_kern *ptr, u32 offset,
-                                        void *buffer, u32 buffer__szk)
+                                        void *buffer__opt, u32 buffer__szk)
 {
-        if (!ptr->data || bpf_dynptr_is_rdonly(ptr))
+        if (!ptr->data || __bpf_dynptr_is_rdonly(ptr))
                 return NULL;
 
         /* bpf_dynptr_slice_rdwr is the same logic as bpf_dynptr_slice.
@@ -2294,7 +2323,59 @@ __bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr_kern *ptr, u32 o
          * will be copied out into the buffer and the user will need to call
          * bpf_dynptr_write() to commit changes.
          */
-        return bpf_dynptr_slice(ptr, offset, buffer, buffer__szk);
+        return bpf_dynptr_slice(ptr, offset, buffer__opt, buffer__szk);
 }
 
+__bpf_kfunc int bpf_dynptr_adjust(struct bpf_dynptr_kern *ptr, u32 start, u32 end)
+{
+        u32 size;
+
+        if (!ptr->data || start > end)
+                return -EINVAL;
+
+        size = __bpf_dynptr_size(ptr);
+
+        if (start > size || end > size)
+                return -ERANGE;
+
+        ptr->offset += start;
+        bpf_dynptr_set_size(ptr, end - start);
+
+        return 0;
+}
+
+__bpf_kfunc bool bpf_dynptr_is_null(struct bpf_dynptr_kern *ptr)
+{
+        return !ptr->data;
+}
+
+__bpf_kfunc bool bpf_dynptr_is_rdonly(struct bpf_dynptr_kern *ptr)
+{
+        if (!ptr->data)
+                return false;
+
+        return __bpf_dynptr_is_rdonly(ptr);
+}
+
+__bpf_kfunc __u32 bpf_dynptr_size(const struct bpf_dynptr_kern *ptr)
+{
+        if (!ptr->data)
+                return -EINVAL;
+
+        return __bpf_dynptr_size(ptr);
+}
+
+__bpf_kfunc int bpf_dynptr_clone(struct bpf_dynptr_kern *ptr,
+                                 struct bpf_dynptr_kern *clone__uninit)
+{
+        if (!ptr->data) {
+                bpf_dynptr_set_null(clone__uninit);
+                return -EINVAL;
+        }
+
+        *clone__uninit = *ptr;
+
+        return 0;
+}
+
 __bpf_kfunc void *bpf_cast_to_kern_ctx(void *obj)
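A brief usage sketch for the new dynptr kfuncs (an editorial illustration, not
part of the patch; extern declarations via __ksym and a ringbuf map named
'ringbuf' are assumed, as in the selftests):

        struct bpf_dynptr ptr;
        __u32 size;

        bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);
        size = bpf_dynptr_size(&ptr);           /* 64 */
        bpf_dynptr_adjust(&ptr, 8, 24);         /* trim the view to [8, 24) */
        size = bpf_dynptr_size(&ptr);           /* now 16 */
        bpf_ringbuf_discard_dynptr(&ptr, 0);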
@@ -2341,6 +2422,7 @@ BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_cgroup_release, KF_RELEASE)
 BTF_ID_FLAGS(func, bpf_cgroup_ancestor, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_cgroup_from_id, KF_ACQUIRE | KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_task_under_cgroup, KF_RCU)
 #endif
 BTF_ID_FLAGS(func, bpf_task_from_pid, KF_ACQUIRE | KF_RET_NULL)
 BTF_SET8_END(generic_btf_ids)
@@ -2369,6 +2451,11 @@ BTF_ID_FLAGS(func, bpf_dynptr_slice_rdwr, KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_iter_num_new, KF_ITER_NEW)
 BTF_ID_FLAGS(func, bpf_iter_num_next, KF_ITER_NEXT | KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_iter_num_destroy, KF_ITER_DESTROY)
+BTF_ID_FLAGS(func, bpf_dynptr_adjust)
+BTF_ID_FLAGS(func, bpf_dynptr_is_null)
+BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly)
+BTF_ID_FLAGS(func, bpf_dynptr_size)
+BTF_ID_FLAGS(func, bpf_dynptr_clone)
 BTF_SET8_END(common_btf_ids)
 
 static const struct btf_kfunc_id_set common_kfunc_set = {
@@ -5380,7 +5380,8 @@ static int bpf_unpriv_handler(struct ctl_table *table, int write,
                 *(int *)table->data = unpriv_enable;
         }
 
-        unpriv_ebpf_notify(unpriv_enable);
+        if (write)
+                unpriv_ebpf_notify(unpriv_enable);
 
         return ret;
 }
@@ -251,11 +251,8 @@ bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total, bool *ip_a
 	return tlinks;
 }

-static void __bpf_tramp_image_put_deferred(struct work_struct *work)
+static void bpf_tramp_image_free(struct bpf_tramp_image *im)
 {
-	struct bpf_tramp_image *im;
-
-	im = container_of(work, struct bpf_tramp_image, work);
 	bpf_image_ksym_del(&im->ksym);
 	bpf_jit_free_exec(im->image);
 	bpf_jit_uncharge_modmem(PAGE_SIZE);
@@ -263,6 +260,14 @@ static void __bpf_tramp_image_put_deferred(struct work_struct *work)
 	kfree_rcu(im, rcu);
 }

+static void __bpf_tramp_image_put_deferred(struct work_struct *work)
+{
+	struct bpf_tramp_image *im;
+
+	im = container_of(work, struct bpf_tramp_image, work);
+	bpf_tramp_image_free(im);
+}
+
 /* callback, fexit step 3 or fentry step 2 */
 static void __bpf_tramp_image_put_rcu(struct rcu_head *rcu)
 {
@@ -344,7 +349,7 @@ static void bpf_tramp_image_put(struct bpf_tramp_image *im)
 	call_rcu_tasks_trace(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
 }

-static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, u32 idx)
+static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key)
 {
 	struct bpf_tramp_image *im;
 	struct bpf_ksym *ksym;
@@ -371,7 +376,7 @@ static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, u32 idx)

 	ksym = &im->ksym;
 	INIT_LIST_HEAD_RCU(&ksym->lnode);
-	snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu_%u", key, idx);
+	snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu", key);
 	bpf_image_ksym_add(image, ksym);
 	return im;

@@ -401,11 +406,10 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut
 		err = unregister_fentry(tr, tr->cur_image->image);
 		bpf_tramp_image_put(tr->cur_image);
 		tr->cur_image = NULL;
-		tr->selector = 0;
 		goto out;
 	}

-	im = bpf_tramp_image_alloc(tr->key, tr->selector);
+	im = bpf_tramp_image_alloc(tr->key);
 	if (IS_ERR(im)) {
 		err = PTR_ERR(im);
 		goto out;
@@ -438,12 +442,11 @@ again:
 					  &tr->func.model, tr->flags, tlinks,
 					  tr->func.addr);
 	if (err < 0)
-		goto out;
+		goto out_free;

 	set_memory_rox((long)im->image, 1);

-	WARN_ON(tr->cur_image && tr->selector == 0);
-	WARN_ON(!tr->cur_image && tr->selector);
+	WARN_ON(tr->cur_image && total == 0);
 	if (tr->cur_image)
 		/* progs already running at this address */
 		err = modify_fentry(tr, tr->cur_image->image, im->image, lock_direct_mutex);
@@ -468,18 +471,21 @@ again:
 	}
 #endif
 	if (err)
-		goto out;
+		goto out_free;

 	if (tr->cur_image)
 		bpf_tramp_image_put(tr->cur_image);
 	tr->cur_image = im;
-	tr->selector++;
 out:
 	/* If any error happens, restore previous flags */
 	if (err)
 		tr->flags = orig_flags;
 	kfree(tlinks);
 	return err;
+
+out_free:
+	bpf_tramp_image_free(im);
+	goto out;
 }

 static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(struct bpf_prog *prog)
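
The net effect of the hunks above: image allocation and teardown are now symmetric, and the new out_free label frees a freshly built image on any failure instead of leaking it. A standalone sketch of the same unwind shape (hypothetical names, plain userspace C rather than kernel code):

#include <stdlib.h>

/* image_alloc()/image_free() stand in for the paired
 * bpf_tramp_image_alloc()/bpf_tramp_image_free() above. */
struct image { void *buf; };

static struct image *image_alloc(void)
{
    struct image *im = calloc(1, sizeof(*im));

    if (im)
        im->buf = malloc(4096);
    return im;
}

static void image_free(struct image *im)
{
    if (!im)
        return;
    free(im->buf);
    free(im);
}

static int install(struct image *im) { (void)im; return -1; }

int update(struct image **cur)
{
    struct image *im;
    int err;

    im = image_alloc();
    if (!im)
        return -1;

    err = install(im);
    if (err)
        goto out_free;	/* without this, im leaks on failure */

    *cur = im;		/* success: ownership handed off */
out:
    return err;
out_free:
    image_free(im);
    goto out;
}
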
[File diff suppressed because it is too large]
@@ -1349,9 +1349,9 @@ __bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr,
 	}

 	return verify_pkcs7_signature(data_ptr->data,
-				      bpf_dynptr_get_size(data_ptr),
+				      __bpf_dynptr_size(data_ptr),
 				      sig_ptr->data,
-				      bpf_dynptr_get_size(sig_ptr),
+				      __bpf_dynptr_size(sig_ptr),
 				      trusted_keyring->key,
 				      VERIFYING_UNSPECIFIED_SIGNATURE, NULL,
 				      NULL);
@@ -6916,6 +6916,8 @@ u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
 					      FIELD));			\
 	} while (0)

+	BTF_TYPE_EMIT(struct bpf_tcp_sock);
+
 	switch (si->off) {
 	case offsetof(struct bpf_tcp_sock, rtt_min):
 		BUILD_BUG_ON(sizeof_field(struct tcp_sock, rtt_min) !=
@@ -350,7 +350,7 @@ void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
 {
 	struct xsk_dma_map *dma_map;

-	if (pool->dma_pages_cnt == 0)
+	if (!pool->dma_pages)
 		return;

 	dma_map = xp_find_dma_map(pool);
@@ -364,6 +364,7 @@ void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)

 	__xp_dma_unmap(dma_map, attrs);
 	kvfree(pool->dma_pages);
+	pool->dma_pages = NULL;
 	pool->dma_pages_cnt = 0;
 	pool->dev = NULL;
 }
@@ -503,7 +504,7 @@ static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
 	if (pool->unaligned) {
 		xskb = pool->free_heads[--pool->free_heads_cnt];
 		xp_init_xskb_addr(xskb, pool, addr);
-		if (pool->dma_pages_cnt)
+		if (pool->dma_pages)
 			xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);
 	} else {
 		xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)];
@@ -569,7 +570,7 @@ static u32 xp_alloc_new_from_fq(struct xsk_buff_pool *pool, struct xdp_buff **xd
 	if (pool->unaligned) {
 		xskb = pool->free_heads[--pool->free_heads_cnt];
 		xp_init_xskb_addr(xskb, pool, addr);
-		if (pool->dma_pages_cnt)
+		if (pool->dma_pages)
 			xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);
 	} else {
 		xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)];
@@ -47,7 +47,7 @@ int bpf_basertt(struct bpf_sock_ops *skops)
 	case BPF_SOCK_OPS_BASE_RTT:
 		n = bpf_getsockopt(skops, SOL_TCP, TCP_CONGESTION,
 				   cong, sizeof(cong));
-		if (!n && !__builtin_memcmp(cong, nv, sizeof(nv)+1)) {
+		if (!n && !__builtin_memcmp(cong, nv, sizeof(nv))) {
 			/* Set base_rtt to 80us */
 			rv = 80;
 		} else if (n) {
@@ -23,5 +23,8 @@ if [ "${pahole_ver}" -ge "124" ]; then
 	# see PAHOLE_HAS_LANG_EXCLUDE
 	extra_paholeopt="${extra_paholeopt} --lang_exclude=rust"
 fi
+if [ "${pahole_ver}" -ge "125" ]; then
+	extra_paholeopt="${extra_paholeopt} --skip_encoding_btf_inconsistent_proto --btf_gen_optimized"
+fi

 echo ${extra_paholeopt}
@@ -167,12 +167,12 @@ static int get_vendor_id(int ifindex)
 	return strtol(buf, NULL, 0);
 }

-static int read_procfs(const char *path)
+static long read_procfs(const char *path)
 {
 	char *endptr, *line = NULL;
 	size_t len = 0;
 	FILE *fd;
-	int res;
+	long res;

 	fd = fopen(path, "r");
 	if (!fd)
@@ -194,7 +194,7 @@ static int read_procfs(const char *path)

 static void probe_unprivileged_disabled(void)
 {
-	int res;
+	long res;

 	/* No support for C-style output */

@@ -216,14 +216,14 @@ static void probe_unprivileged_disabled(void)
 			printf("Unable to retrieve required privileges for bpf() syscall\n");
 			break;
 		default:
-			printf("bpf() syscall restriction has unknown value %d\n", res);
+			printf("bpf() syscall restriction has unknown value %ld\n", res);
 		}
 	}
 }

 static void probe_jit_enable(void)
 {
-	int res;
+	long res;

 	/* No support for C-style output */

@@ -245,7 +245,7 @@ static void probe_jit_enable(void)
 			printf("Unable to retrieve JIT-compiler status\n");
 			break;
 		default:
-			printf("JIT-compiler status has unknown value %d\n",
+			printf("JIT-compiler status has unknown value %ld\n",
 			       res);
 		}
 	}
@@ -253,7 +253,7 @@ static void probe_jit_enable(void)

 static void probe_jit_harden(void)
 {
-	int res;
+	long res;

 	/* No support for C-style output */

@@ -275,7 +275,7 @@ static void probe_jit_harden(void)
 			printf("Unable to retrieve JIT hardening status\n");
 			break;
 		default:
-			printf("JIT hardening status has unknown value %d\n",
+			printf("JIT hardening status has unknown value %ld\n",
 			       res);
 		}
 	}
@@ -283,7 +283,7 @@ static void probe_jit_harden(void)

 static void probe_jit_kallsyms(void)
 {
-	int res;
+	long res;

 	/* No support for C-style output */

@@ -302,14 +302,14 @@ static void probe_jit_kallsyms(void)
 			printf("Unable to retrieve JIT kallsyms export status\n");
 			break;
 		default:
-			printf("JIT kallsyms exports status has unknown value %d\n", res);
+			printf("JIT kallsyms exports status has unknown value %ld\n", res);
 		}
 	}
 }

 static void probe_jit_limit(void)
 {
-	int res;
+	long res;

 	/* No support for C-style output */

@@ -322,7 +322,7 @@ static void probe_jit_limit(void)
 			printf("Unable to retrieve global memory limit for JIT compiler for unprivileged users\n");
 			break;
 		default:
-			printf("Global memory limit for JIT compiler for unprivileged users is %d bytes\n", res);
+			printf("Global memory limit for JIT compiler for unprivileged users is %ld bytes\n", res);
 		}
 	}
 }
@@ -212,7 +212,10 @@ static int show_link_close_json(int fd, struct bpf_link_info *info)
 	case BPF_LINK_TYPE_NETFILTER:
 		netfilter_dump_json(info, json_wtr);
 		break;
-
+	case BPF_LINK_TYPE_STRUCT_OPS:
+		jsonw_uint_field(json_wtr, "map_id",
+				 info->struct_ops.map_id);
+		break;
 	default:
 		break;
 	}
@@ -245,7 +248,10 @@ static void show_link_header_plain(struct bpf_link_info *info)
 	else
 		printf("type %u ", info->type);

-	printf("prog %u ", info->prog_id);
+	if (info->type == BPF_LINK_TYPE_STRUCT_OPS)
+		printf("map %u ", info->struct_ops.map_id);
+	else
+		printf("prog %u ", info->prog_id);
 }

 static void show_link_attach_type_plain(__u32 attach_type)
@@ -139,6 +139,9 @@ static void print_entry_json(struct bpf_map_info *info, unsigned char *key,
 		print_hex_data_json(key, info->key_size);
 		jsonw_name(json_wtr, "value");
 		print_hex_data_json(value, info->value_size);
+		if (map_is_map_of_maps(info->type))
+			jsonw_uint_field(json_wtr, "inner_map_id",
+					 *(unsigned int *)value);
 		if (btf) {
 			struct btf_dumper d = {
 				.btf = btf,
@@ -259,8 +262,13 @@ static void print_entry_plain(struct bpf_map_info *info, unsigned char *key,
 	}

 	if (info->value_size) {
-		printf("value:%c", break_names ? '\n' : ' ');
-		fprint_hex(stdout, value, info->value_size, " ");
+		if (map_is_map_of_maps(info->type)) {
+			printf("inner_map_id:%c", break_names ? '\n' : ' ');
+			printf("%u ", *(unsigned int *)value);
+		} else {
+			printf("value:%c", break_names ? '\n' : ' ');
+			fprint_hex(stdout, value, info->value_size, " ");
+		}
 	}

 	printf("\n");
@@ -77,16 +77,21 @@
 /*
  * Helper macros to manipulate data structures
  */
-#ifndef offsetof
-#define offsetof(TYPE, MEMBER) ((unsigned long)&((TYPE *)0)->MEMBER)
-#endif
-#ifndef container_of

+/* offsetof() definition that uses __builtin_offset() might not preserve field
+ * offset CO-RE relocation properly, so force-redefine offsetof() using
+ * old-school approach which works with CO-RE correctly
+ */
+#undef offsetof
+#define offsetof(type, member)	((unsigned long)&((type *)0)->member)
+
+/* redefined container_of() to ensure we use the above offsetof() macro */
+#undef container_of
 #define container_of(ptr, type, member)				\
 	({							\
 		void *__mptr = (void *)(ptr);			\
 		((type *)(__mptr - offsetof(type, member)));	\
 	})
-#endif

 /*
  * Compiler (optimization) barrier.
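
A short sketch of why the redefinition matters in BPF programs: the pointer arithmetic below goes through offsetof(), and with the old-school definition the field offset is recorded as a CO-RE relocation and fixed up for the running kernel (illustrative program, not from this diff):

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

SEC("tp/syscalls/sys_enter_nanosleep")
int first_child(void *ctx)
{
    struct task_struct *task = (void *)bpf_get_current_task();
    struct list_head *node;
    struct task_struct *child;

    /* task->children links children through their ->sibling node, so
     * container_of() (and thus offsetof()) recovers the child task.
     * With __builtin_offsetof() this offset might not be relocated. */
    node = BPF_CORE_READ(task, children.next);
    child = container_of(node, struct task_struct, sibling);

    bpf_printk("first child %lx", (unsigned long)child);
    return 0;
}

char _license[] SEC("license") = "GPL";
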
@@ -351,6 +351,7 @@ struct pt_regs___arm64 {
  * https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc#risc-v-calling-conventions
  */

+/* riscv provides struct user_regs_struct instead of struct pt_regs to userspace */
 #define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x))
 #define __PT_PARM1_REG a0
 #define __PT_PARM2_REG a1
@@ -383,7 +384,7 @@ struct pt_regs___arm64 {
  * https://raw.githubusercontent.com/wiki/foss-for-synopsys-dwc-arc-processors/toolchain/files/ARCv2_ABI.pdf
  */

-/* arc provides struct user_pt_regs instead of struct pt_regs to userspace */
+/* arc provides struct user_regs_struct instead of struct pt_regs to userspace */
 #define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x))
 #define __PT_PARM1_REG scratch.r0
 #define __PT_PARM2_REG scratch.r1
@@ -2250,9 +2250,25 @@ static int btf_dump_type_data_check_overflow(struct btf_dump *d,
 					     const struct btf_type *t,
 					     __u32 id,
 					     const void *data,
-					     __u8 bits_offset)
+					     __u8 bits_offset,
+					     __u8 bit_sz)
 {
-	__s64 size = btf__resolve_size(d->btf, id);
+	__s64 size;
+
+	if (bit_sz) {
+		/* bits_offset is at most 7. bit_sz is at most 128. */
+		__u8 nr_bytes = (bits_offset + bit_sz + 7) / 8;
+
+		/* When bit_sz is non zero, it is called from
+		 * btf_dump_struct_data() where it only cares about
+		 * negative error value.
+		 * Return nr_bytes in success case to make it
+		 * consistent as the regular integer case below.
+		 */
+		return data + nr_bytes > d->typed_dump->data_end ? -E2BIG : nr_bytes;
+	}
+
+	size = btf__resolve_size(d->btf, id);

 	if (size < 0 || size >= INT_MAX) {
 		pr_warn("unexpected size [%zu] for id [%u]\n",
@@ -2407,7 +2423,7 @@ static int btf_dump_dump_type_data(struct btf_dump *d,
 {
 	int size, err = 0;

-	size = btf_dump_type_data_check_overflow(d, t, id, data, bits_offset);
+	size = btf_dump_type_data_check_overflow(d, t, id, data, bits_offset, bit_sz);
 	if (size < 0)
 		return size;
 	err = btf_dump_type_data_check_zero(d, t, id, data, bits_offset, bit_sz);
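
A quick check of the rounding in nr_bytes above, written as a hypothetical standalone test: with the stated bounds (bits_offset at most 7, bit_sz at most 128) the worst case is (7 + 128 + 7) / 8 = 17 bytes, so the __u8 result cannot overflow:

#include <assert.h>

int main(void)
{
    unsigned char bits_offset = 7, bit_sz = 128;

    /* worst case: a maximally shifted 128-bit bitfield */
    assert((bits_offset + bit_sz + 7) / 8 == 17);
    /* a 1-bit field at offset 0 still occupies one byte */
    assert((0 + 1 + 7) / 8 == 1);
    return 0;
}
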
@@ -1,33 +1,6 @@
-bloom_filter_map                         # libbpf: prog 'check_bloom': failed to attach: ERROR: strerror_r(-524)=22
-bpf_cookie/lsm
-bpf_cookie/multi_kprobe_attach_api
-bpf_cookie/multi_kprobe_link_api
-bpf_cookie/trampoline
-bpf_loop/check_callback_fn_stop          # link unexpected error: -524
-bpf_loop/check_invalid_flags
-bpf_loop/check_nested_calls
-bpf_loop/check_non_constant_callback
-bpf_loop/check_nr_loops
-bpf_loop/check_null_callback_ctx
-bpf_loop/check_stack
-bpf_mod_race                             # bpf_mod_kfunc_race__attach unexpected error: -524 (errno 524)
-bpf_tcp_ca/dctcp_fallback
-btf_dump/btf_dump: var_data              # find type id unexpected find type id: actual -2 < expected 0
-cgroup_hierarchical_stats                # attach unexpected error: -524 (errno 524)
-d_path/basic                             # setup attach failed: -524
-deny_namespace                           # attach unexpected error: -524 (errno 524)
-fentry_fexit                             # fentry_attach unexpected error: -1 (errno 524)
-fentry_test                              # fentry_attach unexpected error: -1 (errno 524)
-fexit_sleep                              # fexit_attach fexit attach failed: -1
-fexit_stress                             # fexit attach unexpected fexit attach: actual -524 < expected 0
-fexit_test                               # fexit_attach unexpected error: -1 (errno 524)
-get_func_args_test                       # get_func_args_test__attach unexpected error: -524 (errno 524) (trampoline)
-get_func_ip_test                         # get_func_ip_test__attach unexpected error: -524 (errno 524) (trampoline)
-htab_update/reenter_update
-kfree_skb                                # attach fentry unexpected error: -524 (trampoline)
-kfunc_call/subprog                       # extern (var ksym) 'bpf_prog_active': not found in kernel BTF
-kfunc_call/subprog_lskel                 # skel unexpected error: -2
-kfunc_dynptr_param/dynptr_data_null      # libbpf: prog 'dynptr_data_null': failed to attach: ERROR: strerror_r(-524)=22
+bpf_cookie/multi_kprobe_attach_api       # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3
+bpf_cookie/multi_kprobe_link_api         # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3
+fexit_sleep                              # The test never returns. The remaining tests cannot start.
 kprobe_multi_bench_attach                # bpf_program__attach_kprobe_multi_opts unexpected error: -95
 kprobe_multi_test/attach_api_addrs       # bpf_program__attach_kprobe_multi_opts unexpected error: -95
 kprobe_multi_test/attach_api_pattern     # bpf_program__attach_kprobe_multi_opts unexpected error: -95
@@ -35,51 +8,5 @@ kprobe_multi_test/attach_api_syms # bpf_program__attach_kprobe_mu
 kprobe_multi_test/bench_attach           # bpf_program__attach_kprobe_multi_opts unexpected error: -95
 kprobe_multi_test/link_api_addrs         # link_fd unexpected link_fd: actual -95 < expected 0
 kprobe_multi_test/link_api_syms          # link_fd unexpected link_fd: actual -95 < expected 0
-kprobe_multi_test/skel_api               # kprobe_multi__attach unexpected error: -524 (errno 524)
-ksyms_module/libbpf                      # 'bpf_testmod_ksym_percpu': not found in kernel BTF
-ksyms_module/lskel                       # test_ksyms_module_lskel__open_and_load unexpected error: -2
-libbpf_get_fd_by_id_opts                 # test_libbpf_get_fd_by_id_opts__attach unexpected error: -524 (errno 524)
-linked_list
-lookup_key                               # test_lookup_key__attach unexpected error: -524 (errno 524)
-lru_bug                                  # lru_bug__attach unexpected error: -524 (errno 524)
-modify_return                            # modify_return__attach failed unexpected error: -524 (errno 524)
-module_attach                            # skel_attach skeleton attach failed: -524
-module_fentry_shadow                     # bpf_link_create unexpected bpf_link_create: actual -524 < expected 0
-mptcp/base                               # run_test mptcp unexpected error: -524 (errno 524)
-netcnt                                   # packets unexpected packets: actual 10001 != expected 10000
-rcu_read_lock                            # failed to attach: ERROR: strerror_r(-524)=22
-recursion                                # skel_attach unexpected error: -524 (errno 524)
-ringbuf                                  # skel_attach skeleton attachment failed: -1
-setget_sockopt                           # attach_cgroup unexpected error: -524
-sk_storage_tracing                       # test_sk_storage_tracing__attach unexpected error: -524 (errno 524)
-skc_to_unix_sock                         # could not attach BPF object unexpected error: -524 (errno 524)
-socket_cookie                            # prog_attach unexpected error: -524
-stacktrace_build_id                      # compare_stack_ips stackmap vs. stack_amap err -1 errno 2
-task_local_storage/exit_creds            # skel_attach unexpected error: -524 (errno 524)
-task_local_storage/recursion             # skel_attach unexpected error: -524 (errno 524)
-test_bprm_opts                           # attach attach failed: -524
-test_ima                                 # attach attach failed: -524
-test_local_storage                       # attach lsm attach failed: -524
-test_lsm                                 # test_lsm_first_attach unexpected error: -524 (errno 524)
-test_overhead                            # attach_fentry unexpected error: -524
-timer                                    # timer unexpected error: -524 (errno 524)
-timer_crash                              # timer_crash__attach unexpected error: -524 (errno 524)
-timer_mim                                # timer_mim unexpected error: -524 (errno 524)
-trace_printk                             # trace_printk__attach unexpected error: -1 (errno 524)
-trace_vprintk                            # trace_vprintk__attach unexpected error: -1 (errno 524)
-tracing_struct                           # tracing_struct__attach unexpected error: -524 (errno 524)
-trampoline_count                         # attach_prog unexpected error: -524
-unpriv_bpf_disabled                      # skel_attach unexpected error: -524 (errno 524)
-user_ringbuf/test_user_ringbuf_post_misaligned   # misaligned_skel unexpected error: -524 (errno 524)
-user_ringbuf/test_user_ringbuf_post_producer_wrong_offset
-user_ringbuf/test_user_ringbuf_post_larger_than_ringbuf_sz
-user_ringbuf/test_user_ringbuf_basic     # ringbuf_basic_skel unexpected error: -524 (errno 524)
-user_ringbuf/test_user_ringbuf_sample_full_ring_buffer
-user_ringbuf/test_user_ringbuf_post_alignment_autoadjust
-user_ringbuf/test_user_ringbuf_overfill
-user_ringbuf/test_user_ringbuf_discards_properly_ignored
-user_ringbuf/test_user_ringbuf_loop
-user_ringbuf/test_user_ringbuf_msg_protocol
-user_ringbuf/test_user_ringbuf_blocking_reserve
-verify_pkcs7_sig                         # test_verify_pkcs7_sig__attach unexpected error: -524 (errno 524)
-vmlinux                                  # skel_attach skeleton attach failed: -524
+kprobe_multi_test/skel_api               # libbpf: failed to load BPF skeleton 'kprobe_multi': -3
+module_attach                            # prog 'kprobe_multi': failed to auto-attach: -95
@@ -26,3 +26,4 @@ user_ringbuf # failed to find kernel BTF type ID of
 verif_stats                              # trace_vprintk__open_and_load unexpected error: -9 (?)
 xdp_bonding                              # failed to auto-attach program 'trace_on_entry': -524 (trampoline)
 xdp_metadata                             # JIT does not support calling kernel function (kfunc)
+test_task_under_cgroup                   # JIT does not support calling kernel function (kfunc)
@@ -35,4 +35,10 @@ extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, __u32 offset,
 extern void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *ptr, __u32 offset,
 			  void *buffer, __u32 buffer__szk) __ksym;

+extern int bpf_dynptr_adjust(const struct bpf_dynptr *ptr, __u32 start, __u32 end) __ksym;
+extern int bpf_dynptr_is_null(const struct bpf_dynptr *ptr) __ksym;
+extern int bpf_dynptr_is_rdonly(const struct bpf_dynptr *ptr) __ksym;
+extern __u32 bpf_dynptr_size(const struct bpf_dynptr *ptr) __ksym;
+extern int bpf_dynptr_clone(const struct bpf_dynptr *ptr, struct bpf_dynptr *clone__init) __ksym;
+
 #endif
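
One way to read the new declarations together: bpf_dynptr_clone() creates an independent view whose offset and size can then be moved with bpf_dynptr_adjust() while the parent stays put. A sketch (illustrative program, assuming bpf_dynptr_from_skb() declared earlier in this same header):

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include "bpf_kfuncs.h"

SEC("tc")
int walk_packet(struct __sk_buff *skb)
{
    struct bpf_dynptr ptr, cursor;
    __u8 first_byte;

    if (bpf_dynptr_from_skb(skb, 0, &ptr))
        return 0;

    bpf_dynptr_clone(&ptr, &cursor);

    /* Advance the cursor past a 14-byte Ethernet header... */
    if (bpf_dynptr_adjust(&cursor, 14, bpf_dynptr_size(&cursor)))
        return 0;

    /* ...while the parent still reads from offset 0. */
    bpf_dynptr_read(&first_byte, sizeof(first_byte), &ptr, 0, 0);

    return 0;
}

char _license[] SEC("license") = "GPL";
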
@@ -96,12 +96,80 @@ static void test_parse_test_list(void)
 		goto error;
 	ASSERT_OK(strcmp("*bpf_cookie*", set.tests[0].name), "test name");
 	ASSERT_OK(strcmp("*trace*", set.tests[0].subtests[0]), "subtest name");
+	free_test_filter_set(&set);
+
+	ASSERT_OK(parse_test_list("t/subtest1,t/subtest2", &set, true),
+		  "parsing");
+	if (!ASSERT_EQ(set.cnt, 1, "count of test filters"))
+		goto error;
+	if (!ASSERT_OK_PTR(set.tests, "test filters initialized"))
+		goto error;
+	if (!ASSERT_EQ(set.tests[0].subtest_cnt, 2, "subtest filters count"))
+		goto error;
+	ASSERT_OK(strcmp("t", set.tests[0].name), "test name");
+	ASSERT_OK(strcmp("subtest1", set.tests[0].subtests[0]), "subtest name");
+	ASSERT_OK(strcmp("subtest2", set.tests[0].subtests[1]), "subtest name");
 error:
 	free_test_filter_set(&set);
 }

+static void test_parse_test_list_file(void)
+{
+	struct test_filter_set set;
+	char tmpfile[80];
+	FILE *fp;
+	int fd;
+
+	snprintf(tmpfile, sizeof(tmpfile), "/tmp/bpf_arg_parsing_test.XXXXXX");
+	fd = mkstemp(tmpfile);
+	if (!ASSERT_GE(fd, 0, "create tmp"))
+		return;
+
+	fp = fdopen(fd, "w");
+	if (!ASSERT_NEQ(fp, NULL, "fdopen tmp")) {
+		close(fd);
+		goto out_remove;
+	}
+
+	fprintf(fp, "# comment\n");
+	fprintf(fp, " test_with_spaces \n");
+	fprintf(fp, "testA/subtest # comment\n");
+	fprintf(fp, "testB#comment with no space\n");
+	fprintf(fp, "testB # duplicate\n");
+	fprintf(fp, "testA/subtest # subtest duplicate\n");
+	fprintf(fp, "testA/subtest2\n");
+	fprintf(fp, "testC_no_eof_newline");
+	fflush(fp);
+
+	if (!ASSERT_OK(ferror(fp), "prepare tmp"))
+		goto out_fclose;
+
+	init_test_filter_set(&set);
+
+	ASSERT_OK(parse_test_list_file(tmpfile, &set, true), "parse file");
+
+	ASSERT_EQ(set.cnt, 4, "test count");
+	ASSERT_OK(strcmp("test_with_spaces", set.tests[0].name), "test 0 name");
+	ASSERT_EQ(set.tests[0].subtest_cnt, 0, "test 0 subtest count");
+	ASSERT_OK(strcmp("testA", set.tests[1].name), "test 1 name");
+	ASSERT_EQ(set.tests[1].subtest_cnt, 2, "test 1 subtest count");
+	ASSERT_OK(strcmp("subtest", set.tests[1].subtests[0]), "test 1 subtest 0");
+	ASSERT_OK(strcmp("subtest2", set.tests[1].subtests[1]), "test 1 subtest 1");
+	ASSERT_OK(strcmp("testB", set.tests[2].name), "test 2 name");
+	ASSERT_OK(strcmp("testC_no_eof_newline", set.tests[3].name), "test 3 name");
+
+	free_test_filter_set(&set);
+
+out_fclose:
+	fclose(fp);
+out_remove:
+	remove(tmpfile);
+}
+
 void test_arg_parsing(void)
 {
 	if (test__start_subtest("test_parse_test_list"))
 		test_parse_test_list();
+	if (test__start_subtest("test_parse_test_list_file"))
+		test_parse_test_list_file();
 }
@@ -25,6 +25,8 @@ static void test_setsockopt_set(int cgroup_fd, int sock_fd)
 	if (!ASSERT_OK_PTR(obj, "skel-load"))
 		return;

+	obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
 	/* Attach setsockopt that sets EUNATCH, assert that
 	 * we actually get that error when we run setsockopt()
 	 */
@@ -59,6 +61,8 @@ static void test_setsockopt_set_and_get(int cgroup_fd, int sock_fd)
 	if (!ASSERT_OK_PTR(obj, "skel-load"))
 		return;

+	obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
 	/* Attach setsockopt that sets EUNATCH, and one that gets the
 	 * previously set errno. Assert that we get the same errno back.
 	 */
@@ -100,6 +104,8 @@ static void test_setsockopt_default_zero(int cgroup_fd, int sock_fd)
 	if (!ASSERT_OK_PTR(obj, "skel-load"))
 		return;

+	obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
 	/* Attach setsockopt that gets the previously set errno.
 	 * Assert that, without anything setting one, we get 0.
 	 */
@@ -134,6 +140,8 @@ static void test_setsockopt_default_zero_and_set(int cgroup_fd, int sock_fd)
 	if (!ASSERT_OK_PTR(obj, "skel-load"))
 		return;

+	obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
 	/* Attach setsockopt that gets the previously set errno, and then
 	 * one that sets the errno to EUNATCH. Assert that the get does not
 	 * see EUNATCH set later, and does not prevent EUNATCH from being set.
@@ -177,6 +185,8 @@ static void test_setsockopt_override(int cgroup_fd, int sock_fd)
 	if (!ASSERT_OK_PTR(obj, "skel-load"))
 		return;

+	obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
 	/* Attach setsockopt that sets EUNATCH, then one that sets EISCONN,
 	 * and then one that gets the exported errno. Assert both the syscall
 	 * and the helper see the last set errno.
@@ -224,6 +234,8 @@ static void test_setsockopt_legacy_eperm(int cgroup_fd, int sock_fd)
 	if (!ASSERT_OK_PTR(obj, "skel-load"))
 		return;

+	obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
 	/* Attach setsockopt that returns a reject without setting errno
 	 * (legacy reject), and one that gets the errno. Assert that for
 	 * backward compatibility the syscall results in EPERM, and this
@@ -268,6 +280,8 @@ static void test_setsockopt_legacy_no_override(int cgroup_fd, int sock_fd)
 	if (!ASSERT_OK_PTR(obj, "skel-load"))
 		return;

+	obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
 	/* Attach setsockopt that sets EUNATCH, then one that returns a reject
 	 * without setting errno, and then one that gets the exported errno.
 	 * Assert both the syscall and the helper's errno are unaffected by
@@ -319,6 +333,8 @@ static void test_getsockopt_get(int cgroup_fd, int sock_fd)
 	if (!ASSERT_OK_PTR(obj, "skel-load"))
 		return;

+	obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
 	/* Attach getsockopt that gets previously set errno. Assert that the
 	 * error from kernel is in both ctx_retval_value and retval_value.
 	 */
@@ -359,6 +375,8 @@ static void test_getsockopt_override(int cgroup_fd, int sock_fd)
 	if (!ASSERT_OK_PTR(obj, "skel-load"))
 		return;

+	obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
 	/* Attach getsockopt that sets retval to -EISCONN. Assert that this
 	 * overrides the value from kernel.
 	 */
@@ -396,6 +414,8 @@ static void test_getsockopt_retval_sync(int cgroup_fd, int sock_fd)
 	if (!ASSERT_OK_PTR(obj, "skel-load"))
 		return;

+	obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
 	/* Attach getsockopt that sets retval to -EISCONN, and one that clears
 	 * ctx retval. Assert that the clearing ctx retval is synced to helper
 	 * and clears any errors both from kernel and BPF.
@@ -20,6 +20,14 @@ static struct {
 	{"test_ringbuf", SETUP_SYSCALL_SLEEP},
 	{"test_skb_readonly", SETUP_SKB_PROG},
 	{"test_dynptr_skb_data", SETUP_SKB_PROG},
+	{"test_adjust", SETUP_SYSCALL_SLEEP},
+	{"test_adjust_err", SETUP_SYSCALL_SLEEP},
+	{"test_zero_size_dynptr", SETUP_SYSCALL_SLEEP},
+	{"test_dynptr_is_null", SETUP_SYSCALL_SLEEP},
+	{"test_dynptr_is_rdonly", SETUP_SKB_PROG},
+	{"test_dynptr_clone", SETUP_SKB_PROG},
+	{"test_dynptr_skb_no_buff", SETUP_SKB_PROG},
+	{"test_dynptr_skb_strcmp", SETUP_SKB_PROG},
 };

 static void verify_success(const char *prog_name, enum test_setup_type setup_type)
@@ -5,10 +5,15 @@
 static char bpf_log_buf[4096];
 static bool verbose;

+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
 enum sockopt_test_error {
 	OK = 0,
 	DENY_LOAD,
 	DENY_ATTACH,
+	EOPNOTSUPP_GETSOCKOPT,
 	EPERM_GETSOCKOPT,
 	EFAULT_GETSOCKOPT,
 	EPERM_SETSOCKOPT,
@@ -273,10 +278,31 @@ static struct sockopt_test {
 		.error = EFAULT_GETSOCKOPT,
 	},
 	{
-		.descr = "getsockopt: deny arbitrary ctx->retval",
+		.descr = "getsockopt: ignore >PAGE_SIZE optlen",
 		.insns = {
-			/* ctx->retval = 123 */
-			BPF_MOV64_IMM(BPF_REG_0, 123),
+			/* write 0xFF to the first optval byte */
+
+			/* r6 = ctx->optval */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,
+				    offsetof(struct bpf_sockopt, optval)),
+			/* r2 = ctx->optval */
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
+			/* r6 = ctx->optval + 1 */
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+
+			/* r7 = ctx->optval_end */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_1,
+				    offsetof(struct bpf_sockopt, optval_end)),
+
+			/* if (ctx->optval + 1 <= ctx->optval_end) { */
+			BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 1),
+			/* ctx->optval[0] = 0xFF */
+			BPF_ST_MEM(BPF_B, BPF_REG_2, 0, 0xFF),
+			/* } */
+
+			/* retval changes are ignored */
+			/* ctx->retval = 5 */
+			BPF_MOV64_IMM(BPF_REG_0, 5),
 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
 				    offsetof(struct bpf_sockopt, retval)),
@@ -287,9 +313,11 @@ static struct sockopt_test {
 		.attach_type = BPF_CGROUP_GETSOCKOPT,
 		.expected_attach_type = BPF_CGROUP_GETSOCKOPT,

-		.get_optlen = 64,
-
-		.error = EFAULT_GETSOCKOPT,
+		.get_level = 1234,
+		.get_optname = 5678,
+		.get_optval = {}, /* the changes are ignored */
+		.get_optlen = PAGE_SIZE + 1,
+		.error = EOPNOTSUPP_GETSOCKOPT,
 	},
 	{
 		.descr = "getsockopt: support smaller ctx->optlen",
@@ -648,6 +676,45 @@ static struct sockopt_test {

 		.error = EFAULT_SETSOCKOPT,
 	},
+	{
+		.descr = "setsockopt: ignore >PAGE_SIZE optlen",
+		.insns = {
+			/* write 0xF0 to the first optval byte */
+
+			/* r6 = ctx->optval */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,
+				    offsetof(struct bpf_sockopt, optval)),
+			/* r2 = ctx->optval */
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
+			/* r6 = ctx->optval + 1 */
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+
+			/* r7 = ctx->optval_end */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_1,
+				    offsetof(struct bpf_sockopt, optval_end)),
+
+			/* if (ctx->optval + 1 <= ctx->optval_end) { */
+			BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 1),
+			/* ctx->optval[0] = 0xF0 */
+			BPF_ST_MEM(BPF_B, BPF_REG_2, 0, 0xF0),
+			/* } */
+
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.attach_type = BPF_CGROUP_SETSOCKOPT,
+		.expected_attach_type = BPF_CGROUP_SETSOCKOPT,
+
+		.set_level = SOL_IP,
+		.set_optname = IP_TOS,
+		.set_optval = {},
+		.set_optlen = PAGE_SIZE + 1,
+
+		.get_level = SOL_IP,
+		.get_optname = IP_TOS,
+		.get_optval = {}, /* the changes are ignored */
+		.get_optlen = 4,
+	},
 	{
 		.descr = "setsockopt: allow changing ctx->optlen within bounds",
 		.insns = {
@@ -906,6 +973,13 @@ static int run_test(int cgroup_fd, struct sockopt_test *test)
 	}

 	if (test->set_optlen) {
+		if (test->set_optlen >= PAGE_SIZE) {
+			int num_pages = test->set_optlen / PAGE_SIZE;
+			int remainder = test->set_optlen % PAGE_SIZE;
+
+			test->set_optlen = num_pages * sysconf(_SC_PAGESIZE) + remainder;
+		}
+
 		err = setsockopt(sock_fd, test->set_level, test->set_optname,
 				 test->set_optval, test->set_optlen);
 		if (err) {
@@ -921,7 +995,15 @@ static int run_test(int cgroup_fd, struct sockopt_test *test)
 	}

 	if (test->get_optlen) {
+		if (test->get_optlen >= PAGE_SIZE) {
+			int num_pages = test->get_optlen / PAGE_SIZE;
+			int remainder = test->get_optlen % PAGE_SIZE;
+
+			test->get_optlen = num_pages * sysconf(_SC_PAGESIZE) + remainder;
+		}
+
 		optval = malloc(test->get_optlen);
+		memset(optval, 0, test->get_optlen);
 		socklen_t optlen = test->get_optlen;
 		socklen_t expected_get_optlen = test->get_optlen_ret ?:
 			test->get_optlen;
@@ -929,6 +1011,8 @@ static int run_test(int cgroup_fd, struct sockopt_test *test)
 		err = getsockopt(sock_fd, test->get_level, test->get_optname,
 				 optval, &optlen);
 		if (err) {
+			if (errno == EOPNOTSUPP && test->error == EOPNOTSUPP_GETSOCKOPT)
+				goto free_optval;
 			if (errno == EPERM && test->error == EPERM_GETSOCKOPT)
 				goto free_optval;
 			if (errno == EFAULT && test->error == EFAULT_GETSOCKOPT)
@@ -2,6 +2,8 @@
 #include <test_progs.h>
 #include "cgroup_helpers.h"

+#include "sockopt_inherit.skel.h"
+
 #define SOL_CUSTOM			0xdeadbeef
 #define CUSTOM_INHERIT1			0
 #define CUSTOM_INHERIT2			1
@@ -132,58 +134,30 @@ static int start_server(void)
 	return fd;
 }

-static int prog_attach(struct bpf_object *obj, int cgroup_fd, const char *title,
-		       const char *prog_name)
-{
-	enum bpf_attach_type attach_type;
-	enum bpf_prog_type prog_type;
-	struct bpf_program *prog;
-	int err;
-
-	err = libbpf_prog_type_by_name(title, &prog_type, &attach_type);
-	if (err) {
-		log_err("Failed to deduct types for %s BPF program", prog_name);
-		return -1;
-	}
-
-	prog = bpf_object__find_program_by_name(obj, prog_name);
-	if (!prog) {
-		log_err("Failed to find %s BPF program", prog_name);
-		return -1;
-	}
-
-	err = bpf_prog_attach(bpf_program__fd(prog), cgroup_fd,
-			      attach_type, 0);
-	if (err) {
-		log_err("Failed to attach %s BPF program", prog_name);
-		return -1;
-	}
-
-	return 0;
-}
-
 static void run_test(int cgroup_fd)
 {
+	struct bpf_link *link_getsockopt = NULL;
+	struct bpf_link *link_setsockopt = NULL;
 	int server_fd = -1, client_fd;
-	struct bpf_object *obj;
+	struct sockopt_inherit *obj;
 	void *server_err;
 	pthread_t tid;
 	int err;

-	obj = bpf_object__open_file("sockopt_inherit.bpf.o", NULL);
-	if (!ASSERT_OK_PTR(obj, "obj_open"))
+	obj = sockopt_inherit__open_and_load();
+	if (!ASSERT_OK_PTR(obj, "skel-load"))
 		return;

-	err = bpf_object__load(obj);
-	if (!ASSERT_OK(err, "obj_load"))
-		goto close_bpf_object;
+	obj->bss->page_size = sysconf(_SC_PAGESIZE);

-	err = prog_attach(obj, cgroup_fd, "cgroup/getsockopt", "_getsockopt");
-	if (!ASSERT_OK(err, "prog_attach _getsockopt"))
+	link_getsockopt = bpf_program__attach_cgroup(obj->progs._getsockopt,
+						     cgroup_fd);
+	if (!ASSERT_OK_PTR(link_getsockopt, "cg-attach-getsockopt"))
 		goto close_bpf_object;

-	err = prog_attach(obj, cgroup_fd, "cgroup/setsockopt", "_setsockopt");
-	if (!ASSERT_OK(err, "prog_attach _setsockopt"))
+	link_setsockopt = bpf_program__attach_cgroup(obj->progs._setsockopt,
+						     cgroup_fd);
+	if (!ASSERT_OK_PTR(link_setsockopt, "cg-attach-setsockopt"))
 		goto close_bpf_object;

 	server_fd = start_server();
@@ -217,7 +191,10 @@ static void run_test(int cgroup_fd)
 close_server_fd:
 	close(server_fd);
 close_bpf_object:
-	bpf_object__close(obj);
+	bpf_link__destroy(link_getsockopt);
+	bpf_link__destroy(link_setsockopt);
+
+	sockopt_inherit__destroy(obj);
 }

 void test_sockopt_inherit(void)
@@ -2,61 +2,13 @@
 #include <test_progs.h>
 #include "cgroup_helpers.h"

-static int prog_attach(struct bpf_object *obj, int cgroup_fd, const char *title, const char *name)
-{
-	enum bpf_attach_type attach_type;
-	enum bpf_prog_type prog_type;
-	struct bpf_program *prog;
-	int err;
+#include "sockopt_multi.skel.h"

-	err = libbpf_prog_type_by_name(title, &prog_type, &attach_type);
-	if (err) {
-		log_err("Failed to deduct types for %s BPF program", title);
-		return -1;
-	}
-
-	prog = bpf_object__find_program_by_name(obj, name);
-	if (!prog) {
-		log_err("Failed to find %s BPF program", name);
-		return -1;
-	}
-
-	err = bpf_prog_attach(bpf_program__fd(prog), cgroup_fd,
-			      attach_type, BPF_F_ALLOW_MULTI);
-	if (err) {
-		log_err("Failed to attach %s BPF program", name);
-		return -1;
-	}
-
-	return 0;
-}
-
-static int prog_detach(struct bpf_object *obj, int cgroup_fd, const char *title, const char *name)
-{
-	enum bpf_attach_type attach_type;
-	enum bpf_prog_type prog_type;
-	struct bpf_program *prog;
-	int err;
-
-	err = libbpf_prog_type_by_name(title, &prog_type, &attach_type);
-	if (err)
-		return -1;
-
-	prog = bpf_object__find_program_by_name(obj, name);
-	if (!prog)
-		return -1;
-
-	err = bpf_prog_detach2(bpf_program__fd(prog), cgroup_fd,
-			       attach_type);
-	if (err)
-		return -1;
-
-	return 0;
-}
-
-static int run_getsockopt_test(struct bpf_object *obj, int cg_parent,
+static int run_getsockopt_test(struct sockopt_multi *obj, int cg_parent,
 			       int cg_child, int sock_fd)
 {
+	struct bpf_link *link_parent = NULL;
+	struct bpf_link *link_child = NULL;
 	socklen_t optlen;
 	__u8 buf;
 	int err;
@@ -89,8 +41,9 @@ static int run_getsockopt_test(struct bpf_object *obj, int cg_parent,
 	 * - child:  0x80 -> 0x90
 	 */

-	err = prog_attach(obj, cg_child, "cgroup/getsockopt", "_getsockopt_child");
-	if (err)
+	link_child = bpf_program__attach_cgroup(obj->progs._getsockopt_child,
+						cg_child);
+	if (!ASSERT_OK_PTR(link_child, "cg-attach-getsockopt_child"))
 		goto detach;

 	buf = 0x00;
@@ -113,8 +66,9 @@ static int run_getsockopt_test(struct bpf_object *obj, int cg_parent,
 	 * - parent: 0x90 -> 0xA0
 	 */

-	err = prog_attach(obj, cg_parent, "cgroup/getsockopt", "_getsockopt_parent");
-	if (err)
+	link_parent = bpf_program__attach_cgroup(obj->progs._getsockopt_parent,
+						 cg_parent);
+	if (!ASSERT_OK_PTR(link_parent, "cg-attach-getsockopt_parent"))
 		goto detach;

 	buf = 0x00;
@@ -157,11 +111,8 @@ static int run_getsockopt_test(struct bpf_object *obj, int cg_parent,
 	 * - parent: unexpected 0x40, EPERM
 	 */

-	err = prog_detach(obj, cg_child, "cgroup/getsockopt", "_getsockopt_child");
-	if (err) {
-		log_err("Failed to detach child program");
-		goto detach;
-	}
+	bpf_link__destroy(link_child);
+	link_child = NULL;

 	buf = 0x00;
 	optlen = 1;
@@ -198,15 +149,17 @@ static int run_getsockopt_test(struct bpf_object *obj, int cg_parent,
 	}

 detach:
-	prog_detach(obj, cg_child, "cgroup/getsockopt", "_getsockopt_child");
-	prog_detach(obj, cg_parent, "cgroup/getsockopt", "_getsockopt_parent");
+	bpf_link__destroy(link_child);
+	bpf_link__destroy(link_parent);

 	return err;
 }

-static int run_setsockopt_test(struct bpf_object *obj, int cg_parent,
+static int run_setsockopt_test(struct sockopt_multi *obj, int cg_parent,
 			       int cg_child, int sock_fd)
 {
+	struct bpf_link *link_parent = NULL;
+	struct bpf_link *link_child = NULL;
 	socklen_t optlen;
 	__u8 buf;
 	int err;
@@ -236,8 +189,9 @@ static int run_setsockopt_test(struct bpf_object *obj, int cg_parent,

 	/* Attach child program and make sure it adds 0x10. */

-	err = prog_attach(obj, cg_child, "cgroup/setsockopt", "_setsockopt");
-	if (err)
+	link_child = bpf_program__attach_cgroup(obj->progs._setsockopt,
+						cg_child);
+	if (!ASSERT_OK_PTR(link_child, "cg-attach-setsockopt_child"))
 		goto detach;

 	buf = 0x80;
@@ -263,8 +217,9 @@ static int run_setsockopt_test(struct bpf_object *obj, int cg_parent,

 	/* Attach parent program and make sure it adds another 0x10. */

-	err = prog_attach(obj, cg_parent, "cgroup/setsockopt", "_setsockopt");
-	if (err)
+	link_parent = bpf_program__attach_cgroup(obj->progs._setsockopt,
+						 cg_parent);
+	if (!ASSERT_OK_PTR(link_parent, "cg-attach-setsockopt_parent"))
 		goto detach;

 	buf = 0x80;
@@ -289,8 +244,8 @@ static int run_setsockopt_test(struct bpf_object *obj, int cg_parent,
 	}

 detach:
-	prog_detach(obj, cg_child, "cgroup/setsockopt", "_setsockopt");
-	prog_detach(obj, cg_parent, "cgroup/setsockopt", "_setsockopt");
+	bpf_link__destroy(link_child);
+	bpf_link__destroy(link_parent);

 	return err;
 }
@@ -298,9 +253,8 @@ detach:
 void test_sockopt_multi(void)
 {
 	int cg_parent = -1, cg_child = -1;
-	struct bpf_object *obj = NULL;
+	struct sockopt_multi *obj = NULL;
 	int sock_fd = -1;
-	int err = -1;

 	cg_parent = test__join_cgroup("/parent");
 	if (!ASSERT_GE(cg_parent, 0, "join_cgroup /parent"))
@@ -310,13 +264,11 @@ void test_sockopt_multi(void)
 	if (!ASSERT_GE(cg_child, 0, "join_cgroup /parent/child"))
 		goto out;

-	obj = bpf_object__open_file("sockopt_multi.bpf.o", NULL);
-	if (!ASSERT_OK_PTR(obj, "obj_load"))
+	obj = sockopt_multi__open_and_load();
+	if (!ASSERT_OK_PTR(obj, "skel-load"))
 		goto out;

-	err = bpf_object__load(obj);
-	if (!ASSERT_OK(err, "obj_load"))
-		goto out;
+	obj->bss->page_size = sysconf(_SC_PAGESIZE);

 	sock_fd = socket(AF_INET, SOCK_STREAM, 0);
 	if (!ASSERT_GE(sock_fd, 0, "socket"))
@@ -327,7 +279,7 @@ void test_sockopt_multi(void)

 out:
 	close(sock_fd);
-	bpf_object__close(obj);
+	sockopt_multi__destroy(obj);
 	close(cg_child);
 	close(cg_parent);
 }
@@ -42,6 +42,8 @@ void test_sockopt_qos_to_cc(void)
 	if (!ASSERT_OK_PTR(skel, "skel"))
 		goto done;

+	skel->bss->page_size = sysconf(_SC_PAGESIZE);
+
 	sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
 	if (!ASSERT_GE(sock_fd, 0, "v6 socket open"))
 		goto done;
tools/testing/selftests/bpf/prog_tests/task_under_cgroup.c (new file, 53 lines)
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Bytedance */
+
+#include <sys/syscall.h>
+#include <test_progs.h>
+#include <cgroup_helpers.h>
+#include "test_task_under_cgroup.skel.h"
+
+#define FOO	"/foo"
+
+void test_task_under_cgroup(void)
+{
+	struct test_task_under_cgroup *skel;
+	int ret, foo;
+	pid_t pid;
+
+	foo = test__join_cgroup(FOO);
+	if (!ASSERT_OK(foo < 0, "cgroup_join_foo"))
+		return;
+
+	skel = test_task_under_cgroup__open();
+	if (!ASSERT_OK_PTR(skel, "test_task_under_cgroup__open"))
+		goto cleanup;
+
+	skel->rodata->local_pid = getpid();
+	skel->bss->remote_pid = getpid();
+	skel->rodata->cgid = get_cgroup_id(FOO);
+
+	ret = test_task_under_cgroup__load(skel);
+	if (!ASSERT_OK(ret, "test_task_under_cgroup__load"))
+		goto cleanup;
+
+	ret = test_task_under_cgroup__attach(skel);
+	if (!ASSERT_OK(ret, "test_task_under_cgroup__attach"))
+		goto cleanup;
+
+	pid = fork();
+	if (pid == 0)
+		exit(0);
+
+	ret = (pid == -1);
+	if (ASSERT_OK(ret, "fork process"))
+		wait(NULL);
+
+	test_task_under_cgroup__detach(skel);
+
+	ASSERT_NEQ(skel->bss->remote_pid, skel->rodata->local_pid,
+		   "test task_under_cgroup");
+
+cleanup:
+	test_task_under_cgroup__destroy(skel);
+	close(foo);
+}
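
The BPF side of this test is not shown in the hunks above. A minimal sketch of how a tracing program might call the new bpf_task_under_cgroup() kfunc (the attach point and names here are illustrative, not necessarily the selftest's actual program; cgid/local_pid/remote_pid mirror the globals set by the userspace test):

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;
void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
long bpf_task_under_cgroup(struct task_struct *task,
                           struct cgroup *ancestor) __ksym;

const volatile int local_pid;
const volatile __u64 cgid;
int remote_pid;

SEC("fentry/wake_up_new_task")
int BPF_PROG(handle_new_task, struct task_struct *task)
{
    struct cgroup *cgrp;

    if (local_pid != (bpf_get_current_pid_tgid() >> 32))
        return 0;

    cgrp = bpf_cgroup_from_id(cgid);	/* acquires a reference */
    if (!cgrp)
        return 0;

    if (bpf_task_under_cgroup(task, cgrp))
        remote_pid = task->tgid;

    bpf_cgroup_release(cgrp);		/* must release the acquired ref */
    return 0;
}

char _license[] SEC("license") = "GPL";
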
@@ -55,6 +55,7 @@
 #include "verifier_spill_fill.skel.h"
 #include "verifier_spin_lock.skel.h"
 #include "verifier_stack_ptr.skel.h"
+#include "verifier_subprog_precision.skel.h"
 #include "verifier_subreg.skel.h"
 #include "verifier_uninit.skel.h"
 #include "verifier_unpriv.skel.h"
@@ -154,6 +155,7 @@ void test_verifier_sock(void) { RUN(verifier_sock); }
 void test_verifier_spill_fill(void) { RUN(verifier_spill_fill); }
 void test_verifier_spin_lock(void) { RUN(verifier_spin_lock); }
 void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); }
+void test_verifier_subprog_precision(void) { RUN(verifier_subprog_precision); }
 void test_verifier_subreg(void) { RUN(verifier_subreg); }
 void test_verifier_uninit(void) { RUN(verifier_uninit); }
 void test_verifier_unpriv(void) { RUN(verifier_unpriv); }
@@ -86,6 +86,10 @@
 #define POINTER_VALUE	0xcafe4all
 #define TEST_DATA_LEN	64

+#ifndef __used
+#define __used __attribute__((used))
+#endif
+
 #if defined(__TARGET_ARCH_x86)
 #define SYSCALL_WRAPPER 1
 #define SYS_PREFIX "__x64_"
@@ -12,6 +12,7 @@ __u32 invocations = 0;
 __u32 assertion_error = 0;
 __u32 retval_value = 0;
 __u32 ctx_retval_value = 0;
+__u32 page_size = 0;

 SEC("cgroup/getsockopt")
 int get_retval(struct bpf_sockopt *ctx)
@@ -20,6 +21,10 @@ int get_retval(struct bpf_sockopt *ctx)
 	ctx_retval_value = ctx->retval;
 	__sync_fetch_and_add(&invocations, 1);

+	/* optval larger than PAGE_SIZE uses kernel's buffer. */
+	if (ctx->optlen > page_size)
+		ctx->optlen = 0;
+
 	return 1;
 }

@@ -31,6 +36,10 @@ int set_eisconn(struct bpf_sockopt *ctx)
 	if (bpf_set_retval(-EISCONN))
 		assertion_error = 1;

+	/* optval larger than PAGE_SIZE uses kernel's buffer. */
+	if (ctx->optlen > page_size)
+		ctx->optlen = 0;
+
 	return 1;
 }

@@ -41,5 +50,9 @@ int clear_retval(struct bpf_sockopt *ctx)

 	ctx->retval = 0;

+	/* optval larger than PAGE_SIZE uses kernel's buffer. */
+	if (ctx->optlen > page_size)
+		ctx->optlen = 0;
+
 	return 1;
 }
@@ -11,6 +11,7 @@
 __u32 invocations = 0;
 __u32 assertion_error = 0;
 __u32 retval_value = 0;
+__u32 page_size = 0;

 SEC("cgroup/setsockopt")
 int get_retval(struct bpf_sockopt *ctx)
@@ -18,6 +19,10 @@ int get_retval(struct bpf_sockopt *ctx)
 	retval_value = bpf_get_retval();
 	__sync_fetch_and_add(&invocations, 1);

+	/* optval larger than PAGE_SIZE uses kernel's buffer. */
+	if (ctx->optlen > page_size)
+		ctx->optlen = 0;
+
 	return 1;
 }

@@ -29,6 +34,10 @@ int set_eunatch(struct bpf_sockopt *ctx)
 	if (bpf_set_retval(-EUNATCH))
 		assertion_error = 1;

+	/* optval larger than PAGE_SIZE uses kernel's buffer. */
+	if (ctx->optlen > page_size)
+		ctx->optlen = 0;
+
 	return 0;
 }

@@ -40,6 +49,10 @@ int set_eisconn(struct bpf_sockopt *ctx)
 	if (bpf_set_retval(-EISCONN))
 		assertion_error = 1;

+	/* optval larger than PAGE_SIZE uses kernel's buffer. */
+	if (ctx->optlen > page_size)
+		ctx->optlen = 0;
+
 	return 0;
 }

@@ -48,5 +61,9 @@ int legacy_eperm(struct bpf_sockopt *ctx)
 {
 	__sync_fetch_and_add(&invocations, 1);

+	/* optval larger than PAGE_SIZE uses kernel's buffer. */
+	if (ctx->optlen > page_size)
+		ctx->optlen = 0;
+
 	return 0;
 }
@ -1378,3 +1378,310 @@ int invalid_slice_rdwr_rdonly(struct __sk_buff *skb)
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* bpf_dynptr_adjust can only be called on initialized dynptrs */
|
||||
SEC("?raw_tp")
|
||||
__failure __msg("Expected an initialized dynptr as arg #1")
|
||||
int dynptr_adjust_invalid(void *ctx)
|
||||
{
|
||||
struct bpf_dynptr ptr;
|
||||
|
||||
/* this should fail */
|
||||
bpf_dynptr_adjust(&ptr, 1, 2);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* bpf_dynptr_is_null can only be called on initialized dynptrs */
|
||||
SEC("?raw_tp")
|
||||
__failure __msg("Expected an initialized dynptr as arg #1")
|
||||
int dynptr_is_null_invalid(void *ctx)
|
||||
{
|
||||
struct bpf_dynptr ptr;
|
||||
|
||||
/* this should fail */
|
||||
bpf_dynptr_is_null(&ptr);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* bpf_dynptr_is_rdonly can only be called on initialized dynptrs */
|
||||
SEC("?raw_tp")
|
||||
__failure __msg("Expected an initialized dynptr as arg #1")
|
||||
int dynptr_is_rdonly_invalid(void *ctx)
|
||||
{
|
||||
struct bpf_dynptr ptr;
|
||||
|
||||
/* this should fail */
|
||||
bpf_dynptr_is_rdonly(&ptr);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* bpf_dynptr_size can only be called on initialized dynptrs */
|
||||
SEC("?raw_tp")
|
||||
__failure __msg("Expected an initialized dynptr as arg #1")
|
||||
int dynptr_size_invalid(void *ctx)
|
||||
{
|
||||
struct bpf_dynptr ptr;
|
||||
|
||||
/* this should fail */
|
||||
bpf_dynptr_size(&ptr);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Only initialized dynptrs can be cloned */
|
||||
SEC("?raw_tp")
|
||||
__failure __msg("Expected an initialized dynptr as arg #1")
|
||||
int clone_invalid1(void *ctx)
|
||||
{
|
||||
struct bpf_dynptr ptr1;
|
||||
struct bpf_dynptr ptr2;
|
||||
|
||||
/* this should fail */
|
||||
bpf_dynptr_clone(&ptr1, &ptr2);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Can't overwrite an existing dynptr when cloning */
|
||||
SEC("?xdp")
|
||||
__failure __msg("cannot overwrite referenced dynptr")
|
||||
int clone_invalid2(struct xdp_md *xdp)
|
||||
{
|
||||
struct bpf_dynptr ptr1;
|
||||
struct bpf_dynptr clone;
|
||||
|
||||
bpf_dynptr_from_xdp(xdp, 0, &ptr1);
|
||||
|
||||
bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &clone);
|
||||
|
||||
/* this should fail */
|
||||
bpf_dynptr_clone(&ptr1, &clone);
|
||||
|
||||
bpf_ringbuf_submit_dynptr(&clone, 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Invalidating a dynptr should invalidate its clones */
|
||||
SEC("?raw_tp")
|
||||
__failure __msg("Expected an initialized dynptr as arg #3")
|
||||
int clone_invalidate1(void *ctx)
|
||||
{
|
||||
struct bpf_dynptr clone;
|
||||
struct bpf_dynptr ptr;
|
||||
char read_data[64];
|
||||
|
||||
bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
|
||||
|
||||
bpf_dynptr_clone(&ptr, &clone);
|
||||
|
||||
bpf_ringbuf_submit_dynptr(&ptr, 0);
|
||||
|
||||
/* this should fail */
|
||||
bpf_dynptr_read(read_data, sizeof(read_data), &clone, 0, 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Invalidating a dynptr should invalidate its parent */
|
||||
SEC("?raw_tp")
|
||||
__failure __msg("Expected an initialized dynptr as arg #3")
|
||||
int clone_invalidate2(void *ctx)
|
||||
{
|
||||
struct bpf_dynptr ptr;
|
||||
struct bpf_dynptr clone;
|
||||
char read_data[64];
|
||||
|
||||
bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
|
||||
|
||||
bpf_dynptr_clone(&ptr, &clone);
|
||||
|
||||
bpf_ringbuf_submit_dynptr(&clone, 0);
|
||||
|
||||
/* this should fail */
|
||||
bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Invalidating a dynptr should invalidate its siblings */
|
||||
SEC("?raw_tp")
|
||||
__failure __msg("Expected an initialized dynptr as arg #3")
|
||||
int clone_invalidate3(void *ctx)
|
||||
{
|
||||
struct bpf_dynptr ptr;
|
||||
struct bpf_dynptr clone1;
|
||||
struct bpf_dynptr clone2;
|
||||
char read_data[64];
|
||||
|
||||
bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
|
||||
|
||||
bpf_dynptr_clone(&ptr, &clone1);
|
||||
|
||||
bpf_dynptr_clone(&ptr, &clone2);
|
||||
|
||||
bpf_ringbuf_submit_dynptr(&clone2, 0);
|
||||
|
||||
/* this should fail */
|
||||
bpf_dynptr_read(read_data, sizeof(read_data), &clone1, 0, 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Invalidating a dynptr should invalidate any data slices
 * of its clones
 */
SEC("?raw_tp")
__failure __msg("invalid mem access 'scalar'")
int clone_invalidate4(void *ctx)
{
	struct bpf_dynptr ptr;
	struct bpf_dynptr clone;
	int *data;

	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);

	bpf_dynptr_clone(&ptr, &clone);
	data = bpf_dynptr_data(&clone, 0, sizeof(val));
	if (!data)
		return 0;

	bpf_ringbuf_submit_dynptr(&ptr, 0);

	/* this should fail */
	*data = 123;

	return 0;
}

/* Invalidating a dynptr should invalidate any data slices
 * of its parent
 */
SEC("?raw_tp")
__failure __msg("invalid mem access 'scalar'")
int clone_invalidate5(void *ctx)
{
	struct bpf_dynptr ptr;
	struct bpf_dynptr clone;
	int *data;

	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
	data = bpf_dynptr_data(&ptr, 0, sizeof(val));
	if (!data)
		return 0;

	bpf_dynptr_clone(&ptr, &clone);

	bpf_ringbuf_submit_dynptr(&clone, 0);

	/* this should fail */
	*data = 123;

	return 0;
}

/* Invalidating a dynptr should invalidate any data slices
 * of its sibling
 */
SEC("?raw_tp")
__failure __msg("invalid mem access 'scalar'")
int clone_invalidate6(void *ctx)
{
	struct bpf_dynptr ptr;
	struct bpf_dynptr clone1;
	struct bpf_dynptr clone2;
	int *data;

	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);

	bpf_dynptr_clone(&ptr, &clone1);

	bpf_dynptr_clone(&ptr, &clone2);

	data = bpf_dynptr_data(&clone1, 0, sizeof(val));
	if (!data)
		return 0;

	bpf_ringbuf_submit_dynptr(&clone2, 0);

	/* this should fail */
	*data = 123;

	return 0;
}

/* A skb clone's data slices should be invalid anytime packet data changes */
SEC("?tc")
__failure __msg("invalid mem access 'scalar'")
int clone_skb_packet_data(struct __sk_buff *skb)
{
	char buffer[sizeof(__u32)] = {};
	struct bpf_dynptr clone;
	struct bpf_dynptr ptr;
	__u32 *data;

	bpf_dynptr_from_skb(skb, 0, &ptr);

	bpf_dynptr_clone(&ptr, &clone);
	data = bpf_dynptr_slice_rdwr(&clone, 0, buffer, sizeof(buffer));
	if (!data)
		return XDP_DROP;

	if (bpf_skb_pull_data(skb, skb->len))
		return SK_DROP;

	/* this should fail */
	*data = 123;

	return 0;
}

/* A xdp clone's data slices should be invalid anytime packet data changes */
SEC("?xdp")
__failure __msg("invalid mem access 'scalar'")
int clone_xdp_packet_data(struct xdp_md *xdp)
{
	char buffer[sizeof(__u32)] = {};
	struct bpf_dynptr clone;
	struct bpf_dynptr ptr;
	struct ethhdr *hdr;
	__u32 *data;

	bpf_dynptr_from_xdp(xdp, 0, &ptr);

	bpf_dynptr_clone(&ptr, &clone);
	data = bpf_dynptr_slice_rdwr(&clone, 0, buffer, sizeof(buffer));
	if (!data)
		return XDP_DROP;

	if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(*hdr)))
		return XDP_DROP;

	/* this should fail */
	*data = 123;

	return 0;
}

/* Buffers that are provided must be sufficiently long */
SEC("?cgroup_skb/egress")
__failure __msg("memory, len pair leads to invalid memory access")
int test_dynptr_skb_small_buff(struct __sk_buff *skb)
{
	struct bpf_dynptr ptr;
	char buffer[8] = {};
	__u64 *data;

	if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
		err = 1;
		return 1;
	}

	/* This may return NULL. SKB may require a buffer */
	data = bpf_dynptr_slice(&ptr, 0, buffer, 9);

	return !!data;
}
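Taken together, the clone_invalidate* tests pin down one rule: the ringbuf record behind a dynptr has a single lifetime shared by the parent and every clone, so submitting or discarding through any one of them invalidates all outstanding slices. A minimal sketch of the ordering the verifier does accept (ringbuf and val are the globals this file already uses; the program name is illustrative):

SEC("?raw_tp")
int clone_valid_use(void *ctx)
{
	struct bpf_dynptr ptr, clone;
	int *data;

	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
	bpf_dynptr_clone(&ptr, &clone);

	data = bpf_dynptr_data(&clone, 0, sizeof(val));
	if (data)
		*data = 123;	/* slice is still valid here */

	/* invalidates ptr, clone, and data in one go */
	bpf_ringbuf_submit_dynptr(&ptr, 0);
	return 0;
}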
@@ -207,3 +207,339 @@ int test_dynptr_skb_data(struct __sk_buff *skb)

	return 1;
}

SEC("tp/syscalls/sys_enter_nanosleep")
int test_adjust(void *ctx)
{
	struct bpf_dynptr ptr;
	__u32 bytes = 64;
	__u32 off = 10;
	__u32 trim = 15;

	if (bpf_get_current_pid_tgid() >> 32 != pid)
		return 0;

	err = bpf_ringbuf_reserve_dynptr(&ringbuf, bytes, 0, &ptr);
	if (err) {
		err = 1;
		goto done;
	}

	if (bpf_dynptr_size(&ptr) != bytes) {
		err = 2;
		goto done;
	}

	/* Advance the dynptr by off */
	err = bpf_dynptr_adjust(&ptr, off, bpf_dynptr_size(&ptr));
	if (err) {
		err = 3;
		goto done;
	}

	if (bpf_dynptr_size(&ptr) != bytes - off) {
		err = 4;
		goto done;
	}

	/* Trim the dynptr */
	err = bpf_dynptr_adjust(&ptr, off, 15);
	if (err) {
		err = 5;
		goto done;
	}

	/* Check that the size was adjusted correctly */
	if (bpf_dynptr_size(&ptr) != trim - off) {
		err = 6;
		goto done;
	}

done:
	bpf_ringbuf_discard_dynptr(&ptr, 0);
	return 0;
}

SEC("tp/syscalls/sys_enter_nanosleep")
int test_adjust_err(void *ctx)
{
	char write_data[45] = "hello there, world!!";
	struct bpf_dynptr ptr;
	__u32 size = 64;
	__u32 off = 20;

	if (bpf_get_current_pid_tgid() >> 32 != pid)
		return 0;

	if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr)) {
		err = 1;
		goto done;
	}

	/* Check that start can't be greater than end */
	if (bpf_dynptr_adjust(&ptr, 5, 1) != -EINVAL) {
		err = 2;
		goto done;
	}

	/* Check that start can't be greater than size */
	if (bpf_dynptr_adjust(&ptr, size + 1, size + 1) != -ERANGE) {
		err = 3;
		goto done;
	}

	/* Check that end can't be greater than size */
	if (bpf_dynptr_adjust(&ptr, 0, size + 1) != -ERANGE) {
		err = 4;
		goto done;
	}

	if (bpf_dynptr_adjust(&ptr, off, size)) {
		err = 5;
		goto done;
	}

	/* Check that you can't write more bytes than available into the dynptr
	 * after you've adjusted it
	 */
	if (bpf_dynptr_write(&ptr, 0, &write_data, sizeof(write_data), 0) != -E2BIG) {
		err = 6;
		goto done;
	}

	/* Check that even after adjusting, submitting/discarding
	 * a ringbuf dynptr works
	 */
	bpf_ringbuf_submit_dynptr(&ptr, 0);
	return 0;

done:
	bpf_ringbuf_discard_dynptr(&ptr, 0);
	return 0;
}

SEC("tp/syscalls/sys_enter_nanosleep")
int test_zero_size_dynptr(void *ctx)
{
	char write_data = 'x', read_data;
	struct bpf_dynptr ptr;
	__u32 size = 64;

	if (bpf_get_current_pid_tgid() >> 32 != pid)
		return 0;

	if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr)) {
		err = 1;
		goto done;
	}

	/* After this, the dynptr has a size of 0 */
	if (bpf_dynptr_adjust(&ptr, size, size)) {
		err = 2;
		goto done;
	}

	/* Test that reading + writing non-zero bytes is not ok */
	if (bpf_dynptr_read(&read_data, sizeof(read_data), &ptr, 0, 0) != -E2BIG) {
		err = 3;
		goto done;
	}

	if (bpf_dynptr_write(&ptr, 0, &write_data, sizeof(write_data), 0) != -E2BIG) {
		err = 4;
		goto done;
	}

	/* Test that reading + writing 0 bytes from a 0-size dynptr is ok */
	if (bpf_dynptr_read(&read_data, 0, &ptr, 0, 0)) {
		err = 5;
		goto done;
	}

	if (bpf_dynptr_write(&ptr, 0, &write_data, 0, 0)) {
		err = 6;
		goto done;
	}

	err = 0;

done:
	bpf_ringbuf_discard_dynptr(&ptr, 0);
	return 0;
}

SEC("tp/syscalls/sys_enter_nanosleep")
int test_dynptr_is_null(void *ctx)
{
	struct bpf_dynptr ptr1;
	struct bpf_dynptr ptr2;
	__u64 size = 4;

	if (bpf_get_current_pid_tgid() >> 32 != pid)
		return 0;

	/* Pass in invalid flags, get back an invalid dynptr */
	if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 123, &ptr1) != -EINVAL) {
		err = 1;
		goto exit_early;
	}

	/* Test that the invalid dynptr is null */
	if (!bpf_dynptr_is_null(&ptr1)) {
		err = 2;
		goto exit_early;
	}

	/* Get a valid dynptr */
	if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr2)) {
		err = 3;
		goto exit;
	}

	/* Test that the valid dynptr is not null */
	if (bpf_dynptr_is_null(&ptr2)) {
		err = 4;
		goto exit;
	}

exit:
	bpf_ringbuf_discard_dynptr(&ptr2, 0);
exit_early:
	bpf_ringbuf_discard_dynptr(&ptr1, 0);
	return 0;
}

SEC("cgroup_skb/egress")
int test_dynptr_is_rdonly(struct __sk_buff *skb)
{
	struct bpf_dynptr ptr1;
	struct bpf_dynptr ptr2;
	struct bpf_dynptr ptr3;

	/* Pass in invalid flags, get back an invalid dynptr */
	if (bpf_dynptr_from_skb(skb, 123, &ptr1) != -EINVAL) {
		err = 1;
		return 0;
	}

	/* Test that an invalid dynptr is_rdonly returns false */
	if (bpf_dynptr_is_rdonly(&ptr1)) {
		err = 2;
		return 0;
	}

	/* Get a read-only dynptr */
	if (bpf_dynptr_from_skb(skb, 0, &ptr2)) {
		err = 3;
		return 0;
	}

	/* Test that the dynptr is read-only */
	if (!bpf_dynptr_is_rdonly(&ptr2)) {
		err = 4;
		return 0;
	}

	/* Get a read-writeable dynptr */
	if (bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr3)) {
		err = 5;
		goto done;
	}

	/* Test that the dynptr is read-write */
	if (bpf_dynptr_is_rdonly(&ptr3)) {
		err = 6;
		goto done;
	}

done:
	bpf_ringbuf_discard_dynptr(&ptr3, 0);
	return 0;
}

SEC("cgroup_skb/egress")
int test_dynptr_clone(struct __sk_buff *skb)
{
	struct bpf_dynptr ptr1;
	struct bpf_dynptr ptr2;
	__u32 off = 2, size;

	/* Get a dynptr */
	if (bpf_dynptr_from_skb(skb, 0, &ptr1)) {
		err = 1;
		return 0;
	}

	if (bpf_dynptr_adjust(&ptr1, off, bpf_dynptr_size(&ptr1))) {
		err = 2;
		return 0;
	}

	/* Clone the dynptr */
	if (bpf_dynptr_clone(&ptr1, &ptr2)) {
		err = 3;
		return 0;
	}

	size = bpf_dynptr_size(&ptr1);

	/* Check that the clone has the same size and rd-only */
	if (bpf_dynptr_size(&ptr2) != size) {
		err = 4;
		return 0;
	}

	if (bpf_dynptr_is_rdonly(&ptr2) != bpf_dynptr_is_rdonly(&ptr1)) {
		err = 5;
		return 0;
	}

	/* Advance and trim the original dynptr */
	bpf_dynptr_adjust(&ptr1, 5, 5);

	/* Check that only the original dynptr was affected, and the clone wasn't */
	if (bpf_dynptr_size(&ptr2) != size) {
		err = 6;
		return 0;
	}

	return 0;
}

SEC("?cgroup_skb/egress")
int test_dynptr_skb_no_buff(struct __sk_buff *skb)
{
	struct bpf_dynptr ptr;
	__u64 *data;

	if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
		err = 1;
		return 1;
	}

	/* This may return NULL. SKB may require a buffer */
	data = bpf_dynptr_slice(&ptr, 0, NULL, 1);

	return !!data;
}

SEC("?cgroup_skb/egress")
int test_dynptr_skb_strcmp(struct __sk_buff *skb)
{
	struct bpf_dynptr ptr;
	char *data;

	if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
		err = 1;
		return 1;
	}

	/* This may return NULL. SKB may require a buffer */
	data = bpf_dynptr_slice(&ptr, 0, NULL, 10);
	if (data) {
		bpf_strncmp(data, 10, "foo");
		return 1;
	}

	return 1;
}
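These success-path tests all follow the same contract: userspace sets the global pid, triggers the tracepoint, then reads back err, where 0 means every numbered check passed. A sketch of a typical driver (the skeleton name and the usleep trigger are assumptions based on the usual selftest layout, not part of this diff):

#include <stdio.h>
#include <unistd.h>
#include "dynptr_success.skel.h"	/* assumed bpftool-generated skeleton */

static void run_dynptr_test(void)
{
	struct dynptr_success *skel;

	skel = dynptr_success__open_and_load();
	if (!skel)
		return;
	skel->bss->pid = getpid();
	if (!dynptr_success__attach(skel))
		usleep(1);	/* enters nanosleep, firing the tracepoint */
	printf("err=%d\n", skel->bss->err);
	dynptr_success__destroy(skel);
}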
@@ -651,29 +651,25 @@ int iter_stack_array_loop(const void *ctx)
 	return sum;
 }
 
-#define ARR_SZ 16
-
-static __noinline void fill(struct bpf_iter_num *it, int *arr, int mul)
+static __noinline void fill(struct bpf_iter_num *it, int *arr, __u32 n, int mul)
 {
-	int *t;
-	__u64 i;
+	int *t, i;
 
 	while ((t = bpf_iter_num_next(it))) {
 		i = *t;
-		if (i >= ARR_SZ)
+		if (i >= n)
 			break;
 		arr[i] = i * mul;
 	}
 }
 
-static __noinline int sum(struct bpf_iter_num *it, int *arr)
+static __noinline int sum(struct bpf_iter_num *it, int *arr, __u32 n)
 {
-	int *t, sum = 0;;
-	__u64 i;
+	int *t, i, sum = 0;;
 
 	while ((t = bpf_iter_num_next(it))) {
 		i = *t;
-		if (i >= ARR_SZ)
+		if (i >= n)
 			break;
 		sum += arr[i];
 	}
@@ -685,7 +681,7 @@ SEC("raw_tp")
 __success
 int iter_pass_iter_ptr_to_subprog(const void *ctx)
 {
-	int arr1[ARR_SZ], arr2[ARR_SZ];
+	int arr1[16], arr2[32];
 	struct bpf_iter_num it;
 	int n, sum1, sum2;
 
@@ -694,25 +690,25 @@ int iter_pass_iter_ptr_to_subprog(const void *ctx)
 	/* fill arr1 */
 	n = ARRAY_SIZE(arr1);
 	bpf_iter_num_new(&it, 0, n);
-	fill(&it, arr1, 2);
+	fill(&it, arr1, n, 2);
 	bpf_iter_num_destroy(&it);
 
 	/* fill arr2 */
 	n = ARRAY_SIZE(arr2);
 	bpf_iter_num_new(&it, 0, n);
-	fill(&it, arr2, 10);
+	fill(&it, arr2, n, 10);
 	bpf_iter_num_destroy(&it);
 
 	/* sum arr1 */
 	n = ARRAY_SIZE(arr1);
 	bpf_iter_num_new(&it, 0, n);
-	sum1 = sum(&it, arr1);
+	sum1 = sum(&it, arr1, n);
 	bpf_iter_num_destroy(&it);
 
 	/* sum arr2 */
 	n = ARRAY_SIZE(arr2);
 	bpf_iter_num_new(&it, 0, n);
-	sum2 = sum(&it, arr2);
+	sum2 = sum(&it, arr2, n);
 	bpf_iter_num_destroy(&it);
 
 	bpf_printk("sum1=%d, sum2=%d", sum1, sum2);
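The shape of the change above: fill() and sum() stop assuming a fixed ARR_SZ and instead bound every access against the length the caller passes in, which is what lets arr1 and arr2 now differ in size (16 vs 32). The generic pattern, as a standalone sketch:

/* an index coming out of an iterator is only safe to use after an
 * explicit check against the real array length; the verifier learns
 * i < n on the fallthrough path
 */
static __noinline int read_at(int *arr, __u32 n, __u32 i)
{
	if (i >= n)
		return 0;
	return arr[i];
}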
@@ -9,6 +9,8 @@ char _license[] SEC("license") = "GPL";
 #define CUSTOM_INHERIT2 1
 #define CUSTOM_LISTENER 2
 
+__u32 page_size = 0;
+
 struct sockopt_inherit {
 	__u8 val;
 };
@@ -55,7 +57,7 @@ int _getsockopt(struct bpf_sockopt *ctx)
 	__u8 *optval = ctx->optval;
 
 	if (ctx->level != SOL_CUSTOM)
-		return 1; /* only interested in SOL_CUSTOM */
+		goto out; /* only interested in SOL_CUSTOM */
 
 	if (optval + 1 > optval_end)
 		return 0; /* EPERM, bounds check */
@@ -70,6 +72,12 @@ int _getsockopt(struct bpf_sockopt *ctx)
 	ctx->optlen = 1;
 
 	return 1;
+
+out:
+	/* optval larger than PAGE_SIZE use kernel's buffer. */
+	if (ctx->optlen > page_size)
+		ctx->optlen = 0;
+	return 1;
 }
 
 SEC("cgroup/setsockopt")
@@ -80,7 +88,7 @@ int _setsockopt(struct bpf_sockopt *ctx)
 	__u8 *optval = ctx->optval;
 
 	if (ctx->level != SOL_CUSTOM)
-		return 1; /* only interested in SOL_CUSTOM */
+		goto out; /* only interested in SOL_CUSTOM */
 
 	if (optval + 1 > optval_end)
 		return 0; /* EPERM, bounds check */
@@ -93,4 +101,10 @@ int _setsockopt(struct bpf_sockopt *ctx)
 	ctx->optlen = -1;
 
 	return 1;
+
+out:
+	/* optval larger than PAGE_SIZE use kernel's buffer. */
+	if (ctx->optlen > page_size)
+		ctx->optlen = 0;
+	return 1;
 }
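The same out: epilogue is stitched into every cgroup sockopt program in this and the following files. Its logic could equally live in one small helper (a sketch of the intent, not what the diff does; page_size is the global each file now declares, filled in by userspace, typically from sysconf(_SC_PAGESIZE)):

static __always_inline int bypass_large_optval(struct bpf_sockopt *ctx)
{
	/* An optval larger than PAGE_SIZE is backed by the kernel's own
	 * buffer; truncating optlen to 0 tells the kernel to leave that
	 * buffer untouched instead of failing with EFAULT.
	 */
	if (ctx->optlen > page_size)
		ctx->optlen = 0;
	return 1;
}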
@@ -5,6 +5,8 @@
 
 char _license[] SEC("license") = "GPL";
 
+__u32 page_size = 0;
+
 SEC("cgroup/getsockopt")
 int _getsockopt_child(struct bpf_sockopt *ctx)
 {
@@ -12,7 +14,7 @@ int _getsockopt_child(struct bpf_sockopt *ctx)
 	__u8 *optval = ctx->optval;
 
 	if (ctx->level != SOL_IP || ctx->optname != IP_TOS)
-		return 1;
+		goto out;
 
 	if (optval + 1 > optval_end)
 		return 0; /* EPERM, bounds check */
@@ -26,6 +28,12 @@ int _getsockopt_child(struct bpf_sockopt *ctx)
 	ctx->optlen = 1;
 
 	return 1;
+
+out:
+	/* optval larger than PAGE_SIZE use kernel's buffer. */
+	if (ctx->optlen > page_size)
+		ctx->optlen = 0;
+	return 1;
 }
 
 SEC("cgroup/getsockopt")
@@ -35,7 +43,7 @@ int _getsockopt_parent(struct bpf_sockopt *ctx)
 	__u8 *optval = ctx->optval;
 
 	if (ctx->level != SOL_IP || ctx->optname != IP_TOS)
-		return 1;
+		goto out;
 
 	if (optval + 1 > optval_end)
 		return 0; /* EPERM, bounds check */
@@ -49,6 +57,12 @@ int _getsockopt_parent(struct bpf_sockopt *ctx)
 	ctx->optlen = 1;
 
 	return 1;
+
+out:
+	/* optval larger than PAGE_SIZE use kernel's buffer. */
+	if (ctx->optlen > page_size)
+		ctx->optlen = 0;
+	return 1;
 }
 
 SEC("cgroup/setsockopt")
@@ -58,7 +72,7 @@ int _setsockopt(struct bpf_sockopt *ctx)
 	__u8 *optval = ctx->optval;
 
 	if (ctx->level != SOL_IP || ctx->optname != IP_TOS)
-		return 1;
+		goto out;
 
 	if (optval + 1 > optval_end)
 		return 0; /* EPERM, bounds check */
@@ -67,4 +81,10 @@ int _setsockopt(struct bpf_sockopt *ctx)
 	ctx->optlen = 1;
 
 	return 1;
+
+out:
+	/* optval larger than PAGE_SIZE use kernel's buffer. */
+	if (ctx->optlen > page_size)
+		ctx->optlen = 0;
+	return 1;
 }
@@ -9,6 +9,8 @@
 
 char _license[] SEC("license") = "GPL";
 
+__u32 page_size = 0;
+
 SEC("cgroup/setsockopt")
 int sockopt_qos_to_cc(struct bpf_sockopt *ctx)
 {
@@ -19,7 +21,7 @@ int sockopt_qos_to_cc(struct bpf_sockopt *ctx)
 	char cc_cubic[TCP_CA_NAME_MAX] = "cubic";
 
 	if (ctx->level != SOL_IPV6 || ctx->optname != IPV6_TCLASS)
-		return 1;
+		goto out;
 
 	if (optval + 1 > optval_end)
 		return 0; /* EPERM, bounds check */
@@ -36,4 +38,10 @@ int sockopt_qos_to_cc(struct bpf_sockopt *ctx)
 		return 0;
 	}
 	return 1;
+
+out:
+	/* optval larger than PAGE_SIZE use kernel's buffer. */
+	if (ctx->optlen > page_size)
+		ctx->optlen = 0;
+	return 1;
 }
@@ -37,7 +37,7 @@ int _getsockopt(struct bpf_sockopt *ctx)
 	/* Bypass AF_NETLINK. */
 	sk = ctx->sk;
 	if (sk && sk->family == AF_NETLINK)
-		return 1;
+		goto out;
 
 	/* Make sure bpf_get_netns_cookie is callable.
 	 */
@@ -52,8 +52,7 @@ int _getsockopt(struct bpf_sockopt *ctx)
 		 * let next BPF program in the cgroup chain or kernel
 		 * handle it.
		 */
-		ctx->optlen = 0; /* bypass optval>PAGE_SIZE */
-		return 1;
+		goto out;
 	}
 
 	if (ctx->level == SOL_SOCKET && ctx->optname == SO_SNDBUF) {
@@ -61,7 +60,7 @@ int _getsockopt(struct bpf_sockopt *ctx)
 		 * let next BPF program in the cgroup chain or kernel
 		 * handle it.
 		 */
-		return 1;
+		goto out;
 	}
 
 	if (ctx->level == SOL_TCP && ctx->optname == TCP_CONGESTION) {
@@ -69,7 +68,7 @@ int _getsockopt(struct bpf_sockopt *ctx)
 		 * let next BPF program in the cgroup chain or kernel
 		 * handle it.
 		 */
-		return 1;
+		goto out;
 	}
 
 	if (ctx->level == SOL_TCP && ctx->optname == TCP_ZEROCOPY_RECEIVE) {
@@ -85,7 +84,7 @@ int _getsockopt(struct bpf_sockopt *ctx)
 		if (((struct tcp_zerocopy_receive *)optval)->address != 0)
 			return 0; /* unexpected data */
 
-		return 1;
+		goto out;
 	}
 
 	if (ctx->level == SOL_IP && ctx->optname == IP_FREEBIND) {
@@ -129,6 +128,12 @@ int _getsockopt(struct bpf_sockopt *ctx)
 	ctx->optlen = 1;
 
 	return 1;
+
+out:
+	/* optval larger than PAGE_SIZE use kernel's buffer. */
+	if (ctx->optlen > page_size)
+		ctx->optlen = 0;
+	return 1;
 }
 
 SEC("cgroup/setsockopt")
@@ -142,7 +147,7 @@ int _setsockopt(struct bpf_sockopt *ctx)
 	/* Bypass AF_NETLINK. */
 	sk = ctx->sk;
 	if (sk && sk->family == AF_NETLINK)
-		return 1;
+		goto out;
 
 	/* Make sure bpf_get_netns_cookie is callable.
 	 */
@@ -224,4 +229,10 @@ int _setsockopt(struct bpf_sockopt *ctx)
 	 */
 
 	return 1;
+
+out:
+	/* optval larger than PAGE_SIZE use kernel's buffer. */
+	if (ctx->optlen > page_size)
+		ctx->optlen = 0;
+	return 1;
 }
@@ -10,6 +10,8 @@
 static __attribute__ ((noinline))
 int f0(int var, struct __sk_buff *skb)
 {
+	asm volatile ("");
+
 	return skb->len;
 }
 
tools/testing/selftests/bpf/progs/test_task_under_cgroup.c (new file, 51 lines)
@@ -0,0 +1,51 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Bytedance */

#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>

#include "bpf_misc.h"

struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;
long bpf_task_under_cgroup(struct task_struct *task, struct cgroup *ancestor) __ksym;
void bpf_cgroup_release(struct cgroup *p) __ksym;
struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
void bpf_task_release(struct task_struct *p) __ksym;

const volatile int local_pid;
const volatile __u64 cgid;
int remote_pid;

SEC("tp_btf/task_newtask")
int BPF_PROG(handle__task_newtask, struct task_struct *task, u64 clone_flags)
{
	struct cgroup *cgrp = NULL;
	struct task_struct *acquired;

	if (local_pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;

	acquired = bpf_task_acquire(task);
	if (!acquired)
		return 0;

	if (local_pid == acquired->tgid)
		goto out;

	cgrp = bpf_cgroup_from_id(cgid);
	if (!cgrp)
		goto out;

	if (bpf_task_under_cgroup(acquired, cgrp))
		remote_pid = acquired->tgid;

out:
	if (cgrp)
		bpf_cgroup_release(cgrp);
	bpf_task_release(acquired);

	return 0;
}

char _license[] SEC("license") = "GPL";
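A sketch of the userspace side this program implies (the real harness lives under prog_tests/; the skeleton name follows the usual generated naming, and error handling is trimmed): pin cgid to the target cgroup's ID, record the test's own pid, then fork so the task_newtask tracepoint fires; remote_pid turning nonzero means bpf_task_under_cgroup() matched the child.

#include <unistd.h>
#include "test_task_under_cgroup.skel.h"	/* assumed generated skeleton */

static int run_under_cgroup_check(__u64 cgroup_id)
{
	struct test_task_under_cgroup *skel;

	skel = test_task_under_cgroup__open();
	if (!skel)
		return -1;
	skel->rodata->local_pid = getpid();
	skel->rodata->cgid = cgroup_id;

	if (test_task_under_cgroup__load(skel) ||
	    test_task_under_cgroup__attach(skel))
		goto out;

	if (fork() == 0)	/* child fires the tracepoint */
		_exit(0);

	/* a nonzero skel->bss->remote_pid now means the kfunc matched */
out:
	test_task_under_cgroup__destroy(skel);
	return 0;
}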
tools/testing/selftests/bpf/progs/verifier_subprog_precision.c (new file, 536 lines)
@@ -0,0 +1,536 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */

#include <errno.h>
#include <string.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))

int vals[] SEC(".data.vals") = {1, 2, 3, 4};

__naked __noinline __used
static unsigned long identity_subprog()
{
	/* the simplest *static* 64-bit identity function */
	asm volatile (
		"r0 = r1;"
		"exit;"
	);
}

__noinline __used
unsigned long global_identity_subprog(__u64 x)
{
	/* the simplest *global* 64-bit identity function */
	return x;
}

__naked __noinline __used
static unsigned long callback_subprog()
{
	/* the simplest callback function */
	asm volatile (
		"r0 = 0;"
		"exit;"
	);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("7: (0f) r1 += r0")
__msg("mark_precise: frame0: regs=r0 stack= before 6: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r0 stack= before 5: (27) r0 *= 4")
__msg("mark_precise: frame0: regs=r0 stack= before 11: (95) exit")
__msg("mark_precise: frame1: regs=r0 stack= before 10: (bf) r0 = r1")
__msg("mark_precise: frame1: regs=r1 stack= before 4: (85) call pc+5")
__msg("mark_precise: frame0: regs=r1 stack= before 3: (bf) r1 = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
__naked int subprog_result_precise(void)
{
	asm volatile (
		"r6 = 3;"
		/* pass r6 through r1 into subprog to get it back as r0;
		 * this whole chain will have to be marked as precise later
		 */
		"r1 = r6;"
		"call identity_subprog;"
		/* now use subprog's returned value (which is a
		 * r6 -> r1 -> r0 chain), as index into vals array, forcing
		 * all of that to be known precisely
		 */
		"r0 *= 4;"
		"r1 = %[vals];"
		/* here r0->r1->r6 chain is forced to be precise and has to be
		 * propagated back to the beginning, including through the
		 * subprog call
		 */
		"r1 += r0;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals)
		: __clobber_common, "r6"
	);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("9: (0f) r1 += r0")
__msg("mark_precise: frame0: last_idx 9 first_idx 0")
__msg("mark_precise: frame0: regs=r0 stack= before 8: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r0 stack= before 7: (27) r0 *= 4")
__msg("mark_precise: frame0: regs=r0 stack= before 5: (a5) if r0 < 0x4 goto pc+1")
__msg("mark_precise: frame0: regs=r0 stack= before 4: (85) call pc+7")
__naked int global_subprog_result_precise(void)
{
	asm volatile (
		"r6 = 3;"
		/* pass r6 through r1 into subprog to get it back as r0;
		 * given global_identity_subprog is global, precision won't
		 * propagate all the way back to r6
		 */
		"r1 = r6;"
		"call global_identity_subprog;"
		/* now use subprog's returned value (which is unknown now, so
		 * we need to clamp it), as index into vals array, forcing r0
		 * to be marked precise (with no effect on r6, though)
		 */
		"if r0 < %[vals_arr_sz] goto 1f;"
		"r0 = %[vals_arr_sz] - 1;"
		"1:"
		"r0 *= 4;"
		"r1 = %[vals];"
		/* here r0 is forced to be precise and has to be
		 * propagated back to the global subprog call, but it
		 * shouldn't go all the way to mark r6 as precise
		 */
		"r1 += r0;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals),
		  __imm_const(vals_arr_sz, ARRAY_SIZE(vals))
		: __clobber_common, "r6"
	);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("14: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 14 first_idx 10")
__msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 11: (25) if r6 > 0x3 goto pc+4")
__msg("mark_precise: frame0: regs=r6 stack= before 10: (bf) r6 = r0")
__msg("mark_precise: frame0: parent state regs=r0 stack=:")
__msg("mark_precise: frame0: last_idx 18 first_idx 0")
__msg("mark_precise: frame0: regs=r0 stack= before 18: (95) exit")
__naked int callback_result_precise(void)
{
	asm volatile (
		"r6 = 3;"

		/* call subprog and use result; r0 shouldn't propagate back to
		 * callback_subprog
		 */
		"r1 = r6;"			/* nr_loops */
		"r2 = %[callback_subprog];"	/* callback_fn */
		"r3 = 0;"			/* callback_ctx */
		"r4 = 0;"			/* flags */
		"call %[bpf_loop];"

		"r6 = r0;"
		"if r6 > 3 goto 1f;"
		"r6 *= 4;"
		"r1 = %[vals];"
		/* here r6 is forced to be precise and has to be propagated
		 * back to the bpf_loop() call, but not beyond
		 */
		"r1 += r6;"
		"r0 = *(u32 *)(r1 + 0);"
		"1:"
		"exit;"
		:
		: __imm_ptr(vals),
		  __imm_ptr(callback_subprog),
		  __imm(bpf_loop)
		: __clobber_common, "r6"
	);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("7: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 7 first_idx 0")
__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 5: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 11: (95) exit")
__msg("mark_precise: frame1: regs= stack= before 10: (bf) r0 = r1")
__msg("mark_precise: frame1: regs= stack= before 4: (85) call pc+5")
__msg("mark_precise: frame0: regs=r6 stack= before 3: (b7) r1 = 0")
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
__naked int parent_callee_saved_reg_precise(void)
{
	asm volatile (
		"r6 = 3;"

		/* call subprog and ignore result; we need this call only to
		 * complicate jump history
		 */
		"r1 = 0;"
		"call identity_subprog;"

		"r6 *= 4;"
		"r1 = %[vals];"
		/* here r6 is forced to be precise and has to be propagated
		 * back to the beginning, handling (and ignoring) subprog call
		 */
		"r1 += r6;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals)
		: __clobber_common, "r6"
	);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("7: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 7 first_idx 0")
__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 5: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 4: (85) call pc+5")
__msg("mark_precise: frame0: regs=r6 stack= before 3: (b7) r1 = 0")
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
__naked int parent_callee_saved_reg_precise_global(void)
{
	asm volatile (
		"r6 = 3;"

		/* call subprog and ignore result; we need this call only to
		 * complicate jump history
		 */
		"r1 = 0;"
		"call global_identity_subprog;"

		"r6 *= 4;"
		"r1 = %[vals];"
		/* here r6 is forced to be precise and has to be propagated
		 * back to the beginning, handling (and ignoring) subprog call
		 */
		"r1 += r6;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals)
		: __clobber_common, "r6"
	);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("12: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 12 first_idx 10")
__msg("mark_precise: frame0: regs=r6 stack= before 11: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 10: (27) r6 *= 4")
__msg("mark_precise: frame0: parent state regs=r6 stack=:")
__msg("mark_precise: frame0: last_idx 16 first_idx 0")
__msg("mark_precise: frame0: regs=r6 stack= before 16: (95) exit")
__msg("mark_precise: frame1: regs= stack= before 15: (b7) r0 = 0")
__msg("mark_precise: frame1: regs= stack= before 9: (85) call bpf_loop#181")
__msg("mark_precise: frame0: regs=r6 stack= before 8: (b7) r4 = 0")
__msg("mark_precise: frame0: regs=r6 stack= before 7: (b7) r3 = 0")
__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r2 = r8")
__msg("mark_precise: frame0: regs=r6 stack= before 5: (b7) r1 = 1")
__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
__naked int parent_callee_saved_reg_precise_with_callback(void)
{
	asm volatile (
		"r6 = 3;"

		/* call subprog and ignore result; we need this call only to
		 * complicate jump history
		 */
		"r1 = 1;"			/* nr_loops */
		"r2 = %[callback_subprog];"	/* callback_fn */
		"r3 = 0;"			/* callback_ctx */
		"r4 = 0;"			/* flags */
		"call %[bpf_loop];"

		"r6 *= 4;"
		"r1 = %[vals];"
		/* here r6 is forced to be precise and has to be propagated
		 * back to the beginning, handling (and ignoring) callback call
		 */
		"r1 += r6;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals),
		  __imm_ptr(callback_subprog),
		  __imm(bpf_loop)
		: __clobber_common, "r6"
	);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("9: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 9 first_idx 6")
__msg("mark_precise: frame0: regs=r6 stack= before 8: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 7: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 6: (79) r6 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: parent state regs= stack=-8:")
__msg("mark_precise: frame0: last_idx 13 first_idx 0")
__msg("mark_precise: frame0: regs= stack=-8 before 13: (95) exit")
__msg("mark_precise: frame1: regs= stack= before 12: (bf) r0 = r1")
__msg("mark_precise: frame1: regs= stack= before 5: (85) call pc+6")
__msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r1 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
__naked int parent_stack_slot_precise(void)
{
	asm volatile (
		/* spill reg */
		"r6 = 3;"
		"*(u64 *)(r10 - 8) = r6;"

		/* call subprog and ignore result; we need this call only to
		 * complicate jump history
		 */
		"r1 = 0;"
		"call identity_subprog;"

		/* restore reg from stack; in this case we'll be carrying
		 * stack mask when going back into subprog through jump
		 * history
		 */
		"r6 = *(u64 *)(r10 - 8);"

		"r6 *= 4;"
		"r1 = %[vals];"
		/* here r6 is forced to be precise and has to be propagated
		 * back to the beginning, handling (and ignoring) subprog call
		 */
		"r1 += r6;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals)
		: __clobber_common, "r6"
	);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("9: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 9 first_idx 6")
__msg("mark_precise: frame0: regs=r6 stack= before 8: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 7: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 6: (79) r6 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: parent state regs= stack=-8:")
__msg("mark_precise: frame0: last_idx 5 first_idx 0")
__msg("mark_precise: frame0: regs= stack=-8 before 5: (85) call pc+6")
__msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r1 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
__naked int parent_stack_slot_precise_global(void)
{
	asm volatile (
		/* spill reg */
		"r6 = 3;"
		"*(u64 *)(r10 - 8) = r6;"

		/* call subprog and ignore result; we need this call only to
		 * complicate jump history
		 */
		"r1 = 0;"
		"call global_identity_subprog;"

		/* restore reg from stack; in this case we'll be carrying
		 * stack mask when going back into subprog through jump
		 * history
		 */
		"r6 = *(u64 *)(r10 - 8);"

		"r6 *= 4;"
		"r1 = %[vals];"
		/* here r6 is forced to be precise and has to be propagated
		 * back to the beginning, handling (and ignoring) subprog call
		 */
		"r1 += r6;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals)
		: __clobber_common, "r6"
	);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("14: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 14 first_idx 11")
__msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 11: (79) r6 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: parent state regs= stack=-8:")
__msg("mark_precise: frame0: last_idx 18 first_idx 0")
__msg("mark_precise: frame0: regs= stack=-8 before 18: (95) exit")
__msg("mark_precise: frame1: regs= stack= before 17: (b7) r0 = 0")
__msg("mark_precise: frame1: regs= stack= before 10: (85) call bpf_loop#181")
__msg("mark_precise: frame0: regs= stack=-8 before 9: (b7) r4 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 8: (b7) r3 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 7: (bf) r2 = r8")
__msg("mark_precise: frame0: regs= stack=-8 before 6: (bf) r1 = r6")
__msg("mark_precise: frame0: regs= stack=-8 before 5: (7b) *(u64 *)(r10 -8) = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
__naked int parent_stack_slot_precise_with_callback(void)
{
	asm volatile (
		/* spill reg */
		"r6 = 3;"
		"*(u64 *)(r10 - 8) = r6;"

		/* ensure we have callback frame in jump history */
		"r1 = r6;"			/* nr_loops */
		"r2 = %[callback_subprog];"	/* callback_fn */
		"r3 = 0;"			/* callback_ctx */
		"r4 = 0;"			/* flags */
		"call %[bpf_loop];"

		/* restore reg from stack; in this case we'll be carrying
		 * stack mask when going back into subprog through jump
		 * history
		 */
		"r6 = *(u64 *)(r10 - 8);"

		"r6 *= 4;"
		"r1 = %[vals];"
		/* here r6 is forced to be precise and has to be propagated
		 * back to the beginning, handling (and ignoring) subprog call
		 */
		"r1 += r6;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals),
		  __imm_ptr(callback_subprog),
		  __imm(bpf_loop)
		: __clobber_common, "r6"
	);
}

__noinline __used
static __u64 subprog_with_precise_arg(__u64 x)
{
	return vals[x]; /* x is forced to be precise */
}

SEC("?raw_tp")
__success __log_level(2)
__msg("8: (0f) r2 += r1")
__msg("mark_precise: frame1: last_idx 8 first_idx 0")
__msg("mark_precise: frame1: regs=r1 stack= before 6: (18) r2 = ")
__msg("mark_precise: frame1: regs=r1 stack= before 5: (67) r1 <<= 2")
__msg("mark_precise: frame1: regs=r1 stack= before 2: (85) call pc+2")
__msg("mark_precise: frame0: regs=r1 stack= before 1: (bf) r1 = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 0: (b7) r6 = 3")
__naked int subprog_arg_precise(void)
{
	asm volatile (
		"r6 = 3;"
		"r1 = r6;"
		/* subprog_with_precise_arg expects its argument to be
		 * precise, so r1->r6 will be marked precise from inside the
		 * subprog
		 */
		"call subprog_with_precise_arg;"
		"r0 += r6;"
		"exit;"
		:
		:
		: __clobber_common, "r6"
	);
}

/* r1 is pointer to stack slot;
 * r2 is a register to spill into that slot
 * subprog also spills r2 into its own stack slot
 */
__naked __noinline __used
static __u64 subprog_spill_reg_precise(void)
{
	asm volatile (
		/* spill to parent stack */
		"*(u64 *)(r1 + 0) = r2;"
		/* spill to subprog stack (we use -16 offset to avoid
		 * accidental confusion with parent's -8 stack slot in
		 * verifier log output)
		 */
		"*(u64 *)(r10 - 16) = r2;"
		/* use both spills as return result to propagate precision everywhere */
		"r0 = *(u64 *)(r10 - 16);"
		"r2 = *(u64 *)(r1 + 0);"
		"r0 += r2;"
		"exit;"
	);
}

SEC("?raw_tp")
__success __log_level(2)
/* precision backtracking can't currently handle stack access not through r10,
 * so we won't be able to mark stack slot fp-8 as precise, and so will
 * fallback to forcing all as precise
 */
__msg("mark_precise: frame0: falling back to forcing all scalars precise")
__naked int subprog_spill_into_parent_stack_slot_precise(void)
{
	asm volatile (
		"r6 = 1;"

		/* pass pointer to stack slot and r6 to subprog;
		 * r6 will be marked precise and spilled into fp-8 slot, which
		 * also should be marked precise
		 */
		"r1 = r10;"
		"r1 += -8;"
		"r2 = r6;"
		"call subprog_spill_reg_precise;"

		/* restore reg from stack; in this case we'll be carrying
		 * stack mask when going back into subprog through jump
		 * history
		 */
		"r7 = *(u64 *)(r10 - 8);"

		"r7 *= 4;"
		"r1 = %[vals];"
		/* here r7 is forced to be precise and has to be propagated
		 * back to the beginning, handling subprog call and logic
		 */
		"r1 += r7;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals)
		: __clobber_common, "r6", "r7"
	);
}

__naked __noinline __used
static __u64 subprog_with_checkpoint(void)
{
	asm volatile (
		"r0 = 0;"
		/* guaranteed checkpoint if BPF_F_TEST_STATE_FREQ is used */
		"goto +0;"
		"exit;"
	);
}

char _license[] SEC("license") = "GPL";
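All of the above relies on the declarative test_loader conventions (__success, __log_level(2), and ordered __msg() matches against the verifier log). Wiring the file into the runner is typically a single call from a prog_tests entry; a sketch, assuming the usual generated skeleton header:

#include "test_progs.h"
#include "verifier_subprog_precision.skel.h"

void test_verifier_subprog_precision(void)
{
	/* loads each program, captures the verifier log at the requested
	 * level, and checks the __msg() patterns in order
	 */
	RUN_TESTS(verifier_subprog_precision);
}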
@@ -77,7 +77,9 @@ int rx(struct xdp_md *ctx)
 	}
 
 	err = bpf_xdp_metadata_rx_timestamp(ctx, &meta->rx_timestamp);
-	if (err)
+	if (!err)
+		meta->xdp_timestamp = bpf_ktime_get_tai_ns();
+	else
 		meta->rx_timestamp = 0; /* Used by AF_XDP as not avail signal */
 
 	err = bpf_xdp_metadata_rx_hash(ctx, &meta->rx_hash, &meta->rx_hash_type);
@@ -714,7 +714,13 @@ static struct test_state test_states[ARRAY_SIZE(prog_test_defs)];
 
 const char *argp_program_version = "test_progs 0.1";
 const char *argp_program_bug_address = "<bpf@vger.kernel.org>";
-static const char argp_program_doc[] = "BPF selftests test runner";
+static const char argp_program_doc[] =
+	"BPF selftests test runner\v"
+	"Options accepting the NAMES parameter take either a comma-separated list\n"
+	"of test names, or a filename prefixed with @. The file contains one name\n"
+	"(or wildcard pattern) per line, and comments beginning with # are ignored.\n"
+	"\n"
+	"These options can be passed repeatedly to read multiple files.\n";
 
 enum ARG_KEYS {
 	ARG_TEST_NUM = 'n',
@@ -797,6 +803,7 @@ extern int extra_prog_load_log_flags;
 static error_t parse_arg(int key, char *arg, struct argp_state *state)
 {
 	struct test_env *env = state->input;
+	int err = 0;
 
 	switch (key) {
 	case ARG_TEST_NUM: {
@@ -821,18 +828,28 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
 	}
 	case ARG_TEST_NAME_GLOB_ALLOWLIST:
 	case ARG_TEST_NAME: {
-		if (parse_test_list(arg,
-				    &env->test_selector.whitelist,
-				    key == ARG_TEST_NAME_GLOB_ALLOWLIST))
-			return -ENOMEM;
+		if (arg[0] == '@')
+			err = parse_test_list_file(arg + 1,
+						   &env->test_selector.whitelist,
+						   key == ARG_TEST_NAME_GLOB_ALLOWLIST);
+		else
+			err = parse_test_list(arg,
+					      &env->test_selector.whitelist,
+					      key == ARG_TEST_NAME_GLOB_ALLOWLIST);
+
 		break;
 	}
 	case ARG_TEST_NAME_GLOB_DENYLIST:
 	case ARG_TEST_NAME_BLACKLIST: {
-		if (parse_test_list(arg,
-				    &env->test_selector.blacklist,
-				    key == ARG_TEST_NAME_GLOB_DENYLIST))
-			return -ENOMEM;
+		if (arg[0] == '@')
+			err = parse_test_list_file(arg + 1,
+						   &env->test_selector.blacklist,
+						   key == ARG_TEST_NAME_GLOB_DENYLIST);
+		else
+			err = parse_test_list(arg,
+					      &env->test_selector.blacklist,
+					      key == ARG_TEST_NAME_GLOB_DENYLIST);
+
 		break;
 	}
 	case ARG_VERIFIER_STATS:
@@ -900,7 +917,7 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
 	default:
 		return ARGP_ERR_UNKNOWN;
 	}
-	return 0;
+	return err;
 }
 
 /*
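On the command line the effect is that the existing name-taking options now accept a file indirection, e.g. ./test_progs -a @allow.txt or ./test_progs -a @allow.txt -d @deny.txt (option letters per the existing allowlist/denylist flags), with one name or wildcard pattern per line and # comments ignored, exactly as the updated argp_program_doc above describes.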
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
 /* Copyright (C) 2019 Netronome Systems, Inc. */
 /* Copyright (C) 2020 Facebook, Inc. */
+#include <ctype.h>
 #include <stdlib.h>
 #include <string.h>
 #include <errno.h>
@@ -70,92 +71,168 @@ int parse_num_list(const char *s, bool **num_set, int *num_set_len)
 	return 0;
 }
 
+static int do_insert_test(struct test_filter_set *set,
+			  char *test_str,
+			  char *subtest_str)
+{
+	struct test_filter *tmp, *test;
+	char **ctmp;
+	int i;
+
+	for (i = 0; i < set->cnt; i++) {
+		test = &set->tests[i];
+
+		if (strcmp(test_str, test->name) == 0) {
+			free(test_str);
+			goto subtest;
+		}
+	}
+
+	tmp = realloc(set->tests, sizeof(*test) * (set->cnt + 1));
+	if (!tmp)
+		return -ENOMEM;
+
+	set->tests = tmp;
+	test = &set->tests[set->cnt];
+
+	test->name = test_str;
+	test->subtests = NULL;
+	test->subtest_cnt = 0;
+
+	set->cnt++;
+
+subtest:
+	if (!subtest_str)
+		return 0;
+
+	for (i = 0; i < test->subtest_cnt; i++) {
+		if (strcmp(subtest_str, test->subtests[i]) == 0) {
+			free(subtest_str);
+			return 0;
+		}
+	}
+
+	ctmp = realloc(test->subtests,
+		       sizeof(*test->subtests) * (test->subtest_cnt + 1));
+	if (!ctmp)
+		return -ENOMEM;
+
+	test->subtests = ctmp;
+	test->subtests[test->subtest_cnt] = subtest_str;
+
+	test->subtest_cnt++;
+
+	return 0;
+}
+
+static int insert_test(struct test_filter_set *set,
+		       char *test_spec,
+		       bool is_glob_pattern)
+{
+	char *pattern, *subtest_str, *ext_test_str, *ext_subtest_str = NULL;
+	int glob_chars = 0;
+
+	if (is_glob_pattern) {
+		pattern = "%s";
+	} else {
+		pattern = "*%s*";
+		glob_chars = 2;
+	}
+
+	subtest_str = strchr(test_spec, '/');
+	if (subtest_str) {
+		*subtest_str = '\0';
+		subtest_str += 1;
+	}
+
+	ext_test_str = malloc(strlen(test_spec) + glob_chars + 1);
+	if (!ext_test_str)
+		goto err;
+
+	sprintf(ext_test_str, pattern, test_spec);
+
+	if (subtest_str) {
+		ext_subtest_str = malloc(strlen(subtest_str) + glob_chars + 1);
+		if (!ext_subtest_str)
+			goto err;
+
+		sprintf(ext_subtest_str, pattern, subtest_str);
+	}
+
+	return do_insert_test(set, ext_test_str, ext_subtest_str);
+
+err:
+	free(ext_test_str);
+	free(ext_subtest_str);
+
+	return -ENOMEM;
+}
+
+int parse_test_list_file(const char *path,
+			 struct test_filter_set *set,
+			 bool is_glob_pattern)
+{
+	char *buf = NULL, *capture_start, *capture_end, *scan_end;
+	size_t buflen = 0;
+	int err = 0;
+	FILE *f;
+
+	f = fopen(path, "r");
+	if (!f) {
+		err = -errno;
+		fprintf(stderr, "Failed to open '%s': %d\n", path, err);
+		return err;
+	}
+
+	while (getline(&buf, &buflen, f) != -1) {
+		capture_start = buf;
+
+		while (isspace(*capture_start))
+			++capture_start;
+
+		capture_end = capture_start;
+		scan_end = capture_start;
+
+		while (*scan_end && *scan_end != '#') {
+			if (!isspace(*scan_end))
+				capture_end = scan_end;
+
+			++scan_end;
+		}
+
+		if (capture_end == capture_start)
+			continue;
+
+		*(++capture_end) = '\0';
+
+		err = insert_test(set, capture_start, is_glob_pattern);
+		if (err)
+			break;
+	}
+
+	fclose(f);
+	return err;
+}
+
 int parse_test_list(const char *s,
 		    struct test_filter_set *set,
 		    bool is_glob_pattern)
 {
-	char *input, *state = NULL, *next;
-	struct test_filter *tmp, *tests = NULL;
-	int i, j, cnt = 0;
+	char *input, *state = NULL, *test_spec;
+	int err = 0;
 
 	input = strdup(s);
 	if (!input)
 		return -ENOMEM;
 
-	while ((next = strtok_r(state ? NULL : input, ",", &state))) {
-		char *subtest_str = strchr(next, '/');
-		char *pattern = NULL;
-		int glob_chars = 0;
-
-		tmp = realloc(tests, sizeof(*tests) * (cnt + 1));
-		if (!tmp)
-			goto err;
-		tests = tmp;
-
-		tests[cnt].subtest_cnt = 0;
-		tests[cnt].subtests = NULL;
-
-		if (is_glob_pattern) {
-			pattern = "%s";
-		} else {
-			pattern = "*%s*";
-			glob_chars = 2;
-		}
-
-		if (subtest_str) {
-			char **tmp_subtests = NULL;
-			int subtest_cnt = tests[cnt].subtest_cnt;
-
-			*subtest_str = '\0';
-			subtest_str += 1;
-			tmp_subtests = realloc(tests[cnt].subtests,
-					       sizeof(*tmp_subtests) *
-					       (subtest_cnt + 1));
-			if (!tmp_subtests)
-				goto err;
-			tests[cnt].subtests = tmp_subtests;
-
-			tests[cnt].subtests[subtest_cnt] =
-				malloc(strlen(subtest_str) + glob_chars + 1);
-			if (!tests[cnt].subtests[subtest_cnt])
-				goto err;
-			sprintf(tests[cnt].subtests[subtest_cnt],
-				pattern,
-				subtest_str);
-
-			tests[cnt].subtest_cnt++;
-		}
-
-		tests[cnt].name = malloc(strlen(next) + glob_chars + 1);
-		if (!tests[cnt].name)
-			goto err;
-		sprintf(tests[cnt].name, pattern, next);
-
-		cnt++;
+	while ((test_spec = strtok_r(state ? NULL : input, ",", &state))) {
+		err = insert_test(set, test_spec, is_glob_pattern);
+		if (err)
+			break;
 	}
 
-	tmp = realloc(set->tests, sizeof(*tests) * (cnt + set->cnt));
-	if (!tmp)
-		goto err;
-
-	memcpy(tmp + set->cnt, tests, sizeof(*tests) * cnt);
-	set->tests = tmp;
-	set->cnt += cnt;
-
-	free(tests);
 	free(input);
-	return 0;
-
-err:
-	for (i = 0; i < cnt; i++) {
-		for (j = 0; j < tests[i].subtest_cnt; j++)
-			free(tests[i].subtests[j]);
-
-		free(tests[i].name);
-	}
-	free(tests);
-	free(input);
-	return -ENOMEM;
+	return err;
 }
 
 __u32 link_info_prog_id(const struct bpf_link *link, struct bpf_link_info *info)
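Note that do_insert_test() dedupes by exact (already-globbed) name before growing the set, so reading the same list twice is harmless. A minimal harness for the new entry point, as a sketch (it assumes the full struct test_filter_set definition, with the tests/cnt/name fields used above, is in scope via the selftest headers):

#include <stdio.h>
#include "testing_helpers.h"

int main(void)
{
	struct test_filter_set set = {};
	int i;

	/* tests.lst: one test name or glob per line, '#' starts a comment;
	 * false wraps plain names as *name* globs, matching -t/-b behavior
	 */
	if (parse_test_list_file("tests.lst", &set, false))
		return 1;

	for (i = 0; i < set.cnt; i++)
		printf("selected: %s\n", set.tests[i].name);
	return 0;
}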
@@ -20,5 +20,8 @@ struct test_filter_set;
 int parse_test_list(const char *s,
 		    struct test_filter_set *test_set,
 		    bool is_glob_pattern);
+int parse_test_list_file(const char *path,
+			 struct test_filter_set *test_set,
+			 bool is_glob_pattern);
 
 __u64 read_perf_max_sample_freq(void);
@@ -38,25 +38,24 @@
 	.fixup_map_array_48b = { 1 },
 	.result = VERBOSE_ACCEPT,
 	.errstr =
-	"26: (85) call bpf_probe_read_kernel#113\
-	last_idx 26 first_idx 20\
-	regs=4 stack=0 before 25\
-	regs=4 stack=0 before 24\
-	regs=4 stack=0 before 23\
-	regs=4 stack=0 before 22\
-	regs=4 stack=0 before 20\
-	parent didn't have regs=4 stack=0 marks\
-	last_idx 19 first_idx 10\
-	regs=4 stack=0 before 19\
-	regs=200 stack=0 before 18\
-	regs=300 stack=0 before 17\
-	regs=201 stack=0 before 15\
-	regs=201 stack=0 before 14\
-	regs=200 stack=0 before 13\
-	regs=200 stack=0 before 12\
-	regs=200 stack=0 before 11\
-	regs=200 stack=0 before 10\
-	parent already had regs=0 stack=0 marks",
+	"mark_precise: frame0: last_idx 26 first_idx 20\
+	mark_precise: frame0: regs=r2 stack= before 25\
+	mark_precise: frame0: regs=r2 stack= before 24\
+	mark_precise: frame0: regs=r2 stack= before 23\
+	mark_precise: frame0: regs=r2 stack= before 22\
+	mark_precise: frame0: regs=r2 stack= before 20\
+	mark_precise: frame0: parent state regs=r2 stack=:\
+	mark_precise: frame0: last_idx 19 first_idx 10\
+	mark_precise: frame0: regs=r2 stack= before 19\
+	mark_precise: frame0: regs=r9 stack= before 18\
+	mark_precise: frame0: regs=r8,r9 stack= before 17\
+	mark_precise: frame0: regs=r0,r9 stack= before 15\
+	mark_precise: frame0: regs=r0,r9 stack= before 14\
+	mark_precise: frame0: regs=r9 stack= before 13\
+	mark_precise: frame0: regs=r9 stack= before 12\
+	mark_precise: frame0: regs=r9 stack= before 11\
+	mark_precise: frame0: regs=r9 stack= before 10\
+	mark_precise: frame0: parent state regs= stack=:",
 },
 {
 	"precise: test 2",
@@ -100,20 +99,20 @@
 	.flags = BPF_F_TEST_STATE_FREQ,
 	.errstr =
 	"26: (85) call bpf_probe_read_kernel#113\
-	last_idx 26 first_idx 22\
-	regs=4 stack=0 before 25\
-	regs=4 stack=0 before 24\
-	regs=4 stack=0 before 23\
-	regs=4 stack=0 before 22\
-	parent didn't have regs=4 stack=0 marks\
-	last_idx 20 first_idx 20\
-	regs=4 stack=0 before 20\
-	parent didn't have regs=4 stack=0 marks\
-	last_idx 19 first_idx 17\
-	regs=4 stack=0 before 19\
-	regs=200 stack=0 before 18\
-	regs=300 stack=0 before 17\
-	parent already had regs=0 stack=0 marks",
+	mark_precise: frame0: last_idx 26 first_idx 22\
+	mark_precise: frame0: regs=r2 stack= before 25\
+	mark_precise: frame0: regs=r2 stack= before 24\
+	mark_precise: frame0: regs=r2 stack= before 23\
+	mark_precise: frame0: regs=r2 stack= before 22\
+	mark_precise: frame0: parent state regs=r2 stack=:\
+	mark_precise: frame0: last_idx 20 first_idx 20\
+	mark_precise: frame0: regs=r2 stack= before 20\
+	mark_precise: frame0: parent state regs=r2 stack=:\
+	mark_precise: frame0: last_idx 19 first_idx 17\
+	mark_precise: frame0: regs=r2 stack= before 19\
+	mark_precise: frame0: regs=r9 stack= before 18\
+	mark_precise: frame0: regs=r8,r9 stack= before 17\
+	mark_precise: frame0: parent state regs= stack=:",
 },
 {
 	"precise: cross frame pruning",
@@ -153,15 +152,16 @@
 	},
 	.prog_type = BPF_PROG_TYPE_XDP,
 	.flags = BPF_F_TEST_STATE_FREQ,
-	.errstr = "5: (2d) if r4 > r0 goto pc+0\
-	last_idx 5 first_idx 5\
-	parent didn't have regs=10 stack=0 marks\
-	last_idx 4 first_idx 2\
-	regs=10 stack=0 before 4\
-	regs=10 stack=0 before 3\
-	regs=0 stack=1 before 2\
-	last_idx 5 first_idx 5\
-	parent didn't have regs=1 stack=0 marks",
+	.errstr = "mark_precise: frame0: last_idx 5 first_idx 5\
+	mark_precise: frame0: parent state regs=r4 stack=:\
+	mark_precise: frame0: last_idx 4 first_idx 2\
+	mark_precise: frame0: regs=r4 stack= before 4\
+	mark_precise: frame0: regs=r4 stack= before 3\
+	mark_precise: frame0: regs= stack=-8 before 2\
+	mark_precise: frame0: falling back to forcing all scalars precise\
+	force_precise: frame0: forcing r0 to be precise\
+	mark_precise: frame0: last_idx 5 first_idx 5\
+	mark_precise: frame0: parent state regs= stack=:",
 	.result = VERBOSE_ACCEPT,
 	.retval = -1,
 },
@@ -179,16 +179,19 @@
 	},
 	.prog_type = BPF_PROG_TYPE_XDP,
 	.flags = BPF_F_TEST_STATE_FREQ,
-	.errstr = "last_idx 6 first_idx 6\
-	parent didn't have regs=10 stack=0 marks\
-	last_idx 5 first_idx 3\
-	regs=10 stack=0 before 5\
-	regs=10 stack=0 before 4\
-	regs=0 stack=1 before 3\
-	last_idx 6 first_idx 6\
-	parent didn't have regs=1 stack=0 marks\
-	last_idx 5 first_idx 3\
-	regs=1 stack=0 before 5",
+	.errstr = "mark_precise: frame0: last_idx 6 first_idx 6\
+	mark_precise: frame0: parent state regs=r4 stack=:\
+	mark_precise: frame0: last_idx 5 first_idx 3\
+	mark_precise: frame0: regs=r4 stack= before 5\
+	mark_precise: frame0: regs=r4 stack= before 4\
+	mark_precise: frame0: regs= stack=-8 before 3\
+	mark_precise: frame0: falling back to forcing all scalars precise\
+	force_precise: frame0: forcing r0 to be precise\
+	force_precise: frame0: forcing r0 to be precise\
+	force_precise: frame0: forcing r0 to be precise\
+	force_precise: frame0: forcing r0 to be precise\
+	mark_precise: frame0: last_idx 6 first_idx 6\
+	mark_precise: frame0: parent state regs= stack=:",
 	.result = VERBOSE_ACCEPT,
 	.retval = -1,
 },
@@ -217,3 +220,39 @@
 	.errstr = "invalid access to memory, mem_size=1 off=42 size=8",
 	.result = REJECT,
 },
+{
+	"precise: program doesn't prematurely prune branches",
+	.insns = {
+		BPF_ALU64_IMM(BPF_MOV, BPF_REG_6, 0x400),
+		BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
+		BPF_ALU64_IMM(BPF_MOV, BPF_REG_8, 0),
+		BPF_ALU64_IMM(BPF_MOV, BPF_REG_9, 0x80000000),
+		BPF_ALU64_IMM(BPF_MOD, BPF_REG_6, 0x401),
+		BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+		BPF_JMP_REG(BPF_JLE, BPF_REG_6, BPF_REG_9, 2),
+		BPF_ALU64_IMM(BPF_MOD, BPF_REG_6, 1),
+		BPF_ALU64_IMM(BPF_MOV, BPF_REG_9, 0),
+		BPF_JMP_REG(BPF_JLE, BPF_REG_6, BPF_REG_9, 1),
+		BPF_ALU64_IMM(BPF_MOV, BPF_REG_6, 0),
+		BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
+		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4),
+		BPF_LD_MAP_FD(BPF_REG_4, 0),
+		BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_4),
+		BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
+		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+		BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+		BPF_EXIT_INSN(),
+		BPF_ALU64_IMM(BPF_RSH, BPF_REG_6, 10),
+		BPF_ALU64_IMM(BPF_MUL, BPF_REG_6, 8192),
+		BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_0),
+		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
+		BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+		BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_3, 0),
+		BPF_EXIT_INSN(),
+	},
+	.fixup_map_array_48b = { 13 },
+	.prog_type = BPF_PROG_TYPE_XDP,
+	.result = REJECT,
+	.errstr = "register with unbounded min value is not allowed",
+},
|
@@ -141,6 +141,7 @@ static struct env {
 	bool verbose;
 	bool debug;
 	bool quiet;
+	bool force_checkpoints;
 	enum resfmt out_fmt;
 	bool show_version;
 	bool comparison_mode;
@@ -209,6 +210,8 @@ static const struct argp_option opts[] = {
 	{ "log-level", 'l', "LEVEL", 0, "Verifier log level (default 0 for normal mode, 1 for verbose mode)" },
 	{ "log-fixed", OPT_LOG_FIXED, NULL, 0, "Disable verifier log rotation" },
 	{ "log-size", OPT_LOG_SIZE, "BYTES", 0, "Customize verifier log size (default to 16MB)" },
+	{ "test-states", 't', NULL, 0,
+	  "Force frequent BPF verifier state checkpointing (set BPF_F_TEST_STATE_FREQ program flag)" },
 	{ "quiet", 'q', NULL, 0, "Quiet mode" },
 	{ "emit", 'e', "SPEC", 0, "Specify stats to be emitted" },
 	{ "sort", 's', "SPEC", 0, "Specify sort order" },
@@ -284,6 +287,9 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
 			argp_usage(state);
 		}
 		break;
+	case 't':
+		env.force_checkpoints = true;
+		break;
 	case 'C':
 		env.comparison_mode = true;
 		break;
@@ -989,6 +995,9 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf_program *prog)
 	/* increase chances of successful BPF object loading */
 	fixup_obj(obj, prog, base_filename);
 
+	if (env.force_checkpoints)
+		bpf_program__set_flags(prog, bpf_program__flags(prog) | BPF_F_TEST_STATE_FREQ);
+
 	err = bpf_object__load(obj);
 	env.progs_processed++;
 
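The four hunks above teach what appears to be the veristat tool a new -t ("test-states") switch: it ORs BPF_F_TEST_STATE_FREQ into each program's load flags so the verifier checkpoints and compares states much more frequently, which stresses the pruning logic. A minimal sketch of the same pattern in plain libbpf follows; the object path and program name are hypothetical.

#include <bpf/libbpf.h>
#include <linux/bpf.h>

int load_with_frequent_checkpoints(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;

	obj = bpf_object__open_file("prog.bpf.o", NULL);	/* hypothetical object */
	if (!obj)
		return -1;

	prog = bpf_object__find_program_by_name(obj, "handler");	/* hypothetical name */
	if (!prog)
		return -1;

	/* same pattern as process_prog() above: OR the flag in, then load */
	bpf_program__set_flags(prog, bpf_program__flags(prog) | BPF_F_TEST_STATE_FREQ);

	return bpf_object__load(obj);
}

With the diff applied, the same effect should be available from the command line as veristat -t (or --test-states).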
@@ -27,6 +27,7 @@
 #include <sys/mman.h>
 #include <net/if.h>
 #include <poll.h>
+#include <time.h>
 
 #include "xdp_metadata.h"
 
@@ -134,18 +135,52 @@ static void refill_rx(struct xsk *xsk, __u64 addr)
 	}
 }
 
-static void verify_xdp_metadata(void *data)
+#define NANOSEC_PER_SEC 1000000000 /* 10^9 */
+static __u64 gettime(clockid_t clock_id)
+{
+	struct timespec t;
+	int res;
+
+	/* See man clock_gettime(2) for type of clock_id's */
+	res = clock_gettime(clock_id, &t);
+
+	if (res < 0)
+		error(res, errno, "Error with clock_gettime()");
+
+	return (__u64) t.tv_sec * NANOSEC_PER_SEC + t.tv_nsec;
+}
+
+static void verify_xdp_metadata(void *data, clockid_t clock_id)
 {
 	struct xdp_meta *meta;
 
 	meta = data - sizeof(*meta);
 
-	printf("rx_timestamp: %llu\n", meta->rx_timestamp);
 	if (meta->rx_hash_err < 0)
 		printf("No rx_hash err=%d\n", meta->rx_hash_err);
 	else
 		printf("rx_hash: 0x%X with RSS type:0x%X\n",
 		       meta->rx_hash, meta->rx_hash_type);
+
+	printf("rx_timestamp: %llu (sec:%0.4f)\n", meta->rx_timestamp,
+	       (double)meta->rx_timestamp / NANOSEC_PER_SEC);
+	if (meta->rx_timestamp) {
+		__u64 usr_clock = gettime(clock_id);
+		__u64 xdp_clock = meta->xdp_timestamp;
+		__s64 delta_X = xdp_clock - meta->rx_timestamp;
+		__s64 delta_X2U = usr_clock - xdp_clock;
+
+		printf("XDP RX-time: %llu (sec:%0.4f) delta sec:%0.4f (%0.3f usec)\n",
+		       xdp_clock, (double)xdp_clock / NANOSEC_PER_SEC,
+		       (double)delta_X / NANOSEC_PER_SEC,
+		       (double)delta_X / 1000);
+
+		printf("AF_XDP time: %llu (sec:%0.4f) delta sec:%0.4f (%0.3f usec)\n",
+		       usr_clock, (double)usr_clock / NANOSEC_PER_SEC,
+		       (double)delta_X2U / NANOSEC_PER_SEC,
+		       (double)delta_X2U / 1000);
+	}
+
 }
 
 static void verify_skb_metadata(int fd)
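gettime() and the delta printing above compare three points on one clock, CLOCK_TAI by default: the NIC's RX timestamp, the new xdp_timestamp taken while the XDP program ran, and the time user space dequeued the frame. Hardware PTP clocks are typically TAI-based, which is presumably why CLOCK_TAI rather than CLOCK_REALTIME is used. A self-contained sketch of the same sampling and formatting; the 12345 ns stand-in for a NIC timestamp is invented for illustration.

#define _GNU_SOURCE	/* for CLOCK_TAI */
#include <stdio.h>
#include <stdint.h>
#include <time.h>

#define NANOSEC_PER_SEC 1000000000ULL

int main(void)
{
	struct timespec t;
	uint64_t usr_clock, rx_timestamp;
	int64_t delta;

	if (clock_gettime(CLOCK_TAI, &t))
		return 1;
	usr_clock = (uint64_t)t.tv_sec * NANOSEC_PER_SEC + t.tv_nsec;

	/* stand-in for a NIC RX timestamp taken 12.345 usec earlier */
	rx_timestamp = usr_clock - 12345;

	delta = (int64_t)(usr_clock - rx_timestamp);
	printf("delta sec:%0.4f (%0.3f usec)\n",
	       (double)delta / NANOSEC_PER_SEC, (double)delta / 1000);
	return 0;
}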
@@ -193,7 +228,7 @@ static void verify_skb_metadata(int fd)
 		printf("skb hwtstamp is not found!\n");
 }
 
-static int verify_metadata(struct xsk *rx_xsk, int rxq, int server_fd)
+static int verify_metadata(struct xsk *rx_xsk, int rxq, int server_fd, clockid_t clock_id)
 {
 	const struct xdp_desc *rx_desc;
 	struct pollfd fds[rxq + 1];
@@ -243,7 +278,8 @@ static int verify_metadata(struct xsk *rx_xsk, int rxq, int server_fd)
 			addr = xsk_umem__add_offset_to_addr(rx_desc->addr);
 			printf("%p: rx_desc[%u]->addr=%llx addr=%llx comp_addr=%llx\n",
 			       xsk, idx, rx_desc->addr, addr, comp_addr);
-			verify_xdp_metadata(xsk_umem__get_data(xsk->umem_area, addr));
+			verify_xdp_metadata(xsk_umem__get_data(xsk->umem_area, addr),
+					    clock_id);
 			xsk_ring_cons__release(&xsk->rx, 1);
 			refill_rx(xsk, comp_addr);
 		}
@@ -370,6 +406,7 @@ static void timestamping_enable(int fd, int val)
 
 int main(int argc, char *argv[])
 {
+	clockid_t clock_id = CLOCK_TAI;
 	int server_fd = -1;
 	int ret;
 	int i;
@@ -443,7 +480,7 @@ int main(int argc, char *argv[])
 		error(1, -ret, "bpf_xdp_attach");
 
 	signal(SIGINT, handle_signal);
-	ret = verify_metadata(rx_xsk, rxq, server_fd);
+	ret = verify_metadata(rx_xsk, rxq, server_fd, clock_id);
 	close(server_fd);
 	cleanup();
 	if (ret)
@@ -11,6 +11,7 @@
 
 struct xdp_meta {
 	__u64 rx_timestamp;
+	__u64 xdp_timestamp;
 	__u32 rx_hash;
 	union {
 		__u32 rx_hash_type;
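The header gains the producer-side field behind the deltas printed earlier: rx_timestamp is what the NIC reported, xdp_timestamp is when the XDP program itself handled the frame. Below is a hedged sketch of a BPF-side producer. It is not the selftest's actual program: bpf_xdp_metadata_rx_timestamp() and bpf_ktime_get_tai_ns() do exist upstream, but the program name, the plain XDP_PASS return (the real test redirects into an AF_XDP socket), and the error handling are illustrative.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "xdp_metadata.h"

extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
					 __u64 *timestamp) __ksym;

SEC("xdp")
int rx(struct xdp_md *ctx)
{
	void *data, *data_meta;
	struct xdp_meta *meta;

	/* reserve headroom in front of the packet for the metadata */
	if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(struct xdp_meta)))
		return XDP_PASS;

	data = (void *)(long)ctx->data;
	data_meta = (void *)(long)ctx->data_meta;
	meta = data_meta;
	if (data_meta + sizeof(struct xdp_meta) > data)
		return XDP_PASS;

	if (bpf_xdp_metadata_rx_timestamp(ctx, &meta->rx_timestamp))
		meta->rx_timestamp = 0;	/* no device timestamp available */

	/* the new field: when the XDP program itself saw the frame */
	meta->xdp_timestamp = bpf_ktime_get_tai_ns();

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";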