Mirror of https://github.com/edk2-porting/linux-next.git, synced 2024-11-24 02:24:28 +08:00
netdev
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEE+soXsSLHKoYyzcli6rmadz2vbToFAmWAz2EACgkQ6rmadz2v
bToqrw/9EwroZCc8GEHOKAlb/fzrMvn92rLo0ZW/cGN84QJPnx4zM6Zo0+fgLaaN
oqqztwMUwdzGC3uX3FfVXaaLKbJ/MeHeL9BXFZNW8zkRHciw4R7kIBhOdPnHyET7
uT+rQ4xPe1Mt7e9PjepKlSL5mEsxWfBkdUgsdn19Z2Vjdfr9mZMhYWYMJGcfTCD1
TwxHKBPhq5fN3IsshmMBB8IrRp1HStUKb65MgZ4dI22LJXxTsFkx5XMFXcmuqvkH
NhKj8jDcPEEh31bYcb6aG2Z4onw5F2lquygjk1Qyy5cyw45m/ipJKAXKdAyvJG+R
VZCWOET/9wbRwFSK5wxwihCuKghFiofK52i2PcGtXZh0PCouyZZneSJOKM0yVWKO
BvuJBxK4ETRnQyN6ZxhuJiEXG3/YMBBhyR2TX1LntVK9ct/k7qFVzATG49J39/sR
SYMbptBRj4a5oMJ1qn0nFVEDFkg0jTnTDNnsEpcz60Ayt6EsJ1XosO5yz2huf861
xgRMTKMseyG1/uV45tQ8ZPzbSPpBxjUi9Dl3coYsIm1a+y6clWUXcarONY5KVrpS
CR98DuFgl+E7dXuisd/Kz2p2KxxSPq8nytsmLlgOvrUqhwiXqB+TKN8EHgIapVOt
l1A5LrzXFTcGlT9MlaWBqEIy83Bu1nqQqbxrAFOE0k8A5jomXaw=
=stU2
-----END PGP SIGNATURE-----

Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next

Alexei Starovoitov says:

====================
pull-request: bpf-next 2023-12-18

This PR is larger than usual and contains changes in various parts of the
kernel. The main changes are:

1) Fix kCFI bugs in BPF, from Peter Zijlstra.

   End result: all forms of indirect calls from BPF into the kernel and from
   the kernel into BPF work with CFI enabled. This allows BPF to work with
   CONFIG_FINEIBT=y.

2) Introduce BPF token object, from Andrii Nakryiko.

   It adds the ability to delegate a subset of BPF features from a privileged
   daemon (e.g., systemd) through special mount options for a userns-bound
   BPF FS to a trusted unprivileged application. The design accommodates
   suggestions from Christian Brauner and Paul Moore.

   Example:
   $ sudo mkdir -p /sys/fs/bpf/token
   $ sudo mount -t bpf bpffs /sys/fs/bpf/token \
     -o delegate_cmds=prog_load:MAP_CREATE \
     -o delegate_progs=kprobe \
     -o delegate_attachs=xdp

3) Various verifier improvements and fixes, from Andrii Nakryiko and
   Andrei Matei.

   - Complete precision tracking support for register spills
   - Fix verification of possibly-zero-sized stack accesses
   - Fix access to uninitialized stack slots
   - Track aligned STACK_ZERO cases as imprecise spilled registers. It
     improves the verifier "instructions processed" metric from single-digit
     percentages to 50-60% for some programs.
   - Fix verifier retval logic

4) Support for VLAN tag in XDP hints, from Larysa Zaremba.

5) Allocate BPF trampolines via the bpf_prog_pack mechanism, from Song Liu.

   End result: better memory utilization and a lower I$ miss rate for calls
   to BPF via BPF trampoline.

6) Fix race between a BPF prog accessing an inner map and a parallel delete,
   from Hou Tao.

7) Add bpf_xdp_get_xfrm_state() kfunc, from Daniel Xu.

   It allows BPF to interact with the IPSEC infra. The intent is to support
   software RSS (via XDP) for the upcoming ipsec pcpu work. Experiments on
   AWS demonstrate single tunnel pcpu ipsec reaching line rate on 100G ENA
   nics.

8) Expand bpf_cgrp_storage to support the cgroup1 non-attach case, from
   Yafang Shao.

9) BPF file verification via fsverity, from Song Liu.

   It allows BPF progs to get the fsverity digest.
* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (164 commits)
  bpf: Ensure precise is reset to false in __mark_reg_const_zero()
  selftests/bpf: Add more uprobe multi fail tests
  bpf: Fail uprobe multi link with negative offset
  selftests/bpf: Test the release of map btf
  s390/bpf: Fix indirect trampoline generation
  selftests/bpf: Temporarily disable dummy_struct_ops test on s390
  x86/cfi,bpf: Fix bpf_exception_cb() signature
  bpf: Fix dtor CFI
  cfi: Add CFI_NOSEAL()
  x86/cfi,bpf: Fix bpf_struct_ops CFI
  x86/cfi,bpf: Fix bpf_callback_t CFI
  x86/cfi,bpf: Fix BPF JIT call
  cfi: Flip headers
  selftests/bpf: Add test for abnormal cnt during multi-kprobe attachment
  selftests/bpf: Don't use libbpf_get_error() in kprobe_multi_test
  selftests/bpf: Add test for abnormal cnt during multi-uprobe attachment
  bpf: Limit the number of kprobes when attaching program to multiple kprobes
  bpf: Limit the number of uprobes when attaching program to multiple uprobes
  bpf: xdp: Register generic_kfunc_set with XDP programs
  selftests/bpf: utilize string values for delegate_xxx mount options
  ...
====================

Link: https://lore.kernel.org/r/20231219000520.34178-1-alexei.starovoitov@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in: commit c49b292d03
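For readers new to the BPF token mechanism summarized above, the sketch below shows how a trusted but unprivileged service might consume a delegated bpffs mount from userspace. It is illustrative only and not part of this merge; the BPF_TOKEN_CREATE command and the token_create.bpffs_fd / map_token_fd attribute names are assumptions to be verified against include/uapi/linux/bpf.h of the kernel actually in use.

/* Hypothetical userspace sketch: create a BPF token from the delegated
 * bpffs mount and use it for BPF_MAP_CREATE without CAP_BPF.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static long sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr)
{
	return syscall(__NR_bpf, cmd, attr, sizeof(*attr));
}

int create_token_and_map(void)
{
	union bpf_attr attr;
	int bpffs_fd, token_fd, map_fd;

	/* the userns-bound bpffs instance mounted with delegate_* options */
	bpffs_fd = open("/sys/fs/bpf/token", O_RDONLY);
	if (bpffs_fd < 0)
		return -1;

	memset(&attr, 0, sizeof(attr));
	attr.token_create.bpffs_fd = bpffs_fd;	/* assumed field name */
	token_fd = sys_bpf(BPF_TOKEN_CREATE, &attr);
	if (token_fd < 0)
		goto out;

	/* use the token for a command covered by delegate_cmds=...:MAP_CREATE */
	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_ARRAY;
	attr.key_size = 4;
	attr.value_size = 8;
	attr.max_entries = 16;
	attr.map_token_fd = token_fd;		/* assumed field name */
	map_fd = sys_bpf(BPF_MAP_CREATE, &attr);

	close(token_fd);
out:
	close(bpffs_fd);
	return map_fd;
}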
@@ -352,7 +352,7 @@ can be used to query the contents of cpumasks.

 .. kernel-doc:: kernel/bpf/cpumask.c
    :identifiers: bpf_cpumask_first bpf_cpumask_first_zero bpf_cpumask_first_and
-                 bpf_cpumask_test_cpu
+                 bpf_cpumask_test_cpu bpf_cpumask_weight

 .. kernel-doc:: kernel/bpf/cpumask.c
    :identifiers: bpf_cpumask_equal bpf_cpumask_intersects bpf_cpumask_subset
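As a point of reference for the kfunc newly added to the documentation above, here is a minimal BPF snippet exercising bpf_cpumask_weight(). It is illustrative and not part of this commit; the kfunc prototypes are assumed from kernel/bpf/cpumask.c and the tracepoint is just a placeholder.

/* Count how many CPUs are set in a bpf_cpumask. */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct bpf_cpumask *bpf_cpumask_create(void) __ksym;
void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
u32 bpf_cpumask_weight(const struct cpumask *cpumask) __ksym;

SEC("tp_btf/task_newtask")
int BPF_PROG(count_cpus, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *mask = bpf_cpumask_create();
	u32 weight;

	if (!mask)
		return 0;

	bpf_cpumask_set_cpu(0, mask);
	bpf_cpumask_set_cpu(1, mask);
	/* a bpf_cpumask embeds a struct cpumask as its first member */
	weight = bpf_cpumask_weight((const struct cpumask *)mask);
	bpf_printk("weight=%u", weight);

	bpf_cpumask_release(mask);
	return 0;
}

char _license[] SEC("license") = "GPL";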
Documentation/bpf/fs_kfuncs.rst (new file, 21 lines)
@@ -0,0 +1,21 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. _fs_kfuncs-header-label:
+
+=====================
+BPF filesystem kfuncs
+=====================
+
+BPF LSM programs need to access filesystem data from LSM hooks. The following
+BPF kfuncs can be used to get these data.
+
+ * ``bpf_get_file_xattr()``
+
+ * ``bpf_get_fsverity_digest()``
+
+To avoid recursions, these kfuncs follow the following rules:
+
+1. These kfuncs are only permitted from BPF LSM function.
+2. These kfuncs should not call into other LSM hooks, i.e. security_*(). For
+   example, ``bpf_get_file_xattr()`` does not use ``vfs_getxattr()``, because
+   the latter calls LSM hook ``security_inode_getxattr``.
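To illustrate how the kfuncs documented above are meant to be used, here is a sketch of a sleepable BPF LSM program modeled loosely on the selftests added in this series. It is not part of this commit; the hook choice and buffer size are illustrative.

/* Fetch the fsverity digest of a file being opened. */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

int bpf_get_fsverity_digest(struct file *file, struct bpf_dynptr *digest_ptr) __ksym;

/* room for the fsverity_digest header plus a SHA-256 hash */
char digest[64];

SEC("lsm.s/file_open")
int BPF_PROG(check_file_open, struct file *file)
{
	struct bpf_dynptr digest_ptr;
	int ret;

	bpf_dynptr_from_mem(digest, sizeof(digest), 0, &digest_ptr);
	ret = bpf_get_fsverity_digest(file, &digest_ptr);
	if (ret < 0)
		return 0;	/* file has no fsverity digest, allow */

	/* ... compare the digest against an allow-list map here ... */
	return 0;
}

char _license[] SEC("license") = "GPL";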
@@ -21,6 +21,7 @@ that goes into great technical depth about the BPF Architecture.
    helpers
    kfuncs
    cpumasks
+   fs_kfuncs
    programs
    maps
    bpf_prog_run
@@ -54,6 +54,10 @@ definitions:
         name: hash
         doc:
           Device is capable of exposing receive packet hash via bpf_xdp_metadata_rx_hash().
+      -
+        name: vlan-tag
+        doc:
+          Device is capable of exposing receive packet VLAN tag via bpf_xdp_metadata_rx_vlan_tag().
       -
         type: flags
         name: xsk-flags
@@ -20,7 +20,13 @@ Currently, the following kfuncs are supported. In the future, as more
 metadata is supported, this set will grow:

 .. kernel-doc:: net/core/xdp.c
-   :identifiers: bpf_xdp_metadata_rx_timestamp bpf_xdp_metadata_rx_hash
+   :identifiers: bpf_xdp_metadata_rx_timestamp
+
+.. kernel-doc:: net/core/xdp.c
+   :identifiers: bpf_xdp_metadata_rx_hash
+
+.. kernel-doc:: net/core/xdp.c
+   :identifiers: bpf_xdp_metadata_rx_vlan_tag

 An XDP program can use these kfuncs to read the metadata into stack
 variables for its own consumption. Or, to pass the metadata on to other
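A minimal sketch of consuming the new VLAN tag hint from an XDP program follows. It is illustrative and not part of this commit; the kfunc prototype is assumed from net/core/xdp.c.

/* Read the RX VLAN tag metadata hint, if the driver provides one. */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

extern int bpf_xdp_metadata_rx_vlan_tag(const struct xdp_md *ctx,
					__be16 *vlan_proto,
					__u16 *vlan_tci) __ksym;

SEC("xdp")
int rx_vlan_hint(struct xdp_md *ctx)
{
	__be16 vlan_proto = 0;
	__u16 vlan_tci = 0;

	/* returns 0 on success, a negative errno if no VLAN hint is available */
	if (!bpf_xdp_metadata_rx_vlan_tag(ctx, &vlan_proto, &vlan_tci))
		bpf_printk("vlan proto 0x%x tci %u",
			   bpf_ntohs(vlan_proto), vlan_tci);

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";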
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
 ==================
 AF_XDP TX Metadata
 ==================
@@ -1828,7 +1828,7 @@ static void restore_args(struct jit_ctx *ctx, int args_off, int nregs)
  *
  */
 static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
-			      struct bpf_tramp_links *tlinks, void *orig_call,
+			      struct bpf_tramp_links *tlinks, void *func_addr,
 			      int nregs, u32 flags)
 {
 	int i;
@@ -1926,7 +1926,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,

 	if (flags & BPF_TRAMP_F_IP_ARG) {
 		/* save ip address of the traced function */
-		emit_addr_mov_i64(A64_R(10), (const u64)orig_call, ctx);
+		emit_addr_mov_i64(A64_R(10), (const u64)func_addr, ctx);
 		emit(A64_STR64I(A64_R(10), A64_SP, ip_off), ctx);
 	}

@@ -2026,18 +2026,10 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
 	return ctx->idx;
 }

-int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
-				void *image_end, const struct btf_func_model *m,
-				u32 flags, struct bpf_tramp_links *tlinks,
-				void *orig_call)
+static int btf_func_model_nregs(const struct btf_func_model *m)
 {
-	int i, ret;
 	int nregs = m->nr_args;
-	int max_insns = ((long)image_end - (long)image) / AARCH64_INSN_SIZE;
-	struct jit_ctx ctx = {
-		.image = NULL,
-		.idx = 0,
-	};
+	int i;

 	/* extra registers needed for struct argument */
 	for (i = 0; i < MAX_BPF_FUNC_ARGS; i++) {
@@ -2046,22 +2038,49 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
 			nregs += (m->arg_size[i] + 7) / 8 - 1;
 	}

+	return nregs;
+}
+
+int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
+			     struct bpf_tramp_links *tlinks, void *func_addr)
+{
+	struct jit_ctx ctx = {
+		.image = NULL,
+		.idx = 0,
+	};
+	struct bpf_tramp_image im;
+	int nregs, ret;
+
+	nregs = btf_func_model_nregs(m);
 	/* the first 8 registers are used for arguments */
 	if (nregs > 8)
 		return -ENOTSUPP;

-	ret = prepare_trampoline(&ctx, im, tlinks, orig_call, nregs, flags);
+	ret = prepare_trampoline(&ctx, &im, tlinks, func_addr, nregs, flags);
 	if (ret < 0)
 		return ret;

-	if (ret > max_insns)
-		return -EFBIG;
+	return ret < 0 ? ret : ret * AARCH64_INSN_SIZE;
+}

-	ctx.image = image;
-	ctx.idx = 0;
+int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
+				void *image_end, const struct btf_func_model *m,
+				u32 flags, struct bpf_tramp_links *tlinks,
+				void *func_addr)
+{
+	int ret, nregs;
+	struct jit_ctx ctx = {
+		.image = image,
+		.idx = 0,
+	};
+
+	nregs = btf_func_model_nregs(m);
+	/* the first 8 registers are used for arguments */
+	if (nregs > 8)
+		return -ENOTSUPP;

 	jit_fill_hole(image, (unsigned int)(image_end - image));
-	ret = prepare_trampoline(&ctx, im, tlinks, orig_call, nregs, flags);
+	ret = prepare_trampoline(&ctx, im, tlinks, func_addr, nregs, flags);

 	if (ret > 0 && validate_code(&ctx) < 0)
 		ret = -EINVAL;

@@ -7,8 +7,9 @@
  *
  * Copyright (C) 2023 Google LLC
  */
-#include <linux/bug.h>
+
+#include <linux/cfi.h>
 struct pt_regs;

 #ifdef CONFIG_CFI_CLANG
 enum bug_trap_type handle_cfi_failure(struct pt_regs *regs);
@@ -4,7 +4,7 @@
  *
  * Copyright (C) 2023 Google LLC
  */
-#include <asm/cfi.h>
+#include <linux/cfi.h>
 #include <asm/insn.h>

 /*
@@ -1029,6 +1029,21 @@ out:
 	return ret;
 }

+int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
+			     struct bpf_tramp_links *tlinks, void *func_addr)
+{
+	struct bpf_tramp_image im;
+	struct rv_jit_context ctx;
+	int ret;
+
+	ctx.ninsns = 0;
+	ctx.insns = NULL;
+	ctx.ro_insns = NULL;
+	ret = __arch_prepare_bpf_trampoline(&im, m, tlinks, func_addr, flags, &ctx);
+
+	return ret < 0 ? ret : ninsns_rvoff(ctx.ninsns);
+}
+
 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
 				void *image_end, const struct btf_func_model *m,
 				u32 flags, struct bpf_tramp_links *tlinks,
@@ -1037,16 +1052,6 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
 	int ret;
 	struct rv_jit_context ctx;

-	ctx.ninsns = 0;
-	ctx.insns = NULL;
-	ctx.ro_insns = NULL;
-	ret = __arch_prepare_bpf_trampoline(im, m, tlinks, func_addr, flags, &ctx);
-	if (ret < 0)
-		return ret;
-
-	if (ninsns_rvoff(ret) > (long)image_end - (long)image)
-		return -EFBIG;
-
 	ctx.ninsns = 0;
 	/*
 	 * The bpf_int_jit_compile() uses a RW buffer (ctx.insns) to write the
@@ -2362,7 +2362,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
 		return -ENOTSUPP;

 	/* Return to %r14, since func_addr and %r0 are not available. */
-	if (!func_addr && !(flags & BPF_TRAMP_F_ORIG_STACK))
+	if ((!func_addr && !(flags & BPF_TRAMP_F_ORIG_STACK)) ||
+	    (flags & BPF_TRAMP_F_INDIRECT))
 		flags |= BPF_TRAMP_F_SKIP_FRAME;

 	/*
@@ -2637,6 +2638,21 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
 	return 0;
 }

+int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
+			     struct bpf_tramp_links *tlinks, void *orig_call)
+{
+	struct bpf_tramp_image im;
+	struct bpf_tramp_jit tjit;
+	int ret;
+
+	memset(&tjit, 0, sizeof(tjit));
+
+	ret = __arch_prepare_bpf_trampoline(&im, &tjit, m, flags,
+					    tlinks, orig_call);
+
+	return ret < 0 ? ret : tjit.common.prg;
+}
+
 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
 				void *image_end, const struct btf_func_model *m,
 				u32 flags, struct bpf_tramp_links *tlinks,
@@ -2644,30 +2660,27 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
 {
 	struct bpf_tramp_jit tjit;
 	int ret;
-	int i;

-	for (i = 0; i < 2; i++) {
-		if (i == 0) {
-			/* Compute offsets, check whether the code fits. */
-			memset(&tjit, 0, sizeof(tjit));
-		} else {
-			/* Generate the code. */
-			tjit.common.prg = 0;
-			tjit.common.prg_buf = image;
-		}
-		ret = __arch_prepare_bpf_trampoline(im, &tjit, m, flags,
-						    tlinks, func_addr);
-		if (ret < 0)
-			return ret;
-		if (tjit.common.prg > (char *)image_end - (char *)image)
-			/*
-			 * Use the same error code as for exceeding
-			 * BPF_MAX_TRAMP_LINKS.
-			 */
-			return -E2BIG;
-	}
+	/* Compute offsets, check whether the code fits. */
+	memset(&tjit, 0, sizeof(tjit));
+	ret = __arch_prepare_bpf_trampoline(im, &tjit, m, flags,
+					    tlinks, func_addr);

-	return tjit.common.prg;
+	if (ret < 0)
+		return ret;
+	if (tjit.common.prg > (char *)image_end - (char *)image)
+		/*
+		 * Use the same error code as for exceeding
+		 * BPF_MAX_TRAMP_LINKS.
+		 */
+		return -E2BIG;
+
+	tjit.common.prg = 0;
+	tjit.common.prg_buf = image;
+	ret = __arch_prepare_bpf_trampoline(im, &tjit, m, flags,
+					    tlinks, func_addr);
+
+	return ret < 0 ? ret : tjit.common.prg;
 }

 bool bpf_jit_supports_subprog_tailcalls(void)
@@ -7,16 +7,140 @@
  *
  * Copyright (C) 2022 Google LLC
  */
+#include <linux/bug.h>
+#include <asm/ibt.h>

-#include <linux/cfi.h>
+/*
+ * An overview of the various calling conventions..
+ *
+ * Traditional:
+ *
+ * foo:
+ *   ... code here ...
+ *   ret
+ *
+ * direct caller:
+ *   call foo
+ *
+ * indirect caller:
+ *   lea foo(%rip), %r11
+ *   ...
+ *   call *%r11
+ *
+ *
+ * IBT:
+ *
+ * foo:
+ *   endbr64
+ *   ... code here ...
+ *   ret
+ *
+ * direct caller:
+ *   call foo / call foo+4
+ *
+ * indirect caller:
+ *   lea foo(%rip), %r11
+ *   ...
+ *   call *%r11
+ *
+ *
+ * kCFI:
+ *
+ * __cfi_foo:
+ *   movl $0x12345678, %eax
+ *   # 11 nops when CONFIG_CALL_PADDING
+ * foo:
+ *   endbr64			# when IBT
+ *   ... code here ...
+ *   ret
+ *
+ * direct call:
+ *   call foo			# / call foo+4 when IBT
+ *
+ * indirect call:
+ *   lea foo(%rip), %r11
+ *   ...
+ *   movl $(-0x12345678), %r10d
+ *   addl -4(%r11), %r10d	# -15 when CONFIG_CALL_PADDING
+ *   jz   1f
+ *   ud2
+ * 1:call *%r11
+ *
+ *
+ * FineIBT (builds as kCFI + CALL_PADDING + IBT + RETPOLINE and runtime patches into):
+ *
+ * __cfi_foo:
+ *   endbr64
+ *   subl 0x12345678, %r10d
+ *   jz   foo
+ *   ud2
+ *   nop
+ * foo:
+ *   osp nop3			# was endbr64
+ *   ... code here ...
+ *   ret
+ *
+ * direct caller:
+ *   call foo / call foo+4
+ *
+ * indirect caller:
+ *   lea foo(%rip), %r11
+ *   ...
+ *   movl $0x12345678, %r10d
+ *   subl $16, %r11
+ *   nop4
+ *   call *%r11
+ *
+ */
+enum cfi_mode {
+	CFI_DEFAULT,	/* FineIBT if hardware has IBT, otherwise kCFI */
+	CFI_OFF,	/* Taditional / IBT depending on .config */
+	CFI_KCFI,	/* Optionally CALL_PADDING, IBT, RETPOLINE */
+	CFI_FINEIBT,	/* see arch/x86/kernel/alternative.c */
+};
+
+extern enum cfi_mode cfi_mode;
+
+struct pt_regs;

 #ifdef CONFIG_CFI_CLANG
 enum bug_trap_type handle_cfi_failure(struct pt_regs *regs);
+#define __bpfcall
+extern u32 cfi_bpf_hash;
+extern u32 cfi_bpf_subprog_hash;
+
+static inline int cfi_get_offset(void)
+{
+	switch (cfi_mode) {
+	case CFI_FINEIBT:
+		return 16;
+	case CFI_KCFI:
+		if (IS_ENABLED(CONFIG_CALL_PADDING))
+			return 16;
+		return 5;
+	default:
+		return 0;
+	}
+}
+#define cfi_get_offset cfi_get_offset
+
+extern u32 cfi_get_func_hash(void *func);
+
 #else
 static inline enum bug_trap_type handle_cfi_failure(struct pt_regs *regs)
 {
 	return BUG_TRAP_TYPE_NONE;
 }
+#define cfi_bpf_hash 0U
+#define cfi_bpf_subprog_hash 0U
+static inline u32 cfi_get_func_hash(void *func)
+{
+	return 0;
+}
 #endif /* CONFIG_CFI_CLANG */

+#if HAS_KERNEL_IBT == 1
+#define CFI_NOSEAL(x)	asm(IBT_NOSEAL(__stringify(x)))
+#endif
+
 #endif /* _ASM_X86_CFI_H */
@@ -30,6 +30,7 @@
 #include <asm/fixmap.h>
 #include <asm/paravirt.h>
 #include <asm/asm-prototypes.h>
+#include <asm/cfi.h>

 int __read_mostly alternatives_patched;

@@ -832,15 +833,82 @@ void __init_or_module apply_seal_endbr(s32 *start, s32 *end) { }
 #endif /* CONFIG_X86_KERNEL_IBT */

 #ifdef CONFIG_FINEIBT
+#define __CFI_DEFAULT	CFI_DEFAULT
+#elif defined(CONFIG_CFI_CLANG)
+#define __CFI_DEFAULT	CFI_KCFI
+#else
+#define __CFI_DEFAULT	CFI_OFF
+#endif

-enum cfi_mode {
-	CFI_DEFAULT,
-	CFI_OFF,
-	CFI_KCFI,
-	CFI_FINEIBT,
-};
+enum cfi_mode cfi_mode __ro_after_init = __CFI_DEFAULT;
+
+#ifdef CONFIG_CFI_CLANG
+struct bpf_insn;
+
+/* Must match bpf_func_t / DEFINE_BPF_PROG_RUN() */
+extern unsigned int __bpf_prog_runX(const void *ctx,
+				    const struct bpf_insn *insn);
+
+/*
+ * Force a reference to the external symbol so the compiler generates
+ * __kcfi_typid.
+ */
+__ADDRESSABLE(__bpf_prog_runX);
+
+/* u32 __ro_after_init cfi_bpf_hash = __kcfi_typeid___bpf_prog_runX; */
+asm (
+"	.pushsection	.data..ro_after_init,\"aw\",@progbits	\n"
+"	.type	cfi_bpf_hash,@object				\n"
+"	.globl	cfi_bpf_hash					\n"
+"	.p2align	2, 0x0					\n"
+"cfi_bpf_hash:							\n"
+"	.long	__kcfi_typeid___bpf_prog_runX			\n"
+"	.size	cfi_bpf_hash, 4					\n"
+"	.popsection						\n"
+);
+
+/* Must match bpf_callback_t */
+extern u64 __bpf_callback_fn(u64, u64, u64, u64, u64);
+
+__ADDRESSABLE(__bpf_callback_fn);
+
+/* u32 __ro_after_init cfi_bpf_subprog_hash = __kcfi_typeid___bpf_callback_fn; */
+asm (
+"	.pushsection	.data..ro_after_init,\"aw\",@progbits	\n"
+"	.type	cfi_bpf_subprog_hash,@object			\n"
+"	.globl	cfi_bpf_subprog_hash				\n"
+"	.p2align	2, 0x0					\n"
+"cfi_bpf_subprog_hash:						\n"
+"	.long	__kcfi_typeid___bpf_callback_fn			\n"
+"	.size	cfi_bpf_subprog_hash, 4				\n"
+"	.popsection						\n"
+);
+
+u32 cfi_get_func_hash(void *func)
+{
+	u32 hash;
+
+	func -= cfi_get_offset();
+	switch (cfi_mode) {
+	case CFI_FINEIBT:
+		func += 7;
+		break;
+	case CFI_KCFI:
+		func += 1;
+		break;
+	default:
+		return 0;
+	}
+
+	if (get_kernel_nofault(hash, func))
+		return 0;
+
+	return hash;
+}
+#endif
+
+#ifdef CONFIG_FINEIBT

-static enum cfi_mode cfi_mode __ro_after_init = CFI_DEFAULT;
 static bool cfi_rand __ro_after_init = true;
 static u32  cfi_seed __ro_after_init;

@@ -1149,8 +1217,11 @@ static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
 		goto err;

 	if (cfi_rand) {
-		if (builtin)
+		if (builtin) {
 			cfi_seed = get_random_u32();
+			cfi_bpf_hash = cfi_rehash(cfi_bpf_hash);
+			cfi_bpf_subprog_hash = cfi_rehash(cfi_bpf_subprog_hash);
+		}

 		ret = cfi_rand_preamble(start_cfi, end_cfi);
 		if (ret)
@@ -4,10 +4,10 @@
  *
  * Copyright (C) 2022 Google LLC
  */
-#include <asm/cfi.h>
+#include <linux/string.h>
+#include <linux/cfi.h>
 #include <asm/insn.h>
 #include <asm/insn-eval.h>
-#include <linux/string.h>

 /*
  * Returns the target address and the expected type when regs->ip points
@@ -17,6 +17,7 @@
 #include <asm/nospec-branch.h>
 #include <asm/text-patching.h>
 #include <asm/unwind.h>
+#include <asm/cfi.h>

 static bool all_callee_regs_used[4] = {true, true, true, true};

@@ -51,9 +52,11 @@ static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
 	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)

 #ifdef CONFIG_X86_KERNEL_IBT
-#define EMIT_ENDBR()	EMIT(gen_endbr(), 4)
+#define EMIT_ENDBR()		EMIT(gen_endbr(), 4)
+#define EMIT_ENDBR_POISON()	EMIT(gen_endbr_poison(), 4)
 #else
 #define EMIT_ENDBR()
+#define EMIT_ENDBR_POISON()
 #endif

 static bool is_imm8(int value)
@@ -304,6 +307,69 @@ static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
 	*pprog = prog;
 }

+/*
+ * Emit the various CFI preambles, see asm/cfi.h and the comments about FineIBT
+ * in arch/x86/kernel/alternative.c
+ */
+
+static void emit_fineibt(u8 **pprog, u32 hash)
+{
+	u8 *prog = *pprog;
+
+	EMIT_ENDBR();
+	EMIT3_off32(0x41, 0x81, 0xea, hash);	/* subl $hash, %r10d	*/
+	EMIT2(0x74, 0x07);			/* jz.d8 +7		*/
+	EMIT2(0x0f, 0x0b);			/* ud2			*/
+	EMIT1(0x90);				/* nop			*/
+	EMIT_ENDBR_POISON();
+
+	*pprog = prog;
+}
+
+static void emit_kcfi(u8 **pprog, u32 hash)
+{
+	u8 *prog = *pprog;
+
+	EMIT1_off32(0xb8, hash);		/* movl $hash, %eax	*/
+#ifdef CONFIG_CALL_PADDING
+	EMIT1(0x90);
+	EMIT1(0x90);
+	EMIT1(0x90);
+	EMIT1(0x90);
+	EMIT1(0x90);
+	EMIT1(0x90);
+	EMIT1(0x90);
+	EMIT1(0x90);
+	EMIT1(0x90);
+	EMIT1(0x90);
+	EMIT1(0x90);
+#endif
+	EMIT_ENDBR();
+
+	*pprog = prog;
+}
+
+static void emit_cfi(u8 **pprog, u32 hash)
+{
+	u8 *prog = *pprog;
+
+	switch (cfi_mode) {
+	case CFI_FINEIBT:
+		emit_fineibt(&prog, hash);
+		break;
+
+	case CFI_KCFI:
+		emit_kcfi(&prog, hash);
+		break;
+
+	default:
+		EMIT_ENDBR();
+		break;
+	}
+
+	*pprog = prog;
+}
+
 /*
  * Emit x86-64 prologue code for BPF program.
  * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
@@ -315,10 +381,10 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
 {
 	u8 *prog = *pprog;

+	emit_cfi(&prog, is_subprog ? cfi_bpf_subprog_hash : cfi_bpf_hash);
 	/* BPF trampoline can be made to work without these nops,
 	 * but let's waste 5 bytes for now and optimize later
 	 */
-	EMIT_ENDBR();
 	memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
 	prog += X86_PATCH_SIZE;
 	if (!ebpf_from_cbpf) {
@ -2198,7 +2264,8 @@ static void restore_regs(const struct btf_func_model *m, u8 **prog,
|
||||
|
||||
static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
|
||||
struct bpf_tramp_link *l, int stack_size,
|
||||
int run_ctx_off, bool save_ret)
|
||||
int run_ctx_off, bool save_ret,
|
||||
void *image, void *rw_image)
|
||||
{
|
||||
u8 *prog = *pprog;
|
||||
u8 *jmp_insn;
|
||||
@ -2226,7 +2293,7 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
|
||||
else
|
||||
EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);
|
||||
|
||||
if (emit_rsb_call(&prog, bpf_trampoline_enter(p), prog))
|
||||
if (emit_rsb_call(&prog, bpf_trampoline_enter(p), image + (prog - (u8 *)rw_image)))
|
||||
return -EINVAL;
|
||||
/* remember prog start time returned by __bpf_prog_enter */
|
||||
emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
|
||||
@ -2250,7 +2317,7 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
|
||||
(long) p->insnsi >> 32,
|
||||
(u32) (long) p->insnsi);
|
||||
/* call JITed bpf program or interpreter */
|
||||
if (emit_rsb_call(&prog, p->bpf_func, prog))
|
||||
if (emit_rsb_call(&prog, p->bpf_func, image + (prog - (u8 *)rw_image)))
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
@ -2277,7 +2344,7 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
|
||||
EMIT3_off32(0x48, 0x8D, 0x95, -run_ctx_off);
|
||||
else
|
||||
EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
|
||||
if (emit_rsb_call(&prog, bpf_trampoline_exit(p), prog))
|
||||
if (emit_rsb_call(&prog, bpf_trampoline_exit(p), image + (prog - (u8 *)rw_image)))
|
||||
return -EINVAL;
|
||||
|
||||
*pprog = prog;
|
||||
@ -2312,14 +2379,15 @@ static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
|
||||
|
||||
static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
|
||||
struct bpf_tramp_links *tl, int stack_size,
|
||||
int run_ctx_off, bool save_ret)
|
||||
int run_ctx_off, bool save_ret,
|
||||
void *image, void *rw_image)
|
||||
{
|
||||
int i;
|
||||
u8 *prog = *pprog;
|
||||
|
||||
for (i = 0; i < tl->nr_links; i++) {
|
||||
if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size,
|
||||
run_ctx_off, save_ret))
|
||||
run_ctx_off, save_ret, image, rw_image))
|
||||
return -EINVAL;
|
||||
}
|
||||
*pprog = prog;
|
||||
@ -2328,7 +2396,8 @@ static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
|
||||
|
||||
static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
|
||||
struct bpf_tramp_links *tl, int stack_size,
|
||||
int run_ctx_off, u8 **branches)
|
||||
int run_ctx_off, u8 **branches,
|
||||
void *image, void *rw_image)
|
||||
{
|
||||
u8 *prog = *pprog;
|
||||
int i;
|
||||
@ -2339,7 +2408,8 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
|
||||
emit_mov_imm32(&prog, false, BPF_REG_0, 0);
|
||||
emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
|
||||
for (i = 0; i < tl->nr_links; i++) {
|
||||
if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true))
|
||||
if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true,
|
||||
image, rw_image))
|
||||
return -EINVAL;
|
||||
|
||||
/* mod_ret prog stored return value into [rbp - 8]. Emit:
|
||||
@ -2422,10 +2492,11 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
|
||||
* add rsp, 8 // skip eth_type_trans's frame
|
||||
* ret // return to its caller
|
||||
*/
|
||||
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
|
||||
const struct btf_func_model *m, u32 flags,
|
||||
struct bpf_tramp_links *tlinks,
|
||||
void *func_addr)
|
||||
static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_image,
|
||||
void *rw_image_end, void *image,
|
||||
const struct btf_func_model *m, u32 flags,
|
||||
struct bpf_tramp_links *tlinks,
|
||||
void *func_addr)
|
||||
{
|
||||
int i, ret, nr_regs = m->nr_args, stack_size = 0;
|
||||
int regs_off, nregs_off, ip_off, run_ctx_off, arg_stack_off, rbx_off;
|
||||
@ -2437,10 +2508,19 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
|
||||
u8 *prog;
|
||||
bool save_ret;
|
||||
|
||||
/*
|
||||
* F_INDIRECT is only compatible with F_RET_FENTRY_RET, it is
|
||||
* explicitly incompatible with F_CALL_ORIG | F_SKIP_FRAME | F_IP_ARG
|
||||
* because @func_addr.
|
||||
*/
|
||||
WARN_ON_ONCE((flags & BPF_TRAMP_F_INDIRECT) &&
|
||||
(flags & ~(BPF_TRAMP_F_INDIRECT | BPF_TRAMP_F_RET_FENTRY_RET)));
|
||||
|
||||
/* extra registers for struct arguments */
|
||||
for (i = 0; i < m->nr_args; i++)
|
||||
for (i = 0; i < m->nr_args; i++) {
|
||||
if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
|
||||
nr_regs += (m->arg_size[i] + 7) / 8 - 1;
|
||||
}
|
||||
|
||||
/* x86-64 supports up to MAX_BPF_FUNC_ARGS arguments. 1-6
|
||||
* are passed through regs, the remains are through stack.
|
||||
@ -2521,22 +2601,29 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
|
||||
orig_call += X86_PATCH_SIZE;
|
||||
}
|
||||
|
||||
prog = image;
|
||||
prog = rw_image;
|
||||
|
||||
EMIT_ENDBR();
|
||||
/*
|
||||
* This is the direct-call trampoline, as such it needs accounting
|
||||
* for the __fentry__ call.
|
||||
*/
|
||||
x86_call_depth_emit_accounting(&prog, NULL);
|
||||
if (flags & BPF_TRAMP_F_INDIRECT) {
|
||||
/*
|
||||
* Indirect call for bpf_struct_ops
|
||||
*/
|
||||
emit_cfi(&prog, cfi_get_func_hash(func_addr));
|
||||
} else {
|
||||
/*
|
||||
* Direct-call fentry stub, as such it needs accounting for the
|
||||
* __fentry__ call.
|
||||
*/
|
||||
x86_call_depth_emit_accounting(&prog, NULL);
|
||||
}
|
||||
EMIT1(0x55); /* push rbp */
|
||||
EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
|
||||
if (!is_imm8(stack_size))
|
||||
if (!is_imm8(stack_size)) {
|
||||
/* sub rsp, stack_size */
|
||||
EMIT3_off32(0x48, 0x81, 0xEC, stack_size);
|
||||
else
|
||||
} else {
|
||||
/* sub rsp, stack_size */
|
||||
EMIT4(0x48, 0x83, 0xEC, stack_size);
|
||||
}
|
||||
if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
|
||||
EMIT1(0x50); /* push rax */
|
||||
/* mov QWORD PTR [rbp - rbx_off], rbx */
|
||||
@ -2563,16 +2650,18 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
|
||||
if (flags & BPF_TRAMP_F_CALL_ORIG) {
|
||||
/* arg1: mov rdi, im */
|
||||
emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
|
||||
if (emit_rsb_call(&prog, __bpf_tramp_enter, prog)) {
|
||||
if (emit_rsb_call(&prog, __bpf_tramp_enter,
|
||||
image + (prog - (u8 *)rw_image))) {
|
||||
ret = -EINVAL;
|
||||
goto cleanup;
|
||||
}
|
||||
}
|
||||
|
||||
if (fentry->nr_links)
|
||||
if (fentry->nr_links) {
|
||||
if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off,
|
||||
flags & BPF_TRAMP_F_RET_FENTRY_RET))
|
||||
flags & BPF_TRAMP_F_RET_FENTRY_RET, image, rw_image))
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (fmod_ret->nr_links) {
|
||||
branches = kcalloc(fmod_ret->nr_links, sizeof(u8 *),
|
||||
@ -2581,7 +2670,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
|
||||
return -ENOMEM;
|
||||
|
||||
if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off,
|
||||
run_ctx_off, branches)) {
|
||||
run_ctx_off, branches, image, rw_image)) {
|
||||
ret = -EINVAL;
|
||||
goto cleanup;
|
||||
}
|
||||
@ -2591,25 +2680,26 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
|
||||
restore_regs(m, &prog, regs_off);
|
||||
save_args(m, &prog, arg_stack_off, true);
|
||||
|
||||
if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
|
||||
if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) {
|
||||
/* Before calling the original function, restore the
|
||||
* tail_call_cnt from stack to rax.
|
||||
*/
|
||||
RESTORE_TAIL_CALL_CNT(stack_size);
|
||||
}
|
||||
|
||||
if (flags & BPF_TRAMP_F_ORIG_STACK) {
|
||||
emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8);
|
||||
EMIT2(0xff, 0xd3); /* call *rbx */
|
||||
} else {
|
||||
/* call original function */
|
||||
if (emit_rsb_call(&prog, orig_call, prog)) {
|
||||
if (emit_rsb_call(&prog, orig_call, image + (prog - (u8 *)rw_image))) {
|
||||
ret = -EINVAL;
|
||||
goto cleanup;
|
||||
}
|
||||
}
|
||||
/* remember return value in a stack for bpf prog to access */
|
||||
emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
|
||||
im->ip_after_call = prog;
|
||||
im->ip_after_call = image + (prog - (u8 *)rw_image);
|
||||
memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
|
||||
prog += X86_PATCH_SIZE;
|
||||
}
|
||||
@ -2624,16 +2714,19 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
|
||||
/* Update the branches saved in invoke_bpf_mod_ret with the
|
||||
* aligned address of do_fexit.
|
||||
*/
|
||||
for (i = 0; i < fmod_ret->nr_links; i++)
|
||||
emit_cond_near_jump(&branches[i], prog, branches[i],
|
||||
X86_JNE);
|
||||
for (i = 0; i < fmod_ret->nr_links; i++) {
|
||||
emit_cond_near_jump(&branches[i], image + (prog - (u8 *)rw_image),
|
||||
image + (branches[i] - (u8 *)rw_image), X86_JNE);
|
||||
}
|
||||
}
|
||||
|
||||
if (fexit->nr_links)
|
||||
if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off, false)) {
|
||||
if (fexit->nr_links) {
|
||||
if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off,
|
||||
false, image, rw_image)) {
|
||||
ret = -EINVAL;
|
||||
goto cleanup;
|
||||
}
|
||||
}
|
||||
|
||||
if (flags & BPF_TRAMP_F_RESTORE_REGS)
|
||||
restore_regs(m, &prog, regs_off);
|
||||
@ -2643,18 +2736,19 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
|
||||
* restored to R0.
|
||||
*/
|
||||
if (flags & BPF_TRAMP_F_CALL_ORIG) {
|
||||
im->ip_epilogue = prog;
|
||||
im->ip_epilogue = image + (prog - (u8 *)rw_image);
|
||||
/* arg1: mov rdi, im */
|
||||
emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
|
||||
if (emit_rsb_call(&prog, __bpf_tramp_exit, prog)) {
|
||||
if (emit_rsb_call(&prog, __bpf_tramp_exit, image + (prog - (u8 *)rw_image))) {
|
||||
ret = -EINVAL;
|
||||
goto cleanup;
|
||||
}
|
||||
} else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
|
||||
} else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) {
|
||||
/* Before running the original function, restore the
|
||||
* tail_call_cnt from stack to rax.
|
||||
*/
|
||||
RESTORE_TAIL_CALL_CNT(stack_size);
|
||||
}
|
||||
|
||||
/* restore return value of orig_call or fentry prog back into RAX */
|
||||
if (save_ret)
|
||||
@ -2662,22 +2756,94 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
|
||||
|
||||
emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, -rbx_off);
|
||||
EMIT1(0xC9); /* leave */
|
||||
if (flags & BPF_TRAMP_F_SKIP_FRAME)
|
||||
if (flags & BPF_TRAMP_F_SKIP_FRAME) {
|
||||
/* skip our return address and return to parent */
|
||||
EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
|
||||
emit_return(&prog, prog);
|
||||
}
|
||||
emit_return(&prog, image + (prog - (u8 *)rw_image));
|
||||
/* Make sure the trampoline generation logic doesn't overflow */
|
||||
if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) {
|
||||
if (WARN_ON_ONCE(prog > (u8 *)rw_image_end - BPF_INSN_SAFETY)) {
|
||||
ret = -EFAULT;
|
||||
goto cleanup;
|
||||
}
|
||||
ret = prog - (u8 *)image;
|
||||
ret = prog - (u8 *)rw_image + BPF_INSN_SAFETY;
|
||||
|
||||
cleanup:
|
||||
kfree(branches);
|
||||
return ret;
|
||||
}
|
||||
|
||||
void *arch_alloc_bpf_trampoline(unsigned int size)
|
||||
{
|
||||
return bpf_prog_pack_alloc(size, jit_fill_hole);
|
||||
}
|
||||
|
||||
void arch_free_bpf_trampoline(void *image, unsigned int size)
|
||||
{
|
||||
bpf_prog_pack_free(image, size);
|
||||
}
|
||||
|
||||
void arch_protect_bpf_trampoline(void *image, unsigned int size)
|
||||
{
|
||||
}
|
||||
|
||||
void arch_unprotect_bpf_trampoline(void *image, unsigned int size)
|
||||
{
|
||||
}
|
||||
|
||||
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
|
||||
const struct btf_func_model *m, u32 flags,
|
||||
struct bpf_tramp_links *tlinks,
|
||||
void *func_addr)
|
||||
{
|
||||
void *rw_image, *tmp;
|
||||
int ret;
|
||||
u32 size = image_end - image;
|
||||
|
||||
/* rw_image doesn't need to be in module memory range, so we can
|
||||
* use kvmalloc.
|
||||
*/
|
||||
rw_image = kvmalloc(size, GFP_KERNEL);
|
||||
if (!rw_image)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = __arch_prepare_bpf_trampoline(im, rw_image, rw_image + size, image, m,
|
||||
flags, tlinks, func_addr);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
tmp = bpf_arch_text_copy(image, rw_image, size);
|
||||
if (IS_ERR(tmp))
|
||||
ret = PTR_ERR(tmp);
|
||||
out:
|
||||
kvfree(rw_image);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
|
||||
struct bpf_tramp_links *tlinks, void *func_addr)
|
||||
{
|
||||
struct bpf_tramp_image im;
|
||||
void *image;
|
||||
int ret;
|
||||
|
||||
/* Allocate a temporary buffer for __arch_prepare_bpf_trampoline().
|
||||
* This will NOT cause fragmentation in direct map, as we do not
|
||||
* call set_memory_*() on this buffer.
|
||||
*
|
||||
* We cannot use kvmalloc here, because we need image to be in
|
||||
* module memory range.
|
||||
*/
|
||||
image = bpf_jit_alloc_exec(PAGE_SIZE);
|
||||
if (!image)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = __arch_prepare_bpf_trampoline(&im, image, image + PAGE_SIZE, image,
|
||||
m, flags, tlinks, func_addr);
|
||||
bpf_jit_free_exec(image);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs, u8 *image, u8 *buf)
|
||||
{
|
||||
u8 *jg_reloc, *prog = *pprog;
|
||||
@@ -2935,9 +3101,16 @@ out_image:
 			jit_data->header = header;
 			jit_data->rw_header = rw_header;
 		}
-		prog->bpf_func = (void *)image;
+		/*
+		 * ctx.prog_offset is used when CFI preambles put code *before*
+		 * the function. See emit_cfi(). For FineIBT specifically this code
+		 * can also be executed and bpf_prog_kallsyms_add() will
+		 * generate an additional symbol to cover this, hence also
+		 * decrement proglen.
+		 */
+		prog->bpf_func = (void *)image + cfi_get_offset();
 		prog->jited = 1;
-		prog->jited_len = proglen;
+		prog->jited_len = proglen - cfi_get_offset();
 	} else {
 		prog = orig_prog;
 	}
@@ -2992,6 +3165,7 @@ void bpf_jit_free(struct bpf_prog *prog)
 			kvfree(jit_data->addrs);
 			kfree(jit_data);
 		}
+		prog->bpf_func = (void *)prog->bpf_func - cfi_get_offset();
 		hdr = bpf_jit_binary_pack_hdr(prog);
 		bpf_jit_binary_pack_free(hdr, NULL);
 		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
@@ -110,7 +110,7 @@ lirc_mode2_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 	case BPF_FUNC_get_prandom_u32:
 		return &bpf_get_prandom_u32_proto;
 	case BPF_FUNC_trace_printk:
-		if (perfmon_capable())
+		if (bpf_token_capable(prog->aux->token, CAP_PERFMON))
 			return bpf_get_trace_printk_proto();
 		fallthrough;
 	default:
@@ -1008,4 +1008,6 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf)
 	set_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags);
 	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
 }
+
+extern const struct xdp_metadata_ops ice_xdp_md_ops;
 #endif /* _ICE_H_ */
@@ -527,6 +527,19 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
 	return 0;
 }

+static void ice_xsk_pool_fill_cb(struct ice_rx_ring *ring)
+{
+	void *ctx_ptr = &ring->pkt_ctx;
+	struct xsk_cb_desc desc = {};
+
+	XSK_CHECK_PRIV_TYPE(struct ice_xdp_buff);
+	desc.src = &ctx_ptr;
+	desc.off = offsetof(struct ice_xdp_buff, pkt_ctx) -
+		   sizeof(struct xdp_buff);
+	desc.bytes = sizeof(ctx_ptr);
+	xsk_pool_fill_cb(ring->xsk_pool, &desc);
+}
+
 /**
  * ice_vsi_cfg_rxq - Configure an Rx queue
  * @ring: the ring being configured
@@ -561,6 +574,7 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
 			if (err)
 				return err;
 			xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
+			ice_xsk_pool_fill_cb(ring);

 			dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
 				 ring->q_index);
@@ -583,6 +597,7 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)

 		xdp_init_buff(&ring->xdp, ice_rx_pg_size(ring) / 2, &ring->xdp_rxq);
 		ring->xdp.data = NULL;
+		ring->xdp_ext.pkt_ctx = &ring->pkt_ctx;
 		err = ice_setup_rx_ctx(ring);
 		if (err) {
 			dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
@ -673,6 +673,212 @@ struct ice_tlan_ctx {
|
||||
* Use the enum ice_rx_l2_ptype to decode the packet type
|
||||
* ENDIF
|
||||
*/
|
||||
#define ICE_PTYPES \
|
||||
/* L2 Packet types */ \
|
||||
ICE_PTT_UNUSED_ENTRY(0), \
|
||||
ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), \
|
||||
ICE_PTT_UNUSED_ENTRY(2), \
|
||||
ICE_PTT_UNUSED_ENTRY(3), \
|
||||
ICE_PTT_UNUSED_ENTRY(4), \
|
||||
ICE_PTT_UNUSED_ENTRY(5), \
|
||||
ICE_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \
|
||||
ICE_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \
|
||||
ICE_PTT_UNUSED_ENTRY(8), \
|
||||
ICE_PTT_UNUSED_ENTRY(9), \
|
||||
ICE_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \
|
||||
ICE_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \
|
||||
ICE_PTT_UNUSED_ENTRY(12), \
|
||||
ICE_PTT_UNUSED_ENTRY(13), \
|
||||
ICE_PTT_UNUSED_ENTRY(14), \
|
||||
ICE_PTT_UNUSED_ENTRY(15), \
|
||||
ICE_PTT_UNUSED_ENTRY(16), \
|
||||
ICE_PTT_UNUSED_ENTRY(17), \
|
||||
ICE_PTT_UNUSED_ENTRY(18), \
|
||||
ICE_PTT_UNUSED_ENTRY(19), \
|
||||
ICE_PTT_UNUSED_ENTRY(20), \
|
||||
ICE_PTT_UNUSED_ENTRY(21), \
|
||||
\
|
||||
/* Non Tunneled IPv4 */ \
|
||||
ICE_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), \
|
||||
ICE_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), \
|
||||
ICE_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), \
|
||||
ICE_PTT_UNUSED_ENTRY(25), \
|
||||
ICE_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), \
|
||||
ICE_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), \
|
||||
ICE_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), \
|
||||
\
|
||||
/* IPv4 --> IPv4 */ \
|
||||
ICE_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), \
|
||||
ICE_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), \
|
||||
ICE_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), \
|
||||
ICE_PTT_UNUSED_ENTRY(32), \
|
||||
ICE_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), \
|
||||
ICE_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), \
|
||||
ICE_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), \
|
||||
\
|
||||
/* IPv4 --> IPv6 */ \
|
||||
ICE_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), \
|
||||
ICE_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), \
|
||||
ICE_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), \
|
||||
ICE_PTT_UNUSED_ENTRY(39), \
|
||||
ICE_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), \
|
||||
ICE_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), \
|
||||
ICE_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), \
|
||||
\
|
||||
/* IPv4 --> GRE/NAT */ \
|
||||
ICE_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), \
|
||||
\
|
||||
/* IPv4 --> GRE/NAT --> IPv4 */ \
|
||||
ICE_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), \
|
||||
ICE_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), \
|
||||
ICE_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), \
|
||||
ICE_PTT_UNUSED_ENTRY(47), \
|
||||
ICE_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), \
|
||||
ICE_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), \
|
||||
ICE_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), \
|
||||
\
|
||||
/* IPv4 --> GRE/NAT --> IPv6 */ \
|
||||
ICE_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), \
|
||||
ICE_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), \
|
||||
ICE_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), \
|
||||
ICE_PTT_UNUSED_ENTRY(54), \
|
||||
ICE_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), \
|
||||
ICE_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), \
|
||||
ICE_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), \
|
||||
\
|
||||
/* IPv4 --> GRE/NAT --> MAC */ \
|
||||
ICE_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), \
|
||||
\
|
||||
/* IPv4 --> GRE/NAT --> MAC --> IPv4 */ \
|
||||
ICE_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), \
|
||||
ICE_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), \
|
||||
ICE_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), \
|
||||
ICE_PTT_UNUSED_ENTRY(62), \
|
||||
ICE_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), \
|
||||
ICE_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), \
|
||||
ICE_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), \
|
||||
\
|
||||
/* IPv4 --> GRE/NAT -> MAC --> IPv6 */ \
|
||||
ICE_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), \
|
||||
ICE_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), \
|
||||
ICE_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), \
|
||||
ICE_PTT_UNUSED_ENTRY(69), \
|
||||
ICE_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), \
|
||||
ICE_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), \
|
||||
ICE_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), \
|
||||
\
|
||||
/* IPv4 --> GRE/NAT --> MAC/VLAN */ \
|
||||
ICE_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), \
|
||||
\
|
||||
/* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ \
|
||||
ICE_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), \
|
||||
ICE_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), \
|
||||
ICE_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), \
|
||||
ICE_PTT_UNUSED_ENTRY(77), \
|
||||
ICE_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), \
|
||||
ICE_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), \
|
||||
ICE_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), \
|
||||
\
|
||||
/* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ \
|
||||
ICE_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), \
|
||||
ICE_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), \
|
||||
ICE_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), \
|
||||
ICE_PTT_UNUSED_ENTRY(84), \
|
||||
ICE_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), \
|
||||
ICE_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), \
|
||||
ICE_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), \
|
||||
\
|
||||
/* Non Tunneled IPv6 */ \
|
||||
ICE_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), \
|
||||
ICE_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), \
|
||||
ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4), \
|
||||
ICE_PTT_UNUSED_ENTRY(91), \
|
||||
ICE_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), \
|
||||
ICE_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), \
|
||||
ICE_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), \
|
||||
\
|
||||
/* IPv6 --> IPv4 */ \
|
||||
ICE_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), \
|
||||
ICE_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), \
|
||||
ICE_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), \
|
||||
ICE_PTT_UNUSED_ENTRY(98), \
|
||||
ICE_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), \
|
||||
ICE_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), \
|
||||
ICE_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), \
|
||||
\
|
||||
/* IPv6 --> IPv6 */ \
|
||||
ICE_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), \
|
||||
ICE_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), \
|
||||
ICE_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), \
|
||||
ICE_PTT_UNUSED_ENTRY(105), \
|
||||
ICE_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), \
|
||||
ICE_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), \
|
||||
ICE_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), \
|
||||
\
|
||||
/* IPv6 --> GRE/NAT */ \
|
||||
ICE_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), \
|
||||
\
|
||||
/* IPv6 --> GRE/NAT -> IPv4 */ \
|
||||
ICE_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), \
|
||||
ICE_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), \
|
||||
ICE_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), \
|
||||
ICE_PTT_UNUSED_ENTRY(113), \
|
||||
ICE_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), \
|
||||
ICE_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), \
|
||||
ICE_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), \
|
||||
\
|
||||
/* IPv6 --> GRE/NAT -> IPv6 */ \
|
||||
ICE_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), \
|
||||
ICE_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), \
|
||||
ICE_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), \
|
||||
ICE_PTT_UNUSED_ENTRY(120), \
|
||||
ICE_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), \
|
||||
ICE_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), \
|
||||
ICE_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), \
|
||||
\
|
||||
/* IPv6 --> GRE/NAT -> MAC */ \
|
||||
ICE_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), \
|
||||
\
|
||||
/* IPv6 --> GRE/NAT -> MAC -> IPv4 */ \
|
||||
ICE_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), \
|
||||
ICE_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), \
|
||||
ICE_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), \
|
||||
ICE_PTT_UNUSED_ENTRY(128), \
|
||||
ICE_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), \
|
||||
ICE_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), \
|
||||
ICE_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), \
|
||||
\
|
||||
/* IPv6 --> GRE/NAT -> MAC -> IPv6 */ \
|
||||
ICE_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), \
|
||||
ICE_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), \
|
||||
ICE_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), \
|
||||
ICE_PTT_UNUSED_ENTRY(135), \
|
||||
ICE_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), \
|
||||
ICE_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), \
|
||||
ICE_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), \
|
||||
\
|
||||
/* IPv6 --> GRE/NAT -> MAC/VLAN */ \
|
||||
ICE_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), \
|
||||
\
|
||||
/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ \
|
||||
ICE_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), \
|
||||
ICE_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), \
|
||||
ICE_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), \
|
||||
ICE_PTT_UNUSED_ENTRY(143), \
|
||||
ICE_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), \
|
||||
ICE_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), \
|
||||
ICE_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), \
|
||||
\
|
||||
/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ \
|
||||
ICE_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), \
|
||||
ICE_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), \
|
||||
ICE_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), \
|
||||
ICE_PTT_UNUSED_ENTRY(150), \
|
||||
ICE_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), \
|
||||
ICE_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), \
|
||||
ICE_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
|
||||
|
||||
#define ICE_NUM_DEFINED_PTYPES 154
|
||||
|
||||
/* macro to make the table lines short, use explicit indexing with [PTYPE] */
|
||||
#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
|
||||
@ -695,212 +901,10 @@ struct ice_tlan_ctx {
|
||||
|
||||
/* Lookup table mapping in the 10-bit HW PTYPE to the bit field for decoding */
|
||||
static const struct ice_rx_ptype_decoded ice_ptype_lkup[BIT(10)] = {
|
||||
/* L2 Packet types */
|
||||
ICE_PTT_UNUSED_ENTRY(0),
|
||||
ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
|
||||
ICE_PTT_UNUSED_ENTRY(2),
|
||||
ICE_PTT_UNUSED_ENTRY(3),
|
||||
ICE_PTT_UNUSED_ENTRY(4),
|
||||
ICE_PTT_UNUSED_ENTRY(5),
|
||||
ICE_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
|
||||
ICE_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
|
||||
ICE_PTT_UNUSED_ENTRY(8),
|
||||
ICE_PTT_UNUSED_ENTRY(9),
|
||||
ICE_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
|
||||
ICE_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
|
||||
ICE_PTT_UNUSED_ENTRY(12),
|
||||
ICE_PTT_UNUSED_ENTRY(13),
|
||||
ICE_PTT_UNUSED_ENTRY(14),
|
||||
ICE_PTT_UNUSED_ENTRY(15),
|
||||
ICE_PTT_UNUSED_ENTRY(16),
|
||||
ICE_PTT_UNUSED_ENTRY(17),
|
||||
ICE_PTT_UNUSED_ENTRY(18),
|
||||
ICE_PTT_UNUSED_ENTRY(19),
|
||||
ICE_PTT_UNUSED_ENTRY(20),
|
||||
ICE_PTT_UNUSED_ENTRY(21),
|
||||
|
||||
/* Non Tunneled IPv4 */
|
||||
ICE_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
|
||||
ICE_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
|
||||
ICE_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
|
||||
ICE_PTT_UNUSED_ENTRY(25),
|
||||
ICE_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
|
||||
ICE_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
|
||||
ICE_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
|
||||
|
||||
/* IPv4 --> IPv4 */
|
||||
ICE_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
|
||||
ICE_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
|
||||
ICE_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
|
||||
ICE_PTT_UNUSED_ENTRY(32),
|
||||
ICE_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
|
||||
ICE_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
|
||||
ICE_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
|
||||
|
||||
/* IPv4 --> IPv6 */
|
||||
ICE_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
|
||||
ICE_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
|
||||
ICE_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
|
||||
ICE_PTT_UNUSED_ENTRY(39),
|
||||
ICE_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
|
||||
ICE_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
|
||||
ICE_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
|
||||
|
||||
/* IPv4 --> GRE/NAT */
|
||||
ICE_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
|
||||
|
||||
/* IPv4 --> GRE/NAT --> IPv4 */
|
||||
ICE_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
|
||||
ICE_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
|
||||
ICE_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
|
||||
ICE_PTT_UNUSED_ENTRY(47),
|
||||
ICE_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
|
||||
ICE_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
|
||||
ICE_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
|
||||
|
||||
/* IPv4 --> GRE/NAT --> IPv6 */
|
||||
ICE_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
|
||||
ICE_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
|
||||
ICE_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
|
||||
ICE_PTT_UNUSED_ENTRY(54),
|
||||
ICE_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
|
||||
ICE_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
|
||||
ICE_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
|
||||
|
||||
/* IPv4 --> GRE/NAT --> MAC */
|
||||
ICE_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
|
||||
|
||||
/* IPv4 --> GRE/NAT --> MAC --> IPv4 */
|
||||
ICE_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
|
||||
ICE_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
|
||||
ICE_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
|
||||
ICE_PTT_UNUSED_ENTRY(62),
|
||||
ICE_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
|
||||
ICE_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
|
||||
ICE_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
|
||||
|
||||
/* IPv4 --> GRE/NAT -> MAC --> IPv6 */
|
||||
ICE_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
|
||||
ICE_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
|
||||
ICE_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
|
||||
ICE_PTT_UNUSED_ENTRY(69),
|
||||
ICE_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
|
||||
ICE_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
|
||||
ICE_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
|
||||
|
||||
/* IPv4 --> GRE/NAT --> MAC/VLAN */
|
||||
ICE_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
|
||||
|
||||
/* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
|
||||
ICE_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
|
||||
ICE_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
|
||||
ICE_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
|
||||
ICE_PTT_UNUSED_ENTRY(77),
|
||||
ICE_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
|
||||
ICE_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
|
||||
ICE_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
|
||||
|
||||
/* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
|
||||
ICE_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
|
||||
ICE_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
|
||||
ICE_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
|
||||
ICE_PTT_UNUSED_ENTRY(84),
|
||||
ICE_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
|
||||
ICE_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
|
||||
ICE_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
|
||||
|
||||
/* Non Tunneled IPv6 */
|
||||
ICE_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
|
||||
ICE_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
|
||||
ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
|
||||
ICE_PTT_UNUSED_ENTRY(91),
|
||||
ICE_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
|
||||
ICE_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
|
||||
ICE_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
|
||||
|
||||
/* IPv6 --> IPv4 */
|
||||
ICE_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
|
||||
ICE_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
|
||||
ICE_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
|
||||
ICE_PTT_UNUSED_ENTRY(98),
|
||||
ICE_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
|
||||
ICE_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
|
||||
ICE_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
|
||||
|
||||
/* IPv6 --> IPv6 */
|
||||
ICE_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
|
||||
ICE_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
|
||||
ICE_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
|
||||
ICE_PTT_UNUSED_ENTRY(105),
|
||||
ICE_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
|
||||
ICE_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
|
||||
ICE_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
|
||||
|
||||
/* IPv6 --> GRE/NAT */
|
||||
ICE_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
|
||||
|
||||
/* IPv6 --> GRE/NAT -> IPv4 */
|
||||
ICE_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
|
||||
ICE_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
|
||||
ICE_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
|
||||
ICE_PTT_UNUSED_ENTRY(113),
|
||||
ICE_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
|
||||
ICE_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
|
||||
ICE_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
|
||||
|
||||
/* IPv6 --> GRE/NAT -> IPv6 */
|
||||
ICE_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
|
||||
ICE_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
|
||||
ICE_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
|
||||
ICE_PTT_UNUSED_ENTRY(120),
|
||||
ICE_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
|
||||
ICE_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
|
||||
ICE_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
|
||||
|
||||
/* IPv6 --> GRE/NAT -> MAC */
|
||||
ICE_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
|
||||
|
||||
/* IPv6 --> GRE/NAT -> MAC -> IPv4 */
|
||||
ICE_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
|
||||
ICE_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
|
||||
ICE_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
|
||||
ICE_PTT_UNUSED_ENTRY(128),
|
||||
ICE_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
|
||||
ICE_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
|
||||
ICE_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
|
||||
|
||||
/* IPv6 --> GRE/NAT -> MAC -> IPv6 */
|
||||
ICE_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
|
||||
ICE_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
|
||||
ICE_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
|
||||
ICE_PTT_UNUSED_ENTRY(135),
|
||||
ICE_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
|
||||
ICE_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
|
||||
ICE_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
|
||||
|
||||
/* IPv6 --> GRE/NAT -> MAC/VLAN */
|
||||
ICE_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
|
||||
|
||||
/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
|
||||
ICE_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
|
||||
ICE_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
|
||||
ICE_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
|
||||
ICE_PTT_UNUSED_ENTRY(143),
|
||||
ICE_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
|
||||
ICE_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
|
||||
ICE_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
|
||||
|
||||
/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
|
||||
ICE_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
|
||||
ICE_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
|
||||
ICE_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
|
||||
ICE_PTT_UNUSED_ENTRY(150),
|
||||
ICE_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
|
||||
ICE_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
|
||||
ICE_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
|
||||
ICE_PTYPES
|
||||
|
||||
/* unused entries */
|
||||
[154 ... 1023] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
|
||||
[ICE_NUM_DEFINED_PTYPES ... 1023] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
|
||||
};
|
||||
|
||||
static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
|
||||
|
@ -3426,6 +3426,7 @@ static void ice_set_ops(struct ice_vsi *vsi)
|
||||
|
||||
netdev->netdev_ops = &ice_netdev_ops;
|
||||
netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
|
||||
netdev->xdp_metadata_ops = &ice_xdp_md_ops;
|
||||
ice_set_ethtool_ops(netdev);
|
||||
|
||||
if (vsi->type != ICE_VSI_PF)
|
||||
@ -6093,6 +6094,23 @@ ice_fix_features(struct net_device *netdev, netdev_features_t features)
|
||||
return features;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_set_rx_rings_vlan_proto - update rings with new stripped VLAN proto
|
||||
* @vsi: PF's VSI
|
||||
* @vlan_ethertype: VLAN ethertype (802.1Q or 802.1ad) in network byte order
|
||||
*
|
||||
* Store current stripped VLAN proto in ring packet context,
|
||||
* so it can be accessed more efficiently by packet processing code.
|
||||
*/
|
||||
static void
|
||||
ice_set_rx_rings_vlan_proto(struct ice_vsi *vsi, __be16 vlan_ethertype)
|
||||
{
|
||||
u16 i;
|
||||
|
||||
ice_for_each_alloc_rxq(vsi, i)
|
||||
vsi->rx_rings[i]->pkt_ctx.vlan_proto = vlan_ethertype;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_set_vlan_offload_features - set VLAN offload features for the PF VSI
|
||||
* @vsi: PF's VSI
|
||||
@ -6135,6 +6153,9 @@ ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features)
|
||||
if (strip_err || insert_err)
|
||||
return -EIO;
|
||||
|
||||
ice_set_rx_rings_vlan_proto(vsi, enable_stripping ?
|
||||
htons(vlan_ethertype) : 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -2129,30 +2129,26 @@ int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_ptp_rx_hwtstamp - Check for an Rx timestamp
|
||||
* @rx_ring: Ring to get the VSI info
|
||||
* ice_ptp_get_rx_hwts - Get packet Rx timestamp in ns
|
||||
* @rx_desc: Receive descriptor
|
||||
* @skb: Particular skb to send timestamp with
|
||||
* @pkt_ctx: Packet context to get the cached time
|
||||
*
|
||||
* The driver receives a notification in the receive descriptor with timestamp.
|
||||
* The timestamp is in ns, so we must convert the result first.
|
||||
*/
|
||||
void
|
||||
ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
|
||||
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb)
|
||||
u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
|
||||
const struct ice_pkt_ctx *pkt_ctx)
|
||||
{
|
||||
struct skb_shared_hwtstamps *hwtstamps;
|
||||
u64 ts_ns, cached_time;
|
||||
u32 ts_high;
|
||||
|
||||
if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID))
|
||||
return;
|
||||
return 0;
|
||||
|
||||
cached_time = READ_ONCE(rx_ring->cached_phctime);
|
||||
cached_time = READ_ONCE(pkt_ctx->cached_phctime);
|
||||
|
||||
/* Do not report a timestamp if we don't have a cached PHC time */
|
||||
if (!cached_time)
|
||||
return;
|
||||
return 0;
|
||||
|
||||
/* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached
|
||||
* PHC value, rather than accessing the PF. This also allows us to
|
||||
@ -2163,9 +2159,7 @@ ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
|
||||
ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
|
||||
ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high);
|
||||
|
||||
hwtstamps = skb_hwtstamps(skb);
|
||||
memset(hwtstamps, 0, sizeof(*hwtstamps));
|
||||
hwtstamps->hwtstamp = ns_to_ktime(ts_ns);
|
||||
return ts_ns;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -298,9 +298,8 @@ void ice_ptp_extts_event(struct ice_pf *pf);
|
||||
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
|
||||
enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf);
|
||||
|
||||
void
|
||||
ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
|
||||
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb);
|
||||
u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
|
||||
const struct ice_pkt_ctx *pkt_ctx);
|
||||
void ice_ptp_reset(struct ice_pf *pf);
|
||||
void ice_ptp_prepare_for_reset(struct ice_pf *pf);
|
||||
void ice_ptp_init(struct ice_pf *pf);
|
||||
@ -329,9 +328,14 @@ static inline bool ice_ptp_process_ts(struct ice_pf *pf)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
static inline void
|
||||
ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
|
||||
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) { }
|
||||
|
||||
static inline u64
|
||||
ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
|
||||
const struct ice_pkt_ctx *pkt_ctx)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void ice_ptp_reset(struct ice_pf *pf) { }
|
||||
static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf) { }
|
||||
static inline void ice_ptp_init(struct ice_pf *pf) { }
|
||||
|
@ -557,13 +557,14 @@ ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, const unsigned int size)
|
||||
* @xdp_prog: XDP program to run
|
||||
* @xdp_ring: ring to be used for XDP_TX action
|
||||
* @rx_buf: Rx buffer to store the XDP action
|
||||
* @eop_desc: Last descriptor in packet to read metadata from
|
||||
*
|
||||
* Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
|
||||
*/
|
||||
static void
|
||||
ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
|
||||
struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
|
||||
struct ice_rx_buf *rx_buf)
|
||||
struct ice_rx_buf *rx_buf, union ice_32b_rx_flex_desc *eop_desc)
|
||||
{
|
||||
unsigned int ret = ICE_XDP_PASS;
|
||||
u32 act;
|
||||
@ -571,6 +572,8 @@ ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
|
||||
if (!xdp_prog)
|
||||
goto exit;
|
||||
|
||||
ice_xdp_meta_set_desc(xdp, eop_desc);
|
||||
|
||||
act = bpf_prog_run_xdp(xdp_prog, xdp);
|
||||
switch (act) {
|
||||
case XDP_PASS:
|
||||
@ -1180,8 +1183,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
|
||||
struct sk_buff *skb;
|
||||
unsigned int size;
|
||||
u16 stat_err_bits;
|
||||
u16 vlan_tag = 0;
|
||||
u16 rx_ptype;
|
||||
u16 vlan_tci;
|
||||
|
||||
/* get the Rx desc from Rx ring based on 'next_to_clean' */
|
||||
rx_desc = ICE_RX_DESC(rx_ring, ntc);
|
||||
@ -1241,7 +1243,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
|
||||
if (ice_is_non_eop(rx_ring, rx_desc))
|
||||
continue;
|
||||
|
||||
ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf);
|
||||
ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf, rx_desc);
|
||||
if (rx_buf->act == ICE_XDP_PASS)
|
||||
goto construct_skb;
|
||||
total_rx_bytes += xdp_get_buff_len(xdp);
|
||||
@ -1276,7 +1278,7 @@ construct_skb:
|
||||
continue;
|
||||
}
|
||||
|
||||
vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);
|
||||
vlan_tci = ice_get_vlan_tci(rx_desc);
|
||||
|
||||
/* pad the skb if needed, to make a valid ethernet frame */
|
||||
if (eth_skb_pad(skb))
|
||||
@ -1286,14 +1288,11 @@ construct_skb:
|
||||
total_rx_bytes += skb->len;
|
||||
|
||||
/* populate checksum, VLAN, and protocol */
|
||||
rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
|
||||
ICE_RX_FLEX_DESC_PTYPE_M;
|
||||
|
||||
ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
|
||||
ice_process_skb_fields(rx_ring, rx_desc, skb);
|
||||
|
||||
ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb);
|
||||
/* send completed skb up the stack */
|
||||
ice_receive_skb(rx_ring, skb, vlan_tag);
|
||||
ice_receive_skb(rx_ring, skb, vlan_tci);
|
||||
|
||||
/* update budget accounting */
|
||||
total_rx_pkts++;
|
||||
|
@ -257,6 +257,20 @@ enum ice_rx_dtype {
|
||||
ICE_RX_DTYPE_SPLIT_ALWAYS = 2,
|
||||
};
|
||||
|
||||
struct ice_pkt_ctx {
|
||||
u64 cached_phctime;
|
||||
__be16 vlan_proto;
|
||||
};
|
||||
|
||||
struct ice_xdp_buff {
|
||||
struct xdp_buff xdp_buff;
|
||||
const union ice_32b_rx_flex_desc *eop_desc;
|
||||
const struct ice_pkt_ctx *pkt_ctx;
|
||||
};
|
||||
|
||||
/* Required for compatibility with xdp_buffs from xsk_pool */
|
||||
static_assert(offsetof(struct ice_xdp_buff, xdp_buff) == 0);
|
||||
|
||||
/* indices into GLINT_ITR registers */
|
||||
#define ICE_RX_ITR ICE_IDX_ITR0
|
||||
#define ICE_TX_ITR ICE_IDX_ITR1
|
||||
@ -298,7 +312,6 @@ enum ice_dynamic_itr {
|
||||
/* descriptor ring, associated with a VSI */
|
||||
struct ice_rx_ring {
|
||||
/* CL1 - 1st cacheline starts here */
|
||||
struct ice_rx_ring *next; /* pointer to next ring in q_vector */
|
||||
void *desc; /* Descriptor ring memory */
|
||||
struct device *dev; /* Used for DMA mapping */
|
||||
struct net_device *netdev; /* netdev ring maps to */
|
||||
@ -310,13 +323,24 @@ struct ice_rx_ring {
|
||||
u16 count; /* Number of descriptors */
|
||||
u16 reg_idx; /* HW register index of the ring */
|
||||
u16 next_to_alloc;
|
||||
/* CL2 - 2nd cacheline starts here */
|
||||
|
||||
union {
|
||||
struct ice_rx_buf *rx_buf;
|
||||
struct xdp_buff **xdp_buf;
|
||||
};
|
||||
struct xdp_buff xdp;
|
||||
/* CL2 - 2nd cacheline starts here */
|
||||
union {
|
||||
struct ice_xdp_buff xdp_ext;
|
||||
struct xdp_buff xdp;
|
||||
};
|
||||
/* CL3 - 3rd cacheline starts here */
|
||||
union {
|
||||
struct ice_pkt_ctx pkt_ctx;
|
||||
struct {
|
||||
u64 cached_phctime;
|
||||
__be16 vlan_proto;
|
||||
};
|
||||
};
|
||||
struct bpf_prog *xdp_prog;
|
||||
u16 rx_offset;
|
||||
|
||||
@ -332,9 +356,9 @@ struct ice_rx_ring {
|
||||
/* CL4 - 4th cacheline starts here */
|
||||
struct ice_channel *ch;
|
||||
struct ice_tx_ring *xdp_ring;
|
||||
struct ice_rx_ring *next; /* pointer to next ring in q_vector */
|
||||
struct xsk_buff_pool *xsk_pool;
|
||||
dma_addr_t dma; /* physical address of ring */
|
||||
u64 cached_phctime;
|
||||
u16 rx_buf_len;
|
||||
u8 dcb_tc; /* Traffic class of ring */
|
||||
u8 ptp_rx;
|
||||
|
@ -63,28 +63,42 @@ static enum pkt_hash_types ice_ptype_to_htype(u16 ptype)
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_rx_hash - set the hash value in the skb
|
||||
* ice_get_rx_hash - get RX hash value from descriptor
|
||||
* @rx_desc: specific descriptor
|
||||
*
|
||||
* Returns hash, if present, 0 otherwise.
|
||||
*/
|
||||
static u32 ice_get_rx_hash(const union ice_32b_rx_flex_desc *rx_desc)
|
||||
{
|
||||
const struct ice_32b_rx_flex_desc_nic *nic_mdid;
|
||||
|
||||
if (unlikely(rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC))
|
||||
return 0;
|
||||
|
||||
nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
|
||||
return le32_to_cpu(nic_mdid->rss_hash);
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_rx_hash_to_skb - set the hash value in the skb
|
||||
* @rx_ring: descriptor ring
|
||||
* @rx_desc: specific descriptor
|
||||
* @skb: pointer to current skb
|
||||
* @rx_ptype: the ptype value from the descriptor
|
||||
*/
|
||||
static void
|
||||
ice_rx_hash(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
|
||||
struct sk_buff *skb, u16 rx_ptype)
|
||||
ice_rx_hash_to_skb(const struct ice_rx_ring *rx_ring,
|
||||
const union ice_32b_rx_flex_desc *rx_desc,
|
||||
struct sk_buff *skb, u16 rx_ptype)
|
||||
{
|
||||
struct ice_32b_rx_flex_desc_nic *nic_mdid;
|
||||
u32 hash;
|
||||
|
||||
if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
|
||||
return;
|
||||
|
||||
if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
|
||||
return;
|
||||
|
||||
nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
|
||||
hash = le32_to_cpu(nic_mdid->rss_hash);
|
||||
skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
|
||||
hash = ice_get_rx_hash(rx_desc);
|
||||
if (likely(hash))
|
||||
skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
|
||||
}
|
||||
|
||||
/**
|
||||
@ -170,12 +184,39 @@ checksum_fail:
|
||||
ring->vsi->back->hw_csum_rx_error++;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_ptp_rx_hwts_to_skb - Put RX timestamp into skb
|
||||
* @rx_ring: Ring to get the VSI info
|
||||
* @rx_desc: Receive descriptor
|
||||
* @skb: Particular skb to send timestamp with
|
||||
*
|
||||
* The timestamp is in ns, so we must convert the result first.
|
||||
*/
|
||||
static void
|
||||
ice_ptp_rx_hwts_to_skb(struct ice_rx_ring *rx_ring,
|
||||
const union ice_32b_rx_flex_desc *rx_desc,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
u64 ts_ns = ice_ptp_get_rx_hwts(rx_desc, &rx_ring->pkt_ctx);
|
||||
|
||||
skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ts_ns);
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_get_ptype - Read HW packet type from the descriptor
|
||||
* @rx_desc: RX descriptor
|
||||
*/
|
||||
static u16 ice_get_ptype(const union ice_32b_rx_flex_desc *rx_desc)
|
||||
{
|
||||
return le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
|
||||
ICE_RX_FLEX_DESC_PTYPE_M;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_process_skb_fields - Populate skb header fields from Rx descriptor
|
||||
* @rx_ring: Rx descriptor ring packet is being transacted on
|
||||
* @rx_desc: pointer to the EOP Rx descriptor
|
||||
* @skb: pointer to current skb being populated
|
||||
* @ptype: the packet type decoded by hardware
|
||||
*
|
||||
* This function checks the ring, descriptor, and packet information in
|
||||
* order to populate the hash, checksum, VLAN, protocol, and
|
||||
@ -184,9 +225,11 @@ checksum_fail:
|
||||
void
|
||||
ice_process_skb_fields(struct ice_rx_ring *rx_ring,
|
||||
union ice_32b_rx_flex_desc *rx_desc,
|
||||
struct sk_buff *skb, u16 ptype)
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
ice_rx_hash(rx_ring, rx_desc, skb, ptype);
|
||||
u16 ptype = ice_get_ptype(rx_desc);
|
||||
|
||||
ice_rx_hash_to_skb(rx_ring, rx_desc, skb, ptype);
|
||||
|
||||
/* modifies the skb - consumes the enet header */
|
||||
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
|
||||
@ -194,28 +237,24 @@ ice_process_skb_fields(struct ice_rx_ring *rx_ring,
|
||||
ice_rx_csum(rx_ring, skb, rx_desc, ptype);
|
||||
|
||||
if (rx_ring->ptp_rx)
|
||||
ice_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
|
||||
ice_ptp_rx_hwts_to_skb(rx_ring, rx_desc, skb);
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_receive_skb - Send a completed packet up the stack
|
||||
* @rx_ring: Rx ring in play
|
||||
* @skb: packet to send up
|
||||
* @vlan_tag: VLAN tag for packet
|
||||
* @vlan_tci: VLAN TCI for packet
|
||||
*
|
||||
* This function sends the completed packet (via. skb) up the stack using
|
||||
* gro receive functions (with/without VLAN tag)
|
||||
*/
|
||||
void
|
||||
ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
|
||||
ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tci)
|
||||
{
|
||||
netdev_features_t features = rx_ring->netdev->features;
|
||||
bool non_zero_vlan = !!(vlan_tag & VLAN_VID_MASK);
|
||||
|
||||
if ((features & NETIF_F_HW_VLAN_CTAG_RX) && non_zero_vlan)
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
|
||||
else if ((features & NETIF_F_HW_VLAN_STAG_RX) && non_zero_vlan)
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag);
|
||||
if ((vlan_tci & VLAN_VID_MASK) && rx_ring->vlan_proto)
|
||||
__vlan_hwaccel_put_tag(skb, rx_ring->vlan_proto,
|
||||
vlan_tci);
|
||||
|
||||
napi_gro_receive(&rx_ring->q_vector->napi, skb);
|
||||
}
|
||||
@ -464,3 +503,125 @@ void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res,
|
||||
spin_unlock(&xdp_ring->tx_lock);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_xdp_rx_hw_ts - HW timestamp XDP hint handler
|
||||
* @ctx: XDP buff pointer
|
||||
* @ts_ns: destination address
|
||||
*
|
||||
* Copy HW timestamp (if available) to the destination address.
|
||||
*/
|
||||
static int ice_xdp_rx_hw_ts(const struct xdp_md *ctx, u64 *ts_ns)
|
||||
{
|
||||
const struct ice_xdp_buff *xdp_ext = (void *)ctx;
|
||||
|
||||
*ts_ns = ice_ptp_get_rx_hwts(xdp_ext->eop_desc,
|
||||
xdp_ext->pkt_ctx);
|
||||
if (!*ts_ns)
|
||||
return -ENODATA;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Define a ptype index -> XDP hash type lookup table.
|
||||
* It uses the same ptype definitions as ice_decode_rx_desc_ptype[],
|
||||
* avoiding possible copy-paste errors.
|
||||
*/
|
||||
#undef ICE_PTT
|
||||
#undef ICE_PTT_UNUSED_ENTRY
|
||||
|
||||
#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
|
||||
[PTYPE] = XDP_RSS_L3_##OUTER_IP_VER | XDP_RSS_L4_##I | XDP_RSS_TYPE_##PL
|
||||
|
||||
#define ICE_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = 0
|
||||
|
||||
/* A few supplementary definitions for when XDP hash types do not coincide
|
||||
* with what can be generated from ptype definitions
|
||||
* by means of preprocessor concatenation.
|
||||
*/
|
||||
#define XDP_RSS_L3_NONE XDP_RSS_TYPE_NONE
|
||||
#define XDP_RSS_L4_NONE XDP_RSS_TYPE_NONE
|
||||
#define XDP_RSS_TYPE_PAY2 XDP_RSS_TYPE_L2
|
||||
#define XDP_RSS_TYPE_PAY3 XDP_RSS_TYPE_NONE
|
||||
#define XDP_RSS_TYPE_PAY4 XDP_RSS_L4
|
||||
|
||||
static const enum xdp_rss_hash_type
|
||||
ice_ptype_to_xdp_hash[ICE_NUM_DEFINED_PTYPES] = {
|
||||
ICE_PTYPES
|
||||
};
|
||||
|
||||
#undef XDP_RSS_L3_NONE
|
||||
#undef XDP_RSS_L4_NONE
|
||||
#undef XDP_RSS_TYPE_PAY2
|
||||
#undef XDP_RSS_TYPE_PAY3
|
||||
#undef XDP_RSS_TYPE_PAY4
|
||||
|
||||
#undef ICE_PTT
|
||||
#undef ICE_PTT_UNUSED_ENTRY
|
||||
|
||||
/**
|
||||
* ice_xdp_rx_hash_type - Get XDP-specific hash type from the RX descriptor
|
||||
* @eop_desc: End of Packet descriptor
|
||||
*/
|
||||
static enum xdp_rss_hash_type
|
||||
ice_xdp_rx_hash_type(const union ice_32b_rx_flex_desc *eop_desc)
|
||||
{
|
||||
u16 ptype = ice_get_ptype(eop_desc);
|
||||
|
||||
if (unlikely(ptype >= ICE_NUM_DEFINED_PTYPES))
|
||||
return 0;
|
||||
|
||||
return ice_ptype_to_xdp_hash[ptype];
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_xdp_rx_hash - RX hash XDP hint handler
|
||||
* @ctx: XDP buff pointer
|
||||
* @hash: hash destination address
|
||||
* @rss_type: XDP hash type destination address
|
||||
*
|
||||
* Copy RX hash (if available) and its type to the destination address.
|
||||
*/
|
||||
static int ice_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
|
||||
enum xdp_rss_hash_type *rss_type)
|
||||
{
|
||||
const struct ice_xdp_buff *xdp_ext = (void *)ctx;
|
||||
|
||||
*hash = ice_get_rx_hash(xdp_ext->eop_desc);
|
||||
*rss_type = ice_xdp_rx_hash_type(xdp_ext->eop_desc);
|
||||
if (!likely(*hash))
|
||||
return -ENODATA;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_xdp_rx_vlan_tag - VLAN tag XDP hint handler
|
||||
* @ctx: XDP buff pointer
|
||||
* @vlan_proto: destination address for VLAN protocol
|
||||
* @vlan_tci: destination address for VLAN TCI
|
||||
*
|
||||
* Copy VLAN tag (if was stripped) and corresponding protocol
|
||||
* to the destination address.
|
||||
*/
|
||||
static int ice_xdp_rx_vlan_tag(const struct xdp_md *ctx, __be16 *vlan_proto,
|
||||
u16 *vlan_tci)
|
||||
{
|
||||
const struct ice_xdp_buff *xdp_ext = (void *)ctx;
|
||||
|
||||
*vlan_proto = xdp_ext->pkt_ctx->vlan_proto;
|
||||
if (!*vlan_proto)
|
||||
return -ENODATA;
|
||||
|
||||
*vlan_tci = ice_get_vlan_tci(xdp_ext->eop_desc);
|
||||
if (!*vlan_tci)
|
||||
return -ENODATA;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
const struct xdp_metadata_ops ice_xdp_md_ops = {
|
||||
.xmo_rx_timestamp = ice_xdp_rx_hw_ts,
|
||||
.xmo_rx_hash = ice_xdp_rx_hash,
|
||||
.xmo_rx_vlan_tag = ice_xdp_rx_vlan_tag,
|
||||
};
|
||||
|
@ -84,7 +84,7 @@ ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_get_vlan_tag_from_rx_desc - get VLAN from Rx flex descriptor
|
||||
* ice_get_vlan_tci - get VLAN TCI from Rx flex descriptor
|
||||
* @rx_desc: Rx 32b flex descriptor with RXDID=2
|
||||
*
|
||||
* The OS and current PF implementation only support stripping a single VLAN tag
|
||||
@ -92,7 +92,7 @@ ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
|
||||
* one is found return the tag, else return 0 to mean no VLAN tag was found.
|
||||
*/
|
||||
static inline u16
|
||||
ice_get_vlan_tag_from_rx_desc(union ice_32b_rx_flex_desc *rx_desc)
|
||||
ice_get_vlan_tci(const union ice_32b_rx_flex_desc *rx_desc)
|
||||
{
|
||||
u16 stat_err_bits;
|
||||
|
||||
@ -148,7 +148,17 @@ void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val);
|
||||
void
|
||||
ice_process_skb_fields(struct ice_rx_ring *rx_ring,
|
||||
union ice_32b_rx_flex_desc *rx_desc,
|
||||
struct sk_buff *skb, u16 ptype);
|
||||
struct sk_buff *skb);
|
||||
void
|
||||
ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag);
|
||||
ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tci);
|
||||
|
||||
static inline void
|
||||
ice_xdp_meta_set_desc(struct xdp_buff *xdp,
|
||||
union ice_32b_rx_flex_desc *eop_desc)
|
||||
{
|
||||
struct ice_xdp_buff *xdp_ext = container_of(xdp, struct ice_xdp_buff,
|
||||
xdp_buff);
|
||||
|
||||
xdp_ext->eop_desc = eop_desc;
|
||||
}
|
||||
#endif /* !_ICE_TXRX_LIB_H_ */
|
||||
|
@ -458,6 +458,11 @@ static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
|
||||
rx_desc->read.pkt_addr = cpu_to_le64(dma);
|
||||
rx_desc->wb.status_error0 = 0;
|
||||
|
||||
/* Put private info that changes on a per-packet basis
|
||||
* into xdp_buff_xsk->cb.
|
||||
*/
|
||||
ice_xdp_meta_set_desc(*xdp, rx_desc);
|
||||
|
||||
rx_desc++;
|
||||
xdp++;
|
||||
}
|
||||
@ -863,8 +868,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
|
||||
struct xdp_buff *xdp;
|
||||
struct sk_buff *skb;
|
||||
u16 stat_err_bits;
|
||||
u16 vlan_tag = 0;
|
||||
u16 rx_ptype;
|
||||
u16 vlan_tci;
|
||||
|
||||
rx_desc = ICE_RX_DESC(rx_ring, ntc);
|
||||
|
||||
@ -942,13 +946,10 @@ construct_skb:
|
||||
total_rx_bytes += skb->len;
|
||||
total_rx_packets++;
|
||||
|
||||
vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);
|
||||
vlan_tci = ice_get_vlan_tci(rx_desc);
|
||||
|
||||
rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
|
||||
ICE_RX_FLEX_DESC_PTYPE_M;
|
||||
|
||||
ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
|
||||
ice_receive_skb(rx_ring, skb, vlan_tag);
|
||||
ice_process_skb_fields(rx_ring, rx_desc, skb);
|
||||
ice_receive_skb(rx_ring, skb, vlan_tci);
|
||||
}
|
||||
|
||||
rx_ring->next_to_clean = ntc;
|
||||
|
@ -256,9 +256,24 @@ static int mlx5e_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mlx5e_xdp_rx_vlan_tag(const struct xdp_md *ctx, __be16 *vlan_proto,
|
||||
u16 *vlan_tci)
|
||||
{
|
||||
const struct mlx5e_xdp_buff *_ctx = (void *)ctx;
|
||||
const struct mlx5_cqe64 *cqe = _ctx->cqe;
|
||||
|
||||
if (!cqe_has_vlan(cqe))
|
||||
return -ENODATA;
|
||||
|
||||
*vlan_proto = htons(ETH_P_8021Q);
|
||||
*vlan_tci = be16_to_cpu(cqe->vlan_info);
|
||||
return 0;
|
||||
}
|
||||
|
||||
const struct xdp_metadata_ops mlx5e_xdp_metadata_ops = {
|
||||
.xmo_rx_timestamp = mlx5e_xdp_rx_timestamp,
|
||||
.xmo_rx_hash = mlx5e_xdp_rx_hash,
|
||||
.xmo_rx_vlan_tag = mlx5e_xdp_rx_vlan_tag,
|
||||
};
|
||||
|
||||
struct mlx5e_xsk_tx_complete {
|
||||
|
@ -1723,6 +1723,24 @@ static int veth_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int veth_xdp_rx_vlan_tag(const struct xdp_md *ctx, __be16 *vlan_proto,
|
||||
u16 *vlan_tci)
|
||||
{
|
||||
const struct veth_xdp_buff *_ctx = (void *)ctx;
|
||||
const struct sk_buff *skb = _ctx->skb;
|
||||
int err;
|
||||
|
||||
if (!skb)
|
||||
return -ENODATA;
|
||||
|
||||
err = __vlan_hwaccel_get_tag(skb, vlan_tci);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
*vlan_proto = skb->vlan_proto;
|
||||
return err;
|
||||
}
|
||||
|
||||
static const struct net_device_ops veth_netdev_ops = {
|
||||
.ndo_init = veth_dev_init,
|
||||
.ndo_open = veth_open,
|
||||
@ -1747,6 +1765,7 @@ static const struct net_device_ops veth_netdev_ops = {
|
||||
static const struct xdp_metadata_ops veth_xdp_metadata_ops = {
|
||||
.xmo_rx_timestamp = veth_xdp_rx_timestamp,
|
||||
.xmo_rx_hash = veth_xdp_rx_hash,
|
||||
.xmo_rx_vlan_tag = veth_xdp_rx_vlan_tag,
|
||||
};
|
||||
|
||||
#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
|
||||
|
@ -100,6 +100,16 @@ fsverity_msg(const struct inode *inode, const char *level,
|
||||
#define fsverity_err(inode, fmt, ...) \
|
||||
fsverity_msg((inode), KERN_ERR, fmt, ##__VA_ARGS__)
|
||||
|
||||
/* measure.c */
|
||||
|
||||
#ifdef CONFIG_BPF_SYSCALL
|
||||
void __init fsverity_init_bpf(void);
|
||||
#else
|
||||
static inline void fsverity_init_bpf(void)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
/* open.c */
|
||||
|
||||
int fsverity_init_merkle_tree_params(struct merkle_tree_params *params,
|
||||
|
@ -69,6 +69,7 @@ static int __init fsverity_init(void)
|
||||
fsverity_init_workqueue();
|
||||
fsverity_init_sysctl();
|
||||
fsverity_init_signature();
|
||||
fsverity_init_bpf();
|
||||
return 0;
|
||||
}
|
||||
late_initcall(fsverity_init)
|
||||
|
@ -7,6 +7,8 @@
|
||||
|
||||
#include "fsverity_private.h"
|
||||
|
||||
#include <linux/bpf.h>
|
||||
#include <linux/btf.h>
|
||||
#include <linux/uaccess.h>
|
||||
|
||||
/**
|
||||
@ -100,3 +102,85 @@ int fsverity_get_digest(struct inode *inode,
|
||||
return hash_alg->digest_size;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fsverity_get_digest);
|
||||
|
||||
#ifdef CONFIG_BPF_SYSCALL
|
||||
|
||||
/* bpf kfuncs */
|
||||
__bpf_kfunc_start_defs();
|
||||
|
||||
/**
|
||||
* bpf_get_fsverity_digest: read fsverity digest of file
|
||||
* @file: file to get digest from
|
||||
* @digest_ptr: (out) dynptr for struct fsverity_digest
|
||||
*
|
||||
* Read fsverity_digest of *file* into *digest_ptr*.
|
||||
*
|
||||
* Return: 0 on success, a negative value on error.
|
||||
*/
|
||||
__bpf_kfunc int bpf_get_fsverity_digest(struct file *file, struct bpf_dynptr_kern *digest_ptr)
|
||||
{
|
||||
const struct inode *inode = file_inode(file);
|
||||
u32 dynptr_sz = __bpf_dynptr_size(digest_ptr);
|
||||
struct fsverity_digest *arg;
|
||||
const struct fsverity_info *vi;
|
||||
const struct fsverity_hash_alg *hash_alg;
|
||||
int out_digest_sz;
|
||||
|
||||
if (dynptr_sz < sizeof(struct fsverity_digest))
|
||||
return -EINVAL;
|
||||
|
||||
arg = __bpf_dynptr_data_rw(digest_ptr, dynptr_sz);
|
||||
if (!arg)
|
||||
return -EINVAL;
|
||||
|
||||
if (!IS_ALIGNED((uintptr_t)arg, __alignof__(*arg)))
|
||||
return -EINVAL;
|
||||
|
||||
vi = fsverity_get_info(inode);
|
||||
if (!vi)
|
||||
return -ENODATA; /* not a verity file */
|
||||
|
||||
hash_alg = vi->tree_params.hash_alg;
|
||||
|
||||
arg->digest_algorithm = hash_alg - fsverity_hash_algs;
|
||||
arg->digest_size = hash_alg->digest_size;
|
||||
|
||||
out_digest_sz = dynptr_sz - sizeof(struct fsverity_digest);
|
||||
|
||||
/* copy digest */
|
||||
memcpy(arg->digest, vi->file_digest, min_t(int, hash_alg->digest_size, out_digest_sz));
|
||||
|
||||
/* fill the extra buffer with zeros */
|
||||
if (out_digest_sz > hash_alg->digest_size)
|
||||
memset(arg->digest + arg->digest_size, 0, out_digest_sz - hash_alg->digest_size);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
__bpf_kfunc_end_defs();
|
||||
|
||||
BTF_SET8_START(fsverity_set_ids)
|
||||
BTF_ID_FLAGS(func, bpf_get_fsverity_digest, KF_TRUSTED_ARGS)
|
||||
BTF_SET8_END(fsverity_set_ids)
|
||||
|
||||
static int bpf_get_fsverity_digest_filter(const struct bpf_prog *prog, u32 kfunc_id)
|
||||
{
|
||||
if (!btf_id_set8_contains(&fsverity_set_ids, kfunc_id))
|
||||
return 0;
|
||||
|
||||
/* Only allow to attach from LSM hooks, to avoid recursion */
|
||||
return prog->type != BPF_PROG_TYPE_LSM ? -EACCES : 0;
|
||||
}
|
||||
|
||||
static const struct btf_kfunc_id_set bpf_fsverity_set = {
|
||||
.owner = THIS_MODULE,
|
||||
.set = &fsverity_set_ids,
|
||||
.filter = bpf_get_fsverity_digest_filter,
|
||||
};
|
||||
|
||||
void __init fsverity_init_bpf(void)
|
||||
{
|
||||
register_btf_kfunc_id_set(BPF_PROG_TYPE_LSM, &bpf_fsverity_set);
|
||||
}
|
||||
|
||||
#endif /* CONFIG_BPF_SYSCALL */
|
||||
|
@ -11,6 +11,7 @@ mandatory-y += bitops.h
|
||||
mandatory-y += bug.h
|
||||
mandatory-y += bugs.h
|
||||
mandatory-y += cacheflush.h
|
||||
mandatory-y += cfi.h
|
||||
mandatory-y += checksum.h
|
||||
mandatory-y += compat.h
|
||||
mandatory-y += current.h
|
||||
|
5
include/asm-generic/cfi.h
Normal file
5
include/asm-generic/cfi.h
Normal file
@ -0,0 +1,5 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef __ASM_GENERIC_CFI_H
|
||||
#define __ASM_GENERIC_CFI_H
|
||||
|
||||
#endif /* __ASM_GENERIC_CFI_H */
|
@ -29,6 +29,7 @@
|
||||
#include <linux/rcupdate_trace.h>
|
||||
#include <linux/static_call.h>
|
||||
#include <linux/memcontrol.h>
|
||||
#include <linux/cfi.h>
|
||||
|
||||
struct bpf_verifier_env;
|
||||
struct bpf_verifier_log;
|
||||
@ -51,6 +52,10 @@ struct module;
|
||||
struct bpf_func_state;
|
||||
struct ftrace_ops;
|
||||
struct cgroup;
|
||||
struct bpf_token;
|
||||
struct user_namespace;
|
||||
struct super_block;
|
||||
struct inode;
|
||||
|
||||
extern struct idr btf_idr;
|
||||
extern spinlock_t btf_idr_lock;
|
||||
@ -106,7 +111,11 @@ struct bpf_map_ops {
|
||||
/* funcs called by prog_array and perf_event_array map */
|
||||
void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
|
||||
int fd);
|
||||
void (*map_fd_put_ptr)(void *ptr);
|
||||
/* If need_defer is true, the implementation should guarantee that
|
||||
* the to-be-put element is still alive before the bpf program, which
|
||||
* may manipulate it, exists.
|
||||
*/
|
||||
void (*map_fd_put_ptr)(struct bpf_map *map, void *ptr, bool need_defer);
|
||||
int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
|
||||
u32 (*map_fd_sys_lookup_elem)(void *ptr);
|
||||
void (*map_seq_show_elem)(struct bpf_map *map, void *key,
|
||||
@ -272,7 +281,11 @@ struct bpf_map {
|
||||
*/
|
||||
atomic64_t refcnt ____cacheline_aligned;
|
||||
atomic64_t usercnt;
|
||||
struct work_struct work;
|
||||
/* rcu is used before freeing and work is only used during freeing */
|
||||
union {
|
||||
struct work_struct work;
|
||||
struct rcu_head rcu;
|
||||
};
|
||||
struct mutex freeze_mutex;
|
||||
atomic64_t writecnt;
|
||||
/* 'Ownership' of program-containing map is claimed by the first program
|
||||
@ -288,6 +301,9 @@ struct bpf_map {
|
||||
} owner;
|
||||
bool bypass_spec_v1;
|
||||
bool frozen; /* write-once; write-protected by freeze_mutex */
|
||||
bool free_after_mult_rcu_gp;
|
||||
bool free_after_rcu_gp;
|
||||
atomic64_t sleepable_refcnt;
|
||||
s64 __percpu *elem_count;
|
||||
};
|
||||
|
||||
@ -1044,6 +1060,17 @@ struct btf_func_model {
|
||||
*/
|
||||
#define BPF_TRAMP_F_TAIL_CALL_CTX BIT(7)
|
||||
|
||||
/*
|
||||
* Indicate the trampoline should be suitable to receive indirect calls;
|
||||
* without this indirectly calling the generated code can result in #UD/#CP,
|
||||
* depending on the CFI options.
|
||||
*
|
||||
* Used by bpf_struct_ops.
|
||||
*
|
||||
* Incompatible with FENTRY usage, overloads @func_addr argument.
|
||||
*/
|
||||
#define BPF_TRAMP_F_INDIRECT BIT(8)
|
||||
|
||||
/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
|
||||
* bytes on x86.
|
||||
*/
|
||||
@ -1083,10 +1110,17 @@ struct bpf_tramp_run_ctx;
|
||||
* fexit = a set of program to run after original function
|
||||
*/
|
||||
struct bpf_tramp_image;
|
||||
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
|
||||
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
|
||||
const struct btf_func_model *m, u32 flags,
|
||||
struct bpf_tramp_links *tlinks,
|
||||
void *orig_call);
|
||||
void *func_addr);
|
||||
void *arch_alloc_bpf_trampoline(unsigned int size);
|
||||
void arch_free_bpf_trampoline(void *image, unsigned int size);
|
||||
void arch_protect_bpf_trampoline(void *image, unsigned int size);
|
||||
void arch_unprotect_bpf_trampoline(void *image, unsigned int size);
|
||||
int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
|
||||
struct bpf_tramp_links *tlinks, void *func_addr);
|
||||
|
||||
u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
|
||||
struct bpf_tramp_run_ctx *run_ctx);
|
||||
void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start,
|
||||
@ -1119,6 +1153,7 @@ enum bpf_tramp_prog_type {
|
||||
|
||||
struct bpf_tramp_image {
|
||||
void *image;
|
||||
int size;
|
||||
struct bpf_ksym ksym;
|
||||
struct percpu_ref pcref;
|
||||
void *ip_after_call;
|
||||
@ -1188,7 +1223,11 @@ struct bpf_dispatcher {
|
||||
#endif
|
||||
};
|
||||
|
||||
static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func(
|
||||
#ifndef __bpfcall
|
||||
#define __bpfcall __nocfi
|
||||
#endif
|
||||
|
||||
static __always_inline __bpfcall unsigned int bpf_dispatcher_nop_func(
|
||||
const void *ctx,
|
||||
const struct bpf_insn *insnsi,
|
||||
bpf_func_t bpf_func)
|
||||
@ -1280,7 +1319,7 @@ int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_func
|
||||
|
||||
#define DEFINE_BPF_DISPATCHER(name) \
|
||||
__BPF_DISPATCHER_SC(name); \
|
||||
noinline __nocfi unsigned int bpf_dispatcher_##name##_func( \
|
||||
noinline __bpfcall unsigned int bpf_dispatcher_##name##_func( \
|
||||
const void *ctx, \
|
||||
const struct bpf_insn *insnsi, \
|
||||
bpf_func_t bpf_func) \
|
||||
@ -1303,7 +1342,7 @@ int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_func
|
||||
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
|
||||
struct bpf_prog *to);
|
||||
/* Called only from JIT-enabled code, so there's no need for stubs. */
|
||||
void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
|
||||
void bpf_image_ksym_add(void *data, unsigned int size, struct bpf_ksym *ksym);
|
||||
void bpf_image_ksym_del(struct bpf_ksym *ksym);
|
||||
void bpf_ksym_add(struct bpf_ksym *ksym);
|
||||
void bpf_ksym_del(struct bpf_ksym *ksym);
|
||||
@ -1430,6 +1469,9 @@ struct bpf_prog_aux {
|
||||
struct bpf_kfunc_desc_tab *kfunc_tab;
|
||||
struct bpf_kfunc_btf_tab *kfunc_btf_tab;
|
||||
u32 size_poke_tab;
|
||||
#ifdef CONFIG_FINEIBT
|
||||
struct bpf_ksym ksym_prefix;
|
||||
#endif
|
||||
struct bpf_ksym ksym;
|
||||
const struct bpf_prog_ops *ops;
|
||||
struct bpf_map **used_maps;
|
||||
@ -1442,10 +1484,11 @@ struct bpf_prog_aux {
|
||||
int cgroup_atype; /* enum cgroup_bpf_attach_type */
|
||||
struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
|
||||
char name[BPF_OBJ_NAME_LEN];
|
||||
unsigned int (*bpf_exception_cb)(u64 cookie, u64 sp, u64 bp);
|
||||
u64 (*bpf_exception_cb)(u64 cookie, u64 sp, u64 bp, u64, u64);
|
||||
#ifdef CONFIG_SECURITY
|
||||
void *security;
|
||||
#endif
|
||||
struct bpf_token *token;
|
||||
struct bpf_prog_offload *offload;
|
||||
struct btf *btf;
|
||||
struct bpf_func_info *func_info;
|
||||
@ -1570,6 +1613,31 @@ struct bpf_link_primer {
|
||||
u32 id;
|
||||
};
|
||||
|
||||
struct bpf_mount_opts {
|
||||
kuid_t uid;
|
||||
kgid_t gid;
|
||||
umode_t mode;
|
||||
|
||||
/* BPF token-related delegation options */
|
||||
u64 delegate_cmds;
|
||||
u64 delegate_maps;
|
||||
u64 delegate_progs;
|
||||
u64 delegate_attachs;
|
||||
};
|
||||
|
||||
struct bpf_token {
|
||||
struct work_struct work;
|
||||
atomic64_t refcnt;
|
||||
struct user_namespace *userns;
|
||||
u64 allowed_cmds;
|
||||
u64 allowed_maps;
|
||||
u64 allowed_progs;
|
||||
u64 allowed_attachs;
|
||||
#ifdef CONFIG_SECURITY
|
||||
void *security;
|
||||
#endif
|
||||
};
|
||||
|
||||
struct bpf_struct_ops_value;
|
||||
struct btf_member;
|
||||
|
||||
@ -1640,6 +1708,7 @@ struct bpf_struct_ops {
|
||||
struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
|
||||
u32 type_id;
|
||||
u32 value_id;
|
||||
void *cfi_stubs;
|
||||
};
|
||||
|
||||
#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
|
||||
@ -1653,6 +1722,7 @@ int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
|
||||
int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
|
||||
struct bpf_tramp_link *link,
|
||||
const struct btf_func_model *model,
|
||||
void *stub_func,
|
||||
void *image, void *image_end);
|
||||
static inline bool bpf_try_module_get(const void *data, struct module *owner)
|
||||
{
|
||||
@ -2027,6 +2097,7 @@ static inline void bpf_enable_instrumentation(void)
|
||||
migrate_enable();
|
||||
}
|
||||
|
||||
extern const struct super_operations bpf_super_ops;
|
||||
extern const struct file_operations bpf_map_fops;
|
||||
extern const struct file_operations bpf_prog_fops;
|
||||
extern const struct file_operations bpf_iter_fops;
|
||||
@ -2161,24 +2232,26 @@ static inline void bpf_map_dec_elem_count(struct bpf_map *map)
|
||||
|
||||
extern int sysctl_unprivileged_bpf_disabled;
|
||||
|
||||
static inline bool bpf_allow_ptr_leaks(void)
|
||||
bool bpf_token_capable(const struct bpf_token *token, int cap);
|
||||
|
||||
static inline bool bpf_allow_ptr_leaks(const struct bpf_token *token)
|
||||
{
|
||||
return perfmon_capable();
|
||||
return bpf_token_capable(token, CAP_PERFMON);
|
||||
}
|
||||
|
||||
static inline bool bpf_allow_uninit_stack(void)
|
||||
static inline bool bpf_allow_uninit_stack(const struct bpf_token *token)
|
||||
{
|
||||
return perfmon_capable();
|
||||
return bpf_token_capable(token, CAP_PERFMON);
|
||||
}
|
||||
|
||||
static inline bool bpf_bypass_spec_v1(void)
|
||||
static inline bool bpf_bypass_spec_v1(const struct bpf_token *token)
|
||||
{
|
||||
return cpu_mitigations_off() || perfmon_capable();
|
||||
return cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON);
|
||||
}
|
||||
|
||||
static inline bool bpf_bypass_spec_v4(void)
|
||||
static inline bool bpf_bypass_spec_v4(const struct bpf_token *token)
|
||||
{
|
||||
return cpu_mitigations_off() || perfmon_capable();
|
||||
return cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON);
|
||||
}
|
||||
|
||||
int bpf_map_new_fd(struct bpf_map *map, int flags);
|
||||
@ -2195,8 +2268,21 @@ int bpf_link_new_fd(struct bpf_link *link);
|
||||
struct bpf_link *bpf_link_get_from_fd(u32 ufd);
|
||||
struct bpf_link *bpf_link_get_curr_or_next(u32 *id);
|
||||
|
||||
void bpf_token_inc(struct bpf_token *token);
|
||||
void bpf_token_put(struct bpf_token *token);
|
||||
int bpf_token_create(union bpf_attr *attr);
|
||||
struct bpf_token *bpf_token_get_from_fd(u32 ufd);
|
||||
|
||||
bool bpf_token_allow_cmd(const struct bpf_token *token, enum bpf_cmd cmd);
|
||||
bool bpf_token_allow_map_type(const struct bpf_token *token, enum bpf_map_type type);
|
||||
bool bpf_token_allow_prog_type(const struct bpf_token *token,
|
||||
enum bpf_prog_type prog_type,
|
||||
enum bpf_attach_type attach_type);
|
||||
|
||||
int bpf_obj_pin_user(u32 ufd, int path_fd, const char __user *pathname);
|
||||
int bpf_obj_get_user(int path_fd, const char __user *pathname, int flags);
|
||||
struct inode *bpf_get_inode(struct super_block *sb, const struct inode *dir,
|
||||
umode_t mode);
|
||||
|
||||
#define BPF_ITER_FUNC_PREFIX "bpf_iter_"
|
||||
#define DEFINE_BPF_ITER_FUNC(target, args...) \
|
||||
@ -2431,7 +2517,7 @@ int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog,
|
||||
int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog,
|
||||
struct bpf_reg_state *regs);
|
||||
int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
|
||||
struct bpf_reg_state *reg, bool is_ex_cb);
|
||||
struct bpf_reg_state *reg, u32 *nargs);
|
||||
int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
|
||||
struct btf *btf, const struct btf_type *t);
|
||||
const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type *pt,
|
||||
@ -2440,7 +2526,8 @@ const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type
|
||||
struct bpf_prog *bpf_prog_by_id(u32 id);
|
||||
struct bpf_link *bpf_link_by_id(u32 id);
|
||||
|
||||
const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id);
|
||||
const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id,
|
||||
const struct bpf_prog *prog);
|
||||
void bpf_task_storage_free(struct task_struct *task);
|
||||
void bpf_cgrp_storage_free(struct cgroup *cgroup);
|
||||
bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog);
|
||||
@ -2559,6 +2646,24 @@ static inline int bpf_obj_get_user(const char __user *pathname, int flags)
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static inline bool bpf_token_capable(const struct bpf_token *token, int cap)
|
||||
{
|
||||
return capable(cap) || (cap != CAP_SYS_ADMIN && capable(CAP_SYS_ADMIN));
|
||||
}
|
||||
|
||||
static inline void bpf_token_inc(struct bpf_token *token)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void bpf_token_put(struct bpf_token *token)
|
||||
{
|
||||
}
|
||||
|
||||
static inline struct bpf_token *bpf_token_get_from_fd(u32 ufd)
|
||||
{
|
||||
return ERR_PTR(-EOPNOTSUPP);
|
||||
}
|
||||
|
||||
static inline void __dev_flush(void)
|
||||
{
|
||||
}
|
||||
@ -2682,7 +2787,7 @@ static inline int btf_struct_access(struct bpf_verifier_log *log,
|
||||
}
|
||||
|
||||
static inline const struct bpf_func_proto *
|
||||
bpf_base_func_proto(enum bpf_func_id func_id)
|
||||
bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
@ -275,6 +275,11 @@ struct bpf_reference_state {
|
||||
int callback_ref;
|
||||
};
|
||||
|
||||
struct bpf_retval_range {
|
||||
s32 minval;
|
||||
s32 maxval;
|
||||
};
|
||||
|
||||
/* state of the program:
|
||||
* type of all registers and stack info
|
||||
*/
|
||||
@ -297,8 +302,8 @@ struct bpf_func_state {
|
||||
* void foo(void) { bpf_timer_set_callback(,foo); }
|
||||
*/
|
||||
u32 async_entry_cnt;
|
||||
struct bpf_retval_range callback_ret_range;
|
||||
bool in_callback_fn;
|
||||
struct tnum callback_ret_range;
|
||||
bool in_async_callback_fn;
|
||||
bool in_exception_callback_fn;
|
||||
/* For callback calling functions that limit number of possible
|
||||
@ -316,16 +321,48 @@ struct bpf_func_state {
|
||||
/* The following fields should be last. See copy_func_state() */
|
||||
int acquired_refs;
|
||||
struct bpf_reference_state *refs;
|
||||
int allocated_stack;
|
||||
/* The state of the stack. Each element of the array describes BPF_REG_SIZE
|
||||
* (i.e. 8) bytes worth of stack memory.
|
||||
* stack[0] represents bytes [*(r10-8)..*(r10-1)]
|
||||
* stack[1] represents bytes [*(r10-16)..*(r10-9)]
|
||||
* ...
|
||||
* stack[allocated_stack/8 - 1] represents [*(r10-allocated_stack)..*(r10-allocated_stack+7)]
|
||||
*/
|
||||
struct bpf_stack_state *stack;
|
||||
};
|
||||
|
||||
struct bpf_idx_pair {
|
||||
u32 prev_idx;
|
||||
u32 idx;
|
||||
/* Size of the current stack, in bytes. The stack state is tracked below, in
|
||||
* `stack`. allocated_stack is always a multiple of BPF_REG_SIZE.
|
||||
*/
|
||||
int allocated_stack;
|
||||
};
|
||||
|
||||
#define MAX_CALL_FRAMES 8
|
||||
|
||||
/* instruction history flags, used in bpf_jmp_history_entry.flags field */
|
||||
enum {
|
||||
/* instruction references stack slot through PTR_TO_STACK register;
|
||||
* we also store stack's frame number in lower 3 bits (MAX_CALL_FRAMES is 8)
|
||||
* and accessed stack slot's index in next 6 bits (MAX_BPF_STACK is 512,
|
||||
* 8 bytes per slot, so slot index (spi) is [0, 63])
|
||||
*/
|
||||
INSN_F_FRAMENO_MASK = 0x7, /* 3 bits */
|
||||
|
||||
INSN_F_SPI_MASK = 0x3f, /* 6 bits */
|
||||
INSN_F_SPI_SHIFT = 3, /* shifted 3 bits to the left */
|
||||
|
||||
INSN_F_STACK_ACCESS = BIT(9), /* we need 10 bits total */
|
||||
};
|
||||
|
||||
static_assert(INSN_F_FRAMENO_MASK + 1 >= MAX_CALL_FRAMES);
|
||||
static_assert(INSN_F_SPI_MASK + 1 >= MAX_BPF_STACK / 8);
|
||||
|
||||
struct bpf_jmp_history_entry {
|
||||
u32 idx;
|
||||
/* insn idx can't be bigger than 1 million */
|
||||
u32 prev_idx : 22;
|
||||
/* special flags, e.g., whether insn is doing register stack spill/load */
|
||||
u32 flags : 10;
|
||||
};
|
||||
|
||||
/* Maximum number of register states that can exist at once */
|
||||
#define BPF_ID_MAP_SIZE ((MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) * MAX_CALL_FRAMES)
|
||||
struct bpf_verifier_state {
|
||||
@ -408,7 +445,7 @@ struct bpf_verifier_state {
|
||||
* For most states jmp_history_cnt is [0-3].
|
||||
* For loops can go up to ~40.
|
||||
*/
|
||||
struct bpf_idx_pair *jmp_history;
|
||||
struct bpf_jmp_history_entry *jmp_history;
|
||||
u32 jmp_history_cnt;
|
||||
u32 dfs_depth;
|
||||
u32 callback_unroll_depth;
|
||||
@ -574,12 +611,12 @@ struct bpf_subprog_info {
|
||||
u32 start; /* insn idx of function entry point */
|
||||
u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
|
||||
u16 stack_depth; /* max. stack depth used by this function */
|
||||
bool has_tail_call;
|
||||
bool tail_call_reachable;
|
||||
bool has_ld_abs;
|
||||
bool is_cb;
|
||||
bool is_async_cb;
|
||||
bool is_exception_cb;
|
||||
bool has_tail_call: 1;
|
||||
bool tail_call_reachable: 1;
|
||||
bool has_ld_abs: 1;
|
||||
bool is_cb: 1;
|
||||
bool is_async_cb: 1;
|
||||
bool is_exception_cb: 1;
|
||||
};
|
||||
|
||||
struct bpf_verifier_env;
|
||||
@ -631,6 +668,10 @@ struct bpf_verifier_env {
|
||||
int exception_callback_subprog;
|
||||
bool explore_alu_limits;
|
||||
bool allow_ptr_leaks;
|
||||
/* Allow access to uninitialized stack memory. Writes with fixed offset are
|
||||
* always allowed, so this refers to reads (with fixed or variable offset),
|
||||
* to writes with variable offset and to indirect (helper) accesses.
|
||||
*/
|
||||
bool allow_uninit_stack;
|
||||
bool bpf_capable;
|
||||
bool bypass_spec_v1;
|
||||
@ -651,6 +692,7 @@ struct bpf_verifier_env {
|
||||
int cur_stack;
|
||||
} cfg;
|
||||
struct backtrack_state bt;
|
||||
struct bpf_jmp_history_entry *cur_hist_ent;
|
||||
u32 pass_cnt; /* number of times do_check() was called */
|
||||
u32 subprog_cnt;
|
||||
/* number of instructions analyzed by the verifier */
|
||||
|
@ -9,6 +9,14 @@
|
||||
|
||||
#include <linux/bug.h>
|
||||
#include <linux/module.h>
|
||||
#include <asm/cfi.h>
|
||||
|
||||
#ifndef cfi_get_offset
|
||||
static inline int cfi_get_offset(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_CFI_CLANG
|
||||
enum bug_trap_type report_cfi_failure(struct pt_regs *regs, unsigned long addr,
|
||||
@ -38,4 +46,8 @@ static inline void module_cfi_finalize(const Elf_Ehdr *hdr,
|
||||
#endif /* CONFIG_ARCH_USES_CFI_TRAPS */
|
||||
#endif /* CONFIG_MODULES */
|
||||
|
||||
#ifndef CFI_NOSEAL
|
||||
#define CFI_NOSEAL(x)
|
||||
#endif
|
||||
|
||||
#endif /* _LINUX_CFI_H */
|
||||
|
@ -1067,7 +1067,7 @@ struct bpf_binary_header *
|
||||
bpf_jit_binary_pack_hdr(const struct bpf_prog *fp);
|
||||
|
||||
void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns);
|
||||
void bpf_prog_pack_free(struct bpf_binary_header *hdr);
|
||||
void bpf_prog_pack_free(void *ptr, u32 size);
|
||||
|
||||
static inline bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
|
||||
{
|
||||
@ -1139,7 +1139,7 @@ static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
|
||||
return false;
|
||||
if (!bpf_jit_harden)
|
||||
return false;
|
||||
if (bpf_jit_harden == 1 && bpf_capable())
|
||||
if (bpf_jit_harden == 1 && bpf_token_capable(prog->aux->token, CAP_BPF))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
|
@ -540,7 +540,7 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
|
||||
struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
|
||||
|
||||
if (!eth_type_vlan(veth->h_vlan_proto))
|
||||
return -EINVAL;
|
||||
return -ENODATA;
|
||||
|
||||
*vlan_tci = ntohs(veth->h_vlan_TCI);
|
||||
return 0;
|
||||
@ -561,7 +561,7 @@ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
|
||||
return 0;
|
||||
} else {
|
||||
*vlan_tci = 0;
|
||||
return -EINVAL;
|
||||
return -ENODATA;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -398,10 +398,17 @@ LSM_HOOK(void, LSM_RET_VOID, audit_rule_free, void *lsmrule)
|
||||
LSM_HOOK(int, 0, bpf, int cmd, union bpf_attr *attr, unsigned int size)
|
||||
LSM_HOOK(int, 0, bpf_map, struct bpf_map *map, fmode_t fmode)
|
||||
LSM_HOOK(int, 0, bpf_prog, struct bpf_prog *prog)
|
||||
LSM_HOOK(int, 0, bpf_map_alloc_security, struct bpf_map *map)
|
||||
LSM_HOOK(void, LSM_RET_VOID, bpf_map_free_security, struct bpf_map *map)
|
||||
LSM_HOOK(int, 0, bpf_prog_alloc_security, struct bpf_prog_aux *aux)
|
||||
LSM_HOOK(void, LSM_RET_VOID, bpf_prog_free_security, struct bpf_prog_aux *aux)
|
||||
LSM_HOOK(int, 0, bpf_map_create, struct bpf_map *map, union bpf_attr *attr,
|
||||
struct bpf_token *token)
|
||||
LSM_HOOK(void, LSM_RET_VOID, bpf_map_free, struct bpf_map *map)
|
||||
LSM_HOOK(int, 0, bpf_prog_load, struct bpf_prog *prog, union bpf_attr *attr,
|
||||
struct bpf_token *token)
|
||||
LSM_HOOK(void, LSM_RET_VOID, bpf_prog_free, struct bpf_prog *prog)
|
||||
LSM_HOOK(int, 0, bpf_token_create, struct bpf_token *token, union bpf_attr *attr,
|
||||
struct path *path)
|
||||
LSM_HOOK(void, LSM_RET_VOID, bpf_token_free, struct bpf_token *token)
|
||||
LSM_HOOK(int, 0, bpf_token_cmd, const struct bpf_token *token, enum bpf_cmd cmd)
|
||||
LSM_HOOK(int, 0, bpf_token_capable, const struct bpf_token *token, int cap)
|
||||
#endif /* CONFIG_BPF_SYSCALL */
|
||||
|
||||
LSM_HOOK(int, 0, locked_down, enum lockdown_reason what)
|
||||
|
@ -918,7 +918,7 @@ static inline u8 get_cqe_tls_offload(struct mlx5_cqe64 *cqe)
|
||||
return (cqe->tls_outer_l3_tunneled >> 3) & 0x3;
|
||||
}
|
||||
|
||||
static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe)
|
||||
static inline bool cqe_has_vlan(const struct mlx5_cqe64 *cqe)
|
||||
{
|
||||
return cqe->l4_l3_hdr_type & 0x1;
|
||||
}
|
||||
|
@ -32,6 +32,7 @@
|
||||
#include <linux/string.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/sockptr.h>
|
||||
#include <linux/bpf.h>
|
||||
|
||||
struct linux_binprm;
|
||||
struct cred;
|
||||
@ -2020,15 +2021,22 @@ static inline void securityfs_remove(struct dentry *dentry)
|
||||
union bpf_attr;
|
||||
struct bpf_map;
|
||||
struct bpf_prog;
|
||||
struct bpf_prog_aux;
|
||||
struct bpf_token;
|
||||
#ifdef CONFIG_SECURITY
|
||||
extern int security_bpf(int cmd, union bpf_attr *attr, unsigned int size);
|
||||
extern int security_bpf_map(struct bpf_map *map, fmode_t fmode);
|
||||
extern int security_bpf_prog(struct bpf_prog *prog);
|
||||
extern int security_bpf_map_alloc(struct bpf_map *map);
|
||||
extern int security_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
|
||||
struct bpf_token *token);
|
||||
extern void security_bpf_map_free(struct bpf_map *map);
|
||||
extern int security_bpf_prog_alloc(struct bpf_prog_aux *aux);
|
||||
extern void security_bpf_prog_free(struct bpf_prog_aux *aux);
|
||||
extern int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr,
|
||||
struct bpf_token *token);
|
||||
extern void security_bpf_prog_free(struct bpf_prog *prog);
|
||||
extern int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr,
|
||||
struct path *path);
|
||||
extern void security_bpf_token_free(struct bpf_token *token);
|
||||
extern int security_bpf_token_cmd(const struct bpf_token *token, enum bpf_cmd cmd);
|
||||
extern int security_bpf_token_capable(const struct bpf_token *token, int cap);
|
||||
#else
|
||||
static inline int security_bpf(int cmd, union bpf_attr *attr,
|
||||
unsigned int size)
|
||||
@ -2046,7 +2054,8 @@ static inline int security_bpf_prog(struct bpf_prog *prog)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int security_bpf_map_alloc(struct bpf_map *map)
|
||||
static inline int security_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
|
||||
struct bpf_token *token)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
@ -2054,13 +2063,33 @@ static inline int security_bpf_map_alloc(struct bpf_map *map)
|
||||
static inline void security_bpf_map_free(struct bpf_map *map)
|
||||
{ }
|
||||
|
||||
static inline int security_bpf_prog_alloc(struct bpf_prog_aux *aux)
|
||||
static inline int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr,
|
||||
struct bpf_token *token)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void security_bpf_prog_free(struct bpf_prog_aux *aux)
|
||||
static inline void security_bpf_prog_free(struct bpf_prog *prog)
|
||||
{ }
|
||||
|
||||
static inline int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr,
|
||||
struct path *path)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void security_bpf_token_free(struct bpf_token *token)
|
||||
{ }
|
||||
|
||||
static inline int security_bpf_token_cmd(const struct bpf_token *token, enum bpf_cmd cmd)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int security_bpf_token_capable(const struct bpf_token *token, int cap)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_SECURITY */
|
||||
#endif /* CONFIG_BPF_SYSCALL */
|
||||
|
||||
|
@ -4247,10 +4247,13 @@ static inline bool __skb_metadata_differs(const struct sk_buff *skb_a,
|
||||
{
|
||||
const void *a = skb_metadata_end(skb_a);
|
||||
const void *b = skb_metadata_end(skb_b);
|
||||
/* Using more efficient variant than plain call to memcmp(). */
|
||||
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
|
||||
u64 diffs = 0;
|
||||
|
||||
if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
|
||||
BITS_PER_LONG != 64)
|
||||
goto slow;
|
||||
|
||||
/* Using more efficient variant than plain call to memcmp(). */
|
||||
switch (meta_len) {
|
||||
#define __it(x, op) (x -= sizeof(u##op))
|
||||
#define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op))
|
||||
@ -4270,11 +4273,11 @@ static inline bool __skb_metadata_differs(const struct sk_buff *skb_a,
|
||||
fallthrough;
|
||||
case 4: diffs |= __it_diff(a, b, 32);
|
||||
break;
|
||||
default:
|
||||
slow:
|
||||
return memcmp(a - meta_len, b - meta_len, meta_len);
|
||||
}
|
||||
return diffs;
|
||||
#else
|
||||
return memcmp(a - meta_len, b - meta_len, meta_len);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline bool skb_metadata_differs(const struct sk_buff *skb_a,
|
||||
|
@@ -16,7 +16,7 @@
*
* The XDP RX-queue info (xdp_rxq_info) is associated with the driver
* level RX-ring queues. It is information that is specific to how
* the driver have configured a given RX-ring queue.
* the driver has configured a given RX-ring queue.
*
* Each xdp_buff frame received in the driver carries a (pointer)
* reference to this xdp_rxq_info structure. This provides the XDP
@@ -32,7 +32,7 @@
* The struct is not directly tied to the XDP prog. A new XDP prog
* can be attached as long as it doesn't change the underlying
* RX-ring. If the RX-ring does change significantly, the NIC driver
* naturally need to stop the RX-ring before purging and reallocating
* naturally needs to stop the RX-ring before purging and reallocating
* memory. In that process the driver MUST call unregister (which
* also applies for driver shutdown and unload). The register API is
* also mandatory during RX-ring setup.
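
To make the register/unregister contract described above concrete, here is a rough driver-side sketch. The ring structure and function names are made up for illustration; xdp_rxq_info_reg() and xdp_rxq_info_unreg() are the existing helpers, with the napi_id argument simplified to 0:

    #include <linux/netdevice.h>
    #include <net/xdp.h>

    /* Hypothetical driver-private RX-ring wrapper. */
    struct my_rx_ring {
            struct xdp_rxq_info xdp_rxq;
            struct net_device *netdev;
            u16 queue_index;
    };

    static int my_ring_setup(struct my_rx_ring *ring)
    {
            /* Mandatory during RX-ring setup, before frames can arrive. */
            return xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
                                    ring->queue_index, 0);
    }

    static void my_ring_teardown(struct my_rx_ring *ring)
    {
            /* MUST run before purging/reallocating the RX-ring memory,
             * and on driver shutdown/unload.
             */
            xdp_rxq_info_unreg(&ring->xdp_rxq);
    }
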
@@ -369,7 +369,12 @@ xdp_data_meta_unsupported(const struct xdp_buff *xdp)

static inline bool xdp_metalen_invalid(unsigned long metalen)
{
return (metalen & (sizeof(__u32) - 1)) || (metalen > 32);
unsigned long meta_max;

meta_max = type_max(typeof_member(struct skb_shared_info, meta_len));
BUILD_BUG_ON(!__builtin_constant_p(meta_max));

return !IS_ALIGNED(metalen, sizeof(u32)) || metalen > meta_max;
}

struct xdp_attachment_info {
|
||||
@ -399,6 +404,10 @@ void xdp_attachment_setup(struct xdp_attachment_info *info,
|
||||
NETDEV_XDP_RX_METADATA_HASH, \
|
||||
bpf_xdp_metadata_rx_hash, \
|
||||
xmo_rx_hash) \
|
||||
XDP_METADATA_KFUNC(XDP_METADATA_KFUNC_RX_VLAN_TAG, \
|
||||
NETDEV_XDP_RX_METADATA_VLAN_TAG, \
|
||||
bpf_xdp_metadata_rx_vlan_tag, \
|
||||
xmo_rx_vlan_tag) \
|
||||
|
||||
enum xdp_rx_metadata {
|
||||
#define XDP_METADATA_KFUNC(name, _, __, ___) name,
|
||||
@ -427,6 +436,7 @@ enum xdp_rss_hash_type {
|
||||
XDP_RSS_L4_UDP = BIT(5),
|
||||
XDP_RSS_L4_SCTP = BIT(6),
|
||||
XDP_RSS_L4_IPSEC = BIT(7), /* L4 based hash include IPSEC SPI */
|
||||
XDP_RSS_L4_ICMP = BIT(8),
|
||||
|
||||
/* Second part: RSS hash type combinations used for driver HW mapping */
|
||||
XDP_RSS_TYPE_NONE = 0,
|
||||
@ -442,11 +452,13 @@ enum xdp_rss_hash_type {
|
||||
XDP_RSS_TYPE_L4_IPV4_UDP = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_UDP,
|
||||
XDP_RSS_TYPE_L4_IPV4_SCTP = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_SCTP,
|
||||
XDP_RSS_TYPE_L4_IPV4_IPSEC = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_IPSEC,
|
||||
XDP_RSS_TYPE_L4_IPV4_ICMP = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_ICMP,
|
||||
|
||||
XDP_RSS_TYPE_L4_IPV6_TCP = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_TCP,
|
||||
XDP_RSS_TYPE_L4_IPV6_UDP = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_UDP,
|
||||
XDP_RSS_TYPE_L4_IPV6_SCTP = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_SCTP,
|
||||
XDP_RSS_TYPE_L4_IPV6_IPSEC = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_IPSEC,
|
||||
XDP_RSS_TYPE_L4_IPV6_ICMP = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_ICMP,
|
||||
|
||||
XDP_RSS_TYPE_L4_IPV6_TCP_EX = XDP_RSS_TYPE_L4_IPV6_TCP | XDP_RSS_L3_DYNHDR,
|
||||
XDP_RSS_TYPE_L4_IPV6_UDP_EX = XDP_RSS_TYPE_L4_IPV6_UDP | XDP_RSS_L3_DYNHDR,
|
||||
@ -457,6 +469,8 @@ struct xdp_metadata_ops {
|
||||
int (*xmo_rx_timestamp)(const struct xdp_md *ctx, u64 *timestamp);
|
||||
int (*xmo_rx_hash)(const struct xdp_md *ctx, u32 *hash,
|
||||
enum xdp_rss_hash_type *rss_type);
|
||||
int (*xmo_rx_vlan_tag)(const struct xdp_md *ctx, __be16 *vlan_proto,
|
||||
u16 *vlan_tci);
|
||||
};
|
||||
|
||||
#ifdef CONFIG_NET
|
||||
|
@ -14,6 +14,12 @@
|
||||
|
||||
#ifdef CONFIG_XDP_SOCKETS
|
||||
|
||||
struct xsk_cb_desc {
|
||||
void *src;
|
||||
u8 off;
|
||||
u8 bytes;
|
||||
};
|
||||
|
||||
void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
|
||||
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
|
||||
u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max);
|
||||
@ -47,6 +53,12 @@ static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
|
||||
xp_set_rxq_info(pool, rxq);
|
||||
}
|
||||
|
||||
static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
|
||||
struct xsk_cb_desc *desc)
|
||||
{
|
||||
xp_fill_cb(pool, desc);
|
||||
}
|
||||
|
||||
static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
|
||||
{
|
||||
#ifdef CONFIG_NET_RX_BUSY_POLL
|
||||
@ -274,6 +286,11 @@ static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
|
||||
{
|
||||
}
|
||||
|
||||
static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
|
||||
struct xsk_cb_desc *desc)
|
||||
{
|
||||
}
|
||||
|
||||
static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
|
||||
{
|
||||
return 0;
|
||||
|
@ -2190,4 +2190,13 @@ static inline int register_xfrm_interface_bpf(void)
|
||||
|
||||
#endif
|
||||
|
||||
#if IS_ENABLED(CONFIG_DEBUG_INFO_BTF)
|
||||
int register_xfrm_state_bpf(void);
|
||||
#else
|
||||
static inline int register_xfrm_state_bpf(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* _NET_XFRM_H */
|
||||
|
@ -12,6 +12,7 @@
|
||||
|
||||
struct xsk_buff_pool;
|
||||
struct xdp_rxq_info;
|
||||
struct xsk_cb_desc;
|
||||
struct xsk_queue;
|
||||
struct xdp_desc;
|
||||
struct xdp_umem;
|
||||
@ -135,6 +136,7 @@ static inline void xp_init_xskb_dma(struct xdp_buff_xsk *xskb, struct xsk_buff_p
|
||||
|
||||
/* AF_XDP ZC drivers, via xdp_sock_buff.h */
|
||||
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq);
|
||||
void xp_fill_cb(struct xsk_buff_pool *pool, struct xsk_cb_desc *desc);
|
||||
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
|
||||
unsigned long attrs, struct page **pages, u32 nr_pages);
|
||||
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs);
|
||||
|
@@ -847,6 +847,36 @@ union bpf_iter_link_info {
* Returns zero on success. On error, -1 is returned and *errno*
* is set appropriately.
*
* BPF_TOKEN_CREATE
* Description
* Create BPF token with embedded information about what
* BPF-related functionality it allows:
* - a set of allowed bpf() syscall commands;
* - a set of allowed BPF map types to be created with
* BPF_MAP_CREATE command, if BPF_MAP_CREATE itself is allowed;
* - a set of allowed BPF program types and BPF program attach
* types to be loaded with BPF_PROG_LOAD command, if
* BPF_PROG_LOAD itself is allowed.
*
* BPF token is created (derived) from an instance of BPF FS,
* assuming it has necessary delegation mount options specified.
* This BPF token can be passed as an extra parameter to various
* bpf() syscall commands to grant BPF subsystem functionality to
* unprivileged processes.
*
* When created, BPF token is "associated" with the owning
* user namespace of BPF FS instance (super block) that it was
* derived from, and subsequent BPF operations performed with
* BPF token would be performing capabilities checks (i.e.,
* CAP_BPF, CAP_PERFMON, CAP_NET_ADMIN, CAP_SYS_ADMIN) within
* that user namespace. Without BPF token, such capabilities
* have to be granted in init user namespace, making bpf()
* syscall incompatible with user namespace, for the most part.
*
* Return
* A new file descriptor (a nonnegative integer), or -1 if an
* error occurred (in which case, *errno* is set appropriately).
*
* NOTES
* eBPF objects (maps and programs) can be shared between processes.
*
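
A minimal user-space sketch of the command documented above, driven only by the token_create fields added in this header; the bpffs path is an illustrative assumption, and the snippet assumes system headers that already carry the BPF_TOKEN_CREATE definitions from this series:

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/bpf.h>

    static int bpf_token_create_fd(const char *bpffs_path)
    {
            union bpf_attr attr;
            int bpffs_fd, token_fd;

            /* The bpffs instance must have been mounted with delegation options. */
            bpffs_fd = open(bpffs_path, O_RDONLY);
            if (bpffs_fd < 0)
                    return -1;

            memset(&attr, 0, sizeof(attr));
            attr.token_create.bpffs_fd = bpffs_fd;
            token_fd = syscall(__NR_bpf, BPF_TOKEN_CREATE, &attr, sizeof(attr));

            close(bpffs_fd);
            /* On success, token_fd can be passed as map_token_fd, prog_token_fd or
             * btf_token_fd in later bpf() commands, as added further down this diff.
             */
            return token_fd;
    }
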
@@ -901,6 +931,8 @@ enum bpf_cmd {
BPF_ITER_CREATE,
BPF_LINK_DETACH,
BPF_PROG_BIND_MAP,
BPF_TOKEN_CREATE,
__MAX_BPF_CMD,
};

enum bpf_map_type {
|
||||
@ -951,6 +983,7 @@ enum bpf_map_type {
|
||||
BPF_MAP_TYPE_BLOOM_FILTER,
|
||||
BPF_MAP_TYPE_USER_RINGBUF,
|
||||
BPF_MAP_TYPE_CGRP_STORAGE,
|
||||
__MAX_BPF_MAP_TYPE
|
||||
};
|
||||
|
||||
/* Note that tracing related programs such as
|
||||
@ -995,6 +1028,7 @@ enum bpf_prog_type {
|
||||
BPF_PROG_TYPE_SK_LOOKUP,
|
||||
BPF_PROG_TYPE_SYSCALL, /* a program that can execute syscalls */
|
||||
BPF_PROG_TYPE_NETFILTER,
|
||||
__MAX_BPF_PROG_TYPE
|
||||
};
|
||||
|
||||
enum bpf_attach_type {
|
||||
@ -1074,9 +1108,11 @@ enum bpf_link_type {
|
||||
BPF_LINK_TYPE_TCX = 11,
|
||||
BPF_LINK_TYPE_UPROBE_MULTI = 12,
|
||||
BPF_LINK_TYPE_NETKIT = 13,
|
||||
MAX_BPF_LINK_TYPE,
|
||||
__MAX_BPF_LINK_TYPE,
|
||||
};
|
||||
|
||||
#define MAX_BPF_LINK_TYPE __MAX_BPF_LINK_TYPE
|
||||
|
||||
enum bpf_perf_event_type {
|
||||
BPF_PERF_EVENT_UNSPEC = 0,
|
||||
BPF_PERF_EVENT_UPROBE = 1,
|
||||
@ -1401,6 +1437,7 @@ union bpf_attr {
|
||||
* to using 5 hash functions).
|
||||
*/
|
||||
__u64 map_extra;
|
||||
__u32 map_token_fd;
|
||||
};
|
||||
|
||||
struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
|
||||
@ -1470,6 +1507,7 @@ union bpf_attr {
|
||||
* truncated), or smaller (if log buffer wasn't filled completely).
|
||||
*/
|
||||
__u32 log_true_size;
|
||||
__u32 prog_token_fd;
|
||||
};
|
||||
|
||||
struct { /* anonymous struct used by BPF_OBJ_* commands */
|
||||
@ -1582,6 +1620,7 @@ union bpf_attr {
|
||||
* truncated), or smaller (if log buffer wasn't filled completely).
|
||||
*/
|
||||
__u32 btf_log_true_size;
|
||||
__u32 btf_token_fd;
|
||||
};
|
||||
|
||||
struct {
|
||||
@ -1712,6 +1751,11 @@ union bpf_attr {
|
||||
__u32 flags; /* extra flags */
|
||||
} prog_bind_map;
|
||||
|
||||
struct { /* struct used by BPF_TOKEN_CREATE command */
|
||||
__u32 flags;
|
||||
__u32 bpffs_fd;
|
||||
} token_create;
|
||||
|
||||
} __attribute__((aligned(8)));
|
||||
|
||||
/* The description below is an attempt at providing documentation to eBPF
|
||||
|
@ -44,10 +44,13 @@ enum netdev_xdp_act {
|
||||
* timestamp via bpf_xdp_metadata_rx_timestamp().
|
||||
* @NETDEV_XDP_RX_METADATA_HASH: Device is capable of exposing receive packet
|
||||
* hash via bpf_xdp_metadata_rx_hash().
|
||||
* @NETDEV_XDP_RX_METADATA_VLAN_TAG: Device is capable of exposing receive
|
||||
* packet VLAN tag via bpf_xdp_metadata_rx_vlan_tag().
|
||||
*/
|
||||
enum netdev_xdp_rx_metadata {
|
||||
NETDEV_XDP_RX_METADATA_TIMESTAMP = 1,
|
||||
NETDEV_XDP_RX_METADATA_HASH = 2,
|
||||
NETDEV_XDP_RX_METADATA_VLAN_TAG = 4,
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -6,7 +6,7 @@ cflags-nogcse-$(CONFIG_X86)$(CONFIG_CC_IS_GCC) := -fno-gcse
|
||||
endif
|
||||
CFLAGS_core.o += $(call cc-disable-warning, override-init) $(cflags-nogcse-yy)
|
||||
|
||||
obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o log.o
|
||||
obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o log.o token.o
|
||||
obj-$(CONFIG_BPF_SYSCALL) += bpf_iter.o map_iter.o task_iter.o prog_iter.o link_iter.o
|
||||
obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o bloom_filter.o
|
||||
obj-$(CONFIG_BPF_SYSCALL) += local_storage.o queue_stack_maps.o ringbuf.o
|
||||
|
@ -82,7 +82,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
|
||||
bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
|
||||
int numa_node = bpf_map_attr_numa_node(attr);
|
||||
u32 elem_size, index_mask, max_entries;
|
||||
bool bypass_spec_v1 = bpf_bypass_spec_v1();
|
||||
bool bypass_spec_v1 = bpf_bypass_spec_v1(NULL);
|
||||
u64 array_size, mask64;
|
||||
struct bpf_array *array;
|
||||
|
||||
@ -867,11 +867,11 @@ int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
|
||||
}
|
||||
|
||||
if (old_ptr)
|
||||
map->ops->map_fd_put_ptr(old_ptr);
|
||||
map->ops->map_fd_put_ptr(map, old_ptr, true);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static long fd_array_map_delete_elem(struct bpf_map *map, void *key)
|
||||
static long __fd_array_map_delete_elem(struct bpf_map *map, void *key, bool need_defer)
|
||||
{
|
||||
struct bpf_array *array = container_of(map, struct bpf_array, map);
|
||||
void *old_ptr;
|
||||
@ -890,13 +890,18 @@ static long fd_array_map_delete_elem(struct bpf_map *map, void *key)
|
||||
}
|
||||
|
||||
if (old_ptr) {
|
||||
map->ops->map_fd_put_ptr(old_ptr);
|
||||
map->ops->map_fd_put_ptr(map, old_ptr, need_defer);
|
||||
return 0;
|
||||
} else {
|
||||
return -ENOENT;
|
||||
}
|
||||
}
|
||||
|
||||
static long fd_array_map_delete_elem(struct bpf_map *map, void *key)
|
||||
{
|
||||
return __fd_array_map_delete_elem(map, key, true);
|
||||
}
|
||||
|
||||
static void *prog_fd_array_get_ptr(struct bpf_map *map,
|
||||
struct file *map_file, int fd)
|
||||
{
|
||||
@ -913,8 +918,9 @@ static void *prog_fd_array_get_ptr(struct bpf_map *map,
|
||||
return prog;
|
||||
}
|
||||
|
||||
static void prog_fd_array_put_ptr(void *ptr)
|
||||
static void prog_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
|
||||
{
|
||||
/* bpf_prog is freed after one RCU or tasks trace grace period */
|
||||
bpf_prog_put(ptr);
|
||||
}
|
||||
|
||||
@ -924,13 +930,13 @@ static u32 prog_fd_array_sys_lookup_elem(void *ptr)
|
||||
}
|
||||
|
||||
/* decrement refcnt of all bpf_progs that are stored in this map */
|
||||
static void bpf_fd_array_map_clear(struct bpf_map *map)
|
||||
static void bpf_fd_array_map_clear(struct bpf_map *map, bool need_defer)
|
||||
{
|
||||
struct bpf_array *array = container_of(map, struct bpf_array, map);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < array->map.max_entries; i++)
|
||||
fd_array_map_delete_elem(map, &i);
|
||||
__fd_array_map_delete_elem(map, &i, need_defer);
|
||||
}
|
||||
|
||||
static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
|
||||
@ -1071,7 +1077,7 @@ static void prog_array_map_clear_deferred(struct work_struct *work)
|
||||
{
|
||||
struct bpf_map *map = container_of(work, struct bpf_array_aux,
|
||||
work)->map;
|
||||
bpf_fd_array_map_clear(map);
|
||||
bpf_fd_array_map_clear(map, true);
|
||||
bpf_map_put(map);
|
||||
}
|
||||
|
||||
@ -1151,7 +1157,7 @@ static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
|
||||
{
|
||||
struct bpf_event_entry *ee;
|
||||
|
||||
ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
|
||||
ee = kzalloc(sizeof(*ee), GFP_KERNEL);
|
||||
if (ee) {
|
||||
ee->event = perf_file->private_data;
|
||||
ee->perf_file = perf_file;
|
||||
@ -1201,8 +1207,9 @@ err_out:
|
||||
return ee;
|
||||
}
|
||||
|
||||
static void perf_event_fd_array_put_ptr(void *ptr)
|
||||
static void perf_event_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
|
||||
{
|
||||
/* bpf_perf_event is freed after one RCU grace period */
|
||||
bpf_event_entry_free_rcu(ptr);
|
||||
}
|
||||
|
||||
@ -1220,7 +1227,7 @@ static void perf_event_fd_array_release(struct bpf_map *map,
|
||||
for (i = 0; i < array->map.max_entries; i++) {
|
||||
ee = READ_ONCE(array->ptrs[i]);
|
||||
if (ee && ee->map_file == map_file)
|
||||
fd_array_map_delete_elem(map, &i);
|
||||
__fd_array_map_delete_elem(map, &i, true);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
}
|
||||
@ -1228,7 +1235,7 @@ static void perf_event_fd_array_release(struct bpf_map *map,
|
||||
static void perf_event_fd_array_map_free(struct bpf_map *map)
|
||||
{
|
||||
if (map->map_flags & BPF_F_PRESERVE_ELEMS)
|
||||
bpf_fd_array_map_clear(map);
|
||||
bpf_fd_array_map_clear(map, false);
|
||||
fd_array_map_free(map);
|
||||
}
|
||||
|
||||
@ -1256,7 +1263,7 @@ static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
|
||||
return cgroup_get_from_fd(fd);
|
||||
}
|
||||
|
||||
static void cgroup_fd_array_put_ptr(void *ptr)
|
||||
static void cgroup_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
|
||||
{
|
||||
/* cgroup_put free cgrp after a rcu grace period */
|
||||
cgroup_put(ptr);
|
||||
@ -1264,7 +1271,7 @@ static void cgroup_fd_array_put_ptr(void *ptr)
|
||||
|
||||
static void cgroup_fd_array_free(struct bpf_map *map)
|
||||
{
|
||||
bpf_fd_array_map_clear(map);
|
||||
bpf_fd_array_map_clear(map, false);
|
||||
fd_array_map_free(map);
|
||||
}
|
||||
|
||||
@ -1309,7 +1316,7 @@ static void array_of_map_free(struct bpf_map *map)
|
||||
* is protected by fdget/fdput.
|
||||
*/
|
||||
bpf_map_meta_free(map->inner_map_meta);
|
||||
bpf_fd_array_map_clear(map);
|
||||
bpf_fd_array_map_clear(map, false);
|
||||
fd_array_map_free(map);
|
||||
}
|
||||
|
||||
|
@ -82,7 +82,7 @@ static void *bpf_cgrp_storage_lookup_elem(struct bpf_map *map, void *key)
|
||||
int fd;
|
||||
|
||||
fd = *(int *)key;
|
||||
cgroup = cgroup_get_from_fd(fd);
|
||||
cgroup = cgroup_v1v2_get_from_fd(fd);
|
||||
if (IS_ERR(cgroup))
|
||||
return ERR_CAST(cgroup);
|
||||
|
||||
@ -101,7 +101,7 @@ static long bpf_cgrp_storage_update_elem(struct bpf_map *map, void *key,
|
||||
int fd;
|
||||
|
||||
fd = *(int *)key;
|
||||
cgroup = cgroup_get_from_fd(fd);
|
||||
cgroup = cgroup_v1v2_get_from_fd(fd);
|
||||
if (IS_ERR(cgroup))
|
||||
return PTR_ERR(cgroup);
|
||||
|
||||
@ -131,7 +131,7 @@ static long bpf_cgrp_storage_delete_elem(struct bpf_map *map, void *key)
|
||||
int err, fd;
|
||||
|
||||
fd = *(int *)key;
|
||||
cgroup = cgroup_get_from_fd(fd);
|
||||
cgroup = cgroup_v1v2_get_from_fd(fd);
|
||||
if (IS_ERR(cgroup))
|
||||
return PTR_ERR(cgroup);
|
||||
|
||||
|
@ -260,9 +260,15 @@ bpf_lsm_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
||||
BTF_SET_START(sleepable_lsm_hooks)
|
||||
BTF_ID(func, bpf_lsm_bpf)
|
||||
BTF_ID(func, bpf_lsm_bpf_map)
|
||||
BTF_ID(func, bpf_lsm_bpf_map_alloc_security)
|
||||
BTF_ID(func, bpf_lsm_bpf_map_free_security)
|
||||
BTF_ID(func, bpf_lsm_bpf_map_create)
|
||||
BTF_ID(func, bpf_lsm_bpf_map_free)
|
||||
BTF_ID(func, bpf_lsm_bpf_prog)
|
||||
BTF_ID(func, bpf_lsm_bpf_prog_load)
|
||||
BTF_ID(func, bpf_lsm_bpf_prog_free)
|
||||
BTF_ID(func, bpf_lsm_bpf_token_create)
|
||||
BTF_ID(func, bpf_lsm_bpf_token_free)
|
||||
BTF_ID(func, bpf_lsm_bpf_token_cmd)
|
||||
BTF_ID(func, bpf_lsm_bpf_token_capable)
|
||||
BTF_ID(func, bpf_lsm_bprm_check_security)
|
||||
BTF_ID(func, bpf_lsm_bprm_committed_creds)
|
||||
BTF_ID(func, bpf_lsm_bprm_committing_creds)
|
||||
@ -298,6 +304,18 @@ BTF_ID(func, bpf_lsm_kernel_module_request)
|
||||
BTF_ID(func, bpf_lsm_kernel_read_file)
|
||||
BTF_ID(func, bpf_lsm_kernfs_init_security)
|
||||
|
||||
#ifdef CONFIG_SECURITY_PATH
|
||||
BTF_ID(func, bpf_lsm_path_unlink)
|
||||
BTF_ID(func, bpf_lsm_path_mkdir)
|
||||
BTF_ID(func, bpf_lsm_path_rmdir)
|
||||
BTF_ID(func, bpf_lsm_path_truncate)
|
||||
BTF_ID(func, bpf_lsm_path_symlink)
|
||||
BTF_ID(func, bpf_lsm_path_link)
|
||||
BTF_ID(func, bpf_lsm_path_rename)
|
||||
BTF_ID(func, bpf_lsm_path_chmod)
|
||||
BTF_ID(func, bpf_lsm_path_chown)
|
||||
#endif /* CONFIG_SECURITY_PATH */
|
||||
|
||||
#ifdef CONFIG_KEYS
|
||||
BTF_ID(func, bpf_lsm_key_free)
|
||||
#endif /* CONFIG_KEYS */
|
||||
@ -345,9 +363,8 @@ BTF_ID(func, bpf_lsm_userns_create)
|
||||
BTF_SET_END(sleepable_lsm_hooks)
|
||||
|
||||
BTF_SET_START(untrusted_lsm_hooks)
|
||||
BTF_ID(func, bpf_lsm_bpf_map_free_security)
|
||||
BTF_ID(func, bpf_lsm_bpf_prog_alloc_security)
|
||||
BTF_ID(func, bpf_lsm_bpf_prog_free_security)
|
||||
BTF_ID(func, bpf_lsm_bpf_map_free)
|
||||
BTF_ID(func, bpf_lsm_bpf_prog_free)
|
||||
BTF_ID(func, bpf_lsm_file_alloc_security)
|
||||
BTF_ID(func, bpf_lsm_file_free_security)
|
||||
#ifdef CONFIG_SECURITY_NETWORK
|
||||
|
@ -352,18 +352,24 @@ const struct bpf_link_ops bpf_struct_ops_link_lops = {
|
||||
int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
|
||||
struct bpf_tramp_link *link,
|
||||
const struct btf_func_model *model,
|
||||
void *image, void *image_end)
|
||||
void *stub_func, void *image, void *image_end)
|
||||
{
|
||||
u32 flags;
|
||||
u32 flags = BPF_TRAMP_F_INDIRECT;
|
||||
int size;
|
||||
|
||||
tlinks[BPF_TRAMP_FENTRY].links[0] = link;
|
||||
tlinks[BPF_TRAMP_FENTRY].nr_links = 1;
|
||||
/* BPF_TRAMP_F_RET_FENTRY_RET is only used by bpf_struct_ops,
|
||||
* and it must be used alone.
|
||||
*/
|
||||
flags = model->ret_size > 0 ? BPF_TRAMP_F_RET_FENTRY_RET : 0;
|
||||
|
||||
if (model->ret_size > 0)
|
||||
flags |= BPF_TRAMP_F_RET_FENTRY_RET;
|
||||
|
||||
size = arch_bpf_trampoline_size(model, flags, tlinks, NULL);
|
||||
if (size < 0)
|
||||
return size;
|
||||
if (size > (unsigned long)image_end - (unsigned long)image)
|
||||
return -E2BIG;
|
||||
return arch_prepare_bpf_trampoline(NULL, image, image_end,
|
||||
model, flags, tlinks, NULL);
|
||||
model, flags, tlinks, stub_func);
|
||||
}
|
||||
|
||||
static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
|
||||
@ -497,11 +503,12 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
|
||||
|
||||
err = bpf_struct_ops_prepare_trampoline(tlinks, link,
|
||||
&st_ops->func_models[i],
|
||||
*(void **)(st_ops->cfi_stubs + moff),
|
||||
image, image_end);
|
||||
if (err < 0)
|
||||
goto reset_unlock;
|
||||
|
||||
*(void **)(kdata + moff) = image;
|
||||
*(void **)(kdata + moff) = image + cfi_get_offset();
|
||||
image += err;
|
||||
|
||||
/* put prog_id to udata */
|
||||
@ -515,7 +522,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
|
||||
if (err)
|
||||
goto reset_unlock;
|
||||
}
|
||||
set_memory_rox((long)st_map->image, 1);
|
||||
arch_protect_bpf_trampoline(st_map->image, PAGE_SIZE);
|
||||
/* Let bpf_link handle registration & unregistration.
|
||||
*
|
||||
* Pair with smp_load_acquire() during lookup_elem().
|
||||
@ -524,7 +531,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
set_memory_rox((long)st_map->image, 1);
|
||||
arch_protect_bpf_trampoline(st_map->image, PAGE_SIZE);
|
||||
err = st_ops->reg(kdata);
|
||||
if (likely(!err)) {
|
||||
/* This refcnt increment on the map here after
|
||||
@ -547,8 +554,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
|
||||
* there was a race in registering the struct_ops (under the same name) to
|
||||
* a sub-system through different struct_ops's maps.
|
||||
*/
|
||||
set_memory_nx((long)st_map->image, 1);
|
||||
set_memory_rw((long)st_map->image, 1);
|
||||
arch_unprotect_bpf_trampoline(st_map->image, PAGE_SIZE);
|
||||
|
||||
reset_unlock:
|
||||
bpf_struct_ops_map_put_progs(st_map);
|
||||
@ -616,7 +622,7 @@ static void __bpf_struct_ops_map_free(struct bpf_map *map)
|
||||
bpf_struct_ops_map_put_progs(st_map);
|
||||
bpf_map_area_free(st_map->links);
|
||||
if (st_map->image) {
|
||||
bpf_jit_free_exec(st_map->image);
|
||||
arch_free_bpf_trampoline(st_map->image, PAGE_SIZE);
|
||||
bpf_jit_uncharge_modmem(PAGE_SIZE);
|
||||
}
|
||||
bpf_map_area_free(st_map->uvalue);
|
||||
@ -691,7 +697,7 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
st_map->image = bpf_jit_alloc_exec(PAGE_SIZE);
|
||||
st_map->image = arch_alloc_bpf_trampoline(PAGE_SIZE);
|
||||
if (!st_map->image) {
|
||||
/* __bpf_struct_ops_map_free() uses st_map->image as flag
|
||||
* for "charged or not". In this case, we need to unchange
|
||||
@ -711,7 +717,6 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
|
||||
}
|
||||
|
||||
mutex_init(&st_map->lock);
|
||||
set_vm_flush_reset_perms(st_map->image);
|
||||
bpf_map_init_from_attr(map, attr);
|
||||
|
||||
return map;
|
||||
|
@ -6956,7 +6956,7 @@ int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog,
|
||||
* (either PTR_TO_CTX or SCALAR_VALUE).
|
||||
*/
|
||||
int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
|
||||
struct bpf_reg_state *regs, bool is_ex_cb)
|
||||
struct bpf_reg_state *regs, u32 *arg_cnt)
|
||||
{
|
||||
struct bpf_verifier_log *log = &env->log;
|
||||
struct bpf_prog *prog = env->prog;
|
||||
@ -7013,6 +7013,7 @@ int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
|
||||
tname, nargs, MAX_BPF_FUNC_REG_ARGS);
|
||||
return -EINVAL;
|
||||
}
|
||||
*arg_cnt = nargs;
|
||||
/* check that function returns int, exception cb also requires this */
|
||||
t = btf_type_by_id(btf, t->type);
|
||||
while (btf_type_is_modifier(t))
|
||||
@ -7062,14 +7063,6 @@ int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
|
||||
i, btf_type_str(t), tname);
|
||||
return -EINVAL;
|
||||
}
|
||||
/* We have already ensured that the callback returns an integer, just
|
||||
* like all global subprogs. We need to determine it only has a single
|
||||
* scalar argument.
|
||||
*/
|
||||
if (is_ex_cb && (nargs != 1 || regs[BPF_REG_1].type != SCALAR_VALUE)) {
|
||||
bpf_log(log, "exception cb only supports single integer argument\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1630,7 +1630,7 @@ cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
||||
case BPF_FUNC_perf_event_output:
|
||||
return &bpf_event_output_data_proto;
|
||||
default:
|
||||
return bpf_base_func_proto(func_id);
|
||||
return bpf_base_func_proto(func_id, prog);
|
||||
}
|
||||
}
|
||||
|
||||
@ -2191,7 +2191,7 @@ sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
||||
case BPF_FUNC_perf_event_output:
|
||||
return &bpf_event_output_data_proto;
|
||||
default:
|
||||
return bpf_base_func_proto(func_id);
|
||||
return bpf_base_func_proto(func_id, prog);
|
||||
}
|
||||
}
|
||||
|
||||
@ -2348,7 +2348,7 @@ cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
||||
case BPF_FUNC_perf_event_output:
|
||||
return &bpf_event_output_data_proto;
|
||||
default:
|
||||
return bpf_base_func_proto(func_id);
|
||||
return bpf_base_func_proto(func_id, prog);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -121,6 +121,9 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag
|
||||
#endif
|
||||
|
||||
INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
|
||||
#ifdef CONFIG_FINEIBT
|
||||
INIT_LIST_HEAD_RCU(&fp->aux->ksym_prefix.lnode);
|
||||
#endif
|
||||
mutex_init(&fp->aux->used_maps_mutex);
|
||||
mutex_init(&fp->aux->dst_mutex);
|
||||
|
||||
@ -679,7 +682,7 @@ static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
|
||||
void bpf_prog_kallsyms_add(struct bpf_prog *fp)
|
||||
{
|
||||
if (!bpf_prog_kallsyms_candidate(fp) ||
|
||||
!bpf_capable())
|
||||
!bpf_token_capable(fp->aux->token, CAP_BPF))
|
||||
return;
|
||||
|
||||
bpf_prog_ksym_set_addr(fp);
|
||||
@ -687,6 +690,23 @@ void bpf_prog_kallsyms_add(struct bpf_prog *fp)
|
||||
fp->aux->ksym.prog = true;
|
||||
|
||||
bpf_ksym_add(&fp->aux->ksym);
|
||||
|
||||
#ifdef CONFIG_FINEIBT
|
||||
/*
|
||||
* When FineIBT, code in the __cfi_foo() symbols can get executed
|
||||
* and hence unwinder needs help.
|
||||
*/
|
||||
if (cfi_mode != CFI_FINEIBT)
|
||||
return;
|
||||
|
||||
snprintf(fp->aux->ksym_prefix.name, KSYM_NAME_LEN,
|
||||
"__cfi_%s", fp->aux->ksym.name);
|
||||
|
||||
fp->aux->ksym_prefix.start = (unsigned long) fp->bpf_func - 16;
|
||||
fp->aux->ksym_prefix.end = (unsigned long) fp->bpf_func;
|
||||
|
||||
bpf_ksym_add(&fp->aux->ksym_prefix);
|
||||
#endif
|
||||
}
|
||||
|
||||
void bpf_prog_kallsyms_del(struct bpf_prog *fp)
|
||||
@ -695,6 +715,11 @@ void bpf_prog_kallsyms_del(struct bpf_prog *fp)
|
||||
return;
|
||||
|
||||
bpf_ksym_del(&fp->aux->ksym);
|
||||
#ifdef CONFIG_FINEIBT
|
||||
if (cfi_mode != CFI_FINEIBT)
|
||||
return;
|
||||
bpf_ksym_del(&fp->aux->ksym_prefix);
|
||||
#endif
|
||||
}
|
||||
|
||||
static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
|
||||
@ -932,20 +957,20 @@ out:
|
||||
return ptr;
|
||||
}
|
||||
|
||||
void bpf_prog_pack_free(struct bpf_binary_header *hdr)
|
||||
void bpf_prog_pack_free(void *ptr, u32 size)
|
||||
{
|
||||
struct bpf_prog_pack *pack = NULL, *tmp;
|
||||
unsigned int nbits;
|
||||
unsigned long pos;
|
||||
|
||||
mutex_lock(&pack_mutex);
|
||||
if (hdr->size > BPF_PROG_PACK_SIZE) {
|
||||
bpf_jit_free_exec(hdr);
|
||||
if (size > BPF_PROG_PACK_SIZE) {
|
||||
bpf_jit_free_exec(ptr);
|
||||
goto out;
|
||||
}
|
||||
|
||||
list_for_each_entry(tmp, &pack_list, list) {
|
||||
if ((void *)hdr >= tmp->ptr && (tmp->ptr + BPF_PROG_PACK_SIZE) > (void *)hdr) {
|
||||
if (ptr >= tmp->ptr && (tmp->ptr + BPF_PROG_PACK_SIZE) > ptr) {
|
||||
pack = tmp;
|
||||
break;
|
||||
}
|
||||
@ -954,10 +979,10 @@ void bpf_prog_pack_free(struct bpf_binary_header *hdr)
|
||||
if (WARN_ONCE(!pack, "bpf_prog_pack bug\n"))
|
||||
goto out;
|
||||
|
||||
nbits = BPF_PROG_SIZE_TO_NBITS(hdr->size);
|
||||
pos = ((unsigned long)hdr - (unsigned long)pack->ptr) >> BPF_PROG_CHUNK_SHIFT;
|
||||
nbits = BPF_PROG_SIZE_TO_NBITS(size);
|
||||
pos = ((unsigned long)ptr - (unsigned long)pack->ptr) >> BPF_PROG_CHUNK_SHIFT;
|
||||
|
||||
WARN_ONCE(bpf_arch_text_invalidate(hdr, hdr->size),
|
||||
WARN_ONCE(bpf_arch_text_invalidate(ptr, size),
|
||||
"bpf_prog_pack bug: missing bpf_arch_text_invalidate?\n");
|
||||
|
||||
bitmap_clear(pack->bitmap, pos, nbits);
|
||||
@ -1104,8 +1129,7 @@ bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr,
|
||||
|
||||
*rw_header = kvmalloc(size, GFP_KERNEL);
|
||||
if (!*rw_header) {
|
||||
bpf_arch_text_copy(&ro_header->size, &size, sizeof(size));
|
||||
bpf_prog_pack_free(ro_header);
|
||||
bpf_prog_pack_free(ro_header, size);
|
||||
bpf_jit_uncharge_modmem(size);
|
||||
return NULL;
|
||||
}
|
||||
@ -1136,7 +1160,7 @@ int bpf_jit_binary_pack_finalize(struct bpf_prog *prog,
|
||||
kvfree(rw_header);
|
||||
|
||||
if (IS_ERR(ptr)) {
|
||||
bpf_prog_pack_free(ro_header);
|
||||
bpf_prog_pack_free(ro_header, ro_header->size);
|
||||
return PTR_ERR(ptr);
|
||||
}
|
||||
return 0;
|
||||
@ -1157,7 +1181,7 @@ void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
|
||||
{
|
||||
u32 size = ro_header->size;
|
||||
|
||||
bpf_prog_pack_free(ro_header);
|
||||
bpf_prog_pack_free(ro_header, size);
|
||||
kvfree(rw_header);
|
||||
bpf_jit_uncharge_modmem(size);
|
||||
}
|
||||
@ -2668,12 +2692,16 @@ void __bpf_free_used_maps(struct bpf_prog_aux *aux,
|
||||
struct bpf_map **used_maps, u32 len)
|
||||
{
|
||||
struct bpf_map *map;
|
||||
bool sleepable;
|
||||
u32 i;
|
||||
|
||||
sleepable = aux->sleepable;
|
||||
for (i = 0; i < len; i++) {
|
||||
map = used_maps[i];
|
||||
if (map->ops->map_poke_untrack)
|
||||
map->ops->map_poke_untrack(map, aux);
|
||||
if (sleepable)
|
||||
atomic64_dec(&map->sleepable_refcnt);
|
||||
bpf_map_put(map);
|
||||
}
|
||||
}
|
||||
@ -2751,6 +2779,7 @@ void bpf_prog_free(struct bpf_prog *fp)
|
||||
|
||||
if (aux->dst_prog)
|
||||
bpf_prog_put(aux->dst_prog);
|
||||
bpf_token_put(aux->token);
|
||||
INIT_WORK(&aux->work, bpf_prog_free_deferred);
|
||||
schedule_work(&aux->work);
|
||||
}
|
||||
|
@ -96,6 +96,12 @@ __bpf_kfunc void bpf_cpumask_release(struct bpf_cpumask *cpumask)
|
||||
migrate_enable();
|
||||
}
|
||||
|
||||
__bpf_kfunc void bpf_cpumask_release_dtor(void *cpumask)
|
||||
{
|
||||
bpf_cpumask_release(cpumask);
|
||||
}
|
||||
CFI_NOSEAL(bpf_cpumask_release_dtor);
|
||||
|
||||
/**
|
||||
* bpf_cpumask_first() - Get the index of the first nonzero bit in the cpumask.
|
||||
* @cpumask: The cpumask being queried.
|
||||
@ -405,6 +411,17 @@ __bpf_kfunc u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1,
|
||||
return cpumask_any_and_distribute(src1, src2);
|
||||
}
|
||||
|
||||
/**
|
||||
* bpf_cpumask_weight() - Return the number of bits in @cpumask.
|
||||
* @cpumask: The cpumask being queried.
|
||||
*
|
||||
* Count the number of set bits in the given cpumask.
|
||||
*/
|
||||
__bpf_kfunc u32 bpf_cpumask_weight(const struct cpumask *cpumask)
|
||||
{
|
||||
return cpumask_weight(cpumask);
|
||||
}
|
||||
|
||||
__bpf_kfunc_end_defs();
|
||||
|
||||
BTF_SET8_START(cpumask_kfunc_btf_ids)
|
||||
@ -432,6 +449,7 @@ BTF_ID_FLAGS(func, bpf_cpumask_full, KF_RCU)
|
||||
BTF_ID_FLAGS(func, bpf_cpumask_copy, KF_RCU)
|
||||
BTF_ID_FLAGS(func, bpf_cpumask_any_distribute, KF_RCU)
|
||||
BTF_ID_FLAGS(func, bpf_cpumask_any_and_distribute, KF_RCU)
|
||||
BTF_ID_FLAGS(func, bpf_cpumask_weight, KF_RCU)
|
||||
BTF_SET8_END(cpumask_kfunc_btf_ids)
|
||||
|
||||
static const struct btf_kfunc_id_set cpumask_kfunc_set = {
|
||||
@ -441,7 +459,7 @@ static const struct btf_kfunc_id_set cpumask_kfunc_set = {
|
||||
|
||||
BTF_ID_LIST(cpumask_dtor_ids)
|
||||
BTF_ID(struct, bpf_cpumask)
|
||||
BTF_ID(func, bpf_cpumask_release)
|
||||
BTF_ID(func, bpf_cpumask_release_dtor)
|
||||
|
||||
static int __init cpumask_kfunc_init(void)
|
||||
{
|
||||
|
@ -150,14 +150,11 @@ void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
|
||||
goto out;
|
||||
d->rw_image = bpf_jit_alloc_exec(PAGE_SIZE);
|
||||
if (!d->rw_image) {
|
||||
u32 size = PAGE_SIZE;
|
||||
|
||||
bpf_arch_text_copy(d->image, &size, sizeof(size));
|
||||
bpf_prog_pack_free((struct bpf_binary_header *)d->image);
|
||||
bpf_prog_pack_free(d->image, PAGE_SIZE);
|
||||
d->image = NULL;
|
||||
goto out;
|
||||
}
|
||||
bpf_image_ksym_add(d->image, &d->ksym);
|
||||
bpf_image_ksym_add(d->image, PAGE_SIZE, &d->ksym);
|
||||
}
|
||||
|
||||
prev_num_progs = d->num_progs;
|
||||
|
@ -897,7 +897,7 @@ static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
|
||||
|
||||
if (map->ops->map_fd_put_ptr) {
|
||||
ptr = fd_htab_map_get_ptr(map, l);
|
||||
map->ops->map_fd_put_ptr(ptr);
|
||||
map->ops->map_fd_put_ptr(map, ptr, true);
|
||||
}
|
||||
}
|
||||
|
||||
@ -2484,7 +2484,7 @@ static void fd_htab_map_free(struct bpf_map *map)
|
||||
hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
|
||||
void *ptr = fd_htab_map_get_ptr(map, l);
|
||||
|
||||
map->ops->map_fd_put_ptr(ptr);
|
||||
map->ops->map_fd_put_ptr(map, ptr, false);
|
||||
}
|
||||
}
|
||||
|
||||
@ -2523,9 +2523,15 @@ int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
|
||||
if (IS_ERR(ptr))
|
||||
return PTR_ERR(ptr);
|
||||
|
||||
/* The htab bucket lock is always held during update operations in fd
|
||||
* htab map, and the following rcu_read_lock() is only used to avoid
|
||||
* the WARN_ON_ONCE in htab_map_update_elem().
|
||||
*/
|
||||
rcu_read_lock();
|
||||
ret = htab_map_update_elem(map, key, &ptr, map_flags);
|
||||
rcu_read_unlock();
|
||||
if (ret)
|
||||
map->ops->map_fd_put_ptr(ptr);
|
||||
map->ops->map_fd_put_ptr(map, ptr, false);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@@ -32,12 +32,13 @@
*
* Different map implementations will rely on rcu in map methods
* lookup/update/delete, therefore eBPF programs must run under rcu lock
* if program is allowed to access maps, so check rcu_read_lock_held in
* all three functions.
* if program is allowed to access maps, so check rcu_read_lock_held() or
* rcu_read_lock_trace_held() in all three functions.
*/
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
!rcu_read_lock_bh_held());
return (unsigned long) map->ops->map_lookup_elem(map, key);
}

@ -53,7 +54,8 @@ const struct bpf_func_proto bpf_map_lookup_elem_proto = {
|
||||
BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
|
||||
void *, value, u64, flags)
|
||||
{
|
||||
WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
|
||||
WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
|
||||
!rcu_read_lock_bh_held());
|
||||
return map->ops->map_update_elem(map, key, value, flags);
|
||||
}
|
||||
|
||||
@ -70,7 +72,8 @@ const struct bpf_func_proto bpf_map_update_elem_proto = {
|
||||
|
||||
BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
|
||||
{
|
||||
WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
|
||||
WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
|
||||
!rcu_read_lock_bh_held());
|
||||
return map->ops->map_delete_elem(map, key);
|
||||
}
|
||||
|
||||
@ -1676,7 +1679,7 @@ const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;
|
||||
const struct bpf_func_proto bpf_task_pt_regs_proto __weak;
|
||||
|
||||
const struct bpf_func_proto *
|
||||
bpf_base_func_proto(enum bpf_func_id func_id)
|
||||
bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
||||
{
|
||||
switch (func_id) {
|
||||
case BPF_FUNC_map_lookup_elem:
|
||||
@ -1727,7 +1730,7 @@ bpf_base_func_proto(enum bpf_func_id func_id)
|
||||
break;
|
||||
}
|
||||
|
||||
if (!bpf_capable())
|
||||
if (!bpf_token_capable(prog->aux->token, CAP_BPF))
|
||||
return NULL;
|
||||
|
||||
switch (func_id) {
|
||||
@ -1785,7 +1788,7 @@ bpf_base_func_proto(enum bpf_func_id func_id)
|
||||
break;
|
||||
}
|
||||
|
||||
if (!perfmon_capable())
|
||||
if (!bpf_token_capable(prog->aux->token, CAP_PERFMON))
|
||||
return NULL;
|
||||
|
||||
switch (func_id) {
|
||||
@ -2147,6 +2150,12 @@ __bpf_kfunc void bpf_task_release(struct task_struct *p)
|
||||
put_task_struct_rcu_user(p);
|
||||
}
|
||||
|
||||
__bpf_kfunc void bpf_task_release_dtor(void *p)
|
||||
{
|
||||
put_task_struct_rcu_user(p);
|
||||
}
|
||||
CFI_NOSEAL(bpf_task_release_dtor);
|
||||
|
||||
#ifdef CONFIG_CGROUPS
|
||||
/**
|
||||
* bpf_cgroup_acquire - Acquire a reference to a cgroup. A cgroup acquired by
|
||||
@ -2171,6 +2180,12 @@ __bpf_kfunc void bpf_cgroup_release(struct cgroup *cgrp)
|
||||
cgroup_put(cgrp);
|
||||
}
|
||||
|
||||
__bpf_kfunc void bpf_cgroup_release_dtor(void *cgrp)
|
||||
{
|
||||
cgroup_put(cgrp);
|
||||
}
|
||||
CFI_NOSEAL(bpf_cgroup_release_dtor);
|
||||
|
||||
/**
|
||||
* bpf_cgroup_ancestor - Perform a lookup on an entry in a cgroup's ancestor
|
||||
* array. A cgroup returned by this kfunc which is not subsequently stored in a
|
||||
@ -2522,7 +2537,7 @@ __bpf_kfunc void bpf_throw(u64 cookie)
|
||||
* which skips compiler generated instrumentation to do the same.
|
||||
*/
|
||||
kasan_unpoison_task_stack_below((void *)(long)ctx.sp);
|
||||
ctx.aux->bpf_exception_cb(cookie, ctx.sp, ctx.bp);
|
||||
ctx.aux->bpf_exception_cb(cookie, ctx.sp, ctx.bp, 0, 0);
|
||||
WARN(1, "A call to BPF exception callback should never return\n");
|
||||
}
|
||||
|
||||
@ -2567,10 +2582,10 @@ static const struct btf_kfunc_id_set generic_kfunc_set = {
|
||||
|
||||
BTF_ID_LIST(generic_dtor_ids)
|
||||
BTF_ID(struct, task_struct)
|
||||
BTF_ID(func, bpf_task_release)
|
||||
BTF_ID(func, bpf_task_release_dtor)
|
||||
#ifdef CONFIG_CGROUPS
|
||||
BTF_ID(struct, cgroup)
|
||||
BTF_ID(func, bpf_cgroup_release)
|
||||
BTF_ID(func, bpf_cgroup_release_dtor)
|
||||
#endif
|
||||
|
||||
BTF_SET8_START(common_btf_ids)
|
||||
@ -2627,6 +2642,7 @@ static int __init kfunc_init(void)
|
||||
|
||||
ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &generic_kfunc_set);
|
||||
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &generic_kfunc_set);
|
||||
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &generic_kfunc_set);
|
||||
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &generic_kfunc_set);
|
||||
ret = ret ?: register_btf_id_dtor_kfuncs(generic_dtors,
|
||||
ARRAY_SIZE(generic_dtors),
|
||||
|
@ -20,6 +20,7 @@
|
||||
#include <linux/filter.h>
|
||||
#include <linux/bpf.h>
|
||||
#include <linux/bpf_trace.h>
|
||||
#include <linux/kstrtox.h>
|
||||
#include "preload/bpf_preload.h"
|
||||
|
||||
enum bpf_type {
|
||||
@ -98,9 +99,9 @@ static const struct inode_operations bpf_prog_iops = { };
|
||||
static const struct inode_operations bpf_map_iops = { };
|
||||
static const struct inode_operations bpf_link_iops = { };
|
||||
|
||||
static struct inode *bpf_get_inode(struct super_block *sb,
|
||||
const struct inode *dir,
|
||||
umode_t mode)
|
||||
struct inode *bpf_get_inode(struct super_block *sb,
|
||||
const struct inode *dir,
|
||||
umode_t mode)
|
||||
{
|
||||
struct inode *inode;
|
||||
|
||||
@ -594,15 +595,183 @@ struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type typ
|
||||
}
|
||||
EXPORT_SYMBOL(bpf_prog_get_type_path);
|
||||
|
||||
struct bpffs_btf_enums {
|
||||
const struct btf *btf;
|
||||
const struct btf_type *cmd_t;
|
||||
const struct btf_type *map_t;
|
||||
const struct btf_type *prog_t;
|
||||
const struct btf_type *attach_t;
|
||||
};
|
||||
|
||||
static int find_bpffs_btf_enums(struct bpffs_btf_enums *info)
|
||||
{
|
||||
const struct btf *btf;
|
||||
const struct btf_type *t;
|
||||
const char *name;
|
||||
int i, n;
|
||||
|
||||
memset(info, 0, sizeof(*info));
|
||||
|
||||
btf = bpf_get_btf_vmlinux();
|
||||
if (IS_ERR(btf))
|
||||
return PTR_ERR(btf);
|
||||
if (!btf)
|
||||
return -ENOENT;
|
||||
|
||||
info->btf = btf;
|
||||
|
||||
for (i = 1, n = btf_nr_types(btf); i < n; i++) {
|
||||
t = btf_type_by_id(btf, i);
|
||||
if (!btf_type_is_enum(t))
|
||||
continue;
|
||||
|
||||
name = btf_name_by_offset(btf, t->name_off);
|
||||
if (!name)
|
||||
continue;
|
||||
|
||||
if (strcmp(name, "bpf_cmd") == 0)
|
||||
info->cmd_t = t;
|
||||
else if (strcmp(name, "bpf_map_type") == 0)
|
||||
info->map_t = t;
|
||||
else if (strcmp(name, "bpf_prog_type") == 0)
|
||||
info->prog_t = t;
|
||||
else if (strcmp(name, "bpf_attach_type") == 0)
|
||||
info->attach_t = t;
|
||||
else
|
||||
continue;
|
||||
|
||||
if (info->cmd_t && info->map_t && info->prog_t && info->attach_t)
|
||||
return 0;
|
||||
}
|
||||
|
||||
return -ESRCH;
|
||||
}
|
||||
|
||||
static bool find_btf_enum_const(const struct btf *btf, const struct btf_type *enum_t,
|
||||
const char *prefix, const char *str, int *value)
|
||||
{
|
||||
const struct btf_enum *e;
|
||||
const char *name;
|
||||
int i, n, pfx_len = strlen(prefix);
|
||||
|
||||
*value = 0;
|
||||
|
||||
if (!btf || !enum_t)
|
||||
return false;
|
||||
|
||||
for (i = 0, n = btf_vlen(enum_t); i < n; i++) {
|
||||
e = &btf_enum(enum_t)[i];
|
||||
|
||||
name = btf_name_by_offset(btf, e->name_off);
|
||||
if (!name || strncasecmp(name, prefix, pfx_len) != 0)
|
||||
continue;
|
||||
|
||||
/* match symbolic name case insensitive and ignoring prefix */
|
||||
if (strcasecmp(name + pfx_len, str) == 0) {
|
||||
*value = e->val;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static void seq_print_delegate_opts(struct seq_file *m,
|
||||
const char *opt_name,
|
||||
const struct btf *btf,
|
||||
const struct btf_type *enum_t,
|
||||
const char *prefix,
|
||||
u64 delegate_msk, u64 any_msk)
|
||||
{
|
||||
const struct btf_enum *e;
|
||||
bool first = true;
|
||||
const char *name;
|
||||
u64 msk;
|
||||
int i, n, pfx_len = strlen(prefix);
|
||||
|
||||
delegate_msk &= any_msk; /* clear unknown bits */
|
||||
|
||||
if (delegate_msk == 0)
|
||||
return;
|
||||
|
||||
seq_printf(m, ",%s", opt_name);
|
||||
if (delegate_msk == any_msk) {
|
||||
seq_printf(m, "=any");
|
||||
return;
|
||||
}
|
||||
|
||||
if (btf && enum_t) {
|
||||
for (i = 0, n = btf_vlen(enum_t); i < n; i++) {
|
||||
e = &btf_enum(enum_t)[i];
|
||||
name = btf_name_by_offset(btf, e->name_off);
|
||||
if (!name || strncasecmp(name, prefix, pfx_len) != 0)
|
||||
continue;
|
||||
msk = 1ULL << e->val;
|
||||
if (delegate_msk & msk) {
|
||||
/* emit lower-case name without prefix */
|
||||
seq_printf(m, "%c", first ? '=' : ':');
|
||||
name += pfx_len;
|
||||
while (*name) {
|
||||
seq_printf(m, "%c", tolower(*name));
|
||||
name++;
|
||||
}
|
||||
|
||||
delegate_msk &= ~msk;
|
||||
first = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (delegate_msk)
|
||||
seq_printf(m, "%c0x%llx", first ? '=' : ':', delegate_msk);
|
||||
}
|
||||
|
||||
/*
|
||||
* Display the mount options in /proc/mounts.
|
||||
*/
|
||||
static int bpf_show_options(struct seq_file *m, struct dentry *root)
|
||||
{
|
||||
umode_t mode = d_inode(root)->i_mode & S_IALLUGO & ~S_ISVTX;
|
||||
struct bpf_mount_opts *opts = root->d_sb->s_fs_info;
|
||||
struct inode *inode = d_inode(root);
|
||||
umode_t mode = inode->i_mode & S_IALLUGO & ~S_ISVTX;
|
||||
u64 mask;
|
||||
|
||||
if (!uid_eq(inode->i_uid, GLOBAL_ROOT_UID))
|
||||
seq_printf(m, ",uid=%u",
|
||||
from_kuid_munged(&init_user_ns, inode->i_uid));
|
||||
if (!gid_eq(inode->i_gid, GLOBAL_ROOT_GID))
|
||||
seq_printf(m, ",gid=%u",
|
||||
from_kgid_munged(&init_user_ns, inode->i_gid));
|
||||
if (mode != S_IRWXUGO)
|
||||
seq_printf(m, ",mode=%o", mode);
|
||||
|
||||
if (opts->delegate_cmds || opts->delegate_maps ||
|
||||
opts->delegate_progs || opts->delegate_attachs) {
|
||||
struct bpffs_btf_enums info;
|
||||
|
||||
/* ignore errors, fallback to hex */
|
||||
(void)find_bpffs_btf_enums(&info);
|
||||
|
||||
mask = (1ULL << __MAX_BPF_CMD) - 1;
|
||||
seq_print_delegate_opts(m, "delegate_cmds",
|
||||
info.btf, info.cmd_t, "BPF_",
|
||||
opts->delegate_cmds, mask);
|
||||
|
||||
mask = (1ULL << __MAX_BPF_MAP_TYPE) - 1;
|
||||
seq_print_delegate_opts(m, "delegate_maps",
|
||||
info.btf, info.map_t, "BPF_MAP_TYPE_",
|
||||
opts->delegate_maps, mask);
|
||||
|
||||
mask = (1ULL << __MAX_BPF_PROG_TYPE) - 1;
|
||||
seq_print_delegate_opts(m, "delegate_progs",
|
||||
info.btf, info.prog_t, "BPF_PROG_TYPE_",
|
||||
opts->delegate_progs, mask);
|
||||
|
||||
mask = (1ULL << __MAX_BPF_ATTACH_TYPE) - 1;
|
||||
seq_print_delegate_opts(m, "delegate_attachs",
|
||||
info.btf, info.attach_t, "BPF_",
|
||||
opts->delegate_attachs, mask);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -617,7 +786,7 @@ static void bpf_free_inode(struct inode *inode)
|
||||
free_inode_nonrcu(inode);
|
||||
}
|
||||
|
||||
static const struct super_operations bpf_super_ops = {
|
||||
const struct super_operations bpf_super_ops = {
|
||||
.statfs = simple_statfs,
|
||||
.drop_inode = generic_delete_inode,
|
||||
.show_options = bpf_show_options,
|
||||
@ -625,23 +794,33 @@ static const struct super_operations bpf_super_ops = {
|
||||
};
|
||||
|
||||
enum {
|
||||
OPT_UID,
|
||||
OPT_GID,
|
||||
OPT_MODE,
|
||||
OPT_DELEGATE_CMDS,
|
||||
OPT_DELEGATE_MAPS,
|
||||
OPT_DELEGATE_PROGS,
|
||||
OPT_DELEGATE_ATTACHS,
|
||||
};
|
||||
|
||||
static const struct fs_parameter_spec bpf_fs_parameters[] = {
|
||||
fsparam_u32 ("uid", OPT_UID),
|
||||
fsparam_u32 ("gid", OPT_GID),
|
||||
fsparam_u32oct ("mode", OPT_MODE),
|
||||
fsparam_string ("delegate_cmds", OPT_DELEGATE_CMDS),
|
||||
fsparam_string ("delegate_maps", OPT_DELEGATE_MAPS),
|
||||
fsparam_string ("delegate_progs", OPT_DELEGATE_PROGS),
|
||||
fsparam_string ("delegate_attachs", OPT_DELEGATE_ATTACHS),
|
||||
{}
|
||||
};
|
||||
|
||||
struct bpf_mount_opts {
|
||||
umode_t mode;
|
||||
};
|
||||
|
||||
static int bpf_parse_param(struct fs_context *fc, struct fs_parameter *param)
|
||||
{
|
||||
struct bpf_mount_opts *opts = fc->fs_private;
|
||||
struct bpf_mount_opts *opts = fc->s_fs_info;
|
||||
struct fs_parse_result result;
|
||||
int opt;
|
||||
kuid_t uid;
|
||||
kgid_t gid;
|
||||
int opt, err;
|
||||
|
||||
opt = fs_parse(fc, bpf_fs_parameters, param, &result);
|
||||
if (opt < 0) {
|
||||
@ -662,12 +841,104 @@ static int bpf_parse_param(struct fs_context *fc, struct fs_parameter *param)
|
||||
}
|
||||
|
||||
switch (opt) {
|
||||
case OPT_UID:
|
||||
uid = make_kuid(current_user_ns(), result.uint_32);
|
||||
if (!uid_valid(uid))
|
||||
goto bad_value;
|
||||
|
||||
/*
|
||||
* The requested uid must be representable in the
|
||||
* filesystem's idmapping.
|
||||
*/
|
||||
if (!kuid_has_mapping(fc->user_ns, uid))
|
||||
goto bad_value;
|
||||
|
||||
opts->uid = uid;
|
||||
break;
|
||||
case OPT_GID:
|
||||
gid = make_kgid(current_user_ns(), result.uint_32);
|
||||
if (!gid_valid(gid))
|
||||
goto bad_value;
|
||||
|
||||
/*
|
||||
* The requested gid must be representable in the
|
||||
* filesystem's idmapping.
|
||||
*/
|
||||
if (!kgid_has_mapping(fc->user_ns, gid))
|
||||
goto bad_value;
|
||||
|
||||
opts->gid = gid;
|
||||
break;
|
||||
case OPT_MODE:
|
||||
opts->mode = result.uint_32 & S_IALLUGO;
|
||||
break;
|
||||
case OPT_DELEGATE_CMDS:
|
||||
case OPT_DELEGATE_MAPS:
|
||||
case OPT_DELEGATE_PROGS:
|
||||
case OPT_DELEGATE_ATTACHS: {
|
||||
struct bpffs_btf_enums info;
|
||||
const struct btf_type *enum_t;
|
||||
const char *enum_pfx;
|
||||
u64 *delegate_msk, msk = 0;
|
||||
char *p;
|
||||
int val;
|
||||
|
||||
/* ignore errors, fallback to hex */
|
||||
(void)find_bpffs_btf_enums(&info);
|
||||
|
||||
switch (opt) {
|
||||
case OPT_DELEGATE_CMDS:
|
||||
delegate_msk = &opts->delegate_cmds;
|
||||
enum_t = info.cmd_t;
|
||||
enum_pfx = "BPF_";
|
||||
break;
|
||||
case OPT_DELEGATE_MAPS:
|
||||
delegate_msk = &opts->delegate_maps;
|
||||
enum_t = info.map_t;
|
||||
enum_pfx = "BPF_MAP_TYPE_";
|
||||
break;
|
||||
case OPT_DELEGATE_PROGS:
|
||||
delegate_msk = &opts->delegate_progs;
|
||||
enum_t = info.prog_t;
|
||||
enum_pfx = "BPF_PROG_TYPE_";
|
||||
break;
|
||||
case OPT_DELEGATE_ATTACHS:
|
||||
delegate_msk = &opts->delegate_attachs;
|
||||
enum_t = info.attach_t;
|
||||
enum_pfx = "BPF_";
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
while ((p = strsep(¶m->string, ":"))) {
|
||||
if (strcmp(p, "any") == 0) {
|
||||
msk |= ~0ULL;
|
||||
} else if (find_btf_enum_const(info.btf, enum_t, enum_pfx, p, &val)) {
|
||||
msk |= 1ULL << val;
|
||||
} else {
|
||||
err = kstrtou64(p, 0, &msk);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
/* Setting delegation mount options requires privileges */
|
||||
if (msk && !capable(CAP_SYS_ADMIN))
|
||||
return -EPERM;
|
||||
|
||||
*delegate_msk |= msk;
|
||||
break;
|
||||
}
|
||||
default:
|
||||
/* ignore unknown mount options */
|
||||
break;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
bad_value:
|
||||
return invalfc(fc, "Bad value for '%s'", param->key);
|
||||
}
|
||||
|
||||
struct bpf_preload_ops *bpf_preload_ops;
|
||||
@ -739,10 +1010,14 @@ out:
|
||||
static int bpf_fill_super(struct super_block *sb, struct fs_context *fc)
|
||||
{
|
||||
static const struct tree_descr bpf_rfiles[] = { { "" } };
|
||||
struct bpf_mount_opts *opts = fc->fs_private;
|
||||
struct bpf_mount_opts *opts = sb->s_fs_info;
|
||||
struct inode *inode;
|
||||
int ret;
|
||||
|
||||
/* Mounting an instance of BPF FS requires privileges */
|
||||
if (fc->user_ns != &init_user_ns && !capable(CAP_SYS_ADMIN))
|
||||
return -EPERM;
|
||||
|
||||
ret = simple_fill_super(sb, BPF_FS_MAGIC, bpf_rfiles);
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -750,6 +1025,8 @@ static int bpf_fill_super(struct super_block *sb, struct fs_context *fc)
|
||||
sb->s_op = &bpf_super_ops;
|
||||
|
||||
inode = sb->s_root->d_inode;
|
||||
inode->i_uid = opts->uid;
|
||||
inode->i_gid = opts->gid;
|
||||
inode->i_op = &bpf_dir_iops;
|
||||
inode->i_mode &= ~S_IALLUGO;
|
||||
populate_bpffs(sb->s_root);
|
||||
@ -764,7 +1041,7 @@ static int bpf_get_tree(struct fs_context *fc)
|
||||
|
||||
static void bpf_free_fc(struct fs_context *fc)
|
||||
{
|
||||
kfree(fc->fs_private);
|
||||
kfree(fc->s_fs_info);
|
||||
}
|
||||
|
||||
static const struct fs_context_operations bpf_context_ops = {
|
||||
@ -785,18 +1062,35 @@ static int bpf_init_fs_context(struct fs_context *fc)
|
||||
return -ENOMEM;
|
||||
|
||||
opts->mode = S_IRWXUGO;
|
||||
opts->uid = current_fsuid();
|
||||
opts->gid = current_fsgid();
|
||||
|
||||
fc->fs_private = opts;
|
||||
/* start out with no BPF token delegation enabled */
|
||||
opts->delegate_cmds = 0;
|
||||
opts->delegate_maps = 0;
|
||||
opts->delegate_progs = 0;
|
||||
opts->delegate_attachs = 0;
|
||||
|
||||
fc->s_fs_info = opts;
|
||||
fc->ops = &bpf_context_ops;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void bpf_kill_super(struct super_block *sb)
|
||||
{
|
||||
struct bpf_mount_opts *opts = sb->s_fs_info;
|
||||
|
||||
kill_litter_super(sb);
|
||||
kfree(opts);
|
||||
}
|
||||
|
||||
static struct file_system_type bpf_fs_type = {
|
||||
.owner = THIS_MODULE,
|
||||
.name = "bpf",
|
||||
.init_fs_context = bpf_init_fs_context,
|
||||
.parameters = bpf_fs_parameters,
|
||||
.kill_sb = kill_litter_super,
|
||||
.kill_sb = bpf_kill_super,
|
||||
.fs_flags = FS_USERNS_MOUNT,
|
||||
};
|
||||
|
||||
static int __init bpf_init(void)
|
||||
|
@ -539,6 +539,19 @@ static void verbose_snum(struct bpf_verifier_env *env, s64 num)
|
||||
verbose(env, "%#llx", num);
|
||||
}
|
||||
|
||||
int tnum_strn(char *str, size_t size, struct tnum a)
|
||||
{
|
||||
/* print as a constant, if tnum is fully known */
|
||||
if (a.mask == 0) {
|
||||
if (is_unum_decimal(a.value))
|
||||
return snprintf(str, size, "%llu", a.value);
|
||||
else
|
||||
return snprintf(str, size, "%#llx", a.value);
|
||||
}
|
||||
return snprintf(str, size, "(%#llx; %#llx)", a.value, a.mask);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(tnum_strn);
|
||||
|
||||
static void print_scalar_ranges(struct bpf_verifier_env *env,
|
||||
const struct bpf_reg_state *reg,
|
||||
const char **sep)
|
||||
@ -615,6 +628,12 @@ static bool type_is_map_ptr(enum bpf_reg_type t) {
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* _a stands for append, was shortened to avoid multiline statements below.
|
||||
* This macro is used to output a comma separated list of attributes.
|
||||
*/
|
||||
#define verbose_a(fmt, ...) ({ verbose(env, "%s" fmt, sep, ##__VA_ARGS__); sep = ","; })
|
||||
|
||||
static void print_reg_state(struct bpf_verifier_env *env,
|
||||
const struct bpf_func_state *state,
|
||||
const struct bpf_reg_state *reg)
|
||||
@ -630,11 +649,6 @@ static void print_reg_state(struct bpf_verifier_env *env,
|
||||
verbose_snum(env, reg->var_off.value + reg->off);
|
||||
return;
|
||||
}
|
||||
/*
|
||||
* _a stands for append, was shortened to avoid multiline statements below.
|
||||
* This macro is used to output a comma separated list of attributes.
|
||||
*/
|
||||
#define verbose_a(fmt, ...) ({ verbose(env, "%s" fmt, sep, ##__VA_ARGS__); sep = ","; })
|
||||
|
||||
verbose(env, "%s", reg_type_str(env, t));
|
||||
if (t == PTR_TO_STACK) {
|
||||
@ -669,6 +683,12 @@ static void print_reg_state(struct bpf_verifier_env *env,
|
||||
verbose_a("r=");
|
||||
verbose_unum(env, reg->range);
|
||||
}
|
||||
if (base_type(t) == PTR_TO_MEM) {
|
||||
verbose_a("sz=");
|
||||
verbose_unum(env, reg->mem_size);
|
||||
}
|
||||
if (t == CONST_PTR_TO_DYNPTR)
|
||||
verbose_a("type=%s", dynptr_type_str(reg->dynptr.type));
|
||||
if (tnum_is_const(reg->var_off)) {
|
||||
/* a pointer register with fixed offset */
|
||||
if (reg->var_off.value) {
|
||||
@ -685,8 +705,6 @@ static void print_reg_state(struct bpf_verifier_env *env,
|
||||
}
|
||||
}
|
||||
verbose(env, ")");
|
||||
|
||||
#undef verbose_a
|
||||
}
|
||||
|
||||
void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_func_state *state,
|
||||
@ -710,6 +728,7 @@ void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_func_st
|
||||
}
|
||||
for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
|
||||
char types_buf[BPF_REG_SIZE + 1];
|
||||
const char *sep = "";
|
||||
bool valid = false;
|
||||
u8 slot_type;
|
||||
int j;
|
||||
@ -748,9 +767,14 @@ void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_func_st
|
||||
|
||||
verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
|
||||
print_liveness(env, reg->live);
|
||||
verbose(env, "=dynptr_%s", dynptr_type_str(reg->dynptr.type));
|
||||
verbose(env, "=dynptr_%s(", dynptr_type_str(reg->dynptr.type));
|
||||
if (reg->id)
|
||||
verbose_a("id=%d", reg->id);
|
||||
if (reg->ref_obj_id)
|
||||
verbose(env, "(ref_id=%d)", reg->ref_obj_id);
|
||||
verbose_a("ref_id=%d", reg->ref_obj_id);
|
||||
if (reg->dynptr_id)
|
||||
verbose_a("dynptr_id=%d", reg->dynptr_id);
|
||||
verbose(env, ")");
|
||||
break;
|
||||
case STACK_ITER:
|
||||
/* only main slot has ref_obj_id set; skip others */
|
||||
|
@@ -127,12 +127,21 @@ void *bpf_map_fd_get_ptr(struct bpf_map *map,
return inner_map;
}

void bpf_map_fd_put_ptr(void *ptr)
void bpf_map_fd_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
{
/* ptr->ops->map_free() has to go through one
* rcu grace period by itself.
struct bpf_map *inner_map = ptr;

/* Defer the freeing of inner map according to the sleepable attribute
* of bpf program which owns the outer map, so unnecessary waiting for
* RCU tasks trace grace period can be avoided.
*/
bpf_map_put(ptr);
if (need_defer) {
if (atomic64_read(&map->sleepable_refcnt))
WRITE_ONCE(inner_map->free_after_mult_rcu_gp, true);
else
WRITE_ONCE(inner_map->free_after_rcu_gp, true);
}
bpf_map_put(inner_map);
}

|
||||
u32 bpf_map_fd_sys_lookup_elem(void *ptr)
|
||||
|
@ -13,7 +13,7 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd);
|
||||
void bpf_map_meta_free(struct bpf_map *map_meta);
|
||||
void *bpf_map_fd_get_ptr(struct bpf_map *map, struct file *map_file,
|
||||
int ufd);
|
||||
void bpf_map_fd_put_ptr(void *ptr);
|
||||
void bpf_map_fd_put_ptr(struct bpf_map *map, void *ptr, bool need_defer);
|
||||
u32 bpf_map_fd_sys_lookup_elem(void *ptr);
|
||||
|
||||
#endif
|
||||
|
@@ -142,9 +142,13 @@ static u32 bpf_map_value_size(const struct bpf_map *map)

static void maybe_wait_bpf_programs(struct bpf_map *map)
{
/* Wait for any running BPF programs to complete so that
* userspace, when we return to it, knows that all programs
* that could be running use the new map value.
/* Wait for any running non-sleepable BPF programs to complete so that
* userspace, when we return to it, knows that all non-sleepable
* programs that could be running use the new map value. For sleepable
* BPF programs, synchronize_rcu_tasks_trace() should be used to wait
* for the completions of these programs, but considering the waiting
* time can be very long and userspace may think it will hang forever,
* so don't handle sleepable BPF programs now.
*/
if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)

@ -180,15 +184,11 @@ static int bpf_map_update_value(struct bpf_map *map, struct file *map_file,
|
||||
err = bpf_percpu_cgroup_storage_update(map, key, value,
|
||||
flags);
|
||||
} else if (IS_FD_ARRAY(map)) {
|
||||
rcu_read_lock();
|
||||
err = bpf_fd_array_map_update_elem(map, map_file, key, value,
|
||||
flags);
|
||||
rcu_read_unlock();
|
||||
} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
|
||||
rcu_read_lock();
|
||||
err = bpf_fd_htab_map_update_elem(map, map_file, key, value,
|
||||
flags);
|
||||
rcu_read_unlock();
|
||||
} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
|
||||
/* rcu_read_lock() is not needed */
|
||||
err = bpf_fd_reuseport_array_update_elem(map, key, value,
|
||||
@ -203,7 +203,6 @@ static int bpf_map_update_value(struct bpf_map *map, struct file *map_file,
|
||||
rcu_read_unlock();
|
||||
}
|
||||
bpf_enable_instrumentation();
|
||||
maybe_wait_bpf_programs(map);
|
||||
|
||||
return err;
|
||||
}
|
||||
@ -264,7 +263,6 @@ static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
|
||||
}
|
||||
|
||||
bpf_enable_instrumentation();
|
||||
maybe_wait_bpf_programs(map);
|
||||
|
||||
return err;
|
||||
}
|
||||
@ -694,6 +692,7 @@ static void bpf_map_free_deferred(struct work_struct *work)
|
||||
{
|
||||
struct bpf_map *map = container_of(work, struct bpf_map, work);
|
||||
struct btf_record *rec = map->record;
|
||||
struct btf *btf = map->btf;
|
||||
|
||||
security_bpf_map_free(map);
|
||||
bpf_map_release_memcg(map);
|
||||
@ -709,6 +708,10 @@ static void bpf_map_free_deferred(struct work_struct *work)
|
||||
* template bpf_map struct used during verification.
|
||||
*/
|
||||
btf_record_free(rec);
|
||||
/* Delay freeing of btf for maps, as map_free callback may need
|
||||
* struct_meta info which will be freed with btf_put().
|
||||
*/
|
||||
btf_put(btf);
|
||||
}
|
||||
|
||||
static void bpf_map_put_uref(struct bpf_map *map)
|
||||
@ -719,6 +722,28 @@ static void bpf_map_put_uref(struct bpf_map *map)
|
||||
}
|
||||
}
|
||||
|
||||
static void bpf_map_free_in_work(struct bpf_map *map)
|
||||
{
|
||||
INIT_WORK(&map->work, bpf_map_free_deferred);
|
||||
/* Avoid spawning kworkers, since they all might contend
|
||||
* for the same mutex like slab_mutex.
|
||||
*/
|
||||
queue_work(system_unbound_wq, &map->work);
|
||||
}
|
||||
|
||||
static void bpf_map_free_rcu_gp(struct rcu_head *rcu)
|
||||
{
|
||||
bpf_map_free_in_work(container_of(rcu, struct bpf_map, rcu));
|
||||
}
|
||||
|
||||
static void bpf_map_free_mult_rcu_gp(struct rcu_head *rcu)
|
||||
{
|
||||
if (rcu_trace_implies_rcu_gp())
|
||||
bpf_map_free_rcu_gp(rcu);
|
||||
else
|
||||
call_rcu(rcu, bpf_map_free_rcu_gp);
|
||||
}
|
||||
|
||||
/* decrement map refcnt and schedule it for freeing via workqueue
|
||||
* (underlying map implementation ops->map_free() might sleep)
|
||||
*/
|
||||
@ -727,12 +752,14 @@ void bpf_map_put(struct bpf_map *map)
|
||||
if (atomic64_dec_and_test(&map->refcnt)) {
|
||||
/* bpf_map_free_id() must be called first */
|
||||
bpf_map_free_id(map);
|
||||
btf_put(map->btf);
|
||||
INIT_WORK(&map->work, bpf_map_free_deferred);
|
||||
/* Avoid spawning kworkers, since they all might contend
|
||||
* for the same mutex like slab_mutex.
|
||||
*/
|
||||
queue_work(system_unbound_wq, &map->work);
|
||||
|
||||
WARN_ON_ONCE(atomic64_read(&map->sleepable_refcnt));
|
||||
if (READ_ONCE(map->free_after_mult_rcu_gp))
|
||||
call_rcu_tasks_trace(&map->rcu, bpf_map_free_mult_rcu_gp);
|
||||
else if (READ_ONCE(map->free_after_rcu_gp))
|
||||
call_rcu(&map->rcu, bpf_map_free_rcu_gp);
|
||||
else
|
||||
bpf_map_free_in_work(map);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(bpf_map_put);
|
||||
@ -984,8 +1011,8 @@ int map_check_no_btf(const struct bpf_map *map,
|
||||
return -ENOTSUPP;
|
||||
}
|
||||
|
||||
static int map_check_btf(struct bpf_map *map, const struct btf *btf,
|
||||
u32 btf_key_id, u32 btf_value_id)
|
||||
static int map_check_btf(struct bpf_map *map, struct bpf_token *token,
|
||||
const struct btf *btf, u32 btf_key_id, u32 btf_value_id)
|
||||
{
|
||||
const struct btf_type *key_type, *value_type;
|
||||
u32 key_size, value_size;
|
||||
@ -1013,7 +1040,7 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf,
|
||||
if (!IS_ERR_OR_NULL(map->record)) {
|
||||
int i;
|
||||
|
||||
if (!bpf_capable()) {
|
||||
if (!bpf_token_capable(token, CAP_BPF)) {
|
||||
ret = -EPERM;
|
||||
goto free_map_tab;
|
||||
}
|
||||
@ -1096,11 +1123,17 @@ free_map_tab:
|
||||
return ret;
|
||||
}
|
||||
|
||||
#define BPF_MAP_CREATE_LAST_FIELD map_extra
|
||||
static bool bpf_net_capable(void)
|
||||
{
|
||||
return capable(CAP_NET_ADMIN) || capable(CAP_SYS_ADMIN);
|
||||
}
|
||||
|
||||
#define BPF_MAP_CREATE_LAST_FIELD map_token_fd
|
||||
/* called via syscall */
|
||||
static int map_create(union bpf_attr *attr)
|
||||
{
|
||||
const struct bpf_map_ops *ops;
|
||||
struct bpf_token *token = NULL;
|
||||
int numa_node = bpf_map_attr_numa_node(attr);
|
||||
u32 map_type = attr->map_type;
|
||||
struct bpf_map *map;
|
||||
@ -1151,14 +1184,32 @@ static int map_create(union bpf_attr *attr)
|
||||
if (!ops->map_mem_usage)
|
||||
return -EINVAL;
|
||||
|
||||
if (attr->map_token_fd) {
|
||||
token = bpf_token_get_from_fd(attr->map_token_fd);
|
||||
if (IS_ERR(token))
|
||||
return PTR_ERR(token);
|
||||
|
||||
/* if current token doesn't grant map creation permissions,
|
||||
* then we can't use this token, so ignore it and rely on
|
||||
* system-wide capabilities checks
|
||||
*/
|
||||
if (!bpf_token_allow_cmd(token, BPF_MAP_CREATE) ||
|
||||
!bpf_token_allow_map_type(token, attr->map_type)) {
|
||||
bpf_token_put(token);
|
||||
token = NULL;
|
||||
}
|
||||
}
|
||||
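For context, a hedged userspace sketch of what this path serves: passing an already-created token fd via the new map_token_fd attribute (field names as used in this hunk; the token-creation step and error handling are omitted, and token_fd is assumed to come from BPF_TOKEN_CREATE):

union bpf_attr attr = {};

attr.map_type     = BPF_MAP_TYPE_HASH;
attr.key_size     = 4;
attr.value_size   = 8;
attr.max_entries  = 16;
attr.map_token_fd = token_fd;   /* 0 means "no token", as handled above */

int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
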
|
||||
err = -EPERM;
|
||||
|
||||
/* Intent here is for unprivileged_bpf_disabled to block BPF map
|
||||
* creation for unprivileged users; other actions depend
|
||||
* on fd availability and access to bpffs, so are dependent on
|
||||
* object creation success. Even with unprivileged BPF disabled,
|
||||
* capability checks are still carried out.
|
||||
*/
|
||||
if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
|
||||
return -EPERM;
|
||||
if (sysctl_unprivileged_bpf_disabled && !bpf_token_capable(token, CAP_BPF))
|
||||
goto put_token;
|
||||
|
||||
/* check privileged map type permissions */
|
||||
switch (map_type) {
|
||||
@ -1191,25 +1242,27 @@ static int map_create(union bpf_attr *attr)
|
||||
case BPF_MAP_TYPE_LRU_PERCPU_HASH:
|
||||
case BPF_MAP_TYPE_STRUCT_OPS:
|
||||
case BPF_MAP_TYPE_CPUMAP:
|
||||
if (!bpf_capable())
|
||||
return -EPERM;
|
||||
if (!bpf_token_capable(token, CAP_BPF))
|
||||
goto put_token;
|
||||
break;
|
||||
case BPF_MAP_TYPE_SOCKMAP:
|
||||
case BPF_MAP_TYPE_SOCKHASH:
|
||||
case BPF_MAP_TYPE_DEVMAP:
|
||||
case BPF_MAP_TYPE_DEVMAP_HASH:
|
||||
case BPF_MAP_TYPE_XSKMAP:
|
||||
if (!capable(CAP_NET_ADMIN))
|
||||
return -EPERM;
|
||||
if (!bpf_token_capable(token, CAP_NET_ADMIN))
|
||||
goto put_token;
|
||||
break;
|
||||
default:
|
||||
WARN(1, "unsupported map type %d", map_type);
|
||||
return -EPERM;
|
||||
goto put_token;
|
||||
}
|
||||
|
||||
map = ops->map_alloc(attr);
|
||||
if (IS_ERR(map))
|
||||
return PTR_ERR(map);
|
||||
if (IS_ERR(map)) {
|
||||
err = PTR_ERR(map);
|
||||
goto put_token;
|
||||
}
|
||||
map->ops = ops;
|
||||
map->map_type = map_type;
|
||||
|
||||
@ -1246,7 +1299,7 @@ static int map_create(union bpf_attr *attr)
|
||||
map->btf = btf;
|
||||
|
||||
if (attr->btf_value_type_id) {
|
||||
err = map_check_btf(map, btf, attr->btf_key_type_id,
|
||||
err = map_check_btf(map, token, btf, attr->btf_key_type_id,
|
||||
attr->btf_value_type_id);
|
||||
if (err)
|
||||
goto free_map;
|
||||
@ -1258,15 +1311,16 @@ static int map_create(union bpf_attr *attr)
|
||||
attr->btf_vmlinux_value_type_id;
|
||||
}
|
||||
|
||||
err = security_bpf_map_alloc(map);
|
||||
err = security_bpf_map_create(map, attr, token);
|
||||
if (err)
|
||||
goto free_map;
|
||||
goto free_map_sec;
|
||||
|
||||
err = bpf_map_alloc_id(map);
|
||||
if (err)
|
||||
goto free_map_sec;
|
||||
|
||||
bpf_map_save_memcg(map);
|
||||
bpf_token_put(token);
|
||||
|
||||
err = bpf_map_new_fd(map, f_flags);
|
||||
if (err < 0) {
|
||||
@ -1287,6 +1341,8 @@ free_map_sec:
|
||||
free_map:
|
||||
btf_put(map->btf);
|
||||
map->ops->map_free(map);
|
||||
put_token:
|
||||
bpf_token_put(token);
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -1524,6 +1580,8 @@ static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
|
||||
}
|
||||
|
||||
err = bpf_map_update_value(map, f.file, key, value, attr->flags);
|
||||
if (!err)
|
||||
maybe_wait_bpf_programs(map);
|
||||
|
||||
kvfree(value);
|
||||
free_key:
|
||||
@ -1579,7 +1637,8 @@ static int map_delete_elem(union bpf_attr *attr, bpfptr_t uattr)
|
||||
err = map->ops->map_delete_elem(map, key);
|
||||
rcu_read_unlock();
|
||||
bpf_enable_instrumentation();
|
||||
maybe_wait_bpf_programs(map);
|
||||
if (!err)
|
||||
maybe_wait_bpf_programs(map);
|
||||
out:
|
||||
kvfree(key);
|
||||
err_put:
|
||||
@ -1676,6 +1735,9 @@ int generic_map_delete_batch(struct bpf_map *map,
|
||||
if (!max_count)
|
||||
return 0;
|
||||
|
||||
if (put_user(0, &uattr->batch.count))
|
||||
return -EFAULT;
|
||||
|
||||
key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
|
||||
if (!key)
|
||||
return -ENOMEM;
|
||||
@ -1705,7 +1767,6 @@ int generic_map_delete_batch(struct bpf_map *map,
|
||||
|
||||
kvfree(key);
|
||||
|
||||
maybe_wait_bpf_programs(map);
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -1733,6 +1794,9 @@ int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
|
||||
if (!max_count)
|
||||
return 0;
|
||||
|
||||
if (put_user(0, &uattr->batch.count))
|
||||
return -EFAULT;
|
||||
|
||||
key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
|
||||
if (!key)
|
||||
return -ENOMEM;
|
||||
@ -1763,6 +1827,7 @@ int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
|
||||
|
||||
kvfree(value);
|
||||
kvfree(key);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -2108,7 +2173,7 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu)
|
||||
kvfree(aux->func_info);
|
||||
kfree(aux->func_info_aux);
|
||||
free_uid(aux->user);
|
||||
security_bpf_prog_free(aux);
|
||||
security_bpf_prog_free(aux->prog);
|
||||
bpf_prog_free(aux->prog);
|
||||
}
|
||||
|
||||
@ -2554,13 +2619,15 @@ static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
|
||||
}
|
||||
|
||||
/* last field in 'union bpf_attr' used by this command */
|
||||
#define BPF_PROG_LOAD_LAST_FIELD log_true_size
|
||||
#define BPF_PROG_LOAD_LAST_FIELD prog_token_fd
|
||||
|
||||
static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
|
||||
{
|
||||
enum bpf_prog_type type = attr->prog_type;
|
||||
struct bpf_prog *prog, *dst_prog = NULL;
|
||||
struct btf *attach_btf = NULL;
|
||||
struct bpf_token *token = NULL;
|
||||
bool bpf_cap;
|
||||
int err;
|
||||
char license[128];
|
||||
|
||||
@ -2577,10 +2644,31 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
|
||||
BPF_F_TEST_REG_INVARIANTS))
|
||||
return -EINVAL;
|
||||
|
||||
bpf_prog_load_fixup_attach_type(attr);
|
||||
|
||||
if (attr->prog_token_fd) {
|
||||
token = bpf_token_get_from_fd(attr->prog_token_fd);
|
||||
if (IS_ERR(token))
|
||||
return PTR_ERR(token);
|
||||
/* if current token doesn't grant prog loading permissions,
|
||||
* then we can't use this token, so ignore it and rely on
|
||||
* system-wide capabilities checks
|
||||
*/
|
||||
if (!bpf_token_allow_cmd(token, BPF_PROG_LOAD) ||
|
||||
!bpf_token_allow_prog_type(token, attr->prog_type,
|
||||
attr->expected_attach_type)) {
|
||||
bpf_token_put(token);
|
||||
token = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
bpf_cap = bpf_token_capable(token, CAP_BPF);
|
||||
err = -EPERM;
|
||||
|
||||
if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
|
||||
(attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
|
||||
!bpf_capable())
|
||||
return -EPERM;
|
||||
!bpf_cap)
|
||||
goto put_token;
|
||||
|
||||
/* Intent here is for unprivileged_bpf_disabled to block BPF program
|
||||
* creation for unprivileged users; other actions depend
|
||||
@ -2589,21 +2677,23 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
|
||||
* capability checks are still carried out for these
|
||||
* and other operations.
|
||||
*/
|
||||
if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
|
||||
return -EPERM;
|
||||
if (sysctl_unprivileged_bpf_disabled && !bpf_cap)
|
||||
goto put_token;
|
||||
|
||||
if (attr->insn_cnt == 0 ||
|
||||
attr->insn_cnt > (bpf_capable() ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS))
|
||||
return -E2BIG;
|
||||
attr->insn_cnt > (bpf_cap ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS)) {
|
||||
err = -E2BIG;
|
||||
goto put_token;
|
||||
}
|
||||
if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
|
||||
type != BPF_PROG_TYPE_CGROUP_SKB &&
|
||||
!bpf_capable())
|
||||
return -EPERM;
|
||||
!bpf_cap)
|
||||
goto put_token;
|
||||
|
||||
if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN) && !capable(CAP_SYS_ADMIN))
|
||||
return -EPERM;
|
||||
if (is_perfmon_prog_type(type) && !perfmon_capable())
|
||||
return -EPERM;
|
||||
if (is_net_admin_prog_type(type) && !bpf_token_capable(token, CAP_NET_ADMIN))
|
||||
goto put_token;
|
||||
if (is_perfmon_prog_type(type) && !bpf_token_capable(token, CAP_PERFMON))
|
||||
goto put_token;
|
||||
|
||||
/* attach_prog_fd/attach_btf_obj_fd can specify fd of either bpf_prog
|
||||
* or btf, we need to check which one it is
|
||||
@ -2613,27 +2703,33 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
|
||||
if (IS_ERR(dst_prog)) {
|
||||
dst_prog = NULL;
|
||||
attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd);
|
||||
if (IS_ERR(attach_btf))
|
||||
return -EINVAL;
|
||||
if (IS_ERR(attach_btf)) {
|
||||
err = -EINVAL;
|
||||
goto put_token;
|
||||
}
|
||||
if (!btf_is_kernel(attach_btf)) {
|
||||
/* attaching through specifying bpf_prog's BTF
|
||||
* objects directly might be supported eventually
|
||||
*/
|
||||
btf_put(attach_btf);
|
||||
return -ENOTSUPP;
|
||||
err = -ENOTSUPP;
|
||||
goto put_token;
|
||||
}
|
||||
}
|
||||
} else if (attr->attach_btf_id) {
|
||||
/* fall back to vmlinux BTF, if BTF type ID is specified */
|
||||
attach_btf = bpf_get_btf_vmlinux();
|
||||
if (IS_ERR(attach_btf))
|
||||
return PTR_ERR(attach_btf);
|
||||
if (!attach_btf)
|
||||
return -EINVAL;
|
||||
if (IS_ERR(attach_btf)) {
|
||||
err = PTR_ERR(attach_btf);
|
||||
goto put_token;
|
||||
}
|
||||
if (!attach_btf) {
|
||||
err = -EINVAL;
|
||||
goto put_token;
|
||||
}
|
||||
btf_get(attach_btf);
|
||||
}
|
||||
|
||||
bpf_prog_load_fixup_attach_type(attr);
|
||||
if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
|
||||
attach_btf, attr->attach_btf_id,
|
||||
dst_prog)) {
|
||||
@ -2641,7 +2737,8 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
|
||||
bpf_prog_put(dst_prog);
|
||||
if (attach_btf)
|
||||
btf_put(attach_btf);
|
||||
return -EINVAL;
|
||||
err = -EINVAL;
|
||||
goto put_token;
|
||||
}
|
||||
|
||||
/* plain bpf_prog allocation */
|
||||
@ -2651,7 +2748,8 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
|
||||
bpf_prog_put(dst_prog);
|
||||
if (attach_btf)
|
||||
btf_put(attach_btf);
|
||||
return -ENOMEM;
|
||||
err = -EINVAL;
|
||||
goto put_token;
|
||||
}
|
||||
|
||||
prog->expected_attach_type = attr->expected_attach_type;
|
||||
@ -2662,9 +2760,9 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
|
||||
prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE;
|
||||
prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS;
|
||||
|
||||
err = security_bpf_prog_alloc(prog->aux);
|
||||
if (err)
|
||||
goto free_prog;
|
||||
/* move token into prog->aux, reuse taken refcnt */
|
||||
prog->aux->token = token;
|
||||
token = NULL;
|
||||
|
||||
prog->aux->user = get_current_user();
|
||||
prog->len = attr->insn_cnt;
|
||||
@ -2673,12 +2771,12 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
|
||||
if (copy_from_bpfptr(prog->insns,
|
||||
make_bpfptr(attr->insns, uattr.is_kernel),
|
||||
bpf_prog_insn_size(prog)) != 0)
|
||||
goto free_prog_sec;
|
||||
goto free_prog;
|
||||
/* copy eBPF program license from user space */
|
||||
if (strncpy_from_bpfptr(license,
|
||||
make_bpfptr(attr->license, uattr.is_kernel),
|
||||
sizeof(license) - 1) < 0)
|
||||
goto free_prog_sec;
|
||||
goto free_prog;
|
||||
license[sizeof(license) - 1] = 0;
|
||||
|
||||
/* eBPF programs must be GPL compatible to use GPL-ed functions */
|
||||
@ -2692,25 +2790,29 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
|
||||
if (bpf_prog_is_dev_bound(prog->aux)) {
|
||||
err = bpf_prog_dev_bound_init(prog, attr);
|
||||
if (err)
|
||||
goto free_prog_sec;
|
||||
goto free_prog;
|
||||
}
|
||||
|
||||
if (type == BPF_PROG_TYPE_EXT && dst_prog &&
|
||||
bpf_prog_is_dev_bound(dst_prog->aux)) {
|
||||
err = bpf_prog_dev_bound_inherit(prog, dst_prog);
|
||||
if (err)
|
||||
goto free_prog_sec;
|
||||
goto free_prog;
|
||||
}
|
||||
|
||||
/* find program type: socket_filter vs tracing_filter */
|
||||
err = find_prog_type(type, prog);
|
||||
if (err < 0)
|
||||
goto free_prog_sec;
|
||||
goto free_prog;
|
||||
|
||||
prog->aux->load_time = ktime_get_boottime_ns();
|
||||
err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name,
|
||||
sizeof(attr->prog_name));
|
||||
if (err < 0)
|
||||
goto free_prog;
|
||||
|
||||
err = security_bpf_prog_load(prog, attr, token);
|
||||
if (err)
|
||||
goto free_prog_sec;
|
||||
|
||||
/* run eBPF verifier */
|
||||
@ -2756,13 +2858,16 @@ free_used_maps:
|
||||
*/
|
||||
__bpf_prog_put_noref(prog, prog->aux->real_func_cnt);
|
||||
return err;
|
||||
|
||||
free_prog_sec:
|
||||
free_uid(prog->aux->user);
|
||||
security_bpf_prog_free(prog->aux);
|
||||
security_bpf_prog_free(prog);
|
||||
free_prog:
|
||||
free_uid(prog->aux->user);
|
||||
if (prog->aux->attach_btf)
|
||||
btf_put(prog->aux->attach_btf);
|
||||
bpf_prog_free(prog);
|
||||
put_token:
|
||||
bpf_token_put(token);
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -3752,7 +3857,7 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
|
||||
case BPF_PROG_TYPE_SK_LOOKUP:
|
||||
return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
|
||||
case BPF_PROG_TYPE_CGROUP_SKB:
|
||||
if (!capable(CAP_NET_ADMIN))
|
||||
if (!bpf_token_capable(prog->aux->token, CAP_NET_ADMIN))
|
||||
/* cg-skb progs can be loaded by unpriv user.
|
||||
* check permissions at attach time.
|
||||
*/
|
||||
@ -3955,7 +4060,7 @@ static int bpf_prog_detach(const union bpf_attr *attr)
|
||||
static int bpf_prog_query(const union bpf_attr *attr,
|
||||
union bpf_attr __user *uattr)
|
||||
{
|
||||
if (!capable(CAP_NET_ADMIN))
|
||||
if (!bpf_net_capable())
|
||||
return -EPERM;
|
||||
if (CHECK_ATTR(BPF_PROG_QUERY))
|
||||
return -EINVAL;
|
||||
@ -4723,15 +4828,31 @@ static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
|
||||
return err;
|
||||
}
|
||||
|
||||
#define BPF_BTF_LOAD_LAST_FIELD btf_log_true_size
|
||||
#define BPF_BTF_LOAD_LAST_FIELD btf_token_fd
|
||||
|
||||
static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
|
||||
{
|
||||
struct bpf_token *token = NULL;
|
||||
|
||||
if (CHECK_ATTR(BPF_BTF_LOAD))
|
||||
return -EINVAL;
|
||||
|
||||
if (!bpf_capable())
|
||||
if (attr->btf_token_fd) {
|
||||
token = bpf_token_get_from_fd(attr->btf_token_fd);
|
||||
if (IS_ERR(token))
|
||||
return PTR_ERR(token);
|
||||
if (!bpf_token_allow_cmd(token, BPF_BTF_LOAD)) {
|
||||
bpf_token_put(token);
|
||||
token = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
if (!bpf_token_capable(token, CAP_BPF)) {
|
||||
bpf_token_put(token);
|
||||
return -EPERM;
|
||||
}
|
||||
|
||||
bpf_token_put(token);
|
||||
|
||||
return btf_new_fd(attr, uattr, uattr_size);
|
||||
}
|
||||
@ -4920,8 +5041,10 @@ static int bpf_map_do_batch(const union bpf_attr *attr,
|
||||
else
|
||||
BPF_DO_BATCH(map->ops->map_delete_batch, map, attr, uattr);
|
||||
err_put:
|
||||
if (has_write)
|
||||
if (has_write) {
|
||||
maybe_wait_bpf_programs(map);
|
||||
bpf_map_write_active_dec(map);
|
||||
}
|
||||
fdput(f);
|
||||
return err;
|
||||
}
|
||||
@ -5323,6 +5446,11 @@ static int bpf_prog_bind_map(union bpf_attr *attr)
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
/* The bpf program will not access the bpf map, but for the sake of
|
||||
* simplicity, increase sleepable_refcnt for sleepable program as well.
|
||||
*/
|
||||
if (prog->aux->sleepable)
|
||||
atomic64_inc(&map->sleepable_refcnt);
|
||||
memcpy(used_maps_new, used_maps_old,
|
||||
sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
|
||||
used_maps_new[prog->aux->used_map_cnt] = map;
|
||||
@ -5342,6 +5470,20 @@ out_prog_put:
|
||||
return ret;
|
||||
}
|
||||
|
||||
#define BPF_TOKEN_CREATE_LAST_FIELD token_create.bpffs_fd

static int token_create(union bpf_attr *attr)
{
if (CHECK_ATTR(BPF_TOKEN_CREATE))
return -EINVAL;

/* no flags are supported yet */
if (attr->token_create.flags)
return -EINVAL;

return bpf_token_create(attr);
}

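A hedged userspace sketch of the new command (attribute names follow this hunk; raw syscall() is used since libc wrappers are not assumed, and error handling is minimal):

#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Derive a token fd from a delegation-enabled bpffs mount. */
static int make_bpf_token(const char *bpffs_path)
{
	union bpf_attr attr = {};
	int bpffs_fd, token_fd;

	bpffs_fd = open(bpffs_path, O_RDONLY);
	if (bpffs_fd < 0)
		return -1;

	attr.token_create.bpffs_fd = bpffs_fd;   /* flags must stay 0 for now */
	token_fd = syscall(__NR_bpf, BPF_TOKEN_CREATE, &attr, sizeof(attr));

	close(bpffs_fd);
	return token_fd;   /* O_CLOEXEC fd on success, -1 with errno on failure */
}
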
|
||||
static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size)
|
||||
{
|
||||
union bpf_attr attr;
|
||||
@ -5475,6 +5617,9 @@ static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size)
|
||||
case BPF_PROG_BIND_MAP:
|
||||
err = bpf_prog_bind_map(&attr);
|
||||
break;
|
||||
case BPF_TOKEN_CREATE:
|
||||
err = token_create(&attr);
|
||||
break;
|
||||
default:
|
||||
err = -EINVAL;
|
||||
break;
|
||||
@ -5581,7 +5726,7 @@ static const struct bpf_func_proto bpf_sys_bpf_proto = {
|
||||
const struct bpf_func_proto * __weak
|
||||
tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
||||
{
|
||||
return bpf_base_func_proto(func_id);
|
||||
return bpf_base_func_proto(func_id, prog);
|
||||
}
|
||||
|
||||
BPF_CALL_1(bpf_sys_close, u32, fd)
|
||||
@ -5631,7 +5776,8 @@ syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
||||
{
|
||||
switch (func_id) {
|
||||
case BPF_FUNC_sys_bpf:
|
||||
return !perfmon_capable() ? NULL : &bpf_sys_bpf_proto;
|
||||
return !bpf_token_capable(prog->aux->token, CAP_PERFMON)
|
||||
? NULL : &bpf_sys_bpf_proto;
|
||||
case BPF_FUNC_btf_find_by_name_kind:
|
||||
return &bpf_btf_find_by_name_kind_proto;
|
||||
case BPF_FUNC_sys_close:
|
||||
|
@ -172,12 +172,6 @@ bool tnum_in(struct tnum a, struct tnum b)
|
||||
return a.value == b.value;
|
||||
}
|
||||
|
||||
int tnum_strn(char *str, size_t size, struct tnum a)
|
||||
{
|
||||
return snprintf(str, size, "(%#llx; %#llx)", a.value, a.mask);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(tnum_strn);
|
||||
|
||||
int tnum_sbin(char *str, size_t size, struct tnum a)
|
||||
{
|
||||
size_t n;
|
||||
|
kernel/bpf/token.c (new file, 271 lines)
@@ -0,0 +1,271 @@
#include <linux/bpf.h>
#include <linux/vmalloc.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/namei.h>
#include <linux/user_namespace.h>
#include <linux/security.h>

bool bpf_token_capable(const struct bpf_token *token, int cap)
{
/* BPF token allows ns_capable() level of capabilities, but only if
* token's userns is *exactly* the same as current user's userns
*/
if (token && current_user_ns() == token->userns) {
if (ns_capable(token->userns, cap) ||
(cap != CAP_SYS_ADMIN && ns_capable(token->userns, CAP_SYS_ADMIN)))
return security_bpf_token_capable(token, cap) == 0;
}
/* otherwise fallback to capable() checks */
return capable(cap) || (cap != CAP_SYS_ADMIN && capable(CAP_SYS_ADMIN));
}

|
||||
void bpf_token_inc(struct bpf_token *token)
|
||||
{
|
||||
atomic64_inc(&token->refcnt);
|
||||
}
|
||||
|
||||
static void bpf_token_free(struct bpf_token *token)
|
||||
{
|
||||
security_bpf_token_free(token);
|
||||
put_user_ns(token->userns);
|
||||
kvfree(token);
|
||||
}
|
||||
|
||||
static void bpf_token_put_deferred(struct work_struct *work)
|
||||
{
|
||||
struct bpf_token *token = container_of(work, struct bpf_token, work);
|
||||
|
||||
bpf_token_free(token);
|
||||
}
|
||||
|
||||
void bpf_token_put(struct bpf_token *token)
|
||||
{
|
||||
if (!token)
|
||||
return;
|
||||
|
||||
if (!atomic64_dec_and_test(&token->refcnt))
|
||||
return;
|
||||
|
||||
INIT_WORK(&token->work, bpf_token_put_deferred);
|
||||
schedule_work(&token->work);
|
||||
}
|
||||
|
||||
static int bpf_token_release(struct inode *inode, struct file *filp)
|
||||
{
|
||||
struct bpf_token *token = filp->private_data;
|
||||
|
||||
bpf_token_put(token);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void bpf_token_show_fdinfo(struct seq_file *m, struct file *filp)
|
||||
{
|
||||
struct bpf_token *token = filp->private_data;
|
||||
u64 mask;
|
||||
|
||||
BUILD_BUG_ON(__MAX_BPF_CMD >= 64);
|
||||
mask = (1ULL << __MAX_BPF_CMD) - 1;
|
||||
if ((token->allowed_cmds & mask) == mask)
|
||||
seq_printf(m, "allowed_cmds:\tany\n");
|
||||
else
|
||||
seq_printf(m, "allowed_cmds:\t0x%llx\n", token->allowed_cmds);
|
||||
|
||||
BUILD_BUG_ON(__MAX_BPF_MAP_TYPE >= 64);
|
||||
mask = (1ULL << __MAX_BPF_MAP_TYPE) - 1;
|
||||
if ((token->allowed_maps & mask) == mask)
|
||||
seq_printf(m, "allowed_maps:\tany\n");
|
||||
else
|
||||
seq_printf(m, "allowed_maps:\t0x%llx\n", token->allowed_maps);
|
||||
|
||||
BUILD_BUG_ON(__MAX_BPF_PROG_TYPE >= 64);
|
||||
mask = (1ULL << __MAX_BPF_PROG_TYPE) - 1;
|
||||
if ((token->allowed_progs & mask) == mask)
|
||||
seq_printf(m, "allowed_progs:\tany\n");
|
||||
else
|
||||
seq_printf(m, "allowed_progs:\t0x%llx\n", token->allowed_progs);
|
||||
|
||||
BUILD_BUG_ON(__MAX_BPF_ATTACH_TYPE >= 64);
|
||||
mask = (1ULL << __MAX_BPF_ATTACH_TYPE) - 1;
|
||||
if ((token->allowed_attachs & mask) == mask)
|
||||
seq_printf(m, "allowed_attachs:\tany\n");
|
||||
else
|
||||
seq_printf(m, "allowed_attachs:\t0x%llx\n", token->allowed_attachs);
|
||||
}
|
||||
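Since a token's scope is only exposed through fdinfo, a short userspace sketch of how to inspect it (the field names match the seq_printf() calls above; the path and buffer sizes are arbitrary choices):

#include <stdio.h>

static void dump_token_scope(int token_fd)
{
	char path[64], line[256];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", token_fd);
	f = fopen(path, "r");
	if (!f)
		return;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);   /* includes the allowed_* lines printed above */
	fclose(f);
}
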
|
||||
#define BPF_TOKEN_INODE_NAME "bpf-token"
|
||||
|
||||
static const struct inode_operations bpf_token_iops = { };
|
||||
|
||||
static const struct file_operations bpf_token_fops = {
|
||||
.release = bpf_token_release,
|
||||
.show_fdinfo = bpf_token_show_fdinfo,
|
||||
};
|
||||
|
||||
int bpf_token_create(union bpf_attr *attr)
|
||||
{
|
||||
struct bpf_mount_opts *mnt_opts;
|
||||
struct bpf_token *token = NULL;
|
||||
struct user_namespace *userns;
|
||||
struct inode *inode;
|
||||
struct file *file;
|
||||
struct path path;
|
||||
struct fd f;
|
||||
umode_t mode;
|
||||
int err, fd;
|
||||
|
||||
f = fdget(attr->token_create.bpffs_fd);
|
||||
if (!f.file)
|
||||
return -EBADF;
|
||||
|
||||
path = f.file->f_path;
|
||||
path_get(&path);
|
||||
fdput(f);
|
||||
|
||||
if (path.dentry != path.mnt->mnt_sb->s_root) {
|
||||
err = -EINVAL;
|
||||
goto out_path;
|
||||
}
|
||||
if (path.mnt->mnt_sb->s_op != &bpf_super_ops) {
|
||||
err = -EINVAL;
|
||||
goto out_path;
|
||||
}
|
||||
err = path_permission(&path, MAY_ACCESS);
|
||||
if (err)
|
||||
goto out_path;
|
||||
|
||||
userns = path.dentry->d_sb->s_user_ns;
|
||||
/*
|
||||
* Enforce that creators of BPF tokens are in the same user
|
||||
* namespace as the BPF FS instance. This makes reasoning about
|
||||
* permissions a lot easier and we can always relax this later.
|
||||
*/
|
||||
if (current_user_ns() != userns) {
|
||||
err = -EPERM;
|
||||
goto out_path;
|
||||
}
|
||||
if (!ns_capable(userns, CAP_BPF)) {
|
||||
err = -EPERM;
|
||||
goto out_path;
|
||||
}
|
||||
|
||||
mnt_opts = path.dentry->d_sb->s_fs_info;
|
||||
if (mnt_opts->delegate_cmds == 0 &&
|
||||
mnt_opts->delegate_maps == 0 &&
|
||||
mnt_opts->delegate_progs == 0 &&
|
||||
mnt_opts->delegate_attachs == 0) {
|
||||
err = -ENOENT; /* no BPF token delegation is set up */
|
||||
goto out_path;
|
||||
}
|
||||
|
||||
mode = S_IFREG | ((S_IRUSR | S_IWUSR) & ~current_umask());
|
||||
inode = bpf_get_inode(path.mnt->mnt_sb, NULL, mode);
|
||||
if (IS_ERR(inode)) {
|
||||
err = PTR_ERR(inode);
|
||||
goto out_path;
|
||||
}
|
||||
|
||||
inode->i_op = &bpf_token_iops;
|
||||
inode->i_fop = &bpf_token_fops;
|
||||
clear_nlink(inode); /* make sure it is unlinked */
|
||||
|
||||
file = alloc_file_pseudo(inode, path.mnt, BPF_TOKEN_INODE_NAME, O_RDWR, &bpf_token_fops);
|
||||
if (IS_ERR(file)) {
|
||||
iput(inode);
|
||||
err = PTR_ERR(file);
|
||||
goto out_path;
|
||||
}
|
||||
|
||||
token = kvzalloc(sizeof(*token), GFP_USER);
|
||||
if (!token) {
|
||||
err = -ENOMEM;
|
||||
goto out_file;
|
||||
}
|
||||
|
||||
atomic64_set(&token->refcnt, 1);
|
||||
|
||||
/* remember bpffs owning userns for future ns_capable() checks */
|
||||
token->userns = get_user_ns(userns);
|
||||
|
||||
token->allowed_cmds = mnt_opts->delegate_cmds;
|
||||
token->allowed_maps = mnt_opts->delegate_maps;
|
||||
token->allowed_progs = mnt_opts->delegate_progs;
|
||||
token->allowed_attachs = mnt_opts->delegate_attachs;
|
||||
|
||||
err = security_bpf_token_create(token, attr, &path);
|
||||
if (err)
|
||||
goto out_token;
|
||||
|
||||
fd = get_unused_fd_flags(O_CLOEXEC);
|
||||
if (fd < 0) {
|
||||
err = fd;
|
||||
goto out_token;
|
||||
}
|
||||
|
||||
file->private_data = token;
|
||||
fd_install(fd, file);
|
||||
|
||||
path_put(&path);
|
||||
return fd;
|
||||
|
||||
out_token:
|
||||
bpf_token_free(token);
|
||||
out_file:
|
||||
fput(file);
|
||||
out_path:
|
||||
path_put(&path);
|
||||
return err;
|
||||
}
|
||||
|
||||
struct bpf_token *bpf_token_get_from_fd(u32 ufd)
|
||||
{
|
||||
struct fd f = fdget(ufd);
|
||||
struct bpf_token *token;
|
||||
|
||||
if (!f.file)
|
||||
return ERR_PTR(-EBADF);
|
||||
if (f.file->f_op != &bpf_token_fops) {
|
||||
fdput(f);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
token = f.file->private_data;
|
||||
bpf_token_inc(token);
|
||||
fdput(f);
|
||||
|
||||
return token;
|
||||
}
|
||||
|
||||
bool bpf_token_allow_cmd(const struct bpf_token *token, enum bpf_cmd cmd)
|
||||
{
|
||||
/* BPF token can be used only within exactly the same userns in which
|
||||
* it was created
|
||||
*/
|
||||
if (!token || current_user_ns() != token->userns)
|
||||
return false;
|
||||
if (!(token->allowed_cmds & (1ULL << cmd)))
|
||||
return false;
|
||||
return security_bpf_token_cmd(token, cmd) == 0;
|
||||
}
|
||||
|
||||
bool bpf_token_allow_map_type(const struct bpf_token *token, enum bpf_map_type type)
|
||||
{
|
||||
if (!token || type >= __MAX_BPF_MAP_TYPE)
|
||||
return false;
|
||||
|
||||
return token->allowed_maps & (1ULL << type);
|
||||
}
|
||||
|
||||
bool bpf_token_allow_prog_type(const struct bpf_token *token,
|
||||
enum bpf_prog_type prog_type,
|
||||
enum bpf_attach_type attach_type)
|
||||
{
|
||||
if (!token || prog_type >= __MAX_BPF_PROG_TYPE || attach_type >= __MAX_BPF_ATTACH_TYPE)
|
||||
return false;
|
||||
|
||||
return (token->allowed_progs & (1ULL << prog_type)) &&
|
||||
(token->allowed_attachs & (1ULL << attach_type));
|
||||
}
|
@ -115,10 +115,10 @@ bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
|
||||
(ptype == BPF_PROG_TYPE_LSM && eatype == BPF_LSM_MAC);
|
||||
}
|
||||
|
||||
void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym)
|
||||
void bpf_image_ksym_add(void *data, unsigned int size, struct bpf_ksym *ksym)
|
||||
{
|
||||
ksym->start = (unsigned long) data;
|
||||
ksym->end = ksym->start + PAGE_SIZE;
|
||||
ksym->end = ksym->start + size;
|
||||
bpf_ksym_add(ksym);
|
||||
perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
|
||||
PAGE_SIZE, false, ksym->name);
|
||||
@ -254,8 +254,8 @@ bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total, bool *ip_a
|
||||
static void bpf_tramp_image_free(struct bpf_tramp_image *im)
|
||||
{
|
||||
bpf_image_ksym_del(&im->ksym);
|
||||
bpf_jit_free_exec(im->image);
|
||||
bpf_jit_uncharge_modmem(PAGE_SIZE);
|
||||
arch_free_bpf_trampoline(im->image, im->size);
|
||||
bpf_jit_uncharge_modmem(im->size);
|
||||
percpu_ref_exit(&im->pcref);
|
||||
kfree_rcu(im, rcu);
|
||||
}
|
||||
@ -349,7 +349,7 @@ static void bpf_tramp_image_put(struct bpf_tramp_image *im)
|
||||
call_rcu_tasks_trace(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
|
||||
}
|
||||
|
||||
static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key)
|
||||
static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, int size)
|
||||
{
|
||||
struct bpf_tramp_image *im;
|
||||
struct bpf_ksym *ksym;
|
||||
@ -360,15 +360,15 @@ static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key)
|
||||
if (!im)
|
||||
goto out;
|
||||
|
||||
err = bpf_jit_charge_modmem(PAGE_SIZE);
|
||||
err = bpf_jit_charge_modmem(size);
|
||||
if (err)
|
||||
goto out_free_im;
|
||||
im->size = size;
|
||||
|
||||
err = -ENOMEM;
|
||||
im->image = image = bpf_jit_alloc_exec(PAGE_SIZE);
|
||||
im->image = image = arch_alloc_bpf_trampoline(size);
|
||||
if (!image)
|
||||
goto out_uncharge;
|
||||
set_vm_flush_reset_perms(image);
|
||||
|
||||
err = percpu_ref_init(&im->pcref, __bpf_tramp_image_release, 0, GFP_KERNEL);
|
||||
if (err)
|
||||
@ -377,13 +377,13 @@ static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key)
|
||||
ksym = &im->ksym;
|
||||
INIT_LIST_HEAD_RCU(&ksym->lnode);
|
||||
snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu", key);
|
||||
bpf_image_ksym_add(image, ksym);
|
||||
bpf_image_ksym_add(image, size, ksym);
|
||||
return im;
|
||||
|
||||
out_free_image:
|
||||
bpf_jit_free_exec(im->image);
|
||||
arch_free_bpf_trampoline(im->image, im->size);
|
||||
out_uncharge:
|
||||
bpf_jit_uncharge_modmem(PAGE_SIZE);
|
||||
bpf_jit_uncharge_modmem(size);
|
||||
out_free_im:
|
||||
kfree(im);
|
||||
out:
|
||||
@ -396,7 +396,7 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut
|
||||
struct bpf_tramp_links *tlinks;
|
||||
u32 orig_flags = tr->flags;
|
||||
bool ip_arg = false;
|
||||
int err, total;
|
||||
int err, total, size;
|
||||
|
||||
tlinks = bpf_trampoline_get_progs(tr, &total, &ip_arg);
|
||||
if (IS_ERR(tlinks))
|
||||
@ -409,12 +409,6 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut
|
||||
goto out;
|
||||
}
|
||||
|
||||
im = bpf_tramp_image_alloc(tr->key);
|
||||
if (IS_ERR(im)) {
|
||||
err = PTR_ERR(im);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* clear all bits except SHARE_IPMODIFY and TAIL_CALL_CTX */
|
||||
tr->flags &= (BPF_TRAMP_F_SHARE_IPMODIFY | BPF_TRAMP_F_TAIL_CALL_CTX);
|
||||
|
||||
@ -438,13 +432,31 @@ again:
|
||||
tr->flags |= BPF_TRAMP_F_ORIG_STACK;
|
||||
#endif
|
||||
|
||||
err = arch_prepare_bpf_trampoline(im, im->image, im->image + PAGE_SIZE,
|
||||
size = arch_bpf_trampoline_size(&tr->func.model, tr->flags,
|
||||
tlinks, tr->func.addr);
|
||||
if (size < 0) {
|
||||
err = size;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (size > PAGE_SIZE) {
|
||||
err = -E2BIG;
|
||||
goto out;
|
||||
}
|
||||
|
||||
im = bpf_tramp_image_alloc(tr->key, size);
|
||||
if (IS_ERR(im)) {
|
||||
err = PTR_ERR(im);
|
||||
goto out;
|
||||
}
|
||||
|
||||
err = arch_prepare_bpf_trampoline(im, im->image, im->image + size,
|
||||
&tr->func.model, tr->flags, tlinks,
|
||||
tr->func.addr);
|
||||
if (err < 0)
|
||||
goto out_free;
|
||||
|
||||
set_memory_rox((long)im->image, 1);
|
||||
arch_protect_bpf_trampoline(im->image, im->size);
|
||||
|
||||
WARN_ON(tr->cur_image && total == 0);
|
||||
if (tr->cur_image)
|
||||
@ -464,9 +476,8 @@ again:
|
||||
tr->fops->func = NULL;
|
||||
tr->fops->trampoline = 0;
|
||||
|
||||
/* reset im->image memory attr for arch_prepare_bpf_trampoline */
|
||||
set_memory_nx((long)im->image, 1);
|
||||
set_memory_rw((long)im->image, 1);
|
||||
/* free im memory and reallocate later */
|
||||
bpf_tramp_image_free(im);
|
||||
goto again;
|
||||
}
|
||||
#endif
|
||||
@ -1032,10 +1043,50 @@ bpf_trampoline_exit_t bpf_trampoline_exit(const struct bpf_prog *prog)
|
||||
}
|
||||
|
||||
int __weak
|
||||
arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
|
||||
arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
|
||||
const struct btf_func_model *m, u32 flags,
|
||||
struct bpf_tramp_links *tlinks,
|
||||
void *orig_call)
|
||||
void *func_addr)
|
||||
{
|
||||
return -ENOTSUPP;
|
||||
}
|
||||
|
||||
void * __weak arch_alloc_bpf_trampoline(unsigned int size)
|
||||
{
|
||||
void *image;
|
||||
|
||||
if (WARN_ON_ONCE(size > PAGE_SIZE))
|
||||
return NULL;
|
||||
image = bpf_jit_alloc_exec(PAGE_SIZE);
|
||||
if (image)
|
||||
set_vm_flush_reset_perms(image);
|
||||
return image;
|
||||
}
|
||||
|
||||
void __weak arch_free_bpf_trampoline(void *image, unsigned int size)
|
||||
{
|
||||
WARN_ON_ONCE(size > PAGE_SIZE);
|
||||
/* bpf_jit_free_exec doesn't need "size", but
|
||||
* bpf_prog_pack_free() needs it.
|
||||
*/
|
||||
bpf_jit_free_exec(image);
|
||||
}
|
||||
|
||||
void __weak arch_protect_bpf_trampoline(void *image, unsigned int size)
|
||||
{
|
||||
WARN_ON_ONCE(size > PAGE_SIZE);
|
||||
set_memory_rox((long)image, 1);
|
||||
}
|
||||
|
||||
void __weak arch_unprotect_bpf_trampoline(void *image, unsigned int size)
|
||||
{
|
||||
WARN_ON_ONCE(size > PAGE_SIZE);
|
||||
set_memory_nx((long)image, 1);
|
||||
set_memory_rw((long)image, 1);
|
||||
}
|
||||
|
||||
int __weak arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
|
||||
struct bpf_tramp_links *tlinks, void *func_addr)
|
||||
{
|
||||
return -ENOTSUPP;
|
||||
}
|
||||
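For readers new to this interface, a condensed sketch of how the weak hooks above are meant to be sequenced by a caller (it mirrors the bpf_trampoline_update() flow earlier in this file and is illustrative, not new kernel code):

static int build_trampoline_example(struct bpf_tramp_image *im,
				    const struct btf_func_model *m, u32 flags,
				    struct bpf_tramp_links *tlinks, void *func_addr)
{
	int size, err;

	size = arch_bpf_trampoline_size(m, flags, tlinks, func_addr);
	if (size < 0)
		return size;

	im->image = arch_alloc_bpf_trampoline(size);
	if (!im->image)
		return -ENOMEM;
	im->size = size;

	err = arch_prepare_bpf_trampoline(im, im->image, im->image + size,
					  m, flags, tlinks, func_addr);
	if (err < 0) {
		arch_free_bpf_trampoline(im->image, size);
		return err;
	}

	arch_protect_bpf_trampoline(im->image, size);   /* make it read-only + exec */
	return 0;
}
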
|
File diff suppressed because it is too large
@ -24,6 +24,7 @@
|
||||
#include <linux/key.h>
|
||||
#include <linux/verification.h>
|
||||
#include <linux/namei.h>
|
||||
#include <linux/fileattr.h>
|
||||
|
||||
#include <net/bpf_sk_storage.h>
|
||||
|
||||
@ -41,6 +42,9 @@
|
||||
#define bpf_event_rcu_dereference(p) \
|
||||
rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
|
||||
|
||||
#define MAX_UPROBE_MULTI_CNT (1U << 20)
|
||||
#define MAX_KPROBE_MULTI_CNT (1U << 20)
|
||||
|
||||
#ifdef CONFIG_MODULES
|
||||
struct bpf_trace_module {
|
||||
struct module *module;
|
||||
@ -1431,6 +1435,72 @@ static int __init bpf_key_sig_kfuncs_init(void)
|
||||
late_initcall(bpf_key_sig_kfuncs_init);
|
||||
#endif /* CONFIG_KEYS */
|
||||
|
||||
/* filesystem kfuncs */
__bpf_kfunc_start_defs();

/**
* bpf_get_file_xattr - get xattr of a file
* @file: file to get xattr from
* @name__str: name of the xattr
* @value_ptr: output buffer of the xattr value
*
* Get xattr *name__str* of *file* and store the output in *value_ptr*.
*
* For security reasons, only *name__str* with prefix "user." is allowed.
*
* Return: 0 on success, a negative value on error.
*/
__bpf_kfunc int bpf_get_file_xattr(struct file *file, const char *name__str,
struct bpf_dynptr_kern *value_ptr)
{
struct dentry *dentry;
u32 value_len;
void *value;
int ret;

if (strncmp(name__str, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
return -EPERM;

value_len = __bpf_dynptr_size(value_ptr);
value = __bpf_dynptr_data_rw(value_ptr, value_len);
if (!value)
return -EINVAL;

dentry = file_dentry(file);
ret = inode_permission(&nop_mnt_idmap, dentry->d_inode, MAY_READ);
if (ret)
return ret;
return __vfs_getxattr(dentry, dentry->d_inode, name__str, value, value_len);
}

__bpf_kfunc_end_defs();

BTF_SET8_START(fs_kfunc_set_ids)
BTF_ID_FLAGS(func, bpf_get_file_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
BTF_SET8_END(fs_kfunc_set_ids)

static int bpf_get_file_xattr_filter(const struct bpf_prog *prog, u32 kfunc_id)
{
if (!btf_id_set8_contains(&fs_kfunc_set_ids, kfunc_id))
return 0;

/* Only allow to attach from LSM hooks, to avoid recursion */
return prog->type != BPF_PROG_TYPE_LSM ? -EACCES : 0;
}

static const struct btf_kfunc_id_set bpf_fs_kfunc_set = {
.owner = THIS_MODULE,
.set = &fs_kfunc_set_ids,
.filter = bpf_get_file_xattr_filter,
};

static int __init bpf_fs_kfuncs_init(void)
{
return register_btf_kfunc_id_set(BPF_PROG_TYPE_LSM, &bpf_fs_kfunc_set);
}

late_initcall(bpf_fs_kfuncs_init);

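A hedged sketch of a consumer: only BPF_PROG_TYPE_LSM programs may call the kfunc (per the filter above), and the output buffer must be a dynptr. The ring-buffer dynptr provisioning and the __ksym declarations below follow the selftests' usual pattern and are assumptions, not part of this patch:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

extern int bpf_get_file_xattr(struct file *file, const char *name__str,
			      struct bpf_dynptr *value_ptr) __ksym;
extern int bpf_ringbuf_reserve_dynptr(void *ringbuf, __u32 size, __u64 flags,
				       struct bpf_dynptr *ptr) __ksym;
extern void bpf_ringbuf_discard_dynptr(struct bpf_dynptr *ptr, __u64 flags) __ksym;

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} ringbuf SEC(".maps");

SEC("lsm.s/file_open")
int BPF_PROG(read_user_xattr, struct file *file)
{
	struct bpf_dynptr value;
	int len;

	bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &value);
	/* only "user."-prefixed names are permitted by the kfunc */
	len = bpf_get_file_xattr(file, "user.example", &value);
	if (len >= 0)
		bpf_printk("user.example xattr: %d bytes", len);
	bpf_ringbuf_discard_dynptr(&value, 0);
	return 0;
}

char _license[] SEC("license") = "GPL";
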
static const struct bpf_func_proto *
|
||||
bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
||||
{
|
||||
@ -1559,7 +1629,7 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
||||
case BPF_FUNC_trace_vprintk:
|
||||
return bpf_get_trace_vprintk_proto();
|
||||
default:
|
||||
return bpf_base_func_proto(func_id);
|
||||
return bpf_base_func_proto(func_id, prog);
|
||||
}
|
||||
}
|
||||
|
||||
@ -2903,6 +2973,8 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
|
||||
cnt = attr->link_create.kprobe_multi.cnt;
|
||||
if (!cnt)
|
||||
return -EINVAL;
|
||||
if (cnt > MAX_KPROBE_MULTI_CNT)
|
||||
return -E2BIG;
|
||||
|
||||
size = cnt * sizeof(*addrs);
|
||||
addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
|
||||
@ -3277,6 +3349,8 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
|
||||
|
||||
if (!upath || !uoffsets || !cnt)
|
||||
return -EINVAL;
|
||||
if (cnt > MAX_UPROBE_MULTI_CNT)
|
||||
return -E2BIG;
|
||||
|
||||
uref_ctr_offsets = u64_to_user_ptr(attr->link_create.uprobe_multi.ref_ctr_offsets);
|
||||
ucookies = u64_to_user_ptr(attr->link_create.uprobe_multi.cookies);
|
||||
@ -3317,15 +3391,19 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
|
||||
goto error_free;
|
||||
|
||||
for (i = 0; i < cnt; i++) {
|
||||
if (ucookies && __get_user(uprobes[i].cookie, ucookies + i)) {
|
||||
if (__get_user(uprobes[i].offset, uoffsets + i)) {
|
||||
err = -EFAULT;
|
||||
goto error_free;
|
||||
}
|
||||
if (uprobes[i].offset < 0) {
|
||||
err = -EINVAL;
|
||||
goto error_free;
|
||||
}
|
||||
if (uref_ctr_offsets && __get_user(uprobes[i].ref_ctr_offset, uref_ctr_offsets + i)) {
|
||||
err = -EFAULT;
|
||||
goto error_free;
|
||||
}
|
||||
if (__get_user(uprobes[i].offset, uoffsets + i)) {
|
||||
if (ucookies && __get_user(uprobes[i].cookie, ucookies + i)) {
|
||||
err = -EFAULT;
|
||||
goto error_free;
|
||||
}
|
||||
|
@ -6277,7 +6277,7 @@ static struct bpf_test tests[] = {
|
||||
},
|
||||
/* BPF_ALU64 | BPF_MOD | BPF_K off=1 (SMOD64) */
|
||||
{
|
||||
"ALU64_SMOD_X: -7 % 2 = -1",
|
||||
"ALU64_SMOD_K: -7 % 2 = -1",
|
||||
.u.insns_int = {
|
||||
BPF_LD_IMM64(R0, -7),
|
||||
BPF_ALU64_IMM_OFF(BPF_MOD, R0, 2, 1),
|
||||
|
@ -12,6 +12,11 @@ extern struct bpf_struct_ops bpf_bpf_dummy_ops;
|
||||
/* A common type for test_N with return value in bpf_dummy_ops */
|
||||
typedef int (*dummy_ops_test_ret_fn)(struct bpf_dummy_ops_state *state, ...);
|
||||
|
||||
static int dummy_ops_test_ret_function(struct bpf_dummy_ops_state *state, ...)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct bpf_dummy_ops_test_args {
|
||||
u64 args[MAX_BPF_FUNC_ARGS];
|
||||
struct bpf_dummy_ops_state state;
|
||||
@ -62,7 +67,7 @@ static int dummy_ops_copy_args(struct bpf_dummy_ops_test_args *args)
|
||||
|
||||
static int dummy_ops_call_op(void *image, struct bpf_dummy_ops_test_args *args)
|
||||
{
|
||||
dummy_ops_test_ret_fn test = (void *)image;
|
||||
dummy_ops_test_ret_fn test = (void *)image + cfi_get_offset();
|
||||
struct bpf_dummy_ops_state *state = NULL;
|
||||
|
||||
/* state needs to be NULL if args[0] is 0 */
|
||||
@ -101,12 +106,11 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
|
||||
goto out;
|
||||
}
|
||||
|
||||
image = bpf_jit_alloc_exec(PAGE_SIZE);
|
||||
image = arch_alloc_bpf_trampoline(PAGE_SIZE);
|
||||
if (!image) {
|
||||
err = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
set_vm_flush_reset_perms(image);
|
||||
|
||||
link = kzalloc(sizeof(*link), GFP_USER);
|
||||
if (!link) {
|
||||
@ -120,11 +124,12 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
|
||||
op_idx = prog->expected_attach_type;
|
||||
err = bpf_struct_ops_prepare_trampoline(tlinks, link,
|
||||
&st_ops->func_models[op_idx],
|
||||
&dummy_ops_test_ret_function,
|
||||
image, image + PAGE_SIZE);
|
||||
if (err < 0)
|
||||
goto out;
|
||||
|
||||
set_memory_rox((long)image, 1);
|
||||
arch_protect_bpf_trampoline(image, PAGE_SIZE);
|
||||
prog_ret = dummy_ops_call_op(image, args);
|
||||
|
||||
err = dummy_ops_copy_args(args);
|
||||
@ -134,7 +139,7 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
|
||||
err = -EFAULT;
|
||||
out:
|
||||
kfree(args);
|
||||
bpf_jit_free_exec(image);
|
||||
arch_free_bpf_trampoline(image, PAGE_SIZE);
|
||||
if (link)
|
||||
bpf_link_put(&link->link);
|
||||
kfree(tlinks);
|
||||
@ -220,6 +225,28 @@ static void bpf_dummy_unreg(void *kdata)
|
||||
{
|
||||
}
|
||||
|
||||
static int bpf_dummy_test_1(struct bpf_dummy_ops_state *cb)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bpf_dummy_test_2(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
|
||||
char a3, unsigned long a4)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bpf_dummy_test_sleepable(struct bpf_dummy_ops_state *cb)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct bpf_dummy_ops __bpf_bpf_dummy_ops = {
|
||||
.test_1 = bpf_dummy_test_1,
|
||||
.test_2 = bpf_dummy_test_2,
|
||||
.test_sleepable = bpf_dummy_test_sleepable,
|
||||
};
|
||||
|
||||
struct bpf_struct_ops bpf_bpf_dummy_ops = {
|
||||
.verifier_ops = &bpf_dummy_verifier_ops,
|
||||
.init = bpf_dummy_init,
|
||||
@ -228,4 +255,5 @@ struct bpf_struct_ops bpf_bpf_dummy_ops = {
|
||||
.reg = bpf_dummy_reg,
|
||||
.unreg = bpf_dummy_unreg,
|
||||
.name = "bpf_dummy_ops",
|
||||
.cfi_stubs = &__bpf_bpf_dummy_ops,
|
||||
};
|
||||
|
@ -600,10 +600,21 @@ __bpf_kfunc void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p)
|
||||
refcount_dec(&p->cnt);
|
||||
}
|
||||
|
||||
__bpf_kfunc void bpf_kfunc_call_test_release_dtor(void *p)
|
||||
{
|
||||
bpf_kfunc_call_test_release(p);
|
||||
}
|
||||
CFI_NOSEAL(bpf_kfunc_call_test_release_dtor);
|
||||
|
||||
__bpf_kfunc void bpf_kfunc_call_memb_release(struct prog_test_member *p)
|
||||
{
|
||||
}
|
||||
|
||||
__bpf_kfunc void bpf_kfunc_call_memb_release_dtor(void *p)
|
||||
{
|
||||
}
|
||||
CFI_NOSEAL(bpf_kfunc_call_memb_release_dtor);
|
||||
|
||||
__bpf_kfunc_end_defs();
|
||||
|
||||
BTF_SET8_START(bpf_test_modify_return_ids)
|
||||
@ -1671,9 +1682,9 @@ static const struct btf_kfunc_id_set bpf_prog_test_kfunc_set = {
|
||||
|
||||
BTF_ID_LIST(bpf_prog_test_dtor_kfunc_ids)
|
||||
BTF_ID(struct, prog_test_ref_kfunc)
|
||||
BTF_ID(func, bpf_kfunc_call_test_release)
|
||||
BTF_ID(func, bpf_kfunc_call_test_release_dtor)
|
||||
BTF_ID(struct, prog_test_member)
|
||||
BTF_ID(func, bpf_kfunc_call_memb_release)
|
||||
BTF_ID(func, bpf_kfunc_call_memb_release_dtor)
|
||||
|
||||
static int __init bpf_prog_test_run_init(void)
|
||||
{
|
||||
|
@ -87,7 +87,7 @@
|
||||
#include "dev.h"
|
||||
|
||||
static const struct bpf_func_proto *
|
||||
bpf_sk_base_func_proto(enum bpf_func_id func_id);
|
||||
bpf_sk_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
|
||||
|
||||
int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len)
|
||||
{
|
||||
@ -7862,7 +7862,7 @@ sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
||||
case BPF_FUNC_ktime_get_coarse_ns:
|
||||
return &bpf_ktime_get_coarse_ns_proto;
|
||||
default:
|
||||
return bpf_base_func_proto(func_id);
|
||||
return bpf_base_func_proto(func_id, prog);
|
||||
}
|
||||
}
|
||||
|
||||
@ -7955,7 +7955,7 @@ sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
||||
return NULL;
|
||||
}
|
||||
default:
|
||||
return bpf_sk_base_func_proto(func_id);
|
||||
return bpf_sk_base_func_proto(func_id, prog);
|
||||
}
|
||||
}
|
||||
|
||||
@ -7974,7 +7974,7 @@ sk_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
||||
case BPF_FUNC_perf_event_output:
|
||||
return &bpf_skb_event_output_proto;
|
||||
default:
|
||||
return bpf_sk_base_func_proto(func_id);
|
||||
return bpf_sk_base_func_proto(func_id, prog);
|
||||
}
|
||||
}
|
||||
|
||||
@ -8161,7 +8161,7 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
||||
#endif
|
||||
#endif
|
||||
default:
|
||||
return bpf_sk_base_func_proto(func_id);
|
||||
return bpf_sk_base_func_proto(func_id, prog);
|
||||
}
|
||||
}
|
||||
|
||||
@ -8220,7 +8220,7 @@ xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
||||
#endif
|
||||
#endif
|
||||
default:
|
||||
return bpf_sk_base_func_proto(func_id);
|
||||
return bpf_sk_base_func_proto(func_id, prog);
|
||||
}
|
||||
|
||||
#if IS_MODULE(CONFIG_NF_CONNTRACK) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)
|
||||
@ -8281,7 +8281,7 @@ sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
||||
return &bpf_tcp_sock_proto;
|
||||
#endif /* CONFIG_INET */
|
||||
default:
|
||||
return bpf_sk_base_func_proto(func_id);
|
||||
return bpf_sk_base_func_proto(func_id, prog);
|
||||
}
|
||||
}
|
||||
|
||||
@ -8323,7 +8323,7 @@ sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
||||
return &bpf_get_cgroup_classid_curr_proto;
|
||||
#endif
|
||||
default:
|
||||
return bpf_sk_base_func_proto(func_id);
|
||||
return bpf_sk_base_func_proto(func_id, prog);
|
||||
}
|
||||
}
|
||||
|
||||
@ -8367,7 +8367,7 @@ sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
||||
return &bpf_skc_lookup_tcp_proto;
|
||||
#endif
|
||||
default:
|
||||
return bpf_sk_base_func_proto(func_id);
|
||||
return bpf_sk_base_func_proto(func_id, prog);
|
||||
}
|
||||
}
|
||||
|
||||
@ -8378,7 +8378,7 @@ flow_dissector_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
||||
case BPF_FUNC_skb_load_bytes:
|
||||
return &bpf_flow_dissector_load_bytes_proto;
|
||||
default:
|
||||
return bpf_sk_base_func_proto(func_id);
|
||||
return bpf_sk_base_func_proto(func_id, prog);
|
||||
}
|
||||
}
|
||||
|
||||
@ -8405,7 +8405,7 @@ lwt_out_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
||||
case BPF_FUNC_skb_under_cgroup:
|
||||
return &bpf_skb_under_cgroup_proto;
|
||||
default:
|
||||
return bpf_sk_base_func_proto(func_id);
|
||||
return bpf_sk_base_func_proto(func_id, prog);
|
||||
}
|
||||
}
|
||||
|
||||
@ -8580,7 +8580,7 @@ static bool cg_skb_is_valid_access(int off, int size,
|
||||
return false;
|
||||
case bpf_ctx_range(struct __sk_buff, data):
|
||||
case bpf_ctx_range(struct __sk_buff, data_end):
|
||||
if (!bpf_capable())
|
||||
if (!bpf_token_capable(prog->aux->token, CAP_BPF))
|
||||
return false;
|
||||
break;
|
||||
}
|
||||
@ -8592,7 +8592,7 @@ static bool cg_skb_is_valid_access(int off, int size,
|
||||
case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
|
||||
break;
|
||||
case bpf_ctx_range(struct __sk_buff, tstamp):
|
||||
if (!bpf_capable())
|
||||
if (!bpf_token_capable(prog->aux->token, CAP_BPF))
|
||||
return false;
|
||||
break;
|
||||
default:
|
||||
@@ -11236,7 +11236,7 @@ sk_reuseport_func_proto(enum bpf_func_id func_id,
 	case BPF_FUNC_ktime_get_coarse_ns:
 		return &bpf_ktime_get_coarse_ns_proto;
 	default:
-		return bpf_base_func_proto(func_id);
+		return bpf_base_func_proto(func_id, prog);
 	}
 }

@@ -11418,7 +11418,7 @@ sk_lookup_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 	case BPF_FUNC_sk_release:
 		return &bpf_sk_release_proto;
 	default:
-		return bpf_sk_base_func_proto(func_id);
+		return bpf_sk_base_func_proto(func_id, prog);
 	}
 }

@@ -11752,7 +11752,7 @@ const struct bpf_func_proto bpf_sock_from_file_proto = {
 };

 static const struct bpf_func_proto *
-bpf_sk_base_func_proto(enum bpf_func_id func_id)
+bpf_sk_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 {
 	const struct bpf_func_proto *func;

@@ -11781,10 +11781,10 @@ bpf_sk_base_func_proto(enum bpf_func_id func_id)
 	case BPF_FUNC_ktime_get_coarse_ns:
 		return &bpf_ktime_get_coarse_ns_proto;
 	default:
-		return bpf_base_func_proto(func_id);
+		return bpf_base_func_proto(func_id, prog);
 	}

-	if (!perfmon_capable())
+	if (!bpf_token_capable(prog->aux->token, CAP_PERFMON))
 		return NULL;

 	return func;
@@ -736,6 +736,39 @@ __bpf_kfunc int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash,
 	return -EOPNOTSUPP;
 }

+/**
+ * bpf_xdp_metadata_rx_vlan_tag - Get XDP packet outermost VLAN tag
+ * @ctx: XDP context pointer.
+ * @vlan_proto: Destination pointer for VLAN Tag protocol identifier (TPID).
+ * @vlan_tci: Destination pointer for VLAN TCI (VID + DEI + PCP)
+ *
+ * In case of success, ``vlan_proto`` contains *Tag protocol identifier (TPID)*,
+ * usually ``ETH_P_8021Q`` or ``ETH_P_8021AD``, but some networks can use
+ * custom TPIDs. ``vlan_proto`` is stored in **network byte order (BE)**
+ * and should be used as follows:
+ * ``if (vlan_proto == bpf_htons(ETH_P_8021Q)) do_something();``
+ *
+ * ``vlan_tci`` contains the remaining 16 bits of a VLAN tag.
+ * Driver is expected to provide those in **host byte order (usually LE)**,
+ * so the bpf program should not perform byte conversion.
+ * According to 802.1Q standard, *VLAN TCI (Tag control information)*
+ * is a bit field that contains:
+ * *VLAN identifier (VID)* that can be read with ``vlan_tci & 0xfff``,
+ * *Drop eligible indicator (DEI)* - 1 bit,
+ * *Priority code point (PCP)* - 3 bits.
+ * For detailed meaning of DEI and PCP, please refer to other sources.
+ *
+ * Return:
+ * * Returns 0 on success or ``-errno`` on error.
+ * * ``-EOPNOTSUPP`` : device driver doesn't implement kfunc
+ * * ``-ENODATA``    : VLAN tag was not stripped or is not available
+ */
+__bpf_kfunc int bpf_xdp_metadata_rx_vlan_tag(const struct xdp_md *ctx,
+					     __be16 *vlan_proto, u16 *vlan_tci)
+{
+	return -EOPNOTSUPP;
+}
+
 __bpf_kfunc_end_defs();

 BTF_SET8_START(xdp_metadata_kfunc_ids)
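
For context, a minimal sketch (not part of this patch) of how an XDP program could consume the new kfunc; the extern declaration, program name and the VID-based policy below are illustrative assumptions:

    #include <linux/bpf.h>
    #include <linux/if_ether.h>
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_endian.h>

    /* kfunc provided by the kernel/driver, declared as an extern ksym */
    extern int bpf_xdp_metadata_rx_vlan_tag(const struct xdp_md *ctx,
                                            __be16 *vlan_proto,
                                            __u16 *vlan_tci) __ksym;

    SEC("xdp")
    int drop_foreign_vlan(struct xdp_md *ctx)
    {
            __be16 vlan_proto = 0;
            __u16 vlan_tci = 0;

            /* -EOPNOTSUPP / -ENODATA both mean "no usable VLAN metadata" */
            if (bpf_xdp_metadata_rx_vlan_tag(ctx, &vlan_proto, &vlan_tci))
                    return XDP_PASS;

            /* TPID is network byte order, TCI is host byte order */
            if (vlan_proto == bpf_htons(ETH_P_8021Q) && (vlan_tci & 0xfff) != 1)
                    return XDP_DROP;

            return XDP_PASS;
    }

    char _license[] SEC("license") = "GPL";
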
@@ -191,7 +191,7 @@ bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id,
 	case BPF_FUNC_ktime_get_coarse_ns:
 		return &bpf_ktime_get_coarse_ns_proto;
 	default:
-		return bpf_base_func_proto(func_id);
+		return bpf_base_func_proto(func_id, prog);
 	}
 }

@@ -271,6 +271,74 @@ static int bpf_tcp_ca_validate(void *kdata)
 	return tcp_validate_congestion_control(kdata);
 }

+static u32 bpf_tcp_ca_ssthresh(struct sock *sk)
+{
+	return 0;
+}
+
+static void bpf_tcp_ca_cong_avoid(struct sock *sk, u32 ack, u32 acked)
+{
+}
+
+static void bpf_tcp_ca_set_state(struct sock *sk, u8 new_state)
+{
+}
+
+static void bpf_tcp_ca_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
+{
+}
+
+static void bpf_tcp_ca_in_ack_event(struct sock *sk, u32 flags)
+{
+}
+
+static void bpf_tcp_ca_pkts_acked(struct sock *sk, const struct ack_sample *sample)
+{
+}
+
+static u32 bpf_tcp_ca_min_tso_segs(struct sock *sk)
+{
+	return 0;
+}
+
+static void bpf_tcp_ca_cong_control(struct sock *sk, const struct rate_sample *rs)
+{
+}
+
+static u32 bpf_tcp_ca_undo_cwnd(struct sock *sk)
+{
+	return 0;
+}
+
+static u32 bpf_tcp_ca_sndbuf_expand(struct sock *sk)
+{
+	return 0;
+}
+
+static void __bpf_tcp_ca_init(struct sock *sk)
+{
+}
+
+static void __bpf_tcp_ca_release(struct sock *sk)
+{
+}
+
+static struct tcp_congestion_ops __bpf_ops_tcp_congestion_ops = {
+	.ssthresh = bpf_tcp_ca_ssthresh,
+	.cong_avoid = bpf_tcp_ca_cong_avoid,
+	.set_state = bpf_tcp_ca_set_state,
+	.cwnd_event = bpf_tcp_ca_cwnd_event,
+	.in_ack_event = bpf_tcp_ca_in_ack_event,
+	.pkts_acked = bpf_tcp_ca_pkts_acked,
+	.min_tso_segs = bpf_tcp_ca_min_tso_segs,
+	.cong_control = bpf_tcp_ca_cong_control,
+	.undo_cwnd = bpf_tcp_ca_undo_cwnd,
+	.sndbuf_expand = bpf_tcp_ca_sndbuf_expand,
+
+	.init = __bpf_tcp_ca_init,
+	.release = __bpf_tcp_ca_release,
+};
+
 struct bpf_struct_ops bpf_tcp_congestion_ops = {
 	.verifier_ops = &bpf_tcp_ca_verifier_ops,
 	.reg = bpf_tcp_ca_reg,
@@ -281,6 +349,7 @@ struct bpf_struct_ops bpf_tcp_congestion_ops = {
 	.init = bpf_tcp_ca_init,
 	.validate = bpf_tcp_ca_validate,
 	.name = "tcp_congestion_ops",
+	.cfi_stubs = &__bpf_ops_tcp_congestion_ops,
 };

 static int __init bpf_tcp_ca_kfunc_init(void)
@@ -314,7 +314,7 @@ static bool nf_is_valid_access(int off, int size, enum bpf_access_type type,
 static const struct bpf_func_proto *
 bpf_nf_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 {
-	return bpf_base_func_proto(func_id);
+	return bpf_base_func_proto(func_id, prog);
 }

 const struct bpf_verifier_ops netfilter_verifier_ops = {
@@ -125,6 +125,18 @@ void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq)
 }
 EXPORT_SYMBOL(xp_set_rxq_info);

+void xp_fill_cb(struct xsk_buff_pool *pool, struct xsk_cb_desc *desc)
+{
+	u32 i;
+
+	for (i = 0; i < pool->heads_cnt; i++) {
+		struct xdp_buff_xsk *xskb = &pool->heads[i];
+
+		memcpy(xskb->cb + desc->off, desc->src, desc->bytes);
+	}
+}
+EXPORT_SYMBOL(xp_fill_cb);
+
 static void xp_disable_drv_zc(struct xsk_buff_pool *pool)
 {
 	struct netdev_bpf bpf;
@@ -21,3 +21,4 @@ obj-$(CONFIG_XFRM_USER_COMPAT) += xfrm_compat.o
 obj-$(CONFIG_XFRM_IPCOMP) += xfrm_ipcomp.o
 obj-$(CONFIG_XFRM_INTERFACE) += xfrm_interface.o
 obj-$(CONFIG_XFRM_ESPINTCP) += espintcp.o
+obj-$(CONFIG_DEBUG_INFO_BTF) += xfrm_state_bpf.o
@@ -4218,6 +4218,8 @@ void __init xfrm_init(void)
 #ifdef CONFIG_XFRM_ESPINTCP
 	espintcp_init();
 #endif
+
+	register_xfrm_state_bpf();
 }

 #ifdef CONFIG_AUDITSYSCALL
new file: net/xfrm/xfrm_state_bpf.c (134 lines)
@@ -0,0 +1,134 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/* Unstable XFRM state BPF helpers.
|
||||
*
|
||||
* Note that it is allowed to break compatibility for these functions since the
|
||||
* interface they are exposed through to BPF programs is explicitly unstable.
|
||||
*/
|
||||
|
||||
#include <linux/bpf.h>
|
||||
#include <linux/btf.h>
|
||||
#include <linux/btf_ids.h>
|
||||
#include <net/xdp.h>
|
||||
#include <net/xfrm.h>
|
||||
|
||||
/* bpf_xfrm_state_opts - Options for XFRM state lookup helpers
|
||||
*
|
||||
* Members:
|
||||
* @error - Out parameter, set for any errors encountered
|
||||
* Values:
|
||||
* -EINVAL - netns_id is less than -1
|
||||
* -EINVAL - opts__sz isn't BPF_XFRM_STATE_OPTS_SZ
|
||||
* -ENONET - No network namespace found for netns_id
|
||||
* -ENOENT - No xfrm_state found
|
||||
* @netns_id - Specify the network namespace for lookup
|
||||
* Values:
|
||||
* BPF_F_CURRENT_NETNS (-1)
|
||||
* Use namespace associated with ctx
|
||||
* [0, S32_MAX]
|
||||
* Network Namespace ID
|
||||
* @mark - XFRM mark to match on
|
||||
* @daddr - Destination address to match on
|
||||
* @spi - Security parameter index to match on
|
||||
* @proto - IP protocol to match on (eg. IPPROTO_ESP)
|
||||
* @family - Protocol family to match on (AF_INET/AF_INET6)
|
||||
*/
|
||||
struct bpf_xfrm_state_opts {
|
||||
s32 error;
|
||||
s32 netns_id;
|
||||
u32 mark;
|
||||
xfrm_address_t daddr;
|
||||
__be32 spi;
|
||||
u8 proto;
|
||||
u16 family;
|
||||
};
|
||||
|
||||
enum {
|
||||
BPF_XFRM_STATE_OPTS_SZ = sizeof(struct bpf_xfrm_state_opts),
|
||||
};
|
||||
|
||||
__bpf_kfunc_start_defs();
|
||||
|
||||
/* bpf_xdp_get_xfrm_state - Get XFRM state
|
||||
*
|
||||
* A `struct xfrm_state *`, if found, must be released with a corresponding
|
||||
* bpf_xdp_xfrm_state_release.
|
||||
*
|
||||
* Parameters:
|
||||
* @ctx - Pointer to ctx (xdp_md) in XDP program
|
||||
* Cannot be NULL
|
||||
* @opts - Options for lookup (documented above)
|
||||
* Cannot be NULL
|
||||
* @opts__sz - Length of the bpf_xfrm_state_opts structure
|
||||
* Must be BPF_XFRM_STATE_OPTS_SZ
|
||||
*/
|
||||
__bpf_kfunc struct xfrm_state *
|
||||
bpf_xdp_get_xfrm_state(struct xdp_md *ctx, struct bpf_xfrm_state_opts *opts, u32 opts__sz)
|
||||
{
|
||||
struct xdp_buff *xdp = (struct xdp_buff *)ctx;
|
||||
struct net *net = dev_net(xdp->rxq->dev);
|
||||
struct xfrm_state *x;
|
||||
|
||||
if (!opts || opts__sz < sizeof(opts->error))
|
||||
return NULL;
|
||||
|
||||
if (opts__sz != BPF_XFRM_STATE_OPTS_SZ) {
|
||||
opts->error = -EINVAL;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (unlikely(opts->netns_id < BPF_F_CURRENT_NETNS)) {
|
||||
opts->error = -EINVAL;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (opts->netns_id >= 0) {
|
||||
net = get_net_ns_by_id(net, opts->netns_id);
|
||||
if (unlikely(!net)) {
|
||||
opts->error = -ENONET;
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
x = xfrm_state_lookup(net, opts->mark, &opts->daddr, opts->spi,
|
||||
opts->proto, opts->family);
|
||||
|
||||
if (opts->netns_id >= 0)
|
||||
put_net(net);
|
||||
if (!x)
|
||||
opts->error = -ENOENT;
|
||||
|
||||
return x;
|
||||
}
|
||||
|
||||
/* bpf_xdp_xfrm_state_release - Release acquired xfrm_state object
|
||||
*
|
||||
* This must be invoked for referenced PTR_TO_BTF_ID, and the verifier rejects
|
||||
* the program if any references remain in the program in all of the explored
|
||||
* states.
|
||||
*
|
||||
* Parameters:
|
||||
* @x - Pointer to referenced xfrm_state object, obtained using
|
||||
* bpf_xdp_get_xfrm_state.
|
||||
*/
|
||||
__bpf_kfunc void bpf_xdp_xfrm_state_release(struct xfrm_state *x)
|
||||
{
|
||||
xfrm_state_put(x);
|
||||
}
|
||||
|
||||
__bpf_kfunc_end_defs();
|
||||
|
||||
BTF_SET8_START(xfrm_state_kfunc_set)
|
||||
BTF_ID_FLAGS(func, bpf_xdp_get_xfrm_state, KF_RET_NULL | KF_ACQUIRE)
|
||||
BTF_ID_FLAGS(func, bpf_xdp_xfrm_state_release, KF_RELEASE)
|
||||
BTF_SET8_END(xfrm_state_kfunc_set)
|
||||
|
||||
static const struct btf_kfunc_id_set xfrm_state_xdp_kfunc_set = {
|
||||
.owner = THIS_MODULE,
|
||||
.set = &xfrm_state_kfunc_set,
|
||||
};
|
||||
|
||||
int __init register_xfrm_state_bpf(void)
|
||||
{
|
||||
return register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP,
|
||||
&xfrm_state_xdp_kfunc_set);
|
||||
}
|
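
A hedged sketch of calling these kfuncs from an XDP program (not part of this series): the kfunc externs and the struct bpf_xfrm_state_opts definition are assumed to be made available to the program (the selftests do this via a local header), and the SPI, address and numeric constants are illustrative only.

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_endian.h>

    extern struct xfrm_state *
    bpf_xdp_get_xfrm_state(struct xdp_md *ctx, struct bpf_xfrm_state_opts *opts,
                           __u32 opts__sz) __ksym;
    extern void bpf_xdp_xfrm_state_release(struct xfrm_state *x) __ksym;

    SEC("xdp")
    int xdp_lookup_sa(struct xdp_md *ctx)
    {
            struct bpf_xfrm_state_opts opts = {
                    .netns_id = BPF_F_CURRENT_NETNS,
                    .proto    = 50,                        /* IPPROTO_ESP */
                    .family   = 2,                         /* AF_INET */
                    .spi      = bpf_htonl(0x1000),         /* illustrative SPI */
                    .daddr.a4 = bpf_htonl(0xc0a80001),     /* 192.168.0.1, illustrative */
            };
            struct xfrm_state *x;

            x = bpf_xdp_get_xfrm_state(ctx, &opts, sizeof(opts));
            if (!x)
                    return XDP_PASS;

            /* ... inspect the SA here, e.g. to steer packets for software RSS ... */

            bpf_xdp_xfrm_state_release(x);
            return XDP_PASS;
    }

    char _license[] SEC("license") = "GPL";
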
@ -5167,29 +5167,87 @@ int security_bpf_prog(struct bpf_prog *prog)
|
||||
}
|
||||
|
||||
/**
|
||||
* security_bpf_map_alloc() - Allocate a bpf map LSM blob
|
||||
* @map: bpf map
|
||||
* security_bpf_map_create() - Check if BPF map creation is allowed
|
||||
* @map: BPF map object
|
||||
* @attr: BPF syscall attributes used to create BPF map
|
||||
* @token: BPF token used to grant user access
|
||||
*
|
||||
* Initialize the security field inside bpf map.
|
||||
* Do a check when the kernel creates a new BPF map. This is also the
|
||||
* point where LSM blob is allocated for LSMs that need them.
|
||||
*
|
||||
* Return: Returns 0 on success, error on failure.
|
||||
*/
|
||||
int security_bpf_map_alloc(struct bpf_map *map)
|
||||
int security_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
|
||||
struct bpf_token *token)
|
||||
{
|
||||
return call_int_hook(bpf_map_alloc_security, 0, map);
|
||||
return call_int_hook(bpf_map_create, 0, map, attr, token);
|
||||
}
|
||||
|
||||
/**
|
||||
* security_bpf_prog_alloc() - Allocate a bpf program LSM blob
|
||||
* @aux: bpf program aux info struct
|
||||
* security_bpf_prog_load() - Check if loading of BPF program is allowed
|
||||
* @prog: BPF program object
|
||||
* @attr: BPF syscall attributes used to create BPF program
|
||||
* @token: BPF token used to grant user access to BPF subsystem
|
||||
*
|
||||
* Initialize the security field inside bpf program.
|
||||
* Perform an access control check when the kernel loads a BPF program and
|
||||
* allocates associated BPF program object. This hook is also responsible for
|
||||
* allocating any required LSM state for the BPF program.
|
||||
*
|
||||
* Return: Returns 0 on success, error on failure.
|
||||
*/
|
||||
int security_bpf_prog_alloc(struct bpf_prog_aux *aux)
|
||||
int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr,
|
||||
struct bpf_token *token)
|
||||
{
|
||||
return call_int_hook(bpf_prog_alloc_security, 0, aux);
|
||||
return call_int_hook(bpf_prog_load, 0, prog, attr, token);
|
||||
}
|
||||
|
||||
/**
|
||||
* security_bpf_token_create() - Check if creating of BPF token is allowed
|
||||
* @token: BPF token object
|
||||
* @attr: BPF syscall attributes used to create BPF token
|
||||
* @path: path pointing to BPF FS mount point from which BPF token is created
|
||||
*
|
||||
* Do a check when the kernel instantiates a new BPF token object from BPF FS
|
||||
* instance. This is also the point where LSM blob can be allocated for LSMs.
|
||||
*
|
||||
* Return: Returns 0 on success, error on failure.
|
||||
*/
|
||||
int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr,
|
||||
struct path *path)
|
||||
{
|
||||
return call_int_hook(bpf_token_create, 0, token, attr, path);
|
||||
}
|
||||
|
||||
/**
|
||||
* security_bpf_token_cmd() - Check if BPF token is allowed to delegate
|
||||
* requested BPF syscall command
|
||||
* @token: BPF token object
|
||||
* @cmd: BPF syscall command requested to be delegated by BPF token
|
||||
*
|
||||
* Do a check when the kernel decides whether provided BPF token should allow
|
||||
* delegation of requested BPF syscall command.
|
||||
*
|
||||
* Return: Returns 0 on success, error on failure.
|
||||
*/
|
||||
int security_bpf_token_cmd(const struct bpf_token *token, enum bpf_cmd cmd)
|
||||
{
|
||||
return call_int_hook(bpf_token_cmd, 0, token, cmd);
|
||||
}
|
||||
|
||||
/**
|
||||
* security_bpf_token_capable() - Check if BPF token is allowed to delegate
|
||||
* requested BPF-related capability
|
||||
* @token: BPF token object
|
||||
* @cap: capabilities requested to be delegated by BPF token
|
||||
*
|
||||
* Do a check when the kernel decides whether provided BPF token should allow
|
||||
* delegation of requested BPF-related capabilities.
|
||||
*
|
||||
* Return: Returns 0 on success, error on failure.
|
||||
*/
|
||||
int security_bpf_token_capable(const struct bpf_token *token, int cap)
|
||||
{
|
||||
return call_int_hook(bpf_token_capable, 0, token, cap);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -5200,18 +5258,29 @@ int security_bpf_prog_alloc(struct bpf_prog_aux *aux)
|
||||
*/
|
||||
void security_bpf_map_free(struct bpf_map *map)
|
||||
{
|
||||
call_void_hook(bpf_map_free_security, map);
|
||||
call_void_hook(bpf_map_free, map);
|
||||
}
|
||||
|
||||
/**
|
||||
* security_bpf_prog_free() - Free a bpf program's LSM blob
|
||||
* @aux: bpf program aux info struct
|
||||
* security_bpf_prog_free() - Free a BPF program's LSM blob
|
||||
* @prog: BPF program struct
|
||||
*
|
||||
* Clean up the security information stored inside bpf prog.
|
||||
* Clean up the security information stored inside BPF program.
|
||||
*/
|
||||
void security_bpf_prog_free(struct bpf_prog_aux *aux)
|
||||
void security_bpf_prog_free(struct bpf_prog *prog)
|
||||
{
|
||||
call_void_hook(bpf_prog_free_security, aux);
|
||||
call_void_hook(bpf_prog_free, prog);
|
||||
}
|
||||
|
||||
/**
|
||||
* security_bpf_token_free() - Free a BPF token's LSM blob
|
||||
* @token: BPF token struct
|
||||
*
|
||||
* Clean up the security information stored inside BPF token.
|
||||
*/
|
||||
void security_bpf_token_free(struct bpf_token *token)
|
||||
{
|
||||
call_void_hook(bpf_token_free, token);
|
||||
}
|
||||
#endif /* CONFIG_BPF_SYSCALL */
|
||||
|
||||
|
@ -6783,7 +6783,8 @@ static int selinux_bpf_prog(struct bpf_prog *prog)
|
||||
BPF__PROG_RUN, NULL);
|
||||
}
|
||||
|
||||
static int selinux_bpf_map_alloc(struct bpf_map *map)
|
||||
static int selinux_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
|
||||
struct bpf_token *token)
|
||||
{
|
||||
struct bpf_security_struct *bpfsec;
|
||||
|
||||
@ -6805,7 +6806,8 @@ static void selinux_bpf_map_free(struct bpf_map *map)
|
||||
kfree(bpfsec);
|
||||
}
|
||||
|
||||
static int selinux_bpf_prog_alloc(struct bpf_prog_aux *aux)
|
||||
static int selinux_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr,
|
||||
struct bpf_token *token)
|
||||
{
|
||||
struct bpf_security_struct *bpfsec;
|
||||
|
||||
@ -6814,16 +6816,39 @@ static int selinux_bpf_prog_alloc(struct bpf_prog_aux *aux)
|
||||
return -ENOMEM;
|
||||
|
||||
bpfsec->sid = current_sid();
|
||||
aux->security = bpfsec;
|
||||
prog->aux->security = bpfsec;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void selinux_bpf_prog_free(struct bpf_prog_aux *aux)
|
||||
static void selinux_bpf_prog_free(struct bpf_prog *prog)
|
||||
{
|
||||
struct bpf_security_struct *bpfsec = aux->security;
|
||||
struct bpf_security_struct *bpfsec = prog->aux->security;
|
||||
|
||||
aux->security = NULL;
|
||||
prog->aux->security = NULL;
|
||||
kfree(bpfsec);
|
||||
}
|
||||
|
||||
static int selinux_bpf_token_create(struct bpf_token *token, union bpf_attr *attr,
|
||||
struct path *path)
|
||||
{
|
||||
struct bpf_security_struct *bpfsec;
|
||||
|
||||
bpfsec = kzalloc(sizeof(*bpfsec), GFP_KERNEL);
|
||||
if (!bpfsec)
|
||||
return -ENOMEM;
|
||||
|
||||
bpfsec->sid = current_sid();
|
||||
token->security = bpfsec;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void selinux_bpf_token_free(struct bpf_token *token)
|
||||
{
|
||||
struct bpf_security_struct *bpfsec = token->security;
|
||||
|
||||
token->security = NULL;
|
||||
kfree(bpfsec);
|
||||
}
|
||||
#endif
|
||||
@ -7179,8 +7204,9 @@ static struct security_hook_list selinux_hooks[] __ro_after_init = {
|
||||
LSM_HOOK_INIT(bpf, selinux_bpf),
|
||||
LSM_HOOK_INIT(bpf_map, selinux_bpf_map),
|
||||
LSM_HOOK_INIT(bpf_prog, selinux_bpf_prog),
|
||||
LSM_HOOK_INIT(bpf_map_free_security, selinux_bpf_map_free),
|
||||
LSM_HOOK_INIT(bpf_prog_free_security, selinux_bpf_prog_free),
|
||||
LSM_HOOK_INIT(bpf_map_free, selinux_bpf_map_free),
|
||||
LSM_HOOK_INIT(bpf_prog_free, selinux_bpf_prog_free),
|
||||
LSM_HOOK_INIT(bpf_token_free, selinux_bpf_token_free),
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_PERF_EVENTS
|
||||
@ -7237,8 +7263,9 @@ static struct security_hook_list selinux_hooks[] __ro_after_init = {
|
||||
LSM_HOOK_INIT(audit_rule_init, selinux_audit_rule_init),
|
||||
#endif
|
||||
#ifdef CONFIG_BPF_SYSCALL
|
||||
LSM_HOOK_INIT(bpf_map_alloc_security, selinux_bpf_map_alloc),
|
||||
LSM_HOOK_INIT(bpf_prog_alloc_security, selinux_bpf_prog_alloc),
|
||||
LSM_HOOK_INIT(bpf_map_create, selinux_bpf_map_create),
|
||||
LSM_HOOK_INIT(bpf_prog_load, selinux_bpf_prog_load),
|
||||
LSM_HOOK_INIT(bpf_token_create, selinux_bpf_token_create),
|
||||
#endif
|
||||
#ifdef CONFIG_PERF_EVENTS
|
||||
LSM_HOOK_INIT(perf_event_alloc, selinux_perf_event_alloc),
|
||||
|
@@ -847,6 +847,36 @@ union bpf_iter_link_info {
 *		Returns zero on success. On error, -1 is returned and *errno*
 *		is set appropriately.
 *
+ * BPF_TOKEN_CREATE
+ *	Description
+ *		Create BPF token with embedded information about what
+ *		BPF-related functionality it allows:
+ *		- a set of allowed bpf() syscall commands;
+ *		- a set of allowed BPF map types to be created with
+ *		  BPF_MAP_CREATE command, if BPF_MAP_CREATE itself is allowed;
+ *		- a set of allowed BPF program types and BPF program attach
+ *		  types to be loaded with BPF_PROG_LOAD command, if
+ *		  BPF_PROG_LOAD itself is allowed.
+ *
+ *		BPF token is created (derived) from an instance of BPF FS,
+ *		assuming it has necessary delegation mount options specified.
+ *		This BPF token can be passed as an extra parameter to various
+ *		bpf() syscall commands to grant BPF subsystem functionality to
+ *		unprivileged processes.
+ *
+ *		When created, BPF token is "associated" with the owning
+ *		user namespace of BPF FS instance (super block) that it was
+ *		derived from, and subsequent BPF operations performed with
+ *		BPF token would be performing capabilities checks (i.e.,
+ *		CAP_BPF, CAP_PERFMON, CAP_NET_ADMIN, CAP_SYS_ADMIN) within
+ *		that user namespace. Without BPF token, such capabilities
+ *		have to be granted in init user namespace, making bpf()
+ *		syscall incompatible with user namespace, for the most part.
+ *
+ *	Return
+ *		A new file descriptor (a nonnegative integer), or -1 if an
+ *		error occurred (in which case, *errno* is set appropriately).
+ *
 * NOTES
 *	eBPF objects (maps and programs) can be shared between processes.
 *
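
A hedged userspace sketch of issuing this command through the raw bpf() syscall; it assumes uapi headers that already carry BPF_TOKEN_CREATE and a BPF FS mount at /sys/fs/bpf/token with delegation options configured:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/bpf.h>

    static int token_create(int bpffs_fd)
    {
            union bpf_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.token_create.bpffs_fd = bpffs_fd;
            attr.token_create.flags = 0;

            return syscall(__NR_bpf, BPF_TOKEN_CREATE, &attr, sizeof(attr));
    }

    int main(void)
    {
            int bpffs_fd, token_fd;

            bpffs_fd = open("/sys/fs/bpf/token", O_RDONLY | O_DIRECTORY);
            if (bpffs_fd < 0)
                    return 1;

            token_fd = token_create(bpffs_fd);
            close(bpffs_fd);
            if (token_fd < 0) {
                    perror("BPF_TOKEN_CREATE");
                    return 1;
            }

            /* token_fd can now be passed as map_token_fd / prog_token_fd /
             * btf_token_fd in later bpf() commands. */
            close(token_fd);
            return 0;
    }
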
@ -901,6 +931,8 @@ enum bpf_cmd {
|
||||
BPF_ITER_CREATE,
|
||||
BPF_LINK_DETACH,
|
||||
BPF_PROG_BIND_MAP,
|
||||
BPF_TOKEN_CREATE,
|
||||
__MAX_BPF_CMD,
|
||||
};
|
||||
|
||||
enum bpf_map_type {
|
||||
@ -951,6 +983,7 @@ enum bpf_map_type {
|
||||
BPF_MAP_TYPE_BLOOM_FILTER,
|
||||
BPF_MAP_TYPE_USER_RINGBUF,
|
||||
BPF_MAP_TYPE_CGRP_STORAGE,
|
||||
__MAX_BPF_MAP_TYPE
|
||||
};
|
||||
|
||||
/* Note that tracing related programs such as
|
||||
@ -995,6 +1028,7 @@ enum bpf_prog_type {
|
||||
BPF_PROG_TYPE_SK_LOOKUP,
|
||||
BPF_PROG_TYPE_SYSCALL, /* a program that can execute syscalls */
|
||||
BPF_PROG_TYPE_NETFILTER,
|
||||
__MAX_BPF_PROG_TYPE
|
||||
};
|
||||
|
||||
enum bpf_attach_type {
|
||||
@ -1074,9 +1108,11 @@ enum bpf_link_type {
|
||||
BPF_LINK_TYPE_TCX = 11,
|
||||
BPF_LINK_TYPE_UPROBE_MULTI = 12,
|
||||
BPF_LINK_TYPE_NETKIT = 13,
|
||||
MAX_BPF_LINK_TYPE,
|
||||
__MAX_BPF_LINK_TYPE,
|
||||
};
|
||||
|
||||
#define MAX_BPF_LINK_TYPE __MAX_BPF_LINK_TYPE
|
||||
|
||||
enum bpf_perf_event_type {
|
||||
BPF_PERF_EVENT_UNSPEC = 0,
|
||||
BPF_PERF_EVENT_UPROBE = 1,
|
||||
@ -1401,6 +1437,7 @@ union bpf_attr {
|
||||
* to using 5 hash functions).
|
||||
*/
|
||||
__u64 map_extra;
|
||||
__u32 map_token_fd;
|
||||
};
|
||||
|
||||
struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
|
||||
@ -1470,6 +1507,7 @@ union bpf_attr {
|
||||
* truncated), or smaller (if log buffer wasn't filled completely).
|
||||
*/
|
||||
__u32 log_true_size;
|
||||
__u32 prog_token_fd;
|
||||
};
|
||||
|
||||
struct { /* anonymous struct used by BPF_OBJ_* commands */
|
||||
@ -1582,6 +1620,7 @@ union bpf_attr {
|
||||
* truncated), or smaller (if log buffer wasn't filled completely).
|
||||
*/
|
||||
__u32 btf_log_true_size;
|
||||
__u32 btf_token_fd;
|
||||
};
|
||||
|
||||
struct {
|
||||
@ -1712,6 +1751,11 @@ union bpf_attr {
|
||||
__u32 flags; /* extra flags */
|
||||
} prog_bind_map;
|
||||
|
||||
struct { /* struct used by BPF_TOKEN_CREATE command */
|
||||
__u32 flags;
|
||||
__u32 bpffs_fd;
|
||||
} token_create;
|
||||
|
||||
} __attribute__((aligned(8)));
|
||||
|
||||
/* The description below is an attempt at providing documentation to eBPF
|
||||
|
@@ -44,10 +44,13 @@ enum netdev_xdp_act {
 *   timestamp via bpf_xdp_metadata_rx_timestamp().
 * @NETDEV_XDP_RX_METADATA_HASH: Device is capable of exposing receive packet
 *   hash via bpf_xdp_metadata_rx_hash().
+ * @NETDEV_XDP_RX_METADATA_VLAN_TAG: Device is capable of exposing receive
+ *   packet VLAN tag via bpf_xdp_metadata_rx_vlan_tag().
 */
 enum netdev_xdp_rx_metadata {
 	NETDEV_XDP_RX_METADATA_TIMESTAMP = 1,
 	NETDEV_XDP_RX_METADATA_HASH = 2,
+	NETDEV_XDP_RX_METADATA_VLAN_TAG = 4,
 };

 /**
@@ -1,4 +1,4 @@
 libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o \
 	    netlink.o bpf_prog_linfo.o libbpf_probes.o hashmap.o \
 	    btf_dump.o ringbuf.o strset.o linker.o gen_loader.o relo_core.o \
-	    usdt.o zip.o elf.o
+	    usdt.o zip.o elf.o features.o
@ -103,7 +103,7 @@ int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts)
|
||||
* [0] https://lore.kernel.org/bpf/20201201215900.3569844-1-guro@fb.com/
|
||||
* [1] d05512618056 ("bpf: Add bpf_ktime_get_coarse_ns helper")
|
||||
*/
|
||||
int probe_memcg_account(void)
|
||||
int probe_memcg_account(int token_fd)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, attach_btf_obj_fd);
|
||||
struct bpf_insn insns[] = {
|
||||
@ -120,6 +120,7 @@ int probe_memcg_account(void)
|
||||
attr.insns = ptr_to_u64(insns);
|
||||
attr.insn_cnt = insn_cnt;
|
||||
attr.license = ptr_to_u64("GPL");
|
||||
attr.prog_token_fd = token_fd;
|
||||
|
||||
prog_fd = sys_bpf_fd(BPF_PROG_LOAD, &attr, attr_sz);
|
||||
if (prog_fd >= 0) {
|
||||
@ -146,7 +147,7 @@ int bump_rlimit_memlock(void)
|
||||
struct rlimit rlim;
|
||||
|
||||
/* if kernel supports memcg-based accounting, skip bumping RLIMIT_MEMLOCK */
|
||||
if (memlock_bumped || kernel_supports(NULL, FEAT_MEMCG_ACCOUNT))
|
||||
if (memlock_bumped || feat_supported(NULL, FEAT_MEMCG_ACCOUNT))
|
||||
return 0;
|
||||
|
||||
memlock_bumped = true;
|
||||
@ -169,7 +170,7 @@ int bpf_map_create(enum bpf_map_type map_type,
|
||||
__u32 max_entries,
|
||||
const struct bpf_map_create_opts *opts)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, map_extra);
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, map_token_fd);
|
||||
union bpf_attr attr;
|
||||
int fd;
|
||||
|
||||
@ -181,7 +182,7 @@ int bpf_map_create(enum bpf_map_type map_type,
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
attr.map_type = map_type;
|
||||
if (map_name && kernel_supports(NULL, FEAT_PROG_NAME))
|
||||
if (map_name && feat_supported(NULL, FEAT_PROG_NAME))
|
||||
libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
|
||||
attr.key_size = key_size;
|
||||
attr.value_size = value_size;
|
||||
@ -198,6 +199,8 @@ int bpf_map_create(enum bpf_map_type map_type,
|
||||
attr.numa_node = OPTS_GET(opts, numa_node, 0);
|
||||
attr.map_ifindex = OPTS_GET(opts, map_ifindex, 0);
|
||||
|
||||
attr.map_token_fd = OPTS_GET(opts, token_fd, 0);
|
||||
|
||||
fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, attr_sz);
|
||||
return libbpf_err_errno(fd);
|
||||
}
|
||||
@ -232,7 +235,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
|
||||
const struct bpf_insn *insns, size_t insn_cnt,
|
||||
struct bpf_prog_load_opts *opts)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, log_true_size);
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd);
|
||||
void *finfo = NULL, *linfo = NULL;
|
||||
const char *func_info, *line_info;
|
||||
__u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd;
|
||||
@ -261,8 +264,9 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
|
||||
attr.prog_flags = OPTS_GET(opts, prog_flags, 0);
|
||||
attr.prog_ifindex = OPTS_GET(opts, prog_ifindex, 0);
|
||||
attr.kern_version = OPTS_GET(opts, kern_version, 0);
|
||||
attr.prog_token_fd = OPTS_GET(opts, token_fd, 0);
|
||||
|
||||
if (prog_name && kernel_supports(NULL, FEAT_PROG_NAME))
|
||||
if (prog_name && feat_supported(NULL, FEAT_PROG_NAME))
|
||||
libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
|
||||
attr.license = ptr_to_u64(license);
|
||||
|
||||
@ -1182,7 +1186,7 @@ int bpf_raw_tracepoint_open(const char *name, int prog_fd)
|
||||
|
||||
int bpf_btf_load(const void *btf_data, size_t btf_size, struct bpf_btf_load_opts *opts)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, btf_log_true_size);
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, btf_token_fd);
|
||||
union bpf_attr attr;
|
||||
char *log_buf;
|
||||
size_t log_size;
|
||||
@ -1207,6 +1211,8 @@ int bpf_btf_load(const void *btf_data, size_t btf_size, struct bpf_btf_load_opts
|
||||
|
||||
attr.btf = ptr_to_u64(btf_data);
|
||||
attr.btf_size = btf_size;
|
||||
attr.btf_token_fd = OPTS_GET(opts, token_fd, 0);
|
||||
|
||||
/* log_level == 0 and log_buf != NULL means "try loading without
|
||||
* log_buf, but retry with log_buf and log_level=1 on error", which is
|
||||
* consistent across low-level and high-level BTF and program loading
|
||||
@@ -1287,3 +1293,20 @@ int bpf_prog_bind_map(int prog_fd, int map_fd,
 	ret = sys_bpf(BPF_PROG_BIND_MAP, &attr, attr_sz);
 	return libbpf_err_errno(ret);
 }
+
+int bpf_token_create(int bpffs_fd, struct bpf_token_create_opts *opts)
+{
+	const size_t attr_sz = offsetofend(union bpf_attr, token_create);
+	union bpf_attr attr;
+	int fd;
+
+	if (!OPTS_VALID(opts, bpf_token_create_opts))
+		return libbpf_err(-EINVAL);
+
+	memset(&attr, 0, attr_sz);
+	attr.token_create.bpffs_fd = bpffs_fd;
+	attr.token_create.flags = OPTS_GET(opts, flags, 0);
+
+	fd = sys_bpf_fd(BPF_TOKEN_CREATE, &attr, attr_sz);
+	return libbpf_err_errno(fd);
+}
@ -51,8 +51,11 @@ struct bpf_map_create_opts {
|
||||
|
||||
__u32 numa_node;
|
||||
__u32 map_ifindex;
|
||||
|
||||
__u32 token_fd;
|
||||
size_t :0;
|
||||
};
|
||||
#define bpf_map_create_opts__last_field map_ifindex
|
||||
#define bpf_map_create_opts__last_field token_fd
|
||||
|
||||
LIBBPF_API int bpf_map_create(enum bpf_map_type map_type,
|
||||
const char *map_name,
|
||||
@ -102,9 +105,10 @@ struct bpf_prog_load_opts {
|
||||
* If kernel doesn't support this feature, log_size is left unchanged.
|
||||
*/
|
||||
__u32 log_true_size;
|
||||
__u32 token_fd;
|
||||
size_t :0;
|
||||
};
|
||||
#define bpf_prog_load_opts__last_field log_true_size
|
||||
#define bpf_prog_load_opts__last_field token_fd
|
||||
|
||||
LIBBPF_API int bpf_prog_load(enum bpf_prog_type prog_type,
|
||||
const char *prog_name, const char *license,
|
||||
@ -130,9 +134,10 @@ struct bpf_btf_load_opts {
|
||||
* If kernel doesn't support this feature, log_size is left unchanged.
|
||||
*/
|
||||
__u32 log_true_size;
|
||||
__u32 token_fd;
|
||||
size_t :0;
|
||||
};
|
||||
#define bpf_btf_load_opts__last_field log_true_size
|
||||
#define bpf_btf_load_opts__last_field token_fd
|
||||
|
||||
LIBBPF_API int bpf_btf_load(const void *btf_data, size_t btf_size,
|
||||
struct bpf_btf_load_opts *opts);
|
||||
@@ -640,6 +645,30 @@ struct bpf_test_run_opts {
 LIBBPF_API int bpf_prog_test_run_opts(int prog_fd,
 				      struct bpf_test_run_opts *opts);

+struct bpf_token_create_opts {
+	size_t sz; /* size of this struct for forward/backward compatibility */
+	__u32 flags;
+	size_t :0;
+};
+#define bpf_token_create_opts__last_field flags
+
+/**
+ * @brief **bpf_token_create()** creates a new instance of BPF token derived
+ * from specified BPF FS mount point.
+ *
+ * BPF token created with this API can be passed to bpf() syscall for
+ * commands like BPF_PROG_LOAD, BPF_MAP_CREATE, etc.
+ *
+ * @param bpffs_fd FD for BPF FS instance from which to derive a BPF token
+ * instance.
+ * @param opts optional BPF token creation options, can be NULL
+ *
+ * @return BPF token FD > 0, on success; negative error code, otherwise (errno
+ * is also set to the error code)
+ */
+LIBBPF_API int bpf_token_create(int bpffs_fd,
+				struct bpf_token_create_opts *opts);
+
 #ifdef __cplusplus
 } /* extern "C" */
 #endif
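
A hedged sketch of the same flow through the libbpf API added here: derive a token from a delegated bpffs mount and pass it to bpf_map_create() via the new token_fd option (the mount point and map parameters are illustrative):

    #include <fcntl.h>
    #include <unistd.h>
    #include <bpf/bpf.h>

    static int create_map_with_token(void)
    {
            LIBBPF_OPTS(bpf_token_create_opts, tok_opts);
            LIBBPF_OPTS(bpf_map_create_opts, map_opts);
            int bpffs_fd, token_fd, map_fd;

            bpffs_fd = open("/sys/fs/bpf/token", O_RDONLY | O_DIRECTORY);
            if (bpffs_fd < 0)
                    return -1;

            token_fd = bpf_token_create(bpffs_fd, &tok_opts);
            close(bpffs_fd);
            if (token_fd < 0)
                    return token_fd;

            map_opts.token_fd = token_fd;
            map_fd = bpf_map_create(BPF_MAP_TYPE_HASH, "tok_map",
                                    sizeof(int), sizeof(long), 128, &map_opts);
            close(token_fd);
            return map_fd;
    }
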
@@ -111,6 +111,38 @@ enum bpf_enum_value_kind {
 	val;								      \
 })

+/*
+ * Write to a bitfield, identified by s->field.
+ * This is the inverse of BPF_CORE_READ_BITFIELD().
+ */
+#define BPF_CORE_WRITE_BITFIELD(s, field, new_val) ({			\
+	void *p = (void *)s + __CORE_RELO(s, field, BYTE_OFFSET);	\
+	unsigned int byte_size = __CORE_RELO(s, field, BYTE_SIZE);	\
+	unsigned int lshift = __CORE_RELO(s, field, LSHIFT_U64);	\
+	unsigned int rshift = __CORE_RELO(s, field, RSHIFT_U64);	\
+	unsigned long long mask, val, nval = new_val;			\
+	unsigned int rpad = rshift - lshift;				\
+									\
+	asm volatile("" : "+r"(p));					\
+									\
+	switch (byte_size) {						\
+	case 1: val = *(unsigned char *)p; break;			\
+	case 2: val = *(unsigned short *)p; break;			\
+	case 4: val = *(unsigned int *)p; break;			\
+	case 8: val = *(unsigned long long *)p; break;			\
+	}								\
+									\
+	mask = (~0ULL << rshift) >> lshift;				\
+	val = (val & ~mask) | ((nval << rpad) & mask);			\
+									\
+	switch (byte_size) {						\
+	case 1: *(unsigned char *)p = val; break;			\
+	case 2: *(unsigned short *)p = val; break;			\
+	case 4: *(unsigned int *)p = val; break;			\
+	case 8: *(unsigned long long *)p = val; break;			\
+	}								\
+})
+
 #define ___bpf_field_ref1(field) (field)
 #define ___bpf_field_ref2(type, field) (((typeof(type) *)0)->field)
 #define ___bpf_field_ref(args...) \
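
A hedged sketch of pairing the new write-side macro with the existing BPF_CORE_READ_BITFIELD(); the struct layout and section name are illustrative, and, as with any CO-RE relocation, the bitfield type has to be resolvable against the BTF the loader matches against (the selftests supply it as custom BTF):

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_core_read.h>

    /* illustrative bitfield layout, not a kernel type */
    struct core_bits {
            unsigned int a: 3;
            unsigned int b: 5;
    } __attribute__((preserve_access_index));

    SEC("socket")
    int rewrite_bitfield(void *ctx)
    {
            struct core_bits bits = {};
            __u64 old;

            /* relocatable read of the bitfield, then write the value back + 1 */
            old = BPF_CORE_READ_BITFIELD(&bits, b);
            BPF_CORE_WRITE_BITFIELD(&bits, b, old + 1);

            return 0;
    }

    char _license[] SEC("license") = "GPL";
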
@ -1317,7 +1317,9 @@ struct btf *btf__parse_split(const char *path, struct btf *base_btf)
|
||||
|
||||
static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian);
|
||||
|
||||
int btf_load_into_kernel(struct btf *btf, char *log_buf, size_t log_sz, __u32 log_level)
|
||||
int btf_load_into_kernel(struct btf *btf,
|
||||
char *log_buf, size_t log_sz, __u32 log_level,
|
||||
int token_fd)
|
||||
{
|
||||
LIBBPF_OPTS(bpf_btf_load_opts, opts);
|
||||
__u32 buf_sz = 0, raw_size;
|
||||
@ -1367,6 +1369,7 @@ retry_load:
|
||||
opts.log_level = log_level;
|
||||
}
|
||||
|
||||
opts.token_fd = token_fd;
|
||||
btf->fd = bpf_btf_load(raw_data, raw_size, &opts);
|
||||
if (btf->fd < 0) {
|
||||
/* time to turn on verbose mode and try again */
|
||||
@ -1394,7 +1397,7 @@ done:
|
||||
|
||||
int btf__load_into_kernel(struct btf *btf)
|
||||
{
|
||||
return btf_load_into_kernel(btf, NULL, 0, 0);
|
||||
return btf_load_into_kernel(btf, NULL, 0, 0, 0);
|
||||
}
|
||||
|
||||
int btf__fd(const struct btf *btf)
|
||||
|
@ -11,8 +11,6 @@
|
||||
#include "libbpf_internal.h"
|
||||
#include "str_error.h"
|
||||
|
||||
#define STRERR_BUFSIZE 128
|
||||
|
||||
/* A SHT_GNU_versym section holds 16-bit words. This bit is set if
|
||||
* the symbol is hidden and can only be seen when referenced using an
|
||||
* explicit version number. This is a GNU extension.
|
||||
|
new file: tools/lib/bpf/features.c (478 lines)
@@ -0,0 +1,478 @@
|
||||
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
|
||||
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/filter.h>
|
||||
#include "bpf.h"
|
||||
#include "libbpf.h"
|
||||
#include "libbpf_common.h"
|
||||
#include "libbpf_internal.h"
|
||||
#include "str_error.h"
|
||||
|
||||
static inline __u64 ptr_to_u64(const void *ptr)
|
||||
{
|
||||
return (__u64)(unsigned long)ptr;
|
||||
}
|
||||
|
||||
static int probe_fd(int fd)
|
||||
{
|
||||
if (fd >= 0)
|
||||
close(fd);
|
||||
return fd >= 0;
|
||||
}
|
||||
|
||||
static int probe_kern_prog_name(int token_fd)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, prog_name);
|
||||
struct bpf_insn insns[] = {
|
||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
||||
BPF_EXIT_INSN(),
|
||||
};
|
||||
union bpf_attr attr;
|
||||
int ret;
|
||||
|
||||
memset(&attr, 0, attr_sz);
|
||||
attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
|
||||
attr.license = ptr_to_u64("GPL");
|
||||
attr.insns = ptr_to_u64(insns);
|
||||
attr.insn_cnt = (__u32)ARRAY_SIZE(insns);
|
||||
attr.prog_token_fd = token_fd;
|
||||
libbpf_strlcpy(attr.prog_name, "libbpf_nametest", sizeof(attr.prog_name));
|
||||
|
||||
/* make sure loading with name works */
|
||||
ret = sys_bpf_prog_load(&attr, attr_sz, PROG_LOAD_ATTEMPTS);
|
||||
return probe_fd(ret);
|
||||
}
|
||||
|
||||
static int probe_kern_global_data(int token_fd)
|
||||
{
|
||||
char *cp, errmsg[STRERR_BUFSIZE];
|
||||
struct bpf_insn insns[] = {
|
||||
BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
|
||||
BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
|
||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
||||
BPF_EXIT_INSN(),
|
||||
};
|
||||
LIBBPF_OPTS(bpf_map_create_opts, map_opts, .token_fd = token_fd);
|
||||
LIBBPF_OPTS(bpf_prog_load_opts, prog_opts, .token_fd = token_fd);
|
||||
int ret, map, insn_cnt = ARRAY_SIZE(insns);
|
||||
|
||||
map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_global", sizeof(int), 32, 1, &map_opts);
|
||||
if (map < 0) {
|
||||
ret = -errno;
|
||||
cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
|
||||
pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
|
||||
__func__, cp, -ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
insns[0].imm = map;
|
||||
|
||||
ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &prog_opts);
|
||||
close(map);
|
||||
return probe_fd(ret);
|
||||
}
|
||||
|
||||
static int probe_kern_btf(int token_fd)
|
||||
{
|
||||
static const char strs[] = "\0int";
|
||||
__u32 types[] = {
|
||||
/* int */
|
||||
BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
|
||||
};
|
||||
|
||||
return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
|
||||
strs, sizeof(strs), token_fd));
|
||||
}
|
||||
|
||||
static int probe_kern_btf_func(int token_fd)
|
||||
{
|
||||
static const char strs[] = "\0int\0x\0a";
|
||||
/* void x(int a) {} */
|
||||
__u32 types[] = {
|
||||
/* int */
|
||||
BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
|
||||
/* FUNC_PROTO */ /* [2] */
|
||||
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
|
||||
BTF_PARAM_ENC(7, 1),
|
||||
/* FUNC x */ /* [3] */
|
||||
BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
|
||||
};
|
||||
|
||||
return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
|
||||
strs, sizeof(strs), token_fd));
|
||||
}
|
||||
|
||||
static int probe_kern_btf_func_global(int token_fd)
|
||||
{
|
||||
static const char strs[] = "\0int\0x\0a";
|
||||
/* static void x(int a) {} */
|
||||
__u32 types[] = {
|
||||
/* int */
|
||||
BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
|
||||
/* FUNC_PROTO */ /* [2] */
|
||||
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
|
||||
BTF_PARAM_ENC(7, 1),
|
||||
/* FUNC x BTF_FUNC_GLOBAL */ /* [3] */
|
||||
BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
|
||||
};
|
||||
|
||||
return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
|
||||
strs, sizeof(strs), token_fd));
|
||||
}
|
||||
|
||||
static int probe_kern_btf_datasec(int token_fd)
|
||||
{
|
||||
static const char strs[] = "\0x\0.data";
|
||||
/* static int a; */
|
||||
__u32 types[] = {
|
||||
/* int */
|
||||
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
|
||||
/* VAR x */ /* [2] */
|
||||
BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
|
||||
BTF_VAR_STATIC,
|
||||
/* DATASEC val */ /* [3] */
|
||||
BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
|
||||
BTF_VAR_SECINFO_ENC(2, 0, 4),
|
||||
};
|
||||
|
||||
return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
|
||||
strs, sizeof(strs), token_fd));
|
||||
}
|
||||
|
||||
static int probe_kern_btf_float(int token_fd)
|
||||
{
|
||||
static const char strs[] = "\0float";
|
||||
__u32 types[] = {
|
||||
/* float */
|
||||
BTF_TYPE_FLOAT_ENC(1, 4),
|
||||
};
|
||||
|
||||
return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
|
||||
strs, sizeof(strs), token_fd));
|
||||
}
|
||||
|
||||
static int probe_kern_btf_decl_tag(int token_fd)
|
||||
{
|
||||
static const char strs[] = "\0tag";
|
||||
__u32 types[] = {
|
||||
/* int */
|
||||
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
|
||||
/* VAR x */ /* [2] */
|
||||
BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
|
||||
BTF_VAR_STATIC,
|
||||
/* attr */
|
||||
BTF_TYPE_DECL_TAG_ENC(1, 2, -1),
|
||||
};
|
||||
|
||||
return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
|
||||
strs, sizeof(strs), token_fd));
|
||||
}
|
||||
|
||||
static int probe_kern_btf_type_tag(int token_fd)
|
||||
{
|
||||
static const char strs[] = "\0tag";
|
||||
__u32 types[] = {
|
||||
/* int */
|
||||
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
|
||||
/* attr */
|
||||
BTF_TYPE_TYPE_TAG_ENC(1, 1), /* [2] */
|
||||
/* ptr */
|
||||
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2), /* [3] */
|
||||
};
|
||||
|
||||
return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
|
||||
strs, sizeof(strs), token_fd));
|
||||
}
|
||||
|
||||
static int probe_kern_array_mmap(int token_fd)
|
||||
{
|
||||
LIBBPF_OPTS(bpf_map_create_opts, opts,
|
||||
.map_flags = BPF_F_MMAPABLE,
|
||||
.token_fd = token_fd,
|
||||
);
|
||||
int fd;
|
||||
|
||||
fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_mmap", sizeof(int), sizeof(int), 1, &opts);
|
||||
return probe_fd(fd);
|
||||
}
|
||||
|
||||
static int probe_kern_exp_attach_type(int token_fd)
|
||||
{
|
||||
LIBBPF_OPTS(bpf_prog_load_opts, opts,
|
||||
.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
|
||||
.token_fd = token_fd,
|
||||
);
|
||||
struct bpf_insn insns[] = {
|
||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
||||
BPF_EXIT_INSN(),
|
||||
};
|
||||
int fd, insn_cnt = ARRAY_SIZE(insns);
|
||||
|
||||
/* use any valid combination of program type and (optional)
|
||||
* non-zero expected attach type (i.e., not a BPF_CGROUP_INET_INGRESS)
|
||||
* to see if kernel supports expected_attach_type field for
|
||||
* BPF_PROG_LOAD command
|
||||
*/
|
||||
fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", insns, insn_cnt, &opts);
|
||||
return probe_fd(fd);
|
||||
}
|
||||
|
||||
static int probe_kern_probe_read_kernel(int token_fd)
|
||||
{
|
||||
LIBBPF_OPTS(bpf_prog_load_opts, opts, .token_fd = token_fd);
|
||||
struct bpf_insn insns[] = {
|
||||
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), /* r1 = r10 (fp) */
|
||||
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), /* r1 += -8 */
|
||||
BPF_MOV64_IMM(BPF_REG_2, 8), /* r2 = 8 */
|
||||
BPF_MOV64_IMM(BPF_REG_3, 0), /* r3 = 0 */
|
||||
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
|
||||
BPF_EXIT_INSN(),
|
||||
};
|
||||
int fd, insn_cnt = ARRAY_SIZE(insns);
|
||||
|
||||
fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
|
||||
return probe_fd(fd);
|
||||
}
|
||||
|
||||
static int probe_prog_bind_map(int token_fd)
|
||||
{
|
||||
char *cp, errmsg[STRERR_BUFSIZE];
|
||||
struct bpf_insn insns[] = {
|
||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
||||
BPF_EXIT_INSN(),
|
||||
};
|
||||
LIBBPF_OPTS(bpf_map_create_opts, map_opts, .token_fd = token_fd);
|
||||
LIBBPF_OPTS(bpf_prog_load_opts, prog_opts, .token_fd = token_fd);
|
||||
int ret, map, prog, insn_cnt = ARRAY_SIZE(insns);
|
||||
|
||||
map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_det_bind", sizeof(int), 32, 1, &map_opts);
|
||||
if (map < 0) {
|
||||
ret = -errno;
|
||||
cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
|
||||
pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
|
||||
__func__, cp, -ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
prog = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &prog_opts);
|
||||
if (prog < 0) {
|
||||
close(map);
|
||||
return 0;
|
||||
}
|
||||
|
||||
ret = bpf_prog_bind_map(prog, map, NULL);
|
||||
|
||||
close(map);
|
||||
close(prog);
|
||||
|
||||
return ret >= 0;
|
||||
}
|
||||
|
||||
static int probe_module_btf(int token_fd)
|
||||
{
|
||||
static const char strs[] = "\0int";
|
||||
__u32 types[] = {
|
||||
/* int */
|
||||
BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
|
||||
};
|
||||
struct bpf_btf_info info;
|
||||
__u32 len = sizeof(info);
|
||||
char name[16];
|
||||
int fd, err;
|
||||
|
||||
fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs), token_fd);
|
||||
if (fd < 0)
|
||||
return 0; /* BTF not supported at all */
|
||||
|
||||
memset(&info, 0, sizeof(info));
|
||||
info.name = ptr_to_u64(name);
|
||||
info.name_len = sizeof(name);
|
||||
|
||||
/* check that BPF_OBJ_GET_INFO_BY_FD supports specifying name pointer;
|
||||
* kernel's module BTF support coincides with support for
|
||||
* name/name_len fields in struct bpf_btf_info.
|
||||
*/
|
||||
err = bpf_btf_get_info_by_fd(fd, &info, &len);
|
||||
close(fd);
|
||||
return !err;
|
||||
}
|
||||
|
||||
static int probe_perf_link(int token_fd)
|
||||
{
|
||||
struct bpf_insn insns[] = {
|
||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
||||
BPF_EXIT_INSN(),
|
||||
};
|
||||
LIBBPF_OPTS(bpf_prog_load_opts, opts, .token_fd = token_fd);
|
||||
int prog_fd, link_fd, err;
|
||||
|
||||
prog_fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL",
|
||||
insns, ARRAY_SIZE(insns), &opts);
|
||||
if (prog_fd < 0)
|
||||
return -errno;
|
||||
|
||||
/* use invalid perf_event FD to get EBADF, if link is supported;
|
||||
* otherwise EINVAL should be returned
|
||||
*/
|
||||
link_fd = bpf_link_create(prog_fd, -1, BPF_PERF_EVENT, NULL);
|
||||
err = -errno; /* close() can clobber errno */
|
||||
|
||||
if (link_fd >= 0)
|
||||
close(link_fd);
|
||||
close(prog_fd);
|
||||
|
||||
return link_fd < 0 && err == -EBADF;
|
||||
}
|
||||
|
||||
static int probe_uprobe_multi_link(int token_fd)
|
||||
{
|
||||
LIBBPF_OPTS(bpf_prog_load_opts, load_opts,
|
||||
.expected_attach_type = BPF_TRACE_UPROBE_MULTI,
|
||||
.token_fd = token_fd,
|
||||
);
|
||||
LIBBPF_OPTS(bpf_link_create_opts, link_opts);
|
||||
struct bpf_insn insns[] = {
|
||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
||||
BPF_EXIT_INSN(),
|
||||
};
|
||||
int prog_fd, link_fd, err;
|
||||
unsigned long offset = 0;
|
||||
|
||||
prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL",
|
||||
insns, ARRAY_SIZE(insns), &load_opts);
|
||||
if (prog_fd < 0)
|
||||
return -errno;
|
||||
|
||||
/* Creating uprobe in '/' binary should fail with -EBADF. */
|
||||
link_opts.uprobe_multi.path = "/";
|
||||
link_opts.uprobe_multi.offsets = &offset;
|
||||
link_opts.uprobe_multi.cnt = 1;
|
||||
|
||||
link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
|
||||
err = -errno; /* close() can clobber errno */
|
||||
|
||||
if (link_fd >= 0)
|
||||
close(link_fd);
|
||||
close(prog_fd);
|
||||
|
||||
return link_fd < 0 && err == -EBADF;
|
||||
}
|
||||
|
||||
static int probe_kern_bpf_cookie(int token_fd)
|
||||
{
|
||||
struct bpf_insn insns[] = {
|
||||
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_attach_cookie),
|
||||
BPF_EXIT_INSN(),
|
||||
};
|
||||
LIBBPF_OPTS(bpf_prog_load_opts, opts, .token_fd = token_fd);
|
||||
int ret, insn_cnt = ARRAY_SIZE(insns);
|
||||
|
||||
ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
|
||||
return probe_fd(ret);
|
||||
}
|
||||
|
||||
static int probe_kern_btf_enum64(int token_fd)
|
||||
{
|
||||
static const char strs[] = "\0enum64";
|
||||
__u32 types[] = {
|
||||
BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8),
|
||||
};
|
||||
|
||||
return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
|
||||
strs, sizeof(strs), token_fd));
|
||||
}
|
||||
|
||||
typedef int (*feature_probe_fn)(int /* token_fd */);
|
||||
|
||||
static struct kern_feature_cache feature_cache;
|
||||
|
||||
static struct kern_feature_desc {
|
||||
const char *desc;
|
||||
feature_probe_fn probe;
|
||||
} feature_probes[__FEAT_CNT] = {
|
||||
[FEAT_PROG_NAME] = {
|
||||
"BPF program name", probe_kern_prog_name,
|
||||
},
|
||||
[FEAT_GLOBAL_DATA] = {
|
||||
"global variables", probe_kern_global_data,
|
||||
},
|
||||
[FEAT_BTF] = {
|
||||
"minimal BTF", probe_kern_btf,
|
||||
},
|
||||
[FEAT_BTF_FUNC] = {
|
||||
"BTF functions", probe_kern_btf_func,
|
||||
},
|
||||
[FEAT_BTF_GLOBAL_FUNC] = {
|
||||
"BTF global function", probe_kern_btf_func_global,
|
||||
},
|
||||
[FEAT_BTF_DATASEC] = {
|
||||
"BTF data section and variable", probe_kern_btf_datasec,
|
||||
},
|
||||
[FEAT_ARRAY_MMAP] = {
|
||||
"ARRAY map mmap()", probe_kern_array_mmap,
|
||||
},
|
||||
[FEAT_EXP_ATTACH_TYPE] = {
|
||||
"BPF_PROG_LOAD expected_attach_type attribute",
|
||||
probe_kern_exp_attach_type,
|
||||
},
|
||||
[FEAT_PROBE_READ_KERN] = {
|
||||
"bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
|
||||
},
|
||||
[FEAT_PROG_BIND_MAP] = {
|
||||
"BPF_PROG_BIND_MAP support", probe_prog_bind_map,
|
||||
},
|
||||
[FEAT_MODULE_BTF] = {
|
||||
"module BTF support", probe_module_btf,
|
||||
},
|
||||
[FEAT_BTF_FLOAT] = {
|
||||
"BTF_KIND_FLOAT support", probe_kern_btf_float,
|
||||
},
|
||||
[FEAT_PERF_LINK] = {
|
||||
"BPF perf link support", probe_perf_link,
|
||||
},
|
||||
[FEAT_BTF_DECL_TAG] = {
|
||||
"BTF_KIND_DECL_TAG support", probe_kern_btf_decl_tag,
|
||||
},
|
||||
[FEAT_BTF_TYPE_TAG] = {
|
||||
"BTF_KIND_TYPE_TAG support", probe_kern_btf_type_tag,
|
||||
},
|
||||
[FEAT_MEMCG_ACCOUNT] = {
|
||||
"memcg-based memory accounting", probe_memcg_account,
|
||||
},
|
||||
[FEAT_BPF_COOKIE] = {
|
||||
"BPF cookie support", probe_kern_bpf_cookie,
|
||||
},
|
||||
[FEAT_BTF_ENUM64] = {
|
||||
"BTF_KIND_ENUM64 support", probe_kern_btf_enum64,
|
||||
},
|
||||
[FEAT_SYSCALL_WRAPPER] = {
|
||||
"Kernel using syscall wrapper", probe_kern_syscall_wrapper,
|
||||
},
|
||||
[FEAT_UPROBE_MULTI_LINK] = {
|
||||
"BPF multi-uprobe link support", probe_uprobe_multi_link,
|
||||
},
|
||||
};
|
||||
|
||||
bool feat_supported(struct kern_feature_cache *cache, enum kern_feature_id feat_id)
|
||||
{
|
||||
struct kern_feature_desc *feat = &feature_probes[feat_id];
|
||||
int ret;
|
||||
|
||||
/* assume global feature cache, unless custom one is provided */
|
||||
if (!cache)
|
||||
cache = &feature_cache;
|
||||
|
||||
if (READ_ONCE(cache->res[feat_id]) == FEAT_UNKNOWN) {
|
||||
ret = feat->probe(cache->token_fd);
|
||||
if (ret > 0) {
|
||||
WRITE_ONCE(cache->res[feat_id], FEAT_SUPPORTED);
|
||||
} else if (ret == 0) {
|
||||
WRITE_ONCE(cache->res[feat_id], FEAT_MISSING);
|
||||
} else {
|
||||
pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret);
|
||||
WRITE_ONCE(cache->res[feat_id], FEAT_MISSING);
|
||||
}
|
||||
}
|
||||
|
||||
return READ_ONCE(cache->res[feat_id]) == FEAT_SUPPORTED;
|
||||
}
|
@ -59,6 +59,8 @@
|
||||
#define BPF_FS_MAGIC 0xcafe4a11
|
||||
#endif
|
||||
|
||||
#define BPF_FS_DEFAULT_PATH "/sys/fs/bpf"
|
||||
|
||||
#define BPF_INSN_SZ (sizeof(struct bpf_insn))
|
||||
|
||||
/* vsprintf() in __base_pr() uses nonliteral format string. It may break
|
||||
@ -693,6 +695,10 @@ struct bpf_object {
|
||||
|
||||
struct usdt_manager *usdt_man;
|
||||
|
||||
struct kern_feature_cache *feat_cache;
|
||||
char *token_path;
|
||||
int token_fd;
|
||||
|
||||
char path[];
|
||||
};
|
||||
|
||||
@ -2192,7 +2198,7 @@ static int build_map_pin_path(struct bpf_map *map, const char *path)
|
||||
int err;
|
||||
|
||||
if (!path)
|
||||
path = "/sys/fs/bpf";
|
||||
path = BPF_FS_DEFAULT_PATH;
|
||||
|
||||
err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
|
||||
if (err)
|
||||
@ -3054,9 +3060,15 @@ static bool prog_needs_vmlinux_btf(struct bpf_program *prog)
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool map_needs_vmlinux_btf(struct bpf_map *map)
|
||||
{
|
||||
return bpf_map__is_struct_ops(map);
|
||||
}
|
||||
|
||||
static bool obj_needs_vmlinux_btf(const struct bpf_object *obj)
|
||||
{
|
||||
struct bpf_program *prog;
|
||||
struct bpf_map *map;
|
||||
int i;
|
||||
|
||||
/* CO-RE relocations need kernel BTF, only when btf_custom_path
|
||||
@ -3081,6 +3093,11 @@ static bool obj_needs_vmlinux_btf(const struct bpf_object *obj)
|
||||
return true;
|
||||
}
|
||||
|
||||
bpf_object__for_each_map(map, obj) {
|
||||
if (map_needs_vmlinux_btf(map))
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -3268,7 +3285,7 @@ skip_exception_cb:
|
||||
} else {
|
||||
/* currently BPF_BTF_LOAD only supports log_level 1 */
|
||||
err = btf_load_into_kernel(kern_btf, obj->log_buf, obj->log_size,
|
||||
obj->log_level ? 1 : 0);
|
||||
obj->log_level ? 1 : 0, obj->token_fd);
|
||||
}
|
||||
if (sanitize) {
|
||||
if (!err) {
|
||||
@ -4591,6 +4608,63 @@ int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bpf_object_prepare_token(struct bpf_object *obj)
|
||||
{
|
||||
const char *bpffs_path;
|
||||
int bpffs_fd = -1, token_fd, err;
|
||||
bool mandatory;
|
||||
enum libbpf_print_level level;
|
||||
|
||||
/* token is already set up */
|
||||
if (obj->token_fd > 0)
|
||||
return 0;
|
||||
/* token is explicitly prevented */
|
||||
if (obj->token_fd < 0) {
|
||||
pr_debug("object '%s': token is prevented, skipping...\n", obj->name);
|
||||
/* reset to zero to avoid extra checks during map_create and prog_load steps */
|
||||
obj->token_fd = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
mandatory = obj->token_path != NULL;
|
||||
level = mandatory ? LIBBPF_WARN : LIBBPF_DEBUG;
|
||||
|
||||
bpffs_path = obj->token_path ?: BPF_FS_DEFAULT_PATH;
|
||||
bpffs_fd = open(bpffs_path, O_DIRECTORY, O_RDWR);
|
||||
if (bpffs_fd < 0) {
|
||||
err = -errno;
|
||||
__pr(level, "object '%s': failed (%d) to open BPF FS mount at '%s'%s\n",
|
||||
obj->name, err, bpffs_path,
|
||||
mandatory ? "" : ", skipping optional step...");
|
||||
return mandatory ? err : 0;
|
||||
}
|
||||
|
||||
token_fd = bpf_token_create(bpffs_fd, 0);
|
||||
close(bpffs_fd);
|
||||
if (token_fd < 0) {
|
||||
if (!mandatory && token_fd == -ENOENT) {
|
||||
pr_debug("object '%s': BPF FS at '%s' doesn't have BPF token delegation set up, skipping...\n",
|
||||
obj->name, bpffs_path);
|
||||
return 0;
|
||||
}
|
||||
__pr(level, "object '%s': failed (%d) to create BPF token from '%s'%s\n",
|
||||
obj->name, token_fd, bpffs_path,
|
||||
mandatory ? "" : ", skipping optional step...");
|
||||
return mandatory ? token_fd : 0;
|
||||
}
|
||||
|
||||
obj->feat_cache = calloc(1, sizeof(*obj->feat_cache));
|
||||
if (!obj->feat_cache) {
|
||||
close(token_fd);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
obj->token_fd = token_fd;
|
||||
obj->feat_cache->token_fd = token_fd;
|
||||
|
||||
return 0;
|
||||
}

static int
bpf_object__probe_loading(struct bpf_object *obj)
{
@@ -4600,6 +4674,7 @@ bpf_object__probe_loading(struct bpf_object *obj)
		BPF_EXIT_INSN(),
	};
	int ret, insn_cnt = ARRAY_SIZE(insns);
	LIBBPF_OPTS(bpf_prog_load_opts, opts, .token_fd = obj->token_fd);

	if (obj->gen_loader)
		return 0;
@@ -4609,9 +4684,9 @@ bpf_object__probe_loading(struct bpf_object *obj)
		pr_warn("Failed to bump RLIMIT_MEMLOCK (err = %d), you might need to do it explicitly!\n", ret);

	/* make sure basic loading works */
	ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
	ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &opts);
	if (ret < 0)
		ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, NULL);
		ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
	if (ret < 0) {
		ret = errno;
		cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
@@ -4626,462 +4701,18 @@ bpf_object__probe_loading(struct bpf_object *obj)
	return 0;
}

static int probe_fd(int fd)
{
	if (fd >= 0)
		close(fd);
	return fd >= 0;
}

static int probe_kern_prog_name(void)
{
	const size_t attr_sz = offsetofend(union bpf_attr, prog_name);
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.license = ptr_to_u64("GPL");
	attr.insns = ptr_to_u64(insns);
	attr.insn_cnt = (__u32)ARRAY_SIZE(insns);
	libbpf_strlcpy(attr.prog_name, "libbpf_nametest", sizeof(attr.prog_name));

	/* make sure loading with name works */
	ret = sys_bpf_prog_load(&attr, attr_sz, PROG_LOAD_ATTEMPTS);
	return probe_fd(ret);
}

static int probe_kern_global_data(void)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	struct bpf_insn insns[] = {
		BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
		BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	int ret, map, insn_cnt = ARRAY_SIZE(insns);

	map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_global", sizeof(int), 32, 1, NULL);
	if (map < 0) {
		ret = -errno;
		cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
		pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
			__func__, cp, -ret);
		return ret;
	}

	insns[0].imm = map;

	ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
	close(map);
	return probe_fd(ret);
}

static int probe_kern_btf(void)
{
	static const char strs[] = "\0int";
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs)));
}

static int probe_kern_btf_func(void)
{
	static const char strs[] = "\0int\0x\0a";
	/* void x(int a) {} */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
		/* FUNC_PROTO */ /* [2] */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
		BTF_PARAM_ENC(7, 1),
		/* FUNC x */ /* [3] */
		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs)));
}

static int probe_kern_btf_func_global(void)
{
	static const char strs[] = "\0int\0x\0a";
	/* static void x(int a) {} */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
		/* FUNC_PROTO */ /* [2] */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
		BTF_PARAM_ENC(7, 1),
		/* FUNC x BTF_FUNC_GLOBAL */ /* [3] */
		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs)));
}

static int probe_kern_btf_datasec(void)
{
	static const char strs[] = "\0x\0.data";
	/* static int a; */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
		/* VAR x */ /* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
		BTF_VAR_STATIC,
		/* DATASEC val */ /* [3] */
		BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
		BTF_VAR_SECINFO_ENC(2, 0, 4),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs)));
}

static int probe_kern_btf_float(void)
{
	static const char strs[] = "\0float";
	__u32 types[] = {
		/* float */
		BTF_TYPE_FLOAT_ENC(1, 4),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs)));
}

static int probe_kern_btf_decl_tag(void)
{
	static const char strs[] = "\0tag";
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
		/* VAR x */ /* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
		BTF_VAR_STATIC,
		/* attr */
		BTF_TYPE_DECL_TAG_ENC(1, 2, -1),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs)));
}

static int probe_kern_btf_type_tag(void)
{
	static const char strs[] = "\0tag";
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
		/* attr */
		BTF_TYPE_TYPE_TAG_ENC(1, 1), /* [2] */
		/* ptr */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2), /* [3] */
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs)));
}

static int probe_kern_array_mmap(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_MMAPABLE);
	int fd;

	fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_mmap", sizeof(int), sizeof(int), 1, &opts);
	return probe_fd(fd);
}

static int probe_kern_exp_attach_type(void)
{
	LIBBPF_OPTS(bpf_prog_load_opts, opts, .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE);
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	int fd, insn_cnt = ARRAY_SIZE(insns);

	/* use any valid combination of program type and (optional)
	 * non-zero expected attach type (i.e., not a BPF_CGROUP_INET_INGRESS)
	 * to see if kernel supports expected_attach_type field for
	 * BPF_PROG_LOAD command
	 */
	fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", insns, insn_cnt, &opts);
	return probe_fd(fd);
}

static int probe_kern_probe_read_kernel(void)
{
	struct bpf_insn insns[] = {
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), /* r1 = r10 (fp) */
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), /* r1 += -8 */
		BPF_MOV64_IMM(BPF_REG_2, 8), /* r2 = 8 */
		BPF_MOV64_IMM(BPF_REG_3, 0), /* r3 = 0 */
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
		BPF_EXIT_INSN(),
	};
	int fd, insn_cnt = ARRAY_SIZE(insns);

	fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, NULL);
	return probe_fd(fd);
}

static int probe_prog_bind_map(void)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	int ret, map, prog, insn_cnt = ARRAY_SIZE(insns);

	map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_det_bind", sizeof(int), 32, 1, NULL);
	if (map < 0) {
		ret = -errno;
		cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
		pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
			__func__, cp, -ret);
		return ret;
	}

	prog = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
	if (prog < 0) {
		close(map);
		return 0;
	}

	ret = bpf_prog_bind_map(prog, map, NULL);

	close(map);
	close(prog);

	return ret >= 0;
}

static int probe_module_btf(void)
{
	static const char strs[] = "\0int";
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
	};
	struct bpf_btf_info info;
	__u32 len = sizeof(info);
	char name[16];
	int fd, err;

	fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs));
	if (fd < 0)
		return 0; /* BTF not supported at all */

	memset(&info, 0, sizeof(info));
	info.name = ptr_to_u64(name);
	info.name_len = sizeof(name);

	/* check that BPF_OBJ_GET_INFO_BY_FD supports specifying name pointer;
	 * kernel's module BTF support coincides with support for
	 * name/name_len fields in struct bpf_btf_info.
	 */
	err = bpf_btf_get_info_by_fd(fd, &info, &len);
	close(fd);
	return !err;
}

static int probe_perf_link(void)
{
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	int prog_fd, link_fd, err;

	prog_fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL",
				insns, ARRAY_SIZE(insns), NULL);
	if (prog_fd < 0)
		return -errno;

	/* use invalid perf_event FD to get EBADF, if link is supported;
	 * otherwise EINVAL should be returned
	 */
	link_fd = bpf_link_create(prog_fd, -1, BPF_PERF_EVENT, NULL);
	err = -errno; /* close() can clobber errno */

	if (link_fd >= 0)
		close(link_fd);
	close(prog_fd);

	return link_fd < 0 && err == -EBADF;
}

static int probe_uprobe_multi_link(void)
{
	LIBBPF_OPTS(bpf_prog_load_opts, load_opts,
		.expected_attach_type = BPF_TRACE_UPROBE_MULTI,
	);
	LIBBPF_OPTS(bpf_link_create_opts, link_opts);
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	int prog_fd, link_fd, err;
	unsigned long offset = 0;

	prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL",
				insns, ARRAY_SIZE(insns), &load_opts);
	if (prog_fd < 0)
		return -errno;

	/* Creating uprobe in '/' binary should fail with -EBADF. */
	link_opts.uprobe_multi.path = "/";
	link_opts.uprobe_multi.offsets = &offset;
	link_opts.uprobe_multi.cnt = 1;

	link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
	err = -errno; /* close() can clobber errno */

	if (link_fd >= 0)
		close(link_fd);
	close(prog_fd);

	return link_fd < 0 && err == -EBADF;
}

static int probe_kern_bpf_cookie(void)
{
	struct bpf_insn insns[] = {
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_attach_cookie),
		BPF_EXIT_INSN(),
	};
	int ret, insn_cnt = ARRAY_SIZE(insns);

	ret = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL", insns, insn_cnt, NULL);
	return probe_fd(ret);
}

static int probe_kern_btf_enum64(void)
{
	static const char strs[] = "\0enum64";
	__u32 types[] = {
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs)));
}

static int probe_kern_syscall_wrapper(void);

enum kern_feature_result {
	FEAT_UNKNOWN = 0,
	FEAT_SUPPORTED = 1,
	FEAT_MISSING = 2,
};

typedef int (*feature_probe_fn)(void);

static struct kern_feature_desc {
	const char *desc;
	feature_probe_fn probe;
	enum kern_feature_result res;
} feature_probes[__FEAT_CNT] = {
	[FEAT_PROG_NAME] = {
		"BPF program name", probe_kern_prog_name,
	},
	[FEAT_GLOBAL_DATA] = {
		"global variables", probe_kern_global_data,
	},
	[FEAT_BTF] = {
		"minimal BTF", probe_kern_btf,
	},
	[FEAT_BTF_FUNC] = {
		"BTF functions", probe_kern_btf_func,
	},
	[FEAT_BTF_GLOBAL_FUNC] = {
		"BTF global function", probe_kern_btf_func_global,
	},
	[FEAT_BTF_DATASEC] = {
		"BTF data section and variable", probe_kern_btf_datasec,
	},
	[FEAT_ARRAY_MMAP] = {
		"ARRAY map mmap()", probe_kern_array_mmap,
	},
	[FEAT_EXP_ATTACH_TYPE] = {
		"BPF_PROG_LOAD expected_attach_type attribute",
		probe_kern_exp_attach_type,
	},
	[FEAT_PROBE_READ_KERN] = {
		"bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
	},
	[FEAT_PROG_BIND_MAP] = {
		"BPF_PROG_BIND_MAP support", probe_prog_bind_map,
	},
	[FEAT_MODULE_BTF] = {
		"module BTF support", probe_module_btf,
	},
	[FEAT_BTF_FLOAT] = {
		"BTF_KIND_FLOAT support", probe_kern_btf_float,
	},
	[FEAT_PERF_LINK] = {
		"BPF perf link support", probe_perf_link,
	},
	[FEAT_BTF_DECL_TAG] = {
		"BTF_KIND_DECL_TAG support", probe_kern_btf_decl_tag,
	},
	[FEAT_BTF_TYPE_TAG] = {
		"BTF_KIND_TYPE_TAG support", probe_kern_btf_type_tag,
	},
	[FEAT_MEMCG_ACCOUNT] = {
		"memcg-based memory accounting", probe_memcg_account,
	},
	[FEAT_BPF_COOKIE] = {
		"BPF cookie support", probe_kern_bpf_cookie,
	},
	[FEAT_BTF_ENUM64] = {
		"BTF_KIND_ENUM64 support", probe_kern_btf_enum64,
	},
	[FEAT_SYSCALL_WRAPPER] = {
		"Kernel using syscall wrapper", probe_kern_syscall_wrapper,
	},
	[FEAT_UPROBE_MULTI_LINK] = {
		"BPF multi-uprobe link support", probe_uprobe_multi_link,
	},
};

bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
{
	struct kern_feature_desc *feat = &feature_probes[feat_id];
	int ret;

	if (obj && obj->gen_loader)
		/* To generate loader program assume the latest kernel
		 * to avoid doing extra prog_load, map_create syscalls.
		 */
		return true;

	if (READ_ONCE(feat->res) == FEAT_UNKNOWN) {
		ret = feat->probe();
		if (ret > 0) {
			WRITE_ONCE(feat->res, FEAT_SUPPORTED);
		} else if (ret == 0) {
			WRITE_ONCE(feat->res, FEAT_MISSING);
		} else {
			pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret);
			WRITE_ONCE(feat->res, FEAT_MISSING);
		}
	}
	if (obj->token_fd)
		return feat_supported(obj->feat_cache, feat_id);

	return READ_ONCE(feat->res) == FEAT_SUPPORTED;
	return feat_supported(NULL, feat_id);
}
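With the hunk above, feature detection becomes token-aware: when obj->token_fd is set, kernel_supports() defers to the per-object feat_supported() cache instead of the process-wide probe table. A hedged sketch of the calling convention only, assuming the libbpf_internal.h context shown later in this diff; attach_with_optional_cookie() is a hypothetical caller, while kernel_supports() and FEAT_BPF_COOKIE are the internal libbpf APIs shown above.

/* Hypothetical libbpf-internal caller: fall back gracefully when the
 * (possibly token-scoped) probe says BPF cookies are unavailable.
 */
static int attach_with_optional_cookie(struct bpf_object *obj, __u64 *cookie)
{
	if (!kernel_supports(obj, FEAT_BPF_COOKIE)) {
		pr_debug("kernel doesn't support BPF cookies, attaching without one\n");
		*cookie = 0;
	}
	/* ... proceed with link creation, passing *cookie only when non-zero ... */
	return 0;
}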

static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
@@ -5200,6 +4831,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
	create_attr.map_flags = def->map_flags;
	create_attr.numa_node = map->numa_node;
	create_attr.map_extra = map->map_extra;
	create_attr.token_fd = obj->token_fd;

	if (bpf_map__is_struct_ops(map))
		create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
@@ -7035,6 +6667,7 @@ static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog
	load_attr.attach_btf_id = prog->attach_btf_id;
	load_attr.kern_version = kern_version;
	load_attr.prog_ifindex = prog->prog_ifindex;
	load_attr.token_fd = obj->token_fd;

	/* specify func_info/line_info only if kernel supports them */
	btf_fd = bpf_object__btf_fd(obj);
@@ -7496,10 +7129,10 @@ static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object
static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, size_t obj_buf_sz,
					  const struct bpf_object_open_opts *opts)
{
	const char *obj_name, *kconfig, *btf_tmp_path;
	const char *obj_name, *kconfig, *btf_tmp_path, *token_path;
	struct bpf_object *obj;
	char tmp_name[64];
	int err;
	int err, token_fd;
	char *log_buf;
	size_t log_size;
	__u32 log_level;
@@ -7533,6 +7166,28 @@ static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf,
	if (log_size && !log_buf)
		return ERR_PTR(-EINVAL);

	token_path = OPTS_GET(opts, bpf_token_path, NULL);
	token_fd = OPTS_GET(opts, bpf_token_fd, -1);
	/* non-empty token path can't be combined with invalid token FD */
	if (token_path && token_path[0] != '\0' && token_fd < 0)
		return ERR_PTR(-EINVAL);
	/* empty token path can't be combined with valid token FD */
	if (token_path && token_path[0] == '\0' && token_fd > 0)
		return ERR_PTR(-EINVAL);
	/* if user didn't specify bpf_token_path/bpf_token_fd explicitly,
	 * check if LIBBPF_BPF_TOKEN_PATH envvar was set and treat it as
	 * bpf_token_path option
	 */
	if (token_fd == 0 && !token_path)
		token_path = getenv("LIBBPF_BPF_TOKEN_PATH");
	/* empty token_path is equivalent to invalid token_fd */
	if (token_path && token_path[0] == '\0') {
		token_path = NULL;
		token_fd = -1;
	}
	if (token_path && strlen(token_path) >= PATH_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
	if (IS_ERR(obj))
		return obj;
@@ -7541,6 +7196,19 @@ static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf,
	obj->log_size = log_size;
	obj->log_level = log_level;

	obj->token_fd = token_fd <= 0 ? token_fd : dup_good_fd(token_fd);
	if (token_fd > 0 && obj->token_fd < 0) {
		err = -errno;
		goto out;
	}
	if (token_path) {
		obj->token_path = strdup(token_path);
		if (!obj->token_path) {
			err = -ENOMEM;
			goto out;
		}
	}

	btf_tmp_path = OPTS_GET(opts, btf_custom_path, NULL);
	if (btf_tmp_path) {
		if (strlen(btf_tmp_path) >= PATH_MAX) {
@@ -8051,7 +7719,8 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch
	if (obj->gen_loader)
		bpf_gen__init(obj->gen_loader, extra_log_level, obj->nr_programs, obj->nr_maps);

	err = bpf_object__probe_loading(obj);
	err = bpf_object_prepare_token(obj);
	err = err ? : bpf_object__probe_loading(obj);
	err = err ? : bpf_object__load_vmlinux_btf(obj, false);
	err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
	err = err ? : bpf_object__sanitize_and_load_btf(obj);
@@ -8588,6 +8257,11 @@ void bpf_object__close(struct bpf_object *obj)
	}
	zfree(&obj->programs);

	zfree(&obj->feat_cache);
	zfree(&obj->token_path);
	if (obj->token_fd > 0)
		close(obj->token_fd);

	free(obj);
}

@@ -10601,7 +10275,7 @@ static const char *arch_specific_syscall_pfx(void)
#endif
}

static int probe_kern_syscall_wrapper(void)
int probe_kern_syscall_wrapper(int token_fd)
{
	char syscall_name[64];
	const char *ksys_pfx;

@@ -177,10 +177,45 @@ struct bpf_object_open_opts {
	 * logs through its print callback.
	 */
	__u32 kernel_log_level;
	/* FD of a BPF token instantiated by user through bpf_token_create()
	 * API. BPF object will keep dup()'ed FD internally, so passed token
	 * FD can be closed after BPF object/skeleton open step.
	 *
	 * Setting bpf_token_fd to negative value disables libbpf's automatic
	 * attempt to create BPF token from default BPF FS mount point
	 * (/sys/fs/bpf), in case this default behavior is undesirable.
	 *
	 * If bpf_token_path and bpf_token_fd are not specified, libbpf will
	 * consult LIBBPF_BPF_TOKEN_PATH environment variable. If set, it will
	 * be taken as a value of bpf_token_path option and will force libbpf
	 * to either create BPF token from provided custom BPF FS path, or
	 * will disable implicit BPF token creation, if envvar value is an
	 * empty string.
	 *
	 * bpf_token_path and bpf_token_fd are mutually exclusive and only one
	 * of those options should be set. Either of them overrides
	 * LIBBPF_BPF_TOKEN_PATH envvar.
	 */
	int bpf_token_fd;
	/* Path to BPF FS mount point to derive BPF token from.
	 *
	 * Created BPF token will be used for all bpf() syscall operations
	 * that accept BPF token (e.g., map creation, BTF and program loads,
	 * etc) automatically within instantiated BPF object.
	 *
	 * Setting bpf_token_path option to empty string disables libbpf's
	 * automatic attempt to create BPF token from default BPF FS mount
	 * point (/sys/fs/bpf), in case this default behavior is undesirable.
	 *
	 * bpf_token_path and bpf_token_fd are mutually exclusive and only one
	 * of those options should be set. Either of them overrides
	 * LIBBPF_BPF_TOKEN_PATH envvar.
	 */
	const char *bpf_token_path;

	size_t :0;
};
#define bpf_object_open_opts__last_field kernel_log_level
#define bpf_object_open_opts__last_field bpf_token_path
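From the application side, the two options documented above are all that is needed to opt into BPF tokens at object-open time. A possible usage sketch follows; the object file name and the mount path are assumptions for the example, while bpf_object__open_file(), bpf_object__load() and LIBBPF_OPTS are existing libbpf APIs.

/* Illustrative application-side use of bpf_token_path (paths are assumed). */
#include <errno.h>
#include <bpf/libbpf.h>

int open_and_load_with_token(void)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts,
		.bpf_token_path = "/sys/fs/bpf/token", /* hypothetical delegated mount */
	);
	struct bpf_object *obj;

	obj = bpf_object__open_file("my_prog.bpf.o", &opts);
	if (!obj)
		return -errno;

	/* maps, BTF and programs created from this object will carry the token */
	return bpf_object__load(obj);
}

Alternatively, per the doc comment above, exporting LIBBPF_BPF_TOKEN_PATH with the same path selects this behavior without code changes, and an empty value disables implicit token creation.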

/**
 * @brief **bpf_object__open()** creates a bpf_object by opening

@@ -401,6 +401,7 @@ LIBBPF_1.3.0 {
		bpf_program__attach_netkit;
		bpf_program__attach_tcx;
		bpf_program__attach_uprobe_multi;
		bpf_token_create;
		ring__avail_data_size;
		ring__consume;
		ring__consumer_pos;

@@ -360,15 +360,32 @@ enum kern_feature_id {
	__FEAT_CNT,
};

int probe_memcg_account(void);
enum kern_feature_result {
	FEAT_UNKNOWN = 0,
	FEAT_SUPPORTED = 1,
	FEAT_MISSING = 2,
};

struct kern_feature_cache {
	enum kern_feature_result res[__FEAT_CNT];
	int token_fd;
};

bool feat_supported(struct kern_feature_cache *cache, enum kern_feature_id feat_id);
bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id);

int probe_kern_syscall_wrapper(int token_fd);
int probe_memcg_account(int token_fd);
int bump_rlimit_memlock(void);

int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz);
int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz);
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
			 const char *str_sec, size_t str_len);
int btf_load_into_kernel(struct btf *btf, char *log_buf, size_t log_sz, __u32 log_level);
			 const char *str_sec, size_t str_len,
			 int token_fd);
int btf_load_into_kernel(struct btf *btf,
			 char *log_buf, size_t log_sz, __u32 log_level,
			 int token_fd);

struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf);
void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
@@ -532,6 +549,17 @@ static inline bool is_ldimm64_insn(struct bpf_insn *insn)
	return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
}

/* Unconditionally dup FD, ensuring it doesn't use [0, 2] range.
 * Original FD is not closed or altered in any other way.
 * Preserves original FD value, if it's invalid (negative).
 */
static inline int dup_good_fd(int fd)
{
	if (fd < 0)
		return fd;
	return fcntl(fd, F_DUPFD_CLOEXEC, 3);
}
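dup_good_fd() is the piece that lets bpf_object_open() keep its own copy of a caller-supplied token FD (see the libbpf.c hunk earlier) while staying clear of the stdio descriptor range. A small hedged sketch of that pattern, assuming the libbpf_internal.h context; stash_user_fd() is a hypothetical wrapper, not a libbpf function.

/* Hypothetical wrapper mirroring how bpf_object_open() stashes a user-provided
 * token FD: non-positive values pass through unchanged, valid FDs are dup'ed
 * above fd 2 so the caller may close the original immediately.
 */
static int stash_user_fd(int user_fd, int *internal_fd)
{
	*internal_fd = user_fd <= 0 ? user_fd : dup_good_fd(user_fd);
	if (user_fd > 0 && *internal_fd < 0)
		return -errno;
	return 0;
}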

/* if fd is stdin, stdout, or stderr, dup to a fd greater than 2
 * Takes ownership of the fd passed in, and closes it if calling
 * fcntl(fd, F_DUPFD_CLOEXEC, 3).
@@ -543,7 +571,7 @@ static inline int ensure_good_fd(int fd)
	if (fd < 0)
		return fd;
	if (fd < 3) {
		fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
		fd = dup_good_fd(fd);
		saved_errno = errno;
		close(old_fd);
		errno = saved_errno;

@@ -219,7 +219,8 @@ int libbpf_probe_bpf_prog_type(enum bpf_prog_type prog_type, const void *opts)
}

int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
			 const char *str_sec, size_t str_len)
			 const char *str_sec, size_t str_len,
			 int token_fd)
{
	struct btf_header hdr = {
		.magic = BTF_MAGIC,
@@ -229,6 +230,7 @@ int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
		.str_off = types_len,
		.str_len = str_len,
	};
	LIBBPF_OPTS(bpf_btf_load_opts, opts, .token_fd = token_fd);
	int btf_fd, btf_len;
	__u8 *raw_btf;

@@ -241,7 +243,7 @@ int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
	memcpy(raw_btf + hdr.hdr_len, raw_types, hdr.type_len);
	memcpy(raw_btf + hdr.hdr_len + hdr.type_len, str_sec, hdr.str_len);

	btf_fd = bpf_btf_load(raw_btf, btf_len, NULL);
	btf_fd = bpf_btf_load(raw_btf, btf_len, &opts);

	free(raw_btf);
	return btf_fd;
@@ -271,7 +273,7 @@ static int load_local_storage_btf(void)
	};

	return libbpf__load_raw_btf((char *)types, sizeof(types),
				    strs, sizeof(strs));
				    strs, sizeof(strs), 0);
}

static int probe_map_create(enum bpf_map_type map_type)

@@ -719,13 +719,25 @@ static int linker_sanity_check_elf(struct src_obj *obj)
		return -EINVAL;
	}

	if (sec->shdr->sh_addralign && !is_pow_of_2(sec->shdr->sh_addralign))
	if (sec->shdr->sh_addralign && !is_pow_of_2(sec->shdr->sh_addralign)) {
		pr_warn("ELF section #%zu alignment %llu is non pow-of-2 alignment in %s\n",
			sec->sec_idx, (long long unsigned)sec->shdr->sh_addralign,
			obj->filename);
		return -EINVAL;
	if (sec->shdr->sh_addralign != sec->data->d_align)
	}
	if (sec->shdr->sh_addralign != sec->data->d_align) {
		pr_warn("ELF section #%zu has inconsistent alignment addr=%llu != d=%llu in %s\n",
			sec->sec_idx, (long long unsigned)sec->shdr->sh_addralign,
			(long long unsigned)sec->data->d_align, obj->filename);
		return -EINVAL;
	}

	if (sec->shdr->sh_size != sec->data->d_size)
	if (sec->shdr->sh_size != sec->data->d_size) {
		pr_warn("ELF section #%zu has inconsistent section size sh=%llu != d=%llu in %s\n",
			sec->sec_idx, (long long unsigned)sec->shdr->sh_size,
			(long long unsigned)sec->data->d_size, obj->filename);
		return -EINVAL;
	}

	switch (sec->shdr->sh_type) {
	case SHT_SYMTAB:
@@ -737,8 +749,12 @@ static int linker_sanity_check_elf(struct src_obj *obj)
		break;
	case SHT_PROGBITS:
		if (sec->shdr->sh_flags & SHF_EXECINSTR) {
			if (sec->shdr->sh_size % sizeof(struct bpf_insn) != 0)
			if (sec->shdr->sh_size % sizeof(struct bpf_insn) != 0) {
				pr_warn("ELF section #%zu has unexpected size alignment %llu in %s\n",
					sec->sec_idx, (long long unsigned)sec->shdr->sh_size,
					obj->filename);
				return -EINVAL;
			}
		}
		break;
	case SHT_NOBITS:

@@ -2,5 +2,8 @@
#ifndef __LIBBPF_STR_ERROR_H
#define __LIBBPF_STR_ERROR_H

#define STRERR_BUFSIZE 128

char *libbpf_strerror_r(int err, char *dst, int len);

#endif /* __LIBBPF_STR_ERROR_H */
Some files were not shown because too many files have changed in this diff.