Mirror of https://github.com/edk2-porting/linux-next.git, synced 2024-12-24 21:24:00 +08:00
f3694e0012
This work adds BPF_CALL_<n>() macros and converts all the eBPF helper functions to use them, in a similar fashion to the SYSCALL_DEFINE<n>() macros in use today. The motivation is to hide all the register handling and all the necessary casts from the user, so that this is done automatically in the background when adding a BPF_CALL_<n>() call.

This makes current helpers easier to review, makes future helpers easier to write, avoids getting the casting mess wrong, and allows for extending all helpers at once (f.e. build time checks, etc). It also makes it easier to detect in code reviews that unused registers are not accidentally instrumented in the code, which would break compatibility with existing programs.

BPF_CALL_<n>() internals are quite similar to the SYSCALL_DEFINE<n>() ones, with some fundamental differences; for example, when generating the actual helper function that carries all u64 regs, we need to fill in the unused regs so that we always end up with five u64 regs as arguments.

I reviewed several generated BPF_CALL_<n>() variants (0-5) of the .i results and they all look as expected. No sparse issue was spotted. We also let this sit for a few days with Fengguang's kbuild test robot, and no issues were seen. On s390, it barked on the "uses dynamic stack allocation" notice, which is an old one from bpf_perf_event_output{,_tp}() reappearing here due to the conversion to the call wrapper, just telling that the perf raw record/frag sits on the stack (gcc with s390's -mwarn-dynamicstack), but that's all. Various runtime tests were fine as well. All eBPF helpers are now converted to use these macros, getting rid of a good chunk of the raw castings.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
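
As a rough before/after illustration of what the conversion looks like (a sketch based on the macros this change introduces, using bpf_map_lookup_elem as a representative helper), the open-coded form spells out the five u64 registers and the casts by hand:

	static u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
	{
		struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
		void *key = (void *) (unsigned long) r2;

		WARN_ON_ONCE(!rcu_read_lock_held());
		return (unsigned long) map->ops->map_lookup_elem(map, key);
	}

whereas with BPF_CALL_2() the register handling and casts are generated behind the scenes:

	BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
	{
		WARN_ON_ONCE(!rcu_read_lock_held());
		return (unsigned long) map->ops->map_lookup_elem(map, key);
	}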
738 lines
20 KiB
C
/*
 * Linux Socket Filter Data Structures
 */
#ifndef __LINUX_FILTER_H__
#define __LINUX_FILTER_H__

#include <stdarg.h>

#include <linux/atomic.h>
#include <linux/compat.h>
#include <linux/skbuff.h>
#include <linux/linkage.h>
#include <linux/printk.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/capability.h>

#include <net/sch_generic.h>

#include <asm/cacheflush.h>

#include <uapi/linux/filter.h>
#include <uapi/linux/bpf.h>

struct sk_buff;
struct sock;
struct seccomp_data;
struct bpf_prog_aux;

/* ArgX, context and stack frame pointer register positions. Note,
 * Arg1, Arg2, Arg3, etc are used as argument mappings of function
 * calls in BPF_CALL instruction.
 */
#define BPF_REG_ARG1	BPF_REG_1
#define BPF_REG_ARG2	BPF_REG_2
#define BPF_REG_ARG3	BPF_REG_3
#define BPF_REG_ARG4	BPF_REG_4
#define BPF_REG_ARG5	BPF_REG_5
#define BPF_REG_CTX	BPF_REG_6
#define BPF_REG_FP	BPF_REG_10

/* Additional register mappings for converted user programs. */
#define BPF_REG_A	BPF_REG_0
#define BPF_REG_X	BPF_REG_7
#define BPF_REG_TMP	BPF_REG_8

/* Kernel hidden auxiliary/helper register for hardening step.
 * Only used by eBPF JITs. It's nothing more than a temporary
 * register that JITs use internally, only that here it's part
 * of eBPF instructions that have been rewritten for blinding
 * constants. See JIT pre-step in bpf_jit_blind_constants().
 */
#define BPF_REG_AX	MAX_BPF_REG
#define MAX_BPF_JIT_REG	(MAX_BPF_REG + 1)

/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK	512

/* Helper macros for filter block array initializers. */

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG(OP, DST, SRC) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = 0 })

#define BPF_ALU32_REG(OP, DST, SRC) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_OP(OP) | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = 0 })

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_OP(OP) | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })

/* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */

#define BPF_ENDIAN(TYPE, DST, LEN) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_END | BPF_SRC(TYPE), \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = LEN })

/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_MOV | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = 0 })

#define BPF_MOV32_REG(DST, SRC) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_MOV | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = 0 })

/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV64_IMM(DST, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_MOV | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })

#define BPF_MOV32_IMM(DST, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_MOV | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })

/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM) \
	BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM) \
	((struct bpf_insn) { \
		.code = BPF_LD | BPF_DW | BPF_IMM, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = (__u32) (IMM) }), \
	((struct bpf_insn) { \
		.code = 0, /* zero is reserved opcode */ \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off = 0, \
		.imm = ((__u64) (IMM)) >> 32 })

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD) \
	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)

/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */

#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE), \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = IMM })

#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_MOV | BPF_SRC(TYPE), \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = IMM })

/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */

#define BPF_LD_ABS(SIZE, IMM) \
	((struct bpf_insn) { \
		.code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })

/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */

#define BPF_LD_IND(SIZE, SRC, IMM) \
	((struct bpf_insn) { \
		.code = BPF_LD | BPF_SIZE(SIZE) | BPF_IND, \
		.dst_reg = 0, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = IMM })

/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = 0 })

/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */

#define BPF_STX_XADD(SIZE, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = OFF, \
		.imm = IMM })

/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code = BPF_JMP | BPF_OP(OP) | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF) \
	((struct bpf_insn) { \
		.code = BPF_JMP | BPF_OP(OP) | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = OFF, \
		.imm = IMM })

/* Function call */

#define BPF_EMIT_CALL(FUNC) \
	((struct bpf_insn) { \
		.code = BPF_JMP | BPF_CALL, \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off = 0, \
		.imm = ((FUNC) - __bpf_call_base) })

/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \
	((struct bpf_insn) { \
		.code = CODE, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = IMM })

/* Program exit */

#define BPF_EXIT_INSN() \
	((struct bpf_insn) { \
		.code = BPF_JMP | BPF_EXIT, \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off = 0, \
		.imm = 0 })
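
/* Example (illustrative): the initializers above can be used to
 * hand-assemble a minimal eBPF program, e.g. one that sets the
 * return value to zero and exits:
 *
 *	struct bpf_insn prog[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		BPF_EXIT_INSN(),
 *	};
 */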

/* Internal classic blocks for direct assignment */

#define __BPF_STMT(CODE, K) \
	((struct sock_filter) BPF_STMT(CODE, K))

#define __BPF_JUMP(CODE, K, JT, JF) \
	((struct sock_filter) BPF_JUMP(CODE, K, JT, JF))
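
/* Example (illustrative): a classic block assigned directly; here
 * BPF_RET | BPF_K with 0xffffffff accepts the whole packet:
 *
 *	struct sock_filter f = __BPF_STMT(BPF_RET | BPF_K, 0xffffffff);
 */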

#define bytes_to_bpf_size(bytes) \
({ \
	int bpf_size = -EINVAL; \
	\
	if (bytes == sizeof(u8)) \
		bpf_size = BPF_B; \
	else if (bytes == sizeof(u16)) \
		bpf_size = BPF_H; \
	else if (bytes == sizeof(u32)) \
		bpf_size = BPF_W; \
	else if (bytes == sizeof(u64)) \
		bpf_size = BPF_DW; \
	\
	bpf_size; \
})

#define BPF_SIZEOF(type) \
({ \
	const int __size = bytes_to_bpf_size(sizeof(type)); \
	BUILD_BUG_ON(__size < 0); \
	__size; \
})

#define BPF_FIELD_SIZEOF(type, field) \
({ \
	const int __size = bytes_to_bpf_size(FIELD_SIZEOF(type, field)); \
	BUILD_BUG_ON(__size < 0); \
	__size; \
})
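
/* Example (illustrative): BPF_FIELD_SIZEOF(struct sk_buff, len) resolves
 * to BPF_W at build time, since skb->len is a 4-byte field; any field
 * size other than 1, 2, 4 or 8 bytes trips the BUILD_BUG_ON() above.
 */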

#define __BPF_MAP_0(m, v, ...) v
#define __BPF_MAP_1(m, v, t, a, ...) m(t, a)
#define __BPF_MAP_2(m, v, t, a, ...) m(t, a), __BPF_MAP_1(m, v, __VA_ARGS__)
#define __BPF_MAP_3(m, v, t, a, ...) m(t, a), __BPF_MAP_2(m, v, __VA_ARGS__)
#define __BPF_MAP_4(m, v, t, a, ...) m(t, a), __BPF_MAP_3(m, v, __VA_ARGS__)
#define __BPF_MAP_5(m, v, t, a, ...) m(t, a), __BPF_MAP_4(m, v, __VA_ARGS__)

#define __BPF_REG_0(...) __BPF_PAD(5)
#define __BPF_REG_1(...) __BPF_MAP(1, __VA_ARGS__), __BPF_PAD(4)
#define __BPF_REG_2(...) __BPF_MAP(2, __VA_ARGS__), __BPF_PAD(3)
#define __BPF_REG_3(...) __BPF_MAP(3, __VA_ARGS__), __BPF_PAD(2)
#define __BPF_REG_4(...) __BPF_MAP(4, __VA_ARGS__), __BPF_PAD(1)
#define __BPF_REG_5(...) __BPF_MAP(5, __VA_ARGS__)

#define __BPF_MAP(n, ...) __BPF_MAP_##n(__VA_ARGS__)
#define __BPF_REG(n, ...) __BPF_REG_##n(__VA_ARGS__)

#define __BPF_CAST(t, a) \
	(__force t) \
	(__force \
	 typeof(__builtin_choose_expr(sizeof(t) == sizeof(unsigned long), \
				      (unsigned long)0, (t)0))) a
#define __BPF_V void
#define __BPF_N

#define __BPF_DECL_ARGS(t, a) t a
#define __BPF_DECL_REGS(t, a) u64 a

#define __BPF_PAD(n) \
	__BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2, \
		  u64, __ur_3, u64, __ur_4, u64, __ur_5)

#define BPF_CALL_x(x, name, ...) \
	static __always_inline \
	u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
	u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \
	u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \
	{ \
		return ____##name(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
	} \
	static __always_inline \
	u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))

#define BPF_CALL_0(name, ...)	BPF_CALL_x(0, name, __VA_ARGS__)
#define BPF_CALL_1(name, ...)	BPF_CALL_x(1, name, __VA_ARGS__)
#define BPF_CALL_2(name, ...)	BPF_CALL_x(2, name, __VA_ARGS__)
#define BPF_CALL_3(name, ...)	BPF_CALL_x(3, name, __VA_ARGS__)
#define BPF_CALL_4(name, ...)	BPF_CALL_x(4, name, __VA_ARGS__)
#define BPF_CALL_5(name, ...)	BPF_CALL_x(5, name, __VA_ARGS__)
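
/* Example (illustrative): BPF_CALL_2(bpf_foo, struct bpf_map *, map,
 * void *, key) roughly expands to
 *
 *	static __always_inline u64 ____bpf_foo(struct bpf_map *map, void *key);
 *	u64 bpf_foo(u64 map, u64 key, u64 __ur_1, u64 __ur_2, u64 __ur_3);
 *	u64 bpf_foo(u64 map, u64 key, u64 __ur_1, u64 __ur_2, u64 __ur_3)
 *	{
 *		return ____bpf_foo((struct bpf_map *)(unsigned long) map,
 *				   (void *)(unsigned long) key);
 *	}
 *	static __always_inline u64 ____bpf_foo(struct bpf_map *map, void *key)
 *
 * so the helper body following the macro sees properly typed arguments,
 * while the wrapper keeps the five-u64 register calling convention intact.
 */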

#ifdef CONFIG_COMPAT
/* A struct sock_filter is architecture independent. */
struct compat_sock_fprog {
	u16 len;
	compat_uptr_t filter;	/* struct sock_filter * */
};
#endif

struct sock_fprog_kern {
	u16 len;
	struct sock_filter *filter;
};

struct bpf_binary_header {
	unsigned int pages;
	u8 image[];
};

struct bpf_prog {
	u16 pages;			/* Number of allocated pages */
	kmemcheck_bitfield_begin(meta);
	u16 jited:1,			/* Is our filter JIT'ed? */
	    gpl_compatible:1,		/* Is filter GPL compatible? */
	    cb_access:1,		/* Is control block accessed? */
	    dst_needed:1;		/* Do we need dst entry? */
	kmemcheck_bitfield_end(meta);
	u32 len;			/* Number of filter blocks */
	enum bpf_prog_type type;	/* Type of BPF program */
	struct bpf_prog_aux *aux;	/* Auxiliary fields */
	struct sock_fprog_kern *orig_prog;	/* Original BPF program */
	unsigned int (*bpf_func)(const struct sk_buff *skb,
				 const struct bpf_insn *filter);
	/* Instructions for interpreter */
	union {
		struct sock_filter insns[0];
		struct bpf_insn insnsi[0];
	};
};

struct sk_filter {
	atomic_t refcnt;
	struct rcu_head rcu;
	struct bpf_prog *prog;
};

#define BPF_PROG_RUN(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi)

#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN

struct bpf_skb_data_end {
	struct qdisc_skb_cb qdisc_cb;
	void *data_end;
};

struct xdp_buff {
	void *data;
	void *data_end;
};

/* compute the linear packet data range [data, data_end) which
 * will be accessed by cls_bpf and act_bpf programs
 */
static inline void bpf_compute_data_end(struct sk_buff *skb)
{
	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;

	BUILD_BUG_ON(sizeof(*cb) > FIELD_SIZEOF(struct sk_buff, cb));
	cb->data_end = skb->data + skb_headlen(skb);
}

static inline u8 *bpf_skb_cb(struct sk_buff *skb)
{
	/* eBPF programs may read/write skb->cb[] area to transfer meta
	 * data between tail calls. Since this also needs to work with
	 * tc, that scratch memory is mapped to qdisc_skb_cb's data area.
	 *
	 * In some socket filter cases, the cb unfortunately needs to be
	 * saved/restored so that protocol specific skb->cb[] data won't
	 * be lost. In any case, due to unprivileged eBPF programs
	 * attached to sockets, we need to clear the bpf_skb_cb() area
	 * to not leak previous contents to user space.
	 */
	BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != BPF_SKB_CB_LEN);
	BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) !=
		     FIELD_SIZEOF(struct qdisc_skb_cb, data));

	return qdisc_skb_cb(skb)->data;
}

static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
				       struct sk_buff *skb)
{
	u8 *cb_data = bpf_skb_cb(skb);
	u8 cb_saved[BPF_SKB_CB_LEN];
	u32 res;

	if (unlikely(prog->cb_access)) {
		memcpy(cb_saved, cb_data, sizeof(cb_saved));
		memset(cb_data, 0, sizeof(cb_saved));
	}

	res = BPF_PROG_RUN(prog, skb);

	if (unlikely(prog->cb_access))
		memcpy(cb_data, cb_saved, sizeof(cb_saved));

	return res;
}

static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
					struct sk_buff *skb)
{
	u8 *cb_data = bpf_skb_cb(skb);

	if (unlikely(prog->cb_access))
		memset(cb_data, 0, BPF_SKB_CB_LEN);

	return BPF_PROG_RUN(prog, skb);
}

static inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
				   struct xdp_buff *xdp)
{
	u32 ret;

	rcu_read_lock();
	ret = BPF_PROG_RUN(prog, (void *)xdp);
	rcu_read_unlock();

	return ret;
}

static inline unsigned int bpf_prog_size(unsigned int proglen)
{
	return max(sizeof(struct bpf_prog),
		   offsetof(struct bpf_prog, insns[proglen]));
}

static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
{
	/* When classic BPF programs have been loaded and the arch
	 * does not have a classic BPF JIT (anymore), they have been
	 * converted via bpf_migrate_filter() to eBPF and thus always
	 * have an unspec program type.
	 */
	return prog->type == BPF_PROG_TYPE_UNSPEC;
}

#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))

#ifdef CONFIG_DEBUG_SET_MODULE_RONX
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
	set_memory_ro((unsigned long)fp, fp->pages);
}

static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
	set_memory_rw((unsigned long)fp, fp->pages);
}
#else
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
}

static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
}
#endif /* CONFIG_DEBUG_SET_MODULE_RONX */

int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	return sk_filter_trim_cap(sk, skb, 1);
}

struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
void bpf_prog_free(struct bpf_prog *fp);

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags);
void __bpf_prog_free(struct bpf_prog *fp);

static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
{
	bpf_prog_unlock_ro(fp);
	__bpf_prog_free(fp);
}

typedef int (*bpf_aux_classic_check_t)(struct sock_filter *filter,
				       unsigned int flen);

int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
			      bpf_aux_classic_check_t trans, bool save_orig);
void bpf_prog_destroy(struct bpf_prog *fp);

int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_attach_bpf(u32 ufd, struct sock *sk);
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
int sk_detach_filter(struct sock *sk);
int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
		  unsigned int len);

bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);

u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
bool bpf_helper_changes_skb_data(void *func);

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len);
void bpf_warn_invalid_xdp_action(u32 act);

#ifdef CONFIG_BPF_JIT
extern int bpf_jit_enable;
extern int bpf_jit_harden;

typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns);
void bpf_jit_binary_free(struct bpf_binary_header *hdr);

void bpf_jit_compile(struct bpf_prog *fp);
void bpf_jit_free(struct bpf_prog *fp);

struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);

static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
				u32 pass, void *image)
{
	pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen,
	       proglen, pass, image, current->comm, task_pid_nr(current));

	if (image)
		print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
			       16, 1, image, proglen, false);
}

static inline bool bpf_jit_is_ebpf(void)
{
# ifdef CONFIG_HAVE_EBPF_JIT
	return true;
# else
	return false;
# endif
}

static inline bool bpf_jit_blinding_enabled(void)
{
	/* These are the prerequisites, should someone ever have the
	 * idea to call blinding outside of them, we make sure to
	 * bail out.
	 */
	if (!bpf_jit_is_ebpf())
		return false;
	if (!bpf_jit_enable)
		return false;
	if (!bpf_jit_harden)
		return false;
	if (bpf_jit_harden == 1 && capable(CAP_SYS_ADMIN))
		return false;

	return true;
}
#else
static inline void bpf_jit_compile(struct bpf_prog *fp)
{
}

static inline void bpf_jit_free(struct bpf_prog *fp)
{
	bpf_prog_unlock_free(fp);
}
#endif /* CONFIG_BPF_JIT */

#define BPF_ANC BIT(15)

static inline bool bpf_needs_clear_a(const struct sock_filter *first)
{
	switch (first->code) {
	case BPF_RET | BPF_K:
	case BPF_LD | BPF_W | BPF_LEN:
		return false;

	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
		if (first->k == SKF_AD_OFF + SKF_AD_ALU_XOR_X)
			return true;
		return false;

	default:
		return true;
	}
}

static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
{
	BUG_ON(ftest->code & BPF_ANC);

	switch (ftest->code) {
	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
#define BPF_ANCILLARY(CODE)	case SKF_AD_OFF + SKF_AD_##CODE: \
				return BPF_ANC | SKF_AD_##CODE
		switch (ftest->k) {
		BPF_ANCILLARY(PROTOCOL);
		BPF_ANCILLARY(PKTTYPE);
		BPF_ANCILLARY(IFINDEX);
		BPF_ANCILLARY(NLATTR);
		BPF_ANCILLARY(NLATTR_NEST);
		BPF_ANCILLARY(MARK);
		BPF_ANCILLARY(QUEUE);
		BPF_ANCILLARY(HATYPE);
		BPF_ANCILLARY(RXHASH);
		BPF_ANCILLARY(CPU);
		BPF_ANCILLARY(ALU_XOR_X);
		BPF_ANCILLARY(VLAN_TAG);
		BPF_ANCILLARY(VLAN_TAG_PRESENT);
		BPF_ANCILLARY(PAY_OFFSET);
		BPF_ANCILLARY(RANDOM);
		BPF_ANCILLARY(VLAN_TPID);
		}
		/* Fallthrough. */
	default:
		return ftest->code;
	}
}

void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
					   int k, unsigned int size);

static inline void *bpf_load_pointer(const struct sk_buff *skb, int k,
				     unsigned int size, void *buffer)
{
	if (k >= 0)
		return skb_header_pointer(skb, k, size, buffer);

	return bpf_internal_load_pointer_neg_helper(skb, k, size);
}

static inline int bpf_tell_extensions(void)
{
	return SKF_AD_MAX;
}

#endif /* __LINUX_FILTER_H__ */