Merge branch 'Introduce composable bpf types'
Hao Luo says:

====================

This patch set consists of two changes:
- a cleanup of arg_type, ret_type and reg_type which tries to make those types composable (patch 1/9 - patch 6/9);
- a bug fix that prevents bpf programs from writing kernel memory (patch 7/9 - patch 9/9).

The purpose of the cleanup is to find a scalable way to express type nullness and read-onliness. This patch set introduces two flags that can be applied to all three types: PTR_MAYBE_NULL and MEM_RDONLY. Previous types such as ARG_XXX_OR_NULL can now be written as

  ARG_XXX | PTR_MAYBE_NULL

Similarly, PTR_TO_RDONLY_BUF is now "PTR_TO_BUF | MEM_RDONLY". Flags can be composed, since args can be both MEM_RDONLY and MAYBE_NULL:

  ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY

Based on these composable types, patch 7/9 applies MEM_RDONLY to PTR_TO_MEM in order to tag the memory returned from per_cpu_ptr as read-only, fixing a previous bug where one could leverage per_cpu_ptr to modify kernel memory from within BPF programs.

Patch 8/9 generalizes the use of MEM_RDONLY further by tagging a set of ARG_PTR_TO_MEM helper arguments with MEM_RDONLY. Some helper functions, such as bpf_d_path and bpf_snprintf, may overwrite their arguments. In this patch, we narrow ARG_PTR_TO_MEM to be compatible with only a subset of memory types, which prevents these helpers from writing to read-only memory. For the helpers that do not write to their arguments, we add the MEM_RDONLY tag so they can take read-only memory as an argument.

Changes since v1:
- use %u to print base_type(type) instead of %lu (Andrii, patch 3/9)
- improve reg_type_str() by appending '_or_null' and prepending 'rdonly_'; use a preallocated buffer in 'bpf_env'
- unified handling of the previous XXX_OR_NULL types in adjust_ptr_min_max_vals (Andrii, patch 4/9)
- move PTR_TO_MAP_KEY up next to PTR_TO_MAP_VALUE so that we don't have to change drivers that assume the numeric values of bpf_reg (patch 4/9)
- reintroduce the typo from previous commits in the Fixes tags (Andrii, patch 7/9)
- extensive comments on the reason behind folding flags in check_reg_type (Andrii, patch 8/9)

Changes since RFC v2:
- renamed BPF_BASE_TYPE to the more succinct base_type and moved its definition to bpf_verifier.h; same for BPF_TYPE_FLAG (Alexei)
- made checking MEM_RDONLY in check_reg_type() universal (Alexei)
- ran through the majority of test_progs and fixed bugs found in RFC v2:
  - fixed incorrect BPF_BASE_TYPE_MASK: the high bit of GENMASK should be BITS - 1, rather than BITS (patch 1/9)
  - fixed incorrect conditions when checking ARG_PTR_TO_MAP_VALUE in check_func_arg(); see patch 2/9
  - fixed a bug where PTR_TO_BTF_ID could be combined with MEM_RDONLY, causing the check in check_mem_access() to fall through to the 'else' branch; see check_helper_call() in patch 7/9
  - fixed a build failure in the netronome driver; entries in bpf_reg_type have been reordered (patch 4/9)
  - fixed build warnings from using '%d' to print base_type (patch 4/9)
  - unified arg_type_may_be_null() and reg_type_may_be_null() into a single type_may_be_null()

Previous versions:
v1: https://lwn.net/Articles/877938/
RFC v2: https://lwn.net/Articles/877171/
RFC v1:
https://lore.kernel.org/bpf/20211109003052.3499225-1-haoluo@google.com/T/
https://lore.kernel.org/bpf/20211109021624.1140446-8-haoluo@google.com/T/

====================

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
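As a quick illustration of the scheme described above (a standalone sketch with made-up numeric values, not the kernel's actual enum layout), the flags live in the bits above the 8-bit base type, so a composed type is just a bitwise OR and either half can be recovered with a mask:

#include <stdio.h>

/* Simplified stand-ins; the real definitions are in bpf.h / bpf_verifier.h
 * with BPF_BASE_TYPE_BITS = 8 (see the diff below).
 */
#define BASE_TYPE_BITS 8
#define BASE_TYPE_MASK ((1u << BASE_TYPE_BITS) - 1)

enum { PTR_TO_BUF = 1, PTR_TO_MEM = 2 };                /* illustrative base types */
enum {
        PTR_MAYBE_NULL = 1u << (BASE_TYPE_BITS + 0),    /* pointer may be NULL */
        MEM_RDONLY     = 1u << (BASE_TYPE_BITS + 1),    /* memory is read-only */
};

static unsigned int base_type(unsigned int t) { return t & BASE_TYPE_MASK; }
static unsigned int type_flag(unsigned int t) { return t & ~BASE_TYPE_MASK; }

int main(void)
{
        /* what used to be spelled PTR_TO_RDONLY_BUF_OR_NULL */
        unsigned int t = PTR_TO_BUF | PTR_MAYBE_NULL | MEM_RDONLY;

        printf("base=%u maybe_null=%d rdonly=%d\n",
               base_type(t), !!(type_flag(t) & PTR_MAYBE_NULL),
               !!(type_flag(t) & MEM_RDONLY));
        return 0;
}

Compiled with any C99/C11 compiler this prints "base=1 maybe_null=1 rdonly=1"; the kernel's own base_type()/type_flag() helpers, introduced in bpf_verifier.h below, do exactly this masking.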
commit 7f16d2aa40
@@ -297,6 +297,34 @@ bool bpf_map_meta_equal(const struct bpf_map *meta0,

extern const struct bpf_map_ops bpf_map_offload_ops;

/* bpf_type_flag contains a set of flags that are applicable to the values of
 * arg_type, ret_type and reg_type. For example, a pointer value may be null,
 * or a memory is read-only. We classify types into two categories: base types
 * and extended types. Extended types are base types combined with a type flag.
 *
 * Currently there are no more than 32 base types in arg_type, ret_type and
 * reg_types.
 */
#define BPF_BASE_TYPE_BITS 8

enum bpf_type_flag {
        /* PTR may be NULL. */
        PTR_MAYBE_NULL = BIT(0 + BPF_BASE_TYPE_BITS),

        /* MEM is read-only. When applied on bpf_arg, it indicates the arg is
         * compatible with both mutable and immutable memory.
         */
        MEM_RDONLY = BIT(1 + BPF_BASE_TYPE_BITS),

        __BPF_TYPE_LAST_FLAG = MEM_RDONLY,
};

/* Max number of base types. */
#define BPF_BASE_TYPE_LIMIT (1UL << BPF_BASE_TYPE_BITS)

/* Max number of all types. */
#define BPF_TYPE_LIMIT (__BPF_TYPE_LAST_FLAG | (__BPF_TYPE_LAST_FLAG - 1))

/* function argument constraints */
enum bpf_arg_type {
        ARG_DONTCARE = 0, /* unused argument in helper function */
@@ -308,13 +336,11 @@ enum bpf_arg_type {
        ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */
        ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */
        ARG_PTR_TO_UNINIT_MAP_VALUE, /* pointer to valid memory used to store a map value */
        ARG_PTR_TO_MAP_VALUE_OR_NULL, /* pointer to stack used as map value or NULL */

        /* the following constraints used to prototype bpf_memcmp() and other
         * functions that access data on eBPF program stack
         */
        ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */
        ARG_PTR_TO_MEM_OR_NULL, /* pointer to valid memory or NULL */
        ARG_PTR_TO_UNINIT_MEM, /* pointer to memory does not need to be initialized,
                                * helper function must fill all bytes or clear
                                * them in error case.
@@ -324,42 +350,65 @@ enum bpf_arg_type {
        ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */

        ARG_PTR_TO_CTX, /* pointer to context */
        ARG_PTR_TO_CTX_OR_NULL, /* pointer to context or NULL */
        ARG_ANYTHING, /* any (initialized) argument is ok */
        ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */
        ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */
        ARG_PTR_TO_INT, /* pointer to int */
        ARG_PTR_TO_LONG, /* pointer to long */
        ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */
        ARG_PTR_TO_SOCKET_OR_NULL, /* pointer to bpf_sock (fullsock) or NULL */
        ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */
        ARG_PTR_TO_ALLOC_MEM, /* pointer to dynamically allocated memory */
        ARG_PTR_TO_ALLOC_MEM_OR_NULL, /* pointer to dynamically allocated memory or NULL */
        ARG_CONST_ALLOC_SIZE_OR_ZERO, /* number of allocated bytes requested */
        ARG_PTR_TO_BTF_ID_SOCK_COMMON, /* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */
        ARG_PTR_TO_PERCPU_BTF_ID, /* pointer to in-kernel percpu type */
        ARG_PTR_TO_FUNC, /* pointer to a bpf program function */
        ARG_PTR_TO_STACK_OR_NULL, /* pointer to stack or NULL */
        ARG_PTR_TO_STACK, /* pointer to stack */
        ARG_PTR_TO_CONST_STR, /* pointer to a null terminated read-only string */
        ARG_PTR_TO_TIMER, /* pointer to bpf_timer */
        __BPF_ARG_TYPE_MAX,

        /* Extended arg_types. */
        ARG_PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_MAP_VALUE,
        ARG_PTR_TO_MEM_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_MEM,
        ARG_PTR_TO_CTX_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_CTX,
        ARG_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_SOCKET,
        ARG_PTR_TO_ALLOC_MEM_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_ALLOC_MEM,
        ARG_PTR_TO_STACK_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_STACK,

        /* This must be the last entry. Its purpose is to ensure the enum is
         * wide enough to hold the higher bits reserved for bpf_type_flag.
         */
        __BPF_ARG_TYPE_LIMIT = BPF_TYPE_LIMIT,
};
static_assert(__BPF_ARG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);

/* type of values returned from helper functions */
enum bpf_return_type {
        RET_INTEGER, /* function returns integer */
        RET_VOID, /* function doesn't return anything */
        RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */
        RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */
        RET_PTR_TO_SOCKET_OR_NULL, /* returns a pointer to a socket or NULL */
        RET_PTR_TO_TCP_SOCK_OR_NULL, /* returns a pointer to a tcp_sock or NULL */
        RET_PTR_TO_SOCK_COMMON_OR_NULL, /* returns a pointer to a sock_common or NULL */
        RET_PTR_TO_ALLOC_MEM_OR_NULL, /* returns a pointer to dynamically allocated memory or NULL */
        RET_PTR_TO_BTF_ID_OR_NULL, /* returns a pointer to a btf_id or NULL */
        RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL, /* returns a pointer to a valid memory or a btf_id or NULL */
        RET_PTR_TO_SOCKET, /* returns a pointer to a socket */
        RET_PTR_TO_TCP_SOCK, /* returns a pointer to a tcp_sock */
        RET_PTR_TO_SOCK_COMMON, /* returns a pointer to a sock_common */
        RET_PTR_TO_ALLOC_MEM, /* returns a pointer to dynamically allocated memory */
        RET_PTR_TO_MEM_OR_BTF_ID, /* returns a pointer to a valid memory or a btf_id */
        RET_PTR_TO_BTF_ID, /* returns a pointer to a btf_id */
        __BPF_RET_TYPE_MAX,

        /* Extended ret_types. */
        RET_PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_MAP_VALUE,
        RET_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCKET,
        RET_PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_TCP_SOCK,
        RET_PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCK_COMMON,
        RET_PTR_TO_ALLOC_MEM_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_ALLOC_MEM,
        RET_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_BTF_ID,

        /* This must be the last entry. Its purpose is to ensure the enum is
         * wide enough to hold the higher bits reserved for bpf_type_flag.
         */
        __BPF_RET_TYPE_LIMIT = BPF_TYPE_LIMIT,
};
static_assert(__BPF_RET_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);

/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
@@ -421,18 +470,15 @@ enum bpf_reg_type {
        PTR_TO_CTX, /* reg points to bpf_context */
        CONST_PTR_TO_MAP, /* reg points to struct bpf_map */
        PTR_TO_MAP_VALUE, /* reg points to map element value */
        PTR_TO_MAP_VALUE_OR_NULL, /* points to map elem value or NULL */
        PTR_TO_MAP_KEY, /* reg points to a map element key */
        PTR_TO_STACK, /* reg == frame_pointer + offset */
        PTR_TO_PACKET_META, /* skb->data - meta_len */
        PTR_TO_PACKET, /* reg points to skb->data */
        PTR_TO_PACKET_END, /* skb->data + headlen */
        PTR_TO_FLOW_KEYS, /* reg points to bpf_flow_keys */
        PTR_TO_SOCKET, /* reg points to struct bpf_sock */
        PTR_TO_SOCKET_OR_NULL, /* reg points to struct bpf_sock or NULL */
        PTR_TO_SOCK_COMMON, /* reg points to sock_common */
        PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */
        PTR_TO_TCP_SOCK, /* reg points to struct tcp_sock */
        PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
        PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */
        PTR_TO_XDP_SOCK, /* reg points to struct xdp_sock */
        /* PTR_TO_BTF_ID points to a kernel struct that does not need
@@ -450,18 +496,25 @@ enum bpf_reg_type {
         * been checked for null. Used primarily to inform the verifier
         * an explicit null check is required for this struct.
         */
        PTR_TO_BTF_ID_OR_NULL,
        PTR_TO_MEM, /* reg points to valid memory region */
        PTR_TO_MEM_OR_NULL, /* reg points to valid memory region or NULL */
        PTR_TO_RDONLY_BUF, /* reg points to a readonly buffer */
        PTR_TO_RDONLY_BUF_OR_NULL, /* reg points to a readonly buffer or NULL */
        PTR_TO_RDWR_BUF, /* reg points to a read/write buffer */
        PTR_TO_RDWR_BUF_OR_NULL, /* reg points to a read/write buffer or NULL */
        PTR_TO_BUF, /* reg points to a read/write buffer */
        PTR_TO_PERCPU_BTF_ID, /* reg points to a percpu kernel variable */
        PTR_TO_FUNC, /* reg points to a bpf program function */
        PTR_TO_MAP_KEY, /* reg points to a map element key */
        __BPF_REG_TYPE_MAX,

        /* Extended reg_types. */
        PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | PTR_TO_MAP_VALUE,
        PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCKET,
        PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON,
        PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | PTR_TO_TCP_SOCK,
        PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | PTR_TO_BTF_ID,

        /* This must be the last entry. Its purpose is to ensure the enum is
         * wide enough to hold the higher bits reserved for bpf_type_flag.
         */
        __BPF_REG_TYPE_LIMIT = BPF_TYPE_LIMIT,
};
static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);

/* The information passed from prog-specific *_is_valid_access
 * back to the verifier.
@@ -18,6 +18,8 @@
 * that converting umax_value to int cannot overflow.
 */
#define BPF_MAX_VAR_SIZ (1 << 29)
/* size of type_str_buf in bpf_verifier. */
#define TYPE_STR_BUF_LEN 64

/* Liveness marks, used for registers and spilled-regs (in stack slots).
 * Read marks propagate upwards until they find a write mark; they record that
@@ -484,6 +486,8 @@ struct bpf_verifier_env {
        /* Same as scratched_regs but for stack slots */
        u64 scratched_stack_slots;
        u32 prev_log_len, prev_insn_print_len;
        /* buffer used in reg_type_str() to generate reg_type string */
        char type_str_buf[TYPE_STR_BUF_LEN];
};

__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
@@ -546,5 +550,18 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
                            struct bpf_attach_target_info *tgt_info);
void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab);

#define BPF_BASE_TYPE_MASK GENMASK(BPF_BASE_TYPE_BITS - 1, 0)

/* extract base type from bpf_{arg, return, reg}_type. */
static inline u32 base_type(u32 type)
{
        return type & BPF_BASE_TYPE_MASK;
}

/* extract flags from an extended type. See bpf_type_flag in bpf.h. */
static inline u32 type_flag(u32 type)
{
        return type & ~BPF_BASE_TYPE_MASK;
}

#endif /* _LINUX_BPF_VERIFIER_H */
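To see why BPF_TYPE_LIMIT is written as __BPF_TYPE_LAST_FLAG | (__BPF_TYPE_LAST_FLAG - 1), here is a small standalone re-derivation (a sketch that mirrors the bit layout above; it is not kernel code): with 8 base-type bits and MEM_RDONLY as the highest flag at bit 9, the expression sets every flag bit and every base-type bit, i.e. the largest value an extended type can take. That is why each enum places __BPF_*_TYPE_LIMIT = BPF_TYPE_LIMIT last (to force a wide enough underlying type) and static_asserts that the plain enumerators stay below BPF_BASE_TYPE_LIMIT.

#include <assert.h>

/* Mirrors the layout above: 8 base-type bits, flags start at bit 8. */
enum { BASE_TYPE_BITS = 8 };
enum {
        PTR_MAYBE_NULL = 1 << (BASE_TYPE_BITS + 0),     /* 0x100 */
        MEM_RDONLY     = 1 << (BASE_TYPE_BITS + 1),     /* 0x200 */
        LAST_FLAG      = MEM_RDONLY,
};

#define BASE_TYPE_LIMIT (1UL << BASE_TYPE_BITS)         /* 0x100 */
#define TYPE_LIMIT      (LAST_FLAG | (LAST_FLAG - 1))   /* 0x3ff */

/* every flag bit plus every base-type bit */
static_assert(TYPE_LIMIT == (PTR_MAYBE_NULL | MEM_RDONLY | (BASE_TYPE_LIMIT - 1)),
              "TYPE_LIMIT covers all representable extended types");
static_assert(TYPE_LIMIT == 0x3ff, "0x200 | 0x1ff");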
@ -4940,10 +4940,12 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
|
||||
/* check for PTR_TO_RDONLY_BUF_OR_NULL or PTR_TO_RDWR_BUF_OR_NULL */
|
||||
for (i = 0; i < prog->aux->ctx_arg_info_size; i++) {
|
||||
const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i];
|
||||
u32 type, flag;
|
||||
|
||||
if (ctx_arg_info->offset == off &&
|
||||
(ctx_arg_info->reg_type == PTR_TO_RDONLY_BUF_OR_NULL ||
|
||||
ctx_arg_info->reg_type == PTR_TO_RDWR_BUF_OR_NULL)) {
|
||||
type = base_type(ctx_arg_info->reg_type);
|
||||
flag = type_flag(ctx_arg_info->reg_type);
|
||||
if (ctx_arg_info->offset == off && type == PTR_TO_BUF &&
|
||||
(flag & PTR_MAYBE_NULL)) {
|
||||
info->reg_type = ctx_arg_info->reg_type;
|
||||
return true;
|
||||
}
|
||||
@ -5857,7 +5859,7 @@ int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
reg->type = PTR_TO_MEM_OR_NULL;
|
||||
reg->type = PTR_TO_MEM | PTR_MAYBE_NULL;
|
||||
reg->id = ++env->id_gen;
|
||||
|
||||
continue;
|
||||
@ -6351,7 +6353,7 @@ const struct bpf_func_proto bpf_btf_find_by_name_kind_proto = {
|
||||
.func = bpf_btf_find_by_name_kind,
|
||||
.gpl_only = false,
|
||||
.ret_type = RET_INTEGER,
|
||||
.arg1_type = ARG_PTR_TO_MEM,
|
||||
.arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
|
||||
.arg2_type = ARG_CONST_SIZE,
|
||||
.arg3_type = ARG_ANYTHING,
|
||||
.arg4_type = ARG_ANYTHING,
|
||||
|
@ -1789,7 +1789,7 @@ static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = {
|
||||
.gpl_only = false,
|
||||
.ret_type = RET_INTEGER,
|
||||
.arg1_type = ARG_PTR_TO_CTX,
|
||||
.arg2_type = ARG_PTR_TO_MEM,
|
||||
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
|
||||
.arg3_type = ARG_CONST_SIZE,
|
||||
};
|
||||
|
||||
|
@ -531,7 +531,7 @@ const struct bpf_func_proto bpf_strtol_proto = {
|
||||
.func = bpf_strtol,
|
||||
.gpl_only = false,
|
||||
.ret_type = RET_INTEGER,
|
||||
.arg1_type = ARG_PTR_TO_MEM,
|
||||
.arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
|
||||
.arg2_type = ARG_CONST_SIZE,
|
||||
.arg3_type = ARG_ANYTHING,
|
||||
.arg4_type = ARG_PTR_TO_LONG,
|
||||
@ -559,7 +559,7 @@ const struct bpf_func_proto bpf_strtoul_proto = {
|
||||
.func = bpf_strtoul,
|
||||
.gpl_only = false,
|
||||
.ret_type = RET_INTEGER,
|
||||
.arg1_type = ARG_PTR_TO_MEM,
|
||||
.arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
|
||||
.arg2_type = ARG_CONST_SIZE,
|
||||
.arg3_type = ARG_ANYTHING,
|
||||
.arg4_type = ARG_PTR_TO_LONG,
|
||||
@ -645,7 +645,7 @@ const struct bpf_func_proto bpf_event_output_data_proto = {
|
||||
.arg1_type = ARG_PTR_TO_CTX,
|
||||
.arg2_type = ARG_CONST_MAP_PTR,
|
||||
.arg3_type = ARG_ANYTHING,
|
||||
.arg4_type = ARG_PTR_TO_MEM,
|
||||
.arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
|
||||
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
|
||||
};
|
||||
|
||||
@ -682,7 +682,7 @@ BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
|
||||
const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
|
||||
.func = bpf_per_cpu_ptr,
|
||||
.gpl_only = false,
|
||||
.ret_type = RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL,
|
||||
.ret_type = RET_PTR_TO_MEM_OR_BTF_ID | PTR_MAYBE_NULL | MEM_RDONLY,
|
||||
.arg1_type = ARG_PTR_TO_PERCPU_BTF_ID,
|
||||
.arg2_type = ARG_ANYTHING,
|
||||
};
|
||||
@ -695,7 +695,7 @@ BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
|
||||
const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
|
||||
.func = bpf_this_cpu_ptr,
|
||||
.gpl_only = false,
|
||||
.ret_type = RET_PTR_TO_MEM_OR_BTF_ID,
|
||||
.ret_type = RET_PTR_TO_MEM_OR_BTF_ID | MEM_RDONLY,
|
||||
.arg1_type = ARG_PTR_TO_PERCPU_BTF_ID,
|
||||
};
|
||||
|
||||
@ -1026,7 +1026,7 @@ const struct bpf_func_proto bpf_snprintf_proto = {
|
||||
.arg1_type = ARG_PTR_TO_MEM_OR_NULL,
|
||||
.arg2_type = ARG_CONST_SIZE_OR_ZERO,
|
||||
.arg3_type = ARG_PTR_TO_CONST_STR,
|
||||
.arg4_type = ARG_PTR_TO_MEM_OR_NULL,
|
||||
.arg4_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
|
||||
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
|
||||
};
|
||||
|
||||
|
@ -174,9 +174,9 @@ static const struct bpf_iter_reg bpf_map_elem_reg_info = {
|
||||
.ctx_arg_info_size = 2,
|
||||
.ctx_arg_info = {
|
||||
{ offsetof(struct bpf_iter__bpf_map_elem, key),
|
||||
PTR_TO_RDONLY_BUF_OR_NULL },
|
||||
PTR_TO_BUF | PTR_MAYBE_NULL | MEM_RDONLY },
|
||||
{ offsetof(struct bpf_iter__bpf_map_elem, value),
|
||||
PTR_TO_RDWR_BUF_OR_NULL },
|
||||
PTR_TO_BUF | PTR_MAYBE_NULL },
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -444,7 +444,7 @@ const struct bpf_func_proto bpf_ringbuf_output_proto = {
|
||||
.func = bpf_ringbuf_output,
|
||||
.ret_type = RET_INTEGER,
|
||||
.arg1_type = ARG_CONST_MAP_PTR,
|
||||
.arg2_type = ARG_PTR_TO_MEM,
|
||||
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
|
||||
.arg3_type = ARG_CONST_SIZE_OR_ZERO,
|
||||
.arg4_type = ARG_ANYTHING,
|
||||
};
|
||||
|
@ -4773,7 +4773,7 @@ static const struct bpf_func_proto bpf_sys_bpf_proto = {
|
||||
.gpl_only = false,
|
||||
.ret_type = RET_INTEGER,
|
||||
.arg1_type = ARG_ANYTHING,
|
||||
.arg2_type = ARG_PTR_TO_MEM,
|
||||
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
|
||||
.arg3_type = ARG_CONST_SIZE,
|
||||
};
|
||||
|
||||
|
@ -442,18 +442,6 @@ static bool reg_type_not_null(enum bpf_reg_type type)
|
||||
type == PTR_TO_SOCK_COMMON;
|
||||
}
|
||||
|
||||
static bool reg_type_may_be_null(enum bpf_reg_type type)
|
||||
{
|
||||
return type == PTR_TO_MAP_VALUE_OR_NULL ||
|
||||
type == PTR_TO_SOCKET_OR_NULL ||
|
||||
type == PTR_TO_SOCK_COMMON_OR_NULL ||
|
||||
type == PTR_TO_TCP_SOCK_OR_NULL ||
|
||||
type == PTR_TO_BTF_ID_OR_NULL ||
|
||||
type == PTR_TO_MEM_OR_NULL ||
|
||||
type == PTR_TO_RDONLY_BUF_OR_NULL ||
|
||||
type == PTR_TO_RDWR_BUF_OR_NULL;
|
||||
}
|
||||
|
||||
static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
|
||||
{
|
||||
return reg->type == PTR_TO_MAP_VALUE &&
|
||||
@ -462,12 +450,14 @@ static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
|
||||
|
||||
static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type)
|
||||
{
|
||||
return type == PTR_TO_SOCKET ||
|
||||
type == PTR_TO_SOCKET_OR_NULL ||
|
||||
type == PTR_TO_TCP_SOCK ||
|
||||
type == PTR_TO_TCP_SOCK_OR_NULL ||
|
||||
type == PTR_TO_MEM ||
|
||||
type == PTR_TO_MEM_OR_NULL;
|
||||
return base_type(type) == PTR_TO_SOCKET ||
|
||||
base_type(type) == PTR_TO_TCP_SOCK ||
|
||||
base_type(type) == PTR_TO_MEM;
|
||||
}
|
||||
|
||||
static bool type_is_rdonly_mem(u32 type)
|
||||
{
|
||||
return type & MEM_RDONLY;
|
||||
}
|
||||
|
||||
static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
|
||||
@ -475,14 +465,9 @@ static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
|
||||
return type == ARG_PTR_TO_SOCK_COMMON;
|
||||
}
|
||||
|
||||
static bool arg_type_may_be_null(enum bpf_arg_type type)
|
||||
static bool type_may_be_null(u32 type)
|
||||
{
|
||||
return type == ARG_PTR_TO_MAP_VALUE_OR_NULL ||
|
||||
type == ARG_PTR_TO_MEM_OR_NULL ||
|
||||
type == ARG_PTR_TO_CTX_OR_NULL ||
|
||||
type == ARG_PTR_TO_SOCKET_OR_NULL ||
|
||||
type == ARG_PTR_TO_ALLOC_MEM_OR_NULL ||
|
||||
type == ARG_PTR_TO_STACK_OR_NULL;
|
||||
return type & PTR_MAYBE_NULL;
|
||||
}
|
||||
|
||||
/* Determine whether the function releases some resources allocated by another
|
||||
@ -542,39 +527,54 @@ static bool is_cmpxchg_insn(const struct bpf_insn *insn)
|
||||
insn->imm == BPF_CMPXCHG;
|
||||
}
|
||||
|
||||
/* string representation of 'enum bpf_reg_type' */
|
||||
static const char * const reg_type_str[] = {
|
||||
[NOT_INIT] = "?",
|
||||
[SCALAR_VALUE] = "inv",
|
||||
[PTR_TO_CTX] = "ctx",
|
||||
[CONST_PTR_TO_MAP] = "map_ptr",
|
||||
[PTR_TO_MAP_VALUE] = "map_value",
|
||||
[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
|
||||
[PTR_TO_STACK] = "fp",
|
||||
[PTR_TO_PACKET] = "pkt",
|
||||
[PTR_TO_PACKET_META] = "pkt_meta",
|
||||
[PTR_TO_PACKET_END] = "pkt_end",
|
||||
[PTR_TO_FLOW_KEYS] = "flow_keys",
|
||||
[PTR_TO_SOCKET] = "sock",
|
||||
[PTR_TO_SOCKET_OR_NULL] = "sock_or_null",
|
||||
[PTR_TO_SOCK_COMMON] = "sock_common",
|
||||
[PTR_TO_SOCK_COMMON_OR_NULL] = "sock_common_or_null",
|
||||
[PTR_TO_TCP_SOCK] = "tcp_sock",
|
||||
[PTR_TO_TCP_SOCK_OR_NULL] = "tcp_sock_or_null",
|
||||
[PTR_TO_TP_BUFFER] = "tp_buffer",
|
||||
[PTR_TO_XDP_SOCK] = "xdp_sock",
|
||||
[PTR_TO_BTF_ID] = "ptr_",
|
||||
[PTR_TO_BTF_ID_OR_NULL] = "ptr_or_null_",
|
||||
[PTR_TO_PERCPU_BTF_ID] = "percpu_ptr_",
|
||||
[PTR_TO_MEM] = "mem",
|
||||
[PTR_TO_MEM_OR_NULL] = "mem_or_null",
|
||||
[PTR_TO_RDONLY_BUF] = "rdonly_buf",
|
||||
[PTR_TO_RDONLY_BUF_OR_NULL] = "rdonly_buf_or_null",
|
||||
[PTR_TO_RDWR_BUF] = "rdwr_buf",
|
||||
[PTR_TO_RDWR_BUF_OR_NULL] = "rdwr_buf_or_null",
|
||||
[PTR_TO_FUNC] = "func",
|
||||
[PTR_TO_MAP_KEY] = "map_key",
|
||||
};
|
||||
/* string representation of 'enum bpf_reg_type'
|
||||
*
|
||||
* Note that reg_type_str() can not appear more than once in a single verbose()
|
||||
* statement.
|
||||
*/
|
||||
static const char *reg_type_str(struct bpf_verifier_env *env,
|
||||
enum bpf_reg_type type)
|
||||
{
|
||||
char postfix[16] = {0}, prefix[16] = {0};
|
||||
static const char * const str[] = {
|
||||
[NOT_INIT] = "?",
|
||||
[SCALAR_VALUE] = "inv",
|
||||
[PTR_TO_CTX] = "ctx",
|
||||
[CONST_PTR_TO_MAP] = "map_ptr",
|
||||
[PTR_TO_MAP_VALUE] = "map_value",
|
||||
[PTR_TO_STACK] = "fp",
|
||||
[PTR_TO_PACKET] = "pkt",
|
||||
[PTR_TO_PACKET_META] = "pkt_meta",
|
||||
[PTR_TO_PACKET_END] = "pkt_end",
|
||||
[PTR_TO_FLOW_KEYS] = "flow_keys",
|
||||
[PTR_TO_SOCKET] = "sock",
|
||||
[PTR_TO_SOCK_COMMON] = "sock_common",
|
||||
[PTR_TO_TCP_SOCK] = "tcp_sock",
|
||||
[PTR_TO_TP_BUFFER] = "tp_buffer",
|
||||
[PTR_TO_XDP_SOCK] = "xdp_sock",
|
||||
[PTR_TO_BTF_ID] = "ptr_",
|
||||
[PTR_TO_PERCPU_BTF_ID] = "percpu_ptr_",
|
||||
[PTR_TO_MEM] = "mem",
|
||||
[PTR_TO_BUF] = "buf",
|
||||
[PTR_TO_FUNC] = "func",
|
||||
[PTR_TO_MAP_KEY] = "map_key",
|
||||
};
|
||||
|
||||
if (type & PTR_MAYBE_NULL) {
|
||||
if (base_type(type) == PTR_TO_BTF_ID ||
|
||||
base_type(type) == PTR_TO_PERCPU_BTF_ID)
|
||||
strncpy(postfix, "or_null_", 16);
|
||||
else
|
||||
strncpy(postfix, "_or_null", 16);
|
||||
}
|
||||
|
||||
if (type & MEM_RDONLY)
|
||||
strncpy(prefix, "rdonly_", 16);
|
||||
|
||||
snprintf(env->type_str_buf, TYPE_STR_BUF_LEN, "%s%s%s",
|
||||
prefix, str[base_type(type)], postfix);
|
||||
return env->type_str_buf;
|
||||
}
|
||||
|
||||
static char slot_type_char[] = {
|
||||
[STACK_INVALID] = '?',
|
||||
@ -680,7 +680,7 @@ static void print_verifier_state(struct bpf_verifier_env *env,
|
||||
continue;
|
||||
verbose(env, " R%d", i);
|
||||
print_liveness(env, reg->live);
|
||||
verbose(env, "=%s", reg_type_str[t]);
|
||||
verbose(env, "=%s", reg_type_str(env, t));
|
||||
if (t == SCALAR_VALUE && reg->precise)
|
||||
verbose(env, "P");
|
||||
if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
|
||||
@ -688,9 +688,8 @@ static void print_verifier_state(struct bpf_verifier_env *env,
|
||||
/* reg->off should be 0 for SCALAR_VALUE */
|
||||
verbose(env, "%lld", reg->var_off.value + reg->off);
|
||||
} else {
|
||||
if (t == PTR_TO_BTF_ID ||
|
||||
t == PTR_TO_BTF_ID_OR_NULL ||
|
||||
t == PTR_TO_PERCPU_BTF_ID)
|
||||
if (base_type(t) == PTR_TO_BTF_ID ||
|
||||
base_type(t) == PTR_TO_PERCPU_BTF_ID)
|
||||
verbose(env, "%s", kernel_type_name(reg->btf, reg->btf_id));
|
||||
verbose(env, "(id=%d", reg->id);
|
||||
if (reg_type_may_be_refcounted_or_null(t))
|
||||
@ -699,10 +698,9 @@ static void print_verifier_state(struct bpf_verifier_env *env,
|
||||
verbose(env, ",off=%d", reg->off);
|
||||
if (type_is_pkt_pointer(t))
|
||||
verbose(env, ",r=%d", reg->range);
|
||||
else if (t == CONST_PTR_TO_MAP ||
|
||||
t == PTR_TO_MAP_KEY ||
|
||||
t == PTR_TO_MAP_VALUE ||
|
||||
t == PTR_TO_MAP_VALUE_OR_NULL)
|
||||
else if (base_type(t) == CONST_PTR_TO_MAP ||
|
||||
base_type(t) == PTR_TO_MAP_KEY ||
|
||||
base_type(t) == PTR_TO_MAP_VALUE)
|
||||
verbose(env, ",ks=%d,vs=%d",
|
||||
reg->map_ptr->key_size,
|
||||
reg->map_ptr->value_size);
|
||||
@ -774,7 +772,7 @@ static void print_verifier_state(struct bpf_verifier_env *env,
|
||||
if (is_spilled_reg(&state->stack[i])) {
|
||||
reg = &state->stack[i].spilled_ptr;
|
||||
t = reg->type;
|
||||
verbose(env, "=%s", reg_type_str[t]);
|
||||
verbose(env, "=%s", reg_type_str(env, t));
|
||||
if (t == SCALAR_VALUE && reg->precise)
|
||||
verbose(env, "P");
|
||||
if (t == SCALAR_VALUE && tnum_is_const(reg->var_off))
|
||||
@ -1207,8 +1205,7 @@ static void mark_reg_known_zero(struct bpf_verifier_env *env,
|
||||
|
||||
static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
|
||||
{
|
||||
switch (reg->type) {
|
||||
case PTR_TO_MAP_VALUE_OR_NULL: {
|
||||
if (base_type(reg->type) == PTR_TO_MAP_VALUE) {
|
||||
const struct bpf_map *map = reg->map_ptr;
|
||||
|
||||
if (map->inner_map_meta) {
|
||||
@ -1227,32 +1224,10 @@ static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
|
||||
} else {
|
||||
reg->type = PTR_TO_MAP_VALUE;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case PTR_TO_SOCKET_OR_NULL:
|
||||
reg->type = PTR_TO_SOCKET;
|
||||
break;
|
||||
case PTR_TO_SOCK_COMMON_OR_NULL:
|
||||
reg->type = PTR_TO_SOCK_COMMON;
|
||||
break;
|
||||
case PTR_TO_TCP_SOCK_OR_NULL:
|
||||
reg->type = PTR_TO_TCP_SOCK;
|
||||
break;
|
||||
case PTR_TO_BTF_ID_OR_NULL:
|
||||
reg->type = PTR_TO_BTF_ID;
|
||||
break;
|
||||
case PTR_TO_MEM_OR_NULL:
|
||||
reg->type = PTR_TO_MEM;
|
||||
break;
|
||||
case PTR_TO_RDONLY_BUF_OR_NULL:
|
||||
reg->type = PTR_TO_RDONLY_BUF;
|
||||
break;
|
||||
case PTR_TO_RDWR_BUF_OR_NULL:
|
||||
reg->type = PTR_TO_RDWR_BUF;
|
||||
break;
|
||||
default:
|
||||
WARN_ONCE(1, "unknown nullable register type");
|
||||
return;
|
||||
}
|
||||
|
||||
reg->type &= ~PTR_MAYBE_NULL;
|
||||
}
|
||||
|
||||
static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
|
||||
@ -2108,7 +2083,7 @@ static int mark_reg_read(struct bpf_verifier_env *env,
|
||||
break;
|
||||
if (parent->live & REG_LIVE_DONE) {
|
||||
verbose(env, "verifier BUG type %s var_off %lld off %d\n",
|
||||
reg_type_str[parent->type],
|
||||
reg_type_str(env, parent->type),
|
||||
parent->var_off.value, parent->off);
|
||||
return -EFAULT;
|
||||
}
|
||||
@ -2773,9 +2748,8 @@ static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi)
|
||||
|
||||
static bool is_spillable_regtype(enum bpf_reg_type type)
|
||||
{
|
||||
switch (type) {
|
||||
switch (base_type(type)) {
|
||||
case PTR_TO_MAP_VALUE:
|
||||
case PTR_TO_MAP_VALUE_OR_NULL:
|
||||
case PTR_TO_STACK:
|
||||
case PTR_TO_CTX:
|
||||
case PTR_TO_PACKET:
|
||||
@ -2784,21 +2758,13 @@ static bool is_spillable_regtype(enum bpf_reg_type type)
|
||||
case PTR_TO_FLOW_KEYS:
|
||||
case CONST_PTR_TO_MAP:
|
||||
case PTR_TO_SOCKET:
|
||||
case PTR_TO_SOCKET_OR_NULL:
|
||||
case PTR_TO_SOCK_COMMON:
|
||||
case PTR_TO_SOCK_COMMON_OR_NULL:
|
||||
case PTR_TO_TCP_SOCK:
|
||||
case PTR_TO_TCP_SOCK_OR_NULL:
|
||||
case PTR_TO_XDP_SOCK:
|
||||
case PTR_TO_BTF_ID:
|
||||
case PTR_TO_BTF_ID_OR_NULL:
|
||||
case PTR_TO_RDONLY_BUF:
|
||||
case PTR_TO_RDONLY_BUF_OR_NULL:
|
||||
case PTR_TO_RDWR_BUF:
|
||||
case PTR_TO_RDWR_BUF_OR_NULL:
|
||||
case PTR_TO_BUF:
|
||||
case PTR_TO_PERCPU_BTF_ID:
|
||||
case PTR_TO_MEM:
|
||||
case PTR_TO_MEM_OR_NULL:
|
||||
case PTR_TO_FUNC:
|
||||
case PTR_TO_MAP_KEY:
|
||||
return true;
|
||||
@ -3638,7 +3604,7 @@ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off,
|
||||
*/
|
||||
*reg_type = info.reg_type;
|
||||
|
||||
if (*reg_type == PTR_TO_BTF_ID || *reg_type == PTR_TO_BTF_ID_OR_NULL) {
|
||||
if (base_type(*reg_type) == PTR_TO_BTF_ID) {
|
||||
*btf = info.btf;
|
||||
*btf_id = info.btf_id;
|
||||
} else {
|
||||
@ -3706,7 +3672,7 @@ static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
|
||||
}
|
||||
|
||||
verbose(env, "R%d invalid %s access off=%d size=%d\n",
|
||||
regno, reg_type_str[reg->type], off, size);
|
||||
regno, reg_type_str(env, reg->type), off, size);
|
||||
|
||||
return -EACCES;
|
||||
}
|
||||
@ -4433,15 +4399,30 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
|
||||
mark_reg_unknown(env, regs, value_regno);
|
||||
}
|
||||
}
|
||||
} else if (reg->type == PTR_TO_MEM) {
|
||||
} else if (base_type(reg->type) == PTR_TO_MEM) {
|
||||
bool rdonly_mem = type_is_rdonly_mem(reg->type);
|
||||
|
||||
if (type_may_be_null(reg->type)) {
|
||||
verbose(env, "R%d invalid mem access '%s'\n", regno,
|
||||
reg_type_str(env, reg->type));
|
||||
return -EACCES;
|
||||
}
|
||||
|
||||
if (t == BPF_WRITE && rdonly_mem) {
|
||||
verbose(env, "R%d cannot write into %s\n",
|
||||
regno, reg_type_str(env, reg->type));
|
||||
return -EACCES;
|
||||
}
|
||||
|
||||
if (t == BPF_WRITE && value_regno >= 0 &&
|
||||
is_pointer_value(env, value_regno)) {
|
||||
verbose(env, "R%d leaks addr into mem\n", value_regno);
|
||||
return -EACCES;
|
||||
}
|
||||
|
||||
err = check_mem_region_access(env, regno, off, size,
|
||||
reg->mem_size, false);
|
||||
if (!err && t == BPF_READ && value_regno >= 0)
|
||||
if (!err && value_regno >= 0 && (t == BPF_READ || rdonly_mem))
|
||||
mark_reg_unknown(env, regs, value_regno);
|
||||
} else if (reg->type == PTR_TO_CTX) {
|
||||
enum bpf_reg_type reg_type = SCALAR_VALUE;
|
||||
@ -4471,7 +4452,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
|
||||
} else {
|
||||
mark_reg_known_zero(env, regs,
|
||||
value_regno);
|
||||
if (reg_type_may_be_null(reg_type))
|
||||
if (type_may_be_null(reg_type))
|
||||
regs[value_regno].id = ++env->id_gen;
|
||||
/* A load of ctx field could have different
|
||||
* actual load size with the one encoded in the
|
||||
@ -4479,8 +4460,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
|
||||
* a sub-register.
|
||||
*/
|
||||
regs[value_regno].subreg_def = DEF_NOT_SUBREG;
|
||||
if (reg_type == PTR_TO_BTF_ID ||
|
||||
reg_type == PTR_TO_BTF_ID_OR_NULL) {
|
||||
if (base_type(reg_type) == PTR_TO_BTF_ID) {
|
||||
regs[value_regno].btf = btf;
|
||||
regs[value_regno].btf_id = btf_id;
|
||||
}
|
||||
@ -4533,7 +4513,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
|
||||
} else if (type_is_sk_pointer(reg->type)) {
|
||||
if (t == BPF_WRITE) {
|
||||
verbose(env, "R%d cannot write into %s\n",
|
||||
regno, reg_type_str[reg->type]);
|
||||
regno, reg_type_str(env, reg->type));
|
||||
return -EACCES;
|
||||
}
|
||||
err = check_sock_access(env, insn_idx, regno, off, size, t);
|
||||
@ -4549,26 +4529,32 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
|
||||
} else if (reg->type == CONST_PTR_TO_MAP) {
|
||||
err = check_ptr_to_map_access(env, regs, regno, off, size, t,
|
||||
value_regno);
|
||||
} else if (reg->type == PTR_TO_RDONLY_BUF) {
|
||||
if (t == BPF_WRITE) {
|
||||
verbose(env, "R%d cannot write into %s\n",
|
||||
regno, reg_type_str[reg->type]);
|
||||
return -EACCES;
|
||||
} else if (base_type(reg->type) == PTR_TO_BUF) {
|
||||
bool rdonly_mem = type_is_rdonly_mem(reg->type);
|
||||
const char *buf_info;
|
||||
u32 *max_access;
|
||||
|
||||
if (rdonly_mem) {
|
||||
if (t == BPF_WRITE) {
|
||||
verbose(env, "R%d cannot write into %s\n",
|
||||
regno, reg_type_str(env, reg->type));
|
||||
return -EACCES;
|
||||
}
|
||||
buf_info = "rdonly";
|
||||
max_access = &env->prog->aux->max_rdonly_access;
|
||||
} else {
|
||||
buf_info = "rdwr";
|
||||
max_access = &env->prog->aux->max_rdwr_access;
|
||||
}
|
||||
|
||||
err = check_buffer_access(env, reg, regno, off, size, false,
|
||||
"rdonly",
|
||||
&env->prog->aux->max_rdonly_access);
|
||||
if (!err && value_regno >= 0)
|
||||
mark_reg_unknown(env, regs, value_regno);
|
||||
} else if (reg->type == PTR_TO_RDWR_BUF) {
|
||||
err = check_buffer_access(env, reg, regno, off, size, false,
|
||||
"rdwr",
|
||||
&env->prog->aux->max_rdwr_access);
|
||||
if (!err && t == BPF_READ && value_regno >= 0)
|
||||
buf_info, max_access);
|
||||
|
||||
if (!err && value_regno >= 0 && (rdonly_mem || t == BPF_READ))
|
||||
mark_reg_unknown(env, regs, value_regno);
|
||||
} else {
|
||||
verbose(env, "R%d invalid mem access '%s'\n", regno,
|
||||
reg_type_str[reg->type]);
|
||||
reg_type_str(env, reg->type));
|
||||
return -EACCES;
|
||||
}
|
||||
|
||||
@ -4635,7 +4621,7 @@ static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_i
|
||||
is_sk_reg(env, insn->dst_reg)) {
|
||||
verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n",
|
||||
insn->dst_reg,
|
||||
reg_type_str[reg_state(env, insn->dst_reg)->type]);
|
||||
reg_type_str(env, reg_state(env, insn->dst_reg)->type));
|
||||
return -EACCES;
|
||||
}
|
||||
|
||||
@ -4812,8 +4798,10 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
|
||||
struct bpf_call_arg_meta *meta)
|
||||
{
|
||||
struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno];
|
||||
const char *buf_info;
|
||||
u32 *max_access;
|
||||
|
||||
switch (reg->type) {
|
||||
switch (base_type(reg->type)) {
|
||||
case PTR_TO_PACKET:
|
||||
case PTR_TO_PACKET_META:
|
||||
return check_packet_access(env, regno, reg->off, access_size,
|
||||
@ -4832,18 +4820,20 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
|
||||
return check_mem_region_access(env, regno, reg->off,
|
||||
access_size, reg->mem_size,
|
||||
zero_size_allowed);
|
||||
case PTR_TO_RDONLY_BUF:
|
||||
if (meta && meta->raw_mode)
|
||||
return -EACCES;
|
||||
case PTR_TO_BUF:
|
||||
if (type_is_rdonly_mem(reg->type)) {
|
||||
if (meta && meta->raw_mode)
|
||||
return -EACCES;
|
||||
|
||||
buf_info = "rdonly";
|
||||
max_access = &env->prog->aux->max_rdonly_access;
|
||||
} else {
|
||||
buf_info = "rdwr";
|
||||
max_access = &env->prog->aux->max_rdwr_access;
|
||||
}
|
||||
return check_buffer_access(env, reg, regno, reg->off,
|
||||
access_size, zero_size_allowed,
|
||||
"rdonly",
|
||||
&env->prog->aux->max_rdonly_access);
|
||||
case PTR_TO_RDWR_BUF:
|
||||
return check_buffer_access(env, reg, regno, reg->off,
|
||||
access_size, zero_size_allowed,
|
||||
"rdwr",
|
||||
&env->prog->aux->max_rdwr_access);
|
||||
buf_info, max_access);
|
||||
case PTR_TO_STACK:
|
||||
return check_stack_range_initialized(
|
||||
env,
|
||||
@ -4855,9 +4845,9 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
|
||||
register_is_null(reg))
|
||||
return 0;
|
||||
|
||||
verbose(env, "R%d type=%s expected=%s\n", regno,
|
||||
reg_type_str[reg->type],
|
||||
reg_type_str[PTR_TO_STACK]);
|
||||
verbose(env, "R%d type=%s ", regno,
|
||||
reg_type_str(env, reg->type));
|
||||
verbose(env, "expected=%s\n", reg_type_str(env, PTR_TO_STACK));
|
||||
return -EACCES;
|
||||
}
|
||||
}
|
||||
@ -4868,7 +4858,7 @@ int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
|
||||
if (register_is_null(reg))
|
||||
return 0;
|
||||
|
||||
if (reg_type_may_be_null(reg->type)) {
|
||||
if (type_may_be_null(reg->type)) {
|
||||
/* Assuming that the register contains a value check if the memory
|
||||
* access is safe. Temporarily save and restore the register's state as
|
||||
* the conversion shouldn't be visible to a caller.
|
||||
@ -5016,9 +5006,8 @@ static int process_timer_func(struct bpf_verifier_env *env, int regno,
|
||||
|
||||
static bool arg_type_is_mem_ptr(enum bpf_arg_type type)
|
||||
{
|
||||
return type == ARG_PTR_TO_MEM ||
|
||||
type == ARG_PTR_TO_MEM_OR_NULL ||
|
||||
type == ARG_PTR_TO_UNINIT_MEM;
|
||||
return base_type(type) == ARG_PTR_TO_MEM ||
|
||||
base_type(type) == ARG_PTR_TO_UNINIT_MEM;
|
||||
}
|
||||
|
||||
static bool arg_type_is_mem_size(enum bpf_arg_type type)
|
||||
@ -5123,8 +5112,7 @@ static const struct bpf_reg_types mem_types = {
|
||||
PTR_TO_MAP_KEY,
|
||||
PTR_TO_MAP_VALUE,
|
||||
PTR_TO_MEM,
|
||||
PTR_TO_RDONLY_BUF,
|
||||
PTR_TO_RDWR_BUF,
|
||||
PTR_TO_BUF,
|
||||
},
|
||||
};
|
||||
|
||||
@ -5155,31 +5143,26 @@ static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = {
|
||||
[ARG_PTR_TO_MAP_KEY] = &map_key_value_types,
|
||||
[ARG_PTR_TO_MAP_VALUE] = &map_key_value_types,
|
||||
[ARG_PTR_TO_UNINIT_MAP_VALUE] = &map_key_value_types,
|
||||
[ARG_PTR_TO_MAP_VALUE_OR_NULL] = &map_key_value_types,
|
||||
[ARG_CONST_SIZE] = &scalar_types,
|
||||
[ARG_CONST_SIZE_OR_ZERO] = &scalar_types,
|
||||
[ARG_CONST_ALLOC_SIZE_OR_ZERO] = &scalar_types,
|
||||
[ARG_CONST_MAP_PTR] = &const_map_ptr_types,
|
||||
[ARG_PTR_TO_CTX] = &context_types,
|
||||
[ARG_PTR_TO_CTX_OR_NULL] = &context_types,
|
||||
[ARG_PTR_TO_SOCK_COMMON] = &sock_types,
|
||||
#ifdef CONFIG_NET
|
||||
[ARG_PTR_TO_BTF_ID_SOCK_COMMON] = &btf_id_sock_common_types,
|
||||
#endif
|
||||
[ARG_PTR_TO_SOCKET] = &fullsock_types,
|
||||
[ARG_PTR_TO_SOCKET_OR_NULL] = &fullsock_types,
|
||||
[ARG_PTR_TO_BTF_ID] = &btf_ptr_types,
|
||||
[ARG_PTR_TO_SPIN_LOCK] = &spin_lock_types,
|
||||
[ARG_PTR_TO_MEM] = &mem_types,
|
||||
[ARG_PTR_TO_MEM_OR_NULL] = &mem_types,
|
||||
[ARG_PTR_TO_UNINIT_MEM] = &mem_types,
|
||||
[ARG_PTR_TO_ALLOC_MEM] = &alloc_mem_types,
|
||||
[ARG_PTR_TO_ALLOC_MEM_OR_NULL] = &alloc_mem_types,
|
||||
[ARG_PTR_TO_INT] = &int_ptr_types,
|
||||
[ARG_PTR_TO_LONG] = &int_ptr_types,
|
||||
[ARG_PTR_TO_PERCPU_BTF_ID] = &percpu_btf_ptr_types,
|
||||
[ARG_PTR_TO_FUNC] = &func_ptr_types,
|
||||
[ARG_PTR_TO_STACK_OR_NULL] = &stack_ptr_types,
|
||||
[ARG_PTR_TO_STACK] = &stack_ptr_types,
|
||||
[ARG_PTR_TO_CONST_STR] = &const_str_ptr_types,
|
||||
[ARG_PTR_TO_TIMER] = &timer_types,
|
||||
};
|
||||
@@ -5193,12 +5176,27 @@ static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
        const struct bpf_reg_types *compatible;
        int i, j;

        compatible = compatible_reg_types[arg_type];
        compatible = compatible_reg_types[base_type(arg_type)];
        if (!compatible) {
                verbose(env, "verifier internal error: unsupported arg type %d\n", arg_type);
                return -EFAULT;
        }

        /* ARG_PTR_TO_MEM + RDONLY is compatible with PTR_TO_MEM and PTR_TO_MEM + RDONLY,
         * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM and NOT with PTR_TO_MEM + RDONLY
         *
         * Same for MAYBE_NULL:
         *
         * ARG_PTR_TO_MEM + MAYBE_NULL is compatible with PTR_TO_MEM and PTR_TO_MEM + MAYBE_NULL,
         * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM but NOT with PTR_TO_MEM + MAYBE_NULL
         *
         * Therefore we fold these flags depending on the arg_type before comparison.
         */
        if (arg_type & MEM_RDONLY)
                type &= ~MEM_RDONLY;
        if (arg_type & PTR_MAYBE_NULL)
                type &= ~PTR_MAYBE_NULL;

        for (i = 0; i < ARRAY_SIZE(compatible->types); i++) {
                expected = compatible->types[i];
                if (expected == NOT_INIT)
@@ -5208,14 +5206,14 @@ static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
                        goto found;
        }

        verbose(env, "R%d type=%s expected=", regno, reg_type_str[type]);
        verbose(env, "R%d type=%s expected=", regno, reg_type_str(env, reg->type));
        for (j = 0; j + 1 < i; j++)
                verbose(env, "%s, ", reg_type_str[compatible->types[j]]);
        verbose(env, "%s\n", reg_type_str[compatible->types[j]]);
                verbose(env, "%s, ", reg_type_str(env, compatible->types[j]));
        verbose(env, "%s\n", reg_type_str(env, compatible->types[j]));
        return -EACCES;

found:
        if (type == PTR_TO_BTF_ID) {
        if (reg->type == PTR_TO_BTF_ID) {
                if (!arg_btf_id) {
                if (!compatible->btf_id) {
                        verbose(env, "verifier internal error: missing arg compatible BTF ID\n");
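To make the folding rule described in the comment above concrete, here is a small standalone sketch (simplified constants and a made-up helper name, reg_matches_arg, used purely for illustration; it is not a kernel function) of how an arg_type's flags gate which register flags are acceptable:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel types; values are illustrative only. */
enum { PTR_TO_MEM = 1 };
enum { PTR_MAYBE_NULL = 1 << 8, MEM_RDONLY = 1 << 9 };
#define BASE_MASK 0xff

/* Mirrors the folding logic in check_reg_type(): an arg that carries a flag
 * tolerates that flag on the register; an arg without the flag does not.
 */
static bool reg_matches_arg(unsigned int reg_type, unsigned int arg_type)
{
        if (arg_type & MEM_RDONLY)
                reg_type &= ~MEM_RDONLY;
        if (arg_type & PTR_MAYBE_NULL)
                reg_type &= ~PTR_MAYBE_NULL;

        /* after folding, only the base types must agree and no stray
         * register flags may remain
         */
        return (reg_type & BASE_MASK) == (arg_type & BASE_MASK) &&
               (reg_type & ~BASE_MASK) == 0;
}

int main(void)
{
        unsigned int arg_rdonly = PTR_TO_MEM | MEM_RDONLY;      /* helper only reads */
        unsigned int arg_plain  = PTR_TO_MEM;                   /* helper may write  */

        printf("%d\n", reg_matches_arg(PTR_TO_MEM | MEM_RDONLY, arg_rdonly)); /* 1 */
        printf("%d\n", reg_matches_arg(PTR_TO_MEM,              arg_rdonly)); /* 1 */
        printf("%d\n", reg_matches_arg(PTR_TO_MEM | MEM_RDONLY, arg_plain));  /* 0 */
        return 0;
}

The third call is the case patch 8/9 cares about: a read-only register may not be passed to a helper whose argument was not tagged MEM_RDONLY, because that helper is allowed to write through it.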
@ -5274,15 +5272,14 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
|
||||
return -EACCES;
|
||||
}
|
||||
|
||||
if (arg_type == ARG_PTR_TO_MAP_VALUE ||
|
||||
arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE ||
|
||||
arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL) {
|
||||
if (base_type(arg_type) == ARG_PTR_TO_MAP_VALUE ||
|
||||
base_type(arg_type) == ARG_PTR_TO_UNINIT_MAP_VALUE) {
|
||||
err = resolve_map_arg_type(env, meta, &arg_type);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
if (register_is_null(reg) && arg_type_may_be_null(arg_type))
|
||||
if (register_is_null(reg) && type_may_be_null(arg_type))
|
||||
/* A NULL register has a SCALAR_VALUE type, so skip
|
||||
* type checking.
|
||||
*/
|
||||
@ -5351,10 +5348,11 @@ skip_type_check:
|
||||
err = check_helper_mem_access(env, regno,
|
||||
meta->map_ptr->key_size, false,
|
||||
NULL);
|
||||
} else if (arg_type == ARG_PTR_TO_MAP_VALUE ||
|
||||
(arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL &&
|
||||
!register_is_null(reg)) ||
|
||||
arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) {
|
||||
} else if (base_type(arg_type) == ARG_PTR_TO_MAP_VALUE ||
|
||||
base_type(arg_type) == ARG_PTR_TO_UNINIT_MAP_VALUE) {
|
||||
if (type_may_be_null(arg_type) && register_is_null(reg))
|
||||
return 0;
|
||||
|
||||
/* bpf_map_xxx(..., map_ptr, ..., value) call:
|
||||
* check [value, value + map->value_size) validity
|
||||
*/
|
||||
@ -6484,6 +6482,8 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
|
||||
int *insn_idx_p)
|
||||
{
|
||||
const struct bpf_func_proto *fn = NULL;
|
||||
enum bpf_return_type ret_type;
|
||||
enum bpf_type_flag ret_flag;
|
||||
struct bpf_reg_state *regs;
|
||||
struct bpf_call_arg_meta meta;
|
||||
int insn_idx = *insn_idx_p;
|
||||
@ -6623,13 +6623,14 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
|
||||
regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
|
||||
|
||||
/* update return register (already marked as written above) */
|
||||
if (fn->ret_type == RET_INTEGER) {
|
||||
ret_type = fn->ret_type;
|
||||
ret_flag = type_flag(fn->ret_type);
|
||||
if (ret_type == RET_INTEGER) {
|
||||
/* sets type to SCALAR_VALUE */
|
||||
mark_reg_unknown(env, regs, BPF_REG_0);
|
||||
} else if (fn->ret_type == RET_VOID) {
|
||||
} else if (ret_type == RET_VOID) {
|
||||
regs[BPF_REG_0].type = NOT_INIT;
|
||||
} else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL ||
|
||||
fn->ret_type == RET_PTR_TO_MAP_VALUE) {
|
||||
} else if (base_type(ret_type) == RET_PTR_TO_MAP_VALUE) {
|
||||
/* There is no offset yet applied, variable or fixed */
|
||||
mark_reg_known_zero(env, regs, BPF_REG_0);
|
||||
/* remember map_ptr, so that check_map_access()
|
||||
@ -6643,28 +6644,25 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
|
||||
}
|
||||
regs[BPF_REG_0].map_ptr = meta.map_ptr;
|
||||
regs[BPF_REG_0].map_uid = meta.map_uid;
|
||||
if (fn->ret_type == RET_PTR_TO_MAP_VALUE) {
|
||||
regs[BPF_REG_0].type = PTR_TO_MAP_VALUE;
|
||||
if (map_value_has_spin_lock(meta.map_ptr))
|
||||
regs[BPF_REG_0].id = ++env->id_gen;
|
||||
} else {
|
||||
regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
|
||||
regs[BPF_REG_0].type = PTR_TO_MAP_VALUE | ret_flag;
|
||||
if (!type_may_be_null(ret_type) &&
|
||||
map_value_has_spin_lock(meta.map_ptr)) {
|
||||
regs[BPF_REG_0].id = ++env->id_gen;
|
||||
}
|
||||
} else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) {
|
||||
} else if (base_type(ret_type) == RET_PTR_TO_SOCKET) {
|
||||
mark_reg_known_zero(env, regs, BPF_REG_0);
|
||||
regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL;
|
||||
} else if (fn->ret_type == RET_PTR_TO_SOCK_COMMON_OR_NULL) {
|
||||
regs[BPF_REG_0].type = PTR_TO_SOCKET | ret_flag;
|
||||
} else if (base_type(ret_type) == RET_PTR_TO_SOCK_COMMON) {
|
||||
mark_reg_known_zero(env, regs, BPF_REG_0);
|
||||
regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON_OR_NULL;
|
||||
} else if (fn->ret_type == RET_PTR_TO_TCP_SOCK_OR_NULL) {
|
||||
regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON | ret_flag;
|
||||
} else if (base_type(ret_type) == RET_PTR_TO_TCP_SOCK) {
|
||||
mark_reg_known_zero(env, regs, BPF_REG_0);
|
||||
regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL;
|
||||
} else if (fn->ret_type == RET_PTR_TO_ALLOC_MEM_OR_NULL) {
|
||||
regs[BPF_REG_0].type = PTR_TO_TCP_SOCK | ret_flag;
|
||||
} else if (base_type(ret_type) == RET_PTR_TO_ALLOC_MEM) {
|
||||
mark_reg_known_zero(env, regs, BPF_REG_0);
|
||||
regs[BPF_REG_0].type = PTR_TO_MEM_OR_NULL;
|
||||
regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
|
||||
regs[BPF_REG_0].mem_size = meta.mem_size;
|
||||
} else if (fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL ||
|
||||
fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID) {
|
||||
} else if (base_type(ret_type) == RET_PTR_TO_MEM_OR_BTF_ID) {
|
||||
const struct btf_type *t;
|
||||
|
||||
mark_reg_known_zero(env, regs, BPF_REG_0);
|
||||
@ -6682,29 +6680,30 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
|
||||
tname, PTR_ERR(ret));
|
||||
return -EINVAL;
|
||||
}
|
||||
regs[BPF_REG_0].type =
|
||||
fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID ?
|
||||
PTR_TO_MEM : PTR_TO_MEM_OR_NULL;
|
||||
regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
|
||||
regs[BPF_REG_0].mem_size = tsize;
|
||||
} else {
|
||||
regs[BPF_REG_0].type =
|
||||
fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID ?
|
||||
PTR_TO_BTF_ID : PTR_TO_BTF_ID_OR_NULL;
|
||||
/* MEM_RDONLY may be carried from ret_flag, but it
|
||||
* doesn't apply on PTR_TO_BTF_ID. Fold it, otherwise
|
||||
* it will confuse the check of PTR_TO_BTF_ID in
|
||||
* check_mem_access().
|
||||
*/
|
||||
ret_flag &= ~MEM_RDONLY;
|
||||
|
||||
regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
|
||||
regs[BPF_REG_0].btf = meta.ret_btf;
|
||||
regs[BPF_REG_0].btf_id = meta.ret_btf_id;
|
||||
}
|
||||
} else if (fn->ret_type == RET_PTR_TO_BTF_ID_OR_NULL ||
|
||||
fn->ret_type == RET_PTR_TO_BTF_ID) {
|
||||
} else if (base_type(ret_type) == RET_PTR_TO_BTF_ID) {
|
||||
int ret_btf_id;
|
||||
|
||||
mark_reg_known_zero(env, regs, BPF_REG_0);
|
||||
regs[BPF_REG_0].type = fn->ret_type == RET_PTR_TO_BTF_ID ?
|
||||
PTR_TO_BTF_ID :
|
||||
PTR_TO_BTF_ID_OR_NULL;
|
||||
regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
|
||||
ret_btf_id = *fn->ret_btf_id;
|
||||
if (ret_btf_id == 0) {
|
||||
verbose(env, "invalid return type %d of func %s#%d\n",
|
||||
fn->ret_type, func_id_name(func_id), func_id);
|
||||
verbose(env, "invalid return type %u of func %s#%d\n",
|
||||
base_type(ret_type), func_id_name(func_id),
|
||||
func_id);
|
||||
return -EINVAL;
|
||||
}
|
||||
/* current BPF helper definitions are only coming from
|
||||
@ -6713,12 +6712,12 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
|
||||
regs[BPF_REG_0].btf = btf_vmlinux;
|
||||
regs[BPF_REG_0].btf_id = ret_btf_id;
|
||||
} else {
|
||||
verbose(env, "unknown return type %d of func %s#%d\n",
|
||||
fn->ret_type, func_id_name(func_id), func_id);
|
||||
verbose(env, "unknown return type %u of func %s#%d\n",
|
||||
base_type(ret_type), func_id_name(func_id), func_id);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (reg_type_may_be_null(regs[BPF_REG_0].type))
|
||||
if (type_may_be_null(regs[BPF_REG_0].type))
|
||||
regs[BPF_REG_0].id = ++env->id_gen;
|
||||
|
||||
if (is_ptr_cast_function(func_id)) {
|
||||
@ -6927,25 +6926,25 @@ static bool check_reg_sane_offset(struct bpf_verifier_env *env,
|
||||
|
||||
if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
|
||||
verbose(env, "math between %s pointer and %lld is not allowed\n",
|
||||
reg_type_str[type], val);
|
||||
reg_type_str(env, type), val);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
|
||||
verbose(env, "%s pointer offset %d is not allowed\n",
|
||||
reg_type_str[type], reg->off);
|
||||
reg_type_str(env, type), reg->off);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (smin == S64_MIN) {
|
||||
verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
|
||||
reg_type_str[type]);
|
||||
reg_type_str(env, type));
|
||||
return false;
|
||||
}
|
||||
|
||||
if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
|
||||
verbose(env, "value %lld makes %s pointer be out of bounds\n",
|
||||
smin, reg_type_str[type]);
|
||||
smin, reg_type_str(env, type));
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -7322,11 +7321,13 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
|
||||
return -EACCES;
|
||||
}
|
||||
|
||||
switch (ptr_reg->type) {
|
||||
case PTR_TO_MAP_VALUE_OR_NULL:
|
||||
if (ptr_reg->type & PTR_MAYBE_NULL) {
|
||||
verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
|
||||
dst, reg_type_str[ptr_reg->type]);
|
||||
dst, reg_type_str(env, ptr_reg->type));
|
||||
return -EACCES;
|
||||
}
|
||||
|
||||
switch (base_type(ptr_reg->type)) {
|
||||
case CONST_PTR_TO_MAP:
|
||||
/* smin_val represents the known value */
|
||||
if (known && smin_val == 0 && opcode == BPF_ADD)
|
||||
@ -7334,14 +7335,11 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
|
||||
fallthrough;
|
||||
case PTR_TO_PACKET_END:
|
||||
case PTR_TO_SOCKET:
|
||||
case PTR_TO_SOCKET_OR_NULL:
|
||||
case PTR_TO_SOCK_COMMON:
|
||||
case PTR_TO_SOCK_COMMON_OR_NULL:
|
||||
case PTR_TO_TCP_SOCK:
|
||||
case PTR_TO_TCP_SOCK_OR_NULL:
|
||||
case PTR_TO_XDP_SOCK:
|
||||
verbose(env, "R%d pointer arithmetic on %s prohibited\n",
|
||||
dst, reg_type_str[ptr_reg->type]);
|
||||
dst, reg_type_str(env, ptr_reg->type));
|
||||
return -EACCES;
|
||||
default:
|
||||
break;
|
||||
@ -9060,7 +9058,7 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state,
|
||||
struct bpf_reg_state *reg, u32 id,
|
||||
bool is_null)
|
||||
{
|
||||
if (reg_type_may_be_null(reg->type) && reg->id == id &&
|
||||
if (type_may_be_null(reg->type) && reg->id == id &&
|
||||
!WARN_ON_ONCE(!reg->id)) {
|
||||
/* Old offset (both fixed and variable parts) should
|
||||
* have been known-zero, because we don't allow pointer
|
||||
@ -9438,7 +9436,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
|
||||
*/
|
||||
if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K &&
|
||||
insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
|
||||
reg_type_may_be_null(dst_reg->type)) {
|
||||
type_may_be_null(dst_reg->type)) {
|
||||
/* Mark all identical registers in each branch as either
|
||||
* safe or unknown depending R == 0 or R != 0 conditional.
|
||||
*/
|
||||
@ -9493,7 +9491,7 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
|
||||
mark_reg_known_zero(env, regs, insn->dst_reg);
|
||||
|
||||
dst_reg->type = aux->btf_var.reg_type;
|
||||
switch (dst_reg->type) {
|
||||
switch (base_type(dst_reg->type)) {
|
||||
case PTR_TO_MEM:
|
||||
dst_reg->mem_size = aux->btf_var.mem_size;
|
||||
break;
|
||||
@ -9692,7 +9690,7 @@ static int check_return_code(struct bpf_verifier_env *env)
|
||||
/* enforce return zero from async callbacks like timer */
if (reg->type != SCALAR_VALUE) {
verbose(env, "In async callback the register R0 is not a known value (%s)\n",
- reg_type_str[reg->type]);
+ reg_type_str(env, reg->type));
return -EINVAL;
}

@@ -9706,7 +9704,7 @@ static int check_return_code(struct bpf_verifier_env *env)
if (is_subprog) {
if (reg->type != SCALAR_VALUE) {
verbose(env, "At subprogram exit the register R0 is not a scalar value (%s)\n",
- reg_type_str[reg->type]);
+ reg_type_str(env, reg->type));
return -EINVAL;
}
return 0;

@@ -9770,7 +9768,7 @@ static int check_return_code(struct bpf_verifier_env *env)
if (reg->type != SCALAR_VALUE) {
verbose(env, "At program exit the register R0 is not a known value (%s)\n",
- reg_type_str[reg->type]);
+ reg_type_str(env, reg->type));
return -EINVAL;
}

@@ -10627,7 +10625,7 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
return true;
if (rcur->type == NOT_INIT)
return false;
- switch (rold->type) {
+ switch (base_type(rold->type)) {
case SCALAR_VALUE:
if (env->explore_alu_limits)
return false;

@@ -10649,6 +10647,22 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
}
case PTR_TO_MAP_KEY:
case PTR_TO_MAP_VALUE:
+ /* a PTR_TO_MAP_VALUE could be safe to use as a
+ * PTR_TO_MAP_VALUE_OR_NULL into the same map.
+ * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
+ * checked, doing so could have affected others with the same
+ * id, and we can't check for that because we lost the id when
+ * we converted to a PTR_TO_MAP_VALUE.
+ */
+ if (type_may_be_null(rold->type)) {
+ if (!type_may_be_null(rcur->type))
+ return false;
+ if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
+ return false;
+ /* Check our ids match any regs they're supposed to */
+ return check_ids(rold->id, rcur->id, idmap);
+ }
/* If the new min/max/var_off satisfy the old ones and
* everything else matches, we are OK.
* 'id' is not compared, since it's only used for maps with

@@ -10660,20 +10674,6 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
range_within(rold, rcur) &&
tnum_in(rold->var_off, rcur->var_off);
- case PTR_TO_MAP_VALUE_OR_NULL:
- /* a PTR_TO_MAP_VALUE could be safe to use as a
- * PTR_TO_MAP_VALUE_OR_NULL into the same map.
- * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
- * checked, doing so could have affected others with the same
- * id, and we can't check for that because we lost the id when
- * we converted to a PTR_TO_MAP_VALUE.
- */
- if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL)
- return false;
- if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
- return false;
- /* Check our ids match any regs they're supposed to */
- return check_ids(rold->id, rcur->id, idmap);
case PTR_TO_PACKET_META:
case PTR_TO_PACKET:
if (rcur->type != rold->type)

@@ -10702,11 +10702,8 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
case PTR_TO_PACKET_END:
case PTR_TO_FLOW_KEYS:
case PTR_TO_SOCKET:
- case PTR_TO_SOCKET_OR_NULL:
case PTR_TO_SOCK_COMMON:
- case PTR_TO_SOCK_COMMON_OR_NULL:
case PTR_TO_TCP_SOCK:
- case PTR_TO_TCP_SOCK_OR_NULL:
case PTR_TO_XDP_SOCK:
/* Only valid matches are exact, which memcmp() above
* would have accepted

@@ -11232,17 +11229,13 @@ next:
/* Return true if it's OK to have the same insn return a different type. */
static bool reg_type_mismatch_ok(enum bpf_reg_type type)
{
- switch (type) {
+ switch (base_type(type)) {
case PTR_TO_CTX:
case PTR_TO_SOCKET:
- case PTR_TO_SOCKET_OR_NULL:
case PTR_TO_SOCK_COMMON:
- case PTR_TO_SOCK_COMMON_OR_NULL:
case PTR_TO_TCP_SOCK:
- case PTR_TO_TCP_SOCK_OR_NULL:
case PTR_TO_XDP_SOCK:
case PTR_TO_BTF_ID:
- case PTR_TO_BTF_ID_OR_NULL:
return false;
default:
return true;

@@ -11468,7 +11461,7 @@ static int do_check(struct bpf_verifier_env *env)
if (is_ctx_reg(env, insn->dst_reg)) {
verbose(env, "BPF_ST stores into R%d %s is not allowed\n",
insn->dst_reg,
- reg_type_str[reg_state(env, insn->dst_reg)->type]);
+ reg_type_str(env, reg_state(env, insn->dst_reg)->type));
return -EACCES;
}

@@ -11721,7 +11714,7 @@ static int check_pseudo_btf_id(struct bpf_verifier_env *env,
err = -EINVAL;
goto err_put;
}
- aux->btf_var.reg_type = PTR_TO_MEM;
+ aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY;
aux->btf_var.mem_size = tsize;
} else {
aux->btf_var.reg_type = PTR_TO_BTF_ID;

@@ -13621,7 +13614,7 @@ static int do_check_common(struct bpf_verifier_env *env, int subprog)
mark_reg_known_zero(env, regs, i);
else if (regs[i].type == SCALAR_VALUE)
mark_reg_unknown(env, regs, i);
- else if (regs[i].type == PTR_TO_MEM_OR_NULL) {
+ else if (base_type(regs[i].type) == PTR_TO_MEM) {
const u32 mem_size = regs[i].mem_size;

mark_reg_known_zero(env, regs, i);
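The verifier changes above all follow the same scheme: a register or argument type is now a base type in the low bits plus modifier flags (PTR_MAYBE_NULL, MEM_RDONLY) in the high bits, and helpers such as base_type() and type_may_be_null() mask the flags off before comparing. Below is a minimal user-space sketch of that decomposition, for illustration only; the bit width, the constant values and the demo_reg_type_str() helper are made up for the example and are not the kernel's definitions.

#include <stdio.h>

/* Low bits hold the base type, high bits hold modifier flags.
 * Widths and values here are illustrative, not the kernel's.
 */
#define DEMO_BASE_TYPE_BITS 8
#define DEMO_BASE_TYPE_MASK ((1u << DEMO_BASE_TYPE_BITS) - 1)

enum demo_base_type { PTR_TO_MEM = 1, PTR_TO_BUF = 2 };

enum demo_type_flag {
	PTR_MAYBE_NULL = 1u << (DEMO_BASE_TYPE_BITS + 0), /* pointer may be NULL */
	MEM_RDONLY     = 1u << (DEMO_BASE_TYPE_BITS + 1), /* memory is read-only */
};

static unsigned int base_type(unsigned int type)
{
	return type & DEMO_BASE_TYPE_MASK;
}

static unsigned int type_flag(unsigned int type)
{
	return type & ~DEMO_BASE_TYPE_MASK;
}

/* Render a composed type as "rdonly_" prefix + base name + "_or_null" suffix. */
static const char *demo_reg_type_str(char *buf, size_t len, unsigned int type)
{
	static const char * const base_str[] = {
		[PTR_TO_MEM] = "mem",
		[PTR_TO_BUF] = "buf",
	};

	snprintf(buf, len, "%s%s%s",
		 (type & MEM_RDONLY) ? "rdonly_" : "",
		 base_str[base_type(type)],
		 (type & PTR_MAYBE_NULL) ? "_or_null" : "");
	return buf;
}

int main(void)
{
	char buf[32];
	unsigned int t = PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY;

	/* base_type(t) still compares equal to PTR_TO_MEM; flags stay separate. */
	printf("%s (base=%u, flags=0x%x)\n",
	       demo_reg_type_str(buf, sizeof(buf), t), base_type(t), type_flag(t));
	return 0;
}

Compiled and run, this prints "rdonly_mem_or_null", the kind of composed string the new reg_type_str(env, ...) calls above are meant to produce, while switch statements keep matching on base_type() alone.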
@@ -345,7 +345,7 @@ static const struct bpf_func_proto bpf_probe_write_user_proto = {
.gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_ANYTHING,
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE,
};

@@ -394,7 +394,7 @@ static const struct bpf_func_proto bpf_trace_printk_proto = {
.func = bpf_trace_printk,
.gpl_only = true,
.ret_type = RET_INTEGER,
- .arg1_type = ARG_PTR_TO_MEM,
+ .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg2_type = ARG_CONST_SIZE,
};

@@ -450,9 +450,9 @@ static const struct bpf_func_proto bpf_trace_vprintk_proto = {
.func = bpf_trace_vprintk,
.gpl_only = true,
.ret_type = RET_INTEGER,
- .arg1_type = ARG_PTR_TO_MEM,
+ .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg2_type = ARG_CONST_SIZE,
- .arg3_type = ARG_PTR_TO_MEM_OR_NULL,
+ .arg3_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
.arg4_type = ARG_CONST_SIZE_OR_ZERO,
};

@@ -492,9 +492,9 @@ static const struct bpf_func_proto bpf_seq_printf_proto = {
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID,
.arg1_btf_id = &btf_seq_file_ids[0],
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE,
- .arg4_type = ARG_PTR_TO_MEM_OR_NULL,
+ .arg4_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

@@ -509,7 +509,7 @@ static const struct bpf_func_proto bpf_seq_write_proto = {
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID,
.arg1_btf_id = &btf_seq_file_ids[0],
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE_OR_ZERO,
};

@@ -533,7 +533,7 @@ static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID,
.arg1_btf_id = &btf_seq_file_ids[0],
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
};

@@ -694,7 +694,7 @@ static const struct bpf_func_proto bpf_perf_event_output_proto = {
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
- .arg4_type = ARG_PTR_TO_MEM,
+ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

@@ -1004,7 +1004,7 @@ const struct bpf_func_proto bpf_snprintf_btf_proto = {
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_MEM,
.arg2_type = ARG_CONST_SIZE,
- .arg3_type = ARG_PTR_TO_MEM,
+ .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg4_type = ARG_CONST_SIZE,
.arg5_type = ARG_ANYTHING,
};

@@ -1334,7 +1334,7 @@ static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
- .arg4_type = ARG_PTR_TO_MEM,
+ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

@@ -1556,7 +1556,7 @@ static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
- .arg4_type = ARG_PTR_TO_MEM,
+ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

@@ -1610,7 +1610,7 @@ static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
.gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
};
@@ -929,7 +929,7 @@ static struct bpf_iter_reg bpf_sk_storage_map_reg_info = {
{ offsetof(struct bpf_iter__bpf_sk_storage_map, sk),
PTR_TO_BTF_ID_OR_NULL },
{ offsetof(struct bpf_iter__bpf_sk_storage_map, value),
- PTR_TO_RDWR_BUF_OR_NULL },
+ PTR_TO_BUF | PTR_MAYBE_NULL },
},
.seq_info = &iter_seq_info,
};
@@ -1712,7 +1712,7 @@ static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
- .arg3_type = ARG_PTR_TO_MEM,
+ .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg4_type = ARG_CONST_SIZE,
.arg5_type = ARG_ANYTHING,
};

@@ -2017,9 +2017,9 @@ static const struct bpf_func_proto bpf_csum_diff_proto = {
.gpl_only = false,
.pkt_access = true,
.ret_type = RET_INTEGER,
- .arg1_type = ARG_PTR_TO_MEM_OR_NULL,
+ .arg1_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
.arg2_type = ARG_CONST_SIZE_OR_ZERO,
- .arg3_type = ARG_PTR_TO_MEM_OR_NULL,
+ .arg3_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
.arg4_type = ARG_CONST_SIZE_OR_ZERO,
.arg5_type = ARG_ANYTHING,
};

@@ -2540,7 +2540,7 @@ static const struct bpf_func_proto bpf_redirect_neigh_proto = {
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_ANYTHING,
- .arg2_type = ARG_PTR_TO_MEM_OR_NULL,
+ .arg2_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
};

@@ -4173,7 +4173,7 @@ static const struct bpf_func_proto bpf_skb_event_output_proto = {
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
- .arg4_type = ARG_PTR_TO_MEM,
+ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

@@ -4187,7 +4187,7 @@ const struct bpf_func_proto bpf_skb_output_proto = {
.arg1_btf_id = &bpf_skb_output_btf_ids[0],
.arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
- .arg4_type = ARG_PTR_TO_MEM,
+ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

@@ -4370,7 +4370,7 @@ static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE,
.arg4_type = ARG_ANYTHING,
};

@@ -4396,7 +4396,7 @@ static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = {
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE,
};

@@ -4566,7 +4566,7 @@ static const struct bpf_func_proto bpf_xdp_event_output_proto = {
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
- .arg4_type = ARG_PTR_TO_MEM,
+ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

@@ -4580,7 +4580,7 @@ const struct bpf_func_proto bpf_xdp_output_proto = {
.arg1_btf_id = &bpf_xdp_output_btf_ids[0],
.arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
- .arg4_type = ARG_PTR_TO_MEM,
+ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

@@ -5066,7 +5066,7 @@ const struct bpf_func_proto bpf_sk_setsockopt_proto = {
.arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_ANYTHING,
- .arg4_type = ARG_PTR_TO_MEM,
+ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg5_type = ARG_CONST_SIZE,
};

@@ -5100,7 +5100,7 @@ static const struct bpf_func_proto bpf_sock_addr_setsockopt_proto = {
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_ANYTHING,
- .arg4_type = ARG_PTR_TO_MEM,
+ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg5_type = ARG_CONST_SIZE,
};

@@ -5134,7 +5134,7 @@ static const struct bpf_func_proto bpf_sock_ops_setsockopt_proto = {
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_ANYTHING,
- .arg4_type = ARG_PTR_TO_MEM,
+ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg5_type = ARG_CONST_SIZE,
};

@@ -5309,7 +5309,7 @@ static const struct bpf_func_proto bpf_bind_proto = {
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE,
};

@@ -5897,7 +5897,7 @@ static const struct bpf_func_proto bpf_lwt_in_push_encap_proto = {
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
- .arg3_type = ARG_PTR_TO_MEM,
+ .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg4_type = ARG_CONST_SIZE
};

@@ -5907,7 +5907,7 @@ static const struct bpf_func_proto bpf_lwt_xmit_push_encap_proto = {
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
- .arg3_type = ARG_PTR_TO_MEM,
+ .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg4_type = ARG_CONST_SIZE
};

@@ -5950,7 +5950,7 @@ static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = {
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
- .arg3_type = ARG_PTR_TO_MEM,
+ .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg4_type = ARG_CONST_SIZE
};

@@ -6038,7 +6038,7 @@ static const struct bpf_func_proto bpf_lwt_seg6_action_proto = {
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
- .arg3_type = ARG_PTR_TO_MEM,
+ .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg4_type = ARG_CONST_SIZE
};

@@ -6263,7 +6263,7 @@ static const struct bpf_func_proto bpf_skc_lookup_tcp_proto = {
.pkt_access = true,
.ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,

@@ -6282,7 +6282,7 @@ static const struct bpf_func_proto bpf_sk_lookup_tcp_proto = {
.pkt_access = true,
.ret_type = RET_PTR_TO_SOCKET_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,

@@ -6301,7 +6301,7 @@ static const struct bpf_func_proto bpf_sk_lookup_udp_proto = {
.pkt_access = true,
.ret_type = RET_PTR_TO_SOCKET_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,

@@ -6338,7 +6338,7 @@ static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = {
.pkt_access = true,
.ret_type = RET_PTR_TO_SOCKET_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,

@@ -6361,7 +6361,7 @@ static const struct bpf_func_proto bpf_xdp_skc_lookup_tcp_proto = {
.pkt_access = true,
.ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,

@@ -6384,7 +6384,7 @@ static const struct bpf_func_proto bpf_xdp_sk_lookup_tcp_proto = {
.pkt_access = true,
.ret_type = RET_PTR_TO_SOCKET_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,

@@ -6403,7 +6403,7 @@ static const struct bpf_func_proto bpf_sock_addr_skc_lookup_tcp_proto = {
.gpl_only = false,
.ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,

@@ -6422,7 +6422,7 @@ static const struct bpf_func_proto bpf_sock_addr_sk_lookup_tcp_proto = {
.gpl_only = false,
.ret_type = RET_PTR_TO_SOCKET_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,

@@ -6441,7 +6441,7 @@ static const struct bpf_func_proto bpf_sock_addr_sk_lookup_udp_proto = {
.gpl_only = false,
.ret_type = RET_PTR_TO_SOCKET_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,

@@ -6754,9 +6754,9 @@ static const struct bpf_func_proto bpf_tcp_check_syncookie_proto = {
.pkt_access = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE,
- .arg4_type = ARG_PTR_TO_MEM,
+ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg5_type = ARG_CONST_SIZE,
};

@@ -6823,9 +6823,9 @@ static const struct bpf_func_proto bpf_tcp_gen_syncookie_proto = {
.pkt_access = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE,
- .arg4_type = ARG_PTR_TO_MEM,
+ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg5_type = ARG_CONST_SIZE,
};

@@ -7054,7 +7054,7 @@ static const struct bpf_func_proto bpf_sock_ops_store_hdr_opt_proto = {
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE,
.arg4_type = ARG_ANYTHING,
};
@@ -1564,7 +1564,7 @@ static struct bpf_iter_reg sock_map_iter_reg = {
.ctx_arg_info_size = 2,
.ctx_arg_info = {
{ offsetof(struct bpf_iter__sockmap, key),
- PTR_TO_RDONLY_BUF_OR_NULL },
+ PTR_TO_BUF | PTR_MAYBE_NULL | MEM_RDONLY },
{ offsetof(struct bpf_iter__sockmap, sk),
PTR_TO_BTF_ID_OR_NULL },
},
@@ -8,6 +8,7 @@
#include "test_ksyms_btf_null_check.skel.h"
#include "test_ksyms_weak.skel.h"
#include "test_ksyms_weak.lskel.h"
+ #include "test_ksyms_btf_write_check.skel.h"

static int duration;

@@ -137,6 +138,16 @@ cleanup:
test_ksyms_weak_lskel__destroy(skel);
}

+ static void test_write_check(void)
+ {
+ struct test_ksyms_btf_write_check *skel;
+
+ skel = test_ksyms_btf_write_check__open_and_load();
+ ASSERT_ERR_PTR(skel, "unexpected load of a prog writing to ksym memory\n");
+
+ test_ksyms_btf_write_check__destroy(skel);
+ }
+
void test_ksyms_btf(void)
{
int percpu_datasec;

@@ -167,4 +178,7 @@ void test_ksyms_btf(void)

if (test__start_subtest("weak_ksyms_lskel"))
test_weak_syms_lskel();

+ if (test__start_subtest("write_check"))
+ test_write_check();
}
@@ -0,0 +1,29 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /* Copyright (c) 2021 Google */
+
+ #include "vmlinux.h"
+
+ #include <bpf/bpf_helpers.h>
+
+ extern const int bpf_prog_active __ksym; /* int type global var. */
+
+ SEC("raw_tp/sys_enter")
+ int handler(const void *ctx)
+ {
+ int *active;
+ __u32 cpu;
+
+ cpu = bpf_get_smp_processor_id();
+ active = (int *)bpf_per_cpu_ptr(&bpf_prog_active, cpu);
+ if (active) {
+ /* Kernel memory obtained from bpf_{per,this}_cpu_ptr
+ * is read-only, should _not_ pass verification.
+ */
+ /* WRITE_ONCE */
+ *(volatile int *)active = -1;
+ }
+
+ return 0;
+ }
+
+ char _license[] SEC("license") = "GPL";
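For contrast with the new selftest program above, which the verifier must now reject, here is a minimal sketch of the read-only counterpart: a program that only loads through the pointer returned by bpf_per_cpu_ptr() should still pass verification, since the returned memory is tagged MEM_RDONLY and only stores through it are refused. This snippet is an illustration, not part of the series; the program and section names are arbitrary.

// SPDX-License-Identifier: GPL-2.0
/* Illustration only: read-only access to the same per-CPU ksym. */

#include "vmlinux.h"

#include <bpf/bpf_helpers.h>

extern const int bpf_prog_active __ksym;

SEC("raw_tp/sys_enter")
int read_handler(const void *ctx)
{
	int *active;
	__u32 cpu;

	cpu = bpf_get_smp_processor_id();
	active = (int *)bpf_per_cpu_ptr(&bpf_prog_active, cpu);

	/* Loads through the read-only pointer are still allowed;
	 * only stores are rejected after this series.
	 */
	return active ? *active : 0;
}

char _license[] SEC("license") = "GPL";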