Mirror of https://github.com/edk2-porting/linux-next.git (synced 2024-12-16 01:04:08 +08:00)
Daniel Borkmann says:

====================
pull-request: bpf 2021-11-16

We've added 12 non-merge commits during the last 5 day(s) which contain
a total of 23 files changed, 573 insertions(+), 73 deletions(-).

The main changes are:

1) Fix pruning regression where the verifier went overly conservative,
   rejecting previously accepted programs, from Alexei Starovoitov and
   Lorenz Bauer.

2) Fix verifier TOCTOU bug when using read-only map's values as constant
   scalars during verification, from Daniel Borkmann.

3) Fix a crash due to a double free in XSK's buffer pool, from Magnus Karlsson.

4) Fix libbpf regression when cross-building runqslower, from Jean-Philippe Brucker.

5) Forbid use of bpf_ktime_get_coarse_ns() and bpf_timer_*() helpers in
   tracing programs due to deadlock possibilities, from Dmitrii Banshchikov.

6) Fix checksum validation in sockmap's udp_read_sock() callback, from Cong Wang.

7) Various BPF sample fixes such as XDP stats in xdp_sample_user, from
   Alexander Lobakin.

8) Fix libbpf gen_loader error handling wrt fd cleanup, from Kumar Kartikeya Dwivedi.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  udp: Validate checksum in udp_read_sock()
  bpf: Fix toctou on read-only map's constant scalar tracking
  samples/bpf: Fix build error due to -isystem removal
  selftests/bpf: Add tests for restricted helpers
  bpf: Forbid bpf_ktime_get_coarse_ns and bpf_timer_* in tracing progs
  libbpf: Perform map fd cleanup for gen_loader in case of error
  samples/bpf: Fix incorrect use of strlen in xdp_redirect_cpu
  tools/runqslower: Fix cross-build
  samples/bpf: Fix summary per-sec stats in xdp_sample_user
  selftests/bpf: Check map in map pruning
  bpf: Fix inner map state pruning regression.
  xsk: Fix crash on double free in buffer pool
====================

Link: https://lore.kernel.org/r/20211116141134.6490-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in commit f083ec3160.
@@ -193,7 +193,7 @@ struct bpf_map {
 	atomic64_t usercnt;
 	struct work_struct work;
 	struct mutex freeze_mutex;
-	u64 writecnt; /* writable mmap cnt; protected by freeze_mutex */
+	atomic64_t writecnt;
 };

 static inline bool map_value_has_spin_lock(const struct bpf_map *map)
@@ -1419,6 +1419,7 @@ void bpf_map_put(struct bpf_map *map);
 void *bpf_map_area_alloc(u64 size, int numa_node);
 void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
 void bpf_map_area_free(void *base);
+bool bpf_map_write_active(const struct bpf_map *map);
 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
 int generic_map_lookup_batch(struct bpf_map *map,
 			     const union bpf_attr *attr,
@@ -1809,6 +1809,8 @@ sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_sysctl_get_new_value_proto;
 	case BPF_FUNC_sysctl_set_new_value:
 		return &bpf_sysctl_set_new_value_proto;
+	case BPF_FUNC_ktime_get_coarse_ns:
+		return &bpf_ktime_get_coarse_ns_proto;
 	default:
 		return cgroup_base_func_proto(func_id, prog);
 	}
@@ -1364,8 +1364,6 @@ bpf_base_func_proto(enum bpf_func_id func_id)
 		return &bpf_ktime_get_ns_proto;
 	case BPF_FUNC_ktime_get_boot_ns:
 		return &bpf_ktime_get_boot_ns_proto;
-	case BPF_FUNC_ktime_get_coarse_ns:
-		return &bpf_ktime_get_coarse_ns_proto;
 	case BPF_FUNC_ringbuf_output:
 		return &bpf_ringbuf_output_proto;
 	case BPF_FUNC_ringbuf_reserve:
@@ -132,6 +132,21 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
 	return map;
 }

+static void bpf_map_write_active_inc(struct bpf_map *map)
+{
+	atomic64_inc(&map->writecnt);
+}
+
+static void bpf_map_write_active_dec(struct bpf_map *map)
+{
+	atomic64_dec(&map->writecnt);
+}
+
+bool bpf_map_write_active(const struct bpf_map *map)
+{
+	return atomic64_read(&map->writecnt) != 0;
+}
+
 static u32 bpf_map_value_size(const struct bpf_map *map)
 {
 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
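The new helpers convert writecnt from a freeze_mutex-protected u64 into a lock-free atomic, so the syscall paths further down can mark a write as in flight without taking the mutex. A minimal userspace sketch of the scheme (hypothetical toy_map type; the real kernel gates writes through map_get_sys_perms() rather than a bare frozen flag):

#include <errno.h>
#include <stdatomic.h>
#include <stdbool.h>

struct toy_map {
	_Atomic long writecnt;	/* stands in for atomic64_t writecnt */
	bool frozen;
	int value;
};

static void map_write_active_inc(struct toy_map *m) { atomic_fetch_add(&m->writecnt, 1); }
static void map_write_active_dec(struct toy_map *m) { atomic_fetch_sub(&m->writecnt, 1); }
static bool map_write_active(struct toy_map *m) { return atomic_load(&m->writecnt) != 0; }

/* writer side: bracket the whole update, including the permission check */
static int map_update(struct toy_map *m, int v)
{
	map_write_active_inc(m);
	if (m->frozen) {
		map_write_active_dec(m);
		return -EPERM;
	}
	m->value = v;
	map_write_active_dec(m);
	return 0;
}

/* freeze refuses while any writer is still inside the bracket */
static int map_freeze(struct toy_map *m)
{
	if (map_write_active(m))
		return -EBUSY;
	m->frozen = true;
	return 0;
}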
@@ -601,11 +616,8 @@ static void bpf_map_mmap_open(struct vm_area_struct *vma)
 {
 	struct bpf_map *map = vma->vm_file->private_data;

-	if (vma->vm_flags & VM_MAYWRITE) {
-		mutex_lock(&map->freeze_mutex);
-		map->writecnt++;
-		mutex_unlock(&map->freeze_mutex);
-	}
+	if (vma->vm_flags & VM_MAYWRITE)
+		bpf_map_write_active_inc(map);
 }

 /* called for all unmapped memory region (including initial) */
@@ -613,11 +625,8 @@ static void bpf_map_mmap_close(struct vm_area_struct *vma)
 {
 	struct bpf_map *map = vma->vm_file->private_data;

-	if (vma->vm_flags & VM_MAYWRITE) {
-		mutex_lock(&map->freeze_mutex);
-		map->writecnt--;
-		mutex_unlock(&map->freeze_mutex);
-	}
+	if (vma->vm_flags & VM_MAYWRITE)
+		bpf_map_write_active_dec(map);
 }

 static const struct vm_operations_struct bpf_map_default_vmops = {
@@ -668,7 +677,7 @@ static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
 		goto out;

 	if (vma->vm_flags & VM_MAYWRITE)
-		map->writecnt++;
+		bpf_map_write_active_inc(map);
 out:
 	mutex_unlock(&map->freeze_mutex);
 	return err;
@@ -1139,6 +1148,7 @@ static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
 	map = __bpf_map_get(f);
 	if (IS_ERR(map))
 		return PTR_ERR(map);
+	bpf_map_write_active_inc(map);
 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
 		err = -EPERM;
 		goto err_put;
@@ -1174,6 +1184,7 @@ free_value:
 free_key:
 	kvfree(key);
 err_put:
+	bpf_map_write_active_dec(map);
 	fdput(f);
 	return err;
 }
@@ -1196,6 +1207,7 @@ static int map_delete_elem(union bpf_attr *attr)
 	map = __bpf_map_get(f);
 	if (IS_ERR(map))
 		return PTR_ERR(map);
+	bpf_map_write_active_inc(map);
 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
 		err = -EPERM;
 		goto err_put;
@@ -1226,6 +1238,7 @@ static int map_delete_elem(union bpf_attr *attr)
 out:
 	kvfree(key);
 err_put:
+	bpf_map_write_active_dec(map);
 	fdput(f);
 	return err;
 }
@@ -1533,6 +1546,7 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr)
 	map = __bpf_map_get(f);
 	if (IS_ERR(map))
 		return PTR_ERR(map);
+	bpf_map_write_active_inc(map);
 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
 	    !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
 		err = -EPERM;
@@ -1597,6 +1611,7 @@ free_value:
 free_key:
 	kvfree(key);
 err_put:
+	bpf_map_write_active_dec(map);
 	fdput(f);
 	return err;
 }
@@ -1624,8 +1639,7 @@ static int map_freeze(const union bpf_attr *attr)
 	}

 	mutex_lock(&map->freeze_mutex);
-
-	if (map->writecnt) {
+	if (bpf_map_write_active(map)) {
 		err = -EBUSY;
 		goto err_put;
 	}
@@ -4171,6 +4185,9 @@ static int bpf_map_do_batch(const union bpf_attr *attr,
 			    union bpf_attr __user *uattr,
 			    int cmd)
 {
+	bool has_read  = cmd == BPF_MAP_LOOKUP_BATCH ||
+			 cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH;
+	bool has_write = cmd != BPF_MAP_LOOKUP_BATCH;
 	struct bpf_map *map;
 	int err, ufd;
 	struct fd f;
@@ -4183,16 +4200,13 @@ static int bpf_map_do_batch(const union bpf_attr *attr,
 	map = __bpf_map_get(f);
 	if (IS_ERR(map))
 		return PTR_ERR(map);
-
-	if ((cmd == BPF_MAP_LOOKUP_BATCH ||
-	     cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH) &&
-	    !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
+	if (has_write)
+		bpf_map_write_active_inc(map);
+	if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
 		err = -EPERM;
 		goto err_put;
 	}
-
-	if (cmd != BPF_MAP_LOOKUP_BATCH &&
-	    !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
+	if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
 		err = -EPERM;
 		goto err_put;
 	}
@@ -4205,8 +4219,9 @@ static int bpf_map_do_batch(const union bpf_attr *attr,
 		BPF_DO_BATCH(map->ops->map_update_batch);
 	else
 		BPF_DO_BATCH(map->ops->map_delete_batch);
-
 err_put:
+	if (has_write)
+		bpf_map_write_active_dec(map);
 	fdput(f);
 	return err;
 }
@@ -1151,7 +1151,8 @@ static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
 		/* transfer reg's id which is unique for every map_lookup_elem
 		 * as UID of the inner map.
 		 */
-		reg->map_uid = reg->id;
+		if (map_value_has_timer(map->inner_map_meta))
+			reg->map_uid = reg->id;
 	} else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
 		reg->type = PTR_TO_XDP_SOCK;
 	} else if (map->map_type == BPF_MAP_TYPE_SOCKMAP ||
@@ -4055,7 +4056,22 @@ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)

 static bool bpf_map_is_rdonly(const struct bpf_map *map)
 {
-	return (map->map_flags & BPF_F_RDONLY_PROG) && map->frozen;
+	/* A map is considered read-only if the following condition are true:
+	 *
+	 * 1) BPF program side cannot change any of the map content. The
+	 *    BPF_F_RDONLY_PROG flag is throughout the lifetime of a map
+	 *    and was set at map creation time.
+	 * 2) The map value(s) have been initialized from user space by a
+	 *    loader and then "frozen", such that no new map update/delete
+	 *    operations from syscall side are possible for the rest of
+	 *    the map's lifetime from that point onwards.
+	 * 3) Any parallel/pending map update/delete operations from syscall
+	 *    side have been completed. Only after that point, it's safe to
+	 *    assume that map value(s) are immutable.
+	 */
+	return (map->map_flags & BPF_F_RDONLY_PROG) &&
+	       READ_ONCE(map->frozen) &&
+	       !bpf_map_write_active(map);
 }

 static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val)
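Concretely, the window this closes: a syscall-side update that began before the freeze can still be writing while the verifier, seeing frozen set, folds the map value into the program as a constant scalar. An illustrative interleaving (not kernel output):

  syscall: BPF_MAP_UPDATE_ELEM            verifier: bpf_map_is_rdonly()
    bpf_map_write_active_inc(map)
    copying new value in ...                BPF_F_RDONLY_PROG set, frozen == 1
                                            old check passes; value is read and
    ... update lands in the map             tracked as a constant (now stale)
    bpf_map_write_active_dec(map)

With condition 3), !bpf_map_write_active(map) fails while the update is in flight, so the verifier keeps treating the value as unknown.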
@@ -11631,6 +11647,13 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
 		}
 	}

+	if (map_value_has_timer(map)) {
+		if (is_tracing_prog_type(prog_type)) {
+			verbose(env, "tracing progs cannot use bpf_timer yet\n");
+			return -EINVAL;
+		}
+	}
+
 	if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
 	    !bpf_offload_prog_map_match(prog, map)) {
 		verbose(env, "offload device mismatch between prog and map\n");
@@ -1111,8 +1111,6 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_ktime_get_ns_proto;
 	case BPF_FUNC_ktime_get_boot_ns:
 		return &bpf_ktime_get_boot_ns_proto;
-	case BPF_FUNC_ktime_get_coarse_ns:
-		return &bpf_ktime_get_coarse_ns_proto;
 	case BPF_FUNC_tail_call:
 		return &bpf_tail_call_proto;
 	case BPF_FUNC_get_current_pid_tgid:
@@ -7162,6 +7162,8 @@ sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 #endif
 	case BPF_FUNC_sk_storage_get:
 		return &bpf_sk_storage_get_cg_sock_proto;
+	case BPF_FUNC_ktime_get_coarse_ns:
+		return &bpf_ktime_get_coarse_ns_proto;
 	default:
 		return bpf_base_func_proto(func_id);
 	}
@@ -10327,6 +10329,8 @@ sk_reuseport_func_proto(enum bpf_func_id func_id,
 		return &sk_reuseport_load_bytes_relative_proto;
 	case BPF_FUNC_get_socket_cookie:
 		return &bpf_get_socket_ptr_cookie_proto;
+	case BPF_FUNC_ktime_get_coarse_ns:
+		return &bpf_ktime_get_coarse_ns_proto;
 	default:
 		return bpf_base_func_proto(func_id);
 	}
@@ -10833,6 +10837,8 @@ bpf_sk_base_func_proto(enum bpf_func_id func_id)
 	case BPF_FUNC_skc_to_unix_sock:
 		func = &bpf_skc_to_unix_sock_proto;
 		break;
+	case BPF_FUNC_ktime_get_coarse_ns:
+		return &bpf_ktime_get_coarse_ns_proto;
 	default:
 		return bpf_base_func_proto(func_id);
 	}
@@ -205,6 +205,8 @@ bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id,
 			  offsetof(struct tcp_congestion_ops, release))
 			return &bpf_sk_getsockopt_proto;
 		return NULL;
+	case BPF_FUNC_ktime_get_coarse_ns:
+		return &bpf_ktime_get_coarse_ns_proto;
 	default:
 		return bpf_base_func_proto(func_id);
 	}
@@ -1807,6 +1807,17 @@ int udp_read_sock(struct sock *sk, read_descriptor_t *desc,
 		skb = skb_recv_udp(sk, 0, 1, &err);
 		if (!skb)
 			return err;
+
+		if (udp_lib_checksum_complete(skb)) {
+			__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS,
+					IS_UDPLITE(sk));
+			__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
+					IS_UDPLITE(sk));
+			atomic_inc(&sk->sk_drops);
+			kfree_skb(skb);
+			continue;
+		}
+
 		used = recv_actor(desc, skb, 0, skb->len);
 		if (used <= 0) {
 			if (!copied)
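The ordering is the point of this fix: recv_actor() hands the datagram straight to the sockmap consumer, so the checksum must be verified first and bad packets dropped with continue, mirroring what the regular receive path does. A hedged sketch of the validate-then-dispatch pattern with toy types (not the kernel sk_buff API):

struct pkt { int csum_ok; int len; };

typedef int (*recv_actor_t)(void *desc, struct pkt *p);

/* never feed an unverified packet to the consumer callback;
 * drop it and keep reading instead
 */
static int read_loop(void *desc, recv_actor_t actor,
		     struct pkt *(*recv)(void), void (*drop)(struct pkt *p))
{
	int copied = 0;

	for (;;) {
		struct pkt *p = recv();
		int used;

		if (!p)
			break;
		if (!p->csum_ok) {	/* udp_lib_checksum_complete() failed */
			drop(p);	/* bump error counters, free the buffer */
			continue;
		}
		used = actor(desc, p);
		if (used <= 0)
			break;
		copied += used;
	}
	return copied;
}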
@@ -500,7 +500,7 @@ struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
 		pool->free_list_cnt--;
 		xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk,
 					free_list_node);
-		list_del(&xskb->free_list_node);
+		list_del_init(&xskb->free_list_node);
 	}

 	xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
@@ -568,7 +568,7 @@ static u32 xp_alloc_reused(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u3
 	i = nb_entries;
 	while (i--) {
 		xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk, free_list_node);
-		list_del(&xskb->free_list_node);
+		list_del_init(&xskb->free_list_node);

 		*xdp = &xskb->xdp;
 		xdp++;
@@ -615,6 +615,9 @@ EXPORT_SYMBOL(xp_can_alloc);

 void xp_free(struct xdp_buff_xsk *xskb)
 {
+	if (!list_empty(&xskb->free_list_node))
+		return;
+
 	xskb->pool->free_list_cnt++;
 	list_add(&xskb->free_list_node, &xskb->pool->free_list);
 }
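The guard in xp_free() works because both allocation paths above now use list_del_init() rather than list_del(): an off-list node points at itself, so list_empty() on the node distinguishes "allocated" from "already back on the free list". A self-contained sketch of that idempotent-free idiom (toy list standing in for <linux/list.h>):

#include <stdbool.h>

struct list_node { struct list_node *prev, *next; };

static void list_init(struct list_node *n) { n->prev = n->next = n; }
static bool list_empty(const struct list_node *n) { return n->next == n; }

static void list_del_init(struct list_node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);			/* node now points at itself */
}

static void list_add(struct list_node *n, struct list_node *head)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

struct buf { struct list_node free_list_node; };

static void buf_free(struct buf *b, struct list_node *free_list)
{
	if (!list_empty(&b->free_list_node))	/* already on the free list */
		return;				/* second free is a no-op   */
	list_add(&b->free_list_node, free_list);
}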
@@ -9,8 +9,6 @@
  * Include file for sample Host Bandwidth Manager (HBM) BPF programs
  */
 #define KBUILD_MODNAME "foo"
-#include <stddef.h>
-#include <stdbool.h>
 #include <uapi/linux/bpf.h>
 #include <uapi/linux/if_ether.h>
 #include <uapi/linux/if_packet.h>
@@ -309,7 +309,6 @@ int main(int argc, char **argv)
 	const char *mprog_filename = NULL, *mprog_name = NULL;
 	struct xdp_redirect_cpu *skel;
 	struct bpf_map_info info = {};
-	char ifname_buf[IF_NAMESIZE];
 	struct bpf_cpumap_val value;
 	__u32 infosz = sizeof(info);
 	int ret = EXIT_FAIL_OPTION;
@@ -390,10 +389,10 @@ int main(int argc, char **argv)
 		case 'd':
 			if (strlen(optarg) >= IF_NAMESIZE) {
 				fprintf(stderr, "-d/--dev name too long\n");
+				usage(argv, long_options, __doc__, mask, true, skel->obj);
 				goto end_cpu;
 			}
-			safe_strncpy(ifname_buf, optarg, strlen(ifname_buf));
-			ifindex = if_nametoindex(ifname_buf);
+			ifindex = if_nametoindex(optarg);
 			if (!ifindex)
 				ifindex = strtoul(optarg, NULL, 0);
 			if (!ifindex) {
@@ -120,7 +120,10 @@ struct sample_output {
 		__u64 xmit;
 	} totals;
 	struct {
-		__u64 pps;
+		union {
+			__u64 pps;
+			__u64 num;
+		};
 		__u64 drop;
 		__u64 err;
 	} rx_cnt;
@@ -1322,7 +1325,7 @@ int sample_install_xdp(struct bpf_program *xdp_prog, int ifindex, bool generic,

 static void sample_summary_print(void)
 {
-	double period = sample_out.rx_cnt.pps;
+	double num = sample_out.rx_cnt.num;

 	if (sample_out.totals.rx) {
 		double pkts = sample_out.totals.rx;
@@ -1330,7 +1333,7 @@ static void sample_summary_print(void)
 		print_always("  Packets received    : %'-10llu\n",
 			     sample_out.totals.rx);
 		print_always("  Average packets/s   : %'-10.0f\n",
-			     sample_round(pkts / period));
+			     sample_round(pkts / num));
 	}
 	if (sample_out.totals.redir) {
 		double pkts = sample_out.totals.redir;
@@ -1338,7 +1341,7 @@ static void sample_summary_print(void)
 		print_always("  Packets redirected  : %'-10llu\n",
 			     sample_out.totals.redir);
 		print_always("  Average redir/s     : %'-10.0f\n",
-			     sample_round(pkts / period));
+			     sample_round(pkts / num));
 	}
 	if (sample_out.totals.drop)
 		print_always("  Rx dropped          : %'-10llu\n",
@@ -1355,7 +1358,7 @@ static void sample_summary_print(void)
 		print_always("  Packets transmitted : %'-10llu\n",
 			     sample_out.totals.xmit);
 		print_always("  Average transmit/s  : %'-10.0f\n",
-			     sample_round(pkts / period));
+			     sample_round(pkts / num));
 	}
 }
@@ -1422,7 +1425,7 @@ static int sample_stats_collect(struct stats_record *rec)
 	return 0;
 }

-static void sample_summary_update(struct sample_output *out, int interval)
+static void sample_summary_update(struct sample_output *out)
 {
 	sample_out.totals.rx += out->totals.rx;
 	sample_out.totals.redir += out->totals.redir;
@@ -1430,12 +1433,11 @@ static void sample_summary_update(struct sample_output *out, int interval)
 	sample_out.totals.drop_xmit += out->totals.drop_xmit;
 	sample_out.totals.err += out->totals.err;
 	sample_out.totals.xmit += out->totals.xmit;
-	sample_out.rx_cnt.pps += interval;
+	sample_out.rx_cnt.num++;
 }

 static void sample_stats_print(int mask, struct stats_record *cur,
-			       struct stats_record *prev, char *prog_name,
-			       int interval)
+			       struct stats_record *prev, char *prog_name)
 {
 	struct sample_output out = {};
@@ -1452,7 +1454,7 @@ static void sample_stats_print(int mask, struct stats_record *cur,
 	else if (mask & SAMPLE_DEVMAP_XMIT_CNT_MULTI)
 		stats_get_devmap_xmit_multi(cur, prev, 0, &out,
 					    mask & SAMPLE_DEVMAP_XMIT_CNT);
-	sample_summary_update(&out, interval);
+	sample_summary_update(&out);

 	stats_print(prog_name, mask, cur, prev, &out);
 }
@@ -1495,7 +1497,7 @@ static void swap(struct stats_record **a, struct stats_record **b)
 }

 static int sample_timer_cb(int timerfd, struct stats_record **rec,
-			   struct stats_record **prev, int interval)
+			   struct stats_record **prev)
 {
 	char line[64] = "Summary";
 	int ret;
|
||||
snprintf(line, sizeof(line), "%s->%s", f ?: "?", t ?: "?");
|
||||
}
|
||||
|
||||
sample_stats_print(sample_mask, *rec, *prev, line, interval);
|
||||
sample_stats_print(sample_mask, *rec, *prev, line);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1579,7 +1581,7 @@ int sample_run(int interval, void (*post_cb)(void *), void *ctx)
 		if (pfd[0].revents & POLLIN)
 			ret = sample_signal_cb();
 		else if (pfd[1].revents & POLLIN)
-			ret = sample_timer_cb(timerfd, &rec, &prev, interval);
+			ret = sample_timer_cb(timerfd, &rec, &prev);

 		if (ret)
 			break;
@@ -88,5 +88,4 @@ $(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(BPFOBJ_OU

 $(DEFAULT_BPFTOOL): $(BPFOBJ) | $(BPFTOOL_OUTPUT)
 	$(Q)$(MAKE) $(submake_extras) -C ../bpftool OUTPUT=$(BPFTOOL_OUTPUT) \
-		    LIBBPF_OUTPUT=$(BPFOBJ_OUTPUT) \
-		    LIBBPF_DESTDIR=$(BPF_DESTDIR) CC=$(HOSTCC) LD=$(HOSTLD)
+		    CC=$(HOSTCC) LD=$(HOSTLD)
@@ -45,8 +45,8 @@ struct bpf_gen {
 	int nr_fd_array;
 };

-void bpf_gen__init(struct bpf_gen *gen, int log_level);
-int bpf_gen__finish(struct bpf_gen *gen);
+void bpf_gen__init(struct bpf_gen *gen, int log_level, int nr_progs, int nr_maps);
+int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps);
 void bpf_gen__free(struct bpf_gen *gen);
 void bpf_gen__load_btf(struct bpf_gen *gen, const void *raw_data, __u32 raw_size);
 void bpf_gen__map_create(struct bpf_gen *gen, struct bpf_create_map_params *map_attr, int map_idx);
@@ -18,7 +18,7 @@
 #define MAX_USED_MAPS 64
 #define MAX_USED_PROGS 32
 #define MAX_KFUNC_DESCS 256
-#define MAX_FD_ARRAY_SZ (MAX_USED_PROGS + MAX_KFUNC_DESCS)
+#define MAX_FD_ARRAY_SZ (MAX_USED_MAPS + MAX_KFUNC_DESCS)

 /* The following structure describes the stack layout of the loader program.
  * In addition R6 contains the pointer to context.
@@ -33,8 +33,8 @@
  */
 struct loader_stack {
 	__u32 btf_fd;
-	__u32 prog_fd[MAX_USED_PROGS];
 	__u32 inner_map_fd;
+	__u32 prog_fd[MAX_USED_PROGS];
 };

 #define stack_off(field) \
@@ -42,6 +42,11 @@ struct loader_stack {

 #define attr_field(attr, field) (attr + offsetof(union bpf_attr, field))

+static int blob_fd_array_off(struct bpf_gen *gen, int index)
+{
+	return gen->fd_array + index * sizeof(int);
+}
+
 static int realloc_insn_buf(struct bpf_gen *gen, __u32 size)
 {
 	size_t off = gen->insn_cur - gen->insn_start;
@@ -102,11 +107,15 @@ static void emit2(struct bpf_gen *gen, struct bpf_insn insn1, struct bpf_insn in
 	emit(gen, insn2);
 }

-void bpf_gen__init(struct bpf_gen *gen, int log_level)
+static int add_data(struct bpf_gen *gen, const void *data, __u32 size);
+static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off);
+
+void bpf_gen__init(struct bpf_gen *gen, int log_level, int nr_progs, int nr_maps)
 {
-	size_t stack_sz = sizeof(struct loader_stack);
+	size_t stack_sz = sizeof(struct loader_stack), nr_progs_sz;
 	int i;

+	gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int));
 	gen->log_level = log_level;
 	/* save ctx pointer into R6 */
 	emit(gen, BPF_MOV64_REG(BPF_REG_6, BPF_REG_1));
@@ -118,19 +127,27 @@ void bpf_gen__init(struct bpf_gen *gen, int log_level)
 	emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0));
 	emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel));

+	/* amount of stack actually used, only used to calculate iterations, not stack offset */
+	nr_progs_sz = offsetof(struct loader_stack, prog_fd[nr_progs]);
 	/* jump over cleanup code */
 	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0,
-			      /* size of cleanup code below */
-			      (stack_sz / 4) * 3 + 2));
+			      /* size of cleanup code below (including map fd cleanup) */
+			      (nr_progs_sz / 4) * 3 + 2 +
+			      /* 6 insns for emit_sys_close_blob,
+			       * 6 insns for debug_regs in emit_sys_close_blob
+			       */
+			      nr_maps * (6 + (gen->log_level ? 6 : 0))));

 	/* remember the label where all error branches will jump to */
 	gen->cleanup_label = gen->insn_cur - gen->insn_start;
 	/* emit cleanup code: close all temp FDs */
-	for (i = 0; i < stack_sz; i += 4) {
+	for (i = 0; i < nr_progs_sz; i += 4) {
 		emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -stack_sz + i));
 		emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0, 1));
 		emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close));
 	}
+	for (i = 0; i < nr_maps; i++)
+		emit_sys_close_blob(gen, blob_fd_array_off(gen, i));
 	/* R7 contains the error code from sys_bpf. Copy it into R0 and exit. */
 	emit(gen, BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
 	emit(gen, BPF_EXIT_INSN());
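To make the new jump-offset expression concrete, a worked example with assumed inputs (nr_progs = 2, nr_maps = 3, log_level = 0; struct loader_stack now lays out btf_fd, inner_map_fd, then prog_fd[]):

nr_progs_sz = offsetof(struct loader_stack, prog_fd[2])
            = 4 (btf_fd) + 4 (inner_map_fd) + 2 * 4 (prog fds) = 16 bytes

insns to jump over:
  (nr_progs_sz / 4) * 3 = (16 / 4) * 3 = 12   (LDX + JSLE + sys_close per 4-byte fd slot)
  + 2                                         (MOV R0,R7 + EXIT tail)
  + nr_maps * (6 + 0)   = 3 * 6        = 18   (emit_sys_close_blob per map, no debug insns)
  total BPF_JA offset                  = 32 instructions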
@@ -160,8 +177,6 @@ static int add_data(struct bpf_gen *gen, const void *data, __u32 size)
  */
 static int add_map_fd(struct bpf_gen *gen)
 {
-	if (!gen->fd_array)
-		gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int));
 	if (gen->nr_maps == MAX_USED_MAPS) {
 		pr_warn("Total maps exceeds %d\n", MAX_USED_MAPS);
 		gen->error = -E2BIG;
@@ -174,8 +189,6 @@ static int add_kfunc_btf_fd(struct bpf_gen *gen)
 {
 	int cur;

-	if (!gen->fd_array)
-		gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int));
 	if (gen->nr_fd_array == MAX_KFUNC_DESCS) {
 		cur = add_data(gen, NULL, sizeof(int));
 		return (cur - gen->fd_array) / sizeof(int);
|
||||
return MAX_USED_MAPS + gen->nr_fd_array++;
|
||||
}
|
||||
|
||||
static int blob_fd_array_off(struct bpf_gen *gen, int index)
|
||||
{
|
||||
return gen->fd_array + index * sizeof(int);
|
||||
}
|
||||
|
||||
static int insn_bytes_to_bpf_size(__u32 sz)
|
||||
{
|
||||
switch (sz) {
|
||||
@@ -359,10 +367,15 @@ static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off)
 	__emit_sys_close(gen);
 }

-int bpf_gen__finish(struct bpf_gen *gen)
+int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps)
 {
 	int i;

+	if (nr_progs != gen->nr_progs || nr_maps != gen->nr_maps) {
+		pr_warn("progs/maps mismatch\n");
+		gen->error = -EFAULT;
+		return gen->error;
+	}
 	emit_sys_close_stack(gen, stack_off(btf_fd));
 	for (i = 0; i < gen->nr_progs; i++)
 		move_stack2ctx(gen,
|
||||
}
|
||||
|
||||
if (obj->gen_loader)
|
||||
bpf_gen__init(obj->gen_loader, attr->log_level);
|
||||
bpf_gen__init(obj->gen_loader, attr->log_level, obj->nr_programs, obj->nr_maps);
|
||||
|
||||
err = bpf_object__probe_loading(obj);
|
||||
err = err ? : bpf_object__load_vmlinux_btf(obj, false);
|
||||
@ -7277,7 +7277,7 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
|
||||
for (i = 0; i < obj->nr_maps; i++)
|
||||
obj->maps[i].fd = -1;
|
||||
if (!err)
|
||||
err = bpf_gen__finish(obj->gen_loader);
|
||||
err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps);
|
||||
}
|
||||
|
||||
/* clean up fd_array */
|
||||
|
@ -187,7 +187,7 @@ DEFAULT_BPFTOOL := $(HOST_SCRATCH_DIR)/sbin/bpftool
|
||||
$(OUTPUT)/runqslower: $(BPFOBJ) | $(DEFAULT_BPFTOOL) $(RUNQSLOWER_OUTPUT)
|
||||
$(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/runqslower \
|
||||
OUTPUT=$(RUNQSLOWER_OUTPUT) VMLINUX_BTF=$(VMLINUX_BTF) \
|
||||
BPFTOOL_OUTPUT=$(BUILD_DIR)/bpftool/ \
|
||||
BPFTOOL_OUTPUT=$(HOST_BUILD_DIR)/bpftool/ \
|
||||
BPFOBJ_OUTPUT=$(BUILD_DIR)/libbpf \
|
||||
BPFOBJ=$(BPFOBJ) BPF_INCLUDE=$(INCLUDE_DIR) && \
|
||||
cp $(RUNQSLOWER_OUTPUT)runqslower $@
|
||||
|
tools/testing/selftests/bpf/prog_tests/helper_restricted.c (new file, 33 lines)
@@ -0,0 +1,33 @@
// SPDX-License-Identifier: GPL-2.0

#include <test_progs.h>
#include "test_helper_restricted.skel.h"

void test_helper_restricted(void)
{
	int prog_i = 0, prog_cnt;
	int duration = 0;

	do {
		struct test_helper_restricted *test;
		int maybeOK;

		test = test_helper_restricted__open();
		if (!ASSERT_OK_PTR(test, "open"))
			return;

		prog_cnt = test->skeleton->prog_cnt;

		for (int j = 0; j < prog_cnt; ++j) {
			struct bpf_program *prog = *test->skeleton->progs[j].prog;

			maybeOK = bpf_program__set_autoload(prog, prog_i == j);
			ASSERT_OK(maybeOK, "set autoload");
		}

		maybeOK = test_helper_restricted__load(test);
		CHECK(!maybeOK, test->skeleton->progs[prog_i].name, "helper isn't restricted");

		test_helper_restricted__destroy(test);
	} while (++prog_i < prog_cnt);
}
tools/testing/selftests/bpf/progs/test_helper_restricted.c (new file, 123 lines)
@@ -0,0 +1,123 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <time.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct timer {
	struct bpf_timer t;
};

struct lock {
	struct bpf_spin_lock l;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct timer);
} timers SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct lock);
} locks SEC(".maps");

static int timer_cb(void *map, int *key, struct timer *timer)
{
	return 0;
}

static void timer_work(void)
{
	struct timer *timer;
	const int key = 0;

	timer = bpf_map_lookup_elem(&timers, &key);
	if (timer) {
		bpf_timer_init(&timer->t, &timers, CLOCK_MONOTONIC);
		bpf_timer_set_callback(&timer->t, timer_cb);
		bpf_timer_start(&timer->t, 10E9, 0);
		bpf_timer_cancel(&timer->t);
	}
}

static void spin_lock_work(void)
{
	const int key = 0;
	struct lock *lock;

	lock = bpf_map_lookup_elem(&locks, &key);
	if (lock) {
		bpf_spin_lock(&lock->l);
		bpf_spin_unlock(&lock->l);
	}
}

SEC("raw_tp/sys_enter")
int raw_tp_timer(void *ctx)
{
	timer_work();

	return 0;
}

SEC("tp/syscalls/sys_enter_nanosleep")
int tp_timer(void *ctx)
{
	timer_work();

	return 0;
}

SEC("kprobe/sys_nanosleep")
int kprobe_timer(void *ctx)
{
	timer_work();

	return 0;
}

SEC("perf_event")
int perf_event_timer(void *ctx)
{
	timer_work();

	return 0;
}

SEC("raw_tp/sys_enter")
int raw_tp_spin_lock(void *ctx)
{
	spin_lock_work();

	return 0;
}

SEC("tp/syscalls/sys_enter_nanosleep")
int tp_spin_lock(void *ctx)
{
	spin_lock_work();

	return 0;
}

SEC("kprobe/sys_nanosleep")
int kprobe_spin_lock(void *ctx)
{
	spin_lock_work();

	return 0;
}

SEC("perf_event")
int perf_event_spin_lock(void *ctx)
{
	spin_lock_work();

	return 0;
}

const char LICENSE[] SEC("license") = "GPL";
@@ -92,6 +92,7 @@ struct bpf_test {
 	int fixup_map_event_output[MAX_FIXUPS];
 	int fixup_map_reuseport_array[MAX_FIXUPS];
 	int fixup_map_ringbuf[MAX_FIXUPS];
+	int fixup_map_timer[MAX_FIXUPS];
 	/* Expected verifier log output for result REJECT or VERBOSE_ACCEPT.
 	 * Can be a tab-separated sequence of expected strings. An empty string
 	 * means no log verification.
@@ -604,8 +605,15 @@ static int create_cgroup_storage(bool percpu)
 *   int cnt;
 *   struct bpf_spin_lock l;
 * };
+* struct bpf_timer {
+*   __u64 :64;
+*   __u64 :64;
+* } __attribute__((aligned(8)));
+* struct timer {
+*   struct bpf_timer t;
+* };
 */
-static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l";
+static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l\0bpf_timer\0timer\0t";
 static __u32 btf_raw_types[] = {
 	/* int */
 	BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
@@ -616,6 +624,11 @@ static __u32 btf_raw_types[] = {
 	BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
 	BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
 	BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
+	/* struct bpf_timer */ /* [4] */
+	BTF_TYPE_ENC(25, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0), 16),
+	/* struct timer */ /* [5] */
+	BTF_TYPE_ENC(35, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 16),
+	BTF_MEMBER_ENC(41, 4, 0), /* struct bpf_timer t; */
 };

 static int load_btf(void)
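As a cross-check, the name offsets used in the new BTF entries index into btf_str_sec, whose layout after the change is:

"\0bpf_spin_lock\0val\0cnt\0l\0bpf_timer\0timer\0t"
  ^1             ^15  ^19  ^23 ^25        ^35    ^41

offset  1: "bpf_spin_lock"    offset 25: "bpf_timer" -> BTF_TYPE_ENC(25, ...), type [4]
offset 15: "val"              offset 35: "timer"     -> BTF_TYPE_ENC(35, ...), type [5]
offset 19: "cnt"              offset 41: "t"         -> BTF_MEMBER_ENC(41, 4, 0)
offset 23: "l"

and create_map_timer() below sets .btf_value_type_id = 5, i.e. struct timer.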
@@ -696,6 +709,29 @@ static int create_sk_storage_map(void)
 	return fd;
 }

+static int create_map_timer(void)
+{
+	struct bpf_create_map_attr attr = {
+		.name = "test_map",
+		.map_type = BPF_MAP_TYPE_ARRAY,
+		.key_size = 4,
+		.value_size = 16,
+		.max_entries = 1,
+		.btf_key_type_id = 1,
+		.btf_value_type_id = 5,
+	};
+	int fd, btf_fd;
+
+	btf_fd = load_btf();
+	if (btf_fd < 0)
+		return -1;
+	attr.btf_fd = btf_fd;
+	fd = bpf_create_map_xattr(&attr);
+	if (fd < 0)
+		printf("Failed to create map with timer\n");
+	return fd;
+}
+
 static char bpf_vlog[UINT_MAX >> 8];

 static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
@@ -722,6 +758,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
 	int *fixup_map_event_output = test->fixup_map_event_output;
 	int *fixup_map_reuseport_array = test->fixup_map_reuseport_array;
 	int *fixup_map_ringbuf = test->fixup_map_ringbuf;
+	int *fixup_map_timer = test->fixup_map_timer;

 	if (test->fill_helper) {
 		test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
@@ -907,6 +944,13 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
 			fixup_map_ringbuf++;
 		} while (*fixup_map_ringbuf);
 	}
+	if (*fixup_map_timer) {
+		map_fds[21] = create_map_timer();
+		do {
+			prog[*fixup_map_timer].imm = map_fds[21];
+			fixup_map_timer++;
+		} while (*fixup_map_timer);
+	}
 }

 struct libcap {
tools/testing/selftests/bpf/verifier/helper_restricted.c (new file, 196 lines)
@@ -0,0 +1,196 @@
{
	"bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_KPROBE",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ktime_get_coarse_ns),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "unknown func bpf_ktime_get_coarse_ns",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_KPROBE,
},
{
	"bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_TRACEPOINT",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ktime_get_coarse_ns),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "unknown func bpf_ktime_get_coarse_ns",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_PERF_EVENT",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ktime_get_coarse_ns),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "unknown func bpf_ktime_get_coarse_ns",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_PERF_EVENT,
},
{
	"bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ktime_get_coarse_ns),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "unknown func bpf_ktime_get_coarse_ns",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT,
},
{
	"bpf_timer_init is forbidden in BPF_PROG_TYPE_KPROBE",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_3, 1),
	BPF_EMIT_CALL(BPF_FUNC_timer_init),
	BPF_EXIT_INSN(),
	},
	.fixup_map_timer = { 3, 8 },
	.errstr = "tracing progs cannot use bpf_timer yet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_KPROBE,
},
{
	"bpf_timer_init is forbidden in BPF_PROG_TYPE_PERF_EVENT",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_3, 1),
	BPF_EMIT_CALL(BPF_FUNC_timer_init),
	BPF_EXIT_INSN(),
	},
	.fixup_map_timer = { 3, 8 },
	.errstr = "tracing progs cannot use bpf_timer yet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_PERF_EVENT,
},
{
	"bpf_timer_init is forbidden in BPF_PROG_TYPE_TRACEPOINT",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_3, 1),
	BPF_EMIT_CALL(BPF_FUNC_timer_init),
	BPF_EXIT_INSN(),
	},
	.fixup_map_timer = { 3, 8 },
	.errstr = "tracing progs cannot use bpf_timer yet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"bpf_timer_init is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_3, 1),
	BPF_EMIT_CALL(BPF_FUNC_timer_init),
	BPF_EXIT_INSN(),
	},
	.fixup_map_timer = { 3, 8 },
	.errstr = "tracing progs cannot use bpf_timer yet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT,
},
{
	"bpf_spin_lock is forbidden in BPF_PROG_TYPE_KPROBE",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_spin_lock),
	BPF_EXIT_INSN(),
	},
	.fixup_map_spin_lock = { 3 },
	.errstr = "tracing progs cannot use bpf_spin_lock yet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_KPROBE,
},
{
	"bpf_spin_lock is forbidden in BPF_PROG_TYPE_TRACEPOINT",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_spin_lock),
	BPF_EXIT_INSN(),
	},
	.fixup_map_spin_lock = { 3 },
	.errstr = "tracing progs cannot use bpf_spin_lock yet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"bpf_spin_lock is forbidden in BPF_PROG_TYPE_PERF_EVENT",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_spin_lock),
	BPF_EXIT_INSN(),
	},
	.fixup_map_spin_lock = { 3 },
	.errstr = "tracing progs cannot use bpf_spin_lock yet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_PERF_EVENT,
},
{
	"bpf_spin_lock is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_spin_lock),
	BPF_EXIT_INSN(),
	},
	.fixup_map_spin_lock = { 3 },
	.errstr = "tracing progs cannot use bpf_spin_lock yet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT,
},
@@ -18,6 +18,40 @@
 	.fixup_map_in_map = { 3 },
 	.result = ACCEPT,
 },
+{
+	"map in map state pruning",
+	.insns = {
+	BPF_ST_MEM(0, BPF_REG_10, -4, 0),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -4),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
+	BPF_LD_MAP_FD(BPF_REG_1, 0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+	BPF_EXIT_INSN(),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 11),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
+	BPF_LD_MAP_FD(BPF_REG_1, 0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+	BPF_EXIT_INSN(),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+	BPF_EXIT_INSN(),
+	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.fixup_map_in_map = { 4, 14 },
+	.flags = BPF_F_TEST_STATE_FREQ,
+	.result = VERBOSE_ACCEPT,
+	.errstr = "processed 25 insns",
+	.prog_type = BPF_PROG_TYPE_XDP,
+},
 {
 	"invalid inner map pointer",
 	.insns = {