bpf-for-netdev
-----BEGIN PGP SIGNATURE-----

iHUEABYIAB0WIQTFp0I1jqZrAX+hPRXbK58LschIgwUCZIDxUwAKCRDbK58LschI
g5hDAQD7ukrniCvMRNIm2yUZIGSxE4RvGiXptO4a0NfLck5R/wEAsfN2KUsPcPhW
HS37lVfx7VVXfj42+REf7lWLu4TXpwk=
=6mS/
-----END PGP SIGNATURE-----

Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2023-06-07

We've added 7 non-merge commits during the last 7 day(s) which contain
a total of 12 files changed, 112 insertions(+), 7 deletions(-).

The main changes are:

1) Fix a use-after-free in BPF's task local storage, from KP Singh.

2) Make struct path handling more robust in bpf_d_path, from Jiri Olsa.

3) Fix a syzbot NULL-pointer dereference in sockmap, from Eric Dumazet.

4) UAPI fix for BPF_NETFILTER before final kernel ships, from Florian Westphal.

5) Fix map-in-map array_map_gen_lookup code generation where elem_size was
   not being set for inner maps, from Rhys Rustad-Elliott.

6) Fix sockopt_sk selftest's NETLINK_LIST_MEMBERSHIPS assertion, from
   Yonghong Song.

* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  bpf: Add extra path pointer check to d_path helper
  selftests/bpf: Fix sockopt_sk selftest
  bpf: netfilter: Add BPF_NETFILTER bpf_attach_type
  selftests/bpf: Add access_inner_map selftest
  bpf: Fix elem_size not being set for inner maps
  bpf: Fix UAF in task local storage
  bpf, sockmap: Avoid potential NULL dereference in sk_psock_verdict_data_ready()
====================

Link: https://lore.kernel.org/r/20230607220514.29698-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in: commit c9d99cfa66
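As background for item 4) in the summary above, the sketch below shows roughly how a loader is expected to use the new BPF_NETFILTER attach type when creating a netfilter link. This is a minimal illustration rather than part of this series; it assumes a libbpf new enough that bpf_link_create_opts exposes the netfilter hook parameters (pf, hooknum, priority), and the function name attach_nf_prog, the chosen hook and the priority value are arbitrary.

/* Hypothetical loader snippet: attach an already-loaded SEC("netfilter")
 * program at the IPv4 LOCAL_IN hook using the BPF_NETFILTER attach type.
 */
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <linux/netfilter.h>

static int attach_nf_prog(struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_link_create_opts, opts,
		.netfilter.pf = NFPROTO_IPV4,
		.netfilter.hooknum = NF_INET_LOCAL_IN,
		.netfilter.priority = -128,
	);

	/* Returns a link fd on success, a negative error on failure.
	 * With this fix, passing any attach_type other than BPF_NETFILTER
	 * is rejected by link_create() for netfilter programs.
	 */
	return bpf_link_create(bpf_program__fd(prog), 0, BPF_NETFILTER, &opts);
}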
include/uapi/linux/bpf.h
@@ -1035,6 +1035,7 @@ enum bpf_attach_type {
 	BPF_TRACE_KPROBE_MULTI,
 	BPF_LSM_CGROUP,
 	BPF_STRUCT_OPS,
+	BPF_NETFILTER,
 	__MAX_BPF_ATTACH_TYPE
 };
 
kernel/bpf/map_in_map.c
@@ -69,9 +69,13 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
 	/* Misc members not needed in bpf_map_meta_equal() check. */
 	inner_map_meta->ops = inner_map->ops;
 	if (inner_map->ops == &array_map_ops) {
+		struct bpf_array *inner_array_meta =
+			container_of(inner_map_meta, struct bpf_array, map);
+		struct bpf_array *inner_array = container_of(inner_map, struct bpf_array, map);
+
+		inner_array_meta->index_mask = inner_array->index_mask;
+		inner_array_meta->elem_size = inner_array->elem_size;
 		inner_map_meta->bypass_spec_v1 = inner_map->bypass_spec_v1;
-		container_of(inner_map_meta, struct bpf_array, map)->index_mask =
-		     container_of(inner_map, struct bpf_array, map)->index_mask;
 	}
 
 	fdput(f);
kernel/bpf/syscall.c
@@ -2433,6 +2433,10 @@ bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
 		default:
 			return -EINVAL;
 		}
+	case BPF_PROG_TYPE_NETFILTER:
+		if (expected_attach_type == BPF_NETFILTER)
+			return 0;
+		return -EINVAL;
 	case BPF_PROG_TYPE_SYSCALL:
 	case BPF_PROG_TYPE_EXT:
 		if (expected_attach_type)
kernel/bpf/syscall.c
@@ -4590,7 +4594,12 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr)
 
 	switch (prog->type) {
 	case BPF_PROG_TYPE_EXT:
+		break;
 	case BPF_PROG_TYPE_NETFILTER:
+		if (attr->link_create.attach_type != BPF_NETFILTER) {
+			ret = -EINVAL;
+			goto out;
+		}
 		break;
 	case BPF_PROG_TYPE_PERF_EVENT:
 	case BPF_PROG_TYPE_TRACEPOINT:
kernel/fork.c
@@ -627,6 +627,7 @@ void free_task(struct task_struct *tsk)
 	arch_release_task_struct(tsk);
 	if (tsk->flags & PF_KTHREAD)
 		free_kthread_struct(tsk);
+	bpf_task_storage_free(tsk);
 	free_task_struct(tsk);
 }
 EXPORT_SYMBOL(free_task);
kernel/fork.c
@@ -979,7 +980,6 @@ void __put_task_struct(struct task_struct *tsk)
 	cgroup_free(tsk);
 	task_numa_free(tsk, true);
 	security_task_free(tsk);
-	bpf_task_storage_free(tsk);
 	exit_creds(tsk);
 	delayacct_tsk_free(tsk);
 	put_signal_struct(tsk->signal);
kernel/trace/bpf_trace.c
@@ -900,13 +900,23 @@ static const struct bpf_func_proto bpf_send_signal_thread_proto = {
 
 BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
 {
+	struct path copy;
 	long len;
 	char *p;
 
 	if (!sz)
 		return 0;
 
-	p = d_path(path, buf, sz);
+	/*
+	 * The path pointer is verified as trusted and safe to use,
+	 * but let's double check it's valid anyway to workaround
+	 * potentially broken verifier.
+	 */
+	len = copy_from_kernel_nofault(&copy, path, sizeof(*path));
+	if (len < 0)
+		return len;
+
+	p = d_path(&copy, buf, sz);
 	if (IS_ERR(p)) {
 		len = PTR_ERR(p);
 	} else {
net/core/skmsg.c
@@ -1210,7 +1210,8 @@ static void sk_psock_verdict_data_ready(struct sock *sk)
 
 		rcu_read_lock();
 		psock = sk_psock(sk);
-		psock->saved_data_ready(sk);
+		if (psock)
+			psock->saved_data_ready(sk);
 		rcu_read_unlock();
 	}
 }
tools/include/uapi/linux/bpf.h
@@ -1035,6 +1035,7 @@ enum bpf_attach_type {
 	BPF_TRACE_KPROBE_MULTI,
 	BPF_LSM_CGROUP,
 	BPF_STRUCT_OPS,
+	BPF_NETFILTER,
 	__MAX_BPF_ATTACH_TYPE
 };
 
tools/lib/bpf/libbpf.c
@@ -117,6 +117,7 @@ static const char * const attach_type_name[] = {
 	[BPF_PERF_EVENT] = "perf_event",
 	[BPF_TRACE_KPROBE_MULTI] = "trace_kprobe_multi",
 	[BPF_STRUCT_OPS] = "struct_ops",
+	[BPF_NETFILTER] = "netfilter",
 };
 
 static const char * const link_type_name[] = {
tools/lib/bpf/libbpf.c
@@ -8712,7 +8713,7 @@ static const struct bpf_sec_def section_defs[] = {
 	SEC_DEF("struct_ops+", STRUCT_OPS, 0, SEC_NONE),
 	SEC_DEF("struct_ops.s+", STRUCT_OPS, 0, SEC_SLEEPABLE),
 	SEC_DEF("sk_lookup", SK_LOOKUP, BPF_SK_LOOKUP, SEC_ATTACHABLE),
-	SEC_DEF("netfilter", NETFILTER, 0, SEC_NONE),
+	SEC_DEF("netfilter", NETFILTER, BPF_NETFILTER, SEC_NONE),
 };
 
 static size_t custom_sec_def_cnt;
tools/lib/bpf/libbpf_probes.c
@@ -180,7 +180,9 @@ static int probe_prog_load(enum bpf_prog_type prog_type,
 	case BPF_PROG_TYPE_SK_REUSEPORT:
 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
+		break;
 	case BPF_PROG_TYPE_NETFILTER:
+		opts.expected_attach_type = BPF_NETFILTER;
 		break;
 	default:
 		return -EOPNOTSUPP;
tools/testing/selftests/bpf/prog_tests/inner_array_lookup.c (new file, 31 lines)
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <test_progs.h>
+
+#include "inner_array_lookup.skel.h"
+
+void test_inner_array_lookup(void)
+{
+	int map1_fd, err;
+	int key = 3;
+	int val = 1;
+	struct inner_array_lookup *skel;
+
+	skel = inner_array_lookup__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "open_load_skeleton"))
+		return;
+
+	err = inner_array_lookup__attach(skel);
+	if (!ASSERT_OK(err, "skeleton_attach"))
+		goto cleanup;
+
+	map1_fd = bpf_map__fd(skel->maps.inner_map1);
+	bpf_map_update_elem(map1_fd, &key, &val, 0);
+
+	/* Probe should have set the element at index 3 to 2 */
+	bpf_map_lookup_elem(map1_fd, &key, &val);
+	ASSERT_EQ(val, 2, "value_is_2");
+
+cleanup:
+	inner_array_lookup__destroy(skel);
+}
tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
@@ -209,7 +209,7 @@ static int getsetsockopt(void)
 			err, errno);
 		goto err;
 	}
-	ASSERT_EQ(optlen, 4, "Unexpected NETLINK_LIST_MEMBERSHIPS value");
+	ASSERT_EQ(optlen, 8, "Unexpected NETLINK_LIST_MEMBERSHIPS value");
 
 	free(big_buf);
 	close(fd);
tools/testing/selftests/bpf/progs/inner_array_lookup.c (new file, 45 lines)
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+struct inner_map {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 5);
+	__type(key, int);
+	__type(value, int);
+} inner_map1 SEC(".maps");
+
+struct outer_map {
+	__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+	__uint(max_entries, 3);
+	__type(key, int);
+	__array(values, struct inner_map);
+} outer_map1 SEC(".maps") = {
+	.values = {
+		[2] = &inner_map1,
+	},
+};
+
+SEC("raw_tp/sys_enter")
+int handle__sys_enter(void *ctx)
+{
+	int outer_key = 2, inner_key = 3;
+	int *val;
+	void *map;
+
+	map = bpf_map_lookup_elem(&outer_map1, &outer_key);
+	if (!map)
+		return 1;
+
+	val = bpf_map_lookup_elem(map, &inner_key);
+	if (!val)
+		return 1;
+
+	if (*val == 1)
+		*val = 2;
+
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";