bpf-for-netdev

-----BEGIN PGP SIGNATURE-----
 
 iHUEABYIAB0WIQTFp0I1jqZrAX+hPRXbK58LschIgwUCY4ACtQAKCRDbK58LschI
 gyIcAP4nE/chC+gYDOleloK2tQlQawM5Sa4kwHjFzBdPD3tRrAEAs6y2fJjb5vZo
 OXIFKhXv5Xo3knmynkTSVSVOvB48IAE=
 =Cme8
 -----END PGP SIGNATURE-----

Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
bpf 2022-11-25

We've added 10 non-merge commits during the last 8 day(s) which contain
a total of 7 files changed, 48 insertions(+), 30 deletions(-).

The main changes are:

1) Several libbpf ringbuf fixes related to probing for its availability,
   size overflows when mmapping a 2G ringbuf and rejection of invalid
   reservation sizes, from Hou Tao. (A usage sketch follows this list.)

2) Fix a buggy return pointer in libbpf for attach_raw_tp function,
   from Jiri Olsa.

3) Fix a local storage BPF map bug where the value's spin lock field
   can get initialized incorrectly, from Xu Kuohai.

4) Two follow-up fixes in kprobe_multi BPF selftests for BPF CI,
   from Jiri Olsa.
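
For context, a typical producer path through the user ring buffer API that
the fixes in (1) harden looks roughly like the sketch below. map_fd,
struct my_msg and msg are illustrative assumptions, not code from this
series; the point is that an invalid reservation size now fails cleanly
with errno == E2BIG:

    #include <errno.h>
    #include <string.h>
    #include <bpf/libbpf.h>

    /* Illustrative sample layout; any fixed-size struct works. */
    struct my_msg { int op; int arg; };

    static int produce_one(int map_fd, const struct my_msg *msg)
    {
        struct user_ring_buffer *rb;
        void *sample;
        int err;

        rb = user_ring_buffer__new(map_fd, NULL);
        if (!rb)
            return -errno;

        /* Sizes that collide with the kernel's busy/discard header flags
         * (the two topmost bits) are now rejected here with errno == E2BIG
         * instead of corrupting the ring buffer header.
         */
        sample = user_ring_buffer__reserve(rb, sizeof(*msg));
        if (!sample) {
            err = -errno;
            user_ring_buffer__free(rb);
            return err;
        }
        memcpy(sample, msg, sizeof(*msg));
        user_ring_buffer__submit(rb, sample);
        user_ring_buffer__free(rb);
        return 0;
    }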

* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  selftests/bpf: Make test_bench_attach serial
  selftests/bpf: Filter out default_idle from kprobe_multi bench
  bpf: Set and check spin lock value in sk_storage_map_test
  bpf: Do not copy spin lock field from user in bpf_selem_alloc
  libbpf: Check the validity of size in user_ring_buffer__reserve()
  libbpf: Handle size overflow for user ringbuf mmap
  libbpf: Handle size overflow for ringbuf mmap
  libbpf: Use page size as max_entries when probing ring buffer map
  bpf, perf: Use subprog name when reporting subprog ksymbol
  libbpf: Use correct return pointer in attach_raw_tp
====================

Link: https://lore.kernel.org/r/20221125001034.29473-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 4f4a5de125 (Jakub Kicinski, 2022-11-28 17:06:51 -08:00)
7 files changed, 48 insertions(+), 30 deletions(-)

@@ -74,7 +74,7 @@ bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
 				gfp_flags | __GFP_NOWARN);
 	if (selem) {
 		if (value)
-			memcpy(SDATA(selem)->data, value, smap->map.value_size);
+			copy_map_value(&smap->map, SDATA(selem)->data, value);
 		return selem;
 	}
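
copy_map_value() is the kernel-internal helper that copies a map value while
skipping special fields such as struct bpf_spin_lock, so an update from user
space can no longer overwrite the kernel-managed lock word. A minimal BPF-side
sketch of the kind of map value this protects; the struct and map names are
illustrative, not taken from this series:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    /* Value layout with an embedded spin lock; only the non-lock fields are
     * copied in from a user-supplied value on update.
     */
    struct sk_val {
        struct bpf_spin_lock lock;
        __u32 cnt;
    };

    struct {
        __uint(type, BPF_MAP_TYPE_SK_STORAGE);
        __uint(map_flags, BPF_F_NO_PREALLOC);
        __type(key, int);
        __type(value, struct sk_val);
    } sk_storage SEC(".maps");

    char LICENSE[] SEC("license") = "GPL";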

@@ -9030,7 +9030,7 @@ static void perf_event_bpf_emit_ksymbols(struct bpf_prog *prog,
 				PERF_RECORD_KSYMBOL_TYPE_BPF,
 				(u64)(unsigned long)subprog->bpf_func,
 				subprog->jited_len, unregister,
-				prog->aux->ksym.name);
+				subprog->aux->ksym.name);
 		}
 	}
 }

@@ -11169,7 +11169,7 @@ static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf
 	}
 
 	*link = bpf_program__attach_raw_tracepoint(prog, tp_name);
-	return libbpf_get_error(link);
+	return libbpf_get_error(*link);
 }
 
 /* Common logic for all BPF program types that attach to a btf_id */
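
libbpf_get_error() inspects the pointer value it is handed, so the old code
effectively asked about the address of the caller's link slot (always a valid
pointer) rather than the link returned by the attach call, and attach failures
were reported as success. A hedged application-side sketch of the pattern this
fixes; the program handle and tracepoint name are illustrative assumptions:

    #include <stdio.h>
    #include <bpf/libbpf.h>

    static int attach_example(struct bpf_program *prog)
    {
        struct bpf_link *link;
        long err;

        link = bpf_program__attach_raw_tracepoint(prog, "sched_switch");
        /* The error is encoded in the returned pointer (or NULL + errno),
         * so it must be extracted from the link itself.
         */
        err = libbpf_get_error(link);
        if (err) {
            fprintf(stderr, "attach_raw_tp failed: %ld\n", err);
            return (int)err;
        }
        bpf_link__destroy(link);
        return 0;
    }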

@@ -234,7 +234,7 @@ static int probe_map_create(enum bpf_map_type map_type)
 	case BPF_MAP_TYPE_USER_RINGBUF:
 		key_size = 0;
 		value_size = 0;
-		max_entries = 4096;
+		max_entries = sysconf(_SC_PAGE_SIZE);
 		break;
 	case BPF_MAP_TYPE_STRUCT_OPS:
 		/* we'll get -ENOTSUPP for invalid BTF type ID for struct_ops */
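
Ring buffer maps require max_entries to be a page-aligned power of two, so the
hard-coded 4096 made the feature probe fail on kernels built with larger pages
(for example 64K pages on some arm64 and powerpc systems) even though ring
buffers were supported. A minimal sketch of the user-facing probe that goes
through this path, assuming a libbpf recent enough to provide
libbpf_probe_bpf_map_type():

    #include <stdio.h>
    #include <bpf/libbpf.h>

    int main(void)
    {
        /* Returns 1 if the kernel supports BPF_MAP_TYPE_RINGBUF, 0 if it
         * does not, or a negative error code on probe failure.
         */
        int ret = libbpf_probe_bpf_map_type(BPF_MAP_TYPE_RINGBUF, NULL);

        if (ret < 0)
            fprintf(stderr, "probe failed: %d\n", ret);
        else
            printf("ringbuf maps %ssupported\n", ret ? "" : "not ");
        return ret < 0 ? 1 : 0;
    }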

@@ -77,6 +77,7 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
 	__u32 len = sizeof(info);
 	struct epoll_event *e;
 	struct ring *r;
+	__u64 mmap_sz;
 	void *tmp;
 	int err;
@@ -115,8 +116,7 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
 	r->mask = info.max_entries - 1;
 
 	/* Map writable consumer page */
-	tmp = mmap(NULL, rb->page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
-		   map_fd, 0);
+	tmp = mmap(NULL, rb->page_size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
 	if (tmp == MAP_FAILED) {
 		err = -errno;
 		pr_warn("ringbuf: failed to mmap consumer page for map fd=%d: %d\n",
@@ -129,8 +129,12 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
 	 * data size to allow simple reading of samples that wrap around the
 	 * end of a ring buffer. See kernel implementation for details.
 	 * */
-	tmp = mmap(NULL, rb->page_size + 2 * info.max_entries, PROT_READ,
-		   MAP_SHARED, map_fd, rb->page_size);
+	mmap_sz = rb->page_size + 2 * (__u64)info.max_entries;
+	if (mmap_sz != (__u64)(size_t)mmap_sz) {
+		pr_warn("ringbuf: ring buffer size (%u) is too big\n", info.max_entries);
+		return libbpf_err(-E2BIG);
+	}
+	tmp = mmap(NULL, (size_t)mmap_sz, PROT_READ, MAP_SHARED, map_fd, rb->page_size);
 	if (tmp == MAP_FAILED) {
 		err = -errno;
 		ringbuf_unmap_ring(rb, r);
@@ -348,6 +352,7 @@ static int user_ringbuf_map(struct user_ring_buffer *rb, int map_fd)
 {
 	struct bpf_map_info info;
 	__u32 len = sizeof(info);
+	__u64 mmap_sz;
 	void *tmp;
 	struct epoll_event *rb_epoll;
 	int err;
@@ -384,8 +389,13 @@ static int user_ringbuf_map(struct user_ring_buffer *rb, int map_fd)
 	 * simple reading and writing of samples that wrap around the end of
 	 * the buffer. See the kernel implementation for details.
 	 */
-	tmp = mmap(NULL, rb->page_size + 2 * info.max_entries,
-		   PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, rb->page_size);
+	mmap_sz = rb->page_size + 2 * (__u64)info.max_entries;
+	if (mmap_sz != (__u64)(size_t)mmap_sz) {
+		pr_warn("user ringbuf: ring buf size (%u) is too big\n", info.max_entries);
+		return -E2BIG;
+	}
+	tmp = mmap(NULL, (size_t)mmap_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
+		   map_fd, rb->page_size);
 	if (tmp == MAP_FAILED) {
 		err = -errno;
 		pr_warn("user ringbuf: failed to mmap data pages for map fd=%d: %d\n",
@@ -476,6 +486,10 @@ void *user_ring_buffer__reserve(struct user_ring_buffer *rb, __u32 size)
 	__u64 cons_pos, prod_pos;
 	struct ringbuf_hdr *hdr;
 
+	/* The top two bits are used as special flags */
+	if (size & (BPF_RINGBUF_BUSY_BIT | BPF_RINGBUF_DISCARD_BIT))
+		return errno = E2BIG, NULL;
+
 	/* Synchronizes with smp_store_release() in __bpf_user_ringbuf_peek() in
 	 * the kernel.
 	 */
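
The overflow the two new mmap_sz checks guard against: info.max_entries is a
__u32, so the old expression rb->page_size + 2 * info.max_entries performed
the multiplication in 32-bit unsigned arithmetic. For a 2G ring buffer it
wrapped to 0, so only one page was mapped instead of the producer page plus
twice the data size. A small standalone illustration of the arithmetic (not
code from the patch):

    #include <stdio.h>
    #include <linux/types.h>

    int main(void)
    {
        __u32 max_entries = 1U << 31;   /* 2G of ring buffer data pages */
        size_t page_size = 4096;

        /* Old computation: 2 * max_entries wraps to 0 in 32-bit math. */
        size_t old_sz = page_size + 2 * max_entries;

        /* Fixed computation: widen first, then verify it fits in size_t. */
        __u64 new_sz = page_size + 2 * (__u64)max_entries;

        printf("old_sz=%zu new_sz=%llu fits_in_size_t=%d\n",
               old_sz, (unsigned long long)new_sz,
               new_sz == (__u64)(size_t)new_sz);
        return 0;
    }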

@@ -458,7 +458,7 @@ static void test_sk_storage_map_basic(void)
 	struct {
 		int cnt;
 		int lock;
-	} value = { .cnt = 0xeB9f, .lock = 0, }, lookup_value;
+	} value = { .cnt = 0xeB9f, .lock = 1, }, lookup_value;
 	struct bpf_map_create_opts bad_xattr;
 	int btf_fd, map_fd, sk_fd, err;
@@ -483,38 +483,41 @@ static void test_sk_storage_map_basic(void)
 	      "err:%d errno:%d\n", err, errno);
 	err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
 					BPF_F_LOCK);
-	CHECK(err || lookup_value.cnt != value.cnt,
+	CHECK(err || lookup_value.lock || lookup_value.cnt != value.cnt,
 	      "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
-	      "err:%d errno:%d cnt:%x(%x)\n",
-	      err, errno, lookup_value.cnt, value.cnt);
+	      "err:%d errno:%d lock:%x cnt:%x(%x)\n",
+	      err, errno, lookup_value.lock, lookup_value.cnt, value.cnt);
 
 	/* Bump the cnt and update with BPF_EXIST | BPF_F_LOCK */
 	value.cnt += 1;
+	value.lock = 2;
 	err = bpf_map_update_elem(map_fd, &sk_fd, &value,
 				  BPF_EXIST | BPF_F_LOCK);
 	CHECK(err, "bpf_map_update_elem(BPF_EXIST|BPF_F_LOCK)",
 	      "err:%d errno:%d\n", err, errno);
 	err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
 					BPF_F_LOCK);
-	CHECK(err || lookup_value.cnt != value.cnt,
+	CHECK(err || lookup_value.lock || lookup_value.cnt != value.cnt,
 	      "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
-	      "err:%d errno:%d cnt:%x(%x)\n",
-	      err, errno, lookup_value.cnt, value.cnt);
+	      "err:%d errno:%d lock:%x cnt:%x(%x)\n",
+	      err, errno, lookup_value.lock, lookup_value.cnt, value.cnt);
 
 	/* Bump the cnt and update with BPF_EXIST */
 	value.cnt += 1;
+	value.lock = 2;
 	err = bpf_map_update_elem(map_fd, &sk_fd, &value, BPF_EXIST);
 	CHECK(err, "bpf_map_update_elem(BPF_EXIST)",
 	      "err:%d errno:%d\n", err, errno);
 	err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
 					BPF_F_LOCK);
-	CHECK(err || lookup_value.cnt != value.cnt,
+	CHECK(err || lookup_value.lock || lookup_value.cnt != value.cnt,
 	      "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
-	      "err:%d errno:%d cnt:%x(%x)\n",
-	      err, errno, lookup_value.cnt, value.cnt);
+	      "err:%d errno:%d lock:%x cnt:%x(%x)\n",
+	      err, errno, lookup_value.lock, lookup_value.cnt, value.cnt);
 
 	/* Update with BPF_NOEXIST */
 	value.cnt += 1;
+	value.lock = 2;
 	err = bpf_map_update_elem(map_fd, &sk_fd, &value,
 				  BPF_NOEXIST | BPF_F_LOCK);
 	CHECK(!err || errno != EEXIST,
@@ -526,22 +529,23 @@ static void test_sk_storage_map_basic(void)
 	value.cnt -= 1;
 	err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
 					BPF_F_LOCK);
-	CHECK(err || lookup_value.cnt != value.cnt,
+	CHECK(err || lookup_value.lock || lookup_value.cnt != value.cnt,
 	      "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
-	      "err:%d errno:%d cnt:%x(%x)\n",
-	      err, errno, lookup_value.cnt, value.cnt);
+	      "err:%d errno:%d lock:%x cnt:%x(%x)\n",
+	      err, errno, lookup_value.lock, lookup_value.cnt, value.cnt);
 
 	/* Bump the cnt again and update with map_flags == 0 */
 	value.cnt += 1;
+	value.lock = 2;
 	err = bpf_map_update_elem(map_fd, &sk_fd, &value, 0);
 	CHECK(err, "bpf_map_update_elem()", "err:%d errno:%d\n",
 	      err, errno);
 	err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
 					BPF_F_LOCK);
-	CHECK(err || lookup_value.cnt != value.cnt,
+	CHECK(err || lookup_value.lock || lookup_value.cnt != value.cnt,
 	      "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
-	      "err:%d errno:%d cnt:%x(%x)\n",
-	      err, errno, lookup_value.cnt, value.cnt);
+	      "err:%d errno:%d lock:%x cnt:%x(%x)\n",
+	      err, errno, lookup_value.lock, lookup_value.cnt, value.cnt);
 
 	/* Test delete elem */
 	err = bpf_map_delete_elem(map_fd, &sk_fd);

@@ -358,10 +358,12 @@ static int get_syms(char ***symsp, size_t *cntp)
 		 * We attach to almost all kernel functions and some of them
 		 * will cause 'suspicious RCU usage' when fprobe is attached
 		 * to them. Filter out the current culprits - arch_cpu_idle
-		 * and rcu_* functions.
+		 * default_idle and rcu_* functions.
 		 */
 		if (!strcmp(name, "arch_cpu_idle"))
 			continue;
+		if (!strcmp(name, "default_idle"))
+			continue;
 		if (!strncmp(name, "rcu_", 4))
 			continue;
 		if (!strcmp(name, "bpf_dispatcher_xdp_func"))
@@ -400,7 +402,7 @@ error:
 	return err;
 }
 
-static void test_bench_attach(void)
+void serial_test_kprobe_multi_bench_attach(void)
 {
 	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
 	struct kprobe_multi_empty *skel = NULL;
@@ -468,6 +470,4 @@ void test_kprobe_multi_test(void)
 		test_attach_api_syms();
 	if (test__start_subtest("attach_api_fails"))
 		test_attach_api_fails();
-	if (test__start_subtest("bench_attach"))
-		test_bench_attach();
 }