// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf-cgroup.h>
#include <linux/bpf.h>
#include <linux/bpf_local_storage.h>
#include <linux/btf.h>
#include <linux/bug.h>
#include <linux/filter.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <uapi/linux/btf.h>
#include <linux/btf_ids.h>

#ifdef CONFIG_CGROUP_BPF

#include "../cgroup/cgroup-internal.h"

#define LOCAL_STORAGE_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)
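
/*
 * A cgroup storage map keeps its per-cgroup storages in both an rbtree
 * (for lookups by key) and a linked list (for iteration), protected by
 * a spinlock.
 */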
struct bpf_cgroup_storage_map {
	struct bpf_map map;

	spinlock_t lock;
	struct rb_root root;
	struct list_head list;
};

static struct bpf_cgroup_storage_map *map_to_storage(struct bpf_map *map)
{
	return container_of(map, struct bpf_cgroup_storage_map, map);
}
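
/*
 * A key size of sizeof(struct bpf_cgroup_storage_key) means the attach
 * type is part of the key, so storages of different attach types are
 * kept separate; a plain __u64 key (cgroup inode id only) means the
 * storage is shared across attach types.
 */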
static bool attach_type_isolated(const struct bpf_map *map)
{
	return map->key_size == sizeof(struct bpf_cgroup_storage_key);
}
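
/*
 * Compare two keys, honouring the map's key format: either
 * (cgroup_inode_id, attach_type) pairs or bare cgroup inode ids.
 * Returns -1, 0 or 1 for use as an rbtree ordering.
 */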
static int bpf_cgroup_storage_key_cmp(const struct bpf_cgroup_storage_map *map,
				      const void *_key1, const void *_key2)
{
	if (attach_type_isolated(&map->map)) {
		const struct bpf_cgroup_storage_key *key1 = _key1;
		const struct bpf_cgroup_storage_key *key2 = _key2;

		if (key1->cgroup_inode_id < key2->cgroup_inode_id)
			return -1;
		else if (key1->cgroup_inode_id > key2->cgroup_inode_id)
			return 1;
		else if (key1->attach_type < key2->attach_type)
			return -1;
		else if (key1->attach_type > key2->attach_type)
			return 1;
	} else {
		const __u64 *cgroup_inode_id1 = _key1;
		const __u64 *cgroup_inode_id2 = _key2;

		if (*cgroup_inode_id1 < *cgroup_inode_id2)
			return -1;
		else if (*cgroup_inode_id1 > *cgroup_inode_id2)
			return 1;
	}

	return 0;
}
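
/*
 * Find the storage for @key in the map's rbtree.  Takes map->lock unless
 * the caller indicates it already holds it via @locked.  Returns NULL if
 * no matching storage exists.
 */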
struct bpf_cgroup_storage *
cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
		      void *key, bool locked)
{
	struct rb_root *root = &map->root;
	struct rb_node *node;

	if (!locked)
		spin_lock_bh(&map->lock);

	node = root->rb_node;
	while (node) {
		struct bpf_cgroup_storage *storage;

		storage = container_of(node, struct bpf_cgroup_storage, node);

		switch (bpf_cgroup_storage_key_cmp(map, key, &storage->key)) {
		case -1:
			node = node->rb_left;
			break;
		case 1:
			node = node->rb_right;
			break;
		default:
			if (!locked)
				spin_unlock_bh(&map->lock);
			return storage;
		}
	}

	if (!locked)
		spin_unlock_bh(&map->lock);

	return NULL;
}
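
/*
 * Link @storage into the map's rbtree.  Returns -EEXIST if a storage
 * with the same key is already present.  Does not take map->lock itself,
 * so callers must serialize updates to the tree.
 */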
static int cgroup_storage_insert(struct bpf_cgroup_storage_map *map,
				 struct bpf_cgroup_storage *storage)
{
	struct rb_root *root = &map->root;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct bpf_cgroup_storage *this;

		this = container_of(*new, struct bpf_cgroup_storage, node);

		parent = *new;
		switch (bpf_cgroup_storage_key_cmp(map, &storage->key, &this->key)) {
		case -1:
			new = &((*new)->rb_left);
			break;
		case 1:
			new = &((*new)->rb_right);
			break;
		default:
			return -EEXIST;
		}
	}

	rb_link_node(&storage->node, parent, new);
	rb_insert_color(&storage->node, root);

	return 0;
}
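
/*
 * Syscall-side lookup: return a pointer to the shared storage buffer for
 * @key, or NULL if no storage exists for it.
 */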
static void *cgroup_storage_lookup_elem(struct bpf_map *_map, void *key)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage *storage;

	storage = cgroup_storage_lookup(map, key, false);
	if (!storage)
		return NULL;

	return &READ_ONCE(storage->buf)->data[0];
}
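
/*
 * Syscall-side update of the shared storage.  With BPF_F_LOCK the value
 * is copied under the map value's spin lock; otherwise a new buffer is
 * allocated, swapped in with xchg() and the old one freed after an RCU
 * grace period.
 */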
static long cgroup_storage_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 flags)
{
	struct bpf_cgroup_storage *storage;
	struct bpf_storage_buffer *new;

	if (unlikely(flags & ~(BPF_F_LOCK | BPF_EXIST)))
		return -EINVAL;

	if (unlikely((flags & BPF_F_LOCK) &&
		     !btf_record_has_field(map->record, BPF_SPIN_LOCK)))
		return -EINVAL;

	storage = cgroup_storage_lookup((struct bpf_cgroup_storage_map *)map,
					key, false);
	if (!storage)
		return -ENOENT;

	if (flags & BPF_F_LOCK) {
		copy_map_value_locked(map, storage->buf->data, value, false);
		return 0;
	}

	new = bpf_map_kmalloc_node(map, struct_size(new, data, map->value_size),
				   __GFP_ZERO | GFP_NOWAIT | __GFP_NOWARN,
				   map->numa_node);
	if (!new)
		return -ENOMEM;

	memcpy(&new->data[0], value, map->value_size);
	check_and_init_map_value(map, new->data);

	new = xchg(&storage->buf, new);
	kfree_rcu(new, rcu);

	return 0;
}
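
/*
 * Copy the per-cpu storage for @key into @value, laid out as
 * round_up(value_size, 8) bytes per possible CPU.
 */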
int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *key,
				   void *value)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage *storage;
	int cpu, off = 0;
	u32 size;

	rcu_read_lock();
	storage = cgroup_storage_lookup(map, key, false);
	if (!storage) {
		rcu_read_unlock();
		return -ENOENT;
	}

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(_map->value_size, 8);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off,
				per_cpu_ptr(storage->percpu_buf, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
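
/*
 * Overwrite the per-cpu storage for @key from @value, which must contain
 * round_up(value_size, 8) bytes for each possible CPU.  Only BPF_ANY and
 * BPF_EXIST are accepted as flags.
 */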
int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *key,
				     void *value, u64 map_flags)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage *storage;
	int cpu, off = 0;
	u32 size;

	if (map_flags != BPF_ANY && map_flags != BPF_EXIST)
		return -EINVAL;

	rcu_read_lock();
	storage = cgroup_storage_lookup(map, key, false);
	if (!storage) {
		rcu_read_unlock();
		return -ENOENT;
	}

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = round_up(_map->value_size, 8);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(storage->percpu_buf, cpu),
				value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
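
/*
 * Iterate the map's keys in list order: with a NULL @key the first
 * storage's key is returned, otherwise the key of the entry after @key.
 * Returns -ENOENT if the list is empty or @key has no storage.
 */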
static int cgroup_storage_get_next_key(struct bpf_map *_map, void *key,
				       void *_next_key)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage *storage;

	spin_lock_bh(&map->lock);

	if (list_empty(&map->list))
		goto enoent;

	if (key) {
		storage = cgroup_storage_lookup(map, key, true);
		if (!storage)
			goto enoent;

		storage = list_next_entry(storage, list_map);
		if (!storage)
			goto enoent;
	} else {
		storage = list_first_entry(&map->list,
					   struct bpf_cgroup_storage, list_map);
	}

	spin_unlock_bh(&map->lock);

	if (attach_type_isolated(&map->map)) {
		struct bpf_cgroup_storage_key *next = _next_key;
		*next = storage->key;
	} else {
		__u64 *next = _next_key;
		*next = storage->key.cgroup_inode_id;
	}
	return 0;

enoent:
	spin_unlock_bh(&map->lock);
	return -ENOENT;
}
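
/*
 * Allocate and initialize a cgroup storage map.  The key must be either a
 * struct bpf_cgroup_storage_key or a __u64 cgroup inode id, max_entries
 * must be 0, and the value size is capped (by PCPU_MIN_UNIT_SIZE for the
 * per-cpu flavour).
 */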
static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
{
	__u32 max_value_size = BPF_LOCAL_STORAGE_MAX_VALUE_SIZE;
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_cgroup_storage_map *map;

	/* percpu is bound by PCPU_MIN_UNIT_SIZE, non-percpu
	 * is the same as other local storages.
	 */
	if (attr->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		max_value_size = min_t(__u32, max_value_size,
				       PCPU_MIN_UNIT_SIZE);

	if (attr->key_size != sizeof(struct bpf_cgroup_storage_key) &&
	    attr->key_size != sizeof(__u64))
		return ERR_PTR(-EINVAL);

	if (attr->value_size == 0)
		return ERR_PTR(-EINVAL);

	if (attr->value_size > max_value_size)
		return ERR_PTR(-E2BIG);

	if (attr->map_flags & ~LOCAL_STORAGE_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags))
		return ERR_PTR(-EINVAL);

	if (attr->max_entries)
		/* max_entries is not used and enforced to be 0 */
		return ERR_PTR(-EINVAL);

	map = bpf_map_area_alloc(sizeof(struct bpf_cgroup_storage_map), numa_node);
	if (!map)
		return ERR_PTR(-ENOMEM);

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&map->map, attr);

	spin_lock_init(&map->lock);
	map->root = RB_ROOT;
	INIT_LIST_HEAD(&map->list);

	return &map->map;
}
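
/*
 * Free the map and every storage still linked to it.  cgroup_lock() is
 * held so this cannot race with cgroup_bpf_release() freeing the same
 * storages.
 */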
|
|
|
|
|
|
|
|
static void cgroup_storage_map_free(struct bpf_map *_map)
|
|
|
|
{
|
|
|
|
struct bpf_cgroup_storage_map *map = map_to_storage(_map);
|
bpf: Make cgroup storages shared between programs on the same cgroup
This change comes in several parts:
One, the restriction that the CGROUP_STORAGE map can only be used
by one program is removed. This results in the removal of the field
'aux' in struct bpf_cgroup_storage_map, and removal of relevant
code associated with the field, and removal of now-noop functions
bpf_free_cgroup_storage and bpf_cgroup_storage_release.
Second, we permit a key of type u64 as the key to the map.
Providing such a key type indicates that the map should ignore
attach type when comparing map keys. However, for simplicity newly
linked storage will still have the attach type at link time in
its key struct. cgroup_storage_check_btf is adapted to accept
u64 as the type of the key.
Third, because the storages are now shared, the storages cannot
be unconditionally freed on program detach. There could be two
ways to solve this issue:
* A. Reference count the usage of the storages, and free when the
last program is detached.
* B. Free only when the storage is impossible to be referred to
again, i.e. when either the cgroup_bpf it is attached to, or
the map itself, is freed.
Option A has the side effect that, when the user detach and
reattach a program, whether the program gets a fresh storage
depends on whether there is another program attached using that
storage. This could trigger races if the user is multi-threaded,
and since nondeterminism in data races is evil, go with option B.
The both the map and the cgroup_bpf now tracks their associated
storages, and the storage unlink and free are removed from
cgroup_bpf_detach and added to cgroup_bpf_release and
cgroup_storage_map_free. The latter also new holds the cgroup_mutex
to prevent any races with the former.
Fourth, on attach, we reuse the old storage if the key already
exists in the map, via cgroup_storage_lookup. If the storage
does not exist yet, we create a new one, and publish it at the
last step in the attach process. This does not create a race
condition because for the whole attach the cgroup_mutex is held.
We keep track of an array of new storages that was allocated
and if the process fails only the new storages would get freed.
Signed-off-by: YiFei Zhu <zhuyifei@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/d5401c6106728a00890401190db40020a1f84ff1.1595565795.git.zhuyifei@google.com
2020-07-24 12:47:43 +08:00
|
|
|
	struct list_head *storages = &map->list;
	struct bpf_cgroup_storage *storage, *stmp;

	cgroup_lock();
	list_for_each_entry_safe(storage, stmp, storages, list_map) {
		bpf_cgroup_storage_unlink(storage);
		bpf_cgroup_storage_free(storage);
	}
	cgroup_unlock();

	WARN_ON(!RB_EMPTY_ROOT(&map->root));
	WARN_ON(!list_empty(&map->list));

	bpf_map_area_free(map);
}
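
/* Note: storages are deliberately not freed on program detach.  They live
 * until either this map is freed (above) or the owning cgroup goes away;
 * cgroup_bpf_release() walks cgroup->bpf.storages through the list_cg
 * linkage and performs the same unlink + free under cgroup_mutex, which is
 * why cgroup_storage_map_free() also takes cgroup_lock().  A rough sketch
 * of that counterpart (simplified from kernel/bpf/cgroup.c, for
 * illustration only):
 *
 *	list_for_each_entry_safe(storage, stmp, &cgrp->bpf.storages, list_cg) {
 *		bpf_cgroup_storage_unlink(storage);
 *		bpf_cgroup_storage_free(storage);
 *	}
 */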

static long cgroup_storage_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

static int cgroup_storage_check_btf(const struct bpf_map *map,
				    const struct btf *btf,
				    const struct btf_type *key_type,
				    const struct btf_type *value_type)
{
	if (attach_type_isolated(map)) {
		struct btf_member *m;
		u32 offset, size;

		/* Key is expected to be of struct bpf_cgroup_storage_key type,
		 * which is:
		 * struct bpf_cgroup_storage_key {
		 *	__u64	cgroup_inode_id;
		 *	__u32	attach_type;
		 * };
		 */

		/*
		 * Key_type must be a structure with two fields.
		 */
		if (BTF_INFO_KIND(key_type->info) != BTF_KIND_STRUCT ||
		    BTF_INFO_VLEN(key_type->info) != 2)
			return -EINVAL;

		/*
		 * The first field must be a 64 bit integer at 0 offset.
		 */
		m = (struct btf_member *)(key_type + 1);
		size = sizeof_field(struct bpf_cgroup_storage_key, cgroup_inode_id);
		if (!btf_member_is_reg_int(btf, key_type, m, 0, size))
			return -EINVAL;

		/*
		 * The second field must be a 32 bit integer at 64 bit offset.
		 */
		m++;
		offset = offsetof(struct bpf_cgroup_storage_key, attach_type);
		size = sizeof_field(struct bpf_cgroup_storage_key, attach_type);
		if (!btf_member_is_reg_int(btf, key_type, m, offset, size))
			return -EINVAL;
	} else {
		u32 int_data;

		/*
		 * Key is expected to be u64, which stores the cgroup_inode_id
		 */

		if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
			return -EINVAL;

		int_data = *(u32 *)(key_type + 1);
		if (BTF_INT_BITS(int_data) != 64 || BTF_INT_OFFSET(int_data))
			return -EINVAL;
	}

	return 0;
}
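
/* For reference, the two key layouts this check accepts, written as they
 * might appear in a BPF program's map definition (illustrative only, using
 * libbpf's BTF-defined map convention; "struct my_value" is a hypothetical
 * value type, not part of this file):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
 *		__type(key, struct bpf_cgroup_storage_key); // per (cgroup, attach type)
 *		__type(value, struct my_value);
 *	} cg_storage SEC(".maps");
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
 *		__type(key, __u64);                         // attach type ignored
 *		__type(value, struct my_value);
 *	} cg_storage_shared_key SEC(".maps");
 */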

static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *key,
					 struct seq_file *m)
{
	enum bpf_cgroup_storage_type stype;
	struct bpf_cgroup_storage *storage;
	int cpu;

	rcu_read_lock();
	storage = cgroup_storage_lookup(map_to_storage(map), key, false);
	if (!storage) {
		rcu_read_unlock();
		return;
	}

	btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
	stype = cgroup_storage_type(map);
	if (stype == BPF_CGROUP_STORAGE_SHARED) {
		seq_puts(m, ": ");
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  &READ_ONCE(storage->buf)->data[0], m);
		seq_puts(m, "\n");
	} else {
		seq_puts(m, ": {\n");
		for_each_possible_cpu(cpu) {
			seq_printf(m, "\tcpu%d: ", cpu);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  per_cpu_ptr(storage->percpu_buf, cpu),
					  m);
			seq_puts(m, "\n");
		}
		seq_puts(m, "}\n");
	}
	rcu_read_unlock();
}
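
/* The resulting bpffs text is "<key>: <value>" for shared storage, and for
 * per-cpu storage "<key>: {" followed by one "\tcpu<N>: <value>" line per
 * possible CPU and a closing "}", with key and value both rendered through
 * their BTF types.
 */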

static u64 cgroup_storage_map_usage(const struct bpf_map *map)
{
	/* Currently the dynamically allocated elements are not counted. */
	return sizeof(struct bpf_cgroup_storage_map);
}

BTF_ID_LIST_SINGLE(cgroup_storage_map_btf_ids, struct,
		   bpf_cgroup_storage_map)
const struct bpf_map_ops cgroup_storage_map_ops = {
	.map_alloc = cgroup_storage_map_alloc,
	.map_free = cgroup_storage_map_free,
	.map_get_next_key = cgroup_storage_get_next_key,
	.map_lookup_elem = cgroup_storage_lookup_elem,
	.map_update_elem = cgroup_storage_update_elem,
	.map_delete_elem = cgroup_storage_delete_elem,
	.map_check_btf = cgroup_storage_check_btf,
	.map_seq_show_elem = cgroup_storage_seq_show_elem,
	.map_mem_usage = cgroup_storage_map_usage,
	.map_btf_id = &cgroup_storage_map_btf_ids[0],
};
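
/* These ops back the BPF_MAP_TYPE_CGROUP_STORAGE and
 * BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE map types.  From an attached program
 * the value is reached through the bpf_get_local_storage() helper rather
 * than bpf_map_lookup_elem(); a minimal program-side sketch (illustrative
 * only, "cg_storage" being a hypothetical map name):
 *
 *	__u64 *counter = bpf_get_local_storage(&cg_storage, 0);
 *
 *	__sync_fetch_and_add(counter, 1);
 *
 * The helper returns the storage slot for the cgroup (and, for a struct
 * key, the attach type) the running program is attached to.
 */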

int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *_map)
{
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map);

	if (aux->cgroup_storage[stype] &&
	    aux->cgroup_storage[stype] != _map)
		return -EBUSY;

	aux->cgroup_storage[stype] = _map;
	return 0;
}
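
/* A program may reference at most one cgroup storage map per storage type:
 * one BPF_CGROUP_STORAGE_SHARED map and one BPF_CGROUP_STORAGE_PERCPU map
 * can coexist in the same program (different stype slots), but referencing
 * two different shared maps makes the second bpf_cgroup_storage_assign()
 * call return -EBUSY and the program load fails.
 */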

static size_t bpf_cgroup_storage_calculate_size(struct bpf_map *map, u32 *pages)
{
	size_t size;

	if (cgroup_storage_type(map) == BPF_CGROUP_STORAGE_SHARED) {
		size = sizeof(struct bpf_storage_buffer) + map->value_size;
		*pages = round_up(sizeof(struct bpf_cgroup_storage) + size,
				  PAGE_SIZE) >> PAGE_SHIFT;
	} else {
		size = map->value_size;
		*pages = round_up(round_up(size, 8) * num_possible_cpus(),
				  PAGE_SIZE) >> PAGE_SHIFT;
	}

	return size;
}
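
/* Worked example (illustrative numbers only): for a per-cpu map with
 * value_size = 100 on a system with 64 possible CPUs and 4 KiB pages, each
 * per-cpu slot is rounded up to 104 bytes, so
 * *pages = round_up(104 * 64, 4096) >> 12 = 2, while the returned size
 * stays at the raw value_size of 100.
 */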

struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
					enum bpf_cgroup_storage_type stype)
{
	const gfp_t gfp = __GFP_ZERO | GFP_USER;
	struct bpf_cgroup_storage *storage;
	struct bpf_map *map;
	size_t size;
	u32 pages;

	map = prog->aux->cgroup_storage[stype];
	if (!map)
		return NULL;

	size = bpf_cgroup_storage_calculate_size(map, &pages);

	storage = bpf_map_kmalloc_node(map, sizeof(struct bpf_cgroup_storage),
				       gfp, map->numa_node);
	if (!storage)
		goto enomem;

	if (stype == BPF_CGROUP_STORAGE_SHARED) {
		storage->buf = bpf_map_kmalloc_node(map, size, gfp,
						    map->numa_node);
		if (!storage->buf)
			goto enomem;
		check_and_init_map_value(map, storage->buf->data);
	} else {
		storage->percpu_buf = bpf_map_alloc_percpu(map, size, 8, gfp);
		if (!storage->percpu_buf)
			goto enomem;
	}

	storage->map = (struct bpf_cgroup_storage_map *)map;

	return storage;

enomem:
	kfree(storage);
	return ERR_PTR(-ENOMEM);
}
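
/* Rough sketch of the caller on the attach path (simplified from
 * __cgroup_bpf_attach() in kernel/bpf/cgroup.c, for illustration only;
 * "storage[]" is a hypothetical local array).  Storage for every type used
 * by the program is allocated up front and only linked into the map and the
 * cgroup once attachment can no longer fail, all under cgroup_mutex:
 *
 *	for_each_cgroup_storage_type(stype) {
 *		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
 *		if (IS_ERR(storage[stype]))
 *			goto cleanup;
 *	}
 *	...
 *	bpf_cgroup_storage_link(storage[stype], cgrp, type);
 */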

static void free_shared_cgroup_storage_rcu(struct rcu_head *rcu)
{
	struct bpf_cgroup_storage *storage =
		container_of(rcu, struct bpf_cgroup_storage, rcu);

	kfree(storage->buf);
	kfree(storage);
}

static void free_percpu_cgroup_storage_rcu(struct rcu_head *rcu)
{
	struct bpf_cgroup_storage *storage =
		container_of(rcu, struct bpf_cgroup_storage, rcu);

	free_percpu(storage->percpu_buf);
	kfree(storage);
}

void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage)
{
	enum bpf_cgroup_storage_type stype;
	struct bpf_map *map;

	if (!storage)
		return;

	map = &storage->map->map;
	stype = cgroup_storage_type(map);
	if (stype == BPF_CGROUP_STORAGE_SHARED)
		call_rcu(&storage->rcu, free_shared_cgroup_storage_rcu);
	else
		call_rcu(&storage->rcu, free_percpu_cgroup_storage_rcu);
}
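
/* The RCU deferral above pairs with readers such as
 * cgroup_storage_seq_show_elem(), which dereference the storage buffers
 * under rcu_read_lock(): the buffers are only reclaimed after a grace
 * period, so an in-flight reader never sees freed memory.
 */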

void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type)
{
	struct bpf_cgroup_storage_map *map;

	if (!storage)
		return;

	storage->key.attach_type = type;
	storage->key.cgroup_inode_id = cgroup_id(cgroup);

	map = storage->map;

	spin_lock_bh(&map->lock);
	WARN_ON(cgroup_storage_insert(map, storage));
	list_add(&storage->list_map, &map->list);
	list_add(&storage->list_cg, &cgroup->bpf.storages);
	spin_unlock_bh(&map->lock);
}

void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage)
{
	struct bpf_cgroup_storage_map *map;
	struct rb_root *root;

	if (!storage)
		return;

	map = storage->map;

	spin_lock_bh(&map->lock);
	root = &map->root;
	rb_erase(&storage->node, root);

	list_del(&storage->list_map);
	list_del(&storage->list_cg);
	spin_unlock_bh(&map->lock);
}

#endif