mirror of https://github.com/edk2-porting/linux-next.git
synced 2024-12-29 07:34:06 +08:00
5e7b30205c
Commit a5cbe05a66 ("bpf: Implement bpf iterator for map elements") added
bpf iterator support for map elements. The map element bpf iterator
requires info to identify a particular map. In the above commit,
attr->link_create.target_fd is used to carry the map_fd, and an enum
bpf_iter_link_info is added to the uapi to specify that the target_fd
actually represents a map_fd:

    enum bpf_iter_link_info {
        BPF_ITER_LINK_UNSPEC = 0,
        BPF_ITER_LINK_MAP_FD = 1,
        MAX_BPF_ITER_LINK_INFO,
    };

This is an extensible approach, as we can grow enumerators for pid,
cgroup_id, etc. and can unionize target_fd for pid, cgroup_id, etc. But
in the future there is a chance that more complex customization may be
needed, e.g., for tasks, the iteration could be filtered based on both
cgroup_id and user_id.

This patch changed the uapi to have the fields

    __aligned_u64 iter_info;
    __u32         iter_info_len;

carry additional iter_info for link_create. The iter_info is defined as

    union bpf_iter_link_info {
        struct {
            __u32 map_fd;
        } map;
    };

so future extension for additional customization will be easier. The
bpf_iter_link_info is passed to the target callback to validate, and the
generic bpf_iter framework does not need to deal with it any more. Note
that map_fd = 0 is considered invalid and -EBADF will be returned to
user space.

Fixes: a5cbe05a66 ("bpf: Implement bpf iterator for map elements")
Signed-off-by: Yonghong Song <yhs@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Andrii Nakryiko <andriin@fb.com>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Link: https://lore.kernel.org/bpf/20200805055056.1457463-1-yhs@fb.com
1457 lines
37 KiB
C
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>

#define SK_STORAGE_CREATE_FLAG_MASK \
	(BPF_F_NO_PREALLOC | BPF_F_CLONE)

struct bucket {
	struct hlist_head list;
	raw_spinlock_t lock;
};

/* The map is not the primary owner of a bpf_sk_storage_elem.
 * Instead, the sk->sk_bpf_storage is.
 *
 * The map (bpf_sk_storage_map) is for two purposes
 * 1. Define the size of the "sk local storage".  It is
 *    the map's value_size.
 *
 * 2. Maintain a list to keep track of all elems such
 *    that they can be cleaned up during the map destruction.
 *
 * When a bpf local storage is being looked up for a
 * particular sk, the "bpf_map" pointer is actually used
 * as the "key" to search in the list of elem in
 * sk->sk_bpf_storage.
 *
 * Hence, consider sk->sk_bpf_storage is the mini-map
 * with the "bpf_map" pointer as the searching key.
 */
struct bpf_sk_storage_map {
	struct bpf_map map;
	/* Lookup elem does not require accessing the map.
	 *
	 * Updating/Deleting requires a bucket lock to
	 * link/unlink the elem from the map.  Having
	 * multiple buckets to reduce contention.
	 */
	struct bucket *buckets;
	u32 bucket_log;
	u16 elem_size;
	u16 cache_idx;
};

struct bpf_sk_storage_data {
	/* smap is used as the searching key when looking up
	 * from sk->sk_bpf_storage.
	 *
	 * Put it in the same cacheline as the data to minimize
	 * the number of cacheline accesses during the cache hit case.
	 */
	struct bpf_sk_storage_map __rcu *smap;
	u8 data[] __aligned(8);
};

/* Linked to bpf_sk_storage and bpf_sk_storage_map */
struct bpf_sk_storage_elem {
	struct hlist_node map_node;	/* Linked to bpf_sk_storage_map */
	struct hlist_node snode;	/* Linked to bpf_sk_storage */
	struct bpf_sk_storage __rcu *sk_storage;
	struct rcu_head rcu;
	/* 8 bytes hole */
	/* The data is stored in another cacheline to minimize
	 * the number of cacheline accesses during a cache hit.
	 */
	struct bpf_sk_storage_data sdata ____cacheline_aligned;
};

#define SELEM(_SDATA) container_of((_SDATA), struct bpf_sk_storage_elem, sdata)
#define SDATA(_SELEM) (&(_SELEM)->sdata)
#define BPF_SK_STORAGE_CACHE_SIZE	16

static DEFINE_SPINLOCK(cache_idx_lock);
static u64 cache_idx_usage_counts[BPF_SK_STORAGE_CACHE_SIZE];

struct bpf_sk_storage {
	struct bpf_sk_storage_data __rcu *cache[BPF_SK_STORAGE_CACHE_SIZE];
	struct hlist_head list;	/* List of bpf_sk_storage_elem */
	struct sock *sk;	/* The sk that owns the above "list" of
				 * bpf_sk_storage_elem.
				 */
	struct rcu_head rcu;
	raw_spinlock_t lock;	/* Protect adding/removing from the "list" */
};

static struct bucket *select_bucket(struct bpf_sk_storage_map *smap,
				    struct bpf_sk_storage_elem *selem)
{
	return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
}

static int omem_charge(struct sock *sk, unsigned int size)
{
	/* same check as in sock_kmalloc() */
	if (size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		atomic_add(size, &sk->sk_omem_alloc);
		return 0;
	}

	return -ENOMEM;
}

static bool selem_linked_to_sk(const struct bpf_sk_storage_elem *selem)
{
	return !hlist_unhashed(&selem->snode);
}

static bool selem_linked_to_map(const struct bpf_sk_storage_elem *selem)
{
	return !hlist_unhashed(&selem->map_node);
}

static struct bpf_sk_storage_elem *selem_alloc(struct bpf_sk_storage_map *smap,
					       struct sock *sk, void *value,
					       bool charge_omem)
{
	struct bpf_sk_storage_elem *selem;

	if (charge_omem && omem_charge(sk, smap->elem_size))
		return NULL;

	selem = kzalloc(smap->elem_size, GFP_ATOMIC | __GFP_NOWARN);
	if (selem) {
		if (value)
			memcpy(SDATA(selem)->data, value, smap->map.value_size);
		return selem;
	}

	if (charge_omem)
		atomic_sub(smap->elem_size, &sk->sk_omem_alloc);

	return NULL;
}

/* sk_storage->lock must be held and selem->sk_storage == sk_storage.
 * The caller must ensure selem->smap is still valid to be
 * dereferenced for its smap->elem_size and smap->cache_idx.
 */
static bool __selem_unlink_sk(struct bpf_sk_storage *sk_storage,
			      struct bpf_sk_storage_elem *selem,
			      bool uncharge_omem)
{
	struct bpf_sk_storage_map *smap;
	bool free_sk_storage;
	struct sock *sk;

	smap = rcu_dereference(SDATA(selem)->smap);
	sk = sk_storage->sk;

	/* All uncharging on sk->sk_omem_alloc must be done first.
	 * sk may be freed once the last selem is unlinked from sk_storage.
	 */
	if (uncharge_omem)
		atomic_sub(smap->elem_size, &sk->sk_omem_alloc);

	free_sk_storage = hlist_is_singular_node(&selem->snode,
						 &sk_storage->list);
	if (free_sk_storage) {
		atomic_sub(sizeof(struct bpf_sk_storage), &sk->sk_omem_alloc);
		sk_storage->sk = NULL;
		/* After this RCU_INIT, sk may be freed and cannot be used */
		RCU_INIT_POINTER(sk->sk_bpf_storage, NULL);

		/* sk_storage is not freed now.  sk_storage->lock is
		 * still held and raw_spin_unlock_bh(&sk_storage->lock)
		 * will be done by the caller.
		 *
		 * Although the unlock will be done under
		 * rcu_read_lock(), it is more intuitive to
		 * read if kfree_rcu(sk_storage, rcu) is done
		 * after the raw_spin_unlock_bh(&sk_storage->lock).
		 *
		 * Hence, a "bool free_sk_storage" is returned
		 * to the caller which then calls the kfree_rcu()
		 * after unlock.
		 */
	}
	hlist_del_init_rcu(&selem->snode);
	if (rcu_access_pointer(sk_storage->cache[smap->cache_idx]) ==
	    SDATA(selem))
		RCU_INIT_POINTER(sk_storage->cache[smap->cache_idx], NULL);

	kfree_rcu(selem, rcu);

	return free_sk_storage;
}

static void selem_unlink_sk(struct bpf_sk_storage_elem *selem)
{
	struct bpf_sk_storage *sk_storage;
	bool free_sk_storage = false;

	if (unlikely(!selem_linked_to_sk(selem)))
		/* selem has already been unlinked from sk */
		return;

	sk_storage = rcu_dereference(selem->sk_storage);
	raw_spin_lock_bh(&sk_storage->lock);
	if (likely(selem_linked_to_sk(selem)))
		free_sk_storage = __selem_unlink_sk(sk_storage, selem, true);
	raw_spin_unlock_bh(&sk_storage->lock);

	if (free_sk_storage)
		kfree_rcu(sk_storage, rcu);
}

static void __selem_link_sk(struct bpf_sk_storage *sk_storage,
			    struct bpf_sk_storage_elem *selem)
{
	RCU_INIT_POINTER(selem->sk_storage, sk_storage);
	hlist_add_head(&selem->snode, &sk_storage->list);
}

static void selem_unlink_map(struct bpf_sk_storage_elem *selem)
{
	struct bpf_sk_storage_map *smap;
	struct bucket *b;

	if (unlikely(!selem_linked_to_map(selem)))
		/* selem has already been unlinked from smap */
		return;

	smap = rcu_dereference(SDATA(selem)->smap);
	b = select_bucket(smap, selem);
	raw_spin_lock_bh(&b->lock);
	if (likely(selem_linked_to_map(selem)))
		hlist_del_init_rcu(&selem->map_node);
	raw_spin_unlock_bh(&b->lock);
}

static void selem_link_map(struct bpf_sk_storage_map *smap,
			   struct bpf_sk_storage_elem *selem)
{
	struct bucket *b = select_bucket(smap, selem);

	raw_spin_lock_bh(&b->lock);
	RCU_INIT_POINTER(SDATA(selem)->smap, smap);
	hlist_add_head_rcu(&selem->map_node, &b->list);
	raw_spin_unlock_bh(&b->lock);
}

static void selem_unlink(struct bpf_sk_storage_elem *selem)
{
	/* Always unlink from map before unlinking from sk_storage
	 * because selem will be freed after successfully unlinked from
	 * the sk_storage.
	 */
	selem_unlink_map(selem);
	selem_unlink_sk(selem);
}

static struct bpf_sk_storage_data *
__sk_storage_lookup(struct bpf_sk_storage *sk_storage,
		    struct bpf_sk_storage_map *smap,
		    bool cacheit_lockit)
{
	struct bpf_sk_storage_data *sdata;
	struct bpf_sk_storage_elem *selem;

	/* Fast path (cache hit) */
	sdata = rcu_dereference(sk_storage->cache[smap->cache_idx]);
	if (sdata && rcu_access_pointer(sdata->smap) == smap)
		return sdata;

	/* Slow path (cache miss) */
	hlist_for_each_entry_rcu(selem, &sk_storage->list, snode)
		if (rcu_access_pointer(SDATA(selem)->smap) == smap)
			break;

	if (!selem)
		return NULL;

	sdata = SDATA(selem);
	if (cacheit_lockit) {
		/* spinlock is needed to avoid racing with the
		 * parallel delete.  Otherwise, publishing an already
		 * deleted sdata to the cache will become a use-after-free
		 * problem in the next __sk_storage_lookup().
		 */
		raw_spin_lock_bh(&sk_storage->lock);
		if (selem_linked_to_sk(selem))
			rcu_assign_pointer(sk_storage->cache[smap->cache_idx],
					   sdata);
		raw_spin_unlock_bh(&sk_storage->lock);
	}

	return sdata;
}

static struct bpf_sk_storage_data *
sk_storage_lookup(struct sock *sk, struct bpf_map *map, bool cacheit_lockit)
{
	struct bpf_sk_storage *sk_storage;
	struct bpf_sk_storage_map *smap;

	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage)
		return NULL;

	smap = (struct bpf_sk_storage_map *)map;
	return __sk_storage_lookup(sk_storage, smap, cacheit_lockit);
}

static int check_flags(const struct bpf_sk_storage_data *old_sdata,
		       u64 map_flags)
{
	if (old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}

static int sk_storage_alloc(struct sock *sk,
			    struct bpf_sk_storage_map *smap,
			    struct bpf_sk_storage_elem *first_selem)
{
	struct bpf_sk_storage *prev_sk_storage, *sk_storage;
	int err;

	err = omem_charge(sk, sizeof(*sk_storage));
	if (err)
		return err;

	sk_storage = kzalloc(sizeof(*sk_storage), GFP_ATOMIC | __GFP_NOWARN);
	if (!sk_storage) {
		err = -ENOMEM;
		goto uncharge;
	}
	INIT_HLIST_HEAD(&sk_storage->list);
	raw_spin_lock_init(&sk_storage->lock);
	sk_storage->sk = sk;

	__selem_link_sk(sk_storage, first_selem);
	selem_link_map(smap, first_selem);
	/* Publish sk_storage to sk.  sk->sk_lock cannot be acquired.
	 * Hence, atomic ops is used to set sk->sk_bpf_storage
	 * from NULL to the newly allocated sk_storage ptr.
	 *
	 * From now on, the sk->sk_bpf_storage pointer is protected
	 * by the sk_storage->lock.  Hence, when freeing
	 * the sk->sk_bpf_storage, the sk_storage->lock must
	 * be held before setting sk->sk_bpf_storage to NULL.
	 */
	prev_sk_storage = cmpxchg((struct bpf_sk_storage **)&sk->sk_bpf_storage,
				  NULL, sk_storage);
	if (unlikely(prev_sk_storage)) {
		selem_unlink_map(first_selem);
		err = -EAGAIN;
		goto uncharge;

		/* Note that even if first_selem was linked to smap's
		 * bucket->list, first_selem can be freed immediately
		 * (instead of kfree_rcu) because
		 * bpf_sk_storage_map_free() does a
		 * synchronize_rcu() before walking the bucket->list.
		 * Hence, no one is accessing selem from the
		 * bucket->list under rcu_read_lock().
		 */
	}

	return 0;

uncharge:
	kfree(sk_storage);
	atomic_sub(sizeof(*sk_storage), &sk->sk_omem_alloc);
	return err;
}

/* sk cannot be going away because it is linking new elem
 * to sk->sk_bpf_storage. (i.e. sk->sk_refcnt cannot be 0).
 * Otherwise, it will become a leak (and other memory issues
 * during map destruction).
 */
static struct bpf_sk_storage_data *sk_storage_update(struct sock *sk,
						     struct bpf_map *map,
						     void *value,
						     u64 map_flags)
{
	struct bpf_sk_storage_data *old_sdata = NULL;
	struct bpf_sk_storage_elem *selem;
	struct bpf_sk_storage *sk_storage;
	struct bpf_sk_storage_map *smap;
	int err;

	/* BPF_EXIST and BPF_NOEXIST cannot be both set */
	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST) ||
	    /* BPF_F_LOCK can only be used in a value with spin_lock */
	    unlikely((map_flags & BPF_F_LOCK) && !map_value_has_spin_lock(map)))
		return ERR_PTR(-EINVAL);

	smap = (struct bpf_sk_storage_map *)map;
	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage || hlist_empty(&sk_storage->list)) {
		/* Very first elem for this sk */
		err = check_flags(NULL, map_flags);
		if (err)
			return ERR_PTR(err);

		selem = selem_alloc(smap, sk, value, true);
		if (!selem)
			return ERR_PTR(-ENOMEM);

		err = sk_storage_alloc(sk, smap, selem);
		if (err) {
			kfree(selem);
			atomic_sub(smap->elem_size, &sk->sk_omem_alloc);
			return ERR_PTR(err);
		}

		return SDATA(selem);
	}

	if ((map_flags & BPF_F_LOCK) && !(map_flags & BPF_NOEXIST)) {
		/* Hoping to find an old_sdata to do inline update
		 * such that it can avoid taking the sk_storage->lock
		 * and changing the lists.
		 */
		old_sdata = __sk_storage_lookup(sk_storage, smap, false);
		err = check_flags(old_sdata, map_flags);
		if (err)
			return ERR_PTR(err);
		if (old_sdata && selem_linked_to_sk(SELEM(old_sdata))) {
			copy_map_value_locked(map, old_sdata->data,
					      value, false);
			return old_sdata;
		}
	}

	raw_spin_lock_bh(&sk_storage->lock);

	/* Recheck sk_storage->list under sk_storage->lock */
	if (unlikely(hlist_empty(&sk_storage->list))) {
		/* A parallel del is happening and sk_storage is going
		 * away.  It has just been checked before, so very
		 * unlikely.  Return instead of retry to keep things
		 * simple.
		 */
		err = -EAGAIN;
		goto unlock_err;
	}

	old_sdata = __sk_storage_lookup(sk_storage, smap, false);
	err = check_flags(old_sdata, map_flags);
	if (err)
		goto unlock_err;

	if (old_sdata && (map_flags & BPF_F_LOCK)) {
		copy_map_value_locked(map, old_sdata->data, value, false);
		selem = SELEM(old_sdata);
		goto unlock;
	}

	/* sk_storage->lock is held.  Hence, we are sure
	 * we can unlink and uncharge the old_sdata successfully
	 * later.  Hence, instead of charging the new selem now
	 * and then uncharge the old selem later (which may cause
	 * a potential but unnecessary charge failure), avoid taking
	 * a charge at all here (the "!old_sdata" check) and the
	 * old_sdata will not be uncharged later during __selem_unlink_sk().
	 */
	selem = selem_alloc(smap, sk, value, !old_sdata);
	if (!selem) {
		err = -ENOMEM;
		goto unlock_err;
	}

	/* First, link the new selem to the map */
	selem_link_map(smap, selem);

	/* Second, link (and publish) the new selem to sk_storage */
	__selem_link_sk(sk_storage, selem);

	/* Third, remove old selem, SELEM(old_sdata) */
	if (old_sdata) {
		selem_unlink_map(SELEM(old_sdata));
		__selem_unlink_sk(sk_storage, SELEM(old_sdata), false);
	}

unlock:
	raw_spin_unlock_bh(&sk_storage->lock);
	return SDATA(selem);

unlock_err:
	raw_spin_unlock_bh(&sk_storage->lock);
	return ERR_PTR(err);
}

static int sk_storage_delete(struct sock *sk, struct bpf_map *map)
{
	struct bpf_sk_storage_data *sdata;

	sdata = sk_storage_lookup(sk, map, false);
	if (!sdata)
		return -ENOENT;

	selem_unlink(SELEM(sdata));

	return 0;
}

static u16 cache_idx_get(void)
{
	u64 min_usage = U64_MAX;
	u16 i, res = 0;

	spin_lock(&cache_idx_lock);

	for (i = 0; i < BPF_SK_STORAGE_CACHE_SIZE; i++) {
		if (cache_idx_usage_counts[i] < min_usage) {
			min_usage = cache_idx_usage_counts[i];
			res = i;

			/* Found a free cache_idx */
			if (!min_usage)
				break;
		}
	}
	cache_idx_usage_counts[res]++;

	spin_unlock(&cache_idx_lock);

	return res;
}

static void cache_idx_free(u16 idx)
{
	spin_lock(&cache_idx_lock);
	cache_idx_usage_counts[idx]--;
	spin_unlock(&cache_idx_lock);
}

/* Called by __sk_destruct() & bpf_sk_storage_clone() */
void bpf_sk_storage_free(struct sock *sk)
{
	struct bpf_sk_storage_elem *selem;
	struct bpf_sk_storage *sk_storage;
	bool free_sk_storage = false;
	struct hlist_node *n;

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage) {
		rcu_read_unlock();
		return;
	}

	/* Neither the bpf_prog nor the bpf-map's syscall
	 * could be modifying the sk_storage->list now.
	 * Thus, no elem can be added-to or deleted-from the
	 * sk_storage->list by the bpf_prog or by the bpf-map's syscall.
	 *
	 * It is racing with bpf_sk_storage_map_free() alone
	 * when unlinking elem from the sk_storage->list and
	 * the map's bucket->list.
	 */
	raw_spin_lock_bh(&sk_storage->lock);
	hlist_for_each_entry_safe(selem, n, &sk_storage->list, snode) {
		/* Always unlink from map before unlinking from
		 * sk_storage.
		 */
		selem_unlink_map(selem);
		free_sk_storage = __selem_unlink_sk(sk_storage, selem, true);
	}
	raw_spin_unlock_bh(&sk_storage->lock);
	rcu_read_unlock();

	if (free_sk_storage)
		kfree_rcu(sk_storage, rcu);
}

static void bpf_sk_storage_map_free(struct bpf_map *map)
{
	struct bpf_sk_storage_elem *selem;
	struct bpf_sk_storage_map *smap;
	struct bucket *b;
	unsigned int i;

	smap = (struct bpf_sk_storage_map *)map;

	cache_idx_free(smap->cache_idx);

	/* Note that this map might be concurrently cloned from
	 * bpf_sk_storage_clone. Wait for any existing bpf_sk_storage_clone
	 * RCU read section to finish before proceeding. New RCU
	 * read sections should be prevented via bpf_map_inc_not_zero.
	 */
	synchronize_rcu();

	/* bpf prog and the userspace can no longer access this map
	 * now.  No new selem (of this map) can be added
	 * to the sk->sk_bpf_storage or to the map bucket's list.
	 *
	 * The elem of this map can be cleaned up here
	 * or
	 * by bpf_sk_storage_free() during __sk_destruct().
	 */
	for (i = 0; i < (1U << smap->bucket_log); i++) {
		b = &smap->buckets[i];

		rcu_read_lock();
		/* No one is adding to b->list now */
		while ((selem = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(&b->list)),
						 struct bpf_sk_storage_elem,
						 map_node))) {
			selem_unlink(selem);
			cond_resched_rcu();
		}
		rcu_read_unlock();
	}

	/* bpf_sk_storage_free() may still need to access the map.
	 * e.g. bpf_sk_storage_free() has unlinked selem from the map
	 * which then made the above while((selem = ...)) loop
	 * exit immediately.
	 *
	 * However, the bpf_sk_storage_free() still needs to access
	 * the smap->elem_size to do the uncharging in
	 * __selem_unlink_sk().
	 *
	 * Hence, wait another rcu grace period for the
	 * bpf_sk_storage_free() to finish.
	 */
	synchronize_rcu();

	kvfree(smap->buckets);
	kfree(map);
}

/* U16_MAX is much more than enough for sk local storage
 * considering a tcp_sock is ~2k.
 */
#define MAX_VALUE_SIZE							\
	min_t(u32,							\
	      (KMALLOC_MAX_SIZE - MAX_BPF_STACK - sizeof(struct bpf_sk_storage_elem)),	\
	      (U16_MAX - sizeof(struct bpf_sk_storage_elem)))

static int bpf_sk_storage_map_alloc_check(union bpf_attr *attr)
{
	if (attr->map_flags & ~SK_STORAGE_CREATE_FLAG_MASK ||
	    !(attr->map_flags & BPF_F_NO_PREALLOC) ||
	    attr->max_entries ||
	    attr->key_size != sizeof(int) || !attr->value_size ||
	    /* Enforce BTF for userspace sk dumping */
	    !attr->btf_key_type_id || !attr->btf_value_type_id)
		return -EINVAL;

	if (!bpf_capable())
		return -EPERM;

	if (attr->value_size > MAX_VALUE_SIZE)
		return -E2BIG;

	return 0;
}

static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
{
	struct bpf_sk_storage_map *smap;
	unsigned int i;
	u32 nbuckets;
	u64 cost;
	int ret;

	smap = kzalloc(sizeof(*smap), GFP_USER | __GFP_NOWARN);
	if (!smap)
		return ERR_PTR(-ENOMEM);
	bpf_map_init_from_attr(&smap->map, attr);

	nbuckets = roundup_pow_of_two(num_possible_cpus());
	/* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
	nbuckets = max_t(u32, 2, nbuckets);
	smap->bucket_log = ilog2(nbuckets);
	cost = sizeof(*smap->buckets) * nbuckets + sizeof(*smap);

	ret = bpf_map_charge_init(&smap->map.memory, cost);
	if (ret < 0) {
		kfree(smap);
		return ERR_PTR(ret);
	}

	smap->buckets = kvcalloc(sizeof(*smap->buckets), nbuckets,
				 GFP_USER | __GFP_NOWARN);
	if (!smap->buckets) {
		bpf_map_charge_finish(&smap->map.memory);
		kfree(smap);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < nbuckets; i++) {
		INIT_HLIST_HEAD(&smap->buckets[i].list);
		raw_spin_lock_init(&smap->buckets[i].lock);
	}

	smap->elem_size = sizeof(struct bpf_sk_storage_elem) + attr->value_size;
	smap->cache_idx = cache_idx_get();

	return &smap->map;
}

static int notsupp_get_next_key(struct bpf_map *map, void *key,
				void *next_key)
{
	return -ENOTSUPP;
}

static int bpf_sk_storage_map_check_btf(const struct bpf_map *map,
					const struct btf *btf,
					const struct btf_type *key_type,
					const struct btf_type *value_type)
{
	u32 int_data;

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}

static void *bpf_fd_sk_storage_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_sk_storage_data *sdata;
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		sdata = sk_storage_lookup(sock->sk, map, true);
		sockfd_put(sock);
		return sdata ? sdata->data : NULL;
	}

	return ERR_PTR(err);
}

static int bpf_fd_sk_storage_update_elem(struct bpf_map *map, void *key,
					 void *value, u64 map_flags)
{
	struct bpf_sk_storage_data *sdata;
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		sdata = sk_storage_update(sock->sk, map, value, map_flags);
		sockfd_put(sock);
		return PTR_ERR_OR_ZERO(sdata);
	}

	return err;
}

static int bpf_fd_sk_storage_delete_elem(struct bpf_map *map, void *key)
{
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		err = sk_storage_delete(sock->sk, map);
		sockfd_put(sock);
		return err;
	}

	return err;
}

static struct bpf_sk_storage_elem *
bpf_sk_storage_clone_elem(struct sock *newsk,
			  struct bpf_sk_storage_map *smap,
			  struct bpf_sk_storage_elem *selem)
{
	struct bpf_sk_storage_elem *copy_selem;

	copy_selem = selem_alloc(smap, newsk, NULL, true);
	if (!copy_selem)
		return NULL;

	if (map_value_has_spin_lock(&smap->map))
		copy_map_value_locked(&smap->map, SDATA(copy_selem)->data,
				      SDATA(selem)->data, true);
	else
		copy_map_value(&smap->map, SDATA(copy_selem)->data,
			       SDATA(selem)->data);

	return copy_selem;
}

int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk)
{
	struct bpf_sk_storage *new_sk_storage = NULL;
	struct bpf_sk_storage *sk_storage;
	struct bpf_sk_storage_elem *selem;
	int ret = 0;

	RCU_INIT_POINTER(newsk->sk_bpf_storage, NULL);

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);

	if (!sk_storage || hlist_empty(&sk_storage->list))
		goto out;

	hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) {
		struct bpf_sk_storage_elem *copy_selem;
		struct bpf_sk_storage_map *smap;
		struct bpf_map *map;

		smap = rcu_dereference(SDATA(selem)->smap);
		if (!(smap->map.map_flags & BPF_F_CLONE))
			continue;

		/* Note that for lockless listeners adding new element
		 * here can race with cleanup in bpf_sk_storage_map_free.
		 * Try to grab map refcnt to make sure that it's still
		 * alive and prevent concurrent removal.
		 */
		map = bpf_map_inc_not_zero(&smap->map);
		if (IS_ERR(map))
			continue;

		copy_selem = bpf_sk_storage_clone_elem(newsk, smap, selem);
		if (!copy_selem) {
			ret = -ENOMEM;
			bpf_map_put(map);
			goto out;
		}

		if (new_sk_storage) {
			selem_link_map(smap, copy_selem);
			__selem_link_sk(new_sk_storage, copy_selem);
		} else {
			ret = sk_storage_alloc(newsk, smap, copy_selem);
			if (ret) {
				kfree(copy_selem);
				atomic_sub(smap->elem_size,
					   &newsk->sk_omem_alloc);
				bpf_map_put(map);
				goto out;
			}

			new_sk_storage = rcu_dereference(copy_selem->sk_storage);
		}
		bpf_map_put(map);
	}

out:
	rcu_read_unlock();

	/* In case of an error, don't free anything explicitly here, the
	 * caller is responsible to call bpf_sk_storage_free.
	 */

	return ret;
}

BPF_CALL_4(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk,
	   void *, value, u64, flags)
{
	struct bpf_sk_storage_data *sdata;

	if (flags > BPF_SK_STORAGE_GET_F_CREATE)
		return (unsigned long)NULL;

	sdata = sk_storage_lookup(sk, map, true);
	if (sdata)
		return (unsigned long)sdata->data;

	if (flags == BPF_SK_STORAGE_GET_F_CREATE &&
	    /* Cannot add new elem to a going away sk.
	     * Otherwise, the new elem may become a leak
	     * (and also other memory issues during map
	     * destruction).
	     */
	    refcount_inc_not_zero(&sk->sk_refcnt)) {
		sdata = sk_storage_update(sk, map, value, BPF_NOEXIST);
		/* sk must be a fullsock (guaranteed by verifier),
		 * so sock_gen_put() is unnecessary.
		 */
		sock_put(sk);
		return IS_ERR(sdata) ?
			(unsigned long)NULL : (unsigned long)sdata->data;
	}

	return (unsigned long)NULL;
}

BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
{
	if (refcount_inc_not_zero(&sk->sk_refcnt)) {
		int err;

		err = sk_storage_delete(sk, map);
		sock_put(sk);
		return err;
	}

	return -ENOENT;
}

static int sk_storage_map_btf_id;
const struct bpf_map_ops sk_storage_map_ops = {
	.map_alloc_check = bpf_sk_storage_map_alloc_check,
	.map_alloc = bpf_sk_storage_map_alloc,
	.map_free = bpf_sk_storage_map_free,
	.map_get_next_key = notsupp_get_next_key,
	.map_lookup_elem = bpf_fd_sk_storage_lookup_elem,
	.map_update_elem = bpf_fd_sk_storage_update_elem,
	.map_delete_elem = bpf_fd_sk_storage_delete_elem,
	.map_check_btf = bpf_sk_storage_map_check_btf,
	.map_btf_name = "bpf_sk_storage_map",
	.map_btf_id = &sk_storage_map_btf_id,
};

const struct bpf_func_proto bpf_sk_storage_get_proto = {
	.func		= bpf_sk_storage_get,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_SOCKET,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type	= ARG_ANYTHING,
};

const struct bpf_func_proto bpf_sk_storage_get_cg_sock_proto = {
	.func		= bpf_sk_storage_get,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_CTX, /* context is 'struct sock' */
	.arg3_type	= ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type	= ARG_ANYTHING,
};

const struct bpf_func_proto bpf_sk_storage_delete_proto = {
	.func		= bpf_sk_storage_delete,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_SOCKET,
};

struct bpf_sk_storage_diag {
	u32 nr_maps;
	struct bpf_map *maps[];
};

/* The reply will be like:
 * INET_DIAG_BPF_SK_STORAGES (nla_nest)
 *	SK_DIAG_BPF_STORAGE (nla_nest)
 *		SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
 *		SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
 *	SK_DIAG_BPF_STORAGE (nla_nest)
 *		SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
 *		SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
 *	....
 */
static int nla_value_size(u32 value_size)
{
	/* SK_DIAG_BPF_STORAGE (nla_nest)
	 *	SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
	 *	SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
	 */
	return nla_total_size(0) + nla_total_size(sizeof(u32)) +
		nla_total_size_64bit(value_size);
}

void bpf_sk_storage_diag_free(struct bpf_sk_storage_diag *diag)
{
	u32 i;

	if (!diag)
		return;

	for (i = 0; i < diag->nr_maps; i++)
		bpf_map_put(diag->maps[i]);

	kfree(diag);
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_free);

static bool diag_check_dup(const struct bpf_sk_storage_diag *diag,
			   const struct bpf_map *map)
{
	u32 i;

	for (i = 0; i < diag->nr_maps; i++) {
		if (diag->maps[i] == map)
			return true;
	}

	return false;
}

struct bpf_sk_storage_diag *
bpf_sk_storage_diag_alloc(const struct nlattr *nla_stgs)
{
	struct bpf_sk_storage_diag *diag;
	struct nlattr *nla;
	u32 nr_maps = 0;
	int rem, err;

	/* bpf_sk_storage_map is currently limited to CAP_SYS_ADMIN as
	 * the map_alloc_check() side also does.
	 */
	if (!bpf_capable())
		return ERR_PTR(-EPERM);

	nla_for_each_nested(nla, nla_stgs, rem) {
		if (nla_type(nla) == SK_DIAG_BPF_STORAGE_REQ_MAP_FD)
			nr_maps++;
	}

	diag = kzalloc(sizeof(*diag) + sizeof(diag->maps[0]) * nr_maps,
		       GFP_KERNEL);
	if (!diag)
		return ERR_PTR(-ENOMEM);

	nla_for_each_nested(nla, nla_stgs, rem) {
		struct bpf_map *map;
		int map_fd;

		if (nla_type(nla) != SK_DIAG_BPF_STORAGE_REQ_MAP_FD)
			continue;

		map_fd = nla_get_u32(nla);
		map = bpf_map_get(map_fd);
		if (IS_ERR(map)) {
			err = PTR_ERR(map);
			goto err_free;
		}
		if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) {
			bpf_map_put(map);
			err = -EINVAL;
			goto err_free;
		}
		if (diag_check_dup(diag, map)) {
			bpf_map_put(map);
			err = -EEXIST;
			goto err_free;
		}
		diag->maps[diag->nr_maps++] = map;
	}

	return diag;

err_free:
	bpf_sk_storage_diag_free(diag);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_alloc);

static int diag_get(struct bpf_sk_storage_data *sdata, struct sk_buff *skb)
{
	struct nlattr *nla_stg, *nla_value;
	struct bpf_sk_storage_map *smap;

	/* It cannot exceed max nlattr's payload */
	BUILD_BUG_ON(U16_MAX - NLA_HDRLEN < MAX_VALUE_SIZE);

	nla_stg = nla_nest_start(skb, SK_DIAG_BPF_STORAGE);
	if (!nla_stg)
		return -EMSGSIZE;

	smap = rcu_dereference(sdata->smap);
	if (nla_put_u32(skb, SK_DIAG_BPF_STORAGE_MAP_ID, smap->map.id))
		goto errout;

	nla_value = nla_reserve_64bit(skb, SK_DIAG_BPF_STORAGE_MAP_VALUE,
				      smap->map.value_size,
				      SK_DIAG_BPF_STORAGE_PAD);
	if (!nla_value)
		goto errout;

	if (map_value_has_spin_lock(&smap->map))
		copy_map_value_locked(&smap->map, nla_data(nla_value),
				      sdata->data, true);
	else
		copy_map_value(&smap->map, nla_data(nla_value), sdata->data);

	nla_nest_end(skb, nla_stg);
	return 0;

errout:
	nla_nest_cancel(skb, nla_stg);
	return -EMSGSIZE;
}

static int bpf_sk_storage_diag_put_all(struct sock *sk, struct sk_buff *skb,
				       int stg_array_type,
				       unsigned int *res_diag_size)
{
	/* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */
	unsigned int diag_size = nla_total_size(0);
	struct bpf_sk_storage *sk_storage;
	struct bpf_sk_storage_elem *selem;
	struct bpf_sk_storage_map *smap;
	struct nlattr *nla_stgs;
	unsigned int saved_len;
	int err = 0;

	rcu_read_lock();

	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage || hlist_empty(&sk_storage->list)) {
		rcu_read_unlock();
		return 0;
	}

	nla_stgs = nla_nest_start(skb, stg_array_type);
	if (!nla_stgs)
		/* Continue to learn diag_size */
		err = -EMSGSIZE;

	saved_len = skb->len;
	hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) {
		smap = rcu_dereference(SDATA(selem)->smap);
		diag_size += nla_value_size(smap->map.value_size);

		if (nla_stgs && diag_get(SDATA(selem), skb))
			/* Continue to learn diag_size */
			err = -EMSGSIZE;
	}

	rcu_read_unlock();

	if (nla_stgs) {
		if (saved_len == skb->len)
			nla_nest_cancel(skb, nla_stgs);
		else
			nla_nest_end(skb, nla_stgs);
	}

	if (diag_size == nla_total_size(0)) {
		*res_diag_size = 0;
		return 0;
	}

	*res_diag_size = diag_size;
	return err;
}

int bpf_sk_storage_diag_put(struct bpf_sk_storage_diag *diag,
			    struct sock *sk, struct sk_buff *skb,
			    int stg_array_type,
			    unsigned int *res_diag_size)
{
	/* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */
	unsigned int diag_size = nla_total_size(0);
	struct bpf_sk_storage *sk_storage;
	struct bpf_sk_storage_data *sdata;
	struct nlattr *nla_stgs;
	unsigned int saved_len;
	int err = 0;
	u32 i;

	*res_diag_size = 0;

	/* No map has been specified.  Dump all. */
	if (!diag->nr_maps)
		return bpf_sk_storage_diag_put_all(sk, skb, stg_array_type,
						   res_diag_size);

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage || hlist_empty(&sk_storage->list)) {
		rcu_read_unlock();
		return 0;
	}

	nla_stgs = nla_nest_start(skb, stg_array_type);
	if (!nla_stgs)
		/* Continue to learn diag_size */
		err = -EMSGSIZE;

	saved_len = skb->len;
	for (i = 0; i < diag->nr_maps; i++) {
		sdata = __sk_storage_lookup(sk_storage,
			(struct bpf_sk_storage_map *)diag->maps[i],
			false);

		if (!sdata)
			continue;

		diag_size += nla_value_size(diag->maps[i]->value_size);

		if (nla_stgs && diag_get(sdata, skb))
			/* Continue to learn diag_size */
			err = -EMSGSIZE;
	}
	rcu_read_unlock();

	if (nla_stgs) {
		if (saved_len == skb->len)
			nla_nest_cancel(skb, nla_stgs);
		else
			nla_nest_end(skb, nla_stgs);
	}

	if (diag_size == nla_total_size(0)) {
		*res_diag_size = 0;
		return 0;
	}

	*res_diag_size = diag_size;
	return err;
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_put);

struct bpf_iter_seq_sk_storage_map_info {
	struct bpf_map *map;
	unsigned int bucket_id;
	unsigned skip_elems;
};

static struct bpf_sk_storage_elem *
bpf_sk_storage_map_seq_find_next(struct bpf_iter_seq_sk_storage_map_info *info,
				 struct bpf_sk_storage_elem *prev_selem)
{
	struct bpf_sk_storage *sk_storage;
	struct bpf_sk_storage_elem *selem;
	u32 skip_elems = info->skip_elems;
	struct bpf_sk_storage_map *smap;
	u32 bucket_id = info->bucket_id;
	u32 i, count, n_buckets;
	struct bucket *b;

	smap = (struct bpf_sk_storage_map *)info->map;
	n_buckets = 1U << smap->bucket_log;
	if (bucket_id >= n_buckets)
		return NULL;

	/* try to find next selem in the same bucket */
	selem = prev_selem;
	count = 0;
	while (selem) {
		selem = hlist_entry_safe(selem->map_node.next,
					 struct bpf_sk_storage_elem, map_node);
		if (!selem) {
			/* not found, unlock and go to the next bucket */
			b = &smap->buckets[bucket_id++];
			raw_spin_unlock_bh(&b->lock);
			skip_elems = 0;
			break;
		}
		sk_storage = rcu_dereference_raw(selem->sk_storage);
		if (sk_storage) {
			info->skip_elems = skip_elems + count;
			return selem;
		}
		count++;
	}

	for (i = bucket_id; i < (1U << smap->bucket_log); i++) {
		b = &smap->buckets[i];
		raw_spin_lock_bh(&b->lock);
		count = 0;
		hlist_for_each_entry(selem, &b->list, map_node) {
			sk_storage = rcu_dereference_raw(selem->sk_storage);
			if (sk_storage && count >= skip_elems) {
				info->bucket_id = i;
				info->skip_elems = count;
				return selem;
			}
			count++;
		}
		raw_spin_unlock_bh(&b->lock);
		skip_elems = 0;
	}

	info->bucket_id = i;
	info->skip_elems = 0;
	return NULL;
}

static void *bpf_sk_storage_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_sk_storage_elem *selem;

	selem = bpf_sk_storage_map_seq_find_next(seq->private, NULL);
	if (!selem)
		return NULL;

	if (*pos == 0)
		++*pos;
	return selem;
}

static void *bpf_sk_storage_map_seq_next(struct seq_file *seq, void *v,
					 loff_t *pos)
{
	struct bpf_iter_seq_sk_storage_map_info *info = seq->private;

	++*pos;
	++info->skip_elems;
	return bpf_sk_storage_map_seq_find_next(seq->private, v);
}

struct bpf_iter__bpf_sk_storage_map {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct bpf_map *, map);
	__bpf_md_ptr(struct sock *, sk);
	__bpf_md_ptr(void *, value);
};

DEFINE_BPF_ITER_FUNC(bpf_sk_storage_map, struct bpf_iter_meta *meta,
		     struct bpf_map *map, struct sock *sk,
		     void *value)

static int __bpf_sk_storage_map_seq_show(struct seq_file *seq,
					 struct bpf_sk_storage_elem *selem)
{
	struct bpf_iter_seq_sk_storage_map_info *info = seq->private;
	struct bpf_iter__bpf_sk_storage_map ctx = {};
	struct bpf_sk_storage *sk_storage;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	int ret = 0;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, selem == NULL);
	if (prog) {
		ctx.meta = &meta;
		ctx.map = info->map;
		if (selem) {
			sk_storage = rcu_dereference_raw(selem->sk_storage);
			ctx.sk = sk_storage->sk;
			ctx.value = SDATA(selem)->data;
		}
		ret = bpf_iter_run_prog(prog, &ctx);
	}

	return ret;
}

static int bpf_sk_storage_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_sk_storage_map_seq_show(seq, v);
}

static void bpf_sk_storage_map_seq_stop(struct seq_file *seq, void *v)
{
	struct bpf_iter_seq_sk_storage_map_info *info = seq->private;
	struct bpf_sk_storage_map *smap;
	struct bucket *b;

	if (!v) {
		(void)__bpf_sk_storage_map_seq_show(seq, v);
	} else {
		smap = (struct bpf_sk_storage_map *)info->map;
		b = &smap->buckets[info->bucket_id];
		raw_spin_unlock_bh(&b->lock);
	}
}

static int bpf_iter_init_sk_storage_map(void *priv_data,
					struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_sk_storage_map_info *seq_info = priv_data;

	seq_info->map = aux->map;
	return 0;
}

static int bpf_iter_attach_map(struct bpf_prog *prog,
			       union bpf_iter_link_info *linfo,
			       struct bpf_iter_aux_info *aux)
{
	struct bpf_map *map;
	int err = -EINVAL;

	if (!linfo->map.map_fd)
		return -EBADF;

	map = bpf_map_get_with_uref(linfo->map.map_fd);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
		goto put_map;

	if (prog->aux->max_rdonly_access > map->value_size) {
		err = -EACCES;
		goto put_map;
	}

	aux->map = map;
	return 0;

put_map:
	bpf_map_put_with_uref(map);
	return err;
}

static void bpf_iter_detach_map(struct bpf_iter_aux_info *aux)
{
	bpf_map_put_with_uref(aux->map);
}

static const struct seq_operations bpf_sk_storage_map_seq_ops = {
	.start  = bpf_sk_storage_map_seq_start,
	.next   = bpf_sk_storage_map_seq_next,
	.stop   = bpf_sk_storage_map_seq_stop,
	.show   = bpf_sk_storage_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops		= &bpf_sk_storage_map_seq_ops,
	.init_seq_private	= bpf_iter_init_sk_storage_map,
	.fini_seq_private	= NULL,
	.seq_priv_size		= sizeof(struct bpf_iter_seq_sk_storage_map_info),
};

static struct bpf_iter_reg bpf_sk_storage_map_reg_info = {
	.target			= "bpf_sk_storage_map",
	.attach_target		= bpf_iter_attach_map,
	.detach_target		= bpf_iter_detach_map,
	.ctx_arg_info_size	= 2,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__bpf_sk_storage_map, sk),
		  PTR_TO_BTF_ID_OR_NULL },
		{ offsetof(struct bpf_iter__bpf_sk_storage_map, value),
		  PTR_TO_RDWR_BUF_OR_NULL },
	},
	.seq_info		= &iter_seq_info,
};

static int __init bpf_sk_storage_map_iter_init(void)
{
	bpf_sk_storage_map_reg_info.ctx_arg_info[0].btf_id =
		btf_sock_ids[BTF_SOCK_TYPE_SOCK];
	return bpf_iter_reg_target(&bpf_sk_storage_map_reg_info);
}
late_initcall(bpf_sk_storage_map_iter_init);