// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/bpf_local_storage.h>
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/rcupdate_wait.h>

#define BPF_LOCAL_STORAGE_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_CLONE)

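/* Hash the selem pointer to pick the map bucket that will hold it. */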
static struct bpf_local_storage_map_bucket *
select_bucket(struct bpf_local_storage_map *smap,
	      struct bpf_local_storage_elem *selem)
{
	return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
}

static int mem_charge(struct bpf_local_storage_map *smap, void *owner, u32 size)
{
	struct bpf_map *map = &smap->map;

	if (!map->ops->map_local_storage_charge)
		return 0;

	return map->ops->map_local_storage_charge(smap, owner, size);
}

static void mem_uncharge(struct bpf_local_storage_map *smap, void *owner,
			 u32 size)
{
	struct bpf_map *map = &smap->map;

	if (map->ops->map_local_storage_uncharge)
		map->ops->map_local_storage_uncharge(smap, owner, size);
}

static struct bpf_local_storage __rcu **
owner_storage(struct bpf_local_storage_map *smap, void *owner)
{
	struct bpf_map *map = &smap->map;

	return map->ops->map_owner_storage_ptr(owner);
}

static bool selem_linked_to_storage_lockless(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed_lockless(&selem->snode);
}

static bool selem_linked_to_storage(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed(&selem->snode);
}

static bool selem_linked_to_map_lockless(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed_lockless(&selem->map_node);
}

static bool selem_linked_to_map(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed(&selem->map_node);
}

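/* Allocate a new selem for @owner. The memory is charged against the owner
 * first. Maps with bpf_ma set allocate from the per-map bpf_mem_cache;
 * otherwise bpf_map_kzalloc() is used. If @value is given, it is copied
 * into the new selem's data.
 */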
struct bpf_local_storage_elem *
bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
		void *value, bool charge_mem, gfp_t gfp_flags)
{
	struct bpf_local_storage_elem *selem;

	if (charge_mem && mem_charge(smap, owner, smap->elem_size))
		return NULL;

	if (smap->bpf_ma) {
		migrate_disable();
		selem = bpf_mem_cache_alloc_flags(&smap->selem_ma, gfp_flags);
		migrate_enable();
		if (selem)
			/* Keep the original bpf_map_kzalloc behavior
			 * before switching to bpf_mem_cache_alloc.
			 *
			 * No need to use zero_map_value. The bpf_selem_free()
			 * only does bpf_mem_cache_free when no other
			 * bpf prog is using the selem.
			 */
			memset(SDATA(selem)->data, 0, smap->map.value_size);
	} else {
		selem = bpf_map_kzalloc(&smap->map, smap->elem_size,
					gfp_flags | __GFP_NOWARN);
	}

	if (selem) {
		if (value)
			copy_map_value(&smap->map, SDATA(selem)->data, value);
		/* No need to call check_and_init_map_value as memory is zero init */
		return selem;
	}

	if (charge_mem)
		mem_uncharge(smap, owner, smap->elem_size);

	return NULL;
}

/* rcu tasks trace callback for bpf_ma == false */
static void __bpf_local_storage_free_trace_rcu(struct rcu_head *rcu)
{
	struct bpf_local_storage *local_storage;

	/* If RCU Tasks Trace grace period implies RCU grace period, do
	 * kfree(), else do kfree_rcu().
	 */
	local_storage = container_of(rcu, struct bpf_local_storage, rcu);
	if (rcu_trace_implies_rcu_gp())
		kfree(local_storage);
	else
		kfree_rcu(local_storage, rcu);
}

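/* Regular RCU callback for a local_storage that came from bpf_mem_cache:
 * return the object with bpf_mem_cache_raw_free() once the grace period
 * has passed.
 */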
static void bpf_local_storage_free_rcu(struct rcu_head *rcu)
{
	struct bpf_local_storage *local_storage;

	local_storage = container_of(rcu, struct bpf_local_storage, rcu);
	bpf_mem_cache_raw_free(local_storage);
}

static void bpf_local_storage_free_trace_rcu(struct rcu_head *rcu)
{
	if (rcu_trace_implies_rcu_gp())
		bpf_local_storage_free_rcu(rcu);
	else
		call_rcu(rcu, bpf_local_storage_free_rcu);
}

/* Handle bpf_ma == false */
static void __bpf_local_storage_free(struct bpf_local_storage *local_storage,
				     bool vanilla_rcu)
{
	if (vanilla_rcu)
		kfree_rcu(local_storage, rcu);
	else
		call_rcu_tasks_trace(&local_storage->rcu,
				     __bpf_local_storage_free_trace_rcu);
}

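/* Free a local_storage. When reuse_now is false, an RCU Tasks Trace grace
 * period is waited for first so that both sleepable and non-sleepable
 * readers are done. With bpf_ma, the object is returned to the per-map
 * bpf_mem_cache; otherwise the vanilla RCU free paths are used.
 */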
static void bpf_local_storage_free(struct bpf_local_storage *local_storage,
				   struct bpf_local_storage_map *smap,
				   bool bpf_ma, bool reuse_now)
{
	if (!local_storage)
		return;

	if (!bpf_ma) {
		__bpf_local_storage_free(local_storage, reuse_now);
		return;
	}

	if (!reuse_now) {
		call_rcu_tasks_trace(&local_storage->rcu,
				     bpf_local_storage_free_trace_rcu);
		return;
	}

	if (smap) {
		migrate_disable();
		bpf_mem_cache_free(&smap->storage_ma, local_storage);
		migrate_enable();
	} else {
		/* smap could be NULL if the selem that triggered
		 * this 'local_storage' creation had been long gone.
		 * In this case, directly do call_rcu().
		 */
		call_rcu(&local_storage->rcu, bpf_local_storage_free_rcu);
	}
}

/* rcu tasks trace callback for bpf_ma == false */
static void __bpf_selem_free_trace_rcu(struct rcu_head *rcu)
{
	struct bpf_local_storage_elem *selem;

	selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
	if (rcu_trace_implies_rcu_gp())
		kfree(selem);
	else
		kfree_rcu(selem, rcu);
}

/* Handle bpf_ma == false */
static void __bpf_selem_free(struct bpf_local_storage_elem *selem,
			     bool vanilla_rcu)
{
	if (vanilla_rcu)
		kfree_rcu(selem, rcu);
	else
		call_rcu_tasks_trace(&selem->rcu, __bpf_selem_free_trace_rcu);
}

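/* Regular RCU callback for a selem that came from bpf_mem_cache: return it
 * with bpf_mem_cache_raw_free() once the grace period has passed.
 */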
static void bpf_selem_free_rcu(struct rcu_head *rcu)
{
	struct bpf_local_storage_elem *selem;

	selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
	bpf_mem_cache_raw_free(selem);
}

static void bpf_selem_free_trace_rcu(struct rcu_head *rcu)
{
	if (rcu_trace_implies_rcu_gp())
		bpf_selem_free_rcu(rcu);
	else
		call_rcu(rcu, bpf_selem_free_rcu);
}

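/* Free a selem. Special fields in the map value are released first via
 * bpf_obj_free_fields(). When reuse_now is false, the free is deferred
 * until an RCU Tasks Trace grace period has elapsed because a bpf prog may
 * still be reading the selem. Otherwise, a bpf_ma selem goes straight back
 * to the per-map bpf_mem_cache for immediate reuse.
 */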
void bpf_selem_free(struct bpf_local_storage_elem *selem,
		    struct bpf_local_storage_map *smap,
		    bool reuse_now)
{
	bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);

	if (!smap->bpf_ma) {
		__bpf_selem_free(selem, reuse_now);
		return;
	}

	if (!reuse_now) {
		call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_trace_rcu);
	} else {
		/* Unlike the vanilla call_rcu(), bpf_mem_cache_free()
		 * allows the selem to be reused immediately.
		 */
		migrate_disable();
		bpf_mem_cache_free(&smap->selem_ma, selem);
		migrate_enable();
	}
}

/* local_storage->lock must be held and selem->local_storage == local_storage.
 * The caller must ensure selem->smap is still valid to be
 * dereferenced for its smap->elem_size and smap->cache_idx.
 */
static bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
					    struct bpf_local_storage_elem *selem,
					    bool uncharge_mem, bool reuse_now)
{
	struct bpf_local_storage_map *smap;
	bool free_local_storage;
	void *owner;

	smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
	owner = local_storage->owner;

	/* All uncharging on the owner must be done first.
	 * The owner may be freed once the last selem is unlinked
	 * from local_storage.
	 */
	if (uncharge_mem)
		mem_uncharge(smap, owner, smap->elem_size);

	free_local_storage = hlist_is_singular_node(&selem->snode,
						    &local_storage->list);
	if (free_local_storage) {
		mem_uncharge(smap, owner, sizeof(struct bpf_local_storage));
		local_storage->owner = NULL;

		/* After this RCU_INIT, owner may be freed and cannot be used */
		RCU_INIT_POINTER(*owner_storage(smap, owner), NULL);

		/* local_storage is not freed now. local_storage->lock is
		 * still held and raw_spin_unlock_bh(&local_storage->lock)
		 * will be done by the caller.
		 *
		 * Although the unlock will be done under
		 * rcu_read_lock(), it is more intuitive to
		 * read if the freeing of the storage is done
		 * after the raw_spin_unlock_bh(&local_storage->lock).
		 *
		 * Hence, a "bool free_local_storage" is returned
		 * to the caller, which then frees the storage after
		 * all the RCU grace periods have expired.
		 */
	}
	hlist_del_init_rcu(&selem->snode);
	if (rcu_access_pointer(local_storage->cache[smap->cache_idx]) ==
	    SDATA(selem))
		RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);

	bpf_selem_free(selem, smap, reuse_now);

	if (rcu_access_pointer(local_storage->smap) == smap)
		RCU_INIT_POINTER(local_storage->smap, NULL);

	return free_local_storage;
}

static bool check_storage_bpf_ma(struct bpf_local_storage *local_storage,
				 struct bpf_local_storage_map *storage_smap,
				 struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_map *selem_smap;

	/* local_storage->smap may be NULL. If it is, get the bpf_ma
	 * from any selem in the local_storage->list. The bpf_ma of all
	 * local_storage and selem should have the same value
	 * for the same map type.
	 *
	 * If the local_storage->list is already empty, the caller will not
	 * care about the bpf_ma value also because the caller is not
	 * responsible for freeing the local_storage.
	 */
	if (storage_smap)
		return storage_smap->bpf_ma;

	if (!selem) {
		struct hlist_node *n;

		n = rcu_dereference_check(hlist_first_rcu(&local_storage->list),
					  bpf_rcu_lock_held());
		if (!n)
			return false;

		selem = hlist_entry(n, struct bpf_local_storage_elem, snode);
	}
	selem_smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());

	return selem_smap->bpf_ma;
}

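/* Unlink a selem from its local_storage under local_storage->lock and free
 * it. If it was the last selem, the local_storage itself is freed as well.
 */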
static void bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem,
				     bool reuse_now)
{
	struct bpf_local_storage_map *storage_smap;
	struct bpf_local_storage *local_storage;
	bool bpf_ma, free_local_storage = false;
	unsigned long flags;

	if (unlikely(!selem_linked_to_storage_lockless(selem)))
		/* selem has already been unlinked from sk */
		return;

	local_storage = rcu_dereference_check(selem->local_storage,
					      bpf_rcu_lock_held());
	storage_smap = rcu_dereference_check(local_storage->smap,
					     bpf_rcu_lock_held());
	bpf_ma = check_storage_bpf_ma(local_storage, storage_smap, selem);

	raw_spin_lock_irqsave(&local_storage->lock, flags);
	if (likely(selem_linked_to_storage(selem)))
		free_local_storage = bpf_selem_unlink_storage_nolock(
			local_storage, selem, true, reuse_now);
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);

	if (free_local_storage)
		bpf_local_storage_free(local_storage, storage_smap, bpf_ma, reuse_now);
}

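/* Link a selem into its local_storage->list. The caller must hold
 * local_storage->lock or otherwise have exclusive access to the storage.
 */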
void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
				   struct bpf_local_storage_elem *selem)
{
	RCU_INIT_POINTER(selem->local_storage, local_storage);
	hlist_add_head_rcu(&selem->snode, &local_storage->list);
}

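/* Remove a selem from its map bucket list under the bucket lock. */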
static void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_map *smap;
	struct bpf_local_storage_map_bucket *b;
	unsigned long flags;

	if (unlikely(!selem_linked_to_map_lockless(selem)))
		/* selem has already been unlinked from smap */
		return;

	smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
	b = select_bucket(smap, selem);
	raw_spin_lock_irqsave(&b->lock, flags);
	if (likely(selem_linked_to_map(selem)))
		hlist_del_init_rcu(&selem->map_node);
	raw_spin_unlock_irqrestore(&b->lock, flags);
}

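/* Add a selem to its map's hash bucket under the bucket lock and record the
 * owning smap in the selem.
 */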
void bpf_selem_link_map(struct bpf_local_storage_map *smap,
			struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_map_bucket *b = select_bucket(smap, selem);
	unsigned long flags;

	raw_spin_lock_irqsave(&b->lock, flags);
	RCU_INIT_POINTER(SDATA(selem)->smap, smap);
	hlist_add_head_rcu(&selem->map_node, &b->list);
	raw_spin_unlock_irqrestore(&b->lock, flags);
}

void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool reuse_now)
{
	/* Always unlink from map before unlinking from local_storage
	 * because selem will be freed after it is successfully unlinked
	 * from the local_storage.
	 */
	bpf_selem_unlink_map(selem);
	bpf_selem_unlink_storage(selem, reuse_now);
}

/* If cacheit_lockit is false, this lookup function is lockless */
struct bpf_local_storage_data *
bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
			 struct bpf_local_storage_map *smap,
			 bool cacheit_lockit)
{
	struct bpf_local_storage_data *sdata;
	struct bpf_local_storage_elem *selem;

	/* Fast path (cache hit) */
	sdata = rcu_dereference_check(local_storage->cache[smap->cache_idx],
				      bpf_rcu_lock_held());
	if (sdata && rcu_access_pointer(sdata->smap) == smap)
		return sdata;

	/* Slow path (cache miss) */
	hlist_for_each_entry_rcu(selem, &local_storage->list, snode,
				 rcu_read_lock_trace_held())
		if (rcu_access_pointer(SDATA(selem)->smap) == smap)
			break;

	if (!selem)
		return NULL;

	sdata = SDATA(selem);
	if (cacheit_lockit) {
		unsigned long flags;

		/* spinlock is needed to avoid racing with the
		 * parallel delete. Otherwise, publishing an already
		 * deleted sdata to the cache will become a use-after-free
		 * problem in the next bpf_local_storage_lookup().
		 */
		raw_spin_lock_irqsave(&local_storage->lock, flags);
		if (selem_linked_to_storage(selem))
			rcu_assign_pointer(local_storage->cache[smap->cache_idx],
					   sdata);
		raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	}

	return sdata;
}

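/* Validate BPF_EXIST / BPF_NOEXIST in map_flags against whether an old
 * element already exists.
 */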
static int check_flags(const struct bpf_local_storage_data *old_sdata,
		       u64 map_flags)
{
	if (old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}

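/* Allocate the very first bpf_local_storage for an owner, link first_selem
 * into it, and publish it to the owner with cmpxchg() so that a concurrent
 * creator loses gracefully with -EAGAIN.
 */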
int bpf_local_storage_alloc(void *owner,
			    struct bpf_local_storage_map *smap,
			    struct bpf_local_storage_elem *first_selem,
			    gfp_t gfp_flags)
{
	struct bpf_local_storage *prev_storage, *storage;
	struct bpf_local_storage **owner_storage_ptr;
	int err;

	err = mem_charge(smap, owner, sizeof(*storage));
	if (err)
		return err;

	if (smap->bpf_ma) {
		migrate_disable();
		storage = bpf_mem_cache_alloc_flags(&smap->storage_ma, gfp_flags);
		migrate_enable();
	} else {
		storage = bpf_map_kzalloc(&smap->map, sizeof(*storage),
					  gfp_flags | __GFP_NOWARN);
	}

	if (!storage) {
		err = -ENOMEM;
		goto uncharge;
	}

	RCU_INIT_POINTER(storage->smap, smap);
	INIT_HLIST_HEAD(&storage->list);
	raw_spin_lock_init(&storage->lock);
	storage->owner = owner;

	bpf_selem_link_storage_nolock(storage, first_selem);
	bpf_selem_link_map(smap, first_selem);

	owner_storage_ptr =
		(struct bpf_local_storage **)owner_storage(smap, owner);
	/* Publish storage to the owner.
	 * Instead of using any lock of the kernel object (i.e. owner),
	 * cmpxchg will work with any kernel object regardless of what
	 * the running context is: bh, irq...etc.
	 *
	 * From now on, the owner->storage pointer (e.g. sk->sk_bpf_storage)
	 * is protected by the storage->lock. Hence, when freeing
	 * the owner->storage, the storage->lock must be held before
	 * setting owner->storage ptr to NULL.
	 */
	prev_storage = cmpxchg(owner_storage_ptr, NULL, storage);
	if (unlikely(prev_storage)) {
		bpf_selem_unlink_map(first_selem);
		err = -EAGAIN;
		goto uncharge;

		/* Note that even though first_selem was linked to smap's
		 * bucket->list, first_selem can be freed immediately
		 * (instead of kfree_rcu) because
		 * bpf_local_storage_map_free() does a
		 * synchronize_rcu_mult (waiting for both sleepable and
		 * normal programs) before walking the bucket->list.
		 * Hence, no one is accessing selem from the
		 * bucket->list under rcu_read_lock().
		 */
	}

	return 0;

uncharge:
	bpf_local_storage_free(storage, smap, smap->bpf_ma, true);
	mem_uncharge(smap, owner, sizeof(*storage));
	return err;
}

/* sk cannot be going away because it is linking a new elem
 * to sk->sk_bpf_storage. (i.e. sk->sk_refcnt cannot be 0).
 * Otherwise, it will become a leak (and other memory issues
 * during map destruction).
 */
struct bpf_local_storage_data *
bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
			 void *value, u64 map_flags, gfp_t gfp_flags)
{
	struct bpf_local_storage_data *old_sdata = NULL;
	struct bpf_local_storage_elem *alloc_selem, *selem = NULL;
	struct bpf_local_storage *local_storage;
	unsigned long flags;
	int err;

	/* BPF_EXIST and BPF_NOEXIST cannot be both set */
	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST) ||
	    /* BPF_F_LOCK can only be used in a value with spin_lock */
	    unlikely((map_flags & BPF_F_LOCK) &&
		     !btf_record_has_field(smap->map.record, BPF_SPIN_LOCK)))
		return ERR_PTR(-EINVAL);

	if (gfp_flags == GFP_KERNEL && (map_flags & ~BPF_F_LOCK) != BPF_NOEXIST)
		return ERR_PTR(-EINVAL);

	local_storage = rcu_dereference_check(*owner_storage(smap, owner),
					      bpf_rcu_lock_held());
	if (!local_storage || hlist_empty(&local_storage->list)) {
		/* Very first elem for the owner */
		err = check_flags(NULL, map_flags);
		if (err)
			return ERR_PTR(err);

		selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
		if (!selem)
			return ERR_PTR(-ENOMEM);

		err = bpf_local_storage_alloc(owner, smap, selem, gfp_flags);
		if (err) {
			bpf_selem_free(selem, smap, true);
			mem_uncharge(smap, owner, smap->elem_size);
			return ERR_PTR(err);
		}

		return SDATA(selem);
	}

	if ((map_flags & BPF_F_LOCK) && !(map_flags & BPF_NOEXIST)) {
		/* Hoping to find an old_sdata to do an inline update
		 * so that taking the local_storage->lock and changing
		 * the lists can be avoided.
		 */
		old_sdata =
			bpf_local_storage_lookup(local_storage, smap, false);
		err = check_flags(old_sdata, map_flags);
		if (err)
			return ERR_PTR(err);
		if (old_sdata && selem_linked_to_storage_lockless(SELEM(old_sdata))) {
			copy_map_value_locked(&smap->map, old_sdata->data,
					      value, false);
			return old_sdata;
		}
	}

	/* A lookup has just been done before and concluded that a new selem
	 * is needed. An unnecessary alloc is therefore unlikely.
	 */
	alloc_selem = selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
	if (!alloc_selem)
		return ERR_PTR(-ENOMEM);

	raw_spin_lock_irqsave(&local_storage->lock, flags);

	/* Recheck local_storage->list under local_storage->lock */
	if (unlikely(hlist_empty(&local_storage->list))) {
		/* A parallel del is happening and local_storage is going
		 * away. It has just been checked before, so very
		 * unlikely. Return instead of retry to keep things
		 * simple.
		 */
		err = -EAGAIN;
		goto unlock;
	}

	old_sdata = bpf_local_storage_lookup(local_storage, smap, false);
	err = check_flags(old_sdata, map_flags);
	if (err)
		goto unlock;

	if (old_sdata && (map_flags & BPF_F_LOCK)) {
		copy_map_value_locked(&smap->map, old_sdata->data, value,
				      false);
		selem = SELEM(old_sdata);
		goto unlock;
	}
	alloc_selem = NULL;
	/* First, link the new selem to the map */
	bpf_selem_link_map(smap, selem);

	/* Second, link (and publish) the new selem to local_storage */
	bpf_selem_link_storage_nolock(local_storage, selem);

	/* Third, remove old selem, SELEM(old_sdata) */
	if (old_sdata) {
		bpf_selem_unlink_map(SELEM(old_sdata));
		bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
						true, false);
	}

unlock:
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	if (alloc_selem) {
		mem_uncharge(smap, owner, smap->elem_size);
		bpf_selem_free(alloc_selem, smap, true);
	}
	return err ? ERR_PTR(err) : SDATA(selem);
}
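For context, a minimal userspace sketch of the syscall path that reaches the update code above (the same path shown in the lockdep splat: bpf_map_update_elem() -> bpf_fd_sk_storage_update_elem() -> bpf_local_storage_update()). It assumes libbpf and an already-created BPF_MAP_TYPE_SK_STORAGE map fd; the value struct and fd variables are illustrative, not taken from this file.

#include <bpf/bpf.h>
#include <errno.h>
#include <stdio.h>

struct my_val { long packets; };	/* illustrative value layout */

static int update_sk_storage(int map_fd, int sock_fd)
{
	struct my_val val = {};

	/* For sk storage, the userspace key is a socket fd. */
	if (bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST)) {
		fprintf(stderr, "update failed: %d\n", -errno);
		return -1;
	}
	return 0;
}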
static u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache)
{
	u64 min_usage = U64_MAX;
	u16 i, res = 0;

	spin_lock(&cache->idx_lock);

	for (i = 0; i < BPF_LOCAL_STORAGE_CACHE_SIZE; i++) {
		if (cache->idx_usage_counts[i] < min_usage) {
			min_usage = cache->idx_usage_counts[i];
			res = i;

			/* Found a free cache_idx */
			if (!min_usage)
				break;
		}
	}
	cache->idx_usage_counts[res]++;

	spin_unlock(&cache->idx_lock);

	return res;
}
static void bpf_local_storage_cache_idx_free(struct bpf_local_storage_cache *cache,
					     u16 idx)
{
	spin_lock(&cache->idx_lock);
	cache->idx_usage_counts[idx]--;
	spin_unlock(&cache->idx_lock);
}
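To see why a map grabs a cache_idx at all, here is a simplified sketch of the cache fast path: a lookup result can be published into local_storage->cache[smap->cache_idx] so that later lookups for the same map skip the list walk. The field names follow the structures used in this file, but the body is illustrative rather than the exact lookup code.

static struct bpf_local_storage_data *
cached_lookup_sketch(struct bpf_local_storage *local_storage,
		     struct bpf_local_storage_map *smap)
{
	struct bpf_local_storage_data *sdata;

	/* Fast path: the slot indexed by smap->cache_idx may already hold
	 * this map's sdata for this owner.
	 */
	sdata = rcu_dereference(local_storage->cache[smap->cache_idx]);
	if (sdata && rcu_dereference(sdata->smap) == smap)
		return sdata;

	/* Slow path (not shown): walk local_storage->list and optionally
	 * re-publish the result into the cache slot.
	 */
	return NULL;
}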
int bpf_local_storage_map_alloc_check(union bpf_attr *attr)
{
	if (attr->map_flags & ~BPF_LOCAL_STORAGE_CREATE_FLAG_MASK ||
	    !(attr->map_flags & BPF_F_NO_PREALLOC) ||
	    attr->max_entries ||
	    attr->key_size != sizeof(int) || !attr->value_size ||
	    /* Enforce BTF for userspace sk dumping */
	    !attr->btf_key_type_id || !attr->btf_value_type_id)
		return -EINVAL;

	if (attr->value_size > BPF_LOCAL_STORAGE_MAX_VALUE_SIZE)
		return -E2BIG;

	return 0;
}
int bpf_local_storage_map_check_btf(const struct bpf_map *map,
				    const struct btf *btf,
				    const struct btf_type *key_type,
				    const struct btf_type *value_type)
{
	u32 int_data;

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}
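For reference, a minimal BPF-side map definition that satisfies both checks above: BPF_F_NO_PREALLOC set, max_entries left at 0, a 4-byte int key, and BTF-described key/value types. The map name and value struct are illustrative, not taken from this file.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct pkt_stats {		/* illustrative value type */
	__u64 rx;
	__u64 tx;
};

struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);	/* must be a 32-bit int per the BTF check */
	__type(value, struct pkt_stats);
} sk_stats SEC(".maps");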
void bpf_local_storage_destroy(struct bpf_local_storage *local_storage)
{
	struct bpf_local_storage_map *storage_smap;
	struct bpf_local_storage_elem *selem;
	bool bpf_ma, free_storage = false;
	struct hlist_node *n;
	unsigned long flags;

	storage_smap = rcu_dereference_check(local_storage->smap, bpf_rcu_lock_held());
	bpf_ma = check_storage_bpf_ma(local_storage, storage_smap, NULL);

	/* Neither the bpf_prog nor the bpf_map's syscall
	 * could be modifying the local_storage->list now.
	 * Thus, no elem can be added to or deleted from the
	 * local_storage->list by the bpf_prog or by the bpf_map's syscall.
	 *
	 * It is racing with bpf_local_storage_map_free() alone
	 * when unlinking elem from the local_storage->list and
	 * the map's bucket->list.
	 */
	raw_spin_lock_irqsave(&local_storage->lock, flags);
	hlist_for_each_entry_safe(selem, n, &local_storage->list, snode) {
		/* Always unlink from map before unlinking from
		 * local_storage.
		 */
		bpf_selem_unlink_map(selem);
		/* If the local_storage list has only one element,
		 * bpf_selem_unlink_storage_nolock() will return true.
		 * Otherwise, it will return false. The loop intends to
		 * remove all local storage, so the last iteration
		 * of the loop will set free_storage to true.
		 */
		free_storage = bpf_selem_unlink_storage_nolock(
			local_storage, selem, true, true);
	}
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);

	if (free_storage)
		bpf_local_storage_free(local_storage, storage_smap, bpf_ma, true);
}
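A sketch of how an owner-side teardown path is expected to call bpf_local_storage_destroy(), modeled loosely on the sk destruction path; simplified and not verbatim kernel code.

static void owner_storage_free_sketch(struct sock *sk)
{
	struct bpf_local_storage *local_storage;

	rcu_read_lock();
	local_storage = rcu_dereference(sk->sk_bpf_storage);
	if (local_storage)
		/* The owner is going away: unlink and free every selem,
		 * then free the local_storage itself if it became empty.
		 */
		bpf_local_storage_destroy(local_storage);
	rcu_read_unlock();
}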
u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_local_storage_map *smap = (struct bpf_local_storage_map *)map;
	u64 usage = sizeof(*smap);

	/* The dynamically allocated selems are not counted currently. */
	usage += sizeof(*smap->buckets) * (1ULL << smap->bucket_log);
	return usage;
}
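As a rough worked example (illustrative numbers, not from this file): on a machine with 16 possible CPUs, bpf_local_storage_map_alloc() below picks nbuckets = 16, so the reported usage is sizeof(*smap) + 16 * sizeof(*smap->buckets); the per-element selems and the per-owner bpf_local_storage structs are not included in this figure.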
bpf: Use bpf_mem_cache_alloc/free in bpf_local_storage_elem
This patch uses bpf_mem_alloc for the task and cgroup local storage that
the bpf prog can easily get a hold of the storage owner's PTR_TO_BTF_ID.
eg. bpf_get_current_task_btf() can be used in some of the kmalloc code
path which will cause deadlock/recursion. bpf_mem_cache_alloc is
deadlock free and will solve a legit use case in [1].
For sk storage, its batch creation benchmark shows a few percent
regression when the sk create/destroy batch size is larger than 32.
The sk creation/destruction happens much more often and
depends on external traffic. Considering it is only hypothetical
that sk storage could cause a deadlock, sk storage can cross
that bridge and move to bpf_mem_alloc when a legit (i.e. useful)
use case comes up.
For inode storage, bpf_local_storage_destroy() is called before
waiting for a rcu gp, so its memory cannot be reused immediately.
inode storage therefore stays with kmalloc/kfree after the rcu
[or tasks_trace] gp.
A 'bool bpf_ma' argument is added to bpf_local_storage_map_alloc().
Only task and cgroup storage have 'bpf_ma == true', which
means bpf_mem_cache_alloc/free() is used. This patch only changes
the selem to use bpf_mem_alloc for task and cgroup. The next patch
will change the local_storage itself to use bpf_mem_alloc for
task and cgroup as well.
Here are some more details on the changes:
* memory allocation:
After bpf_mem_cache_alloc(), SDATA(selem)->data is zeroed because
bpf_mem_cache_alloc() could return a reused selem. This keeps
the existing bpf_map_kzalloc() behavior. Only SDATA(selem)->data
is zeroed since it is the part visible to the bpf prog.
There is no need to use zero_map_value() for the zeroing because
bpf_selem_free(..., reuse_now = true) ensures no bpf prog is still
using the selem before returning it through bpf_mem_cache_free().
The internal fields of the selem are initialized when
linking to the new smap and the new local_storage.
When 'bpf_ma == false', nothing changes in this patch. It
stays with bpf_map_kzalloc().
* memory free:
bpf_selem_free() and bpf_selem_free_rcu() are modified to handle
the bpf_ma == true case.
For the common selem free path, where the owner is also being destroyed,
the memory is freed in bpf_local_storage_destroy() and the owner (task
and cgroup) has already gone through a rcu gp. The memory can be reused
immediately, so bpf_local_storage_destroy() calls
bpf_selem_free(..., reuse_now = true), which does
bpf_mem_cache_free() so the memory can be reused right away.
An exception is the delete-elem code path, which is reached
from the helper bpf_*_storage_delete() and the syscall
bpf_map_delete_elem(). This path is an unusual case for local
storage because the common use case is to have the local storage
stay for its owner's lifetime so that neither the bpf prog nor user
space has to monitor the owner's destruction. For the delete-elem
path, the selem cannot be reused immediately because a bpf prog
could still be using it. It calls bpf_selem_free(..., reuse_now = false)
and waits for a rcu tasks trace gp before freeing the elem. The
rcu callback is changed to do bpf_mem_cache_raw_free() instead of kfree().
When 'bpf_ma == false', the behavior should be the same as before.
__bpf_selem_free() is added to do the kfree_rcu() and call_tasks_trace_rcu().
A few words on 'reuse_now == true'. Even when 'reuse_now == true',
the free is still racing with bpf_local_storage_map_free(), which runs under
rcu protection, so it still needs to wait for a rcu gp instead of a plain kfree().
Otherwise, the selem may be reused by slab for a totally different struct
while bpf_local_storage_map_free() is still using it (as a
rcu reader). For the inode case, there may be other rcu readers as well.
In short, when bpf_ma == false and reuse_now == true => vanilla rcu.
[1]: https://lore.kernel.org/bpf/20221118190109.1512674-1-namhyung@kernel.org/
Cc: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Link: https://lore.kernel.org/r/20230322215246.1675516-3-martin.lau@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
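To summarize the free rules above in one place, here is a decision sketch. The helper names (free_after_rcu_gp(), free_after_tasks_trace_gp(), reuse_now_free()) are placeholders defined only for the sketch, not kernel APIs.

/* Placeholders so the sketch stands alone; they are not kernel functions. */
static void free_after_rcu_gp(void) { }
static void free_after_tasks_trace_gp(void) { }
static void reuse_now_free(void) { }

static void selem_free_policy_sketch(bool bpf_ma, bool reuse_now)
{
	if (!bpf_ma) {
		/* kmalloc/kfree based storage (sk, inode): always wait for
		 * at least a vanilla rcu gp; the delete-elem path also waits
		 * for a rcu tasks trace gp.
		 */
		if (reuse_now)
			free_after_rcu_gp();		/* kfree_rcu() */
		else
			free_after_tasks_trace_gp();	/* then kfree() */
		return;
	}

	/* bpf_mem_alloc based storage (task, cgroup) */
	if (reuse_now)
		reuse_now_free();			/* bpf_mem_cache_free() */
	else
		free_after_tasks_trace_gp();		/* then bpf_mem_cache_raw_free() */
}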
/* When bpf_ma == true, the bpf_mem_alloc is used to allocate and free memory.
 * A deadlock free allocator is useful for storage that the bpf prog can easily
 * get a hold of the owner PTR_TO_BTF_ID in any context. eg. bpf_get_current_task_btf.
 * The task and cgroup storage fall into this case. The bpf_mem_alloc reuses
 * memory immediately. To be reuse-immediate safe, the owner destruction
 * code path needs to go through a rcu grace period before calling
 * bpf_local_storage_destroy().
 *
 * When bpf_ma == false, the kmalloc and kfree are used.
 */
struct bpf_map *
bpf_local_storage_map_alloc(union bpf_attr *attr,
			    struct bpf_local_storage_cache *cache,
			    bool bpf_ma)
{
	struct bpf_local_storage_map *smap;
	unsigned int i;
	u32 nbuckets;
	int err;

	smap = bpf_map_area_alloc(sizeof(*smap), NUMA_NO_NODE);
	if (!smap)
		return ERR_PTR(-ENOMEM);
	bpf_map_init_from_attr(&smap->map, attr);

	nbuckets = roundup_pow_of_two(num_possible_cpus());
	/* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
	nbuckets = max_t(u32, 2, nbuckets);
	smap->bucket_log = ilog2(nbuckets);

	smap->buckets = bpf_map_kvcalloc(&smap->map, sizeof(*smap->buckets),
					 nbuckets, GFP_USER | __GFP_NOWARN);
	if (!smap->buckets) {
		err = -ENOMEM;
		goto free_smap;
	}

	for (i = 0; i < nbuckets; i++) {
		INIT_HLIST_HEAD(&smap->buckets[i].list);
		raw_spin_lock_init(&smap->buckets[i].lock);
	}

	smap->elem_size = offsetof(struct bpf_local_storage_elem,
				   sdata.data[attr->value_size]);
        smap->bpf_ma = bpf_ma;
        if (bpf_ma) {
                err = bpf_mem_alloc_init(&smap->selem_ma, smap->elem_size, false);
                if (err)
                        goto free_smap;

                err = bpf_mem_alloc_init(&smap->storage_ma, sizeof(struct bpf_local_storage), false);
                if (err) {
                        bpf_mem_alloc_destroy(&smap->selem_ma);
                        goto free_smap;
                }
        }

        smap->cache_idx = bpf_local_storage_cache_idx_get(cache);
        return &smap->map;

free_smap:
        kvfree(smap->buckets);
        bpf_map_area_free(smap);
        return ERR_PTR(err);
}

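As a usage note on the bpf_ma plumbing just set up: the flag is chosen per
storage type by the caller's .map_alloc callback. The wrappers below are a
sketch recalled from the callers elsewhere in the tree (bpf_task_storage.c and
bpf_sk_storage.c); the cache identifiers task_cache and sk_cache belong to
those files and are shown here only for illustration.

/* Illustrative callers, not part of this file: only task and cgroup
 * storage pass bpf_ma == true at this point.
 */
static struct bpf_map *task_storage_map_alloc(union bpf_attr *attr)
{
        return bpf_local_storage_map_alloc(attr, &task_cache, true);
}

static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
{
        return bpf_local_storage_map_alloc(attr, &sk_cache, false);
}
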
void bpf_local_storage_map_free(struct bpf_map *map,
                                struct bpf_local_storage_cache *cache,
                                int __percpu *busy_counter)
{
        struct bpf_local_storage_map_bucket *b;
        struct bpf_local_storage_elem *selem;
        struct bpf_local_storage_map *smap;
        unsigned int i;

        smap = (struct bpf_local_storage_map *)map;
        bpf_local_storage_cache_idx_free(cache, smap->cache_idx);

        /* Note that this map might be concurrently cloned from
         * bpf_sk_storage_clone. Wait for any existing bpf_sk_storage_clone
         * RCU read section to finish before proceeding. New RCU
         * read sections should be prevented via bpf_map_inc_not_zero.
         */
        synchronize_rcu();

        /* bpf prog and the userspace can no longer access this map
         * now. No new selem (of this map) can be added
         * to the owner->storage or to the map bucket's list.
         *
         * The elem of this map can be cleaned up here
         * or when the storage is freed e.g.
         * by bpf_sk_storage_free() during __sk_destruct().
         */
        for (i = 0; i < (1U << smap->bucket_log); i++) {
                b = &smap->buckets[i];

                rcu_read_lock();
                /* No one is adding to b->list now */
                while ((selem = hlist_entry_safe(
                                rcu_dereference_raw(hlist_first_rcu(&b->list)),
                                struct bpf_local_storage_elem, map_node))) {
                        if (busy_counter) {
                                migrate_disable();
                                this_cpu_inc(*busy_counter);
                        }
                        bpf_selem_unlink(selem, true);
                        if (busy_counter) {
                                this_cpu_dec(*busy_counter);
                                migrate_enable();
                        }
                        cond_resched_rcu();
                }
                rcu_read_unlock();
        }

        /* While freeing the storage we may still need to access the map.
         *
         * e.g. when bpf_sk_storage_free() has unlinked selem from the map
         * which then made the above while((selem = ...)) loop
         * exit immediately.
         *
         * However, while freeing the storage one still needs to access the
         * smap->elem_size to do the uncharging in
         * bpf_selem_unlink_storage_nolock().
         *
         * Hence, wait another rcu grace period for the storage to be freed.
         */
        synchronize_rcu();

        if (smap->bpf_ma) {
                bpf_mem_alloc_destroy(&smap->selem_ma);
                bpf_mem_alloc_destroy(&smap->storage_ma);
        }

        kvfree(smap->buckets);
        bpf_map_area_free(smap);
}
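Finally, a sketch of how callers typically invoke bpf_local_storage_map_free():
sk storage passes no busy counter, while task storage passes its per-cpu
bpf_task_storage_busy counter, which the unlink loop above increments around
bpf_selem_unlink(). The wrappers below are recalled from bpf_sk_storage.c and
bpf_task_storage.c and are shown only as an illustration, not as part of this
file.

/* Illustrative callers from bpf_sk_storage.c / bpf_task_storage.c. */
static void bpf_sk_storage_map_free(struct bpf_map *map)
{
        bpf_local_storage_map_free(map, &sk_cache, NULL);
}

static void task_storage_map_free(struct bpf_map *map)
{
        bpf_local_storage_map_free(map, &task_cache, &bpf_task_storage_busy);
}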