bpf: Convert bpf_prog refcnt to atomic64_t
Similarly to bpf_map's refcnt/usercnt, convert bpf_prog's refcnt to atomic64 and remove the artificial 32k limit. This allows bpf_prog's refcounting to be made non-failing, simplifying the logic of bpf_prog_add/bpf_prog_inc users. Validated compilation by running an allyesconfig kernel build.

Suggested-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Andrii Nakryiko <andriin@fb.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20191117172806.2195367-3-andriin@fb.com
commit 85192dbf4d
parent 1e0bd5a091
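All of the driver hunks below follow the same mechanical pattern: the error-checked, ERR_PTR-returning call to bpf_prog_add()/bpf_prog_inc() is replaced by an unconditional, non-failing call. The following is a minimal sketch of the resulting caller pattern, not code from the patch itself; the demo_* names and the ring structure are purely illustrative.

#include <linux/bpf.h>
#include <linux/filter.h>

/* Illustrative driver state; not taken from the patch. */
struct demo_ring {
	struct bpf_prog *xdp_prog;
};

static void demo_attach_xdp(struct demo_ring *rings, int nr_rings,
			    struct bpf_prog *prog)
{
	int i;

	if (prog)
		/* One batched reference per ring; cannot fail after this patch. */
		bpf_prog_add(prog, nr_rings);

	for (i = 0; i < nr_rings; i++)
		rings[i].xdp_prog = prog;
}

static void demo_detach_xdp(struct demo_ring *rings, int nr_rings)
{
	int i;

	for (i = 0; i < nr_rings; i++) {
		if (rings[i].xdp_prog)
			/* Drop the per-ring reference; the prog is freed at zero. */
			bpf_prog_put(rings[i].xdp_prog);
		rings[i].xdp_prog = NULL;
	}
}

Teardown still drops one reference per ring with bpf_prog_put(), which remains the point where the program is actually freed; only the reference-taking side stops having an error path.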
@@ -3171,13 +3171,8 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
 	bnxt_init_rxbd_pages(ring, type);
 
 	if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
-		rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1);
-		if (IS_ERR(rxr->xdp_prog)) {
-			int rc = PTR_ERR(rxr->xdp_prog);
-
-			rxr->xdp_prog = NULL;
-			return rc;
-		}
+		bpf_prog_add(bp->xdp_prog, 1);
+		rxr->xdp_prog = bp->xdp_prog;
 	}
 	prod = rxr->rx_prod;
 	for (i = 0; i < bp->rx_ring_size; i++) {
@@ -1876,13 +1876,8 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
 
 	if (nic->xdp_prog) {
 		/* Attach BPF program */
-		nic->xdp_prog = bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1);
-		if (!IS_ERR(nic->xdp_prog)) {
-			bpf_attached = true;
-		} else {
-			ret = PTR_ERR(nic->xdp_prog);
-			nic->xdp_prog = NULL;
-		}
+		bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1);
+		bpf_attached = true;
 	}
 
 	/* Calculate Tx queues needed for XDP and network stack */
@@ -1807,11 +1807,8 @@ static int setup_xdp(struct net_device *dev, struct bpf_prog *prog)
 	if (prog && !xdp_mtu_valid(priv, dev->mtu))
 		return -EINVAL;
 
-	if (prog) {
-		prog = bpf_prog_add(prog, priv->num_channels);
-		if (IS_ERR(prog))
-			return PTR_ERR(prog);
-	}
+	if (prog)
+		bpf_prog_add(prog, priv->num_channels);
 
 	up = netif_running(dev);
 	need_update = (!!priv->xdp_prog != !!prog);
@@ -2286,11 +2286,7 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
 					lockdep_is_held(&priv->mdev->state_lock));
 
 	if (xdp_prog && carry_xdp_prog) {
-		xdp_prog = bpf_prog_add(xdp_prog, tmp->rx_ring_num);
-		if (IS_ERR(xdp_prog)) {
-			mlx4_en_free_resources(tmp);
-			return PTR_ERR(xdp_prog);
-		}
+		bpf_prog_add(xdp_prog, tmp->rx_ring_num);
 		for (i = 0; i < tmp->rx_ring_num; i++)
 			rcu_assign_pointer(tmp->rx_ring[i]->xdp_prog,
 					   xdp_prog);
@@ -2782,11 +2778,9 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
 	 * program for a new one.
 	 */
 	if (priv->tx_ring_num[TX_XDP] == xdp_ring_num) {
-		if (prog) {
-			prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
-			if (IS_ERR(prog))
-				return PTR_ERR(prog);
-		}
+		if (prog)
+			bpf_prog_add(prog, priv->rx_ring_num - 1);
+
 		mutex_lock(&mdev->state_lock);
 		for (i = 0; i < priv->rx_ring_num; i++) {
 			old_prog = rcu_dereference_protected(
@@ -2807,13 +2801,8 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
 	if (!tmp)
 		return -ENOMEM;
 
-	if (prog) {
-		prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
-		if (IS_ERR(prog)) {
-			err = PTR_ERR(prog);
-			goto out;
-		}
-	}
+	if (prog)
+		bpf_prog_add(prog, priv->rx_ring_num - 1);
 
 	mutex_lock(&mdev->state_lock);
 	memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
@@ -2862,7 +2851,6 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
 
 unlock_out:
 	mutex_unlock(&mdev->state_lock);
-out:
 	kfree(tmp);
 	return err;
 }
@@ -408,12 +408,9 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 	rq->stats = &c->priv->channel_stats[c->ix].rq;
 	INIT_WORK(&rq->recover_work, mlx5e_rq_err_cqe_work);
 
-	rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
-	if (IS_ERR(rq->xdp_prog)) {
-		err = PTR_ERR(rq->xdp_prog);
-		rq->xdp_prog = NULL;
-		goto err_rq_wq_destroy;
-	}
+	if (params->xdp_prog)
+		bpf_prog_inc(params->xdp_prog);
+	rq->xdp_prog = params->xdp_prog;
 
 	rq_xdp_ix = rq->ix;
 	if (xsk)
@@ -4406,16 +4403,11 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
 	/* no need for full reset when exchanging programs */
 	reset = (!priv->channels.params.xdp_prog || !prog);
 
-	if (was_opened && !reset) {
+	if (was_opened && !reset)
 		/* num_channels is invariant here, so we can take the
 		 * batched reference right upfront.
 		 */
-		prog = bpf_prog_add(prog, priv->channels.num);
-		if (IS_ERR(prog)) {
-			err = PTR_ERR(prog);
-			goto unlock;
-		}
-	}
+		bpf_prog_add(prog, priv->channels.num);
 
 	if (was_opened && reset) {
 		struct mlx5e_channels new_channels = {};
@@ -2107,12 +2107,8 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
 			if (rc)
 				goto out;
 
-			fp->rxq->xdp_prog = bpf_prog_add(edev->xdp_prog, 1);
-			if (IS_ERR(fp->rxq->xdp_prog)) {
-				rc = PTR_ERR(fp->rxq->xdp_prog);
-				fp->rxq->xdp_prog = NULL;
-				goto out;
-			}
+			bpf_prog_add(edev->xdp_prog, 1);
+			fp->rxq->xdp_prog = edev->xdp_prog;
 		}
 
 		if (fp->type & QEDE_FASTPATH_TX) {
@@ -2445,11 +2445,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 	if (!prog && !old_prog)
 		return 0;
 
-	if (prog) {
-		prog = bpf_prog_add(prog, vi->max_queue_pairs - 1);
-		if (IS_ERR(prog))
-			return PTR_ERR(prog);
-	}
+	if (prog)
+		bpf_prog_add(prog, vi->max_queue_pairs - 1);
 
 	/* Make sure NAPI is not using any XDP TX queues for RX. */
 	if (netif_running(dev)) {
@@ -485,7 +485,7 @@ struct bpf_func_info_aux {
 };
 
 struct bpf_prog_aux {
-	atomic_t refcnt;
+	atomic64_t refcnt;
 	u32 used_map_cnt;
 	u32 max_ctx_offset;
 	u32 max_pkt_offset;
@@ -770,9 +770,9 @@ extern const struct bpf_verifier_ops xdp_analyzer_ops;
 struct bpf_prog *bpf_prog_get(u32 ufd);
 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
 				       bool attach_drv);
-struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i);
+void bpf_prog_add(struct bpf_prog *prog, int i);
 void bpf_prog_sub(struct bpf_prog *prog, int i);
-struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog);
+void bpf_prog_inc(struct bpf_prog *prog);
 struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
 void bpf_prog_put(struct bpf_prog *prog);
 int __bpf_prog_charge(struct user_struct *user, u32 pages);
@@ -912,10 +912,8 @@ static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
 	return ERR_PTR(-EOPNOTSUPP);
 }
 
-static inline struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog,
-							   int i)
+static inline void bpf_prog_add(struct bpf_prog *prog, int i)
 {
-	return ERR_PTR(-EOPNOTSUPP);
 }
 
 static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
@@ -926,9 +924,8 @@ static inline void bpf_prog_put(struct bpf_prog *prog)
 {
 }
 
-static inline struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog)
+static inline void bpf_prog_inc(struct bpf_prog *prog)
 {
-	return ERR_PTR(-EOPNOTSUPP);
 }
 
 static inline struct bpf_prog *__must_check
@@ -31,7 +31,7 @@ static void *bpf_any_get(void *raw, enum bpf_type type)
 {
 	switch (type) {
 	case BPF_TYPE_PROG:
-		raw = bpf_prog_inc(raw);
+		bpf_prog_inc(raw);
 		break;
 	case BPF_TYPE_MAP:
 		bpf_map_inc_with_uref(raw);
@@ -534,7 +534,8 @@ static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type
 	if (!bpf_prog_get_ok(prog, &type, false))
 		return ERR_PTR(-EINVAL);
 
-	return bpf_prog_inc(prog);
+	bpf_prog_inc(prog);
+	return prog;
 }
 
 struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type)
@@ -1339,7 +1339,7 @@ static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
 
 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
 {
-	if (atomic_dec_and_test(&prog->aux->refcnt)) {
+	if (atomic64_dec_and_test(&prog->aux->refcnt)) {
 		perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
 		/* bpf_prog_free_id() must be called first */
 		bpf_prog_free_id(prog, do_idr_lock);
@@ -1445,16 +1445,9 @@ static struct bpf_prog *____bpf_prog_get(struct fd f)
 	return f.file->private_data;
 }
 
-/* prog's refcnt limit */
-#define BPF_MAX_REFCNT 32768
-
-struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
+void bpf_prog_add(struct bpf_prog *prog, int i)
 {
-	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
-		atomic_sub(i, &prog->aux->refcnt);
-		return ERR_PTR(-EBUSY);
-	}
-	return prog;
+	atomic64_add(i, &prog->aux->refcnt);
 }
 EXPORT_SYMBOL_GPL(bpf_prog_add);
 
@@ -1465,13 +1458,13 @@ void bpf_prog_sub(struct bpf_prog *prog, int i)
 	 * path holds a reference to the program, thus atomic_sub() can
 	 * be safely used in such cases!
 	 */
-	WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
+	WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0);
 }
 EXPORT_SYMBOL_GPL(bpf_prog_sub);
 
-struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
+void bpf_prog_inc(struct bpf_prog *prog)
 {
-	return bpf_prog_add(prog, 1);
+	atomic64_inc(&prog->aux->refcnt);
 }
 EXPORT_SYMBOL_GPL(bpf_prog_inc);
 
@@ -1480,12 +1473,7 @@ struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
 {
 	int refold;
 
-	refold = atomic_fetch_add_unless(&prog->aux->refcnt, 1, 0);
+	refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0);
 
-	if (refold >= BPF_MAX_REFCNT) {
-		__bpf_prog_put(prog, false);
-		return ERR_PTR(-EBUSY);
-	}
-
 	if (!refold)
 		return ERR_PTR(-ENOENT);
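Note that bpf_prog_inc_not_zero() keeps its failable signature in this hunk: only the artificial -EBUSY overflow path disappears, while -ENOENT is still returned when the last reference is already gone. A hedged sketch of the caller-side check that therefore remains necessary (the candidate pointer is illustrative, not from the patch):

	struct bpf_prog *prog;

	prog = bpf_prog_inc_not_zero(candidate);	/* candidate: illustrative prog pointer */
	if (IS_ERR(prog))
		return PTR_ERR(prog);	/* -ENOENT: program already being destroyed */

	/* ... use prog, then drop the reference ... */
	bpf_prog_put(prog);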
@@ -1523,7 +1511,7 @@ static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
 		goto out;
 	}
 
-	prog = bpf_prog_inc(prog);
+	bpf_prog_inc(prog);
 out:
 	fdput(f);
 	return prog;
@@ -1714,7 +1702,7 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
 	prog->orig_prog = NULL;
 	prog->jited = 0;
 
-	atomic_set(&prog->aux->refcnt, 1);
+	atomic64_set(&prog->aux->refcnt, 1);
 	prog->gpl_compatible = is_gpl ? 1 : 0;
 
 	if (bpf_prog_is_dev_bound(prog->aux)) {
@@ -10477,12 +10477,9 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 		context = parent_event->overflow_handler_context;
 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_EVENT_TRACING)
 		if (overflow_handler == bpf_overflow_handler) {
-			struct bpf_prog *prog = bpf_prog_inc(parent_event->prog);
+			struct bpf_prog *prog = parent_event->prog;
 
-			if (IS_ERR(prog)) {
-				err = PTR_ERR(prog);
-				goto err_ns;
-			}
+			bpf_prog_inc(prog);
 			event->prog = prog;
 			event->orig_overflow_handler =
 				parent_event->orig_overflow_handler;