Merge branch 'bpf-minor-cleanups'
Daniel Borkmann says:

====================
Two minor BPF cleanups

Two minor cleanups on devmap and redirect I still had in my queue.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 572a5767f1
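For context: the helper touched on the redirect side is the XDP redirect-to-map helper. An XDP program stores target devices in a BPF_MAP_TYPE_DEVMAP and calls bpf_redirect_map() to pick one; this series only renames the kernel-internal implementation (bpf_redirect_map -> bpf_xdp_redirect_map), the user-visible helper name is unchanged. A minimal sketch of such a program follows (not part of this commit; map name, sizes and section names are illustrative, in the style of samples/bpf of this era):

/* xdp_redirect_map_kern.c - redirect every packet via devmap slot 0.
 * Build (roughly): clang -O2 -target bpf -c xdp_redirect_map_kern.c
 */
#include <linux/bpf.h>
#include "bpf_helpers.h"	/* SEC(), bpf_redirect_map(), struct bpf_map_def */

struct bpf_map_def SEC("maps") tx_port = {
	.type		= BPF_MAP_TYPE_DEVMAP,
	.key_size	= sizeof(__u32),
	.value_size	= sizeof(int),	/* value: egress ifindex */
	.max_entries	= 64,
};

SEC("xdp_redirect_map")
int xdp_redirect_map_prog(struct xdp_md *ctx)
{
	__u32 key = 0;	/* devmap slot holding the egress device */

	/* returns XDP_REDIRECT on success, XDP_ABORTED on bad flags */
	return bpf_redirect_map(&tx_port, key, 0);
}

char _license[] SEC("license") = "GPL";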
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -48,30 +48,30 @@
  * calls will fail at this point.
  */
 #include <linux/bpf.h>
-#include <linux/jhash.h>
 #include <linux/filter.h>
-#include <linux/rculist_nulls.h>
-#include "percpu_freelist.h"
-#include "bpf_lru_list.h"
-#include "map_in_map.h"
 
 struct bpf_dtab_netdev {
 	struct net_device *dev;
-	int key;
-	struct rcu_head rcu;
 	struct bpf_dtab *dtab;
+	unsigned int bit;
+	struct rcu_head rcu;
 };
 
 struct bpf_dtab {
 	struct bpf_map map;
 	struct bpf_dtab_netdev **netdev_map;
-	unsigned long int __percpu *flush_needed;
+	unsigned long __percpu *flush_needed;
 	struct list_head list;
 };
 
 static DEFINE_SPINLOCK(dev_map_lock);
 static LIST_HEAD(dev_map_list);
 
+static u64 dev_map_bitmap_size(const union bpf_attr *attr)
+{
+	return BITS_TO_LONGS(attr->max_entries) * sizeof(unsigned long);
+}
+
 static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 {
 	struct bpf_dtab *dtab;
@@ -95,11 +95,9 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 	dtab->map.map_flags = attr->map_flags;
 	dtab->map.numa_node = bpf_map_attr_numa_node(attr);
 
-	err = -ENOMEM;
-
 	/* make sure page count doesn't overflow */
 	cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
-	cost += BITS_TO_LONGS(attr->max_entries) * sizeof(unsigned long);
+	cost += dev_map_bitmap_size(attr) * num_possible_cpus();
 	if (cost >= U32_MAX - PAGE_SIZE)
 		goto free_dtab;
 
@@ -110,12 +108,9 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 	if (err)
 		goto free_dtab;
 
-	err = -ENOMEM;
 	/* A per cpu bitfield with a bit per possible net device */
-	dtab->flush_needed = __alloc_percpu(
-				BITS_TO_LONGS(attr->max_entries) *
-					sizeof(unsigned long),
-				__alignof__(unsigned long));
+	dtab->flush_needed = __alloc_percpu(dev_map_bitmap_size(attr),
+					    __alignof__(unsigned long));
 	if (!dtab->flush_needed)
 		goto free_dtab;
 
@@ -128,12 +123,12 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 	spin_lock(&dev_map_lock);
 	list_add_tail_rcu(&dtab->list, &dev_map_list);
 	spin_unlock(&dev_map_lock);
-	return &dtab->map;
 
+	return &dtab->map;
 free_dtab:
 	free_percpu(dtab->flush_needed);
 	kfree(dtab);
-	return ERR_PTR(err);
+	return ERR_PTR(-ENOMEM);
 }
 
 static void dev_map_free(struct bpf_map *map)
|
|||||||
kfree(dev);
|
kfree(dev);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* At this point bpf program is detached and all pending operations
|
|
||||||
* _must_ be complete
|
|
||||||
*/
|
|
||||||
free_percpu(dtab->flush_needed);
|
free_percpu(dtab->flush_needed);
|
||||||
bpf_map_area_free(dtab->netdev_map);
|
bpf_map_area_free(dtab->netdev_map);
|
||||||
kfree(dtab);
|
kfree(dtab);
|
||||||
@@ -190,7 +182,7 @@ static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 {
 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 	u32 index = key ? *(u32 *)key : U32_MAX;
-	u32 *next = (u32 *)next_key;
+	u32 *next = next_key;
 
 	if (index >= dtab->map.max_entries) {
 		*next = 0;
@@ -199,29 +191,16 @@ static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 
 	if (index == dtab->map.max_entries - 1)
 		return -ENOENT;
-
 	*next = index + 1;
 	return 0;
 }
 
-void __dev_map_insert_ctx(struct bpf_map *map, u32 key)
+void __dev_map_insert_ctx(struct bpf_map *map, u32 bit)
 {
 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 	unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed);
 
-	__set_bit(key, bitmap);
-}
-
-struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
-{
-	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
-	struct bpf_dtab_netdev *dev;
-
-	if (key >= map->max_entries)
-		return NULL;
-
-	dev = READ_ONCE(dtab->netdev_map[key]);
-	return dev ? dev->dev : NULL;
+	__set_bit(bit, bitmap);
 }
 
 /* __dev_map_flush is called from xdp_do_flush_map() which _must_ be signaled
@@ -248,7 +227,6 @@ void __dev_map_flush(struct bpf_map *map)
 			continue;
 
 		netdev = dev->dev;
-
 		__clear_bit(bit, bitmap);
 		if (unlikely(!netdev || !netdev->netdev_ops->ndo_xdp_flush))
 			continue;
@@ -261,43 +239,49 @@ void __dev_map_flush(struct bpf_map *map)
  * update happens in parallel here a dev_put wont happen until after reading the
  * ifindex.
  */
-static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
+struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
 {
 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 	struct bpf_dtab_netdev *dev;
-	u32 i = *(u32 *)key;
 
-	if (i >= map->max_entries)
+	if (key >= map->max_entries)
 		return NULL;
 
-	dev = READ_ONCE(dtab->netdev_map[i]);
-	return dev ? &dev->dev->ifindex : NULL;
+	dev = READ_ONCE(dtab->netdev_map[key]);
+	return dev ? dev->dev : NULL;
+}
+
+static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
+{
+	struct net_device *dev = __dev_map_lookup_elem(map, *(u32 *)key);
+
+	return dev ? &dev->ifindex : NULL;
 }
 
-static void dev_map_flush_old(struct bpf_dtab_netdev *old_dev)
+static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
 {
-	if (old_dev->dev->netdev_ops->ndo_xdp_flush) {
-		struct net_device *fl = old_dev->dev;
+	if (dev->dev->netdev_ops->ndo_xdp_flush) {
+		struct net_device *fl = dev->dev;
 		unsigned long *bitmap;
 		int cpu;
 
 		for_each_online_cpu(cpu) {
-			bitmap = per_cpu_ptr(old_dev->dtab->flush_needed, cpu);
-			__clear_bit(old_dev->key, bitmap);
+			bitmap = per_cpu_ptr(dev->dtab->flush_needed, cpu);
+			__clear_bit(dev->bit, bitmap);
 
-			fl->netdev_ops->ndo_xdp_flush(old_dev->dev);
+			fl->netdev_ops->ndo_xdp_flush(dev->dev);
 		}
 	}
 }
 
 static void __dev_map_entry_free(struct rcu_head *rcu)
 {
-	struct bpf_dtab_netdev *old_dev;
+	struct bpf_dtab_netdev *dev;
 
-	old_dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
-	dev_map_flush_old(old_dev);
-	dev_put(old_dev->dev);
-	kfree(old_dev);
+	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
+	dev_map_flush_old(dev);
+	dev_put(dev->dev);
+	kfree(dev);
 }
 
 static int dev_map_delete_elem(struct bpf_map *map, void *key)
@@ -309,8 +293,8 @@ static int dev_map_delete_elem(struct bpf_map *map, void *key)
 	if (k >= map->max_entries)
 		return -EINVAL;
 
-	/* Use synchronize_rcu() here to ensure any rcu critical sections
-	 * have completed, but this does not guarantee a flush has happened
+	/* Use call_rcu() here to ensure any rcu critical sections have
+	 * completed, but this does not guarantee a flush has happened
 	 * yet. Because driver side rcu_read_lock/unlock only protects the
 	 * running XDP program. However, for pending flush operations the
 	 * dev and ctx are stored in another per cpu map. And additionally,
@@ -334,10 +318,8 @@ static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
 
 	if (unlikely(map_flags > BPF_EXIST))
 		return -EINVAL;
-
 	if (unlikely(i >= dtab->map.max_entries))
 		return -E2BIG;
-
 	if (unlikely(map_flags == BPF_NOEXIST))
 		return -EEXIST;
 
@@ -355,7 +337,7 @@ static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
 			return -EINVAL;
 		}
 
-		dev->key = i;
+		dev->bit = i;
 		dev->dtab = dtab;
 	}
 
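The devmap side above is mostly a mechanical rename (key -> bit, old_dev -> dev), but the new dev_map_bitmap_size() helper also tightens the cost accounting: the flush bitmap needs BITS_TO_LONGS(max_entries) longs, and one such bitmap is allocated per possible CPU, so the charged cost now multiplies by num_possible_cpus() instead of counting a single bitmap. A small userspace sketch of the arithmetic (illustrative numbers, not kernel code):

/* bitmap_cost.c - mirror dev_map_bitmap_size() in userspace.
 * On a 64-bit machine, BITS_TO_LONGS(100) == 2, so a 100-entry devmap
 * charges 16 bytes of flush bitmap per possible CPU.
 */
#include <stdio.h>

#define BITS_PER_LONG	 (8 * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned long max_entries = 100;	/* illustrative map size */
	unsigned long cpus = 4;			/* stand-in for num_possible_cpus() */
	unsigned long per_cpu = BITS_TO_LONGS(max_entries) * sizeof(unsigned long);

	printf("per-cpu bitmap: %lu bytes, charged in total: %lu bytes\n",
	       per_cpu, per_cpu * cpus);
	return 0;
}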
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1835,29 +1835,6 @@ static const struct bpf_func_proto bpf_redirect_proto = {
 	.arg2_type	= ARG_ANYTHING,
 };
 
-BPF_CALL_3(bpf_redirect_map, struct bpf_map *, map, u32, ifindex, u64, flags)
-{
-	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
-
-	if (unlikely(flags))
-		return XDP_ABORTED;
-
-	ri->ifindex = ifindex;
-	ri->flags = flags;
-	ri->map = map;
-
-	return XDP_REDIRECT;
-}
-
-static const struct bpf_func_proto bpf_redirect_map_proto = {
-	.func		= bpf_redirect_map,
-	.gpl_only	= false,
-	.ret_type	= RET_INTEGER,
-	.arg1_type	= ARG_CONST_MAP_PTR,
-	.arg2_type	= ARG_ANYTHING,
-	.arg3_type	= ARG_ANYTHING,
-};
-
 BPF_CALL_3(bpf_sk_redirect_map, struct bpf_map *, map, u32, key, u64, flags)
 {
 	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
@@ -2506,13 +2483,11 @@ static int __bpf_tx_xdp(struct net_device *dev,
 	err = dev->netdev_ops->ndo_xdp_xmit(dev, xdp);
 	if (err)
 		return err;
-
 	if (map)
 		__dev_map_insert_ctx(map, index);
 	else
 		dev->netdev_ops->ndo_xdp_flush(dev);
-
-	return err;
+	return 0;
 }
 
 void xdp_do_flush_map(void)
@@ -2520,16 +2495,14 @@ void xdp_do_flush_map(void)
 	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
 	struct bpf_map *map = ri->map_to_flush;
 
-	ri->map = NULL;
 	ri->map_to_flush = NULL;
-
 	if (map)
 		__dev_map_flush(map);
 }
 EXPORT_SYMBOL_GPL(xdp_do_flush_map);
 
-int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
-			struct bpf_prog *xdp_prog)
+static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
+			       struct bpf_prog *xdp_prog)
 {
 	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
 	struct bpf_map *map = ri->map;
@@ -2545,14 +2518,12 @@ int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
 		err = -EINVAL;
 		goto out;
 	}
-
-	if (ri->map_to_flush && (ri->map_to_flush != map))
+	if (ri->map_to_flush && ri->map_to_flush != map)
 		xdp_do_flush_map();
 
 	err = __bpf_tx_xdp(fwd, map, xdp, index);
 	if (likely(!err))
 		ri->map_to_flush = map;
-
 out:
 	trace_xdp_redirect(dev, fwd, xdp_prog, XDP_REDIRECT, err);
 	return err;
@@ -2594,20 +2565,17 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb)
 	ri->ifindex = 0;
 	if (unlikely(!dev)) {
 		bpf_warn_invalid_xdp_redirect(index);
-		goto err;
+		return -EINVAL;
 	}
-
 	if (unlikely(!(dev->flags & IFF_UP)))
-		goto err;
+		return -ENETDOWN;
 
 	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
 	if (skb->len > len)
-		goto err;
+		return -E2BIG;
 
 	skb->dev = dev;
 	return 0;
-err:
-	return -EINVAL;
 }
 EXPORT_SYMBOL_GPL(xdp_do_generic_redirect);
 
@@ -2620,6 +2588,7 @@ BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
 
 	ri->ifindex = ifindex;
 	ri->flags = flags;
+	ri->map = NULL;
 
 	return XDP_REDIRECT;
 }
@@ -2631,6 +2600,29 @@ static const struct bpf_func_proto bpf_xdp_redirect_proto = {
 	.arg2_type	= ARG_ANYTHING,
 };
 
+BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex, u64, flags)
+{
+	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
+
+	if (unlikely(flags))
+		return XDP_ABORTED;
+
+	ri->ifindex = ifindex;
+	ri->flags = flags;
+	ri->map = map;
+
+	return XDP_REDIRECT;
+}
+
+static const struct bpf_func_proto bpf_xdp_redirect_map_proto = {
+	.func		= bpf_xdp_redirect_map,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_CONST_MAP_PTR,
+	.arg2_type	= ARG_ANYTHING,
+	.arg3_type	= ARG_ANYTHING,
+};
+
 bool bpf_helper_changes_pkt_data(void *func)
 {
 	if (func == bpf_skb_vlan_push ||
@@ -3233,7 +3225,7 @@ xdp_func_proto(enum bpf_func_id func_id)
 	case BPF_FUNC_redirect:
 		return &bpf_xdp_redirect_proto;
 	case BPF_FUNC_redirect_map:
-		return &bpf_redirect_map_proto;
+		return &bpf_xdp_redirect_map_proto;
 	default:
 		return bpf_base_func_proto(func_id);
 	}
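Finally, dev_map_update_elem() shown earlier is what services the plain bpf(2) map-update path: userspace stores an egress ifindex in a devmap slot, and the renamed bpf_xdp_redirect_map() helper later picks it up at redirect time. A rough userspace sketch (raw syscall, minimal error handling; a real map fd would come from BPF_MAP_CREATE or a pinned object):

/* devmap_set_slot.c - point devmap slot 0 at a given device. */
#include <linux/bpf.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int devmap_set_slot(int map_fd, __u32 slot, int ifindex)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key    = (__u64)(unsigned long)&slot;
	attr.value  = (__u64)(unsigned long)&ifindex;
	attr.flags  = BPF_ANY;	/* dev_map_update_elem() rejects BPF_NOEXIST */

	return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}

int main(int argc, char **argv)
{
	int ifindex = if_nametoindex(argc > 1 ? argv[1] : "eth0");

	if (!ifindex) {
		perror("if_nametoindex");
		return 1;
	}
	/* -1 stands in for a real devmap fd obtained elsewhere */
	if (devmap_set_slot(-1, 0, ifindex))
		perror("BPF_MAP_UPDATE_ELEM");
	return 0;
}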