xsk: Make xskmap flush_list common for all map instances
The xskmap flush list is used to track entries that need to be flushed via the xdp_do_flush_map() function. This list used to be per-map, but there is really no reason for that. Instead, make the flush list global for all xskmaps, which simplifies __xsk_map_flush() and xsk_map_alloc().

Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Toke Høiland-Jørgensen <toke@redhat.com>
Link: https://lore.kernel.org/bpf/20191219061006.21980-5-bjorn.topel@gmail.com
This commit is contained in:
parent fb5aacdf36
commit e312b9e706
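The change boils down to replacing a flush list allocated inside every xskmap with one global per-CPU list. As a rough illustration only, here is a minimal userspace C sketch of that pattern. The names xskmap_flush_list and flush_node follow the diff below, but the hand-rolled list helpers and the explicit cpu index are stand-ins for the kernel's DEFINE_PER_CPU()/this_cpu_ptr() and <linux/list.h> machinery, so treat this as a hedged approximation, not the kernel code itself.

/*
 * Userspace sketch of the pattern this patch adopts: one global per-CPU
 * flush list shared by every xskmap, instead of a flush list allocated
 * inside each map.  A fixed array indexed by a fake CPU id stands in for
 * the kernel's per-CPU storage.
 */
#include <stdio.h>
#include <stddef.h>

#define NR_CPUS 4

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

struct xdp_sock {
	int id;
	struct list_head flush_node;	/* linked on the per-CPU flush list */
};

/* Global flush list, one per CPU, shared by all xskmaps. */
static struct list_head xskmap_flush_list[NR_CPUS];

/* Mirrors __xsk_map_redirect(): queue the socket for a later flush. */
static void xsk_map_redirect(struct xdp_sock *xs, int cpu)
{
	list_add_tail(&xs->flush_node, &xskmap_flush_list[cpu]);
}

/* Mirrors __xsk_map_flush(): drain this CPU's list; no map argument needed. */
static void xsk_map_flush(int cpu)
{
	struct list_head *head = &xskmap_flush_list[cpu];
	struct list_head *pos = head->next;

	while (pos != head) {
		struct xdp_sock *xs = (struct xdp_sock *)
			((char *)pos - offsetof(struct xdp_sock, flush_node));
		pos = pos->next;
		printf("flushing xsk %d on cpu %d\n", xs->id, cpu);
	}
	INIT_LIST_HEAD(head);	/* list is empty once the flush is done */
}

int main(void)
{
	struct xdp_sock a = { .id = 1 }, b = { .id = 2 };
	int cpu;

	/* Mirrors xsk_init(): initialise every per-CPU list once, globally. */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		INIT_LIST_HEAD(&xskmap_flush_list[cpu]);

	xsk_map_redirect(&a, 0);
	xsk_map_redirect(&b, 0);
	xsk_map_flush(0);
	return 0;
}

The payoff of the design is visible in the flush path: because the list is global and per-CPU, flushing no longer needs a struct bpf_map argument, and map allocation no longer has to allocate, charge, and free a per-map flush list.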
@@ -72,7 +72,6 @@ struct xdp_umem {
 
 struct xsk_map {
 	struct bpf_map map;
-	struct list_head __percpu *flush_list;
 	spinlock_t lock; /* Synchronize map updates */
 	struct xdp_sock *xsk_map[];
 };
@@ -139,9 +138,8 @@ void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
 			     struct xdp_sock **map_entry);
 int xsk_map_inc(struct xsk_map *map);
 void xsk_map_put(struct xsk_map *map);
-int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
-		       struct xdp_sock *xs);
-void __xsk_map_flush(struct bpf_map *map);
+int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
+void __xsk_map_flush(void);
 
 static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
 						     u32 key)
@@ -369,13 +367,12 @@ static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 handle,
 	return 0;
 }
 
-static inline int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
-				     struct xdp_sock *xs)
+static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
 {
 	return -EOPNOTSUPP;
 }
 
-static inline void __xsk_map_flush(struct bpf_map *map)
+static inline void __xsk_map_flush(void)
 {
 }
 
@@ -72,9 +72,9 @@ static void xsk_map_sock_delete(struct xdp_sock *xs,
 static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
 {
 	struct bpf_map_memory mem;
-	int cpu, err, numa_node;
+	int err, numa_node;
 	struct xsk_map *m;
-	u64 cost, size;
+	u64 size;
 
 	if (!capable(CAP_NET_ADMIN))
 		return ERR_PTR(-EPERM);
@@ -86,9 +86,8 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
 
 	numa_node = bpf_map_attr_numa_node(attr);
 	size = struct_size(m, xsk_map, attr->max_entries);
-	cost = size + array_size(sizeof(*m->flush_list), num_possible_cpus());
 
-	err = bpf_map_charge_init(&mem, cost);
+	err = bpf_map_charge_init(&mem, size);
 	if (err < 0)
 		return ERR_PTR(err);
 
@@ -102,16 +101,6 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
 	bpf_map_charge_move(&m->map.memory, &mem);
 	spin_lock_init(&m->lock);
 
-	m->flush_list = alloc_percpu(struct list_head);
-	if (!m->flush_list) {
-		bpf_map_charge_finish(&m->map.memory);
-		bpf_map_area_free(m);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	for_each_possible_cpu(cpu)
-		INIT_LIST_HEAD(per_cpu_ptr(m->flush_list, cpu));
-
 	return &m->map;
 }
 
@@ -121,7 +110,6 @@ static void xsk_map_free(struct bpf_map *map)
 
 	bpf_clear_redirect_map(map);
 	synchronize_net();
-	free_percpu(m->flush_list);
 	bpf_map_area_free(m);
 }
 
@@ -3511,8 +3511,7 @@ err:
 
 static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
 			    struct bpf_map *map,
-			    struct xdp_buff *xdp,
-			    u32 index)
+			    struct xdp_buff *xdp)
 {
 	int err;
 
@@ -3537,7 +3536,7 @@ static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
 	case BPF_MAP_TYPE_XSKMAP: {
 		struct xdp_sock *xs = fwd;
 
-		err = __xsk_map_redirect(map, xdp, xs);
+		err = __xsk_map_redirect(xs, xdp);
 		return err;
 	}
 	default:
@@ -3562,7 +3561,7 @@ void xdp_do_flush_map(void)
 			__cpu_map_flush(map);
 			break;
 		case BPF_MAP_TYPE_XSKMAP:
-			__xsk_map_flush(map);
+			__xsk_map_flush();
 			break;
 		default:
 			break;
@@ -3619,7 +3618,7 @@ static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
 	if (ri->map_to_flush && unlikely(ri->map_to_flush != map))
 		xdp_do_flush_map();
 
-	err = __bpf_tx_xdp_map(dev, fwd, map, xdp, index);
+	err = __bpf_tx_xdp_map(dev, fwd, map, xdp);
 	if (unlikely(err))
 		goto err;
 
@@ -31,6 +31,8 @@
 
 #define TX_BATCH_SIZE 16
 
+static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);
+
 bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
 {
 	return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
@@ -264,11 +266,9 @@ out_unlock:
 	return err;
 }
 
-int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
-		       struct xdp_sock *xs)
+int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
 {
-	struct xsk_map *m = container_of(map, struct xsk_map, map);
-	struct list_head *flush_list = this_cpu_ptr(m->flush_list);
+	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
 	int err;
 
 	err = xsk_rcv(xs, xdp);
@@ -281,10 +281,9 @@ int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
 	return 0;
 }
 
-void __xsk_map_flush(struct bpf_map *map)
+void __xsk_map_flush(void)
 {
-	struct xsk_map *m = container_of(map, struct xsk_map, map);
-	struct list_head *flush_list = this_cpu_ptr(m->flush_list);
+	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
 	struct xdp_sock *xs, *tmp;
 
 	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
@@ -1177,7 +1176,7 @@ static struct pernet_operations xsk_net_ops = {
 
 static int __init xsk_init(void)
 {
-	int err;
+	int err, cpu;
 
 	err = proto_register(&xsk_proto, 0 /* no slab */);
 	if (err)
@@ -1195,6 +1194,8 @@ static int __init xsk_init(void)
 	if (err)
 		goto out_pernet;
 
+	for_each_possible_cpu(cpu)
+		INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
 	return 0;
 
 out_pernet: