
bpf, xdp: Restructure redirect actions

The XDP_REDIRECT implementations for maps and non-maps are fairly
similar, but obviously need to take different code paths depending on
whether the target is using a map or not. Today, the redirect targets
for XDP either use a map, or are based on ifindex.

Here, the map type and id are added to bpf_redirect_info, instead of
the actual map pointer. The map type, the map item/ifindex, and the
map id (if any) are passed to xdp_do_redirect().

For ifindex-based redirect, used by the bpf_redirect() XDP BPF
helper, a special map type/id pair is used. A map type of UNSPEC
together with a map id equal to INT_MAX has the special meaning of an
ifindex-based redirect. Note that valid map ids are 1 inclusive,
INT_MAX exclusive ([1,INT_MAX[), so the sentinel can never collide
with a real map.
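
To make the sentinel encoding concrete, below is a minimal user-space
sketch (not verbatim kernel code; the struct and enum are reduced
stand-ins for the fields this patch touches) of how the two redirect
flavors are recorded:

	#include <limits.h>

	/* Reduced stand-ins for the kernel types touched by this patch. */
	enum bpf_map_type { BPF_MAP_TYPE_UNSPEC = 0, BPF_MAP_TYPE_DEVMAP = 14 };

	struct redirect_info_sketch {
		unsigned int tgt_index;
		unsigned int map_id;
		enum bpf_map_type map_type;
	};

	/* bpf_redirect(ifindex, ..): no map involved, tag with the sentinel. */
	static void record_ifindex_redirect(struct redirect_info_sketch *ri,
					    unsigned int ifindex)
	{
		ri->tgt_index = ifindex;
		ri->map_id = INT_MAX;	/* never allocated; valid ids are [1,INT_MAX[ */
		ri->map_type = BPF_MAP_TYPE_UNSPEC;
	}

	/* bpf_redirect_map(map, key, ..): record type and id, not the pointer. */
	static void record_map_redirect(struct redirect_info_sketch *ri,
					unsigned int key, unsigned int map_id,
					enum bpf_map_type map_type)
	{
		ri->tgt_index = key;
		ri->map_id = map_id;	/* from map->id */
		ri->map_type = map_type;
	}

A consumer then needs no separate "is this a map?" flag: map_type
plus the reserved map_id fully identify the redirect flavor.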

In addition to making the code easier to follow, using an explicit
type and id in bpf_redirect_info has a slight positive performance
impact by avoiding a pointer indirection for the map type lookup,
instead using the cacheline of bpf_redirect_info itself.

Since the actual map is no longer passed via bpf_redirect_info, the
map lookup is only done in the BPF helper. This means that the
bpf_clear_redirect_map() function can be removed. The actual map item
is RCU protected.
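
The teardown consequence is visible in the cpumap/devmap/xskmap hunks
below: with no map pointer cached per CPU, a map's free path only
needs an RCU grace period. Roughly, for a hypothetical map type
(illustrative sketch, not a real kernel function):

	static void some_map_free(struct bpf_map *map)
	{
		/* bpf_clear_redirect_map(map);  -- per-CPU scrub, now removed */
		synchronize_rcu();	/* wait out in-flight XDP programs */
		/* ... then free the map's backing memory ... */
	}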

The bpf_redirect_info flags member is not used by XDP, and is no
longer read or written. The new map_id and map_type members are only
written to when required/used, and not unconditionally.
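
For context, here is a minimal XDP program exercising both redirect
flavors that this patch restructures. The map name, sizes, and the
ifindex constant are illustrative, not part of the patch:

	// SPDX-License-Identifier: GPL-2.0
	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	struct {
		__uint(type, BPF_MAP_TYPE_DEVMAP);
		__uint(max_entries, 64);
		__type(key, __u32);
		__type(value, __u32);
	} tx_ports SEC(".maps");	/* hypothetical egress map */

	SEC("xdp")
	int redirect_example(struct xdp_md *ctx)
	{
		/* Map-based path: __bpf_xdp_redirect_map() records map_type/map_id. */
		return bpf_redirect_map(&tx_ports, ctx->rx_queue_index, 0);

		/* The ifindex-based path would instead store the UNSPEC/INT_MAX
		 * sentinel:
		 *	return bpf_redirect(3, 0);	// 3 = hypothetical ifindex
		 */
	}

	char _license[] SEC("license") = "GPL";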

Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Acked-by: Toke Høiland-Jørgensen <toke@redhat.com>
Link: https://lore.kernel.org/bpf/20210308112907.559576-3-bjorn.topel@gmail.com
commit ee75aef23a (parent e6a4750ffe)
Author: Björn Töpel, 2021-03-08 12:29:07 +01:00
Committer: Daniel Borkmann
6 changed files with 115 additions and 128 deletions

diff --git a/include/linux/filter.h b/include/linux/filter.h

@@ -646,7 +646,8 @@ struct bpf_redirect_info {
 	u32 flags;
 	u32 tgt_index;
 	void *tgt_value;
-	struct bpf_map *map;
+	u32 map_id;
+	enum bpf_map_type map_type;
 	u32 kern_flags;
 	struct bpf_nh_params nh;
 };
@@ -1488,13 +1489,14 @@ static __always_inline int __bpf_xdp_redirect_map(struct bpf_map *map, u32 ifindex, u64 flags,
 		 * performs multiple lookups, the last one always takes
 		 * precedence.
 		 */
-		WRITE_ONCE(ri->map, NULL);
+		ri->map_id = INT_MAX; /* Valid map id idr range: [1,INT_MAX[ */
+		ri->map_type = BPF_MAP_TYPE_UNSPEC;
 		return flags;
 	}
 
-	ri->flags = flags;
 	ri->tgt_index = ifindex;
-	WRITE_ONCE(ri->map, map);
+	ri->map_id = map->id;
+	ri->map_type = map->map_type;
 
 	return XDP_REDIRECT;
 }

diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h

@@ -86,19 +86,15 @@ struct _bpf_dtab_netdev {
 };
 #endif /* __DEVMAP_OBJ_TYPE */
 
-#define devmap_ifindex(tgt, map)				\
-	(((map->map_type == BPF_MAP_TYPE_DEVMAP ||	\
-	  map->map_type == BPF_MAP_TYPE_DEVMAP_HASH)) ? \
-	 ((struct _bpf_dtab_netdev *)tgt)->dev->ifindex : 0)
-
 DECLARE_EVENT_CLASS(xdp_redirect_template,
 
 	TP_PROTO(const struct net_device *dev,
 		 const struct bpf_prog *xdp,
 		 const void *tgt, int err,
-		 const struct bpf_map *map, u32 index),
+		 enum bpf_map_type map_type,
+		 u32 map_id, u32 index),
 
-	TP_ARGS(dev, xdp, tgt, err, map, index),
+	TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index),
 
 	TP_STRUCT__entry(
 		__field(int, prog_id)
@@ -111,14 +107,22 @@ DECLARE_EVENT_CLASS(xdp_redirect_template,
 	),
 
 	TP_fast_assign(
+		u32 ifindex = 0, map_index = index;
+
+		if (map_type == BPF_MAP_TYPE_DEVMAP || map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
+			ifindex = ((struct _bpf_dtab_netdev *)tgt)->dev->ifindex;
+		} else if (map_type == BPF_MAP_TYPE_UNSPEC && map_id == INT_MAX) {
+			ifindex = index;
+			map_index = 0;
+		}
+
 		__entry->prog_id	= xdp->aux->id;
 		__entry->act		= XDP_REDIRECT;
 		__entry->ifindex	= dev->ifindex;
 		__entry->err		= err;
-		__entry->to_ifindex	= map ? devmap_ifindex(tgt, map) :
-						index;
-		__entry->map_id		= map ? map->id : 0;
-		__entry->map_index	= map ? index : 0;
+		__entry->to_ifindex	= ifindex;
+		__entry->map_id		= map_id;
+		__entry->map_index	= map_index;
 	),
 
 	TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d"
@@ -133,45 +137,49 @@ DEFINE_EVENT(xdp_redirect_template, xdp_redirect,
 	TP_PROTO(const struct net_device *dev,
 		 const struct bpf_prog *xdp,
 		 const void *tgt, int err,
-		 const struct bpf_map *map, u32 index),
-	TP_ARGS(dev, xdp, tgt, err, map, index)
+		 enum bpf_map_type map_type,
+		 u32 map_id, u32 index),
+	TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
 );
 
 DEFINE_EVENT(xdp_redirect_template, xdp_redirect_err,
 	TP_PROTO(const struct net_device *dev,
 		 const struct bpf_prog *xdp,
 		 const void *tgt, int err,
-		 const struct bpf_map *map, u32 index),
-	TP_ARGS(dev, xdp, tgt, err, map, index)
+		 enum bpf_map_type map_type,
+		 u32 map_id, u32 index),
+	TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
 );
 
 #define _trace_xdp_redirect(dev, xdp, to)				\
-	 trace_xdp_redirect(dev, xdp, NULL, 0, NULL, to)
+	 trace_xdp_redirect(dev, xdp, NULL, 0, BPF_MAP_TYPE_UNSPEC, INT_MAX, to)
 
 #define _trace_xdp_redirect_err(dev, xdp, to, err)			\
-	 trace_xdp_redirect_err(dev, xdp, NULL, err, NULL, to)
+	 trace_xdp_redirect_err(dev, xdp, NULL, err, BPF_MAP_TYPE_UNSPEC, INT_MAX, to)
 
-#define _trace_xdp_redirect_map(dev, xdp, to, map, index)		\
-	 trace_xdp_redirect(dev, xdp, to, 0, map, index)
+#define _trace_xdp_redirect_map(dev, xdp, to, map_type, map_id, index)	\
+	 trace_xdp_redirect(dev, xdp, to, 0, map_type, map_id, index)
 
-#define _trace_xdp_redirect_map_err(dev, xdp, to, map, index, err)	\
-	 trace_xdp_redirect_err(dev, xdp, to, err, map, index)
+#define _trace_xdp_redirect_map_err(dev, xdp, to, map_type, map_id, index, err) \
+	 trace_xdp_redirect_err(dev, xdp, to, err, map_type, map_id, index)
 
 /* not used anymore, but kept around so as not to break old programs */
 DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map,
 	TP_PROTO(const struct net_device *dev,
 		 const struct bpf_prog *xdp,
 		 const void *tgt, int err,
-		 const struct bpf_map *map, u32 index),
-	TP_ARGS(dev, xdp, tgt, err, map, index)
+		 enum bpf_map_type map_type,
+		 u32 map_id, u32 index),
+	TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
 );
 
 DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map_err,
 	TP_PROTO(const struct net_device *dev,
 		 const struct bpf_prog *xdp,
 		 const void *tgt, int err,
-		 const struct bpf_map *map, u32 index),
-	TP_ARGS(dev, xdp, tgt, err, map, index)
+		 enum bpf_map_type map_type,
+		 u32 map_id, u32 index),
+	TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
 );
 
 TRACE_EVENT(xdp_cpumap_kthread,

diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c

@@ -543,7 +543,6 @@ static void cpu_map_free(struct bpf_map *map)
 	 * complete.
 	 */
 
-	bpf_clear_redirect_map(map);
 	synchronize_rcu();
 
 	/* For cpu_map the remote CPUs can still be using the entries

diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c

@@ -197,7 +197,6 @@ static void dev_map_free(struct bpf_map *map)
 	list_del_rcu(&dtab->list);
 	spin_unlock(&dev_map_lock);
 
-	bpf_clear_redirect_map(map);
 	synchronize_rcu();
 
 	/* Make sure prior __dev_map_entry_free() have completed. */

diff --git a/net/core/filter.c b/net/core/filter.c

@@ -3918,23 +3918,6 @@ static const struct bpf_func_proto bpf_xdp_adjust_meta_proto = {
 	.arg2_type	= ARG_ANYTHING,
 };
 
-static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
-			    struct bpf_map *map, struct xdp_buff *xdp)
-{
-	switch (map->map_type) {
-	case BPF_MAP_TYPE_DEVMAP:
-	case BPF_MAP_TYPE_DEVMAP_HASH:
-		return dev_map_enqueue(fwd, xdp, dev_rx);
-	case BPF_MAP_TYPE_CPUMAP:
-		return cpu_map_enqueue(fwd, xdp, dev_rx);
-	case BPF_MAP_TYPE_XSKMAP:
-		return __xsk_map_redirect(fwd, xdp);
-	default:
-		return -EBADRQC;
-	}
-	return 0;
-}
-
 void xdp_do_flush(void)
 {
 	__dev_flush();
@@ -3943,55 +3926,52 @@ void xdp_do_flush(void)
 }
 EXPORT_SYMBOL_GPL(xdp_do_flush);
 
-void bpf_clear_redirect_map(struct bpf_map *map)
-{
-	struct bpf_redirect_info *ri;
-	int cpu;
-
-	for_each_possible_cpu(cpu) {
-		ri = per_cpu_ptr(&bpf_redirect_info, cpu);
-		/* Avoid polluting remote cacheline due to writes if
-		 * not needed. Once we pass this test, we need the
-		 * cmpxchg() to make sure it hasn't been changed in
-		 * the meantime by remote CPU.
-		 */
-		if (unlikely(READ_ONCE(ri->map) == map))
-			cmpxchg(&ri->map, map, NULL);
-	}
-}
-
 int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
 		    struct bpf_prog *xdp_prog)
 {
 	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
-	struct bpf_map *map = READ_ONCE(ri->map);
-	u32 index = ri->tgt_index;
+	enum bpf_map_type map_type = ri->map_type;
 	void *fwd = ri->tgt_value;
+	u32 map_id = ri->map_id;
 	int err;
 
-	ri->tgt_index = 0;
-	ri->tgt_value = NULL;
-	WRITE_ONCE(ri->map, NULL);
+	ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
+	ri->map_type = BPF_MAP_TYPE_UNSPEC;
 
-	if (unlikely(!map)) {
-		fwd = dev_get_by_index_rcu(dev_net(dev), index);
-		if (unlikely(!fwd)) {
-			err = -EINVAL;
-			goto err;
+	switch (map_type) {
+	case BPF_MAP_TYPE_DEVMAP:
+		fallthrough;
+	case BPF_MAP_TYPE_DEVMAP_HASH:
+		err = dev_map_enqueue(fwd, xdp, dev);
+		break;
+	case BPF_MAP_TYPE_CPUMAP:
+		err = cpu_map_enqueue(fwd, xdp, dev);
+		break;
+	case BPF_MAP_TYPE_XSKMAP:
+		err = __xsk_map_redirect(fwd, xdp);
+		break;
+	case BPF_MAP_TYPE_UNSPEC:
+		if (map_id == INT_MAX) {
+			fwd = dev_get_by_index_rcu(dev_net(dev), ri->tgt_index);
+			if (unlikely(!fwd)) {
+				err = -EINVAL;
+				break;
+			}
+			err = dev_xdp_enqueue(fwd, xdp, dev);
+			break;
 		}
-
-		err = dev_xdp_enqueue(fwd, xdp, dev);
-	} else {
-		err = __bpf_tx_xdp_map(dev, fwd, map, xdp);
+		fallthrough;
+	default:
+		err = -EBADRQC;
 	}
 
 	if (unlikely(err))
 		goto err;
 
-	_trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
+	_trace_xdp_redirect_map(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index);
 	return 0;
 err:
-	_trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err);
+	_trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index, err);
 	return err;
 }
 EXPORT_SYMBOL_GPL(xdp_do_redirect);
@@ -4000,41 +3980,36 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
 				       struct sk_buff *skb,
 				       struct xdp_buff *xdp,
 				       struct bpf_prog *xdp_prog,
-				       struct bpf_map *map)
+				       void *fwd,
+				       enum bpf_map_type map_type, u32 map_id)
 {
 	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
-	u32 index = ri->tgt_index;
-	void *fwd = ri->tgt_value;
-	int err = 0;
-
-	ri->tgt_index = 0;
-	ri->tgt_value = NULL;
-	WRITE_ONCE(ri->map, NULL);
-
-	if (map->map_type == BPF_MAP_TYPE_DEVMAP ||
-	    map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
-		struct bpf_dtab_netdev *dst = fwd;
+	int err;
 
-		err = dev_map_generic_redirect(dst, skb, xdp_prog);
+	switch (map_type) {
+	case BPF_MAP_TYPE_DEVMAP:
+		fallthrough;
+	case BPF_MAP_TYPE_DEVMAP_HASH:
+		err = dev_map_generic_redirect(fwd, skb, xdp_prog);
 		if (unlikely(err))
 			goto err;
-	} else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
-		struct xdp_sock *xs = fwd;
-
-		err = xsk_generic_rcv(xs, xdp);
+		break;
+	case BPF_MAP_TYPE_XSKMAP:
+		err = xsk_generic_rcv(fwd, xdp);
 		if (err)
 			goto err;
 		consume_skb(skb);
-	} else {
+		break;
+	default:
 		/* TODO: Handle BPF_MAP_TYPE_CPUMAP */
 		err = -EBADRQC;
 		goto err;
 	}
 
-	_trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
+	_trace_xdp_redirect_map(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index);
 	return 0;
 err:
-	_trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err);
+	_trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index, err);
 	return err;
 }
 
@@ -4042,31 +4017,34 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
 			    struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
 {
 	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
-	struct bpf_map *map = READ_ONCE(ri->map);
-	u32 index = ri->tgt_index;
-	struct net_device *fwd;
-	int err = 0;
+	enum bpf_map_type map_type = ri->map_type;
+	void *fwd = ri->tgt_value;
+	u32 map_id = ri->map_id;
+	int err;
 
-	if (map)
-		return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog,
-						   map);
-	ri->tgt_index = 0;
-	fwd = dev_get_by_index_rcu(dev_net(dev), index);
-	if (unlikely(!fwd)) {
-		err = -EINVAL;
-		goto err;
+	ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
+	ri->map_type = BPF_MAP_TYPE_UNSPEC;
+
+	if (map_type == BPF_MAP_TYPE_UNSPEC && map_id == INT_MAX) {
+		fwd = dev_get_by_index_rcu(dev_net(dev), ri->tgt_index);
+		if (unlikely(!fwd)) {
+			err = -EINVAL;
+			goto err;
+		}
+
+		err = xdp_ok_fwd_dev(fwd, skb->len);
+		if (unlikely(err))
+			goto err;
+
+		skb->dev = fwd;
+		_trace_xdp_redirect(dev, xdp_prog, ri->tgt_index);
+		generic_xdp_tx(skb, xdp_prog);
+		return 0;
 	}
 
-	err = xdp_ok_fwd_dev(fwd, skb->len);
-	if (unlikely(err))
-		goto err;
-
-	skb->dev = fwd;
-	_trace_xdp_redirect(dev, xdp_prog, index);
-	generic_xdp_tx(skb, xdp_prog);
-	return 0;
+	return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog, fwd, map_type, map_id);
 err:
-	_trace_xdp_redirect_err(dev, xdp_prog, index, err);
+	_trace_xdp_redirect_err(dev, xdp_prog, ri->tgt_index, err);
 	return err;
 }
 
@@ -4077,10 +4055,12 @@ BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
 	if (unlikely(flags))
 		return XDP_ABORTED;
 
-	ri->flags = flags;
+	/* NB! Map type UNSPEC and map_id == INT_MAX (never generated
+	 * by map_idr) is used for ifindex based XDP redirect.
+	 */
 	ri->tgt_index = ifindex;
-	ri->tgt_value = NULL;
-	WRITE_ONCE(ri->map, NULL);
+	ri->map_id = INT_MAX;
+	ri->map_type = BPF_MAP_TYPE_UNSPEC;
 
 	return XDP_REDIRECT;
 }

diff --git a/net/xdp/xskmap.c b/net/xdp/xskmap.c

@@ -87,7 +87,6 @@ static void xsk_map_free(struct bpf_map *map)
 {
 	struct xsk_map *m = container_of(map, struct xsk_map, map);
 
-	bpf_clear_redirect_map(map);
 	synchronize_net();
 	bpf_map_area_free(m);
 }