mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-16 00:34:20 +08:00
bpf: Fix bpf_redirect to an ipip/ip6tnl dev
If the bpf program calls bpf_redirect(dev, 0) and dev is an ipip/ip6tnl, it currently includes the mac header. E.g. if dev is ipip, the end result is IP-EthHdr-IP instead of IP-IP. The fix is to pull the mac header. At ingress, skb_postpull_rcsum() is not needed because the ethhdr should have been pulled once already and then got pushed back just before calling the bpf_prog. At egress, this patch calls skb_postpull_rcsum(). If bpf_redirect(dev, BPF_F_INGRESS) is called, it also fails now because it calls dev_forward_skb(), which eventually calls eth_type_trans(skb, dev). The eth_type_trans() will set skb->pkt_type = PACKET_OTHERHOST because the mac address does not match the redirecting dev->dev_addr. The PACKET_OTHERHOST will eventually cause ip_rcv() to error out. To fix this, ____dev_forward_skb() is added. Joint work with Daniel Borkmann. Fixes: cfc7381b30
("ip_tunnel: add collect_md mode to IPIP tunnel") Fixes: 8d79266bc4
("ip6_tunnel: add collect_md mode to IPv6 tunnels") Acked-by: Daniel Borkmann <daniel@iogearbox.net> Acked-by: Alexei Starovoitov <ast@fb.com> Signed-off-by: Martin KaFai Lau <kafai@fb.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
23dd831548
commit
4e3264d21b
@@ -3354,6 +3354,21 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
|
||||
bool is_skb_forwardable(const struct net_device *dev,
|
||||
const struct sk_buff *skb);
|
||||
|
||||
static __always_inline int ____dev_forward_skb(struct net_device *dev,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
if (skb_orphan_frags(skb, GFP_ATOMIC) ||
|
||||
unlikely(!is_skb_forwardable(dev, skb))) {
|
||||
atomic_long_inc(&dev->rx_dropped);
|
||||
kfree_skb(skb);
|
||||
return NET_RX_DROP;
|
||||
}
|
||||
|
||||
skb_scrub_packet(skb, true);
|
||||
skb->priority = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
|
||||
|
||||
extern int netdev_budget;
|
||||
|
@@ -1766,19 +1766,14 @@ EXPORT_SYMBOL_GPL(is_skb_forwardable);
|
||||
|
||||
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
|
||||
{
|
||||
if (skb_orphan_frags(skb, GFP_ATOMIC) ||
|
||||
unlikely(!is_skb_forwardable(dev, skb))) {
|
||||
atomic_long_inc(&dev->rx_dropped);
|
||||
kfree_skb(skb);
|
||||
return NET_RX_DROP;
|
||||
int ret = ____dev_forward_skb(dev, skb);
|
||||
|
||||
if (likely(!ret)) {
|
||||
skb->protocol = eth_type_trans(skb, dev);
|
||||
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
|
||||
}
|
||||
|
||||
skb_scrub_packet(skb, true);
|
||||
skb->priority = 0;
|
||||
skb->protocol = eth_type_trans(skb, dev);
|
||||
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(__dev_forward_skb);
|
||||
|
||||
|
@@ -1628,6 +1628,19 @@ static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
|
||||
return dev_forward_skb(dev, skb);
|
||||
}
|
||||
|
||||
static inline int __bpf_rx_skb_no_mac(struct net_device *dev,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
int ret = ____dev_forward_skb(dev, skb);
|
||||
|
||||
if (likely(!ret)) {
|
||||
skb->dev = dev;
|
||||
ret = netif_rx(skb);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
|
||||
{
|
||||
int ret;
|
||||
@@ -1647,6 +1660,51 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
|
||||
u32 flags)
|
||||
{
|
||||
/* skb->mac_len is not set on normal egress */
|
||||
unsigned int mlen = skb->network_header - skb->mac_header;
|
||||
|
||||
__skb_pull(skb, mlen);
|
||||
|
||||
/* At ingress, the mac header has already been pulled once.
|
||||
* At egress, skb_pospull_rcsum has to be done in case that
|
||||
* the skb is originated from ingress (i.e. a forwarded skb)
|
||||
* to ensure that rcsum starts at net header.
|
||||
*/
|
||||
if (!skb_at_tc_ingress(skb))
|
||||
skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
|
||||
skb_pop_mac_header(skb);
|
||||
skb_reset_mac_len(skb);
|
||||
return flags & BPF_F_INGRESS ?
|
||||
__bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb);
|
||||
}
|
||||
|
||||
static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev,
|
||||
u32 flags)
|
||||
{
|
||||
bpf_push_mac_rcsum(skb);
|
||||
return flags & BPF_F_INGRESS ?
|
||||
__bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
|
||||
}
|
||||
|
||||
static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev,
|
||||
u32 flags)
|
||||
{
|
||||
switch (dev->type) {
|
||||
case ARPHRD_TUNNEL:
|
||||
case ARPHRD_TUNNEL6:
|
||||
case ARPHRD_SIT:
|
||||
case ARPHRD_IPGRE:
|
||||
case ARPHRD_VOID:
|
||||
case ARPHRD_NONE:
|
||||
return __bpf_redirect_no_mac(skb, dev, flags);
|
||||
default:
|
||||
return __bpf_redirect_common(skb, dev, flags);
|
||||
}
|
||||
}
|
||||
|
||||
BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
|
||||
{
|
||||
struct net_device *dev;
|
||||
@@ -1675,10 +1733,7 @@ BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
bpf_push_mac_rcsum(clone);
|
||||
|
||||
return flags & BPF_F_INGRESS ?
|
||||
__bpf_rx_skb(dev, clone) : __bpf_tx_skb(dev, clone);
|
||||
return __bpf_redirect(clone, dev, flags);
|
||||
}
|
||||
|
||||
static const struct bpf_func_proto bpf_clone_redirect_proto = {
|
||||
@@ -1722,10 +1777,7 @@ int skb_do_redirect(struct sk_buff *skb)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
bpf_push_mac_rcsum(skb);
|
||||
|
||||
return ri->flags & BPF_F_INGRESS ?
|
||||
__bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
|
||||
return __bpf_redirect(skb, dev, ri->flags);
|
||||
}
|
||||
|
||||
static const struct bpf_func_proto bpf_redirect_proto = {
|
||||
|
Loading…
Reference in New Issue
Block a user