29cfb2aaa4
Add a new bridge port attribute that allows attaching a nexthop object ID to an skb that is redirected to a backup bridge port with VLAN tunneling enabled.

Specifically, when redirecting a known unicast packet, read the backup nexthop ID from the bridge port that lost its carrier and set it in the bridge control block of the skb before forwarding it via the backup port. Note that reading the ID from the bridge port should not result in a cache miss as the ID is added next to the 'backup_port' field that was already accessed. After this change, the 'state' field still stays on the first cache line, together with other data path related fields such as 'flags' and 'vlgrp':

struct net_bridge_port {
	struct net_bridge *        br;                    /*     0     8 */
	struct net_device *        dev;                   /*     8     8 */
	netdevice_tracker          dev_tracker;           /*    16     0 */
	struct list_head           list;                  /*    16    16 */
	long unsigned int          flags;                 /*    32     8 */
	struct net_bridge_vlan_group * vlgrp;             /*    40     8 */
	struct net_bridge_port *   backup_port;           /*    48     8 */
	u32                        backup_nhid;           /*    56     4 */
	u8                         priority;              /*    60     1 */
	u8                         state;                 /*    61     1 */
	u16                        port_no;               /*    62     2 */
	/* --- cacheline 1 boundary (64 bytes) --- */
	[...]
} __attribute__((__aligned__(8)));

When forwarding an skb via a bridge port that has VLAN tunneling enabled, check if the backup nexthop ID stored in the bridge control block is valid (i.e., not zero). If so, instead of attaching the pre-allocated metadata (that only has the tunnel key set), allocate a new metadata, set both the tunnel key and the nexthop object ID and attach it to the skb.

By default, do not dump the new attribute to user space as a value of zero is an invalid nexthop object ID.

The above is useful for EVPN multihoming. When one of the links composing an Ethernet Segment (ES) fails, traffic needs to be redirected towards the host via one of the other ES peers. For example, if a host is multihomed to three different VTEPs, the backup port of each ES link needs to be set to the VXLAN device and the backup nexthop ID needs to point to an FDB nexthop group that includes the IP addresses of the other two VTEPs. The VXLAN driver will extract the ID from the metadata of the redirected skb, calculate its flow hash and forward it towards one of the other VTEPs. If the ID does not exist, or represents an invalid nexthop object, the VXLAN driver will drop the skb. This relieves the bridge driver from the need to validate the ID.

Signed-off-by: Ido Schimmel <idosch@nvidia.com>
Acked-by: Nikolay Aleksandrov <razor@blackwall.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
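The consuming side of this mechanism lives in the VLAN tunnel egress path rather than in br_forward.c itself. The following is a minimal sketch of that logic, assuming the 'nhid' member added to the tunnel key by this series and the metadata helpers from <net/dst_metadata.h>; the function name and exact layout are illustrative, not a verbatim copy of br_vlan_tunnel.c:

/* Sketch only: attach per-skb tunnel metadata carrying the backup nexthop
 * object ID that br_forward() recorded during the backup-port redirect.
 * 'tunnel_id' is assumed to hold the tunnel key configured for the egress
 * port/VLAN pair.
 */
static int br_vlan_tunnel_egress_sketch(struct sk_buff *skb, __be64 tunnel_id)
{
	struct metadata_dst *tunnel_dst;
	u32 nhid = BR_INPUT_SKB_CB(skb)->backup_nhid;

	if (!nhid)
		return 0;	/* no redirect; pre-allocated metadata suffices */

	/* Allocate fresh metadata instead of reusing the pre-allocated one,
	 * since the nexthop ID is specific to this redirected skb.
	 */
	tunnel_dst = __ip_tun_set_dst(0, 0, 0, 0, 0, TUNNEL_KEY, tunnel_id, 0);
	if (!tunnel_dst)
		return -ENOMEM;

	tunnel_dst->u.tun_info.mode |= IP_TUNNEL_INFO_TX | IP_TUNNEL_INFO_BRIDGE;
	tunnel_dst->u.tun_info.key.nhid = nhid;	/* consumed by the VXLAN driver */
	skb_dst_set(skb, &tunnel_dst->dst);

	return 0;
}

The producing side is visible in br_forward() below, which records the ID with BR_INPUT_SKB_CB(skb)->backup_nhid = READ_ONCE(to->backup_nhid) before substituting the backup port.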
350 lines
8.0 KiB
C
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Forwarding decision
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/netfilter_bridge.h>
#include "br_private.h"
/* Don't forward packets to originating port or forwarding disabled */
static inline int should_deliver(const struct net_bridge_port *p,
				 const struct sk_buff *skb)
{
	struct net_bridge_vlan_group *vg;

	vg = nbp_vlan_group_rcu(p);
	return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
		p->state == BR_STATE_FORWARDING && br_allowed_egress(vg, skb) &&
		nbp_switchdev_allowed_egress(p, skb) &&
		!br_skb_isolated(p, skb);
}
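
/* Restore the Ethernet header and hand the skb to the egress device,
 * dropping it if the device cannot forward a frame of this size.
 */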
int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb_push(skb, ETH_HLEN);
	if (!is_skb_forwardable(skb->dev, skb))
		goto drop;

	br_drop_fake_rtable(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    eth_type_vlan(skb->protocol)) {
		int depth;

		if (!vlan_get_protocol_and_depth(skb, skb->protocol, &depth))
			goto drop;

		skb_set_network_header(skb, depth);
	}

	br_switchdev_frame_set_offload_fwd_mark(skb);

	dev_queue_xmit(skb);

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);

int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb_clear_tstamp(skb);
	return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING,
		       net, sk, skb, NULL, skb->dev,
		       br_dev_queue_push_xmit);
}
EXPORT_SYMBOL_GPL(br_forward_finish);
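
/* Send the skb out through 'to', traversing the NF_BR_FORWARD hook for
 * forwarded traffic or NF_BR_LOCAL_OUT for locally originated traffic.
 */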
static void __br_forward(const struct net_bridge_port *to,
			 struct sk_buff *skb, bool local_orig)
{
	struct net_bridge_vlan_group *vg;
	struct net_device *indev;
	struct net *net;
	int br_hook;

	/* Mark the skb for forwarding offload early so that br_handle_vlan()
	 * can know whether to pop the VLAN header on egress or keep it.
	 */
	nbp_switchdev_frame_mark_tx_fwd_offload(to, skb);

	vg = nbp_vlan_group_rcu(to);
	skb = br_handle_vlan(to->br, to, vg, skb);
	if (!skb)
		return;

	indev = skb->dev;
	skb->dev = to->dev;
	if (!local_orig) {
		if (skb_warn_if_lro(skb)) {
			kfree_skb(skb);
			return;
		}
		br_hook = NF_BR_FORWARD;
		skb_forward_csum(skb);
		net = dev_net(indev);
	} else {
		if (unlikely(netpoll_tx_running(to->br->dev))) {
			skb_push(skb, ETH_HLEN);
			if (!is_skb_forwardable(skb->dev, skb))
				kfree_skb(skb);
			else
				br_netpoll_send_skb(to, skb);
			return;
		}
		br_hook = NF_BR_LOCAL_OUT;
		net = dev_net(skb->dev);
		indev = NULL;
	}

	NF_HOOK(NFPROTO_BRIDGE, br_hook,
		net, NULL, skb, indev, skb->dev,
		br_forward_finish);
}
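
/* Forward a clone of the skb via 'prev' so that the original skb remains
 * available for further delivery (e.g. local receive or another port).
 */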
static int deliver_clone(const struct net_bridge_port *prev,
			 struct sk_buff *skb, bool local_orig)
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		dev->stats.tx_dropped++;
		return -ENOMEM;
	}

	__br_forward(prev, skb, local_orig);
	return 0;
}
/**
 * br_forward - forward a packet to a specific port
 * @to: destination port
 * @skb: packet being forwarded
 * @local_rcv: packet will be received locally after forwarding
 * @local_orig: packet is locally originated
 *
 * Should be called with rcu_read_lock.
 */
void br_forward(const struct net_bridge_port *to,
		struct sk_buff *skb, bool local_rcv, bool local_orig)
{
	if (unlikely(!to))
		goto out;

	/* redirect to backup link if the destination port is down */
	if (rcu_access_pointer(to->backup_port) && !netif_carrier_ok(to->dev)) {
		struct net_bridge_port *backup_port;

		backup_port = rcu_dereference(to->backup_port);
		if (unlikely(!backup_port))
			goto out;
		BR_INPUT_SKB_CB(skb)->backup_nhid = READ_ONCE(to->backup_nhid);
		to = backup_port;
	}

	if (should_deliver(to, skb)) {
		if (local_rcv)
			deliver_clone(to, skb, local_orig);
		else
			__br_forward(to, skb, local_orig);
		return;
	}

out:
	if (!local_rcv)
		kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(br_forward);
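
/* Deliver the skb to the previously selected port (via a clone) and make
 * 'p' the new pending port, or return 'prev' unchanged when 'p' should not
 * receive this packet. Delaying delivery by one port lets the last eligible
 * port receive the original skb instead of a clone.
 */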
static struct net_bridge_port *maybe_deliver(
	struct net_bridge_port *prev, struct net_bridge_port *p,
	struct sk_buff *skb, bool local_orig)
{
	u8 igmp_type = br_multicast_igmp_type(skb);
	int err;

	if (!should_deliver(p, skb))
		return prev;

	nbp_switchdev_frame_mark_tx_fwd_to_hwdom(p, skb);

	if (!prev)
		goto out;

	err = deliver_clone(prev, skb, local_orig);
	if (err)
		return ERR_PTR(err);

out:
	br_multicast_count(p->br, p, skb, igmp_type, BR_MCAST_DIR_TX);

	return p;
}
/* called under rcu_read_lock */
void br_flood(struct net_bridge *br, struct sk_buff *skb,
	      enum br_pkt_type pkt_type, bool local_rcv, bool local_orig,
	      u16 vid)
{
	struct net_bridge_port *prev = NULL;
	struct net_bridge_port *p;

	br_tc_skb_miss_set(skb, pkt_type != BR_PKT_BROADCAST);

	list_for_each_entry_rcu(p, &br->port_list, list) {
		/* Do not flood unicast traffic to ports that turn it off, nor
		 * other traffic if flood off, except for traffic we originate
		 */
		switch (pkt_type) {
		case BR_PKT_UNICAST:
			if (!(p->flags & BR_FLOOD))
				continue;
			break;
		case BR_PKT_MULTICAST:
			if (!(p->flags & BR_MCAST_FLOOD) && skb->dev != br->dev)
				continue;
			break;
		case BR_PKT_BROADCAST:
			if (!(p->flags & BR_BCAST_FLOOD) && skb->dev != br->dev)
				continue;
			break;
		}

		/* Do not flood to ports that enable proxy ARP */
		if (p->flags & BR_PROXYARP)
			continue;
		if (BR_INPUT_SKB_CB(skb)->proxyarp_replied &&
		    ((p->flags & BR_PROXYARP_WIFI) ||
		     br_is_neigh_suppress_enabled(p, vid)))
			continue;

		prev = maybe_deliver(prev, p, skb, local_orig);
		if (IS_ERR(prev))
			goto out;
	}

	if (!prev)
		goto out;

	if (local_rcv)
		deliver_clone(prev, skb, local_orig);
	else
		__br_forward(prev, skb, local_orig);
	return;

out:
	if (!local_rcv)
		kfree_skb(skb);
}
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
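/* Deliver a copy of the skb to 'p', rewriting the destination MAC to 'addr'
 * for multicast-to-unicast delivery unless 'addr' is the broadcast address.
 */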
static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb,
			       const unsigned char *addr, bool local_orig)
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
	const unsigned char *src = eth_hdr(skb)->h_source;

	if (!should_deliver(p, skb))
		return;

	/* Even with hairpin, no soliloquies - prevent breaking IPv6 DAD */
	if (skb->dev == p->dev && ether_addr_equal(src, addr))
		return;

	skb = skb_copy(skb, GFP_ATOMIC);
	if (!skb) {
		dev->stats.tx_dropped++;
		return;
	}

	if (!is_broadcast_ether_addr(addr))
		memcpy(eth_hdr(skb)->h_dest, addr, ETH_ALEN);

	__br_forward(p, skb, local_orig);
}
/* called with rcu_read_lock */
void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
			struct sk_buff *skb,
			struct net_bridge_mcast *brmctx,
			bool local_rcv, bool local_orig)
{
	struct net_bridge_port *prev = NULL;
	struct net_bridge_port_group *p;
	bool allow_mode_include = true;
	struct hlist_node *rp;

	rp = br_multicast_get_first_rport_node(brmctx, skb);

	if (mdst) {
		p = rcu_dereference(mdst->ports);
		if (br_multicast_should_handle_mode(brmctx, mdst->addr.proto) &&
		    br_multicast_is_star_g(&mdst->addr))
			allow_mode_include = false;
	} else {
		p = NULL;
		br_tc_skb_miss_set(skb, true);
	}

	while (p || rp) {
		struct net_bridge_port *port, *lport, *rport;

		lport = p ? p->key.port : NULL;
		rport = br_multicast_rport_from_node_skb(rp, skb);

		if ((unsigned long)lport > (unsigned long)rport) {
			port = lport;

			if (port->flags & BR_MULTICAST_TO_UNICAST) {
				maybe_deliver_addr(lport, skb, p->eth_addr,
						   local_orig);
				goto delivered;
			}
			if ((!allow_mode_include &&
			     p->filter_mode == MCAST_INCLUDE) ||
			    (p->flags & MDB_PG_FLAGS_BLOCKED))
				goto delivered;
		} else {
			port = rport;
		}

		prev = maybe_deliver(prev, port, skb, local_orig);
		if (IS_ERR(prev))
			goto out;
delivered:
		if ((unsigned long)lport >= (unsigned long)port)
			p = rcu_dereference(p->next);
		if ((unsigned long)rport >= (unsigned long)port)
			rp = rcu_dereference(hlist_next_rcu(rp));
	}

	if (!prev)
		goto out;

	if (local_rcv)
		deliver_clone(prev, skb, local_orig);
	else
		__br_forward(prev, skb, local_orig);
	return;

out:
	if (!local_rcv)
		kfree_skb(skb);
}
#endif