diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 6be9fed50ed5..976f06462223 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -26,7 +26,6 @@
 #include "sja1105_tas.h"
 
 #define SJA1105_UNKNOWN_MULTICAST	0x010000000000ull
-#define SJA1105_DEFAULT_VLAN		(VLAN_N_VID - 1)
 
 static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len,
                              unsigned int startup_delay)
@@ -136,6 +135,9 @@ static int sja1105_commit_pvid(struct dsa_switch *ds, int port)
                 drop_untagged = true;
         }
 
+        if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
+                drop_untagged = true;
+
         return sja1105_drop_untagged(ds, port, drop_untagged);
 }
 
@@ -217,6 +219,12 @@ static int sja1105_init_mac_settings(struct sja1105_private *priv)
                  */
                 if (dsa_port_is_dsa(dp))
                         dp->learning = true;
+
+                /* Disallow untagged packets from being received on the
+                 * CPU and DSA ports.
+                 */
+                if (dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))
+                        mac[dp->index].drpuntag = true;
         }
 
         return 0;
diff --git a/include/linux/dsa/sja1105.h b/include/linux/dsa/sja1105.h
index 6b0dc9ff92d1..8c5601f1c979 100644
--- a/include/linux/dsa/sja1105.h
+++ b/include/linux/dsa/sja1105.h
@@ -16,6 +16,8 @@
 #define ETH_P_SJA1105_META		0x0008
 #define ETH_P_SJA1110			0xdadc
 
+#define SJA1105_DEFAULT_VLAN		(VLAN_N_VID - 1)
+
 /* IEEE 802.3 Annex 57A: Slow Protocols PDUs (01:80:C2:xx:xx:xx) */
 #define SJA1105_LINKLOCAL_FILTER_A	0x0180C2000000ull
 #define SJA1105_LINKLOCAL_FILTER_A_MASK	0xFFFFFF000000ull
diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c
index 5b80a9049e2c..a49308fbd19f 100644
--- a/net/dsa/tag_sja1105.c
+++ b/net/dsa/tag_sja1105.c
@@ -168,6 +168,36 @@ static struct sk_buff *sja1105_imprecise_xmit(struct sk_buff *skb,
         return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp), tx_vid);
 }
 
+/* Transform untagged control packets into pvid-tagged control packets so that
+ * all packets sent by this tagger are VLAN-tagged and we can configure the
+ * switch to drop untagged packets coming from the DSA master.
+ */
+static struct sk_buff *sja1105_pvid_tag_control_pkt(struct dsa_port *dp,
+                                                    struct sk_buff *skb, u8 pcp)
+{
+        __be16 xmit_tpid = htons(sja1105_xmit_tpid(dp));
+        struct vlan_ethhdr *hdr;
+
+        /* If VLAN tag is in hwaccel area, move it to the payload
+         * to deal with both cases uniformly and to ensure that
+         * the VLANs are added in the right order.
+         */
+        if (unlikely(skb_vlan_tag_present(skb))) {
+                skb = __vlan_hwaccel_push_inside(skb);
+                if (!skb)
+                        return NULL;
+        }
+
+        hdr = (struct vlan_ethhdr *)skb_mac_header(skb);
+
+        /* If skb is already VLAN-tagged, leave that VLAN ID in place */
+        if (hdr->h_vlan_proto == xmit_tpid)
+                return skb;
+
+        return vlan_insert_tag(skb, xmit_tpid, (pcp << VLAN_PRIO_SHIFT) |
+                               SJA1105_DEFAULT_VLAN);
+}
+
 static struct sk_buff *sja1105_xmit(struct sk_buff *skb,
                                     struct net_device *netdev)
 {
@@ -183,8 +213,13 @@ static struct sk_buff *sja1105_xmit(struct sk_buff *skb,
          * but instead SPI-installed management routes. Part 2 of this
          * is the .port_deferred_xmit driver callback.
          */
-        if (unlikely(sja1105_is_link_local(skb)))
+        if (unlikely(sja1105_is_link_local(skb))) {
+                skb = sja1105_pvid_tag_control_pkt(dp, skb, pcp);
+                if (!skb)
+                        return NULL;
+
                 return sja1105_defer_xmit(dp, skb);
+        }
 
         return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp),
                               ((pcp << VLAN_PRIO_SHIFT) | tx_vid));
@@ -213,6 +248,10 @@ static struct sk_buff *sja1110_xmit(struct sk_buff *skb,
                 return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp),
                                       ((pcp << VLAN_PRIO_SHIFT) | tx_vid));
 
+        skb = sja1105_pvid_tag_control_pkt(dp, skb, pcp);
+        if (!skb)
+                return NULL;
+
         skb_push(skb, SJA1110_HEADER_LEN);
 
         dsa_alloc_etype_header(skb, SJA1110_HEADER_LEN);
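
For context, the frame transformation that sja1105_pvid_tag_control_pkt() delegates to vlan_insert_tag() can be reproduced in a few lines of plain C. The sketch below is illustrative userspace code, not part of the patch: it hard-codes a TPID of 0x8100, whereas the value actually placed on the wire comes from sja1105_xmit_tpid() and depends on the switch's VLAN configuration, and the helper and frame contents are made up for the example. It inserts a 4-byte 802.1Q header after the MAC addresses with TCI = (pcp << 13) | SJA1105_DEFAULT_VLAN, which is what lets an otherwise untagged link-local control packet pass a CPU port configured with drpuntag.

/* Illustrative only: mimics the tag layout produced by vlan_insert_tag()
 * on an untagged control frame; not kernel code.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define VLAN_PRIO_SHIFT		13
#define SJA1105_DEFAULT_VLAN	4094	/* VLAN_N_VID - 1 */
#define VLAN_HLEN		4

/* Insert an 802.1Q tag right after the 12 bytes of MAC addresses.
 * 'len' is the current frame length; the buffer must have at least
 * len + VLAN_HLEN bytes of room.
 */
static size_t pvid_tag_frame(uint8_t *frame, size_t len,
			     uint16_t tpid, uint8_t pcp)
{
	uint16_t tci = (uint16_t)((pcp << VLAN_PRIO_SHIFT) | SJA1105_DEFAULT_VLAN);

	/* Shift EtherType + payload right by 4 bytes to make room for the tag */
	memmove(frame + 12 + VLAN_HLEN, frame + 12, len - 12);

	/* TPID and TCI are transmitted in network byte order */
	frame[12] = tpid >> 8;
	frame[13] = tpid & 0xff;
	frame[14] = tci >> 8;
	frame[15] = tci & 0xff;

	return len + VLAN_HLEN;
}

int main(void)
{
	/* Untagged LLDP-style control frame: link-local dst MAC, EtherType 0x88cc */
	uint8_t frame[64] = {
		0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e,	/* dst MAC */
		0x02, 0x00, 0x00, 0x00, 0x00, 0x01,	/* src MAC */
		0x88, 0xcc,				/* EtherType */
	};
	size_t len = pvid_tag_frame(frame, 14, 0x8100, 7);

	/* Prints ... 81 00 ef fe 88 cc: TPID 0x8100, PCP 7, VID 4094 */
	for (size_t i = 0; i < len; i++)
		printf("%02x%s", frame[i], i + 1 == len ? "\n" : " ");
	return 0;
}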