Merge branch 'mtk_eth_soc-rx-vlan-offload-improvement-dsa-hardware-untag-support'

Felix Fietkau says:

====================
mtk_eth_soc rx vlan offload improvement + dsa hardware untag support

This series improves rx vlan offloading on mtk_eth_soc and extends it to
support hardware DSA untagging where possible.
This improves performance by avoiding calls into the DSA tag driver receive
function, including mangling of skb->data.

This is split out of a previous series, which also added other fixes and
multiqueue support.
====================

Link: https://lore.kernel.org/r/20221114124214.58199-1-nbd@nbd.name
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
Jakub Kicinski 2022-11-15 20:23:18 -08:00
commit 0f54d36e2f
4 changed files with 110 additions and 16 deletions

View File

@ -23,6 +23,7 @@
#include <linux/jhash.h>
#include <linux/bitfield.h>
#include <net/dsa.h>
#include <net/dst_metadata.h>
#include "mtk_eth_soc.h"
#include "mtk_wed.h"
@ -1936,16 +1937,22 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
htons(RX_DMA_VPID(trxd.rxd4)),
RX_DMA_VID(trxd.rxd4));
} else if (trxd.rxd2 & RX_DMA_VTAG) {
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
__vlan_hwaccel_put_tag(skb, htons(RX_DMA_VPID(trxd.rxd3)),
RX_DMA_VID(trxd.rxd3));
}
}
/* If the device is attached to a dsa switch, the special
* tag inserted in VLAN field by hw switch can be offloaded
* by RX HW VLAN offload. Clear vlan info.
*/
if (netdev_uses_dsa(netdev))
__vlan_hwaccel_clear_tag(skb);
/* When using VLAN untagging in combination with DSA, the
* hardware treats the MTK special tag as a VLAN and untags it.
*/
if (skb_vlan_tag_present(skb) && netdev_uses_dsa(netdev)) {
unsigned int port = ntohs(skb->vlan_proto) & GENMASK(2, 0);
if (port < ARRAY_SIZE(eth->dsa_meta) &&
eth->dsa_meta[port])
skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst);
__vlan_hwaccel_clear_tag(skb);
}
skb_record_rx_queue(skb, 0);
@ -2724,15 +2731,30 @@ static netdev_features_t mtk_fix_features(struct net_device *dev,
/* ndo_set_features handler: apply runtime-toggleable features for @dev.
 *
 * Handles two feature bits:
 *  - NETIF_F_LRO: can only be turned off here (mtk_hwlro_netdev_disable);
 *    enabling is handled elsewhere.
 *  - NETIF_F_HW_VLAN_CTAG_RX: programmed into MTK_CDMP_EG_CTRL. The
 *    register is shared by all MACs on this mtk_eth instance, so the
 *    feature bit is mirrored into every sibling netdev to keep software
 *    state consistent with the single hardware switch.
 *
 * Returns 0 (no failure paths today).
 */
static int mtk_set_features(struct net_device *dev, netdev_features_t features)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	netdev_features_t diff = dev->features ^ features;
	int i;

	if ((diff & NETIF_F_LRO) && !(features & NETIF_F_LRO))
		mtk_hwlro_netdev_disable(dev);

	/* Set RX VLAN offloading */
	if (!(diff & NETIF_F_HW_VLAN_CTAG_RX))
		return 0;

	mtk_w32(eth, !!(features & NETIF_F_HW_VLAN_CTAG_RX),
		MTK_CDMP_EG_CTRL);

	/* sync features with other MAC */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i] || eth->netdev[i] == dev)
			continue;
		eth->netdev[i]->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
		eth->netdev[i]->features |= features & NETIF_F_HW_VLAN_CTAG_RX;
	}

	return 0;
}
/* wait for DMA to finish whatever it is doing before we start using it again */
@ -2975,11 +2997,46 @@ static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
mtk_w32(eth, 0, MTK_RST_GL);
}
/* Return true when @dev is a DSA master whose switch tagger speaks the
 * MediaTek tag protocol — i.e. the special tag this hardware can parse
 * and untag itself. Always false when DSA support is compiled out.
 */
static bool mtk_uses_dsa(struct net_device *dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
	if (!netdev_uses_dsa(dev))
		return false;

	return dev->dsa_ptr->tag_ops->proto == DSA_TAG_PROTO_MTK;
#else
	return false;
#endif
}
static int mtk_open(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
int err;
int i, err;
if (mtk_uses_dsa(dev) && !eth->prog) {
for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
struct metadata_dst *md_dst = eth->dsa_meta[i];
if (md_dst)
continue;
md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
GFP_KERNEL);
if (!md_dst)
return -ENOMEM;
md_dst->u.port_info.port_id = i;
eth->dsa_meta[i] = md_dst;
}
} else {
/* Hardware special tag parsing needs to be disabled if at least
* one MAC does not use DSA.
*/
u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
val &= ~MTK_CDMP_STAG_EN;
mtk_w32(eth, val, MTK_CDMP_IG_CTRL);
}
err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
if (err) {
@ -3306,6 +3363,10 @@ static int mtk_hw_init(struct mtk_eth *eth)
*/
val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
}
/* Enable RX VLan Offloading */
mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
@ -3523,6 +3584,12 @@ static int mtk_free_dev(struct mtk_eth *eth)
free_netdev(eth->netdev[i]);
}
for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
if (!eth->dsa_meta[i])
break;
metadata_dst_free(eth->dsa_meta[i]);
}
return 0;
}

View File

@ -22,6 +22,9 @@
#include <linux/bpf_trace.h>
#include "mtk_ppe.h"
#define MTK_MAX_DSA_PORTS 7
#define MTK_DSA_PORT_MASK GENMASK(2, 0)
#define MTK_QDMA_PAGE_SIZE 2048
#define MTK_MAX_RX_LENGTH 1536
#define MTK_MAX_RX_LENGTH_2K 2048
@ -91,6 +94,9 @@
#define MTK_CDMQ_IG_CTRL 0x1400
#define MTK_CDMQ_STAG_EN BIT(0)
/* CDMQ Egress Control Register */
#define MTK_CDMQ_EG_CTRL 0x1404
/* CDMP Ingress Control Register */
#define MTK_CDMP_IG_CTRL 0x400
#define MTK_CDMP_STAG_EN BIT(0)
@ -1121,6 +1127,8 @@ struct mtk_eth {
int ip_align;
struct metadata_dst *dsa_meta[MTK_MAX_DSA_PORTS];
struct mtk_ppe *ppe[2];
struct rhashtable flow_table;

View File

@ -971,12 +971,14 @@ bool __skb_flow_dissect(const struct net *net,
#if IS_ENABLED(CONFIG_NET_DSA)
if (unlikely(skb->dev && netdev_uses_dsa(skb->dev) &&
proto == htons(ETH_P_XDSA))) {
struct metadata_dst *md_dst = skb_metadata_dst(skb);
const struct dsa_device_ops *ops;
int offset = 0;
ops = skb->dev->dsa_ptr->tag_ops;
/* Only DSA header taggers break flow dissection */
if (ops->needed_headroom) {
if (ops->needed_headroom &&
(!md_dst || md_dst->type != METADATA_HW_PORT_MUX)) {
if (ops->flow_dissect)
ops->flow_dissect(skb, &proto, &offset);
else

View File

@ -11,6 +11,7 @@
#include <linux/netdevice.h>
#include <linux/sysfs.h>
#include <linux/ptp_classify.h>
#include <net/dst_metadata.h>
#include "dsa_priv.h"
@ -216,6 +217,7 @@ static bool dsa_skb_defer_rx_timestamp(struct dsa_slave_priv *p,
static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *unused)
{
struct metadata_dst *md_dst = skb_metadata_dst(skb);
struct dsa_port *cpu_dp = dev->dsa_ptr;
struct sk_buff *nskb = NULL;
struct dsa_slave_priv *p;
@ -229,7 +231,22 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
if (!skb)
return 0;
nskb = cpu_dp->rcv(skb, dev);
if (md_dst && md_dst->type == METADATA_HW_PORT_MUX) {
unsigned int port = md_dst->u.port_info.port_id;
skb_dst_drop(skb);
if (!skb_has_extensions(skb))
skb->slow_gro = 0;
skb->dev = dsa_master_find_slave(dev, 0, port);
if (likely(skb->dev)) {
dsa_default_offload_fwd_mark(skb);
nskb = skb;
}
} else {
nskb = cpu_dp->rcv(skb, dev);
}
if (!nskb) {
kfree_skb(skb);
return 0;