Merge branch 'net-ethernet-mtk_eth_soc-add-basic-support-for-mt7988-soc'

Daniel Golle says:

====================
net: ethernet: mtk_eth_soc: add basic support for MT7988 SoC

The MediaTek MT7988 SoC introduces a new version (3) of the NETSYS
block and comes with three MACs instead of two.

The first MAC can be internally connected to a built-in Gigabit
Ethernet switch with four 1000M/100M/10M twisted pair user ports.

The second MAC can be internally connected to a built-in 2500Base-T
Ethernet PHY.

There are two SerDes units which can operate in USXGMII, 10GBase-(K)R,
5GBase-R, 2500Base-X, 1000Base-X or SGMII mode.

This series adds initial support for NETSYS v3 and for the first MAC of
the MT7988 SoC, which connects to the built-in DSA switch.

The switch is supported since commit 110c18bfed ("net: dsa: mt7530:
introduce driver for MT7988 built-in switch").

Basic support for the 1000M/100M/10M built-in PHYs connected to the
switch ports has been present since commit 98c485eaf509b ("net: phy: add
driver for MediaTek SoC built-in GE PHYs").
====================

Link: https://lore.kernel.org/r/cover.1690246066.git.daniel@makrotopia.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski 2023-07-26 22:04:40 -07:00
commit 707116b6b3
7 changed files with 669 additions and 227 deletions
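
The diffs below revolve around one refactoring: the MTK_NETSYS_V2 capability
bit is dropped in favour of a per-SoC version field plus small inline helpers
(see the mtk_eth_soc.h hunk further down). As a rough sketch of the resulting
dispatch pattern (comments are illustrative only; the helper names are the
ones added by this series):

	if (mtk_is_netsys_v3_or_greater(eth)) {
		/* MT7988: NETSYS v3 paths (new register offsets, GMAC3, ...) */
	} else if (mtk_is_netsys_v2_or_greater(eth)) {
		/* MT7981/MT7986: NETSYS v2 paths */
	} else {
		/* mtk_is_netsys_v1(): legacy SoCs */
	}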

View File

@ -19,10 +19,12 @@ properties:
enum:
- mediatek,mt2701-eth
- mediatek,mt7623-eth
- mediatek,mt7621-eth
- mediatek,mt7622-eth
- mediatek,mt7629-eth
- mediatek,mt7981-eth
- mediatek,mt7986-eth
- mediatek,mt7988-eth
- ralink,rt5350-eth
reg:
@ -32,7 +34,7 @@ properties:
clock-names: true
interrupts:
minItems: 3
minItems: 1
maxItems: 4
power-domains:
@ -60,6 +62,12 @@ properties:
Phandle to the mediatek hifsys controller used to provide various clocks
and reset to the system.
mediatek,infracfg:
$ref: /schemas/types.yaml#/definitions/phandle
description:
Phandle to the syscon node that handles the path from GMAC to
PHY variants.
mediatek,sgmiisys:
$ref: /schemas/types.yaml#/definitions/phandle-array
minItems: 1
@ -121,6 +129,8 @@ allOf:
- const: gp1
- const: gp2
mediatek,infracfg: false
mediatek,pctl:
$ref: /schemas/types.yaml#/definitions/phandle
description:
@ -131,6 +141,32 @@ allOf:
mediatek,wed-pcie: false
- if:
properties:
compatible:
contains:
enum:
- mediatek,mt7621-eth
then:
properties:
interrupts:
maxItems: 1
clocks:
minItems: 2
maxItems: 2
clock-names:
items:
- const: ethif
- const: fe
mediatek,infracfg: false
mediatek,wed: false
mediatek,wed-pcie: false
- if:
properties:
compatible:
@ -159,6 +195,8 @@ allOf:
- const: sgmii_ck
- const: eth2pll
mediatek,infracfg: false
mediatek,sgmiisys:
minItems: 1
maxItems: 1
@ -204,12 +242,6 @@ allOf:
- const: sgmii_ck
- const: eth2pll
mediatek,infracfg:
$ref: /schemas/types.yaml#/definitions/phandle
description:
Phandle to the syscon node that handles the path from GMAC to
PHY variants.
mediatek,sgmiisys:
minItems: 2
maxItems: 2
@ -250,6 +282,8 @@ allOf:
- const: netsys0
- const: netsys1
mediatek,infracfg: false
mediatek,sgmiisys:
minItems: 2
maxItems: 2
@ -286,6 +320,67 @@ allOf:
- const: netsys0
- const: netsys1
mediatek,infracfg: false
mediatek,sgmiisys:
minItems: 2
maxItems: 2
- if:
properties:
compatible:
contains:
const: mediatek,mt7988-eth
then:
properties:
interrupts:
minItems: 4
clocks:
minItems: 34
maxItems: 34
clock-names:
items:
- const: crypto
- const: fe
- const: gp2
- const: gp1
- const: gp3
- const: ethwarp_wocpu2
- const: ethwarp_wocpu1
- const: ethwarp_wocpu0
- const: esw
- const: netsys0
- const: netsys1
- const: sgmii_tx250m
- const: sgmii_rx250m
- const: sgmii2_tx250m
- const: sgmii2_rx250m
- const: top_usxgmii0_sel
- const: top_usxgmii1_sel
- const: top_sgm0_sel
- const: top_sgm1_sel
- const: top_xfi_phy0_xtal_sel
- const: top_xfi_phy1_xtal_sel
- const: top_eth_gmii_sel
- const: top_eth_refck_50m_sel
- const: top_eth_sys_200m_sel
- const: top_eth_sys_sel
- const: top_eth_xgmii_sel
- const: top_eth_mii_sel
- const: top_netsys_sel
- const: top_netsys_500m_sel
- const: top_netsys_pao_2x_sel
- const: top_netsys_sync_250m_sel
- const: top_netsys_ppefb_250m_sel
- const: top_netsys_warp_sel
- const: wocpu1
- const: wocpu0
- const: xgp1
- const: xgp2
- const: xgp3
mediatek,sgmiisys:
minItems: 2
maxItems: 2

View File

@ -15,10 +15,10 @@
struct mtk_eth_muxc {
const char *name;
int cap_bit;
int (*set_path)(struct mtk_eth *eth, int path);
int (*set_path)(struct mtk_eth *eth, u64 path);
};
static const char *mtk_eth_path_name(int path)
static const char *mtk_eth_path_name(u64 path)
{
switch (path) {
case MTK_ETH_PATH_GMAC1_RGMII:
@ -40,10 +40,10 @@ static const char *mtk_eth_path_name(int path)
}
}
static int set_mux_gdm1_to_gmac1_esw(struct mtk_eth *eth, int path)
static int set_mux_gdm1_to_gmac1_esw(struct mtk_eth *eth, u64 path)
{
bool updated = true;
u32 val, mask, set;
u32 mask, set, reg;
switch (path) {
case MTK_ETH_PATH_GMAC1_SGMII:
@ -59,11 +59,13 @@ static int set_mux_gdm1_to_gmac1_esw(struct mtk_eth *eth, int path)
break;
}
if (updated) {
val = mtk_r32(eth, MTK_MAC_MISC);
val = (val & mask) | set;
mtk_w32(eth, val, MTK_MAC_MISC);
}
if (mtk_is_netsys_v3_or_greater(eth))
reg = MTK_MAC_MISC_V3;
else
reg = MTK_MAC_MISC;
if (updated)
mtk_m32(eth, mask, set, reg);
dev_dbg(eth->dev, "path %s in %s updated = %d\n",
mtk_eth_path_name(path), __func__, updated);
@ -71,7 +73,7 @@ static int set_mux_gdm1_to_gmac1_esw(struct mtk_eth *eth, int path)
return 0;
}
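
The open-coded read/modify/write of MTK_MAC_MISC above is replaced by a call
to mtk_m32(), which this series exports from mtk_eth_soc.c (its body is elided
by the diff context there). A minimal sketch of such a helper, using only the
mtk_r32()/mtk_w32() accessors the driver already provides:

	u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg)
	{
		u32 val;

		/* read the register, clear the bits in mask, OR in set, write back */
		val = mtk_r32(eth, reg);
		val &= ~mask;
		val |= set;
		mtk_w32(eth, val, reg);

		return val;
	}

Note that on NETSYS v3 the MISC register moves from MTK_MAC_MISC (0x1000c) to
MTK_MAC_MISC_V3 (0x10010), as defined in the mtk_eth_soc.h hunk below.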
static int set_mux_gmac2_gmac0_to_gephy(struct mtk_eth *eth, int path)
static int set_mux_gmac2_gmac0_to_gephy(struct mtk_eth *eth, u64 path)
{
unsigned int val = 0;
bool updated = true;
@ -94,7 +96,7 @@ static int set_mux_gmac2_gmac0_to_gephy(struct mtk_eth *eth, int path)
return 0;
}
static int set_mux_u3_gmac2_to_qphy(struct mtk_eth *eth, int path)
static int set_mux_u3_gmac2_to_qphy(struct mtk_eth *eth, u64 path)
{
unsigned int val = 0, mask = 0, reg = 0;
bool updated = true;
@ -125,7 +127,7 @@ static int set_mux_u3_gmac2_to_qphy(struct mtk_eth *eth, int path)
return 0;
}
static int set_mux_gmac1_gmac2_to_sgmii_rgmii(struct mtk_eth *eth, int path)
static int set_mux_gmac1_gmac2_to_sgmii_rgmii(struct mtk_eth *eth, u64 path)
{
unsigned int val = 0;
bool updated = true;
@ -163,7 +165,7 @@ static int set_mux_gmac1_gmac2_to_sgmii_rgmii(struct mtk_eth *eth, int path)
return 0;
}
static int set_mux_gmac12_to_gephy_sgmii(struct mtk_eth *eth, int path)
static int set_mux_gmac12_to_gephy_sgmii(struct mtk_eth *eth, u64 path)
{
unsigned int val = 0;
bool updated = true;
@ -218,7 +220,7 @@ static const struct mtk_eth_muxc mtk_eth_muxc[] = {
},
};
static int mtk_eth_mux_setup(struct mtk_eth *eth, int path)
static int mtk_eth_mux_setup(struct mtk_eth *eth, u64 path)
{
int i, err = 0;
@ -249,7 +251,7 @@ out:
int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id)
{
int path;
u64 path;
path = (mac_id == 0) ? MTK_ETH_PATH_GMAC1_SGMII :
MTK_ETH_PATH_GMAC2_SGMII;
@ -260,7 +262,7 @@ int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id)
int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id)
{
int path = 0;
u64 path = 0;
if (mac_id == 1)
path = MTK_ETH_PATH_GMAC2_GEPHY;
@ -274,7 +276,7 @@ int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id)
int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id)
{
int path;
u64 path;
path = (mac_id == 0) ? MTK_ETH_PATH_GMAC1_RGMII :
MTK_ETH_PATH_GMAC2_RGMII;
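
The int-to-u64 conversion of the path arguments above matches the header
changes further down: with the MT7988 additions the capability and clock
bitmaps outgrow 32 bits, so struct mtk_soc_data switches to u64
caps/required_clks and the mask definitions move from BIT() to BIT_ULL().
The capability test itself is not part of this diff; a sketch of its assumed
shape on 64-bit masks:

	/* assumed shape only - all requested capability bits must be present */
	#define MTK_HAS_CAPS(caps, _x)	(((caps) & (_x)) == (_x))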

View File

@ -152,6 +152,54 @@ static const struct mtk_reg_map mt7986_reg_map = {
.pse_oq_sta = 0x01a0,
};
static const struct mtk_reg_map mt7988_reg_map = {
.tx_irq_mask = 0x461c,
.tx_irq_status = 0x4618,
.pdma = {
.rx_ptr = 0x6900,
.rx_cnt_cfg = 0x6904,
.pcrx_ptr = 0x6908,
.glo_cfg = 0x6a04,
.rst_idx = 0x6a08,
.delay_irq = 0x6a0c,
.irq_status = 0x6a20,
.irq_mask = 0x6a28,
.adma_rx_dbg0 = 0x6a38,
.int_grp = 0x6a50,
},
.qdma = {
.qtx_cfg = 0x4400,
.qtx_sch = 0x4404,
.rx_ptr = 0x4500,
.rx_cnt_cfg = 0x4504,
.qcrx_ptr = 0x4508,
.glo_cfg = 0x4604,
.rst_idx = 0x4608,
.delay_irq = 0x460c,
.fc_th = 0x4610,
.int_grp = 0x4620,
.hred = 0x4644,
.ctx_ptr = 0x4700,
.dtx_ptr = 0x4704,
.crx_ptr = 0x4710,
.drx_ptr = 0x4714,
.fq_head = 0x4720,
.fq_tail = 0x4724,
.fq_count = 0x4728,
.fq_blen = 0x472c,
.tx_sch_rate = 0x4798,
},
.gdm1_cnt = 0x1c00,
.gdma_to_ppe = 0x3333,
.ppe_base = 0x2000,
.wdma_base = {
[0] = 0x4800,
[1] = 0x4c00,
},
.pse_iq_sta = 0x0180,
.pse_oq_sta = 0x01a0,
};
/* strings used by ethtool */
static const struct mtk_ethtool_stats {
char str[ETH_GSTRING_LEN];
@ -179,10 +227,54 @@ static const struct mtk_ethtool_stats {
};
static const char * const mtk_clks_source_name[] = {
"ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
"sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
"sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
"sgmii_ck", "eth2pll", "wocpu0", "wocpu1", "netsys0", "netsys1"
"ethif",
"sgmiitop",
"esw",
"gp0",
"gp1",
"gp2",
"gp3",
"xgp1",
"xgp2",
"xgp3",
"crypto",
"fe",
"trgpll",
"sgmii_tx250m",
"sgmii_rx250m",
"sgmii_cdr_ref",
"sgmii_cdr_fb",
"sgmii2_tx250m",
"sgmii2_rx250m",
"sgmii2_cdr_ref",
"sgmii2_cdr_fb",
"sgmii_ck",
"eth2pll",
"wocpu0",
"wocpu1",
"netsys0",
"netsys1",
"ethwarp_wocpu2",
"ethwarp_wocpu1",
"ethwarp_wocpu0",
"top_usxgmii0_sel",
"top_usxgmii1_sel",
"top_sgm0_sel",
"top_sgm1_sel",
"top_xfi_phy0_xtal_sel",
"top_xfi_phy1_xtal_sel",
"top_eth_gmii_sel",
"top_eth_refck_50m_sel",
"top_eth_sys_200m_sel",
"top_eth_sys_sel",
"top_eth_xgmii_sel",
"top_eth_mii_sel",
"top_netsys_sel",
"top_netsys_500m_sel",
"top_netsys_pao_2x_sel",
"top_netsys_sync_250m_sel",
"top_netsys_ppefb_250m_sel",
"top_netsys_warp_sel",
};
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
@ -195,7 +287,7 @@ u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
return __raw_readl(eth->base + reg);
}
static u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg)
{
u32 val;
@ -400,6 +492,19 @@ static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
dev_err(eth->dev, "Missing PLL configuration, ethernet may not work\n");
}
static void mtk_setup_bridge_switch(struct mtk_eth *eth)
{
/* Force Port1 XGMAC Link Up */
mtk_m32(eth, 0, MTK_XGMAC_FORCE_LINK(MTK_GMAC1_ID),
MTK_XGMAC_STS(MTK_GMAC1_ID));
/* Adjust GSW bridge IPG to 11 */
mtk_m32(eth, GSWTX_IPG_MASK | GSWRX_IPG_MASK,
(GSW_IPG_11 << GSWTX_IPG_SHIFT) |
(GSW_IPG_11 << GSWRX_IPG_SHIFT),
MTK_GSW_CFG);
}
static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
phy_interface_t interface)
{
@ -459,6 +564,8 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
goto init_err;
}
break;
case PHY_INTERFACE_MODE_INTERNAL:
break;
default:
goto err_phy;
}
@ -528,6 +635,15 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
return;
}
/* Setup gmac */
if (mtk_is_netsys_v3_or_greater(eth) &&
mac->interface == PHY_INTERFACE_MODE_INTERNAL) {
mtk_w32(mac->hw, MTK_GDMA_XGDM_SEL, MTK_GDMA_EG_CTRL(mac->id));
mtk_w32(mac->hw, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(mac->id));
mtk_setup_bridge_switch(eth);
}
return;
err_phy:
@ -593,7 +709,7 @@ static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
if (mtk_is_netsys_v1(eth))
val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
if (IS_ENABLED(CONFIG_SOC_MT7621)) {
@ -740,11 +856,15 @@ static int mtk_mdio_init(struct mtk_eth *eth)
}
divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);
/* Configure MDC Turbo Mode */
if (mtk_is_netsys_v3_or_greater(eth))
mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3);
/* Configure MDC Divider */
val = mtk_r32(eth, MTK_PPSC);
val &= ~PPSC_MDC_CFG;
val |= FIELD_PREP(PPSC_MDC_CFG, divider) | PPSC_MDC_TURBO;
mtk_w32(eth, val, MTK_PPSC);
val = FIELD_PREP(PPSC_MDC_CFG, divider);
if (!mtk_is_netsys_v3_or_greater(eth))
val |= PPSC_MDC_TURBO;
mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC);
dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / divider);
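
A quick worked example of the divider arithmetic above; the MDC_MAX_FREQ
constant is not visible in this diff, so 25 MHz is assumed here purely for
illustration:

	/* hypothetical numbers, only to show the rounding and clamping:
	 *   MDC_MAX_FREQ = 25000000 (assumed), max_clk = 2500000 from the PHY node
	 *   divider = min(DIV_ROUND_UP(25000000, 2500000), 63) = 10
	 *   MDC frequency reported by the dev_dbg() above: 25000000 / 10 = 2500000 Hz
	 */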
@ -876,17 +996,32 @@ void mtk_stats_update_mac(struct mtk_mac *mac)
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
hw_stats->rx_flow_control_packets +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
hw_stats->tx_skip +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
hw_stats->tx_collisions +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
hw_stats->tx_bytes +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
if (stats)
hw_stats->tx_bytes += (stats << 32);
hw_stats->tx_packets +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
if (mtk_is_netsys_v3_or_greater(eth)) {
hw_stats->tx_skip +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x50 + offs);
hw_stats->tx_collisions +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x54 + offs);
hw_stats->tx_bytes +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x40 + offs);
stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x44 + offs);
if (stats)
hw_stats->tx_bytes += (stats << 32);
hw_stats->tx_packets +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x48 + offs);
} else {
hw_stats->tx_skip +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
hw_stats->tx_collisions +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
hw_stats->tx_bytes +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
if (stats)
hw_stats->tx_bytes += (stats << 32);
hw_stats->tx_packets +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
}
}
u64_stats_update_end(&hw_stats->syncp);
@ -896,7 +1031,7 @@ static void mtk_stats_update(struct mtk_eth *eth)
{
int i;
for (i = 0; i < MTK_MAC_COUNT; i++) {
for (i = 0; i < MTK_MAX_DEVS; i++) {
if (!eth->mac[i] || !eth->mac[i]->hw_stats)
continue;
if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
@ -970,7 +1105,7 @@ static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
if (mtk_is_netsys_v2_or_greater(eth)) {
rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
}
@ -1028,7 +1163,7 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
txd->txd4 = 0;
if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
if (mtk_is_netsys_v2_or_greater(eth)) {
txd->txd5 = 0;
txd->txd6 = 0;
txd->txd7 = 0;
@ -1190,7 +1325,19 @@ static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
data |= TX_DMA_LS0;
WRITE_ONCE(desc->txd3, data);
data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
/* set forward port */
switch (mac->id) {
case MTK_GMAC1_ID:
data = PSE_GDM1_PORT << TX_DMA_FPORT_SHIFT_V2;
break;
case MTK_GMAC2_ID:
data = PSE_GDM2_PORT << TX_DMA_FPORT_SHIFT_V2;
break;
case MTK_GMAC3_ID:
data = PSE_GDM3_PORT << TX_DMA_FPORT_SHIFT_V2;
break;
}
data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
WRITE_ONCE(desc->txd4, data);
@ -1201,6 +1348,8 @@ static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
/* tx checksum offload */
if (info->csum)
data |= TX_DMA_CHKSUM_V2;
if (mtk_is_netsys_v3_or_greater(eth) && netdev_uses_dsa(dev))
data |= TX_DMA_SPTAG_V3;
}
WRITE_ONCE(desc->txd5, data);
@ -1219,7 +1368,7 @@ static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd,
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
if (mtk_is_netsys_v2_or_greater(eth))
mtk_tx_set_dma_desc_v2(dev, txd, info);
else
mtk_tx_set_dma_desc_v1(dev, txd, info);
@ -1266,8 +1415,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
mtk_tx_set_dma_desc(dev, itxd, &txd_info);
itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
MTK_TX_FLAGS_FPORT1;
itx_buf->mac_id = mac->id;
setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
k++);
@ -1315,8 +1463,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
memset(tx_buf, 0, sizeof(*tx_buf));
tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
MTK_TX_FLAGS_FPORT1;
tx_buf->mac_id = mac->id;
setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
txd_info.size, k++);
@ -1401,7 +1548,7 @@ static int mtk_queue_stopped(struct mtk_eth *eth)
{
int i;
for (i = 0; i < MTK_MAC_COUNT; i++) {
for (i = 0; i < MTK_MAX_DEVS; i++) {
if (!eth->netdev[i])
continue;
if (netif_queue_stopped(eth->netdev[i]))
@ -1415,7 +1562,7 @@ static void mtk_wake_queue(struct mtk_eth *eth)
{
int i;
for (i = 0; i < MTK_MAC_COUNT; i++) {
for (i = 0; i < MTK_MAX_DEVS; i++) {
if (!eth->netdev[i])
continue;
netif_tx_wake_all_queues(eth->netdev[i]);
@ -1526,7 +1673,7 @@ static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
static bool mtk_page_pool_enabled(struct mtk_eth *eth)
{
return MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2);
return eth->soc->version == 2;
}
static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
@ -1618,7 +1765,7 @@ static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev,
}
mtk_tx_set_dma_desc(dev, txd, txd_info);
tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
tx_buf->mac_id = mac->id;
tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
@ -1868,13 +2015,26 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
break;
/* find out which mac the packet come from. values start at 1 */
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1;
else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
!(trxd.rxd4 & RX_DMA_SPECIAL_TAG))
mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
if (mtk_is_netsys_v2_or_greater(eth)) {
u32 val = RX_DMA_GET_SPORT_V2(trxd.rxd5);
if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
switch (val) {
case PSE_GDM1_PORT:
case PSE_GDM2_PORT:
mac = val - 1;
break;
case PSE_GDM3_PORT:
mac = MTK_GMAC3_ID;
break;
default:
break;
}
} else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
!(trxd.rxd4 & RX_DMA_SPECIAL_TAG)) {
mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
}
if (unlikely(mac < 0 || mac >= MTK_MAX_DEVS ||
!eth->netdev[mac]))
goto release_desc;
@ -1964,7 +2124,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
skb->dev = netdev;
bytes += skb->len;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
if (mtk_is_netsys_v2_or_greater(eth)) {
reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
if (hash != MTK_RXD5_FOE_ENTRY)
@ -1989,8 +2149,8 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
/* When using VLAN untagging in combination with DSA, the
* hardware treats the MTK special tag as a VLAN and untags it.
*/
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) &&
(trxd.rxd2 & RX_DMA_VTAG) && netdev_uses_dsa(netdev)) {
if (mtk_is_netsys_v1(eth) && (trxd.rxd2 & RX_DMA_VTAG) &&
netdev_uses_dsa(netdev)) {
unsigned int port = RX_DMA_VPID(trxd.rxd3) & GENMASK(2, 0);
if (port < ARRAY_SIZE(eth->dsa_meta) &&
@ -2094,7 +2254,6 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
while ((cpu != dma) && budget) {
u32 next_cpu = desc->txd2;
int mac = 0;
desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
@ -2102,15 +2261,13 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
tx_buf = mtk_desc_to_tx_buf(ring, desc,
eth->soc->txrx.txd_size);
if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
mac = 1;
if (!tx_buf->data)
break;
if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
if (tx_buf->type == MTK_TYPE_SKB)
mtk_poll_tx_done(eth, state, mac, tx_buf->data);
mtk_poll_tx_done(eth, state, tx_buf->mac_id,
tx_buf->data);
budget--;
}
@ -2300,7 +2457,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
txd->txd2 = next_ptr;
txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
txd->txd4 = 0;
if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
if (mtk_is_netsys_v2_or_greater(eth)) {
txd->txd5 = 0;
txd->txd6 = 0;
txd->txd7 = 0;
@ -2353,14 +2510,14 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
if (mtk_is_netsys_v1(eth))
val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
ofs += MTK_QTX_OFFSET;
}
val = MTK_QDMA_TX_SCH_MAX_WFQ | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
if (mtk_is_netsys_v2_or_greater(eth))
mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4);
} else {
mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
@ -2489,7 +2646,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
rxd->rxd3 = 0;
rxd->rxd4 = 0;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
if (mtk_is_netsys_v2_or_greater(eth)) {
rxd->rxd5 = 0;
rxd->rxd6 = 0;
rxd->rxd7 = 0;
@ -2911,7 +3068,7 @@ static void mtk_dma_free(struct mtk_eth *eth)
const struct mtk_soc_data *soc = eth->soc;
int i;
for (i = 0; i < MTK_MAC_COUNT; i++)
for (i = 0; i < MTK_MAX_DEVS; i++)
if (eth->netdev[i])
netdev_reset_queue(eth->netdev[i]);
if (eth->scratch_ring) {
@ -3037,7 +3194,7 @@ static int mtk_start_dma(struct mtk_eth *eth)
MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
if (mtk_is_netsys_v2_or_greater(eth))
val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
MTK_CHK_DDONE_EN | MTK_LEAKY_BUCKET_EN;
@ -3065,8 +3222,13 @@ static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
return;
for (i = 0; i < MTK_MAC_COUNT; i++) {
u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
for (i = 0; i < MTK_MAX_DEVS; i++) {
u32 val;
if (!eth->netdev[i])
continue;
val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
/* default setup the forward port to send frame to PDMA */
val &= ~0xffff;
@ -3076,7 +3238,7 @@ static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
val |= config;
if (eth->netdev[i] && netdev_uses_dsa(eth->netdev[i]))
if (netdev_uses_dsa(eth->netdev[i]))
val |= MTK_GDMA_SPECIAL_TAG;
mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
@ -3183,7 +3345,7 @@ static int mtk_open(struct net_device *dev)
phylink_start(mac->phylink);
netif_tx_start_all_queues(dev);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
if (mtk_is_netsys_v2_or_greater(eth))
return 0;
if (mtk_uses_dsa(dev) && !eth->prog) {
@ -3449,7 +3611,7 @@ static void mtk_hw_reset(struct mtk_eth *eth)
{
u32 val;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
if (mtk_is_netsys_v2_or_greater(eth)) {
regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
val = RSTCTRL_PPE0_V2;
} else {
@ -3461,7 +3623,7 @@ static void mtk_hw_reset(struct mtk_eth *eth)
ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
if (mtk_is_netsys_v2_or_greater(eth))
regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
0x3ffffff);
}
@ -3487,7 +3649,7 @@ static void mtk_hw_warm_reset(struct mtk_eth *eth)
return;
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
if (mtk_is_netsys_v2_or_greater(eth))
rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V2;
else
rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0;
@ -3657,7 +3819,7 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
else
mtk_hw_reset(eth);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
if (mtk_is_netsys_v2_or_greater(eth)) {
/* Set FE to PDMAv2 if necessary */
val = mtk_r32(eth, MTK_FE_GLO_MISC);
mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
@ -3678,15 +3840,15 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
* up with the more appropriate value when mtk_mac_config call is being
* invoked.
*/
for (i = 0; i < MTK_MAC_COUNT; i++) {
for (i = 0; i < MTK_MAX_DEVS; i++) {
struct net_device *dev = eth->netdev[i];
mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
if (dev) {
struct mtk_mac *mac = netdev_priv(dev);
if (!dev)
continue;
mtk_set_mcr_max_rx(mac, dev->mtu + MTK_RX_ETH_HLEN);
}
mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
mtk_set_mcr_max_rx(netdev_priv(dev),
dev->mtu + MTK_RX_ETH_HLEN);
}
/* Indicates CDM to parse the MTK special tag from CPU
@ -3694,7 +3856,7 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
*/
val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
if (mtk_is_netsys_v1(eth)) {
val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
@ -3716,7 +3878,24 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
if (mtk_is_netsys_v3_or_greater(eth)) {
/* PSE should not drop port1, port8 and port9 packets */
mtk_w32(eth, 0x00000302, PSE_DROP_CFG);
/* GDM and CDM Threshold */
mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES);
mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES);
/* Disable GDM1 RX CRC stripping */
mtk_m32(eth, MTK_GDMA_STRP_CRC, 0, MTK_GDMA_FWD_CFG(0));
/* PSE GDM3 MIB counter has incorrect hw default values,
* so the driver ought to read clear the values beforehand
* in case ethtool retrieve wrong mib values.
*/
for (i = 0; i < 0x80; i += 0x4)
mtk_r32(eth, reg_map->gdm1_cnt + 0x100 + i);
} else if (!mtk_is_netsys_v1(eth)) {
/* PSE should not drop port8 and port9 packets from WDMA Tx */
mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
@ -3866,7 +4045,7 @@ static void mtk_pending_work(struct work_struct *work)
mtk_prepare_for_reset(eth);
/* stop all devices to make sure that dma is properly shut down */
for (i = 0; i < MTK_MAC_COUNT; i++) {
for (i = 0; i < MTK_MAX_DEVS; i++) {
if (!eth->netdev[i] || !netif_running(eth->netdev[i]))
continue;
@ -3882,8 +4061,8 @@ static void mtk_pending_work(struct work_struct *work)
mtk_hw_init(eth, true);
/* restart DMA and enable IRQs */
for (i = 0; i < MTK_MAC_COUNT; i++) {
if (!test_bit(i, &restart))
for (i = 0; i < MTK_MAX_DEVS; i++) {
if (!eth->netdev[i] || !test_bit(i, &restart))
continue;
if (mtk_open(eth->netdev[i])) {
@ -3910,7 +4089,7 @@ static int mtk_free_dev(struct mtk_eth *eth)
{
int i;
for (i = 0; i < MTK_MAC_COUNT; i++) {
for (i = 0; i < MTK_MAX_DEVS; i++) {
if (!eth->netdev[i])
continue;
free_netdev(eth->netdev[i]);
@ -3929,7 +4108,7 @@ static int mtk_unreg_dev(struct mtk_eth *eth)
{
int i;
for (i = 0; i < MTK_MAC_COUNT; i++) {
for (i = 0; i < MTK_MAX_DEVS; i++) {
struct mtk_mac *mac;
if (!eth->netdev[i])
continue;
@ -4231,7 +4410,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
}
id = be32_to_cpup(_id);
if (id >= MTK_MAC_COUNT) {
if (id >= MTK_MAX_DEVS) {
dev_err(eth->dev, "%d is not a valid mac id\n", id);
return -EINVAL;
}
@ -4279,7 +4458,11 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
}
spin_lock_init(&mac->hw_stats->stats_lock);
u64_stats_init(&mac->hw_stats->syncp);
mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
if (mtk_is_netsys_v3_or_greater(eth))
mac->hw_stats->reg_offset = id * 0x80;
else
mac->hw_stats->reg_offset = id * 0x40;
/* phylink create */
err = of_get_phy_mode(np, &phy_mode);
@ -4333,6 +4516,17 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
mac->phylink_config.supported_interfaces);
}
if (mtk_is_netsys_v3_or_greater(mac->hw) &&
MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW_BIT) &&
id == MTK_GMAC1_ID) {
mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
MAC_SYM_PAUSE |
MAC_10000FD;
phy_interface_zero(mac->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
mac->phylink_config.supported_interfaces);
}
phylink = phylink_create(&mac->phylink_config,
of_fwnode_handle(mac->of_node),
phy_mode, &mtk_phylink_ops);
@ -4391,7 +4585,7 @@ void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
rtnl_lock();
for (i = 0; i < MTK_MAC_COUNT; i++) {
for (i = 0; i < MTK_MAX_DEVS; i++) {
dev = eth->netdev[i];
if (!dev || !(dev->flags & IFF_UP))
@ -4521,7 +4715,7 @@ static int mtk_probe(struct platform_device *pdev)
}
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
if (mtk_is_netsys_v2_or_greater(eth)) {
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
err = -EINVAL;
@ -4629,9 +4823,8 @@ static int mtk_probe(struct platform_device *pdev)
}
if (eth->soc->offload_version) {
u32 num_ppe;
u32 num_ppe = mtk_is_netsys_v2_or_greater(eth) ? 2 : 1;
num_ppe = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe);
for (i = 0; i < num_ppe; i++) {
u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
@ -4698,7 +4891,7 @@ static int mtk_remove(struct platform_device *pdev)
int i;
/* stop all devices to make sure that dma is properly shut down */
for (i = 0; i < MTK_MAC_COUNT; i++) {
for (i = 0; i < MTK_MAX_DEVS; i++) {
if (!eth->netdev[i])
continue;
mtk_stop(eth->netdev[i]);
@ -4723,6 +4916,7 @@ static const struct mtk_soc_data mt2701_data = {
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
.version = 1,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
@ -4739,6 +4933,7 @@ static const struct mtk_soc_data mt7621_data = {
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7621_CLKS_BITMAP,
.required_pctl = false,
.version = 1,
.offload_version = 1,
.hash_offset = 2,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
@ -4759,6 +4954,7 @@ static const struct mtk_soc_data mt7622_data = {
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7622_CLKS_BITMAP,
.required_pctl = false,
.version = 1,
.offload_version = 2,
.hash_offset = 2,
.has_accounting = true,
@ -4779,6 +4975,7 @@ static const struct mtk_soc_data mt7623_data = {
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
.version = 1,
.offload_version = 1,
.hash_offset = 2,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
@ -4801,6 +4998,7 @@ static const struct mtk_soc_data mt7629_data = {
.required_clks = MT7629_CLKS_BITMAP,
.required_pctl = false,
.has_accounting = true,
.version = 1,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
@ -4818,6 +5016,7 @@ static const struct mtk_soc_data mt7981_data = {
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7981_CLKS_BITMAP,
.required_pctl = false,
.version = 2,
.offload_version = 2,
.hash_offset = 4,
.has_accounting = true,
@ -4839,6 +5038,7 @@ static const struct mtk_soc_data mt7986_data = {
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7986_CLKS_BITMAP,
.required_pctl = false,
.version = 2,
.offload_version = 2,
.hash_offset = 4,
.has_accounting = true,
@ -4853,12 +5053,31 @@ static const struct mtk_soc_data mt7986_data = {
},
};
static const struct mtk_soc_data mt7988_data = {
.reg_map = &mt7988_reg_map,
.ana_rgc3 = 0x128,
.caps = MT7988_CAPS,
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7988_CLKS_BITMAP,
.required_pctl = false,
.version = 3,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma_v2),
.rxd_size = sizeof(struct mtk_rx_dma_v2),
.rx_irq_done_mask = MTK_RX_DONE_INT_V2,
.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
},
};
static const struct mtk_soc_data rt5350_data = {
.reg_map = &mt7628_reg_map,
.caps = MT7628_CAPS,
.hw_features = MTK_HW_FEATURES_MT7628,
.required_clks = MT7628_CLKS_BITMAP,
.required_pctl = false,
.version = 1,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
@ -4870,14 +5089,15 @@ static const struct mtk_soc_data rt5350_data = {
};
const struct of_device_id of_mtk_match[] = {
{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
{ .compatible = "mediatek,mt7981-eth", .data = &mt7981_data},
{ .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
{ .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data },
{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data },
{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data },
{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data },
{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data },
{ .compatible = "mediatek,mt7981-eth", .data = &mt7981_data },
{ .compatible = "mediatek,mt7986-eth", .data = &mt7986_data },
{ .compatible = "mediatek,mt7988-eth", .data = &mt7988_data },
{ .compatible = "ralink,rt5350-eth", .data = &rt5350_data },
{},
};
MODULE_DEVICE_TABLE(of, of_mtk_match);

View File

@ -33,7 +33,6 @@
#define MTK_TX_DMA_BUF_LEN_V2 0xffff
#define MTK_QDMA_RING_SIZE 2048
#define MTK_DMA_SIZE 512
#define MTK_MAC_COUNT 2
#define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN)
#define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
#define MTK_DMA_DUMMY_DESC 0xffffffff
@ -118,14 +117,21 @@
#define MTK_CDMP_EG_CTRL 0x404
/* GDM Exgress Control Register */
#define MTK_GDMA_FWD_CFG(x) (0x500 + (x * 0x1000))
#define MTK_GDMA_FWD_CFG(x) ({ typeof(x) _x = (x); (_x == MTK_GMAC3_ID) ? \
0x540 : 0x500 + (_x * 0x1000); })
#define MTK_GDMA_SPECIAL_TAG BIT(24)
#define MTK_GDMA_ICS_EN BIT(22)
#define MTK_GDMA_TCS_EN BIT(21)
#define MTK_GDMA_UCS_EN BIT(20)
#define MTK_GDMA_STRP_CRC BIT(16)
#define MTK_GDMA_TO_PDMA 0x0
#define MTK_GDMA_DROP_ALL 0x7777
/* GDM Egress Control Register */
#define MTK_GDMA_EG_CTRL(x) ({ typeof(x) _x = (x); (_x == MTK_GMAC3_ID) ? \
0x544 : 0x504 + (_x * 0x1000); })
#define MTK_GDMA_XGDM_SEL BIT(31)
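
With the GMAC identifiers defined later in this header (MTK_GMAC1_ID = 0,
MTK_GMAC2_ID = 1, MTK_GMAC3_ID = 2), the two macros above expand as follows;
GDM3 does not follow the 0x1000 per-GMAC stride, hence the special case:

	/* worked out from the macros above:
	 *   MTK_GDMA_FWD_CFG: GMAC1 -> 0x0500, GMAC2 -> 0x1500, GMAC3 -> 0x0540
	 *   MTK_GDMA_EG_CTRL: GMAC1 -> 0x0504, GMAC2 -> 0x1504, GMAC3 -> 0x0544
	 */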
/* Unicast Filter MAC Address Register - Low */
#define MTK_GDMA_MAC_ADRL(x) (0x508 + (x * 0x1000))
@ -288,8 +294,6 @@
/* QDMA Interrupt grouping registers */
#define MTK_RLS_DONE_INT BIT(0)
#define MTK_STAT_OFFSET 0x40
/* QDMA TX NUM */
#define QID_BITS_V2(x) (((x) & 0x3f) << 16)
#define MTK_QDMA_GMAC2_QID 8
@ -302,6 +306,8 @@
#define TX_DMA_CHKSUM_V2 (0x7 << 28)
#define TX_DMA_TSO_V2 BIT(31)
#define TX_DMA_SPTAG_V3 BIT(27)
/* QDMA V2 descriptor txd4 */
#define TX_DMA_FPORT_SHIFT_V2 8
#define TX_DMA_FPORT_MASK_V2 0xf
@ -389,7 +395,26 @@
#define PHY_IAC_TIMEOUT HZ
#define MTK_MAC_MISC 0x1000c
#define MTK_MAC_MISC_V3 0x10010
#define MTK_MUX_TO_ESW BIT(0)
#define MISC_MDC_TURBO BIT(4)
/* XMAC status registers */
#define MTK_XGMAC_STS(x) (((x) == MTK_GMAC3_ID) ? 0x1001C : 0x1000C)
#define MTK_XGMAC_FORCE_LINK(x) (((x) == MTK_GMAC2_ID) ? BIT(31) : BIT(15))
#define MTK_USXGMII_PCS_LINK BIT(8)
#define MTK_XGMAC_RX_FC BIT(5)
#define MTK_XGMAC_TX_FC BIT(4)
#define MTK_USXGMII_PCS_MODE GENMASK(3, 1)
#define MTK_XGMAC_LINK_STS BIT(0)
/* GSW bridge registers */
#define MTK_GSW_CFG (0x10080)
#define GSWTX_IPG_MASK GENMASK(19, 16)
#define GSWTX_IPG_SHIFT 16
#define GSWRX_IPG_MASK GENMASK(3, 0)
#define GSWRX_IPG_SHIFT 0
#define GSW_IPG_11 11
/* Mac control registers */
#define MTK_MAC_MCR(x) (0x10100 + (x * 0x100))
@ -635,12 +660,6 @@ enum mtk_tx_flags {
*/
MTK_TX_FLAGS_SINGLE0 = 0x01,
MTK_TX_FLAGS_PAGE0 = 0x02,
/* MTK_TX_FLAGS_FPORTx allows tracking which port the transmitted
* SKB out instead of looking up through hardware TX descriptor.
*/
MTK_TX_FLAGS_FPORT0 = 0x04,
MTK_TX_FLAGS_FPORT1 = 0x08,
};
/* This enum allows us to identify how the clock is defined on the array of the
@ -653,6 +672,11 @@ enum mtk_clks_map {
MTK_CLK_GP0,
MTK_CLK_GP1,
MTK_CLK_GP2,
MTK_CLK_GP3,
MTK_CLK_XGP1,
MTK_CLK_XGP2,
MTK_CLK_XGP3,
MTK_CLK_CRYPTO,
MTK_CLK_FE,
MTK_CLK_TRGPLL,
MTK_CLK_SGMII_TX_250M,
@ -669,63 +693,145 @@ enum mtk_clks_map {
MTK_CLK_WOCPU1,
MTK_CLK_NETSYS0,
MTK_CLK_NETSYS1,
MTK_CLK_ETHWARP_WOCPU2,
MTK_CLK_ETHWARP_WOCPU1,
MTK_CLK_ETHWARP_WOCPU0,
MTK_CLK_TOP_USXGMII_SBUS_0_SEL,
MTK_CLK_TOP_USXGMII_SBUS_1_SEL,
MTK_CLK_TOP_SGM_0_SEL,
MTK_CLK_TOP_SGM_1_SEL,
MTK_CLK_TOP_XFI_PHY_0_XTAL_SEL,
MTK_CLK_TOP_XFI_PHY_1_XTAL_SEL,
MTK_CLK_TOP_ETH_GMII_SEL,
MTK_CLK_TOP_ETH_REFCK_50M_SEL,
MTK_CLK_TOP_ETH_SYS_200M_SEL,
MTK_CLK_TOP_ETH_SYS_SEL,
MTK_CLK_TOP_ETH_XGMII_SEL,
MTK_CLK_TOP_ETH_MII_SEL,
MTK_CLK_TOP_NETSYS_SEL,
MTK_CLK_TOP_NETSYS_500M_SEL,
MTK_CLK_TOP_NETSYS_PAO_2X_SEL,
MTK_CLK_TOP_NETSYS_SYNC_250M_SEL,
MTK_CLK_TOP_NETSYS_PPEFB_250M_SEL,
MTK_CLK_TOP_NETSYS_WARP_SEL,
MTK_CLK_MAX
};
#define MT7623_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \
BIT(MTK_CLK_GP1) | BIT(MTK_CLK_GP2) | \
BIT(MTK_CLK_TRGPLL))
#define MT7622_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \
BIT(MTK_CLK_GP0) | BIT(MTK_CLK_GP1) | \
BIT(MTK_CLK_GP2) | \
BIT(MTK_CLK_SGMII_TX_250M) | \
BIT(MTK_CLK_SGMII_RX_250M) | \
BIT(MTK_CLK_SGMII_CDR_REF) | \
BIT(MTK_CLK_SGMII_CDR_FB) | \
BIT(MTK_CLK_SGMII_CK) | \
BIT(MTK_CLK_ETH2PLL))
#define MT7623_CLKS_BITMAP (BIT_ULL(MTK_CLK_ETHIF) | BIT_ULL(MTK_CLK_ESW) | \
BIT_ULL(MTK_CLK_GP1) | BIT_ULL(MTK_CLK_GP2) | \
BIT_ULL(MTK_CLK_TRGPLL))
#define MT7622_CLKS_BITMAP (BIT_ULL(MTK_CLK_ETHIF) | BIT_ULL(MTK_CLK_ESW) | \
BIT_ULL(MTK_CLK_GP0) | BIT_ULL(MTK_CLK_GP1) | \
BIT_ULL(MTK_CLK_GP2) | \
BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
BIT_ULL(MTK_CLK_SGMII_CDR_REF) | \
BIT_ULL(MTK_CLK_SGMII_CDR_FB) | \
BIT_ULL(MTK_CLK_SGMII_CK) | \
BIT_ULL(MTK_CLK_ETH2PLL))
#define MT7621_CLKS_BITMAP (0)
#define MT7628_CLKS_BITMAP (0)
#define MT7629_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \
BIT(MTK_CLK_GP0) | BIT(MTK_CLK_GP1) | \
BIT(MTK_CLK_GP2) | BIT(MTK_CLK_FE) | \
BIT(MTK_CLK_SGMII_TX_250M) | \
BIT(MTK_CLK_SGMII_RX_250M) | \
BIT(MTK_CLK_SGMII_CDR_REF) | \
BIT(MTK_CLK_SGMII_CDR_FB) | \
BIT(MTK_CLK_SGMII2_TX_250M) | \
BIT(MTK_CLK_SGMII2_RX_250M) | \
BIT(MTK_CLK_SGMII2_CDR_REF) | \
BIT(MTK_CLK_SGMII2_CDR_FB) | \
BIT(MTK_CLK_SGMII_CK) | \
BIT(MTK_CLK_ETH2PLL) | BIT(MTK_CLK_SGMIITOP))
#define MT7981_CLKS_BITMAP (BIT(MTK_CLK_FE) | BIT(MTK_CLK_GP2) | BIT(MTK_CLK_GP1) | \
BIT(MTK_CLK_WOCPU0) | \
BIT(MTK_CLK_SGMII_TX_250M) | \
BIT(MTK_CLK_SGMII_RX_250M) | \
BIT(MTK_CLK_SGMII_CDR_REF) | \
BIT(MTK_CLK_SGMII_CDR_FB) | \
BIT(MTK_CLK_SGMII2_TX_250M) | \
BIT(MTK_CLK_SGMII2_RX_250M) | \
BIT(MTK_CLK_SGMII2_CDR_REF) | \
BIT(MTK_CLK_SGMII2_CDR_FB) | \
BIT(MTK_CLK_SGMII_CK))
#define MT7986_CLKS_BITMAP (BIT(MTK_CLK_FE) | BIT(MTK_CLK_GP2) | BIT(MTK_CLK_GP1) | \
BIT(MTK_CLK_WOCPU1) | BIT(MTK_CLK_WOCPU0) | \
BIT(MTK_CLK_SGMII_TX_250M) | \
BIT(MTK_CLK_SGMII_RX_250M) | \
BIT(MTK_CLK_SGMII_CDR_REF) | \
BIT(MTK_CLK_SGMII_CDR_FB) | \
BIT(MTK_CLK_SGMII2_TX_250M) | \
BIT(MTK_CLK_SGMII2_RX_250M) | \
BIT(MTK_CLK_SGMII2_CDR_REF) | \
BIT(MTK_CLK_SGMII2_CDR_FB))
#define MT7629_CLKS_BITMAP (BIT_ULL(MTK_CLK_ETHIF) | BIT_ULL(MTK_CLK_ESW) | \
BIT_ULL(MTK_CLK_GP0) | BIT_ULL(MTK_CLK_GP1) | \
BIT_ULL(MTK_CLK_GP2) | BIT_ULL(MTK_CLK_FE) | \
BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
BIT_ULL(MTK_CLK_SGMII_CDR_REF) | \
BIT_ULL(MTK_CLK_SGMII_CDR_FB) | \
BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \
BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \
BIT_ULL(MTK_CLK_SGMII2_CDR_REF) | \
BIT_ULL(MTK_CLK_SGMII2_CDR_FB) | \
BIT_ULL(MTK_CLK_SGMII_CK) | \
BIT_ULL(MTK_CLK_ETH2PLL) | BIT_ULL(MTK_CLK_SGMIITOP))
#define MT7981_CLKS_BITMAP (BIT_ULL(MTK_CLK_FE) | BIT_ULL(MTK_CLK_GP2) | \
BIT_ULL(MTK_CLK_GP1) | \
BIT_ULL(MTK_CLK_WOCPU0) | \
BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
BIT_ULL(MTK_CLK_SGMII_CDR_REF) | \
BIT_ULL(MTK_CLK_SGMII_CDR_FB) | \
BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \
BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \
BIT_ULL(MTK_CLK_SGMII2_CDR_REF) | \
BIT_ULL(MTK_CLK_SGMII2_CDR_FB) | \
BIT_ULL(MTK_CLK_SGMII_CK))
#define MT7986_CLKS_BITMAP (BIT_ULL(MTK_CLK_FE) | BIT_ULL(MTK_CLK_GP2) | \
BIT_ULL(MTK_CLK_GP1) | \
BIT_ULL(MTK_CLK_WOCPU1) | BIT_ULL(MTK_CLK_WOCPU0) | \
BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
BIT_ULL(MTK_CLK_SGMII_CDR_REF) | \
BIT_ULL(MTK_CLK_SGMII_CDR_FB) | \
BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \
BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \
BIT_ULL(MTK_CLK_SGMII2_CDR_REF) | \
BIT_ULL(MTK_CLK_SGMII2_CDR_FB))
#define MT7988_CLKS_BITMAP (BIT_ULL(MTK_CLK_FE) | BIT_ULL(MTK_CLK_ESW) | \
BIT_ULL(MTK_CLK_GP1) | BIT_ULL(MTK_CLK_GP2) | \
BIT_ULL(MTK_CLK_GP3) | BIT_ULL(MTK_CLK_XGP1) | \
BIT_ULL(MTK_CLK_XGP2) | BIT_ULL(MTK_CLK_XGP3) | \
BIT_ULL(MTK_CLK_CRYPTO) | \
BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \
BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \
BIT_ULL(MTK_CLK_ETHWARP_WOCPU2) | \
BIT_ULL(MTK_CLK_ETHWARP_WOCPU1) | \
BIT_ULL(MTK_CLK_ETHWARP_WOCPU0) | \
BIT_ULL(MTK_CLK_TOP_USXGMII_SBUS_0_SEL) | \
BIT_ULL(MTK_CLK_TOP_USXGMII_SBUS_1_SEL) | \
BIT_ULL(MTK_CLK_TOP_SGM_0_SEL) | \
BIT_ULL(MTK_CLK_TOP_SGM_1_SEL) | \
BIT_ULL(MTK_CLK_TOP_XFI_PHY_0_XTAL_SEL) | \
BIT_ULL(MTK_CLK_TOP_XFI_PHY_1_XTAL_SEL) | \
BIT_ULL(MTK_CLK_TOP_ETH_GMII_SEL) | \
BIT_ULL(MTK_CLK_TOP_ETH_REFCK_50M_SEL) | \
BIT_ULL(MTK_CLK_TOP_ETH_SYS_200M_SEL) | \
BIT_ULL(MTK_CLK_TOP_ETH_SYS_SEL) | \
BIT_ULL(MTK_CLK_TOP_ETH_XGMII_SEL) | \
BIT_ULL(MTK_CLK_TOP_ETH_MII_SEL) | \
BIT_ULL(MTK_CLK_TOP_NETSYS_SEL) | \
BIT_ULL(MTK_CLK_TOP_NETSYS_500M_SEL) | \
BIT_ULL(MTK_CLK_TOP_NETSYS_PAO_2X_SEL) | \
BIT_ULL(MTK_CLK_TOP_NETSYS_SYNC_250M_SEL) | \
BIT_ULL(MTK_CLK_TOP_NETSYS_PPEFB_250M_SEL) | \
BIT_ULL(MTK_CLK_TOP_NETSYS_WARP_SEL))
enum mtk_dev_state {
MTK_HW_INIT,
MTK_RESETTING
};
/* PSE Port Definition */
enum mtk_pse_port {
PSE_ADMA_PORT = 0,
PSE_GDM1_PORT,
PSE_GDM2_PORT,
PSE_PPE0_PORT,
PSE_PPE1_PORT,
PSE_QDMA_TX_PORT,
PSE_QDMA_RX_PORT,
PSE_DROP_PORT,
PSE_WDMA0_PORT,
PSE_WDMA1_PORT,
PSE_TDMA_PORT,
PSE_NONE_PORT,
PSE_PPE2_PORT,
PSE_WDMA2_PORT,
PSE_EIP197_PORT,
PSE_GDM3_PORT,
PSE_PORT_MAX
};
/* GMAC Identifier */
enum mtk_gmac_id {
MTK_GMAC1_ID = 0,
MTK_GMAC2_ID,
MTK_GMAC3_ID,
MTK_GMAC_ID_MAX
};
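
For orientation, the TX and RX datapath hunks in mtk_eth_soc.c above map
these identifiers onto the PSE ports from the previous enum as follows:

	/* MAC <-> PSE port mapping used by the datapath in this series:
	 *   MTK_GMAC1_ID <-> PSE_GDM1_PORT
	 *   MTK_GMAC2_ID <-> PSE_GDM2_PORT
	 *   MTK_GMAC3_ID <-> PSE_GDM3_PORT  (MT7988 only)
	 */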
enum mtk_tx_buf_type {
MTK_TYPE_SKB,
MTK_TYPE_XDP_TX,
@ -744,7 +850,8 @@ struct mtk_tx_buf {
enum mtk_tx_buf_type type;
void *data;
u32 flags;
u16 mac_id;
u16 flags;
DEFINE_DMA_UNMAP_ADDR(dma_addr0);
DEFINE_DMA_UNMAP_LEN(dma_len0);
DEFINE_DMA_UNMAP_ADDR(dma_addr1);
@ -820,7 +927,6 @@ enum mkt_eth_capabilities {
MTK_SHARED_INT_BIT,
MTK_TRGMII_MT7621_CLK_BIT,
MTK_QDMA_BIT,
MTK_NETSYS_V2_BIT,
MTK_SOC_MT7628_BIT,
MTK_RSTCTRL_PPE1_BIT,
MTK_U3_COPHY_V2_BIT,
@ -843,42 +949,41 @@ enum mkt_eth_capabilities {
};
/* Supported hardware group on SoCs */
#define MTK_RGMII BIT(MTK_RGMII_BIT)
#define MTK_TRGMII BIT(MTK_TRGMII_BIT)
#define MTK_SGMII BIT(MTK_SGMII_BIT)
#define MTK_ESW BIT(MTK_ESW_BIT)
#define MTK_GEPHY BIT(MTK_GEPHY_BIT)
#define MTK_MUX BIT(MTK_MUX_BIT)
#define MTK_INFRA BIT(MTK_INFRA_BIT)
#define MTK_SHARED_SGMII BIT(MTK_SHARED_SGMII_BIT)
#define MTK_HWLRO BIT(MTK_HWLRO_BIT)
#define MTK_SHARED_INT BIT(MTK_SHARED_INT_BIT)
#define MTK_TRGMII_MT7621_CLK BIT(MTK_TRGMII_MT7621_CLK_BIT)
#define MTK_QDMA BIT(MTK_QDMA_BIT)
#define MTK_NETSYS_V2 BIT(MTK_NETSYS_V2_BIT)
#define MTK_SOC_MT7628 BIT(MTK_SOC_MT7628_BIT)
#define MTK_RSTCTRL_PPE1 BIT(MTK_RSTCTRL_PPE1_BIT)
#define MTK_U3_COPHY_V2 BIT(MTK_U3_COPHY_V2_BIT)
#define MTK_RGMII BIT_ULL(MTK_RGMII_BIT)
#define MTK_TRGMII BIT_ULL(MTK_TRGMII_BIT)
#define MTK_SGMII BIT_ULL(MTK_SGMII_BIT)
#define MTK_ESW BIT_ULL(MTK_ESW_BIT)
#define MTK_GEPHY BIT_ULL(MTK_GEPHY_BIT)
#define MTK_MUX BIT_ULL(MTK_MUX_BIT)
#define MTK_INFRA BIT_ULL(MTK_INFRA_BIT)
#define MTK_SHARED_SGMII BIT_ULL(MTK_SHARED_SGMII_BIT)
#define MTK_HWLRO BIT_ULL(MTK_HWLRO_BIT)
#define MTK_SHARED_INT BIT_ULL(MTK_SHARED_INT_BIT)
#define MTK_TRGMII_MT7621_CLK BIT_ULL(MTK_TRGMII_MT7621_CLK_BIT)
#define MTK_QDMA BIT_ULL(MTK_QDMA_BIT)
#define MTK_SOC_MT7628 BIT_ULL(MTK_SOC_MT7628_BIT)
#define MTK_RSTCTRL_PPE1 BIT_ULL(MTK_RSTCTRL_PPE1_BIT)
#define MTK_U3_COPHY_V2 BIT_ULL(MTK_U3_COPHY_V2_BIT)
#define MTK_ETH_MUX_GDM1_TO_GMAC1_ESW \
BIT(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT)
BIT_ULL(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT)
#define MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY \
BIT(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT)
BIT_ULL(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT)
#define MTK_ETH_MUX_U3_GMAC2_TO_QPHY \
BIT(MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT)
BIT_ULL(MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT)
#define MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII \
BIT(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT)
BIT_ULL(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT)
#define MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII \
BIT(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT)
BIT_ULL(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT)
/* Supported path present on SoCs */
#define MTK_ETH_PATH_GMAC1_RGMII BIT(MTK_ETH_PATH_GMAC1_RGMII_BIT)
#define MTK_ETH_PATH_GMAC1_TRGMII BIT(MTK_ETH_PATH_GMAC1_TRGMII_BIT)
#define MTK_ETH_PATH_GMAC1_SGMII BIT(MTK_ETH_PATH_GMAC1_SGMII_BIT)
#define MTK_ETH_PATH_GMAC2_RGMII BIT(MTK_ETH_PATH_GMAC2_RGMII_BIT)
#define MTK_ETH_PATH_GMAC2_SGMII BIT(MTK_ETH_PATH_GMAC2_SGMII_BIT)
#define MTK_ETH_PATH_GMAC2_GEPHY BIT(MTK_ETH_PATH_GMAC2_GEPHY_BIT)
#define MTK_ETH_PATH_GDM1_ESW BIT(MTK_ETH_PATH_GDM1_ESW_BIT)
#define MTK_ETH_PATH_GMAC1_RGMII BIT_ULL(MTK_ETH_PATH_GMAC1_RGMII_BIT)
#define MTK_ETH_PATH_GMAC1_TRGMII BIT_ULL(MTK_ETH_PATH_GMAC1_TRGMII_BIT)
#define MTK_ETH_PATH_GMAC1_SGMII BIT_ULL(MTK_ETH_PATH_GMAC1_SGMII_BIT)
#define MTK_ETH_PATH_GMAC2_RGMII BIT_ULL(MTK_ETH_PATH_GMAC2_RGMII_BIT)
#define MTK_ETH_PATH_GMAC2_SGMII BIT_ULL(MTK_ETH_PATH_GMAC2_SGMII_BIT)
#define MTK_ETH_PATH_GMAC2_GEPHY BIT_ULL(MTK_ETH_PATH_GMAC2_GEPHY_BIT)
#define MTK_ETH_PATH_GDM1_ESW BIT_ULL(MTK_ETH_PATH_GDM1_ESW_BIT)
#define MTK_GMAC1_RGMII (MTK_ETH_PATH_GMAC1_RGMII | MTK_RGMII)
#define MTK_GMAC1_TRGMII (MTK_ETH_PATH_GMAC1_TRGMII | MTK_TRGMII)
@ -934,11 +1039,13 @@ enum mkt_eth_capabilities {
#define MT7981_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \
MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
MTK_MUX_U3_GMAC2_TO_QPHY | MTK_U3_COPHY_V2 | \
MTK_NETSYS_V2 | MTK_RSTCTRL_PPE1)
MTK_RSTCTRL_PPE1)
#define MT7986_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | \
MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
MTK_NETSYS_V2 | MTK_RSTCTRL_PPE1)
MTK_RSTCTRL_PPE1)
#define MT7988_CAPS (MTK_GDM1_ESW | MTK_QDMA | MTK_RSTCTRL_PPE1)
struct mtk_tx_dma_desc_info {
dma_addr_t addr;
@ -1009,6 +1116,7 @@ struct mtk_reg_map {
* @required_pctl A bool value to show whether the SoC requires
* the extra setup for those pins used by GMAC.
* @hash_offset Flow table hash offset.
* @version SoC version.
* @foe_entry_size Foe table entry size.
* @has_accounting Bool indicating support for accounting of
* offloaded flows.
@ -1022,11 +1130,12 @@ struct mtk_reg_map {
struct mtk_soc_data {
const struct mtk_reg_map *reg_map;
u32 ana_rgc3;
u32 caps;
u32 required_clks;
u64 caps;
u64 required_clks;
bool required_pctl;
u8 offload_version;
u8 hash_offset;
u8 version;
u16 foe_entry_size;
netdev_features_t hw_features;
bool has_accounting;
@ -1043,8 +1152,8 @@ struct mtk_soc_data {
#define MTK_DMA_MONITOR_TIMEOUT msecs_to_jiffies(1000)
/* currently no SoC has more than 2 macs */
#define MTK_MAX_DEVS 2
/* currently no SoC has more than 3 macs */
#define MTK_MAX_DEVS 3
/* struct mtk_eth - This is the main datasructure for holding the state
* of the driver
@ -1183,6 +1292,21 @@ struct mtk_mac {
/* the struct describing the SoC. these are declared in the soc_xyz.c files */
extern const struct of_device_id of_mtk_match[];
static inline bool mtk_is_netsys_v1(struct mtk_eth *eth)
{
return eth->soc->version == 1;
}
static inline bool mtk_is_netsys_v2_or_greater(struct mtk_eth *eth)
{
return eth->soc->version > 1;
}
static inline bool mtk_is_netsys_v3_or_greater(struct mtk_eth *eth)
{
return eth->soc->version > 2;
}
static inline struct mtk_foe_entry *
mtk_foe_get_entry(struct mtk_ppe *ppe, u16 hash)
{
@ -1193,7 +1317,7 @@ mtk_foe_get_entry(struct mtk_ppe *ppe, u16 hash)
static inline u32 mtk_get_ib1_ts_mask(struct mtk_eth *eth)
{
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
if (mtk_is_netsys_v2_or_greater(eth))
return MTK_FOE_IB1_BIND_TIMESTAMP_V2;
return MTK_FOE_IB1_BIND_TIMESTAMP;
@ -1201,7 +1325,7 @@ static inline u32 mtk_get_ib1_ts_mask(struct mtk_eth *eth)
static inline u32 mtk_get_ib1_ppoe_mask(struct mtk_eth *eth)
{
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
if (mtk_is_netsys_v2_or_greater(eth))
return MTK_FOE_IB1_BIND_PPPOE_V2;
return MTK_FOE_IB1_BIND_PPPOE;
@ -1209,7 +1333,7 @@ static inline u32 mtk_get_ib1_ppoe_mask(struct mtk_eth *eth)
static inline u32 mtk_get_ib1_vlan_tag_mask(struct mtk_eth *eth)
{
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
if (mtk_is_netsys_v2_or_greater(eth))
return MTK_FOE_IB1_BIND_VLAN_TAG_V2;
return MTK_FOE_IB1_BIND_VLAN_TAG;
@ -1217,7 +1341,7 @@ static inline u32 mtk_get_ib1_vlan_tag_mask(struct mtk_eth *eth)
static inline u32 mtk_get_ib1_vlan_layer_mask(struct mtk_eth *eth)
{
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
if (mtk_is_netsys_v2_or_greater(eth))
return MTK_FOE_IB1_BIND_VLAN_LAYER_V2;
return MTK_FOE_IB1_BIND_VLAN_LAYER;
@ -1225,7 +1349,7 @@ static inline u32 mtk_get_ib1_vlan_layer_mask(struct mtk_eth *eth)
static inline u32 mtk_prep_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
{
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
if (mtk_is_netsys_v2_or_greater(eth))
return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val);
return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, val);
@ -1233,7 +1357,7 @@ static inline u32 mtk_prep_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
static inline u32 mtk_get_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
{
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
if (mtk_is_netsys_v2_or_greater(eth))
return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val);
return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, val);
@ -1241,7 +1365,7 @@ static inline u32 mtk_get_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
static inline u32 mtk_get_ib1_pkt_type_mask(struct mtk_eth *eth)
{
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
if (mtk_is_netsys_v2_or_greater(eth))
return MTK_FOE_IB1_PACKET_TYPE_V2;
return MTK_FOE_IB1_PACKET_TYPE;
@ -1249,7 +1373,7 @@ static inline u32 mtk_get_ib1_pkt_type_mask(struct mtk_eth *eth)
static inline u32 mtk_get_ib1_pkt_type(struct mtk_eth *eth, u32 val)
{
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
if (mtk_is_netsys_v2_or_greater(eth))
return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE_V2, val);
return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, val);
@ -1257,7 +1381,7 @@ static inline u32 mtk_get_ib1_pkt_type(struct mtk_eth *eth, u32 val)
static inline u32 mtk_get_ib2_multicast_mask(struct mtk_eth *eth)
{
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
if (mtk_is_netsys_v2_or_greater(eth))
return MTK_FOE_IB2_MULTICAST_V2;
return MTK_FOE_IB2_MULTICAST;
@ -1268,6 +1392,7 @@ void mtk_stats_update_mac(struct mtk_mac *mac);
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg);
u32 mtk_r32(struct mtk_eth *eth, unsigned reg);
u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg);
int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id);

View File

@ -208,7 +208,7 @@ int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry,
memset(entry, 0, sizeof(*entry));
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
if (mtk_is_netsys_v2_or_greater(eth)) {
val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE_V2, type) |
FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
@ -272,7 +272,7 @@ int mtk_foe_entry_set_pse_port(struct mtk_eth *eth,
u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
u32 val = *ib2;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
if (mtk_is_netsys_v2_or_greater(eth)) {
val &= ~MTK_FOE_IB2_DEST_PORT_V2;
val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, port);
} else {
@ -423,7 +423,7 @@ int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
if (mtk_is_netsys_v2_or_greater(eth)) {
*ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
*ib2 |= FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) |
MTK_FOE_IB2_WDMA_WINFO_V2;
@ -447,7 +447,7 @@ int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry,
{
u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
if (mtk_is_netsys_v2_or_greater(eth)) {
*ib2 &= ~MTK_FOE_IB2_QID_V2;
*ib2 |= FIELD_PREP(MTK_FOE_IB2_QID_V2, queue);
*ib2 |= MTK_FOE_IB2_PSE_QOS_V2;
@ -603,7 +603,7 @@ __mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
struct mtk_foe_entry *hwe;
u32 val;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
if (mtk_is_netsys_v2_or_greater(eth)) {
entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP_V2;
entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP_V2,
timestamp);
@ -619,7 +619,7 @@ __mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
hwe->ib1 = entry->ib1;
if (ppe->accounting) {
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
if (mtk_is_netsys_v2_or_greater(eth))
val = MTK_FOE_IB2_MIB_CNT_V2;
else
val = MTK_FOE_IB2_MIB_CNT;
@ -979,7 +979,7 @@ void mtk_ppe_start(struct mtk_ppe *ppe)
MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
MTK_PPE_ENTRIES_SHIFT);
if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
if (mtk_is_netsys_v2_or_greater(ppe->eth))
val |= MTK_PPE_TB_CFG_INFO_SEL;
ppe_w32(ppe, MTK_PPE_TB_CFG, val);
@ -995,7 +995,7 @@ void mtk_ppe_start(struct mtk_ppe *ppe)
MTK_PPE_FLOW_CFG_IP4_NAPT |
MTK_PPE_FLOW_CFG_IP4_DSLITE |
MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
if (mtk_is_netsys_v2_or_greater(ppe->eth))
val |= MTK_PPE_MD_TOAP_BYP_CRSN0 |
MTK_PPE_MD_TOAP_BYP_CRSN1 |
MTK_PPE_MD_TOAP_BYP_CRSN2 |
@ -1037,7 +1037,7 @@ void mtk_ppe_start(struct mtk_ppe *ppe)
ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);
if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2)) {
if (mtk_is_netsys_v2_or_greater(ppe->eth)) {
ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT1, 0xcb777);
ppe_w32(ppe, MTK_PPE_SBW_CTRL, 0x7f);
}

View File

@ -193,7 +193,7 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
mtk_foe_entry_set_wdma(eth, foe, info.wdma_idx, info.queue,
info.bss, info.wcid);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
if (mtk_is_netsys_v2_or_greater(eth)) {
switch (info.wdma_idx) {
case 0:
pse_port = 8;

View File

@ -1091,7 +1091,7 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev)
} else {
struct mtk_eth *eth = dev->hw->eth;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
if (mtk_is_netsys_v2_or_greater(eth))
wed_set(dev, MTK_WED_RESET_IDX,
MTK_WED_RESET_IDX_RX_V2);
else
@ -1907,7 +1907,7 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
hw->wdma = wdma;
hw->index = index;
hw->irq = irq;
hw->version = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
hw->version = mtk_is_netsys_v1(eth) ? 1 : 2;
if (hw->version == 1) {
hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,