mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-15 08:14:15 +08:00
net: ethernet: mtk_eth_soc: handle dma buffer size soc specific
The mainline MTK ethernet driver has long suffered from rare but
annoying tx queue timeouts. We think that this is caused by fixed
dma sizes hardcoded for all SoCs.
We suspect this problem arises from a low level of free TX DMADs,
leaving the TX ring almost full.
The transmit timeout is caused by the Tx queue not waking up. The
Tx queue stops when the free counter is less than ring->thres, and
it wakes up once the free counter is greater than ring->thres.
If the CPU is too slow to wake up the Tx queues, it may cause a
transmit timeout.
Therefore, we increased the number of TX and RX DMADs to mitigate
this error situation.
Use the dma-size implementation from SDK in a per SoC manner. In
difference to SDK we have no RSS feature yet, so all RX/TX sizes
should be raised from 512 to 2048 byte except fqdma on mt7988 to
avoid the tx timeout issue.
Fixes: 656e705243
("net-next: mediatek: add support for MT7623 ethernet")
Suggested-by: Daniel Golle <daniel@makrotopia.org>
Signed-off-by: Frank Wunderlich <frank-w@public-files.de>
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
5b4b62a169
commit
c57e558194
@ -1131,9 +1131,9 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
|
|||||||
{
|
{
|
||||||
const struct mtk_soc_data *soc = eth->soc;
|
const struct mtk_soc_data *soc = eth->soc;
|
||||||
dma_addr_t phy_ring_tail;
|
dma_addr_t phy_ring_tail;
|
||||||
int cnt = MTK_QDMA_RING_SIZE;
|
int cnt = soc->tx.fq_dma_size;
|
||||||
dma_addr_t dma_addr;
|
dma_addr_t dma_addr;
|
||||||
int i;
|
int i, j, len;
|
||||||
|
|
||||||
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM))
|
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM))
|
||||||
eth->scratch_ring = eth->sram_base;
|
eth->scratch_ring = eth->sram_base;
|
||||||
@ -1142,40 +1142,46 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
|
|||||||
cnt * soc->tx.desc_size,
|
cnt * soc->tx.desc_size,
|
||||||
ð->phy_scratch_ring,
|
ð->phy_scratch_ring,
|
||||||
GFP_KERNEL);
|
GFP_KERNEL);
|
||||||
|
|
||||||
if (unlikely(!eth->scratch_ring))
|
if (unlikely(!eth->scratch_ring))
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
|
|
||||||
if (unlikely(!eth->scratch_head))
|
|
||||||
return -ENOMEM;
|
|
||||||
|
|
||||||
dma_addr = dma_map_single(eth->dma_dev,
|
|
||||||
eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
|
|
||||||
DMA_FROM_DEVICE);
|
|
||||||
if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
|
|
||||||
return -ENOMEM;
|
|
||||||
|
|
||||||
phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
|
phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
|
||||||
|
|
||||||
for (i = 0; i < cnt; i++) {
|
for (j = 0; j < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); j++) {
|
||||||
dma_addr_t addr = dma_addr + i * MTK_QDMA_PAGE_SIZE;
|
len = min_t(int, cnt - j * MTK_FQ_DMA_LENGTH, MTK_FQ_DMA_LENGTH);
|
||||||
struct mtk_tx_dma_v2 *txd;
|
eth->scratch_head[j] = kcalloc(len, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
|
||||||
|
|
||||||
txd = eth->scratch_ring + i * soc->tx.desc_size;
|
if (unlikely(!eth->scratch_head[j]))
|
||||||
txd->txd1 = addr;
|
return -ENOMEM;
|
||||||
if (i < cnt - 1)
|
|
||||||
txd->txd2 = eth->phy_scratch_ring +
|
|
||||||
(i + 1) * soc->tx.desc_size;
|
|
||||||
|
|
||||||
txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
|
dma_addr = dma_map_single(eth->dma_dev,
|
||||||
if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
|
eth->scratch_head[j], len * MTK_QDMA_PAGE_SIZE,
|
||||||
txd->txd3 |= TX_DMA_PREP_ADDR64(addr);
|
DMA_FROM_DEVICE);
|
||||||
txd->txd4 = 0;
|
|
||||||
if (mtk_is_netsys_v2_or_greater(eth)) {
|
if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
|
||||||
txd->txd5 = 0;
|
return -ENOMEM;
|
||||||
txd->txd6 = 0;
|
|
||||||
txd->txd7 = 0;
|
for (i = 0; i < cnt; i++) {
|
||||||
txd->txd8 = 0;
|
struct mtk_tx_dma_v2 *txd;
|
||||||
|
|
||||||
|
txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc->tx.desc_size;
|
||||||
|
txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
|
||||||
|
if (j * MTK_FQ_DMA_LENGTH + i < cnt)
|
||||||
|
txd->txd2 = eth->phy_scratch_ring +
|
||||||
|
(j * MTK_FQ_DMA_LENGTH + i + 1) * soc->tx.desc_size;
|
||||||
|
|
||||||
|
txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
|
||||||
|
if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
|
||||||
|
txd->txd3 |= TX_DMA_PREP_ADDR64(dma_addr + i * MTK_QDMA_PAGE_SIZE);
|
||||||
|
|
||||||
|
txd->txd4 = 0;
|
||||||
|
if (mtk_is_netsys_v2_or_greater(eth)) {
|
||||||
|
txd->txd5 = 0;
|
||||||
|
txd->txd6 = 0;
|
||||||
|
txd->txd7 = 0;
|
||||||
|
txd->txd8 = 0;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2457,7 +2463,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
|
|||||||
if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
|
if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
|
||||||
ring_size = MTK_QDMA_RING_SIZE;
|
ring_size = MTK_QDMA_RING_SIZE;
|
||||||
else
|
else
|
||||||
ring_size = MTK_DMA_SIZE;
|
ring_size = soc->tx.dma_size;
|
||||||
|
|
||||||
ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
|
ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
|
||||||
GFP_KERNEL);
|
GFP_KERNEL);
|
||||||
@ -2465,8 +2471,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
|
|||||||
goto no_tx_mem;
|
goto no_tx_mem;
|
||||||
|
|
||||||
if (MTK_HAS_CAPS(soc->caps, MTK_SRAM)) {
|
if (MTK_HAS_CAPS(soc->caps, MTK_SRAM)) {
|
||||||
ring->dma = eth->sram_base + ring_size * sz;
|
ring->dma = eth->sram_base + soc->tx.fq_dma_size * sz;
|
||||||
ring->phys = eth->phy_scratch_ring + ring_size * (dma_addr_t)sz;
|
ring->phys = eth->phy_scratch_ring + soc->tx.fq_dma_size * (dma_addr_t)sz;
|
||||||
} else {
|
} else {
|
||||||
ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
|
ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
|
||||||
&ring->phys, GFP_KERNEL);
|
&ring->phys, GFP_KERNEL);
|
||||||
@ -2588,6 +2594,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
|
|||||||
static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
|
static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
|
||||||
{
|
{
|
||||||
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
|
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
|
||||||
|
const struct mtk_soc_data *soc = eth->soc;
|
||||||
struct mtk_rx_ring *ring;
|
struct mtk_rx_ring *ring;
|
||||||
int rx_data_len, rx_dma_size, tx_ring_size;
|
int rx_data_len, rx_dma_size, tx_ring_size;
|
||||||
int i;
|
int i;
|
||||||
@ -2595,7 +2602,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
|
|||||||
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
|
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
|
||||||
tx_ring_size = MTK_QDMA_RING_SIZE;
|
tx_ring_size = MTK_QDMA_RING_SIZE;
|
||||||
else
|
else
|
||||||
tx_ring_size = MTK_DMA_SIZE;
|
tx_ring_size = soc->tx.dma_size;
|
||||||
|
|
||||||
if (rx_flag == MTK_RX_FLAGS_QDMA) {
|
if (rx_flag == MTK_RX_FLAGS_QDMA) {
|
||||||
if (ring_no)
|
if (ring_no)
|
||||||
@ -2610,7 +2617,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
|
|||||||
rx_dma_size = MTK_HW_LRO_DMA_SIZE;
|
rx_dma_size = MTK_HW_LRO_DMA_SIZE;
|
||||||
} else {
|
} else {
|
||||||
rx_data_len = ETH_DATA_LEN;
|
rx_data_len = ETH_DATA_LEN;
|
||||||
rx_dma_size = MTK_DMA_SIZE;
|
rx_dma_size = soc->rx.dma_size;
|
||||||
}
|
}
|
||||||
|
|
||||||
ring->frag_size = mtk_max_frag_size(rx_data_len);
|
ring->frag_size = mtk_max_frag_size(rx_data_len);
|
||||||
@ -3139,7 +3146,10 @@ static void mtk_dma_free(struct mtk_eth *eth)
|
|||||||
mtk_rx_clean(eth, ð->rx_ring[i], false);
|
mtk_rx_clean(eth, ð->rx_ring[i], false);
|
||||||
}
|
}
|
||||||
|
|
||||||
kfree(eth->scratch_head);
|
for (i = 0; i < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); i++) {
|
||||||
|
kfree(eth->scratch_head[i]);
|
||||||
|
eth->scratch_head[i] = NULL;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static bool mtk_hw_reset_check(struct mtk_eth *eth)
|
static bool mtk_hw_reset_check(struct mtk_eth *eth)
|
||||||
@ -5052,11 +5062,14 @@ static const struct mtk_soc_data mt2701_data = {
|
|||||||
.desc_size = sizeof(struct mtk_tx_dma),
|
.desc_size = sizeof(struct mtk_tx_dma),
|
||||||
.dma_max_len = MTK_TX_DMA_BUF_LEN,
|
.dma_max_len = MTK_TX_DMA_BUF_LEN,
|
||||||
.dma_len_offset = 16,
|
.dma_len_offset = 16,
|
||||||
|
.dma_size = MTK_DMA_SIZE(2K),
|
||||||
|
.fq_dma_size = MTK_DMA_SIZE(2K),
|
||||||
},
|
},
|
||||||
.rx = {
|
.rx = {
|
||||||
.desc_size = sizeof(struct mtk_rx_dma),
|
.desc_size = sizeof(struct mtk_rx_dma),
|
||||||
.irq_done_mask = MTK_RX_DONE_INT,
|
.irq_done_mask = MTK_RX_DONE_INT,
|
||||||
.dma_l4_valid = RX_DMA_L4_VALID,
|
.dma_l4_valid = RX_DMA_L4_VALID,
|
||||||
|
.dma_size = MTK_DMA_SIZE(2K),
|
||||||
.dma_max_len = MTK_TX_DMA_BUF_LEN,
|
.dma_max_len = MTK_TX_DMA_BUF_LEN,
|
||||||
.dma_len_offset = 16,
|
.dma_len_offset = 16,
|
||||||
},
|
},
|
||||||
@ -5076,11 +5089,14 @@ static const struct mtk_soc_data mt7621_data = {
|
|||||||
.desc_size = sizeof(struct mtk_tx_dma),
|
.desc_size = sizeof(struct mtk_tx_dma),
|
||||||
.dma_max_len = MTK_TX_DMA_BUF_LEN,
|
.dma_max_len = MTK_TX_DMA_BUF_LEN,
|
||||||
.dma_len_offset = 16,
|
.dma_len_offset = 16,
|
||||||
|
.dma_size = MTK_DMA_SIZE(2K),
|
||||||
|
.fq_dma_size = MTK_DMA_SIZE(2K),
|
||||||
},
|
},
|
||||||
.rx = {
|
.rx = {
|
||||||
.desc_size = sizeof(struct mtk_rx_dma),
|
.desc_size = sizeof(struct mtk_rx_dma),
|
||||||
.irq_done_mask = MTK_RX_DONE_INT,
|
.irq_done_mask = MTK_RX_DONE_INT,
|
||||||
.dma_l4_valid = RX_DMA_L4_VALID,
|
.dma_l4_valid = RX_DMA_L4_VALID,
|
||||||
|
.dma_size = MTK_DMA_SIZE(2K),
|
||||||
.dma_max_len = MTK_TX_DMA_BUF_LEN,
|
.dma_max_len = MTK_TX_DMA_BUF_LEN,
|
||||||
.dma_len_offset = 16,
|
.dma_len_offset = 16,
|
||||||
},
|
},
|
||||||
@ -5102,11 +5118,14 @@ static const struct mtk_soc_data mt7622_data = {
|
|||||||
.desc_size = sizeof(struct mtk_tx_dma),
|
.desc_size = sizeof(struct mtk_tx_dma),
|
||||||
.dma_max_len = MTK_TX_DMA_BUF_LEN,
|
.dma_max_len = MTK_TX_DMA_BUF_LEN,
|
||||||
.dma_len_offset = 16,
|
.dma_len_offset = 16,
|
||||||
|
.dma_size = MTK_DMA_SIZE(2K),
|
||||||
|
.fq_dma_size = MTK_DMA_SIZE(2K),
|
||||||
},
|
},
|
||||||
.rx = {
|
.rx = {
|
||||||
.desc_size = sizeof(struct mtk_rx_dma),
|
.desc_size = sizeof(struct mtk_rx_dma),
|
||||||
.irq_done_mask = MTK_RX_DONE_INT,
|
.irq_done_mask = MTK_RX_DONE_INT,
|
||||||
.dma_l4_valid = RX_DMA_L4_VALID,
|
.dma_l4_valid = RX_DMA_L4_VALID,
|
||||||
|
.dma_size = MTK_DMA_SIZE(2K),
|
||||||
.dma_max_len = MTK_TX_DMA_BUF_LEN,
|
.dma_max_len = MTK_TX_DMA_BUF_LEN,
|
||||||
.dma_len_offset = 16,
|
.dma_len_offset = 16,
|
||||||
},
|
},
|
||||||
@ -5127,11 +5146,14 @@ static const struct mtk_soc_data mt7623_data = {
|
|||||||
.desc_size = sizeof(struct mtk_tx_dma),
|
.desc_size = sizeof(struct mtk_tx_dma),
|
||||||
.dma_max_len = MTK_TX_DMA_BUF_LEN,
|
.dma_max_len = MTK_TX_DMA_BUF_LEN,
|
||||||
.dma_len_offset = 16,
|
.dma_len_offset = 16,
|
||||||
|
.dma_size = MTK_DMA_SIZE(2K),
|
||||||
|
.fq_dma_size = MTK_DMA_SIZE(2K),
|
||||||
},
|
},
|
||||||
.rx = {
|
.rx = {
|
||||||
.desc_size = sizeof(struct mtk_rx_dma),
|
.desc_size = sizeof(struct mtk_rx_dma),
|
||||||
.irq_done_mask = MTK_RX_DONE_INT,
|
.irq_done_mask = MTK_RX_DONE_INT,
|
||||||
.dma_l4_valid = RX_DMA_L4_VALID,
|
.dma_l4_valid = RX_DMA_L4_VALID,
|
||||||
|
.dma_size = MTK_DMA_SIZE(2K),
|
||||||
.dma_max_len = MTK_TX_DMA_BUF_LEN,
|
.dma_max_len = MTK_TX_DMA_BUF_LEN,
|
||||||
.dma_len_offset = 16,
|
.dma_len_offset = 16,
|
||||||
},
|
},
|
||||||
@ -5150,11 +5172,14 @@ static const struct mtk_soc_data mt7629_data = {
|
|||||||
.desc_size = sizeof(struct mtk_tx_dma),
|
.desc_size = sizeof(struct mtk_tx_dma),
|
||||||
.dma_max_len = MTK_TX_DMA_BUF_LEN,
|
.dma_max_len = MTK_TX_DMA_BUF_LEN,
|
||||||
.dma_len_offset = 16,
|
.dma_len_offset = 16,
|
||||||
|
.dma_size = MTK_DMA_SIZE(2K),
|
||||||
|
.fq_dma_size = MTK_DMA_SIZE(2K),
|
||||||
},
|
},
|
||||||
.rx = {
|
.rx = {
|
||||||
.desc_size = sizeof(struct mtk_rx_dma),
|
.desc_size = sizeof(struct mtk_rx_dma),
|
||||||
.irq_done_mask = MTK_RX_DONE_INT,
|
.irq_done_mask = MTK_RX_DONE_INT,
|
||||||
.dma_l4_valid = RX_DMA_L4_VALID,
|
.dma_l4_valid = RX_DMA_L4_VALID,
|
||||||
|
.dma_size = MTK_DMA_SIZE(2K),
|
||||||
.dma_max_len = MTK_TX_DMA_BUF_LEN,
|
.dma_max_len = MTK_TX_DMA_BUF_LEN,
|
||||||
.dma_len_offset = 16,
|
.dma_len_offset = 16,
|
||||||
},
|
},
|
||||||
@ -5176,6 +5201,8 @@ static const struct mtk_soc_data mt7981_data = {
|
|||||||
.desc_size = sizeof(struct mtk_tx_dma_v2),
|
.desc_size = sizeof(struct mtk_tx_dma_v2),
|
||||||
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
|
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
|
||||||
.dma_len_offset = 8,
|
.dma_len_offset = 8,
|
||||||
|
.dma_size = MTK_DMA_SIZE(2K),
|
||||||
|
.fq_dma_size = MTK_DMA_SIZE(2K),
|
||||||
},
|
},
|
||||||
.rx = {
|
.rx = {
|
||||||
.desc_size = sizeof(struct mtk_rx_dma),
|
.desc_size = sizeof(struct mtk_rx_dma),
|
||||||
@ -5183,6 +5210,7 @@ static const struct mtk_soc_data mt7981_data = {
|
|||||||
.dma_l4_valid = RX_DMA_L4_VALID_V2,
|
.dma_l4_valid = RX_DMA_L4_VALID_V2,
|
||||||
.dma_max_len = MTK_TX_DMA_BUF_LEN,
|
.dma_max_len = MTK_TX_DMA_BUF_LEN,
|
||||||
.dma_len_offset = 16,
|
.dma_len_offset = 16,
|
||||||
|
.dma_size = MTK_DMA_SIZE(2K),
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -5202,6 +5230,8 @@ static const struct mtk_soc_data mt7986_data = {
|
|||||||
.desc_size = sizeof(struct mtk_tx_dma_v2),
|
.desc_size = sizeof(struct mtk_tx_dma_v2),
|
||||||
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
|
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
|
||||||
.dma_len_offset = 8,
|
.dma_len_offset = 8,
|
||||||
|
.dma_size = MTK_DMA_SIZE(2K),
|
||||||
|
.fq_dma_size = MTK_DMA_SIZE(2K),
|
||||||
},
|
},
|
||||||
.rx = {
|
.rx = {
|
||||||
.desc_size = sizeof(struct mtk_rx_dma),
|
.desc_size = sizeof(struct mtk_rx_dma),
|
||||||
@ -5209,6 +5239,7 @@ static const struct mtk_soc_data mt7986_data = {
|
|||||||
.dma_l4_valid = RX_DMA_L4_VALID_V2,
|
.dma_l4_valid = RX_DMA_L4_VALID_V2,
|
||||||
.dma_max_len = MTK_TX_DMA_BUF_LEN,
|
.dma_max_len = MTK_TX_DMA_BUF_LEN,
|
||||||
.dma_len_offset = 16,
|
.dma_len_offset = 16,
|
||||||
|
.dma_size = MTK_DMA_SIZE(2K),
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -5228,6 +5259,8 @@ static const struct mtk_soc_data mt7988_data = {
|
|||||||
.desc_size = sizeof(struct mtk_tx_dma_v2),
|
.desc_size = sizeof(struct mtk_tx_dma_v2),
|
||||||
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
|
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
|
||||||
.dma_len_offset = 8,
|
.dma_len_offset = 8,
|
||||||
|
.dma_size = MTK_DMA_SIZE(2K),
|
||||||
|
.fq_dma_size = MTK_DMA_SIZE(4K),
|
||||||
},
|
},
|
||||||
.rx = {
|
.rx = {
|
||||||
.desc_size = sizeof(struct mtk_rx_dma_v2),
|
.desc_size = sizeof(struct mtk_rx_dma_v2),
|
||||||
@ -5235,6 +5268,7 @@ static const struct mtk_soc_data mt7988_data = {
|
|||||||
.dma_l4_valid = RX_DMA_L4_VALID_V2,
|
.dma_l4_valid = RX_DMA_L4_VALID_V2,
|
||||||
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
|
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
|
||||||
.dma_len_offset = 8,
|
.dma_len_offset = 8,
|
||||||
|
.dma_size = MTK_DMA_SIZE(2K),
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -5249,6 +5283,7 @@ static const struct mtk_soc_data rt5350_data = {
|
|||||||
.desc_size = sizeof(struct mtk_tx_dma),
|
.desc_size = sizeof(struct mtk_tx_dma),
|
||||||
.dma_max_len = MTK_TX_DMA_BUF_LEN,
|
.dma_max_len = MTK_TX_DMA_BUF_LEN,
|
||||||
.dma_len_offset = 16,
|
.dma_len_offset = 16,
|
||||||
|
.dma_size = MTK_DMA_SIZE(2K),
|
||||||
},
|
},
|
||||||
.rx = {
|
.rx = {
|
||||||
.desc_size = sizeof(struct mtk_rx_dma),
|
.desc_size = sizeof(struct mtk_rx_dma),
|
||||||
@ -5256,6 +5291,7 @@ static const struct mtk_soc_data rt5350_data = {
|
|||||||
.dma_l4_valid = RX_DMA_L4_VALID_PDMA,
|
.dma_l4_valid = RX_DMA_L4_VALID_PDMA,
|
||||||
.dma_max_len = MTK_TX_DMA_BUF_LEN,
|
.dma_max_len = MTK_TX_DMA_BUF_LEN,
|
||||||
.dma_len_offset = 16,
|
.dma_len_offset = 16,
|
||||||
|
.dma_size = MTK_DMA_SIZE(2K),
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -32,7 +32,9 @@
|
|||||||
#define MTK_TX_DMA_BUF_LEN 0x3fff
|
#define MTK_TX_DMA_BUF_LEN 0x3fff
|
||||||
#define MTK_TX_DMA_BUF_LEN_V2 0xffff
|
#define MTK_TX_DMA_BUF_LEN_V2 0xffff
|
||||||
#define MTK_QDMA_RING_SIZE 2048
|
#define MTK_QDMA_RING_SIZE 2048
|
||||||
#define MTK_DMA_SIZE 512
|
#define MTK_DMA_SIZE(x) (SZ_##x)
|
||||||
|
#define MTK_FQ_DMA_HEAD 32
|
||||||
|
#define MTK_FQ_DMA_LENGTH 2048
|
||||||
#define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN)
|
#define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN)
|
||||||
#define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
|
#define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
|
||||||
#define MTK_DMA_DUMMY_DESC 0xffffffff
|
#define MTK_DMA_DUMMY_DESC 0xffffffff
|
||||||
@ -1176,6 +1178,8 @@ struct mtk_soc_data {
|
|||||||
u32 desc_size;
|
u32 desc_size;
|
||||||
u32 dma_max_len;
|
u32 dma_max_len;
|
||||||
u32 dma_len_offset;
|
u32 dma_len_offset;
|
||||||
|
u32 dma_size;
|
||||||
|
u32 fq_dma_size;
|
||||||
} tx;
|
} tx;
|
||||||
struct {
|
struct {
|
||||||
u32 desc_size;
|
u32 desc_size;
|
||||||
@ -1183,6 +1187,7 @@ struct mtk_soc_data {
|
|||||||
u32 dma_l4_valid;
|
u32 dma_l4_valid;
|
||||||
u32 dma_max_len;
|
u32 dma_max_len;
|
||||||
u32 dma_len_offset;
|
u32 dma_len_offset;
|
||||||
|
u32 dma_size;
|
||||||
} rx;
|
} rx;
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -1264,7 +1269,7 @@ struct mtk_eth {
|
|||||||
struct napi_struct rx_napi;
|
struct napi_struct rx_napi;
|
||||||
void *scratch_ring;
|
void *scratch_ring;
|
||||||
dma_addr_t phy_scratch_ring;
|
dma_addr_t phy_scratch_ring;
|
||||||
void *scratch_head;
|
void *scratch_head[MTK_FQ_DMA_HEAD];
|
||||||
struct clk *clks[MTK_CLK_MAX];
|
struct clk *clks[MTK_CLK_MAX];
|
||||||
|
|
||||||
struct mii_bus *mii_bus;
|
struct mii_bus *mii_bus;
|
||||||
|
Loading…
Reference in New Issue
Block a user