Merge branch 'net-stmmac-Some-improvements-and-a-fix'
Jose Abreu says:

====================
net: stmmac: Some improvements and a fix

Some performance improvements (01/03 and 03/03) and a fix (02/03), all
for -next.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit de90573e30
@@ -3,6 +3,7 @@ config STMMAC_ETH
 	tristate "STMicroelectronics Multi-Gigabit Ethernet driver"
 	depends on HAS_IOMEM && HAS_DMA
 	select MII
+	select PAGE_POOL
 	select PHYLINK
 	select CRC32
 	imply PTP_1588_CLOCK
@@ -252,6 +252,7 @@ struct stmmac_safety_stats {
 #define STMMAC_MAX_COAL_TX_TICK	100000
 #define STMMAC_TX_MAX_FRAMES	256
 #define STMMAC_TX_FRAMES	1
+#define STMMAC_RX_FRAMES	25
 
 /* Packets types */
 enum packets_types {
@@ -289,18 +289,18 @@ static void sun8i_dwmac_dma_init(void __iomem *ioaddr,
 
 static void sun8i_dwmac_dma_init_rx(void __iomem *ioaddr,
 				    struct stmmac_dma_cfg *dma_cfg,
-				    u32 dma_rx_phy, u32 chan)
+				    dma_addr_t dma_rx_phy, u32 chan)
 {
 	/* Write RX descriptors address */
-	writel(dma_rx_phy, ioaddr + EMAC_RX_DESC_LIST);
+	writel(lower_32_bits(dma_rx_phy), ioaddr + EMAC_RX_DESC_LIST);
 }
 
 static void sun8i_dwmac_dma_init_tx(void __iomem *ioaddr,
 				    struct stmmac_dma_cfg *dma_cfg,
-				    u32 dma_tx_phy, u32 chan)
+				    dma_addr_t dma_tx_phy, u32 chan)
 {
 	/* Write TX descriptors address */
-	writel(dma_tx_phy, ioaddr + EMAC_TX_DESC_LIST);
+	writel(lower_32_bits(dma_tx_phy), ioaddr + EMAC_TX_DESC_LIST);
 }
 
 /* sun8i_dwmac_dump_regs() - Dump EMAC address space
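This is the fix in the series (02/03): descriptor base addresses now travel as dma_addr_t instead of u32, and each writel() masks explicitly with lower_32_bits() instead of silently truncating through the parameter type. The same pattern repeats in the dwmac1000, dwmac100 and dwmac4 hunks below. A minimal user-space sketch of the masking, with the two kernel macros re-implemented so it builds stand-alone and a hypothetical ring address above 4 GiB:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel's lower_32_bits()/upper_32_bits() helpers. */
#define lower_32_bits(n) ((uint32_t)((n) & 0xffffffff))
#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))

int main(void)
{
	/* Hypothetical descriptor ring base above the 4 GiB boundary. */
	uint64_t dma_rx_phy = 0x000000012345a000ULL;

	/* A 32-bit base-address register gets only the low half... */
	printf("RX_DESC_LIST <- 0x%08x\n", lower_32_bits(dma_rx_phy));
	/* ...so truncating through a u32 parameter, as the old prototypes
	 * did, silently dropped these high bits on 64-bit platforms. */
	printf("dropped high bits: 0x%08x\n", upper_32_bits(dma_rx_phy));
	return 0;
}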
@@ -112,18 +112,18 @@ static void dwmac1000_dma_init(void __iomem *ioaddr,
 
 static void dwmac1000_dma_init_rx(void __iomem *ioaddr,
 				  struct stmmac_dma_cfg *dma_cfg,
-				  u32 dma_rx_phy, u32 chan)
+				  dma_addr_t dma_rx_phy, u32 chan)
 {
 	/* RX descriptor base address list must be written into DMA CSR3 */
-	writel(dma_rx_phy, ioaddr + DMA_RCV_BASE_ADDR);
+	writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_RCV_BASE_ADDR);
 }
 
 static void dwmac1000_dma_init_tx(void __iomem *ioaddr,
 				  struct stmmac_dma_cfg *dma_cfg,
-				  u32 dma_tx_phy, u32 chan)
+				  dma_addr_t dma_tx_phy, u32 chan)
 {
 	/* TX descriptor base address list must be written into DMA CSR4 */
-	writel(dma_tx_phy, ioaddr + DMA_TX_BASE_ADDR);
+	writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_TX_BASE_ADDR);
 }
 
 static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz)
@@ -31,18 +31,18 @@ static void dwmac100_dma_init(void __iomem *ioaddr,
 
 static void dwmac100_dma_init_rx(void __iomem *ioaddr,
 				 struct stmmac_dma_cfg *dma_cfg,
-				 u32 dma_rx_phy, u32 chan)
+				 dma_addr_t dma_rx_phy, u32 chan)
 {
 	/* RX descriptor base addr lists must be written into DMA CSR3 */
-	writel(dma_rx_phy, ioaddr + DMA_RCV_BASE_ADDR);
+	writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_RCV_BASE_ADDR);
 }
 
 static void dwmac100_dma_init_tx(void __iomem *ioaddr,
 				 struct stmmac_dma_cfg *dma_cfg,
-				 u32 dma_tx_phy, u32 chan)
+				 dma_addr_t dma_tx_phy, u32 chan)
 {
 	/* TX descriptor base addr lists must be written into DMA CSR4 */
-	writel(dma_tx_phy, ioaddr + DMA_TX_BASE_ADDR);
+	writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_TX_BASE_ADDR);
 }
 
 /* Store and Forward capability is not used at all.
@@ -70,7 +70,7 @@ static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
 
 static void dwmac4_dma_init_rx_chan(void __iomem *ioaddr,
 				    struct stmmac_dma_cfg *dma_cfg,
-				    u32 dma_rx_phy, u32 chan)
+				    dma_addr_t dma_rx_phy, u32 chan)
 {
 	u32 value;
 	u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
@@ -79,12 +79,12 @@ static void dwmac4_dma_init_rx_chan(void __iomem *ioaddr,
 	value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
 	writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
 
-	writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(chan));
+	writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_CHAN_RX_BASE_ADDR(chan));
 }
 
 static void dwmac4_dma_init_tx_chan(void __iomem *ioaddr,
 				    struct stmmac_dma_cfg *dma_cfg,
-				    u32 dma_tx_phy, u32 chan)
+				    dma_addr_t dma_tx_phy, u32 chan)
 {
 	u32 value;
 	u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
@@ -97,7 +97,7 @@ static void dwmac4_dma_init_tx_chan(void __iomem *ioaddr,
 
 	writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
 
-	writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(chan));
+	writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_CHAN_TX_BASE_ADDR(chan));
 }
 
 static void dwmac4_dma_init_channel(void __iomem *ioaddr,
@@ -199,7 +199,9 @@
 #define XGMAC_RxPBL			GENMASK(21, 16)
 #define XGMAC_RxPBL_SHIFT		16
 #define XGMAC_RXST			BIT(0)
+#define XGMAC_DMA_CH_TxDESC_HADDR(x)	(0x00003110 + (0x80 * (x)))
 #define XGMAC_DMA_CH_TxDESC_LADDR(x)	(0x00003114 + (0x80 * (x)))
+#define XGMAC_DMA_CH_RxDESC_HADDR(x)	(0x00003118 + (0x80 * (x)))
 #define XGMAC_DMA_CH_RxDESC_LADDR(x)	(0x0000311c + (0x80 * (x)))
 #define XGMAC_DMA_CH_TxDESC_TAIL_LPTR(x)	(0x00003124 + (0x80 * (x)))
 #define XGMAC_DMA_CH_RxDESC_TAIL_LPTR(x)	(0x0000312c + (0x80 * (x)))
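The two new HADDR defines extend the existing per-channel layout, in which each DMA channel owns a 0x80-byte register window. A stand-alone sketch of how the macros expand per channel, with the offsets copied from the defines above:

#include <stdio.h>

#define XGMAC_DMA_CH_TxDESC_HADDR(x)	(0x00003110 + (0x80 * (x)))
#define XGMAC_DMA_CH_TxDESC_LADDR(x)	(0x00003114 + (0x80 * (x)))

int main(void)
{
	/* Each channel's registers sit one 0x80-byte stride apart. */
	for (int chan = 0; chan < 4; chan++)
		printf("chan %d: TxDESC HADDR 0x%05x, LADDR 0x%05x\n", chan,
		       XGMAC_DMA_CH_TxDESC_HADDR(chan),
		       XGMAC_DMA_CH_TxDESC_LADDR(chan));
	return 0;
}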
@@ -44,7 +44,7 @@ static void dwxgmac2_dma_init_chan(void __iomem *ioaddr,
 
 static void dwxgmac2_dma_init_rx_chan(void __iomem *ioaddr,
 				      struct stmmac_dma_cfg *dma_cfg,
-				      u32 dma_rx_phy, u32 chan)
+				      dma_addr_t phy, u32 chan)
 {
 	u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
 	u32 value;
@@ -54,12 +54,13 @@ static void dwxgmac2_dma_init_rx_chan(void __iomem *ioaddr,
 	value |= (rxpbl << XGMAC_RxPBL_SHIFT) & XGMAC_RxPBL;
 	writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
 
-	writel(dma_rx_phy, ioaddr + XGMAC_DMA_CH_RxDESC_LADDR(chan));
+	writel(upper_32_bits(phy), ioaddr + XGMAC_DMA_CH_RxDESC_HADDR(chan));
+	writel(lower_32_bits(phy), ioaddr + XGMAC_DMA_CH_RxDESC_LADDR(chan));
 }
 
 static void dwxgmac2_dma_init_tx_chan(void __iomem *ioaddr,
 				      struct stmmac_dma_cfg *dma_cfg,
-				      u32 dma_tx_phy, u32 chan)
+				      dma_addr_t phy, u32 chan)
 {
 	u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
 	u32 value;
@@ -70,7 +71,8 @@ static void dwxgmac2_dma_init_tx_chan(void __iomem *ioaddr,
 	value |= XGMAC_OSP;
 	writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
 
-	writel(dma_tx_phy, ioaddr + XGMAC_DMA_CH_TxDESC_LADDR(chan));
+	writel(upper_32_bits(phy), ioaddr + XGMAC_DMA_CH_TxDESC_HADDR(chan));
+	writel(lower_32_bits(phy), ioaddr + XGMAC_DMA_CH_TxDESC_LADDR(chan));
 }
 
 static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
@@ -150,10 +150,10 @@ struct stmmac_dma_ops {
 		     struct stmmac_dma_cfg *dma_cfg, u32 chan);
 	void (*init_rx_chan)(void __iomem *ioaddr,
 			     struct stmmac_dma_cfg *dma_cfg,
-			     u32 dma_rx_phy, u32 chan);
+			     dma_addr_t phy, u32 chan);
 	void (*init_tx_chan)(void __iomem *ioaddr,
 			     struct stmmac_dma_cfg *dma_cfg,
-			     u32 dma_tx_phy, u32 chan);
+			     dma_addr_t phy, u32 chan);
 	/* Configure the AXI Bus Mode Register */
 	void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi);
 	/* Dump DMA registers */
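Since all backends are dispatched through the stmmac_dma_ops table, the init_rx_chan/init_tx_chan prototypes and every implementation must change together. A minimal user-space sketch of that ops-table pattern with the widened address type (hypothetical names; dma_addr_t re-declared locally):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;	/* stand-in for the kernel typedef */

/* Trimmed-down analogue of struct stmmac_dma_ops. */
struct demo_dma_ops {
	void (*init_rx_chan)(dma_addr_t phy, uint32_t chan);
};

/* Each backend decides how much of the address its registers take. */
static void demo_init_rx_chan(dma_addr_t phy, uint32_t chan)
{
	printf("chan %u: LADDR 0x%08x HADDR 0x%08x\n", chan,
	       (uint32_t)(phy & 0xffffffff), (uint32_t)(phy >> 32));
}

int main(void)
{
	const struct demo_dma_ops ops = { .init_rx_chan = demo_init_rx_chan };

	ops.init_rx_chan(0x12345a000ULL, 0);
	return 0;
}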
@@ -20,6 +20,7 @@
 #include <linux/ptp_clock_kernel.h>
 #include <linux/net_tstamp.h>
 #include <linux/reset.h>
+#include <net/page_pool.h>
 
 struct stmmac_resources {
 	void __iomem *addr;
@@ -54,13 +55,19 @@ struct stmmac_tx_queue {
 	u32 mss;
 };
 
+struct stmmac_rx_buffer {
+	struct page *page;
+	dma_addr_t addr;
+};
+
 struct stmmac_rx_queue {
+	u32 rx_count_frames;
 	u32 queue_index;
+	struct page_pool *page_pool;
+	struct stmmac_rx_buffer *buf_pool;
 	struct stmmac_priv *priv_data;
 	struct dma_extended_desc *dma_erx;
 	struct dma_desc *dma_rx ____cacheline_aligned_in_smp;
-	struct sk_buff **rx_skbuff;
-	dma_addr_t *rx_skbuff_dma;
 	unsigned int cur_rx;
 	unsigned int dirty_rx;
 	u32 rx_zeroc_thresh;
@@ -110,6 +117,7 @@ struct stmmac_priv {
 	/* Frequently used values are kept adjacent for cache effect */
 	u32 tx_coal_frames;
 	u32 tx_coal_timer;
+	u32 rx_coal_frames;
 
 	int tx_coalesce;
 	int hwts_tx_en;
@@ -701,8 +701,10 @@ static int stmmac_get_coalesce(struct net_device *dev,
 	ec->tx_coalesce_usecs = priv->tx_coal_timer;
 	ec->tx_max_coalesced_frames = priv->tx_coal_frames;
 
-	if (priv->use_riwt)
+	if (priv->use_riwt) {
+		ec->rx_max_coalesced_frames = priv->rx_coal_frames;
 		ec->rx_coalesce_usecs = stmmac_riwt2usec(priv->rx_riwt, priv);
+	}
 
 	return 0;
 }
@@ -715,7 +717,7 @@ static int stmmac_set_coalesce(struct net_device *dev,
 	unsigned int rx_riwt;
 
 	/* Check not supported parameters */
-	if ((ec->rx_max_coalesced_frames) || (ec->rx_coalesce_usecs_irq) ||
+	if ((ec->rx_coalesce_usecs_irq) ||
 	    (ec->rx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) ||
 	    (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) ||
 	    (ec->pkt_rate_low) || (ec->rx_coalesce_usecs_low) ||
@@ -749,6 +751,7 @@ static int stmmac_set_coalesce(struct net_device *dev,
 	/* Only copy relevant parameters, ignore all others. */
 	priv->tx_coal_frames = ec->tx_max_coalesced_frames;
 	priv->tx_coal_timer = ec->tx_coalesce_usecs;
+	priv->rx_coal_frames = ec->rx_max_coalesced_frames;
 	priv->rx_riwt = rx_riwt;
 	stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt);
 
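With rx_coal_frames now copied in here and reported back in stmmac_get_coalesce(), RX frame coalescing becomes tunable through the standard interface (e.g. ethtool -C ethX rx-frames 25). A small user-space reader for the same fields over the classic SIOCETHTOOL ioctl; the interface name "eth0" is an assumption and the snippet naturally needs a Linux host:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <unistd.h>

int main(void)
{
	struct ethtool_coalesce ec = { .cmd = ETHTOOL_GCOALESCE };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed name */
	ifr.ifr_data = (char *)&ec;

	/* Same fields the driver fills in stmmac_get_coalesce(). */
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("rx-frames %u, rx-usecs %u\n",
		       ec.rx_max_coalesced_frames, ec.rx_coalesce_usecs);
	close(fd);
	return 0;
}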
@@ -1197,26 +1197,14 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
 				  int i, gfp_t flags, u32 queue)
 {
 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
-	struct sk_buff *skb;
+	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
 
-	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
-	if (!skb) {
-		netdev_err(priv->dev,
-			   "%s: Rx init fails; skb is NULL\n", __func__);
+	buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
+	if (!buf->page)
 		return -ENOMEM;
-	}
-	rx_q->rx_skbuff[i] = skb;
-	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
-						priv->dma_buf_sz,
-						DMA_FROM_DEVICE);
-	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
-		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
-		dev_kfree_skb_any(skb);
-		return -EINVAL;
-	}
-
-	stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
 
+	buf->addr = page_pool_get_dma_addr(buf->page);
+	stmmac_set_desc_addr(priv, p, buf->addr);
 	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
 		stmmac_init_desc3(priv, p);
 
@@ -1232,13 +1220,11 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
 {
 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
 
-	if (rx_q->rx_skbuff[i]) {
-		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
-				 priv->dma_buf_sz, DMA_FROM_DEVICE);
-		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
-	}
-	rx_q->rx_skbuff[i] = NULL;
+	if (buf->page)
+		page_pool_put_page(rx_q->page_pool, buf->page, false);
+	buf->page = NULL;
 }
 
 /**
@@ -1321,10 +1307,6 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
 						     queue);
 			if (ret)
 				goto err_init_rx_buffers;
-
-			netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
-				  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
-				  (unsigned int)rx_q->rx_skbuff_dma[i]);
 		}
 
 		rx_q->cur_rx = 0;
@@ -1498,8 +1480,11 @@ static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
 					  sizeof(struct dma_extended_desc),
 					  rx_q->dma_erx, rx_q->dma_rx_phy);
 
-		kfree(rx_q->rx_skbuff_dma);
-		kfree(rx_q->rx_skbuff);
+		kfree(rx_q->buf_pool);
+		if (rx_q->page_pool) {
+			page_pool_request_shutdown(rx_q->page_pool);
+			page_pool_destroy(rx_q->page_pool);
+		}
 	}
 }
 
@@ -1551,20 +1536,29 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
 	/* RX queues buffers and DMA */
 	for (queue = 0; queue < rx_count; queue++) {
 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+		struct page_pool_params pp_params = { 0 };
 
 		rx_q->queue_index = queue;
 		rx_q->priv_data = priv;
 
-		rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
-						    sizeof(dma_addr_t),
-						    GFP_KERNEL);
-		if (!rx_q->rx_skbuff_dma)
-			goto err_dma;
+		pp_params.flags = PP_FLAG_DMA_MAP;
+		pp_params.pool_size = DMA_RX_SIZE;
+		pp_params.order = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
+		pp_params.nid = dev_to_node(priv->device);
+		pp_params.dev = priv->device;
+		pp_params.dma_dir = DMA_FROM_DEVICE;
 
-		rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
-						sizeof(struct sk_buff *),
-						GFP_KERNEL);
-		if (!rx_q->rx_skbuff)
+		rx_q->page_pool = page_pool_create(&pp_params);
+		if (IS_ERR(rx_q->page_pool)) {
+			ret = PTR_ERR(rx_q->page_pool);
+			rx_q->page_pool = NULL;
+			goto err_dma;
+		}
+
+		rx_q->buf_pool = kmalloc_array(DMA_RX_SIZE,
+					       sizeof(*rx_q->buf_pool),
+					       GFP_KERNEL);
+		if (!rx_q->buf_pool)
 			goto err_dma;
 
 		if (priv->extend_desc) {
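Each RX queue now owns a page pool sized to the descriptor ring, with DMA mapping delegated to the pool via PP_FLAG_DMA_MAP. The order parameter comes from rounding the buffer size up to whole pages; a stand-alone sketch of that DIV_ROUND_UP() arithmetic, with the kernel macro re-implemented and a 4 KiB page assumed:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define PAGE_SIZE		4096	/* assumed for the example */

int main(void)
{
	unsigned int sizes[] = { 1536, 4096, 8192, 16384 };

	/* Round each candidate dma_buf_sz up to a whole number of pages. */
	for (int i = 0; i < 4; i++)
		printf("dma_buf_sz %5u -> %u page(s) per buffer\n",
		       sizes[i], DIV_ROUND_UP(sizes[i], PAGE_SIZE));
	return 0;
}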
@@ -2268,20 +2262,21 @@ static void stmmac_tx_timer(struct timer_list *t)
 }
 
 /**
- * stmmac_init_tx_coalesce - init tx mitigation options.
+ * stmmac_init_coalesce - init mitigation options.
  * @priv: driver private structure
  * Description:
- * This inits the transmit coalesce parameters: i.e. timer rate,
+ * This inits the coalesce parameters: i.e. timer rate,
  * timer handler and default threshold used for enabling the
  * interrupt on completion bit.
  */
-static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
+static void stmmac_init_coalesce(struct stmmac_priv *priv)
 {
 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
 	u32 chan;
 
 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
+	priv->rx_coal_frames = STMMAC_RX_FRAMES;
 
 	for (chan = 0; chan < tx_channel_count; chan++) {
 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
@@ -2651,7 +2646,7 @@ static int stmmac_open(struct net_device *dev)
 		goto init_error;
 	}
 
-	stmmac_init_tx_coalesce(priv);
+	stmmac_init_coalesce(priv);
 
 	phylink_start(priv->phylink);
 
@@ -3285,55 +3280,32 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
 	int dirty = stmmac_rx_dirty(priv, queue);
 	unsigned int entry = rx_q->dirty_rx;
 
-	int bfsize = priv->dma_buf_sz;
-
 	while (dirty-- > 0) {
+		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
 		struct dma_desc *p;
+		bool use_rx_wd;
 
 		if (priv->extend_desc)
 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
 		else
 			p = rx_q->dma_rx + entry;
 
-		if (likely(!rx_q->rx_skbuff[entry])) {
-			struct sk_buff *skb;
-
-			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
-			if (unlikely(!skb)) {
-				/* so for a while no zero-copy! */
-				rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
-				if (unlikely(net_ratelimit()))
-					dev_err(priv->device,
-						"fail to alloc skb entry %d\n",
-						entry);
+		if (!buf->page) {
+			buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
+			if (!buf->page)
 				break;
-			}
-
-			rx_q->rx_skbuff[entry] = skb;
-			rx_q->rx_skbuff_dma[entry] =
-			    dma_map_single(priv->device, skb->data, bfsize,
-					   DMA_FROM_DEVICE);
-			if (dma_mapping_error(priv->device,
-					      rx_q->rx_skbuff_dma[entry])) {
-				netdev_err(priv->dev, "Rx DMA map failed\n");
-				dev_kfree_skb(skb);
-				break;
-			}
-
-			stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
-			stmmac_refill_desc3(priv, rx_q, p);
-
-			if (rx_q->rx_zeroc_thresh > 0)
-				rx_q->rx_zeroc_thresh--;
-
-			netif_dbg(priv, rx_status, priv->dev,
-				  "refill entry #%d\n", entry);
 		}
-		dma_wmb();
 
-		stmmac_set_rx_owner(priv, p, priv->use_riwt);
+		buf->addr = page_pool_get_dma_addr(buf->page);
+		stmmac_set_desc_addr(priv, p, buf->addr);
+		stmmac_refill_desc3(priv, rx_q, p);
+
+		rx_q->rx_count_frames++;
+		rx_q->rx_count_frames %= priv->rx_coal_frames;
+		use_rx_wd = priv->use_riwt && rx_q->rx_count_frames;
 
 		dma_wmb();
+		stmmac_set_rx_owner(priv, p, use_rx_wd);
 
 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
 	}
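The refill loop is where the new RX frame coalescing takes effect: the per-queue counter wraps modulo rx_coal_frames, and use_rx_wd stays true (deferring the interrupt to the RIWT watchdog timer) on all but every rx_coal_frames-th descriptor. A stand-alone sketch of that counting pattern:

#include <stdbool.h>
#include <stdio.h>

#define STMMAC_RX_FRAMES	25	/* default from the header hunk above */

int main(void)
{
	unsigned int rx_count_frames = 0;
	bool use_riwt = true;	/* RX watchdog available */

	/* Refill 100 descriptors; show which ones keep the immediate
	 * interrupt-on-completion (use_rx_wd == false). */
	for (int entry = 0; entry < 100; entry++) {
		bool use_rx_wd;

		rx_count_frames++;
		rx_count_frames %= STMMAC_RX_FRAMES;
		use_rx_wd = use_riwt && rx_count_frames;

		if (!use_rx_wd)
			printf("entry %2d: IRQ on completion\n", entry);
	}
	return 0;
}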
@@ -3358,9 +3330,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 	unsigned int next_entry = rx_q->cur_rx;
 	int coe = priv->hw->rx_csum;
 	unsigned int count = 0;
-	bool xmac;
-
-	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
 
 	if (netif_msg_rx_status(priv)) {
 		void *rx_head;
@@ -3374,11 +3343,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 		stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
 	}
 	while (count < limit) {
+		struct stmmac_rx_buffer *buf;
+		struct dma_desc *np, *p;
 		int entry, status;
-		struct dma_desc *p;
-		struct dma_desc *np;
 
 		entry = next_entry;
+		buf = &rx_q->buf_pool[entry];
 
 		if (priv->extend_desc)
 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
@@ -3408,20 +3378,9 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 			stmmac_rx_extended_status(priv, &priv->dev->stats,
 					&priv->xstats, rx_q->dma_erx + entry);
 		if (unlikely(status == discard_frame)) {
+			page_pool_recycle_direct(rx_q->page_pool, buf->page);
 			priv->dev->stats.rx_errors++;
-			if (priv->hwts_rx_en && !priv->extend_desc) {
-				/* DESC2 & DESC3 will be overwritten by device
-				 * with timestamp value, hence reinitialize
-				 * them in stmmac_rx_refill() function so that
-				 * device can reuse it.
-				 */
-				dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
-				rx_q->rx_skbuff[entry] = NULL;
-				dma_unmap_single(priv->device,
-						 rx_q->rx_skbuff_dma[entry],
-						 priv->dma_buf_sz,
-						 DMA_FROM_DEVICE);
-			}
+			buf->page = NULL;
 		} else {
 			struct sk_buff *skb;
 			int frame_len;
@@ -3461,58 +3420,20 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 						   frame_len, status);
 			}
 
-			/* The zero-copy is always used for all the sizes
-			 * in case of GMAC4 because it needs
-			 * to refill the used descriptors, always.
-			 */
-			if (unlikely(!xmac &&
-				     ((frame_len < priv->rx_copybreak) ||
-				     stmmac_rx_threshold_count(rx_q)))) {
-				skb = netdev_alloc_skb_ip_align(priv->dev,
-								frame_len);
-				if (unlikely(!skb)) {
-					if (net_ratelimit())
-						dev_warn(priv->device,
-							 "packet dropped\n");
-					priv->dev->stats.rx_dropped++;
-					continue;
-				}
+			skb = netdev_alloc_skb_ip_align(priv->dev, frame_len);
+			if (unlikely(!skb)) {
+				priv->dev->stats.rx_dropped++;
+				continue;
+			}
 
-				dma_sync_single_for_cpu(priv->device,
-							rx_q->rx_skbuff_dma
-							[entry], frame_len,
-							DMA_FROM_DEVICE);
-				skb_copy_to_linear_data(skb,
-							rx_q->
-							rx_skbuff[entry]->data,
-							frame_len);
-
-				skb_put(skb, frame_len);
-				dma_sync_single_for_device(priv->device,
-							   rx_q->rx_skbuff_dma
-							   [entry], frame_len,
-							   DMA_FROM_DEVICE);
-			} else {
-				skb = rx_q->rx_skbuff[entry];
-				if (unlikely(!skb)) {
-					if (net_ratelimit())
-						netdev_err(priv->dev,
-							   "%s: Inconsistent Rx chain\n",
-							   priv->dev->name);
-					priv->dev->stats.rx_dropped++;
-					continue;
-				}
-				prefetch(skb->data - NET_IP_ALIGN);
-				rx_q->rx_skbuff[entry] = NULL;
-				rx_q->rx_zeroc_thresh++;
-
-				skb_put(skb, frame_len);
-				dma_unmap_single(priv->device,
-						 rx_q->rx_skbuff_dma[entry],
-						 priv->dma_buf_sz,
-						 DMA_FROM_DEVICE);
-			}
-
+			dma_sync_single_for_cpu(priv->device, buf->addr,
+						frame_len, DMA_FROM_DEVICE);
+			skb_copy_to_linear_data(skb, page_address(buf->page),
+						frame_len);
+			skb_put(skb, frame_len);
+			dma_sync_single_for_device(priv->device, buf->addr,
+						   frame_len, DMA_FROM_DEVICE);
+
 			if (netif_msg_pktdata(priv)) {
 				netdev_dbg(priv->dev, "frame received (%dbytes)",
 					   frame_len);
@@ -3532,6 +3453,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 
 			napi_gro_receive(&ch->rx_napi, skb);
 
+			/* Data payload copied into SKB, page ready for recycle */
+			page_pool_recycle_direct(rx_q->page_pool, buf->page);
+			buf->page = NULL;
+
 			priv->dev->stats.rx_packets++;
 			priv->dev->stats.rx_bytes += frame_len;
 		}
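With the page pool in place the old copybreak/zero-copy split is gone: every frame is copied into a fresh SKB, so the page is never handed up the stack and can go straight back to the pool cache via page_pool_recycle_direct(). A toy user-space analogue of the copy-then-recycle flow, with stub buffers standing in for the pooled page and the SKB:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BUF_SZ 2048

int main(void)
{
	/* Stand-in for the pooled page the NIC DMAed the frame into. */
	static unsigned char page[BUF_SZ] = "frame payload";
	size_t frame_len = strlen((char *)page) + 1;

	/* netdev_alloc_skb_ip_align() analogue: a fresh buffer per frame. */
	unsigned char *skb = malloc(frame_len);
	if (!skb)
		return 1;
	memcpy(skb, page, frame_len);	/* skb_copy_to_linear_data() */

	printf("received: %s\n", skb);
	free(skb);	/* the stack consumes the SKB */

	/* The page was only read, so it can be "recycled" for the next
	 * frame with no unmap and no reallocation. */
	page[0] = '\0';
	return 0;
}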
@@ -4631,7 +4556,7 @@ int stmmac_resume(struct device *dev)
 	stmmac_clear_descriptors(priv);
 
 	stmmac_hw_setup(ndev, false);
-	stmmac_init_tx_coalesce(priv);
+	stmmac_init_coalesce(priv);
 	stmmac_set_rx_mode(ndev);
 
 	stmmac_enable_all_queues(priv);