net: korina: Use DMA API

Instead of messing with MIPS-specific macros, use the DMA API for
mapping descriptors and skbs.

Reviewed-by: Andrew Lunn <andrew@lunn.ch>
Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
Commit: 0fc96939a9 (parent: 0fe632471a)
Author: Thomas Bogendoerfer, 2021-04-19 00:19:43 +02:00; committed by David S. Miller
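For context, the streaming-mapping pattern the driver switches to looks roughly like the sketch below (illustrative only; my_ring, my_tx_map and my_tx_complete are made-up names, not part of the driver). Packet buffers are mapped per transfer with dma_map_single(), the returned dma_addr_t is checked with dma_mapping_error() before it is written into a descriptor, and the mapping is released with dma_unmap_single() before the skb is freed. The descriptor rings themselves move to dmam_alloc_coherent(), so the old dma_cache_wback()/dma_cache_inv() calls and the CPHYSADDR()/KSEG1ADDR() address games are no longer needed.

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Illustrative state: the device used for mappings and one saved handle. */
struct my_ring {
        struct device *dmadev;
        dma_addr_t skb_dma;
};

/* Map an skb for transmit; the hardware gets a dma_addr_t, never a CPU pointer. */
static int my_tx_map(struct my_ring *r, struct sk_buff *skb)
{
        dma_addr_t ca;

        ca = dma_map_single(r->dmadev, skb->data, skb->len, DMA_TO_DEVICE);
        if (dma_mapping_error(r->dmadev, ca))
                return -ENOMEM;         /* caller drops the packet */

        r->skb_dma = ca;                /* saved so completion can unmap it */
        return 0;
}

/* On transmit completion: unmap first, then free the skb. */
static void my_tx_complete(struct my_ring *r, struct sk_buff *skb)
{
        dma_unmap_single(r->dmadev, r->skb_dma, skb->len, DMA_TO_DEVICE);
        dev_kfree_skb_any(skb);
}

Because dmam_alloc_coherent() is device-managed, the rings are freed automatically when the device goes away, which is why the explicit kfree() calls disappear from the probe error path and from korina_remove() in the diff below.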

drivers/net/ethernet/korina.c

@@ -110,10 +110,15 @@ struct korina_private {
         struct dma_reg __iomem *tx_dma_regs;
         struct dma_desc *td_ring; /* transmit descriptor ring */
         struct dma_desc *rd_ring; /* receive descriptor ring */
+        dma_addr_t td_dma;
+        dma_addr_t rd_dma;
 
         struct sk_buff *tx_skb[KORINA_NUM_TDS];
         struct sk_buff *rx_skb[KORINA_NUM_RDS];
+
+        dma_addr_t rx_skb_dma[KORINA_NUM_RDS];
+        dma_addr_t tx_skb_dma[KORINA_NUM_TDS];
 
         int rx_next_done;
         int rx_chain_head;
         int rx_chain_tail;
@@ -138,10 +143,21 @@ struct korina_private {
         struct mii_if_info mii_if;
         struct work_struct restart_task;
         struct net_device *dev;
+        struct device *dmadev;
 };
 
 extern unsigned int idt_cpu_freq;
 
+static dma_addr_t korina_tx_dma(struct korina_private *lp, int idx)
+{
+        return lp->td_dma + (idx * sizeof(struct dma_desc));
+}
+
+static dma_addr_t korina_rx_dma(struct korina_private *lp, int idx)
+{
+        return lp->rd_dma + (idx * sizeof(struct dma_desc));
+}
+
 static inline void korina_abort_dma(struct net_device *dev,
                         struct dma_reg *ch)
 {
@@ -176,14 +192,17 @@ static void korina_abort_rx(struct net_device *dev)
 static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
 {
         struct korina_private *lp = netdev_priv(dev);
-        unsigned long flags;
-        u32 length;
         u32 chain_prev, chain_next;
+        unsigned long flags;
         struct dma_desc *td;
+        dma_addr_t ca;
+        u32 length;
+        int idx;
 
         spin_lock_irqsave(&lp->lock, flags);
 
-        td = &lp->td_ring[lp->tx_chain_tail];
+        idx = lp->tx_chain_tail;
+        td = &lp->td_ring[idx];
 
         /* stop queue when full, drop pkts if queue already full */
         if (lp->tx_count >= (KORINA_NUM_TDS - 2)) {
@@ -191,26 +210,26 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
                 if (lp->tx_count == (KORINA_NUM_TDS - 2))
                         netif_stop_queue(dev);
-                else {
-                        dev->stats.tx_dropped++;
-                        dev_kfree_skb_any(skb);
-                        spin_unlock_irqrestore(&lp->lock, flags);
-                        return NETDEV_TX_OK;
-                }
+                else
+                        goto drop_packet;
         }
 
         lp->tx_count++;
 
-        lp->tx_skb[lp->tx_chain_tail] = skb;
+        lp->tx_skb[idx] = skb;
 
         length = skb->len;
-        dma_cache_wback((u32)skb->data, skb->len);
 
         /* Setup the transmit descriptor. */
-        td->ca = CPHYSADDR(skb->data);
+        ca = dma_map_single(lp->dmadev, skb->data, length, DMA_TO_DEVICE);
+        if (dma_mapping_error(lp->dmadev, ca))
+                goto drop_packet;
 
-        chain_prev = (lp->tx_chain_tail - 1) & KORINA_TDS_MASK;
-        chain_next = (lp->tx_chain_tail + 1) & KORINA_TDS_MASK;
+        lp->tx_skb_dma[idx] = ca;
+        td->ca = ca;
+
+        chain_prev = (idx - 1) & KORINA_TDS_MASK;
+        chain_next = (idx + 1) & KORINA_TDS_MASK;
 
         if (readl(&(lp->tx_dma_regs->dmandptr)) == 0) {
                 if (lp->tx_chain_status == desc_empty) {
@ -220,8 +239,8 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
/* Move tail */ /* Move tail */
lp->tx_chain_tail = chain_next; lp->tx_chain_tail = chain_next;
/* Write to NDPTR */ /* Write to NDPTR */
writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]), writel(korina_tx_dma(lp, lp->tx_chain_head),
&lp->tx_dma_regs->dmandptr); &lp->tx_dma_regs->dmandptr);
/* Move head to tail */ /* Move head to tail */
lp->tx_chain_head = lp->tx_chain_tail; lp->tx_chain_head = lp->tx_chain_tail;
} else { } else {
@@ -232,12 +251,12 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
                         lp->td_ring[chain_prev].control &=
                                         ~DMA_DESC_COF;
                         /* Link to prev */
-                        lp->td_ring[chain_prev].link = CPHYSADDR(td);
+                        lp->td_ring[chain_prev].link = korina_tx_dma(lp, idx);
                         /* Move tail */
                         lp->tx_chain_tail = chain_next;
                         /* Write to NDPTR */
-                        writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
-                                &(lp->tx_dma_regs->dmandptr));
+                        writel(korina_tx_dma(lp, lp->tx_chain_head),
+                               &lp->tx_dma_regs->dmandptr);
                         /* Move head to tail */
                         lp->tx_chain_head = lp->tx_chain_tail;
                         lp->tx_chain_status = desc_empty;
@@ -256,7 +275,7 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
                                         DMA_DESC_COF | DMA_DESC_IOF;
                         lp->td_ring[chain_prev].control &=
                                         ~DMA_DESC_COF;
-                        lp->td_ring[chain_prev].link = CPHYSADDR(td);
+                        lp->td_ring[chain_prev].link = korina_tx_dma(lp, idx);
                         lp->tx_chain_tail = chain_next;
                 }
         }
@@ -264,6 +283,13 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
         netif_trans_update(dev);
         spin_unlock_irqrestore(&lp->lock, flags);
 
+        return NETDEV_TX_OK;
+
+drop_packet:
+        dev->stats.tx_dropped++;
+        dev_kfree_skb_any(skb);
+        spin_unlock_irqrestore(&lp->lock, flags);
+
         return NETDEV_TX_OK;
 }
@@ -344,8 +370,8 @@ static int korina_rx(struct net_device *dev, int limit)
         struct korina_private *lp = netdev_priv(dev);
         struct dma_desc *rd = &lp->rd_ring[lp->rx_next_done];
         struct sk_buff *skb, *skb_new;
-        u8 *pkt_buf;
         u32 devcs, pkt_len, dmas;
+        dma_addr_t ca;
         int count;
 
         for (count = 0; count < limit; count++) {
@@ -381,20 +407,22 @@ static int korina_rx(struct net_device *dev, int limit)
                         goto next;
                 }
 
-                pkt_len = RCVPKT_LENGTH(devcs);
-
-                /* must be the (first and) last
-                 * descriptor then */
-                pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;
-
-                /* invalidate the cache */
-                dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);
-
                 /* Malloc up new buffer. */
                 skb_new = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE);
-
                 if (!skb_new)
                         break;
+
+                ca = dma_map_single(lp->dmadev, skb_new->data, KORINA_RBSIZE,
+                                    DMA_FROM_DEVICE);
+                if (dma_mapping_error(lp->dmadev, ca)) {
+                        dev_kfree_skb_any(skb_new);
+                        break;
+                }
+
+                pkt_len = RCVPKT_LENGTH(devcs);
+                dma_unmap_single(lp->dmadev, lp->rx_skb_dma[lp->rx_next_done],
+                                 pkt_len, DMA_FROM_DEVICE);
+
                 /* Do not count the CRC */
                 skb_put(skb, pkt_len - 4);
                 skb->protocol = eth_type_trans(skb, dev);
@ -409,15 +437,13 @@ static int korina_rx(struct net_device *dev, int limit)
dev->stats.multicast++; dev->stats.multicast++;
lp->rx_skb[lp->rx_next_done] = skb_new; lp->rx_skb[lp->rx_next_done] = skb_new;
lp->rx_skb_dma[lp->rx_next_done] = ca;
next: next:
rd->devcs = 0; rd->devcs = 0;
/* Restore descriptor's curr_addr */ /* Restore descriptor's curr_addr */
if (skb_new) rd->ca = lp->rx_skb_dma[lp->rx_next_done];
rd->ca = CPHYSADDR(skb_new->data);
else
rd->ca = CPHYSADDR(skb->data);
rd->control = DMA_COUNT(KORINA_RBSIZE) | rd->control = DMA_COUNT(KORINA_RBSIZE) |
DMA_DESC_COD | DMA_DESC_IOD; DMA_DESC_COD | DMA_DESC_IOD;
@@ -438,9 +464,9 @@ next:
                 lp->dma_halt_cnt++;
 
                 rd->devcs = 0;
-                skb = lp->rx_skb[lp->rx_next_done];
-                rd->ca = CPHYSADDR(skb->data);
-                writel(CPHYSADDR(rd), &lp->rx_dma_regs->dmandptr);
+                rd->ca = lp->rx_skb_dma[lp->rx_next_done];
+                writel(korina_rx_dma(lp, rd - lp->rd_ring),
+                       &lp->rx_dma_regs->dmandptr);
         }
 
         return count;
@@ -563,6 +589,10 @@ static void korina_tx(struct net_device *dev)
 
                 /* We must always free the original skb */
                 if (lp->tx_skb[lp->tx_next_done]) {
+                        dma_unmap_single(lp->dmadev,
+                                         lp->tx_skb_dma[lp->tx_next_done],
+                                         lp->tx_skb[lp->tx_next_done]->len,
+                                         DMA_TO_DEVICE);
                         dev_kfree_skb_any(lp->tx_skb[lp->tx_next_done]);
                         lp->tx_skb[lp->tx_next_done] = NULL;
                 }
@@ -609,8 +639,8 @@ korina_tx_dma_interrupt(int irq, void *dev_id)
 
         if (lp->tx_chain_status == desc_filled &&
             (readl(&(lp->tx_dma_regs->dmandptr)) == 0)) {
-                writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
-                        &(lp->tx_dma_regs->dmandptr));
+                writel(korina_tx_dma(lp, lp->tx_chain_head),
+                       &lp->tx_dma_regs->dmandptr);
                 lp->tx_chain_status = desc_empty;
                 lp->tx_chain_head = lp->tx_chain_tail;
                 netif_trans_update(dev);
@@ -730,6 +760,7 @@ static int korina_alloc_ring(struct net_device *dev)
 {
         struct korina_private *lp = netdev_priv(dev);
         struct sk_buff *skb;
+        dma_addr_t ca;
         int i;
 
         /* Initialize the transmit descriptors */
@@ -752,13 +783,18 @@ static int korina_alloc_ring(struct net_device *dev)
                 lp->rd_ring[i].control = DMA_DESC_IOD |
                                 DMA_COUNT(KORINA_RBSIZE);
                 lp->rd_ring[i].devcs = 0;
-                lp->rd_ring[i].ca = CPHYSADDR(skb->data);
-                lp->rd_ring[i].link = CPHYSADDR(&lp->rd_ring[i+1]);
+                ca = dma_map_single(lp->dmadev, skb->data, KORINA_RBSIZE,
+                                    DMA_FROM_DEVICE);
+                if (dma_mapping_error(lp->dmadev, ca))
+                        return -ENOMEM;
+                lp->rd_ring[i].ca = ca;
+                lp->rx_skb_dma[i] = ca;
+                lp->rd_ring[i].link = korina_rx_dma(lp, i + 1);
         }
 
         /* loop back receive descriptors, so the last
          * descriptor points to the first one */
-        lp->rd_ring[i - 1].link = CPHYSADDR(&lp->rd_ring[0]);
+        lp->rd_ring[i - 1].link = lp->rd_dma;
         lp->rd_ring[i - 1].control |= DMA_DESC_COD;
 
         lp->rx_next_done = 0;
@@ -776,16 +812,22 @@ static void korina_free_ring(struct net_device *dev)
         for (i = 0; i < KORINA_NUM_RDS; i++) {
                 lp->rd_ring[i].control = 0;
-                if (lp->rx_skb[i])
+                if (lp->rx_skb[i]) {
+                        dma_unmap_single(lp->dmadev, lp->rx_skb_dma[i],
+                                         KORINA_RBSIZE, DMA_FROM_DEVICE);
                         dev_kfree_skb_any(lp->rx_skb[i]);
-                lp->rx_skb[i] = NULL;
+                        lp->rx_skb[i] = NULL;
+                }
         }
 
         for (i = 0; i < KORINA_NUM_TDS; i++) {
                 lp->td_ring[i].control = 0;
-                if (lp->tx_skb[i])
+                if (lp->tx_skb[i]) {
+                        dma_unmap_single(lp->dmadev, lp->tx_skb_dma[i],
+                                         lp->tx_skb[i]->len, DMA_TO_DEVICE);
                         dev_kfree_skb_any(lp->tx_skb[i]);
-                lp->tx_skb[i] = NULL;
+                        lp->tx_skb[i] = NULL;
+                }
         }
 }
@@ -818,7 +860,7 @@ static int korina_init(struct net_device *dev)
         writel(0, &lp->rx_dma_regs->dmas);
         /* Start Rx DMA */
         writel(0, &lp->rx_dma_regs->dmandptr);
-        writel(CPHYSADDR(&lp->rd_ring[0]), &lp->rx_dma_regs->dmadptr);
+        writel(korina_rx_dma(lp, 0), &lp->rx_dma_regs->dmadptr);
 
         writel(readl(&lp->tx_dma_regs->dmasm) &
                ~(DMA_STAT_FINI | DMA_STAT_ERR),
@@ -1053,21 +1095,21 @@ static int korina_probe(struct platform_device *pdev)
         }
         lp->tx_dma_regs = p;
 
-        lp->td_ring = kmalloc(TD_RING_SIZE + RD_RING_SIZE, GFP_KERNEL);
+        lp->td_ring = dmam_alloc_coherent(&pdev->dev, TD_RING_SIZE,
+                                          &lp->td_dma, GFP_KERNEL);
         if (!lp->td_ring)
                 return -ENOMEM;
 
-        dma_cache_inv((unsigned long)(lp->td_ring),
-                        TD_RING_SIZE + RD_RING_SIZE);
-
-        /* now convert TD_RING pointer to KSEG1 */
-        lp->td_ring = (struct dma_desc *)KSEG1ADDR(lp->td_ring);
-        lp->rd_ring = &lp->td_ring[KORINA_NUM_TDS];
+        lp->rd_ring = dmam_alloc_coherent(&pdev->dev, RD_RING_SIZE,
+                                          &lp->rd_dma, GFP_KERNEL);
+        if (!lp->rd_ring)
+                return -ENOMEM;
 
         spin_lock_init(&lp->lock);
         /* just use the rx dma irq */
         dev->irq = lp->rx_irq;
         lp->dev = dev;
+        lp->dmadev = &pdev->dev;
 
         dev->netdev_ops = &korina_netdev_ops;
         dev->ethtool_ops = &netdev_ethtool_ops;
@@ -1085,7 +1127,6 @@ static int korina_probe(struct platform_device *pdev)
         if (rc < 0) {
                 printk(KERN_ERR DRV_NAME
                         ": cannot register net device: %d\n", rc);
-                kfree((struct dma_desc *)KSEG0ADDR(lp->td_ring));
                 return rc;
         }
         timer_setup(&lp->media_check_timer, korina_poll_media, 0);
@@ -1100,9 +1141,6 @@ static int korina_probe(struct platform_device *pdev)
 static int korina_remove(struct platform_device *pdev)
 {
         struct korina_device *bif = platform_get_drvdata(pdev);
-        struct korina_private *lp = netdev_priv(bif->dev);
-
-        kfree((struct dma_desc *)KSEG0ADDR(lp->td_ring));
 
         unregister_netdev(bif->dev);