/*
 * Copyright (C) 2005-2006 Atmel Corporation
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */
#include <common.h>

/*
 * The U-Boot networking stack is a little weird. It seems like the
 * networking core allocates receive buffers up front without any
 * regard to the hardware that's supposed to actually receive those
 * packets.
 *
 * The MACB receives packets into 128-byte receive buffers, so the
 * buffers allocated by the core aren't very practical to use. We'll
 * allocate our own, but we need one such buffer in case a packet
 * wraps around the DMA ring so that we have to copy it.
 *
 * Therefore, define CONFIG_SYS_RX_ETH_BUFFER to 1 in the board-specific
 * configuration header. This way, the core allocates one RX buffer
 * and one TX buffer, each of which can hold an ethernet packet of
 * maximum size.
 *
 * For some reason, the networking core unconditionally specifies a
 * 32-byte packet "alignment" (which really should be called
 * "padding"). The MACB shouldn't need that, but we'll refrain from any
 * core modifications here...
 */

#include <net.h>
#include <netdev.h>
#include <malloc.h>
#include <miiphy.h>

#include <linux/mii.h>
#include <asm/io.h>
#include <asm/dma-mapping.h>
#include <asm/arch/clk.h>
#include <asm-generic/errno.h>

#include "macb.h"

#define MACB_RX_BUFFER_SIZE		4096
#define MACB_RX_RING_SIZE		(MACB_RX_BUFFER_SIZE / 128)
#define MACB_TX_RING_SIZE		16
#define MACB_TX_TIMEOUT			1000
#define MACB_AUTONEG_TIMEOUT		5000000

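/*
 * One RX/TX DMA descriptor as seen by the controller. For RX descriptors
 * the low bits of 'addr' carry the USED and WRAP flags and 'ctrl' carries
 * the frame status; for TX descriptors 'ctrl' carries the length, control
 * and status flags defined below.
 */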
struct macb_dma_desc {
	u32	addr;
	u32	ctrl;
};

#define DMA_DESC_BYTES(n)	(n * sizeof(struct macb_dma_desc))
#define MACB_TX_DMA_DESC_SIZE	(DMA_DESC_BYTES(MACB_TX_RING_SIZE))
#define MACB_RX_DMA_DESC_SIZE	(DMA_DESC_BYTES(MACB_RX_RING_SIZE))
#define MACB_TX_DUMMY_DMA_DESC_SIZE	(DMA_DESC_BYTES(1))

#define RXADDR_USED		0x00000001
#define RXADDR_WRAP		0x00000002

#define RXBUF_FRMLEN_MASK	0x00000fff
#define RXBUF_FRAME_START	0x00004000
#define RXBUF_FRAME_END		0x00008000
#define RXBUF_TYPEID_MATCH	0x00400000
#define RXBUF_ADDR4_MATCH	0x00800000
#define RXBUF_ADDR3_MATCH	0x01000000
#define RXBUF_ADDR2_MATCH	0x02000000
#define RXBUF_ADDR1_MATCH	0x04000000
#define RXBUF_BROADCAST		0x80000000

#define TXBUF_FRMLEN_MASK	0x000007ff
#define TXBUF_FRAME_END		0x00008000
#define TXBUF_NOCRC		0x00010000
#define TXBUF_EXHAUSTED		0x08000000
#define TXBUF_UNDERRUN		0x10000000
#define TXBUF_MAXRETRY		0x20000000
#define TXBUF_WRAP		0x40000000
#define TXBUF_USED		0x80000000

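/* Per-controller driver state, embedding the generic eth_device. */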
struct macb_device {
	void			*regs;

	unsigned int		rx_tail;
	unsigned int		tx_head;
	unsigned int		tx_tail;
	unsigned int		next_rx_tail;
	bool			wrapped;

	void			*rx_buffer;
	void			*tx_buffer;
	struct macb_dma_desc	*rx_ring;
	struct macb_dma_desc	*tx_ring;

	unsigned long		rx_buffer_dma;
	unsigned long		rx_ring_dma;
	unsigned long		tx_ring_dma;

	struct macb_dma_desc	*dummy_desc;
	unsigned long		dummy_desc_dma;

	const struct device	*dev;
	struct eth_device	netdev;
	unsigned short		phy_addr;
	struct mii_dev		*bus;
};
#define to_macb(_nd) container_of(_nd, struct macb_device, netdev)

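/* The GEM variant of the IP reports module ID 0x2 in the MID register. */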
static int macb_is_gem(struct macb_device *macb)
{
	return MACB_BFEXT(IDNUM, macb_readl(macb, MID)) == 0x2;
}

#ifndef cpu_is_sama5d2
#define cpu_is_sama5d2() 0
#endif

#ifndef cpu_is_sama5d4
#define cpu_is_sama5d4() 0
#endif

static int gem_is_gigabit_capable(struct macb_device *macb)
{
	/*
	 * The GEM controllers embedded in SAMA5D2 and SAMA5D4 are
	 * configured to support only 10/100.
	 */
	return macb_is_gem(macb) && !cpu_is_sama5d2() && !cpu_is_sama5d4();
}

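/*
 * Write a PHY register over the MDIO management interface: enable the
 * management port, push a write frame, busy-wait for IDLE, then disable
 * the management port again.
 */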
static void macb_mdio_write(struct macb_device *macb, u8 reg, u16 value)
{
	unsigned long netctl;
	unsigned long netstat;
	unsigned long frame;

	netctl = macb_readl(macb, NCR);
	netctl |= MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	frame = (MACB_BF(SOF, 1)
		 | MACB_BF(RW, 1)
		 | MACB_BF(PHYA, macb->phy_addr)
		 | MACB_BF(REGA, reg)
		 | MACB_BF(CODE, 2)
		 | MACB_BF(DATA, value));
	macb_writel(macb, MAN, frame);

	do {
		netstat = macb_readl(macb, NSR);
	} while (!(netstat & MACB_BIT(IDLE)));

	netctl = macb_readl(macb, NCR);
	netctl &= ~MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);
}

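/* Read a PHY register over the MDIO management interface (same handshake). */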
static u16 macb_mdio_read(struct macb_device *macb, u8 reg)
{
	unsigned long netctl;
	unsigned long netstat;
	unsigned long frame;

	netctl = macb_readl(macb, NCR);
	netctl |= MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	frame = (MACB_BF(SOF, 1)
		 | MACB_BF(RW, 2)
		 | MACB_BF(PHYA, macb->phy_addr)
		 | MACB_BF(REGA, reg)
		 | MACB_BF(CODE, 2));
	macb_writel(macb, MAN, frame);

	do {
		netstat = macb_readl(macb, NSR);
	} while (!(netstat & MACB_BIT(IDLE)));

	frame = macb_readl(macb, MAN);

	netctl = macb_readl(macb, NCR);
	netctl &= ~MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	return MACB_BFEXT(DATA, frame);
}

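/* Weak hook that lets the platform route the MDIO lines to this interface. */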
void __weak arch_get_mdio_control(const char *name)
{
	return;
}

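/* miiphy glue so the generic MII command layer and PHYLIB can drive this bus. */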
#if defined(CONFIG_CMD_MII) || defined(CONFIG_PHYLIB)
int macb_miiphy_read(const char *devname, u8 phy_adr, u8 reg, u16 *value)
{
	struct eth_device *dev = eth_get_dev_by_name(devname);
	struct macb_device *macb = to_macb(dev);

	if (macb->phy_addr != phy_adr)
		return -1;

	arch_get_mdio_control(devname);
	*value = macb_mdio_read(macb, reg);

	return 0;
}

int macb_miiphy_write(const char *devname, u8 phy_adr, u8 reg, u16 value)
{
	struct eth_device *dev = eth_get_dev_by_name(devname);
	struct macb_device *macb = to_macb(dev);

	if (macb->phy_addr != phy_adr)
		return -1;

	arch_get_mdio_control(devname);
	macb_mdio_write(macb, reg, value);

	return 0;
}
#endif

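/*
 * The descriptor rings and the RX buffer are shared with the controller
 * via DMA, so CPU accesses to them are bracketed by the dcache maintenance
 * helpers below.
 */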
#define RX	1
#define TX	0
static inline void macb_invalidate_ring_desc(struct macb_device *macb, bool rx)
{
	if (rx)
		invalidate_dcache_range(macb->rx_ring_dma, macb->rx_ring_dma +
			MACB_RX_DMA_DESC_SIZE);
	else
		invalidate_dcache_range(macb->tx_ring_dma, macb->tx_ring_dma +
			MACB_TX_DMA_DESC_SIZE);
}

static inline void macb_flush_ring_desc(struct macb_device *macb, bool rx)
{
	if (rx)
		flush_dcache_range(macb->rx_ring_dma, macb->rx_ring_dma +
			MACB_RX_DMA_DESC_SIZE);
	else
		flush_dcache_range(macb->tx_ring_dma, macb->tx_ring_dma +
			MACB_TX_DMA_DESC_SIZE);
}

static inline void macb_flush_rx_buffer(struct macb_device *macb)
{
	flush_dcache_range(macb->rx_buffer_dma, macb->rx_buffer_dma +
				MACB_RX_BUFFER_SIZE);
}

static inline void macb_invalidate_rx_buffer(struct macb_device *macb)
{
	invalidate_dcache_range(macb->rx_buffer_dma, macb->rx_buffer_dma +
				MACB_RX_BUFFER_SIZE);
}

#if defined(CONFIG_CMD_NET)

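/*
 * Transmit one packet: map the buffer for DMA, fill in the single TX
 * descriptor, kick TSTART, then poll the USED bit (with a timeout) so
 * the caller may safely reuse the buffer on return.
 */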
static int _macb_send(struct macb_device *macb, const char *name, void *packet,
		      int length)
{
	unsigned long paddr, ctrl;
	unsigned int tx_head = macb->tx_head;
	int i;

	paddr = dma_map_single(packet, length, DMA_TO_DEVICE);

	ctrl = length & TXBUF_FRMLEN_MASK;
	ctrl |= TXBUF_FRAME_END;
	if (tx_head == (MACB_TX_RING_SIZE - 1)) {
		ctrl |= TXBUF_WRAP;
		macb->tx_head = 0;
	} else {
		macb->tx_head++;
	}

	macb->tx_ring[tx_head].ctrl = ctrl;
	macb->tx_ring[tx_head].addr = paddr;
	barrier();
	macb_flush_ring_desc(macb, TX);
	/* Do we need to check that paddr and length are dcache line aligned? */
	flush_dcache_range(paddr, paddr + length);
	macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE) | MACB_BIT(TSTART));

	/*
	 * I guess this is necessary because the networking core may
	 * re-use the transmit buffer as soon as we return...
	 */
	for (i = 0; i <= MACB_TX_TIMEOUT; i++) {
		barrier();
		macb_invalidate_ring_desc(macb, TX);
		ctrl = macb->tx_ring[tx_head].ctrl;
		if (ctrl & TXBUF_USED)
			break;
		udelay(1);
	}

	dma_unmap_single(packet, length, paddr);

	if (i <= MACB_TX_TIMEOUT) {
		if (ctrl & TXBUF_UNDERRUN)
			printf("%s: TX underrun\n", name);
		if (ctrl & TXBUF_EXHAUSTED)
			printf("%s: TX buffers exhausted in mid frame\n", name);
	} else {
		printf("%s: TX timeout\n", name);
	}

	/* No one cares anyway */
	return 0;
}

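/*
 * Hand the RX descriptors between rx_tail and new_tail (exclusive) back
 * to the controller by clearing their USED bits, then advance rx_tail.
 */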
static void reclaim_rx_buffers(struct macb_device *macb,
			       unsigned int new_tail)
{
	unsigned int i;

	i = macb->rx_tail;

	macb_invalidate_ring_desc(macb, RX);
	while (i > new_tail) {
		macb->rx_ring[i].addr &= ~RXADDR_USED;
		i++;
		if (i >= MACB_RX_RING_SIZE)
			i = 0;
	}

	while (i < new_tail) {
		macb->rx_ring[i].addr &= ~RXADDR_USED;
		i++;
	}

	barrier();
	macb_flush_ring_desc(macb, RX);
	macb->rx_tail = new_tail;
}

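/*
 * Poll the RX ring for a complete frame. A frame that wraps past the end
 * of the ring is copied into net_rx_packets[0] so the caller always gets
 * a contiguous buffer. Returns the frame length, or -EAGAIN if nothing
 * is pending.
 */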
static int _macb_recv(struct macb_device *macb, uchar **packetp)
{
	unsigned int next_rx_tail = macb->next_rx_tail;
	void *buffer;
	int length;
	u32 status;

	macb->wrapped = false;
	for (;;) {
		macb_invalidate_ring_desc(macb, RX);

		if (!(macb->rx_ring[next_rx_tail].addr & RXADDR_USED))
			return -EAGAIN;

		status = macb->rx_ring[next_rx_tail].ctrl;
		if (status & RXBUF_FRAME_START) {
			if (next_rx_tail != macb->rx_tail)
				reclaim_rx_buffers(macb, next_rx_tail);
			macb->wrapped = false;
		}

		if (status & RXBUF_FRAME_END) {
			buffer = macb->rx_buffer + 128 * macb->rx_tail;
			length = status & RXBUF_FRMLEN_MASK;

			macb_invalidate_rx_buffer(macb);
			if (macb->wrapped) {
				unsigned int headlen, taillen;

				headlen = 128 * (MACB_RX_RING_SIZE
						 - macb->rx_tail);
				taillen = length - headlen;
				memcpy((void *)net_rx_packets[0],
				       buffer, headlen);
				memcpy((void *)net_rx_packets[0] + headlen,
				       macb->rx_buffer, taillen);
				*packetp = (void *)net_rx_packets[0];
			} else {
				*packetp = buffer;
			}

			if (++next_rx_tail >= MACB_RX_RING_SIZE)
				next_rx_tail = 0;
			macb->next_rx_tail = next_rx_tail;
			return length;
		} else {
			if (++next_rx_tail >= MACB_RX_RING_SIZE) {
				macb->wrapped = true;
				next_rx_tail = 0;
			}
		}
		barrier();
	}
}

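/*
 * Kick off autonegotiation with all of our capabilities advertised and
 * wait (bounded by MACB_AUTONEG_TIMEOUT) for it to complete.
 */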
static void macb_phy_reset(struct macb_device *macb, const char *name)
{
	int i;
	u16 status, adv;

	adv = ADVERTISE_CSMA | ADVERTISE_ALL;
	macb_mdio_write(macb, MII_ADVERTISE, adv);
	printf("%s: Starting autonegotiation...\n", name);
	macb_mdio_write(macb, MII_BMCR, (BMCR_ANENABLE
					 | BMCR_ANRESTART));

	for (i = 0; i < MACB_AUTONEG_TIMEOUT / 100; i++) {
		status = macb_mdio_read(macb, MII_BMSR);
		if (status & BMSR_ANEGCOMPLETE)
			break;
		udelay(100);
	}

	if (status & BMSR_ANEGCOMPLETE)
		printf("%s: Autonegotiation complete\n", name);
	else
		printf("%s: Autonegotiation timed out (status=0x%04x)\n",
		       name, status);
}

#ifdef CONFIG_MACB_SEARCH_PHY
static int macb_phy_find(struct macb_device *macb)
{
	int i;
	u16 phy_id;

	/* Search for PHY... */
	for (i = 0; i < 32; i++) {
		macb->phy_addr = i;
		phy_id = macb_mdio_read(macb, MII_PHYSID1);
		if (phy_id != 0xffff) {
			printf("%s: PHY present at %d\n", macb->netdev.name, i);
			return 1;
		}
	}

	/* PHY isn't up to snuff */
	printf("%s: PHY not found\n", macb->netdev.name);

	return 0;
}
#endif /* CONFIG_MACB_SEARCH_PHY */

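/*
 * Probe the PHY (optionally scanning for its address), bring the link
 * up, and program NCFGR with the negotiated speed and duplex.
 */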
static int macb_phy_init(struct macb_device *macb, const char *name)
{
#ifdef CONFIG_PHYLIB
	struct phy_device *phydev;
#endif
	u32 ncfgr;
	u16 phy_id, status, adv, lpa;
	int media, speed, duplex;
	int i;

	arch_get_mdio_control(name);
#ifdef CONFIG_MACB_SEARCH_PHY
	/* Auto-detect phy_addr */
	if (!macb_phy_find(macb))
		return 0;
#endif /* CONFIG_MACB_SEARCH_PHY */

	/* Check if the PHY is up to snuff... */
	phy_id = macb_mdio_read(macb, MII_PHYSID1);
	if (phy_id == 0xffff) {
		printf("%s: No PHY present\n", name);
		return 0;
	}

#ifdef CONFIG_PHYLIB
	/* TODO: consider other PHY interface modes */
	phydev = phy_connect(macb->bus, macb->phy_addr, &macb->netdev,
			     PHY_INTERFACE_MODE_RGMII);
	if (!phydev) {
		printf("phy_connect failed\n");
		return -ENODEV;
	}

	phy_config(phydev);
#endif

	status = macb_mdio_read(macb, MII_BMSR);
	if (!(status & BMSR_LSTATUS)) {
		/* Try to re-negotiate if we don't have link already. */
		macb_phy_reset(macb, name);

		for (i = 0; i < MACB_AUTONEG_TIMEOUT / 100; i++) {
			status = macb_mdio_read(macb, MII_BMSR);
			if (status & BMSR_LSTATUS)
				break;
			udelay(100);
		}
	}

	if (!(status & BMSR_LSTATUS)) {
		printf("%s: link down (status: 0x%04x)\n",
		       name, status);
		return 0;
	}

	/* First check whether this is a gigabit-capable GMAC */
	if (gem_is_gigabit_capable(macb)) {
		lpa = macb_mdio_read(macb, MII_STAT1000);

		if (lpa & (LPA_1000FULL | LPA_1000HALF)) {
			duplex = ((lpa & LPA_1000FULL) ? 1 : 0);

			printf("%s: link up, 1000Mbps %s-duplex (lpa: 0x%04x)\n",
			       name,
			       duplex ? "full" : "half",
			       lpa);

			ncfgr = macb_readl(macb, NCFGR);
			ncfgr &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
			ncfgr |= GEM_BIT(GBE);

			if (duplex)
				ncfgr |= MACB_BIT(FD);

			macb_writel(macb, NCFGR, ncfgr);

			return 1;
		}
	}

	/* Fall back to 10/100 (EMAC-style) link detection */
	adv = macb_mdio_read(macb, MII_ADVERTISE);
	lpa = macb_mdio_read(macb, MII_LPA);
	media = mii_nway_result(lpa & adv);
	speed = (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)
		 ? 1 : 0);
	duplex = (media & ADVERTISE_FULL) ? 1 : 0;
	printf("%s: link up, %sMbps %s-duplex (lpa: 0x%04x)\n",
	       name,
	       speed ? "100" : "10",
	       duplex ? "full" : "half",
	       lpa);

	ncfgr = macb_readl(macb, NCFGR);
	ncfgr &= ~(MACB_BIT(SPD) | MACB_BIT(FD) | GEM_BIT(GBE));
	if (speed)
		ncfgr |= MACB_BIT(SPD);
	if (duplex)
		ncfgr |= MACB_BIT(FD);
	macb_writel(macb, NCFGR, ncfgr);

	return 1;
}

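/*
 * GEM controllers can expose several TX queues. We only use queue 0, so
 * park every other advertised queue on a single "used" dummy descriptor.
 */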
static int gmac_init_multi_queues(struct macb_device *macb)
{
	int i, num_queues = 1;
	u32 queue_mask;

	/* bit 0 is never set but queue 0 always exists */
	queue_mask = gem_readl(macb, DCFG6) & 0xff;
	queue_mask |= 0x1;

	for (i = 1; i < MACB_MAX_QUEUES; i++)
		if (queue_mask & (1 << i))
			num_queues++;

	macb->dummy_desc->ctrl = TXBUF_USED;
	macb->dummy_desc->addr = 0;
	flush_dcache_range(macb->dummy_desc_dma, macb->dummy_desc_dma +
			MACB_TX_DUMMY_DMA_DESC_SIZE);

	for (i = 1; i < num_queues; i++)
		gem_writel_queue_TBQP(macb, macb->dummy_desc_dma, i - 1);

	return 0;
}

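/*
 * (Re)initialize the RX/TX descriptor rings, select the PHY interface
 * mode for this board, bring the PHY up, and finally enable the
 * transmitter and receiver.
 */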
static int _macb_init(struct macb_device *macb, const char *name)
{
	unsigned long paddr;
	int i;

	/*
	 * macb_halt should have been called at some point before now,
	 * so we'll assume the controller is idle.
	 */

	/* initialize DMA descriptors */
	paddr = macb->rx_buffer_dma;
	for (i = 0; i < MACB_RX_RING_SIZE; i++) {
		if (i == (MACB_RX_RING_SIZE - 1))
			paddr |= RXADDR_WRAP;
		macb->rx_ring[i].addr = paddr;
		macb->rx_ring[i].ctrl = 0;
		paddr += 128;
	}
	macb_flush_ring_desc(macb, RX);
	macb_flush_rx_buffer(macb);

	for (i = 0; i < MACB_TX_RING_SIZE; i++) {
		macb->tx_ring[i].addr = 0;
		if (i == (MACB_TX_RING_SIZE - 1))
			macb->tx_ring[i].ctrl = TXBUF_USED | TXBUF_WRAP;
		else
			macb->tx_ring[i].ctrl = TXBUF_USED;
	}
	macb_flush_ring_desc(macb, TX);

	macb->rx_tail = 0;
	macb->tx_head = 0;
	macb->tx_tail = 0;
	macb->next_rx_tail = 0;

	macb_writel(macb, RBQP, macb->rx_ring_dma);
	macb_writel(macb, TBQP, macb->tx_ring_dma);

	if (macb_is_gem(macb)) {
		/* Check the multi queue and initialize the queue for tx */
		gmac_init_multi_queues(macb);

		/*
		 * When the GMAC IP has the GE feature, this bit selects
		 * the interface between RGMII and GMII.
		 * When the GMAC IP lacks the GE feature, this bit selects
		 * the interface between RMII and MII.
		 */
#if defined(CONFIG_RGMII) || defined(CONFIG_RMII)
		gem_writel(macb, UR, GEM_BIT(RGMII));
#else
		gem_writel(macb, UR, 0);
#endif
	} else {
		/* choose RMII or MII mode. This depends on the board */
#ifdef CONFIG_RMII
#ifdef CONFIG_AT91FAMILY
		macb_writel(macb, USRIO, MACB_BIT(RMII) | MACB_BIT(CLKEN));
#else
		macb_writel(macb, USRIO, 0);
#endif
#else
#ifdef CONFIG_AT91FAMILY
		macb_writel(macb, USRIO, MACB_BIT(CLKEN));
#else
		macb_writel(macb, USRIO, MACB_BIT(MII));
#endif
#endif /* CONFIG_RMII */
	}

	if (!macb_phy_init(macb, name))
		return -1;

	/* Enable TX and RX */
	macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE));

	return 0;
}

static void _macb_halt(struct macb_device *macb)
{
	u32 ncr, tsr;

	/* Halt the controller and wait for any ongoing transmission to end. */
	ncr = macb_readl(macb, NCR);
	ncr |= MACB_BIT(THALT);
	macb_writel(macb, NCR, ncr);

	do {
		tsr = macb_readl(macb, TSR);
	} while (tsr & MACB_BIT(TGO));

	/* Disable TX and RX, and clear statistics */
	macb_writel(macb, NCR, MACB_BIT(CLRSTAT));
}

static int _macb_write_hwaddr(struct macb_device *macb, unsigned char *enetaddr)
{
	u32 hwaddr_bottom;
	u16 hwaddr_top;

	/* set hardware address */
	hwaddr_bottom = enetaddr[0] | enetaddr[1] << 8 |
			enetaddr[2] << 16 | enetaddr[3] << 24;
	macb_writel(macb, SA1B, hwaddr_bottom);
	hwaddr_top = enetaddr[4] | enetaddr[5] << 8;
	macb_writel(macb, SA1T, hwaddr_top);
	return 0;
}

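/*
 * Pick an MDC clock divider that keeps the management clock within spec
 * for the given peripheral clock rate (MACB and GEM use different
 * divider encodings).
 */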
static u32 macb_mdc_clk_div(int id, struct macb_device *macb)
{
	u32 config;
	unsigned long macb_hz = get_macb_pclk_rate(id);

	if (macb_hz < 20000000)
		config = MACB_BF(CLK, MACB_CLK_DIV8);
	else if (macb_hz < 40000000)
		config = MACB_BF(CLK, MACB_CLK_DIV16);
	else if (macb_hz < 80000000)
		config = MACB_BF(CLK, MACB_CLK_DIV32);
	else
		config = MACB_BF(CLK, MACB_CLK_DIV64);

	return config;
}

static u32 gem_mdc_clk_div(int id, struct macb_device *macb)
{
	u32 config;
	unsigned long macb_hz = get_macb_pclk_rate(id);

	if (macb_hz < 20000000)
		config = GEM_BF(CLK, GEM_CLK_DIV8);
	else if (macb_hz < 40000000)
		config = GEM_BF(CLK, GEM_CLK_DIV16);
	else if (macb_hz < 80000000)
		config = GEM_BF(CLK, GEM_CLK_DIV32);
	else if (macb_hz < 120000000)
		config = GEM_BF(CLK, GEM_CLK_DIV48);
	else if (macb_hz < 160000000)
		config = GEM_BF(CLK, GEM_CLK_DIV64);
	else
		config = GEM_BF(CLK, GEM_CLK_DIV96);

	return config;
}

/*
 * Get the DMA bus width field of the network configuration register that we
 * should program. We find the width from decoding the design configuration
 * register to find the maximum supported data bus width.
 */
static u32 macb_dbw(struct macb_device *macb)
{
	switch (GEM_BFEXT(DBWDEF, gem_readl(macb, DCFG1))) {
	case 4:
		return GEM_BF(DBW, GEM_DBW128);
	case 2:
		return GEM_BF(DBW, GEM_DBW64);
	case 1:
	default:
		return GEM_BF(DBW, GEM_DBW32);
	}
}

static void _macb_eth_initialize(struct macb_device *macb)
{
	int id = 0;	/* This is not used by functions we call */
	u32 ncfgr;

	/* TODO: check that rx/tx_ring_dma are dcache line aligned */
	macb->rx_buffer = dma_alloc_coherent(MACB_RX_BUFFER_SIZE,
					     &macb->rx_buffer_dma);
	macb->rx_ring = dma_alloc_coherent(MACB_RX_DMA_DESC_SIZE,
					   &macb->rx_ring_dma);
	macb->tx_ring = dma_alloc_coherent(MACB_TX_DMA_DESC_SIZE,
					   &macb->tx_ring_dma);
	macb->dummy_desc = dma_alloc_coherent(MACB_TX_DUMMY_DMA_DESC_SIZE,
					      &macb->dummy_desc_dma);

	/*
	 * Do some basic initialization so that we at least can talk
	 * to the PHY
	 */
	if (macb_is_gem(macb)) {
		ncfgr = gem_mdc_clk_div(id, macb);
		ncfgr |= macb_dbw(macb);
	} else {
		ncfgr = macb_mdc_clk_div(id, macb);
	}

	macb_writel(macb, NCFGR, ncfgr);
}

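/* eth_device callbacks that wrap the _macb_* helpers above. */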
static int macb_send(struct eth_device *netdev, void *packet, int length)
{
	struct macb_device *macb = to_macb(netdev);

	return _macb_send(macb, netdev->name, packet, length);
}

static int macb_recv(struct eth_device *netdev)
{
	struct macb_device *macb = to_macb(netdev);
	uchar *packet;
	int length;

	macb->wrapped = false;
	for (;;) {
		macb->next_rx_tail = macb->rx_tail;
		length = _macb_recv(macb, &packet);
		if (length >= 0) {
			net_process_received_packet(packet, length);
			reclaim_rx_buffers(macb, macb->next_rx_tail);
		} else {
			return length;
		}
	}
}

static int macb_init(struct eth_device *netdev, bd_t *bd)
{
	struct macb_device *macb = to_macb(netdev);

	return _macb_init(macb, netdev->name);
}

static void macb_halt(struct eth_device *netdev)
{
	struct macb_device *macb = to_macb(netdev);

	_macb_halt(macb);
}

static int macb_write_hwaddr(struct eth_device *netdev)
{
	struct macb_device *macb = to_macb(netdev);

	return _macb_write_hwaddr(macb, netdev->enetaddr);
}

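/*
 * Register one MACB/GEM controller with the networking core. Boards
 * typically call this from board_eth_init(); for example (the base
 * address macro below is illustrative and board-specific, not defined
 * by this driver):
 *
 *	int board_eth_init(bd_t *bis)
 *	{
 *		return macb_eth_initialize(0, (void *)MACB0_BASE_ADDRESS, 0);
 *	}
 */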
int macb_eth_initialize(int id, void *regs, unsigned int phy_addr)
{
	struct macb_device *macb;
	struct eth_device *netdev;

	macb = malloc(sizeof(struct macb_device));
	if (!macb) {
		printf("Error: Failed to allocate memory for MACB%d\n", id);
		return -1;
	}
	memset(macb, 0, sizeof(struct macb_device));

	netdev = &macb->netdev;

	macb->regs = regs;
	macb->phy_addr = phy_addr;

	if (macb_is_gem(macb))
		sprintf(netdev->name, "gmac%d", id);
	else
		sprintf(netdev->name, "macb%d", id);

	netdev->init = macb_init;
	netdev->halt = macb_halt;
	netdev->send = macb_send;
	netdev->recv = macb_recv;
	netdev->write_hwaddr = macb_write_hwaddr;

	_macb_eth_initialize(macb);

	eth_register(netdev);

#if defined(CONFIG_CMD_MII) || defined(CONFIG_PHYLIB)
	miiphy_register(netdev->name, macb_miiphy_read, macb_miiphy_write);
	macb->bus = miiphy_get_dev_by_name(netdev->name);
#endif
	return 0;
}

#endif /* CONFIG_CMD_NET */