mv643xx_eth: pass port identifier to register accessors
Pass a struct mv643xx_private * to the register accessor functions, as
a preparation for having multiple mv643xx_eth silicon blocks.  (Since
this causes some 80 column straddling, and the mv_ prefix is useless
anyway, rename mv_read to rdl and mv_write to wrl to compensate.)

Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
Reviewed-by: Tzachi Perelstein <tzachi@marvell.com>
Signed-off-by: Dale Farnsworth <dale@farnsworth.org>
commit ec69d651ac
parent c1b35a28f2
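For orientation, here is what the two accessors read like once the patch is applied, excerpted from the first hunk below with explanatory comments added. The new mp argument is not yet dereferenced, so this patch is a pure interface change; presumably a follow-up moves the register base into struct mv643xx_private so that each silicon block can use its own.

/* The accessors after the rename (see the first hunk below).  The new
 * 'mp' argument is carried but not yet used: reads and writes still go
 * through the single global mv643xx_eth_base, so behaviour is
 * unchanged by this patch.
 */
static inline u32 rdl(struct mv643xx_private *mp, int offset)
{
	return readl(mv643xx_eth_base + offset);
}

static inline void wrl(struct mv643xx_private *mp, int offset, u32 data)
{
	writel(data, mv643xx_eth_base + offset);
}

/* Call sites gain the extra argument, e.g.:
 *
 *	config_reg = rdl(mp, PORT_CONFIG_REG(mp->port_num));
 *	wrl(mp, PORT_CONFIG_REG(mp->port_num), config_reg);
 */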
@@ -615,12 +615,12 @@ static void __iomem *mv643xx_eth_base;
 /* used to protect SMI_REG, which is shared across ports */
 static DEFINE_SPINLOCK(mv643xx_eth_phy_lock);
 
-static inline u32 mv_read(int offset)
+static inline u32 rdl(struct mv643xx_private *mp, int offset)
 {
 	return readl(mv643xx_eth_base + offset);
 }
 
-static inline void mv_write(int offset, u32 data)
+static inline void wrl(struct mv643xx_private *mp, int offset, u32 data)
 {
 	writel(data, mv643xx_eth_base + offset);
 }
@@ -746,12 +746,12 @@ static void mv643xx_eth_set_rx_mode(struct net_device *dev)
 	struct mv643xx_private *mp = netdev_priv(dev);
 	u32 config_reg;
 
-	config_reg = mv_read(PORT_CONFIG_REG(mp->port_num));
+	config_reg = rdl(mp, PORT_CONFIG_REG(mp->port_num));
 	if (dev->flags & IFF_PROMISC)
 		config_reg |= (u32) UNICAST_PROMISCUOUS_MODE;
 	else
 		config_reg &= ~(u32) UNICAST_PROMISCUOUS_MODE;
-	mv_write(PORT_CONFIG_REG(mp->port_num), config_reg);
+	wrl(mp, PORT_CONFIG_REG(mp->port_num), config_reg);
 
 	eth_port_set_multicast_list(dev);
 }
@@ -987,7 +987,7 @@ static void mv643xx_eth_update_pscr(struct net_device *dev,
 	u32 o_pscr, n_pscr;
 	unsigned int queues;
 
-	o_pscr = mv_read(PORT_SERIAL_CONTROL_REG(port_num));
+	o_pscr = rdl(mp, PORT_SERIAL_CONTROL_REG(port_num));
 	n_pscr = o_pscr;
 
 	/* clear speed, duplex and rx buffer size fields */
@@ -1010,14 +1010,14 @@ static void mv643xx_eth_update_pscr(struct net_device *dev,
 
 	if (n_pscr != o_pscr) {
 		if ((o_pscr & SERIAL_PORT_ENABLE) == 0)
-			mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
+			wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
 		else {
 			queues = mv643xx_eth_port_disable_tx(mp);
 
 			o_pscr &= ~SERIAL_PORT_ENABLE;
-			mv_write(PORT_SERIAL_CONTROL_REG(port_num), o_pscr);
-			mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
-			mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
+			wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), o_pscr);
+			wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
+			wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
 			if (queues)
 				mv643xx_eth_port_enable_tx(mp, queues);
 		}
@@ -1043,13 +1043,13 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
 	unsigned int port_num = mp->port_num;
 
 	/* Read interrupt cause registers */
-	eth_int_cause = mv_read(INTERRUPT_CAUSE_REG(port_num)) &
+	eth_int_cause = rdl(mp, INTERRUPT_CAUSE_REG(port_num)) &
							ETH_INT_UNMASK_ALL;
 	if (eth_int_cause & ETH_INT_CAUSE_EXT) {
-		eth_int_cause_ext = mv_read(
+		eth_int_cause_ext = rdl(mp,
 			INTERRUPT_CAUSE_EXTEND_REG(port_num)) &
 						ETH_INT_UNMASK_ALL_EXT;
-		mv_write(INTERRUPT_CAUSE_EXTEND_REG(port_num),
+		wrl(mp, INTERRUPT_CAUSE_EXTEND_REG(port_num),
 							~eth_int_cause_ext);
 	}
 
@@ -1076,10 +1076,10 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
 #ifdef MV643XX_NAPI
 	if (eth_int_cause & ETH_INT_CAUSE_RX) {
 		/* schedule the NAPI poll routine to maintain port */
-		mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
+		wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
 
 		/* wait for previous write to complete */
-		mv_read(INTERRUPT_MASK_REG(port_num));
+		rdl(mp, INTERRUPT_MASK_REG(port_num));
 
 		netif_rx_schedule(dev, &mp->napi);
 	}
@@ -1132,9 +1132,9 @@ static unsigned int eth_port_set_rx_coal(struct mv643xx_private *mp,
 	unsigned int coal = ((t_clk / 1000000) * delay) / 64;
 
 	/* Set RX Coalescing mechanism */
-	mv_write(SDMA_CONFIG_REG(port_num),
+	wrl(mp, SDMA_CONFIG_REG(port_num),
 		((coal & 0x3fff) << 8) |
-		(mv_read(SDMA_CONFIG_REG(port_num))
+		(rdl(mp, SDMA_CONFIG_REG(port_num))
 			& 0xffc000ff));
 
 	return coal;
@@ -1170,7 +1170,7 @@ static unsigned int eth_port_set_tx_coal(struct mv643xx_private *mp,
 	unsigned int coal = ((t_clk / 1000000) * delay) / 64;
 
 	/* Set TX Coalescing mechanism */
-	mv_write(TX_FIFO_URGENT_THRESHOLD_REG(mp->port_num), coal << 4);
+	wrl(mp, TX_FIFO_URGENT_THRESHOLD_REG(mp->port_num), coal << 4);
 
 	return coal;
 }
@@ -1307,10 +1307,10 @@ static int mv643xx_eth_open(struct net_device *dev)
 	int err;
 
 	/* Clear any pending ethernet port interrupts */
-	mv_write(INTERRUPT_CAUSE_REG(port_num), 0);
-	mv_write(INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
+	wrl(mp, INTERRUPT_CAUSE_REG(port_num), 0);
+	wrl(mp, INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
 	/* wait for previous write to complete */
-	mv_read (INTERRUPT_CAUSE_EXTEND_REG(port_num));
+	rdl(mp, INTERRUPT_CAUSE_EXTEND_REG(port_num));
 
 	err = request_irq(dev->irq, mv643xx_eth_int_handler,
 			IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
@@ -1416,10 +1416,10 @@ static int mv643xx_eth_open(struct net_device *dev)
 	eth_port_set_tx_coal(mp, 133000000, MV643XX_TX_COAL);
 
 	/* Unmask phy and link status changes interrupts */
-	mv_write(INTERRUPT_EXTEND_MASK_REG(port_num), ETH_INT_UNMASK_ALL_EXT);
+	wrl(mp, INTERRUPT_EXTEND_MASK_REG(port_num), ETH_INT_UNMASK_ALL_EXT);
 
 	/* Unmask RX buffer and TX end interrupt */
-	mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
+	wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
 
 	return 0;
 
@@ -1498,9 +1498,9 @@ static int mv643xx_eth_stop(struct net_device *dev)
 	unsigned int port_num = mp->port_num;
 
 	/* Mask all interrupts on ethernet port */
-	mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
+	wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
 	/* wait for previous write to complete */
-	mv_read(INTERRUPT_MASK_REG(port_num));
+	rdl(mp, INTERRUPT_MASK_REG(port_num));
 
 #ifdef MV643XX_NAPI
 	napi_disable(&mp->napi);
@@ -1539,15 +1539,15 @@ static int mv643xx_poll(struct napi_struct *napi, int budget)
 #endif
 
 	work_done = 0;
-	if ((mv_read(RX_CURRENT_QUEUE_DESC_PTR_0(port_num)))
+	if ((rdl(mp, RX_CURRENT_QUEUE_DESC_PTR_0(port_num)))
						!= (u32) mp->rx_used_desc_q)
 		work_done = mv643xx_eth_receive_queue(dev, budget);
 
 	if (work_done < budget) {
 		netif_rx_complete(dev, napi);
-		mv_write(INTERRUPT_CAUSE_REG(port_num), 0);
-		mv_write(INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
-		mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
+		wrl(mp, INTERRUPT_CAUSE_REG(port_num), 0);
+		wrl(mp, INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
+		wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
 	}
 
 	return work_done;
@@ -1753,13 +1753,13 @@ static void mv643xx_netpoll(struct net_device *netdev)
 	struct mv643xx_private *mp = netdev_priv(netdev);
 	int port_num = mp->port_num;
 
-	mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
+	wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
 	/* wait for previous write to complete */
-	mv_read(INTERRUPT_MASK_REG(port_num));
+	rdl(mp, INTERRUPT_MASK_REG(port_num));
 
 	mv643xx_eth_int_handler(netdev->irq, netdev);
 
-	mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
+	wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
 }
 #endif
 
@@ -2012,8 +2012,8 @@ static void mv643xx_eth_shutdown(struct platform_device *pdev)
 	unsigned int port_num = mp->port_num;
 
 	/* Mask all interrupts on ethernet port */
-	mv_write(INTERRUPT_MASK_REG(port_num), 0);
-	mv_read (INTERRUPT_MASK_REG(port_num));
+	wrl(mp, INTERRUPT_MASK_REG(port_num), 0);
+	rdl(mp, INTERRUPT_MASK_REG(port_num));
 
 	eth_port_reset(mp);
 }
@@ -2278,28 +2278,28 @@ static void eth_port_start(struct net_device *dev)
 
 	/* Assignment of Tx CTRP of given queue */
 	tx_curr_desc = mp->tx_curr_desc_q;
-	mv_write(TX_CURRENT_QUEUE_DESC_PTR_0(port_num),
+	wrl(mp, TX_CURRENT_QUEUE_DESC_PTR_0(port_num),
 		(u32)((struct eth_tx_desc *)mp->tx_desc_dma + tx_curr_desc));
 
 	/* Assignment of Rx CRDP of given queue */
 	rx_curr_desc = mp->rx_curr_desc_q;
-	mv_write(RX_CURRENT_QUEUE_DESC_PTR_0(port_num),
+	wrl(mp, RX_CURRENT_QUEUE_DESC_PTR_0(port_num),
 		(u32)((struct eth_rx_desc *)mp->rx_desc_dma + rx_curr_desc));
 
 	/* Add the assigned Ethernet address to the port's address table */
 	eth_port_uc_addr_set(mp, dev->dev_addr);
 
 	/* Assign port configuration and command. */
-	mv_write(PORT_CONFIG_REG(port_num),
+	wrl(mp, PORT_CONFIG_REG(port_num),
			  PORT_CONFIG_DEFAULT_VALUE);
 
-	mv_write(PORT_CONFIG_EXTEND_REG(port_num),
+	wrl(mp, PORT_CONFIG_EXTEND_REG(port_num),
			  PORT_CONFIG_EXTEND_DEFAULT_VALUE);
 
-	pscr = mv_read(PORT_SERIAL_CONTROL_REG(port_num));
+	pscr = rdl(mp, PORT_SERIAL_CONTROL_REG(port_num));
 
 	pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS);
-	mv_write(PORT_SERIAL_CONTROL_REG(port_num), pscr);
+	wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), pscr);
 
 	pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
 		DISABLE_AUTO_NEG_SPEED_GMII |
@@ -2307,20 +2307,20 @@ static void eth_port_start(struct net_device *dev)
 		DO_NOT_FORCE_LINK_FAIL |
 		SERIAL_PORT_CONTROL_RESERVED;
 
-	mv_write(PORT_SERIAL_CONTROL_REG(port_num), pscr);
+	wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), pscr);
 
 	pscr |= SERIAL_PORT_ENABLE;
-	mv_write(PORT_SERIAL_CONTROL_REG(port_num), pscr);
+	wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), pscr);
 
 	/* Assign port SDMA configuration */
-	mv_write(SDMA_CONFIG_REG(port_num),
+	wrl(mp, SDMA_CONFIG_REG(port_num),
			  PORT_SDMA_CONFIG_DEFAULT_VALUE);
 
 	/* Enable port Rx. */
 	mv643xx_eth_port_enable_rx(mp, ETH_RX_QUEUES_ENABLED);
 
 	/* Disable port bandwidth limits by clearing MTU register */
-	mv_write(MAXIMUM_TRANSMIT_UNIT(port_num), 0);
+	wrl(mp, MAXIMUM_TRANSMIT_UNIT(port_num), 0);
 
 	/* save phy settings across reset */
 	mv643xx_get_settings(dev, &ethtool_cmd);
@@ -2343,8 +2343,8 @@ static void eth_port_uc_addr_set(struct mv643xx_private *mp,
 	mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) |
							(p_addr[3] << 0);
 
-	mv_write(MAC_ADDR_LOW(port_num), mac_l);
-	mv_write(MAC_ADDR_HIGH(port_num), mac_h);
+	wrl(mp, MAC_ADDR_LOW(port_num), mac_l);
+	wrl(mp, MAC_ADDR_HIGH(port_num), mac_h);
 
 	/* Accept frames with this address */
 	table = DA_FILTER_UNICAST_TABLE_BASE(port_num);
@@ -2361,8 +2361,8 @@ static void eth_port_uc_addr_get(struct mv643xx_private *mp,
 	unsigned int mac_h;
 	unsigned int mac_l;
 
-	mac_h = mv_read(MAC_ADDR_HIGH(port_num));
-	mac_l = mv_read(MAC_ADDR_LOW(port_num));
+	mac_h = rdl(mp, MAC_ADDR_HIGH(port_num));
+	mac_l = rdl(mp, MAC_ADDR_LOW(port_num));
 
 	p_addr[0] = (mac_h >> 24) & 0xff;
 	p_addr[1] = (mac_h >> 16) & 0xff;
@@ -2392,9 +2392,9 @@ static void eth_port_set_filter_table_entry(struct mv643xx_private *mp,
 	reg_offset = entry % 4;		/* Entry offset within the register */
 
 	/* Set "accepts frame bit" at specified table entry */
-	table_reg = mv_read(table + tbl_offset);
+	table_reg = rdl(mp, table + tbl_offset);
 	table_reg |= 0x01 << (8 * reg_offset);
-	mv_write(table + tbl_offset, table_reg);
+	wrl(mp, table + tbl_offset, table_reg);
 }
 
 /*
@@ -2527,7 +2527,7 @@ static void eth_port_set_multicast_list(struct net_device *dev)
			 * 3-1  Queue	ETH_Q0=0
			 * 7-4  Reserved = 0;
			 */
-			mv_write(DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
+			wrl(mp, DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
 
			/* Set all entries in DA filter other multicast
			 * table (Ex_dFOMT)
@@ -2537,7 +2537,7 @@ static void eth_port_set_multicast_list(struct net_device *dev)
			 * 3-1  Queue	ETH_Q0=0
			 * 7-4  Reserved = 0;
			 */
-			mv_write(DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
+			wrl(mp, DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
 		}
 		return;
 	}
@@ -2547,11 +2547,11 @@ static void eth_port_set_multicast_list(struct net_device *dev)
	 */
 	for (table_index = 0; table_index <= 0xFC; table_index += 4) {
 		/* Clear DA filter special multicast table (Ex_dFSMT) */
-		mv_write(DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
+		wrl(mp, DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
				(eth_port_num) + table_index, 0);
 
 		/* Clear DA filter other multicast table (Ex_dFOMT) */
-		mv_write(DA_FILTER_OTHER_MULTICAST_TABLE_BASE
+		wrl(mp, DA_FILTER_OTHER_MULTICAST_TABLE_BASE
				(eth_port_num) + table_index, 0);
 	}
 
@@ -2586,15 +2586,15 @@ static void eth_port_init_mac_tables(struct mv643xx_private *mp)
 
 	/* Clear DA filter unicast table (Ex_dFUT) */
 	for (table_index = 0; table_index <= 0xC; table_index += 4)
-		mv_write(DA_FILTER_UNICAST_TABLE_BASE(port_num) +
+		wrl(mp, DA_FILTER_UNICAST_TABLE_BASE(port_num) +
					table_index, 0);
 
 	for (table_index = 0; table_index <= 0xFC; table_index += 4) {
 		/* Clear DA filter special multicast table (Ex_dFSMT) */
-		mv_write(DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(port_num) +
+		wrl(mp, DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(port_num) +
					table_index, 0);
 		/* Clear DA filter other multicast table (Ex_dFOMT) */
-		mv_write(DA_FILTER_OTHER_MULTICAST_TABLE_BASE(port_num) +
+		wrl(mp, DA_FILTER_OTHER_MULTICAST_TABLE_BASE(port_num) +
					table_index, 0);
 	}
 }
@@ -2624,12 +2624,12 @@ static void eth_clear_mib_counters(struct mv643xx_private *mp)
 	/* Perform dummy reads from MIB counters */
 	for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i < ETH_MIB_LATE_COLLISION;
								i += 4)
-		mv_read(MIB_COUNTERS_BASE(port_num) + i);
+		rdl(mp, MIB_COUNTERS_BASE(port_num) + i);
 }
 
 static inline u32 read_mib(struct mv643xx_private *mp, int offset)
 {
-	return mv_read(MIB_COUNTERS_BASE(mp->port_num) + offset);
+	return rdl(mp, MIB_COUNTERS_BASE(mp->port_num) + offset);
 }
 
 static void eth_update_mib_counters(struct mv643xx_private *mp)
@@ -2714,7 +2714,7 @@ static int ethernet_phy_get(struct mv643xx_private *mp)
 {
 	unsigned int reg_data;
 
-	reg_data = mv_read(PHY_ADDR_REG);
+	reg_data = rdl(mp, PHY_ADDR_REG);
 
 	return ((reg_data >> (5 * mp->port_num)) & 0x1f);
 }
@@ -2741,10 +2741,10 @@ static void ethernet_phy_set(struct mv643xx_private *mp, int phy_addr)
 	u32 reg_data;
 	int addr_shift = 5 * mp->port_num;
 
-	reg_data = mv_read(PHY_ADDR_REG);
+	reg_data = rdl(mp, PHY_ADDR_REG);
 	reg_data &= ~(0x1f << addr_shift);
 	reg_data |= (phy_addr & 0x1f) << addr_shift;
-	mv_write(PHY_ADDR_REG, reg_data);
+	wrl(mp, PHY_ADDR_REG, reg_data);
 }
 
 /*
@@ -2782,13 +2782,13 @@ static void ethernet_phy_reset(struct mv643xx_private *mp)
 static void mv643xx_eth_port_enable_tx(struct mv643xx_private *mp,
					unsigned int queues)
 {
-	mv_write(TRANSMIT_QUEUE_COMMAND_REG(mp->port_num), queues);
+	wrl(mp, TRANSMIT_QUEUE_COMMAND_REG(mp->port_num), queues);
 }
 
 static void mv643xx_eth_port_enable_rx(struct mv643xx_private *mp,
					unsigned int queues)
 {
-	mv_write(RECEIVE_QUEUE_COMMAND_REG(mp->port_num), queues);
+	wrl(mp, RECEIVE_QUEUE_COMMAND_REG(mp->port_num), queues);
 }
 
 static unsigned int mv643xx_eth_port_disable_tx(struct mv643xx_private *mp)
@@ -2797,18 +2797,18 @@ static unsigned int mv643xx_eth_port_disable_tx(struct mv643xx_private *mp)
 	u32 queues;
 
 	/* Stop Tx port activity. Check port Tx activity. */
-	queues = mv_read(TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF;
+	queues = rdl(mp, TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF;
 	if (queues) {
 		/* Issue stop command for active queues only */
-		mv_write(TRANSMIT_QUEUE_COMMAND_REG(port_num), (queues << 8));
+		wrl(mp, TRANSMIT_QUEUE_COMMAND_REG(port_num), (queues << 8));
 
 		/* Wait for all Tx activity to terminate. */
 		/* Check port cause register that all Tx queues are stopped */
-		while (mv_read(TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF)
+		while (rdl(mp, TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF)
 			udelay(PHY_WAIT_MICRO_SECONDS);
 
 		/* Wait for Tx FIFO to empty */
-		while (mv_read(PORT_STATUS_REG(port_num)) &
+		while (rdl(mp, PORT_STATUS_REG(port_num)) &
							ETH_PORT_TX_FIFO_EMPTY)
 			udelay(PHY_WAIT_MICRO_SECONDS);
 	}
@@ -2822,14 +2822,14 @@ static unsigned int mv643xx_eth_port_disable_rx(struct mv643xx_private *mp)
 	u32 queues;
 
 	/* Stop Rx port activity. Check port Rx activity. */
-	queues = mv_read(RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF;
+	queues = rdl(mp, RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF;
 	if (queues) {
 		/* Issue stop command for active queues only */
-		mv_write(RECEIVE_QUEUE_COMMAND_REG(port_num), (queues << 8));
+		wrl(mp, RECEIVE_QUEUE_COMMAND_REG(port_num), (queues << 8));
 
 		/* Wait for all Rx activity to terminate. */
 		/* Check port cause register that all Rx queues are stopped */
-		while (mv_read(RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF)
+		while (rdl(mp, RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF)
 			udelay(PHY_WAIT_MICRO_SECONDS);
 	}
 
@@ -2866,11 +2866,11 @@ static void eth_port_reset(struct mv643xx_private *mp)
 	eth_clear_mib_counters(mp);
 
 	/* Reset the Enable bit in the Configuration Register */
-	reg_data = mv_read(PORT_SERIAL_CONTROL_REG(port_num));
+	reg_data = rdl(mp, PORT_SERIAL_CONTROL_REG(port_num));
 	reg_data &= ~(SERIAL_PORT_ENABLE |
			DO_NOT_FORCE_LINK_FAIL |
			FORCE_LINK_PASS);
-	mv_write(PORT_SERIAL_CONTROL_REG(port_num), reg_data);
+	wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), reg_data);
 }
 
 
@@ -2905,7 +2905,7 @@ static void eth_port_read_smi_reg(struct mv643xx_private *mp,
 	spin_lock_irqsave(&mv643xx_eth_phy_lock, flags);
 
 	/* wait for the SMI register to become available */
-	for (i = 0; mv_read(SMI_REG) & ETH_SMI_BUSY; i++) {
+	for (i = 0; rdl(mp, SMI_REG) & ETH_SMI_BUSY; i++) {
 		if (i == PHY_WAIT_ITERATIONS) {
 			printk("%s: PHY busy timeout\n", mp->dev->name);
 			goto out;
@@ -2913,11 +2913,11 @@ static void eth_port_read_smi_reg(struct mv643xx_private *mp,
 		udelay(PHY_WAIT_MICRO_SECONDS);
 	}
 
-	mv_write(SMI_REG,
+	wrl(mp, SMI_REG,
		(phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ);
 
 	/* now wait for the data to be valid */
-	for (i = 0; !(mv_read(SMI_REG) & ETH_SMI_READ_VALID); i++) {
+	for (i = 0; !(rdl(mp, SMI_REG) & ETH_SMI_READ_VALID); i++) {
 		if (i == PHY_WAIT_ITERATIONS) {
 			printk("%s: PHY read timeout\n", mp->dev->name);
 			goto out;
@@ -2925,7 +2925,7 @@ static void eth_port_read_smi_reg(struct mv643xx_private *mp,
 		udelay(PHY_WAIT_MICRO_SECONDS);
 	}
 
-	*value = mv_read(SMI_REG) & 0xffff;
+	*value = rdl(mp, SMI_REG) & 0xffff;
 out:
 	spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags);
 }
@@ -2963,7 +2963,7 @@ static void eth_port_write_smi_reg(struct mv643xx_private *mp,
 	spin_lock_irqsave(&mv643xx_eth_phy_lock, flags);
 
 	/* wait for the SMI register to become available */
-	for (i = 0; mv_read(SMI_REG) & ETH_SMI_BUSY; i++) {
+	for (i = 0; rdl(mp, SMI_REG) & ETH_SMI_BUSY; i++) {
 		if (i == PHY_WAIT_ITERATIONS) {
 			printk("%s: PHY busy timeout\n", mp->dev->name);
 			goto out;
@@ -2971,7 +2971,7 @@ static void eth_port_write_smi_reg(struct mv643xx_private *mp,
 		udelay(PHY_WAIT_MICRO_SECONDS);
 	}
 
-	mv_write(SMI_REG, (phy_addr << 16) | (phy_reg << 21) |
+	wrl(mp, SMI_REG, (phy_addr << 16) | (phy_reg << 21) |
				ETH_SMI_OPCODE_WRITE | (value & 0xffff));
 out:
 	spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags);