Merge branch 'new-drivers' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6

Author: David S. Miller
Date:   2008-09-19 15:51:35 -07:00
Commit: 79b6f7ecda
41 changed files with 20398 additions and 0 deletions


@@ -0,0 +1,46 @@
Copyright (c) 2003-2008 QLogic Corporation
QLogic Linux Networking HBA Driver
This program includes a device driver for Linux 2.6 that may be
distributed with QLogic hardware specific firmware binary file.
You may modify and redistribute the device driver code under the
GNU General Public License as published by the Free Software
Foundation (version 2 or a later version).
You may redistribute the hardware specific firmware binary file
under the following terms:
1. Redistribution of source code (only if applicable),
must retain the above copyright notice, this list of
conditions and the following disclaimer.
2. Redistribution in binary form must reproduce the above
copyright notice, this list of conditions and the
following disclaimer in the documentation and/or other
materials provided with the distribution.
3. The name of QLogic Corporation may not be used to
endorse or promote products derived from this software
without specific prior written permission.
REGARDLESS OF WHAT LICENSING MECHANISM IS USED OR APPLICABLE,
THIS PROGRAM IS PROVIDED BY QLOGIC CORPORATION "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
USER ACKNOWLEDGES AND AGREES THAT USE OF THIS PROGRAM WILL NOT
CREATE OR GIVE GROUNDS FOR A LICENSE BY IMPLICATION, ESTOPPEL, OR
OTHERWISE IN ANY INTELLECTUAL PROPERTY RIGHTS (PATENT, COPYRIGHT,
TRADE SECRET, MASK WORK, OR OTHER PROPRIETARY RIGHT) EMBODIED IN
ANY OTHER QLOGIC HARDWARE OR SOFTWARE EITHER SOLELY OR IN
COMBINATION WITH THIS PROGRAM.


@@ -1046,6 +1046,13 @@ L: cbe-oss-dev@ozlabs.org
W: http://www.ibm.com/developerworks/power/cell/
S: Supported
CISCO 10G ETHERNET DRIVER
P: Scott Feldman
M: scofeldm@cisco.com
P: Joe Eykholt
M: jeykholt@cisco.com
S: Supported
CFAG12864B LCD DRIVER
P: Miguel Ojeda Sandonis
M: miguel.ojeda.sandonis@gmail.com
@@ -2319,6 +2326,12 @@ L: video4linux-list@redhat.com
W: http://www.ivtvdriver.org
S: Maintained
JME NETWORK DRIVER
P: Guo-Fu Tseng
M: cooldavid@cooldavid.org
L: netdev@vger.kernel.org
S: Maintained
JOURNALLING FLASH FILE SYSTEM V2 (JFFS2)
P: David Woodhouse
M: dwmw2@infradead.org
@@ -3383,6 +3396,13 @@ M: linux-driver@qlogic.com
L: netdev@vger.kernel.org
S: Supported
QLOGIC QLGE 10Gb ETHERNET DRIVER
P: Ron Mercer
M: linux-driver@qlogic.com
M: ron.mercer@qlogic.com
L: netdev@vger.kernel.org
S: Supported
QNX4 FILESYSTEM
P: Anders Larsen
M: al@alarsen.net


@@ -1840,6 +1840,17 @@ config NE_H8300
Say Y here if you want to use the NE2000 compatible
controller on the Renesas H8/300 processor.
config ATL2
tristate "Atheros L2 Fast Ethernet support"
depends on PCI
select CRC32
select MII
help
This driver supports the Atheros L2 fast ethernet adapter.
To compile this driver as a module, choose M here. The module
will be called atl2.
source "drivers/net/fs_enet/Kconfig"
endif # NET_ETHERNET
@@ -2302,6 +2313,18 @@ config ATL1E
To compile this driver as a module, choose M here. The module
will be called atl1e.
config JME
tristate "JMicron(R) PCI-Express Gigabit Ethernet support"
depends on PCI
select CRC32
select MII
---help---
This driver supports the PCI-Express gigabit ethernet adapters
based on the JMicron JMC250 chipset.
based on JMicron JMC250 chipset.
To compile this driver as a module, choose M here. The module
will be called jme.
endif # NETDEV_1000
#
@@ -2377,6 +2400,13 @@ config EHEA
To compile the driver as a module, choose M here. The module
will be called ehea.
config ENIC
tristate "E, the Cisco 10G Ethernet NIC"
depends on PCI && INET
select INET_LRO
help
This enables support for the Cisco 10G Ethernet card.
config IXGBE
tristate "Intel(R) 10GbE PCI Express adapters support"
depends on PCI && INET
@@ -2496,6 +2526,15 @@ config BNX2X
To compile this driver as a module, choose M here: the module
will be called bnx2x. This is recommended.
config QLGE
tristate "QLogic QLGE 10Gb Ethernet Driver Support"
depends on PCI
help
This driver supports QLogic ISP8XXX 10Gb Ethernet cards.
To compile this driver as a module, choose M here: the module
will be called qlge.
source "drivers/net/sfc/Kconfig"
endif # NETDEV_10000


@@ -15,9 +15,12 @@ obj-$(CONFIG_EHEA) += ehea/
obj-$(CONFIG_CAN) += can/
obj-$(CONFIG_BONDING) += bonding/
obj-$(CONFIG_ATL1) += atlx/
obj-$(CONFIG_ATL2) += atlx/
obj-$(CONFIG_ATL1E) += atl1e/
obj-$(CONFIG_GIANFAR) += gianfar_driver.o
obj-$(CONFIG_TEHUTI) += tehuti.o
obj-$(CONFIG_ENIC) += enic/
obj-$(CONFIG_JME) += jme.o
gianfar_driver-objs := gianfar.o \
gianfar_ethtool.o \
@@ -128,6 +131,7 @@ obj-$(CONFIG_AX88796) += ax88796.o
obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o
obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
obj-$(CONFIG_QLA3XXX) += qla3xxx.o
obj-$(CONFIG_QLGE) += qlge/
obj-$(CONFIG_PPP) += ppp_generic.o
obj-$(CONFIG_PPP_ASYNC) += ppp_async.o


@@ -1 +1,3 @@
obj-$(CONFIG_ATL1) += atl1.o
obj-$(CONFIG_ATL2) += atl2.o

drivers/net/atlx/atl2.c (new file, 3127 lines; diff suppressed because it is too large)

drivers/net/atlx/atl2.h (new file, 530 lines)

@@ -0,0 +1,530 @@
/* atl2.h -- atl2 driver definitions
*
* Copyright(c) 2007 Atheros Corporation. All rights reserved.
* Copyright(c) 2006 xiong huang <xiong.huang@atheros.com>
* Copyright(c) 2007 Chris Snook <csnook@redhat.com>
*
* Derived from Intel e1000 driver
* Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#ifndef _ATL2_H_
#define _ATL2_H_
#include <asm/atomic.h>
#include <linux/netdevice.h>
#ifndef _ATL2_HW_H_
#define _ATL2_HW_H_
#ifndef _ATL2_OSDEP_H_
#define _ATL2_OSDEP_H_
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include "atlx.h"
#ifdef ETHTOOL_OPS_COMPAT
extern int ethtool_ioctl(struct ifreq *ifr);
#endif
#define PCI_COMMAND_REGISTER PCI_COMMAND
#define CMD_MEM_WRT_INVALIDATE PCI_COMMAND_INVALIDATE
#define ETH_ADDR_LEN ETH_ALEN
#define ATL2_WRITE_REG(a, reg, value) (iowrite32((value), \
((a)->hw_addr + (reg))))
#define ATL2_WRITE_FLUSH(a) (ioread32((a)->hw_addr))
#define ATL2_READ_REG(a, reg) (ioread32((a)->hw_addr + (reg)))
#define ATL2_WRITE_REGB(a, reg, value) (iowrite8((value), \
((a)->hw_addr + (reg))))
#define ATL2_READ_REGB(a, reg) (ioread8((a)->hw_addr + (reg)))
#define ATL2_WRITE_REGW(a, reg, value) (iowrite16((value), \
((a)->hw_addr + (reg))))
#define ATL2_READ_REGW(a, reg) (ioread16((a)->hw_addr + (reg)))
#define ATL2_WRITE_REG_ARRAY(a, reg, offset, value) \
(iowrite32((value), (((a)->hw_addr + (reg)) + ((offset) << 2))))
#define ATL2_READ_REG_ARRAY(a, reg, offset) \
(ioread32(((a)->hw_addr + (reg)) + ((offset) << 2)))
#endif /* _ATL2_OSDEP_H_ */
struct atl2_adapter;
struct atl2_hw;
/* function prototype */
static s32 atl2_reset_hw(struct atl2_hw *hw);
static s32 atl2_read_mac_addr(struct atl2_hw *hw);
static s32 atl2_init_hw(struct atl2_hw *hw);
static s32 atl2_get_speed_and_duplex(struct atl2_hw *hw, u16 *speed,
u16 *duplex);
static u32 atl2_hash_mc_addr(struct atl2_hw *hw, u8 *mc_addr);
static void atl2_hash_set(struct atl2_hw *hw, u32 hash_value);
static s32 atl2_read_phy_reg(struct atl2_hw *hw, u16 reg_addr, u16 *phy_data);
static s32 atl2_write_phy_reg(struct atl2_hw *hw, u32 reg_addr, u16 phy_data);
static void atl2_read_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value);
static void atl2_write_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value);
static void atl2_set_mac_addr(struct atl2_hw *hw);
static bool atl2_read_eeprom(struct atl2_hw *hw, u32 Offset, u32 *pValue);
static bool atl2_write_eeprom(struct atl2_hw *hw, u32 offset, u32 value);
static s32 atl2_phy_init(struct atl2_hw *hw);
static int atl2_check_eeprom_exist(struct atl2_hw *hw);
static void atl2_force_ps(struct atl2_hw *hw);
/* register definition */
/* Block IDLE Status Register */
#define IDLE_STATUS_RXMAC 1 /* 1: RXMAC is non-IDLE */
#define IDLE_STATUS_TXMAC 2 /* 1: TXMAC is non-IDLE */
#define IDLE_STATUS_DMAR 8 /* 1: DMAR is non-IDLE */
#define IDLE_STATUS_DMAW 4 /* 1: DMAW is non-IDLE */
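As a quick illustration of how the register accessors above combine with these idle bits, here is a minimal polling sketch. It is not part of the driver: the REG_IDLE_STATUS offset and the helper name are assumptions for the example, and it presumes placement after the struct atl2_hw definition further down.

/* Sketch only: REG_IDLE_STATUS is a hypothetical offset, not taken
 * from this header. */
#define REG_IDLE_STATUS 0x1004

static int atl2_wait_rxmac_idle(struct atl2_hw *hw)
{
	int i;

	for (i = 0; i < 10; i++) {
		/* the bit reads 1 while the RX MAC is still busy */
		if (!(ATL2_READ_REG(hw, REG_IDLE_STATUS) & IDLE_STATUS_RXMAC))
			return 0;
		udelay(100); /* <linux/delay.h> is included above */
	}
	return -EBUSY; /* timeout policy is illustrative, not the driver's */
}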
/* MDIO Control Register */
#define MDIO_WAIT_TIMES 10
/* MAC Control Register */
#define MAC_CTRL_DBG_TX_BKPRESURE 0x100000 /* 1: TX max backoff */
#define MAC_CTRL_MACLP_CLK_PHY 0x8000000 /* 1: 25MHz from phy */
#define MAC_CTRL_HALF_LEFT_BUF_SHIFT 28
#define MAC_CTRL_HALF_LEFT_BUF_MASK 0xF /* MAC retry buf x32B */
/* Internal SRAM Partition Register */
#define REG_SRAM_TXRAM_END 0x1500 /* Internal tail address of TXRAM
* default: 2byte*1024 */
#define REG_SRAM_RXRAM_END 0x1502 /* Internal tail address of RXRAM
* default: 2byte*1024 */
/* Descriptor Control register */
#define REG_TXD_BASE_ADDR_LO 0x1544 /* The base address of the Transmit
* Data Mem low 32-bit(dword align) */
#define REG_TXD_MEM_SIZE 0x1548 /* Transmit Data Memory size(by
* double word , max 256KB) */
#define REG_TXS_BASE_ADDR_LO 0x154C /* The base address of the Transmit
* Status Memory low 32-bit(dword word
* align) */
#define REG_TXS_MEM_SIZE 0x1550 /* double word unit, max 4*2047
* bytes. */
#define REG_RXD_BASE_ADDR_LO 0x1554 /* The base address of the Transmit
* Status Memory low 32-bit(unit 8
* bytes) */
#define REG_RXD_BUF_NUM 0x1558 /* Receive Data & Status Memory buffer
* number (unit 1536bytes, max
* 1536*2047) */
/* DMAR Control Register */
#define REG_DMAR 0x1580
#define DMAR_EN 0x1 /* 1: Enable DMAR */
/* TX Cut-Through (early tx threshold) Control Register */
#define REG_TX_CUT_THRESH 0x1590 /* TxMac begin transmit packet
* threshold(unit word) */
/* DMAW Control Register */
#define REG_DMAW 0x15A0
#define DMAW_EN 0x1
/* Flow control register */
#define REG_PAUSE_ON_TH 0x15A8 /* RXD high watermark of overflow
* threshold configuration register */
#define REG_PAUSE_OFF_TH 0x15AA /* RXD lower watermark of overflow
* threshold configuration register */
/* Mailbox Register */
#define REG_MB_TXD_WR_IDX 0x15f0 /* double word align */
#define REG_MB_RXD_RD_IDX 0x15F4 /* RXD Read index (unit: 1536 bytes) */
/* Interrupt Status Register */
#define ISR_TIMER 1 /* Interrupt when Timer counts down to zero */
#define ISR_MANUAL 2 /* Software manual interrupt, for debug. Set
* when SW_MAN_INT_EN is set in Table 51
* Selene Master Control Register
* (Offset 0x1400). */
#define ISR_RXF_OV 4 /* RXF overflow interrupt */
#define ISR_TXF_UR 8 /* TXF underrun interrupt */
#define ISR_TXS_OV 0x10 /* Internal transmit status buffer full
* interrupt */
#define ISR_RXS_OV 0x20 /* Internal receive status buffer full
* interrupt */
#define ISR_LINK_CHG 0x40 /* Link Status Change Interrupt */
#define ISR_HOST_TXD_UR 0x80
#define ISR_HOST_RXD_OV 0x100 /* Host rx data memory full , one pulse */
#define ISR_DMAR_TO_RST 0x200 /* DMAR op timeout interrupt. SW should
* do Reset */
#define ISR_DMAW_TO_RST 0x400
#define ISR_PHY 0x800 /* phy interrupt */
#define ISR_TS_UPDATE 0x10000 /* interrupt after new tx pkt status written
* to host */
#define ISR_RS_UPDATE 0x20000 /* interrupt after new rx pkt status written
* to host. */
#define ISR_TX_EARLY 0x40000 /* interrupt when txmac begin transmit one
* packet */
#define ISR_TX_EVENT (ISR_TXF_UR | ISR_TXS_OV | ISR_HOST_TXD_UR |\
ISR_TS_UPDATE | ISR_TX_EARLY)
#define ISR_RX_EVENT (ISR_RXF_OV | ISR_RXS_OV | ISR_HOST_RXD_OV |\
ISR_RS_UPDATE)
#define IMR_NORMAL_MASK (\
/*ISR_LINK_CHG |*/\
ISR_MANUAL |\
ISR_DMAR_TO_RST |\
ISR_DMAW_TO_RST |\
ISR_PHY |\
ISR_PHY_LINKDOWN |\
ISR_TS_UPDATE |\
ISR_RS_UPDATE)
/* Receive MAC Statistics Registers */
#define REG_STS_RX_PAUSE 0x1700 /* Num pause packets received */
#define REG_STS_RXD_OV 0x1704 /* Num frames dropped due to RX
* FIFO overflow */
#define REG_STS_RXS_OV 0x1708 /* Num frames dropped due to RX
* Status Buffer Overflow */
#define REG_STS_RX_FILTER 0x170C /* Num packets dropped due to
* address filtering */
/* MII definitions */
/* PHY Common Register */
#define MII_SMARTSPEED 0x14
#define MII_DBG_ADDR 0x1D
#define MII_DBG_DATA 0x1E
/* PCI Command Register Bit Definitions */
#define PCI_REG_COMMAND 0x04
#define CMD_IO_SPACE 0x0001
#define CMD_MEMORY_SPACE 0x0002
#define CMD_BUS_MASTER 0x0004
#define MEDIA_TYPE_100M_FULL 1
#define MEDIA_TYPE_100M_HALF 2
#define MEDIA_TYPE_10M_FULL 3
#define MEDIA_TYPE_10M_HALF 4
#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x000F /* Everything */
/* The size (in bytes) of an Ethernet packet */
#define ENET_HEADER_SIZE 14
#define MAXIMUM_ETHERNET_FRAME_SIZE 1518 /* with FCS */
#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* with FCS */
#define ETHERNET_FCS_SIZE 4
#define MAX_JUMBO_FRAME_SIZE 0x2000
#define VLAN_SIZE 4
struct tx_pkt_header {
unsigned pkt_size:11;
unsigned:4; /* reserved */
unsigned ins_vlan:1; /* txmac should insert vlan */
unsigned short vlan; /* vlan tag */
};
/* FIXME: replace above bitfields with MASK/SHIFT defines below */
#define TX_PKT_HEADER_SIZE_MASK 0x7FF
#define TX_PKT_HEADER_SIZE_SHIFT 0
#define TX_PKT_HEADER_INS_VLAN_MASK 0x1
#define TX_PKT_HEADER_INS_VLAN_SHIFT 15
#define TX_PKT_HEADER_VLAN_TAG_MASK 0xFFFF
#define TX_PKT_HEADER_VLAN_TAG_SHIFT 16
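To make the FIXME above concrete, here is a sketch of what the bitfield-free encoding could look like using these MASK/SHIFT defines. The helper name is an assumption; the driver itself still uses the struct tx_pkt_header bitfields.

/* Sketch only: shows the MASK/SHIFT encoding the FIXME points toward.
 * The helper name is hypothetical. */
static inline u32 atl2_encode_tx_pkt_header(u16 pkt_size, bool ins_vlan,
					    u16 vlan_tag)
{
	u32 w = 0;

	w |= (pkt_size & TX_PKT_HEADER_SIZE_MASK) << TX_PKT_HEADER_SIZE_SHIFT;
	if (ins_vlan)
		w |= TX_PKT_HEADER_INS_VLAN_MASK << TX_PKT_HEADER_INS_VLAN_SHIFT;
	w |= ((u32)vlan_tag & TX_PKT_HEADER_VLAN_TAG_MASK)
		<< TX_PKT_HEADER_VLAN_TAG_SHIFT;
	return w;
}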
struct tx_pkt_status {
unsigned pkt_size:11;
unsigned:5; /* reserved */
unsigned ok:1; /* current packet transmitted without error */
unsigned bcast:1; /* broadcast packet */
unsigned mcast:1; /* multicast packet */
unsigned pause:1; /* transmitted a pause frame */
unsigned ctrl:1;
unsigned defer:1; /* current packet is xmitted with defer */
unsigned exc_defer:1;
unsigned single_col:1;
unsigned multi_col:1;
unsigned late_col:1;
unsigned abort_col:1;
unsigned underun:1; /* current packet is aborted
* due to txram underrun */
unsigned:3; /* reserved */
unsigned update:1; /* always 1'b1 in tx_status_buf */
};
/* FIXME: replace above bitfields with MASK/SHIFT defines below */
#define TX_PKT_STATUS_SIZE_MASK 0x7FF
#define TX_PKT_STATUS_SIZE_SHIFT 0
#define TX_PKT_STATUS_OK_MASK 0x1
#define TX_PKT_STATUS_OK_SHIFT 16
#define TX_PKT_STATUS_BCAST_MASK 0x1
#define TX_PKT_STATUS_BCAST_SHIFT 17
#define TX_PKT_STATUS_MCAST_MASK 0x1
#define TX_PKT_STATUS_MCAST_SHIFT 18
#define TX_PKT_STATUS_PAUSE_MASK 0x1
#define TX_PKT_STATUS_PAUSE_SHIFT 19
#define TX_PKT_STATUS_CTRL_MASK 0x1
#define TX_PKT_STATUS_CTRL_SHIFT 20
#define TX_PKT_STATUS_DEFER_MASK 0x1
#define TX_PKT_STATUS_DEFER_SHIFT 21
#define TX_PKT_STATUS_EXC_DEFER_MASK 0x1
#define TX_PKT_STATUS_EXC_DEFER_SHIFT 22
#define TX_PKT_STATUS_SINGLE_COL_MASK 0x1
#define TX_PKT_STATUS_SINGLE_COL_SHIFT 23
#define TX_PKT_STATUS_MULTI_COL_MASK 0x1
#define TX_PKT_STATUS_MULTI_COL_SHIFT 24
#define TX_PKT_STATUS_LATE_COL_MASK 0x1
#define TX_PKT_STATUS_LATE_COL_SHIFT 25
#define TX_PKT_STATUS_ABORT_COL_MASK 0x1
#define TX_PKT_STATUS_ABORT_COL_SHIFT 26
#define TX_PKT_STATUS_UNDERRUN_MASK 0x1
#define TX_PKT_STATUS_UNDERRUN_SHIFT 27
#define TX_PKT_STATUS_UPDATE_MASK 0x1
#define TX_PKT_STATUS_UPDATE_SHIFT 31
struct rx_pkt_status {
unsigned pkt_size:11; /* packet size, max 2047 bytes */
unsigned:5; /* reserved */
unsigned ok:1; /* current packet received ok without error */
unsigned bcast:1; /* current packet is broadcast */
unsigned mcast:1; /* current packet is multicast */
unsigned pause:1;
unsigned ctrl:1;
unsigned crc:1; /* received a packet with crc error */
unsigned code:1; /* received a packet with code error */
unsigned runt:1; /* received a packet less than 64 bytes
* with good crc */
unsigned frag:1; /* received a packet less than 64 bytes
* with bad crc */
unsigned trunc:1; /* current frame truncated due to rxram full */
unsigned align:1; /* this packet has an alignment error */
unsigned vlan:1; /* this packet has vlan */
unsigned:3; /* reserved */
unsigned update:1;
unsigned short vtag; /* vlan tag */
unsigned:16;
};
/* FIXME: replace above bitfields with MASK/SHIFT defines below */
#define RX_PKT_STATUS_SIZE_MASK 0x7FF
#define RX_PKT_STATUS_SIZE_SHIFT 0
#define RX_PKT_STATUS_OK_MASK 0x1
#define RX_PKT_STATUS_OK_SHIFT 16
#define RX_PKT_STATUS_BCAST_MASK 0x1
#define RX_PKT_STATUS_BCAST_SHIFT 17
#define RX_PKT_STATUS_MCAST_MASK 0x1
#define RX_PKT_STATUS_MCAST_SHIFT 18
#define RX_PKT_STATUS_PAUSE_MASK 0x1
#define RX_PKT_STATUS_PAUSE_SHIFT 19
#define RX_PKT_STATUS_CTRL_MASK 0x1
#define RX_PKT_STATUS_CTRL_SHIFT 20
#define RX_PKT_STATUS_CRC_MASK 0x1
#define RX_PKT_STATUS_CRC_SHIFT 21
#define RX_PKT_STATUS_CODE_MASK 0x1
#define RX_PKT_STATUS_CODE_SHIFT 22
#define RX_PKT_STATUS_RUNT_MASK 0x1
#define RX_PKT_STATUS_RUNT_SHIFT 23
#define RX_PKT_STATUS_FRAG_MASK 0x1
#define RX_PKT_STATUS_FRAG_SHIFT 24
#define RX_PKT_STATUS_TRUNK_MASK 0x1
#define RX_PKT_STATUS_TRUNK_SHIFT 25
#define RX_PKT_STATUS_ALIGN_MASK 0x1
#define RX_PKT_STATUS_ALIGN_SHIFT 26
#define RX_PKT_STATUS_VLAN_MASK 0x1
#define RX_PKT_STATUS_VLAN_SHIFT 27
#define RX_PKT_STATUS_UPDATE_MASK 0x1
#define RX_PKT_STATUS_UPDATE_SHIFT 31
#define RX_PKT_STATUS_VLAN_TAG_MASK 0xFFFF
#define RX_PKT_STATUS_VLAN_TAG_SHIFT 32
struct rx_desc {
struct rx_pkt_status status;
unsigned char packet[1536-sizeof(struct rx_pkt_status)];
};
enum atl2_speed_duplex {
atl2_10_half = 0,
atl2_10_full = 1,
atl2_100_half = 2,
atl2_100_full = 3
};
struct atl2_spi_flash_dev {
const char *manu_name; /* manufacturer id */
/* op-code */
u8 cmdWRSR;
u8 cmdREAD;
u8 cmdPROGRAM;
u8 cmdWREN;
u8 cmdWRDI;
u8 cmdRDSR;
u8 cmdRDID;
u8 cmdSECTOR_ERASE;
u8 cmdCHIP_ERASE;
};
/* Structure containing variables used by the shared code (atl2_hw.c) */
struct atl2_hw {
u8 __iomem *hw_addr;
void *back;
u8 preamble_len;
u8 max_retry; /* Retransmission maximum, afterwards the
* packet will be discarded. */
u8 jam_ipg; /* IPG to start JAM for collision based flow
* control in half-duplex mode. In unit of
* 8-bit time. */
u8 ipgt; /* Desired back to back inter-packet gap. The
* default is 96-bit time. */
u8 min_ifg; /* Minimum number of IFG to enforce between
* RX frames. Frame gap below this IFG is
* dropped. */
u8 ipgr1; /* 64bit Carrier-Sense window */
u8 ipgr2; /* 96-bit IPG window */
u8 retry_buf; /* In half-duplex mode, holds some
* bytes for MAC retry. (8*4 bytes unit) */
u16 fc_rxd_hi;
u16 fc_rxd_lo;
u16 lcol; /* Collision Window */
u16 max_frame_size;
u16 MediaType;
u16 autoneg_advertised;
u16 pci_cmd_word;
u16 mii_autoneg_adv_reg;
u32 mem_rang;
u32 txcw;
u32 mc_filter_type;
u32 num_mc_addrs;
u32 collision_delta;
u32 tx_packet_delta;
u16 phy_spd_default;
u16 device_id;
u16 vendor_id;
u16 subsystem_id;
u16 subsystem_vendor_id;
u8 revision_id;
/* spi flash */
u8 flash_vendor;
u8 dma_fairness;
u8 mac_addr[NODE_ADDRESS_SIZE];
u8 perm_mac_addr[NODE_ADDRESS_SIZE];
/* FIXME */
/* bool phy_preamble_sup; */
bool phy_configured;
};
#endif /* _ATL2_HW_H_ */
struct atl2_ring_header {
/* pointer to the descriptor ring memory */
void *desc;
/* physical address of the descriptor ring */
dma_addr_t dma;
/* length of descriptor ring in bytes */
unsigned int size;
};
/* board specific private data structure */
struct atl2_adapter {
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
struct net_device_stats net_stats;
#ifdef NETIF_F_HW_VLAN_TX
struct vlan_group *vlgrp;
#endif
u32 wol;
u16 link_speed;
u16 link_duplex;
spinlock_t stats_lock;
spinlock_t tx_lock;
struct work_struct reset_task;
struct work_struct link_chg_task;
struct timer_list watchdog_timer;
struct timer_list phy_config_timer;
unsigned long cfg_phy;
bool mac_disabled;
/* All Descriptor memory */
dma_addr_t ring_dma;
void *ring_vir_addr;
int ring_size;
struct tx_pkt_header *txd_ring;
dma_addr_t txd_dma;
struct tx_pkt_status *txs_ring;
dma_addr_t txs_dma;
struct rx_desc *rxd_ring;
dma_addr_t rxd_dma;
u32 txd_ring_size; /* bytes per unit */
u32 txs_ring_size; /* dwords per unit */
u32 rxd_ring_size; /* 1536 bytes per unit */
/* read/write ptr: */
/* host */
u32 txd_write_ptr;
u32 txs_next_clear;
u32 rxd_read_ptr;
/* nic */
atomic_t txd_read_ptr;
atomic_t txs_write_ptr;
u32 rxd_write_ptr;
/* Interrupt Moderator timer (2us resolution) */
u16 imt;
/* Interrupt Clear timer (2us resolution) */
u16 ict;
unsigned long flags;
/* structs defined in atl2_hw.h */
u32 bd_number; /* board number */
bool pci_using_64;
bool have_msi;
struct atl2_hw hw;
u32 usr_cmd;
/* FIXME */
/* u32 regs_buff[ATL2_REGS_LEN]; */
u32 pci_state[16];
u32 *config_space;
};
enum atl2_state_t {
__ATL2_TESTING,
__ATL2_RESETTING,
__ATL2_DOWN
};
#endif /* _ATL2_H_ */


@@ -0,0 +1,5 @@
obj-$(CONFIG_ENIC) := enic.o
enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
enic_res.o vnic_dev.o vnic_rq.o


@@ -0,0 +1,79 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _CQ_DESC_H_
#define _CQ_DESC_H_
/*
* Completion queue descriptor types
*/
enum cq_desc_types {
CQ_DESC_TYPE_WQ_ENET = 0,
CQ_DESC_TYPE_DESC_COPY = 1,
CQ_DESC_TYPE_WQ_EXCH = 2,
CQ_DESC_TYPE_RQ_ENET = 3,
CQ_DESC_TYPE_RQ_FCP = 4,
};
/* Completion queue descriptor: 16B
*
* All completion queues have this basic layout. The
* type_specfic area is unique for each completion
* queue type.
*/
struct cq_desc {
__le16 completed_index;
__le16 q_number;
u8 type_specfic[11];
u8 type_color;
};
#define CQ_DESC_TYPE_BITS 7
#define CQ_DESC_TYPE_MASK ((1 << CQ_DESC_TYPE_BITS) - 1)
#define CQ_DESC_COLOR_MASK 1
#define CQ_DESC_Q_NUM_BITS 10
#define CQ_DESC_Q_NUM_MASK ((1 << CQ_DESC_Q_NUM_BITS) - 1)
#define CQ_DESC_COMP_NDX_BITS 12
#define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1)
static inline void cq_desc_dec(const struct cq_desc *desc_arg,
u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
{
const struct cq_desc *desc = desc_arg;
const u8 type_color = desc->type_color;
*color = (type_color >> CQ_DESC_TYPE_BITS) & CQ_DESC_COLOR_MASK;
/*
* Make sure color bit is read from desc *before* other fields
* are read from desc. Hardware guarantees color bit is last
* bit (byte) written. Adding the rmb() prevents the compiler
* and/or CPU from reordering the reads which would potentially
* result in reading stale values.
*/
rmb();
*type = type_color & CQ_DESC_TYPE_MASK;
*q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
*completed_index = le16_to_cpu(desc->completed_index) &
CQ_DESC_COMP_NDX_MASK;
}
#endif /* _CQ_DESC_H_ */
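A minimal caller sketch for cq_desc_dec(), to show the out-parameter pattern; the function below is hypothetical and not part of the driver. In practice the descriptor pointer comes from the CQ ring, and color is compared against the ring's last color to detect newly written entries (see vnic_cq_service() further down).

/* Hypothetical caller: decode one completion descriptor. */
static void example_decode_one(const struct cq_desc *desc)
{
	u8 type, color;
	u16 q_number, completed_index;

	cq_desc_dec(desc, &type, &color, &q_number, &completed_index);
	/* only act on the entry if color differs from the ring's
	 * last_color; otherwise the entry is stale */
}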


@@ -0,0 +1,169 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _CQ_ENET_DESC_H_
#define _CQ_ENET_DESC_H_
#include "cq_desc.h"
/* Ethernet completion queue descriptor: 16B */
struct cq_enet_wq_desc {
__le16 completed_index;
__le16 q_number;
u8 reserved[11];
u8 type_color;
};
static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc,
u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
{
cq_desc_dec((struct cq_desc *)desc, type,
color, q_number, completed_index);
}
/* Completion queue descriptor: Ethernet receive queue, 16B */
struct cq_enet_rq_desc {
__le16 completed_index_flags;
__le16 q_number_rss_type_flags;
__le32 rss_hash;
__le16 bytes_written_flags;
__le16 vlan;
__le16 checksum_fcoe;
u8 flags;
u8 type_color;
};
#define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT (0x1 << 12)
#define CQ_ENET_RQ_DESC_FLAGS_FCOE (0x1 << 13)
#define CQ_ENET_RQ_DESC_FLAGS_EOP (0x1 << 14)
#define CQ_ENET_RQ_DESC_FLAGS_SOP (0x1 << 15)
#define CQ_ENET_RQ_DESC_RSS_TYPE_BITS 4
#define CQ_ENET_RQ_DESC_RSS_TYPE_MASK \
((1 << CQ_ENET_RQ_DESC_RSS_TYPE_BITS) - 1)
#define CQ_ENET_RQ_DESC_RSS_TYPE_NONE 0
#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv4 1
#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4 2
#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6 3
#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6 4
#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX 5
#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX 6
#define CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC (0x1 << 14)
#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS 14
#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK \
((1 << CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS) - 1)
#define CQ_ENET_RQ_DESC_FLAGS_TRUNCATED (0x1 << 14)
#define CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED (0x1 << 15)
#define CQ_ENET_RQ_DESC_FCOE_SOF_BITS 4
#define CQ_ENET_RQ_DESC_FCOE_SOF_MASK \
((1 << CQ_ENET_RQ_DESC_FCOE_SOF_BITS) - 1)
#define CQ_ENET_RQ_DESC_FCOE_EOF_BITS 8
#define CQ_ENET_RQ_DESC_FCOE_EOF_MASK \
((1 << CQ_ENET_RQ_DESC_FCOE_EOF_BITS) - 1)
#define CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT 8
#define CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK (0x1 << 0)
#define CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK (0x1 << 0)
#define CQ_ENET_RQ_DESC_FLAGS_UDP (0x1 << 1)
#define CQ_ENET_RQ_DESC_FCOE_ENC_ERROR (0x1 << 1)
#define CQ_ENET_RQ_DESC_FLAGS_TCP (0x1 << 2)
#define CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK (0x1 << 3)
#define CQ_ENET_RQ_DESC_FLAGS_IPV6 (0x1 << 4)
#define CQ_ENET_RQ_DESC_FLAGS_IPV4 (0x1 << 5)
#define CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT (0x1 << 6)
#define CQ_ENET_RQ_DESC_FLAGS_FCS_OK (0x1 << 7)
static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
u8 *type, u8 *color, u16 *q_number, u16 *completed_index,
u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error,
u8 *vlan_stripped, u16 *vlan, u16 *checksum, u8 *fcoe_sof,
u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof,
u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok,
u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok)
{
u16 completed_index_flags = le16_to_cpu(desc->completed_index_flags);
u16 q_number_rss_type_flags =
le16_to_cpu(desc->q_number_rss_type_flags);
u16 bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
cq_desc_dec((struct cq_desc *)desc, type,
color, q_number, completed_index);
*ingress_port = (completed_index_flags &
CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
*fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
1 : 0;
*eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ?
1 : 0;
*sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
1 : 0;
*rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
*csum_not_calc = (q_number_rss_type_flags &
CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
*rss_hash = le32_to_cpu(desc->rss_hash);
*bytes_written = bytes_written_flags &
CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
*packet_error = (bytes_written_flags &
CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0;
*vlan_stripped = (bytes_written_flags &
CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
*vlan = le16_to_cpu(desc->vlan);
if (*fcoe) {
*fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
*fcoe_fc_crc_ok = (desc->flags &
CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
*fcoe_enc_error = (desc->flags &
CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
*fcoe_eof = (u8)((desc->checksum_fcoe >>
CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
*checksum = 0;
} else {
*fcoe_sof = 0;
*fcoe_fc_crc_ok = 0;
*fcoe_enc_error = 0;
*fcoe_eof = 0;
*checksum = le16_to_cpu(desc->checksum_fcoe);
}
*tcp_udp_csum_ok =
(desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0;
*udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0;
*tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0;
*ipv4_csum_ok =
(desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0;
*ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0;
*ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0;
*ipv4_fragment =
(desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0;
*fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0;
}
#endif /* _CQ_ENET_DESC_H_ */

drivers/net/enic/enic.h (new file, 115 lines)

@@ -0,0 +1,115 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _ENIC_H_
#define _ENIC_H_
#include <linux/inet_lro.h>
#include "vnic_enet.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_rss.h"
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco 10G Ethernet Driver"
#define DRV_VERSION "0.0.1.18163.472"
#define DRV_COPYRIGHT "Copyright 2008 Cisco Systems, Inc"
#define PFX DRV_NAME ": "
#define ENIC_LRO_MAX_DESC 8
#define ENIC_LRO_MAX_AGGR 64
enum enic_cq_index {
ENIC_CQ_RQ,
ENIC_CQ_WQ,
ENIC_CQ_MAX,
};
enum enic_intx_intr_index {
ENIC_INTX_WQ_RQ,
ENIC_INTX_ERR,
ENIC_INTX_NOTIFY,
ENIC_INTX_MAX,
};
enum enic_msix_intr_index {
ENIC_MSIX_RQ,
ENIC_MSIX_WQ,
ENIC_MSIX_ERR,
ENIC_MSIX_NOTIFY,
ENIC_MSIX_MAX,
};
struct enic_msix_entry {
int requested;
char devname[IFNAMSIZ];
irqreturn_t (*isr)(int, void *);
void *devid;
};
/* Per-instance private data structure */
struct enic {
struct net_device *netdev;
struct pci_dev *pdev;
struct vnic_enet_config config;
struct vnic_dev_bar bar0;
struct vnic_dev *vdev;
struct net_device_stats net_stats;
struct timer_list notify_timer;
struct work_struct reset;
struct msix_entry msix_entry[ENIC_MSIX_MAX];
struct enic_msix_entry msix[ENIC_MSIX_MAX];
u32 msg_enable;
spinlock_t devcmd_lock;
u8 mac_addr[ETH_ALEN];
u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
unsigned int mc_count;
int csum_rx_enabled;
u32 port_mtu;
/* work queue cache line section */
____cacheline_aligned struct vnic_wq wq[1];
spinlock_t wq_lock[1];
unsigned int wq_count;
struct vlan_group *vlan_group;
/* receive queue cache line section */
____cacheline_aligned struct vnic_rq rq[1];
unsigned int rq_count;
int (*rq_alloc_buf)(struct vnic_rq *rq);
struct napi_struct napi;
struct net_lro_mgr lro_mgr;
struct net_lro_desc lro_desc[ENIC_LRO_MAX_DESC];
/* interrupt resource cache line section */
____cacheline_aligned struct vnic_intr intr[ENIC_MSIX_MAX];
unsigned int intr_count;
u32 __iomem *legacy_pba; /* memory-mapped */
/* completion queue cache line section */
____cacheline_aligned struct vnic_cq cq[ENIC_CQ_MAX];
unsigned int cq_count;
};
#endif /* _ENIC_H_ */

drivers/net/enic/enic_main.c (new file, 1949 lines; diff suppressed because it is too large)

drivers/net/enic/enic_res.c (new file, 370 lines)

@@ -0,0 +1,370 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include "wq_enet_desc.h"
#include "rq_enet_desc.h"
#include "cq_enet_desc.h"
#include "vnic_resource.h"
#include "vnic_enet.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_nic.h"
#include "vnic_rss.h"
#include "enic_res.h"
#include "enic.h"
int enic_get_vnic_config(struct enic *enic)
{
struct vnic_enet_config *c = &enic->config;
int err;
err = vnic_dev_mac_addr(enic->vdev, enic->mac_addr);
if (err) {
printk(KERN_ERR PFX "Error getting MAC addr, %d\n", err);
return err;
}
#define GET_CONFIG(m) \
do { \
err = vnic_dev_spec(enic->vdev, \
offsetof(struct vnic_enet_config, m), \
sizeof(c->m), &c->m); \
if (err) { \
printk(KERN_ERR PFX \
"Error getting %s, %d\n", #m, err); \
return err; \
} \
} while (0)
GET_CONFIG(flags);
GET_CONFIG(wq_desc_count);
GET_CONFIG(rq_desc_count);
GET_CONFIG(mtu);
GET_CONFIG(intr_timer);
GET_CONFIG(intr_timer_type);
GET_CONFIG(intr_mode);
c->wq_desc_count =
min_t(u32, ENIC_MAX_WQ_DESCS,
max_t(u32, ENIC_MIN_WQ_DESCS,
c->wq_desc_count));
c->wq_desc_count &= 0xfffffff0; /* must be aligned to groups of 16 */
c->rq_desc_count =
min_t(u32, ENIC_MAX_RQ_DESCS,
max_t(u32, ENIC_MIN_RQ_DESCS,
c->rq_desc_count));
c->rq_desc_count &= 0xfffffff0; /* must be aligned to groups of 16 */
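/* Worked example (illustrative, not in the original source): a
 * device-reported count of 100 passes the MIN/MAX clamp above
 * unchanged, then the & 0xfffffff0 rounds it down to 96, the
 * nearest multiple of 16. */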
if (c->mtu == 0)
c->mtu = 1500;
c->mtu = min_t(u16, ENIC_MAX_MTU,
max_t(u16, ENIC_MIN_MTU,
c->mtu));
c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer);
printk(KERN_INFO PFX "vNIC MAC addr %02x:%02x:%02x:%02x:%02x:%02x "
"wq/rq %d/%d\n",
enic->mac_addr[0], enic->mac_addr[1], enic->mac_addr[2],
enic->mac_addr[3], enic->mac_addr[4], enic->mac_addr[5],
c->wq_desc_count, c->rq_desc_count);
printk(KERN_INFO PFX "vNIC mtu %d csum tx/rx %d/%d tso/lro %d/%d "
"intr timer %d\n",
c->mtu, ENIC_SETTING(enic, TXCSUM),
ENIC_SETTING(enic, RXCSUM), ENIC_SETTING(enic, TSO),
ENIC_SETTING(enic, LRO), c->intr_timer);
return 0;
}
void enic_add_station_addr(struct enic *enic)
{
vnic_dev_add_addr(enic->vdev, enic->mac_addr);
}
void enic_add_multicast_addr(struct enic *enic, u8 *addr)
{
vnic_dev_add_addr(enic->vdev, addr);
}
void enic_del_multicast_addr(struct enic *enic, u8 *addr)
{
vnic_dev_del_addr(enic->vdev, addr);
}
void enic_add_vlan(struct enic *enic, u16 vlanid)
{
u64 a0 = vlanid, a1 = 0;
int wait = 1000;
int err;
err = vnic_dev_cmd(enic->vdev, CMD_VLAN_ADD, &a0, &a1, wait);
if (err)
printk(KERN_ERR PFX "Can't add vlan id, %d\n", err);
}
void enic_del_vlan(struct enic *enic, u16 vlanid)
{
u64 a0 = vlanid, a1 = 0;
int wait = 1000;
int err;
err = vnic_dev_cmd(enic->vdev, CMD_VLAN_DEL, &a0, &a1, wait);
if (err)
printk(KERN_ERR PFX "Can't delete vlan id, %d\n", err);
}
int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en,
u8 ig_vlan_strip_en)
{
u64 a0, a1;
u32 nic_cfg;
int wait = 1000;
vnic_set_nic_cfg(&nic_cfg, rss_default_cpu,
rss_hash_type, rss_hash_bits, rss_base_cpu,
rss_enable, tso_ipid_split_en, ig_vlan_strip_en);
a0 = nic_cfg;
a1 = 0;
return vnic_dev_cmd(enic->vdev, CMD_NIC_CFG, &a0, &a1, wait);
}
void enic_free_vnic_resources(struct enic *enic)
{
unsigned int i;
for (i = 0; i < enic->wq_count; i++)
vnic_wq_free(&enic->wq[i]);
for (i = 0; i < enic->rq_count; i++)
vnic_rq_free(&enic->rq[i]);
for (i = 0; i < enic->cq_count; i++)
vnic_cq_free(&enic->cq[i]);
for (i = 0; i < enic->intr_count; i++)
vnic_intr_free(&enic->intr[i]);
}
void enic_get_res_counts(struct enic *enic)
{
enic->wq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ);
enic->rq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ);
enic->cq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ);
enic->intr_count = vnic_dev_get_res_count(enic->vdev,
RES_TYPE_INTR_CTRL);
printk(KERN_INFO PFX "vNIC resources avail: "
"wq %d rq %d cq %d intr %d\n",
enic->wq_count, enic->rq_count,
enic->cq_count, enic->intr_count);
}
void enic_init_vnic_resources(struct enic *enic)
{
enum vnic_dev_intr_mode intr_mode;
unsigned int mask_on_assertion;
unsigned int interrupt_offset;
unsigned int error_interrupt_enable;
unsigned int error_interrupt_offset;
unsigned int cq_index;
unsigned int i;
intr_mode = vnic_dev_get_intr_mode(enic->vdev);
/* Init RQ/WQ resources.
*
* RQ[0 - n-1] point to CQ[0 - n-1]
* WQ[0 - m-1] point to CQ[n - n+m-1]
*
* Error interrupt is not enabled for MSI.
*/
switch (intr_mode) {
case VNIC_DEV_INTR_MODE_INTX:
case VNIC_DEV_INTR_MODE_MSIX:
error_interrupt_enable = 1;
error_interrupt_offset = enic->intr_count - 2;
break;
default:
error_interrupt_enable = 0;
error_interrupt_offset = 0;
break;
}
for (i = 0; i < enic->rq_count; i++) {
cq_index = i;
vnic_rq_init(&enic->rq[i],
cq_index,
error_interrupt_enable,
error_interrupt_offset);
}
for (i = 0; i < enic->wq_count; i++) {
cq_index = enic->rq_count + i;
vnic_wq_init(&enic->wq[i],
cq_index,
error_interrupt_enable,
error_interrupt_offset);
}
/* Init CQ resources
*
* CQ[0 - n+m-1] point to INTR[0] for INTx, MSI
* CQ[0 - n+m-1] point to INTR[0 - n+m-1] for MSI-X
*/
for (i = 0; i < enic->cq_count; i++) {
switch (intr_mode) {
case VNIC_DEV_INTR_MODE_MSIX:
interrupt_offset = i;
break;
default:
interrupt_offset = 0;
break;
}
vnic_cq_init(&enic->cq[i],
0 /* flow_control_enable */,
1 /* color_enable */,
0 /* cq_head */,
0 /* cq_tail */,
1 /* cq_tail_color */,
1 /* interrupt_enable */,
1 /* cq_entry_enable */,
0 /* cq_message_enable */,
interrupt_offset,
0 /* cq_message_addr */);
}
/* Init INTR resources
*
* mask_on_assertion is not used for INTx due to the level-
* triggered nature of INTx
*/
switch (intr_mode) {
case VNIC_DEV_INTR_MODE_MSI:
case VNIC_DEV_INTR_MODE_MSIX:
mask_on_assertion = 1;
break;
default:
mask_on_assertion = 0;
break;
}
for (i = 0; i < enic->intr_count; i++) {
vnic_intr_init(&enic->intr[i],
enic->config.intr_timer,
enic->config.intr_timer_type,
mask_on_assertion);
}
/* Clear LIF stats
*/
vnic_dev_stats_clear(enic->vdev);
}
int enic_alloc_vnic_resources(struct enic *enic)
{
enum vnic_dev_intr_mode intr_mode;
unsigned int i;
int err;
intr_mode = vnic_dev_get_intr_mode(enic->vdev);
printk(KERN_INFO PFX "vNIC resources used: "
"wq %d rq %d cq %d intr %d intr mode %s\n",
enic->wq_count, enic->rq_count,
enic->cq_count, enic->intr_count,
intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" :
intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" :
intr_mode == VNIC_DEV_INTR_MODE_MSIX ? "MSI-X" :
"unknown"
);
/* Allocate queue resources
*/
for (i = 0; i < enic->wq_count; i++) {
err = vnic_wq_alloc(enic->vdev, &enic->wq[i], i,
enic->config.wq_desc_count,
sizeof(struct wq_enet_desc));
if (err)
goto err_out_cleanup;
}
for (i = 0; i < enic->rq_count; i++) {
err = vnic_rq_alloc(enic->vdev, &enic->rq[i], i,
enic->config.rq_desc_count,
sizeof(struct rq_enet_desc));
if (err)
goto err_out_cleanup;
}
for (i = 0; i < enic->cq_count; i++) {
if (i < enic->rq_count)
err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
enic->config.rq_desc_count,
sizeof(struct cq_enet_rq_desc));
else
err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
enic->config.wq_desc_count,
sizeof(struct cq_enet_wq_desc));
if (err)
goto err_out_cleanup;
}
for (i = 0; i < enic->intr_count; i++) {
err = vnic_intr_alloc(enic->vdev, &enic->intr[i], i);
if (err)
goto err_out_cleanup;
}
/* Hook remaining resource
*/
enic->legacy_pba = vnic_dev_get_res(enic->vdev,
RES_TYPE_INTR_PBA_LEGACY, 0);
if (!enic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) {
printk(KERN_ERR PFX "Failed to hook legacy pba resource\n");
err = -ENODEV;
goto err_out_cleanup;
}
return 0;
err_out_cleanup:
enic_free_vnic_resources(enic);
return err;
}

drivers/net/enic/enic_res.h (new file, 151 lines)

@@ -0,0 +1,151 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _ENIC_RES_H_
#define _ENIC_RES_H_
#include "wq_enet_desc.h"
#include "rq_enet_desc.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#define ENIC_MIN_WQ_DESCS 64
#define ENIC_MAX_WQ_DESCS 4096
#define ENIC_MIN_RQ_DESCS 64
#define ENIC_MAX_RQ_DESCS 4096
#define ENIC_MIN_MTU 576 /* minimum for IPv4 */
#define ENIC_MAX_MTU 9000
#define ENIC_MULTICAST_PERFECT_FILTERS 32
#define ENIC_NON_TSO_MAX_DESC 16
#define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)
static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq,
void *os_buf, dma_addr_t dma_addr, unsigned int len,
unsigned int mss_or_csum_offset, unsigned int hdr_len,
int vlan_tag_insert, unsigned int vlan_tag,
int offload_mode, int cq_entry, int sop, int eop)
{
struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
wq_enet_desc_enc(desc,
(u64)dma_addr | VNIC_PADDR_TARGET,
(u16)len,
(u16)mss_or_csum_offset,
(u16)hdr_len, (u8)offload_mode,
(u8)eop, (u8)cq_entry,
0, /* fcoe_encap */
(u8)vlan_tag_insert,
(u16)vlan_tag,
0 /* loopback */);
wmb();
vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop);
}
static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq,
void *os_buf, dma_addr_t dma_addr, unsigned int len, int eop)
{
enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
0, 0, 0, 0, 0,
eop, 0 /* !SOP */, eop);
}
static inline void enic_queue_wq_desc(struct vnic_wq *wq, void *os_buf,
dma_addr_t dma_addr, unsigned int len, int vlan_tag_insert,
unsigned int vlan_tag, int eop)
{
enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
0, 0, vlan_tag_insert, vlan_tag,
WQ_ENET_OFFLOAD_MODE_CSUM,
eop, 1 /* SOP */, eop);
}
static inline void enic_queue_wq_desc_csum(struct vnic_wq *wq,
void *os_buf, dma_addr_t dma_addr, unsigned int len,
int ip_csum, int tcpudp_csum, int vlan_tag_insert,
unsigned int vlan_tag, int eop)
{
enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
(ip_csum ? 1 : 0) + (tcpudp_csum ? 2 : 0),
0, vlan_tag_insert, vlan_tag,
WQ_ENET_OFFLOAD_MODE_CSUM,
eop, 1 /* SOP */, eop);
}
static inline void enic_queue_wq_desc_csum_l4(struct vnic_wq *wq,
void *os_buf, dma_addr_t dma_addr, unsigned int len,
unsigned int csum_offset, unsigned int hdr_len,
int vlan_tag_insert, unsigned int vlan_tag, int eop)
{
enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
csum_offset, hdr_len, vlan_tag_insert, vlan_tag,
WQ_ENET_OFFLOAD_MODE_CSUM_L4,
eop, 1 /* SOP */, eop);
}
static inline void enic_queue_wq_desc_tso(struct vnic_wq *wq,
void *os_buf, dma_addr_t dma_addr, unsigned int len,
unsigned int mss, unsigned int hdr_len, int vlan_tag_insert,
unsigned int vlan_tag, int eop)
{
enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
mss, hdr_len, vlan_tag_insert, vlan_tag,
WQ_ENET_OFFLOAD_MODE_TSO,
eop, 1 /* SOP */, eop);
}
static inline void enic_queue_rq_desc(struct vnic_rq *rq,
void *os_buf, unsigned int os_buf_index,
dma_addr_t dma_addr, unsigned int len)
{
struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
u8 type = os_buf_index ?
RQ_ENET_TYPE_NOT_SOP : RQ_ENET_TYPE_ONLY_SOP;
rq_enet_desc_enc(desc,
(u64)dma_addr | VNIC_PADDR_TARGET,
type, (u16)len);
wmb();
vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len);
}
struct enic;
int enic_get_vnic_config(struct enic *);
void enic_add_station_addr(struct enic *enic);
void enic_add_multicast_addr(struct enic *enic, u8 *addr);
void enic_del_multicast_addr(struct enic *enic, u8 *addr);
void enic_add_vlan(struct enic *enic, u16 vlanid);
void enic_del_vlan(struct enic *enic, u16 vlanid);
int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en,
u8 ig_vlan_strip_en);
void enic_get_res_counts(struct enic *enic);
void enic_init_vnic_resources(struct enic *enic);
int enic_alloc_vnic_resources(struct enic *);
void enic_free_vnic_resources(struct enic *);
#endif /* _ENIC_RES_H_ */


@@ -0,0 +1,60 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _RQ_ENET_DESC_H_
#define _RQ_ENET_DESC_H_
/* Ethernet receive queue descriptor: 16B */
struct rq_enet_desc {
__le64 address;
__le16 length_type;
u8 reserved[6];
};
enum rq_enet_type_types {
RQ_ENET_TYPE_ONLY_SOP = 0,
RQ_ENET_TYPE_NOT_SOP = 1,
RQ_ENET_TYPE_RESV2 = 2,
RQ_ENET_TYPE_RESV3 = 3,
};
#define RQ_ENET_ADDR_BITS 64
#define RQ_ENET_LEN_BITS 14
#define RQ_ENET_LEN_MASK ((1 << RQ_ENET_LEN_BITS) - 1)
#define RQ_ENET_TYPE_BITS 2
#define RQ_ENET_TYPE_MASK ((1 << RQ_ENET_TYPE_BITS) - 1)
static inline void rq_enet_desc_enc(struct rq_enet_desc *desc,
u64 address, u8 type, u16 length)
{
desc->address = cpu_to_le64(address);
desc->length_type = cpu_to_le16((length & RQ_ENET_LEN_MASK) |
((type & RQ_ENET_TYPE_MASK) << RQ_ENET_LEN_BITS));
}
static inline void rq_enet_desc_dec(struct rq_enet_desc *desc,
u64 *address, u8 *type, u16 *length)
{
*address = le64_to_cpu(desc->address);
*length = le16_to_cpu(desc->length_type) & RQ_ENET_LEN_MASK;
*type = (u8)((le16_to_cpu(desc->length_type) >> RQ_ENET_LEN_BITS) &
RQ_ENET_TYPE_MASK);
}
#endif /* _RQ_ENET_DESC_H_ */
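A small round-trip sketch of the encode/decode pair above; the on-stack descriptor, the helper name, and the constants are illustrative only, since real descriptors live in DMA-coherent ring memory.

/* Hypothetical round trip: encode a 1500-byte SOP buffer, decode it back. */
static void example_rq_desc_roundtrip(void)
{
	struct rq_enet_desc d;
	u64 addr;
	u8 type;
	u16 len;

	rq_enet_desc_enc(&d, 0x12340000ULL, RQ_ENET_TYPE_ONLY_SOP, 1500);
	rq_enet_desc_dec(&d, &addr, &type, &len);
	/* addr == 0x12340000, type == RQ_ENET_TYPE_ONLY_SOP, len == 1500 */
}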


@@ -0,0 +1,89 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include "vnic_dev.h"
#include "vnic_cq.h"
void vnic_cq_free(struct vnic_cq *cq)
{
vnic_dev_free_desc_ring(cq->vdev, &cq->ring);
cq->ctrl = NULL;
}
int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
unsigned int desc_count, unsigned int desc_size)
{
int err;
cq->index = index;
cq->vdev = vdev;
cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);
if (!cq->ctrl) {
printk(KERN_ERR "Failed to hook CQ[%d] resource\n", index);
return -EINVAL;
}
err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
if (err)
return err;
return 0;
}
void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
unsigned int cq_tail_color, unsigned int interrupt_enable,
unsigned int cq_entry_enable, unsigned int cq_message_enable,
unsigned int interrupt_offset, u64 cq_message_addr)
{
u64 paddr;
paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET;
writeq(paddr, &cq->ctrl->ring_base);
iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size);
iowrite32(flow_control_enable, &cq->ctrl->flow_control_enable);
iowrite32(color_enable, &cq->ctrl->color_enable);
iowrite32(cq_head, &cq->ctrl->cq_head);
iowrite32(cq_tail, &cq->ctrl->cq_tail);
iowrite32(cq_tail_color, &cq->ctrl->cq_tail_color);
iowrite32(interrupt_enable, &cq->ctrl->interrupt_enable);
iowrite32(cq_entry_enable, &cq->ctrl->cq_entry_enable);
iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable);
iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset);
writeq(cq_message_addr, &cq->ctrl->cq_message_addr);
}
void vnic_cq_clean(struct vnic_cq *cq)
{
cq->to_clean = 0;
cq->last_color = 0;
iowrite32(0, &cq->ctrl->cq_head);
iowrite32(0, &cq->ctrl->cq_tail);
iowrite32(1, &cq->ctrl->cq_tail_color);
vnic_dev_clear_desc_ring(&cq->ring);
}

drivers/net/enic/vnic_cq.h (new file, 113 lines)

@@ -0,0 +1,113 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _VNIC_CQ_H_
#define _VNIC_CQ_H_
#include "cq_desc.h"
#include "vnic_dev.h"
/* Completion queue control */
struct vnic_cq_ctrl {
u64 ring_base; /* 0x00 */
u32 ring_size; /* 0x08 */
u32 pad0;
u32 flow_control_enable; /* 0x10 */
u32 pad1;
u32 color_enable; /* 0x18 */
u32 pad2;
u32 cq_head; /* 0x20 */
u32 pad3;
u32 cq_tail; /* 0x28 */
u32 pad4;
u32 cq_tail_color; /* 0x30 */
u32 pad5;
u32 interrupt_enable; /* 0x38 */
u32 pad6;
u32 cq_entry_enable; /* 0x40 */
u32 pad7;
u32 cq_message_enable; /* 0x48 */
u32 pad8;
u32 interrupt_offset; /* 0x50 */
u32 pad9;
u64 cq_message_addr; /* 0x58 */
u32 pad10;
};
struct vnic_cq {
unsigned int index;
struct vnic_dev *vdev;
struct vnic_cq_ctrl __iomem *ctrl; /* memory-mapped */
struct vnic_dev_ring ring;
unsigned int to_clean;
unsigned int last_color;
};
static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
unsigned int work_to_do,
int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc,
u8 type, u16 q_number, u16 completed_index, void *opaque),
void *opaque)
{
struct cq_desc *cq_desc;
unsigned int work_done = 0;
u16 q_number, completed_index;
u8 type, color;
cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
cq->ring.desc_size * cq->to_clean);
cq_desc_dec(cq_desc, &type, &color,
&q_number, &completed_index);
while (color != cq->last_color) {
if ((*q_service)(cq->vdev, cq_desc, type,
q_number, completed_index, opaque))
break;
cq->to_clean++;
if (cq->to_clean == cq->ring.desc_count) {
cq->to_clean = 0;
cq->last_color = cq->last_color ? 0 : 1;
}
cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
cq->ring.desc_size * cq->to_clean);
cq_desc_dec(cq_desc, &type, &color,
&q_number, &completed_index);
work_done++;
if (work_done >= work_to_do)
break;
}
return work_done;
}
void vnic_cq_free(struct vnic_cq *cq);
int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
unsigned int desc_count, unsigned int desc_size);
void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
unsigned int cq_tail_color, unsigned int interrupt_enable,
unsigned int cq_entry_enable, unsigned int message_enable,
unsigned int interrupt_offset, u64 message_addr);
void vnic_cq_clean(struct vnic_cq *cq);
#endif /* _VNIC_CQ_H_ */
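For context on how vnic_cq_service() is driven, here is a sketch of a q_service callback; the function and the counter it bumps are hypothetical, whereas the real driver's handlers complete WQ/RQ buffers at this point.

/* Hypothetical q_service callback: just counts serviced entries. */
static int example_cq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	unsigned int *seen = opaque;

	(*seen)++;	/* a real handler frees or refills buffers here */
	return 0;	/* non-zero stops the service loop early */
}

/* usage: work_done = vnic_cq_service(cq, budget, example_cq_service, &seen); */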

drivers/net/enic/vnic_dev.c (new file, 674 lines)

@@ -0,0 +1,674 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/if_ether.h>
#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_dev.h"
#include "vnic_stats.h"
struct vnic_res {
void __iomem *vaddr;
unsigned int count;
};
struct vnic_dev {
void *priv;
struct pci_dev *pdev;
struct vnic_res res[RES_TYPE_MAX];
enum vnic_dev_intr_mode intr_mode;
struct vnic_devcmd __iomem *devcmd;
struct vnic_devcmd_notify *notify;
struct vnic_devcmd_notify notify_copy;
dma_addr_t notify_pa;
u32 *linkstatus;
dma_addr_t linkstatus_pa;
struct vnic_stats *stats;
dma_addr_t stats_pa;
struct vnic_devcmd_fw_info *fw_info;
dma_addr_t fw_info_pa;
};
#define VNIC_MAX_RES_HDR_SIZE \
(sizeof(struct vnic_resource_header) + \
sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE 128
void *vnic_dev_priv(struct vnic_dev *vdev)
{
return vdev->priv;
}
static int vnic_dev_discover_res(struct vnic_dev *vdev,
struct vnic_dev_bar *bar)
{
struct vnic_resource_header __iomem *rh;
struct vnic_resource __iomem *r;
u8 type;
if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
printk(KERN_ERR "vNIC BAR0 res hdr length error\n");
return -EINVAL;
}
rh = bar->vaddr;
if (!rh) {
printk(KERN_ERR "vNIC BAR0 res hdr not mem-mapped\n");
return -EINVAL;
}
if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
ioread32(&rh->version) != VNIC_RES_VERSION) {
printk(KERN_ERR "vNIC BAR0 res magic/version error "
"exp (%lx/%lx) curr (%x/%x)\n",
VNIC_RES_MAGIC, VNIC_RES_VERSION,
ioread32(&rh->magic), ioread32(&rh->version));
return -EINVAL;
}
r = (struct vnic_resource __iomem *)(rh + 1);
while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
u8 bar_num = ioread8(&r->bar);
u32 bar_offset = ioread32(&r->bar_offset);
u32 count = ioread32(&r->count);
u32 len;
r++;
if (bar_num != 0) /* only mapping in BAR0 resources */
continue;
switch (type) {
case RES_TYPE_WQ:
case RES_TYPE_RQ:
case RES_TYPE_CQ:
case RES_TYPE_INTR_CTRL:
/* each count is stride bytes long */
len = count * VNIC_RES_STRIDE;
if (len + bar_offset > bar->len) {
printk(KERN_ERR "vNIC BAR0 resource %d "
"out-of-bounds, offset 0x%x + "
"size 0x%x > bar len 0x%lx\n",
type, bar_offset,
len,
bar->len);
return -EINVAL;
}
break;
case RES_TYPE_INTR_PBA_LEGACY:
case RES_TYPE_DEVCMD:
len = count;
break;
default:
continue;
}
vdev->res[type].count = count;
vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
}
return 0;
}
unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
enum vnic_res_type type)
{
return vdev->res[type].count;
}
void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
unsigned int index)
{
if (!vdev->res[type].vaddr)
return NULL;
switch (type) {
case RES_TYPE_WQ:
case RES_TYPE_RQ:
case RES_TYPE_CQ:
case RES_TYPE_INTR_CTRL:
return (char __iomem *)vdev->res[type].vaddr +
index * VNIC_RES_STRIDE;
default:
return (char __iomem *)vdev->res[type].vaddr;
}
}
unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
unsigned int desc_count, unsigned int desc_size)
{
/* The base address of the desc rings must be 512 byte aligned.
* Descriptor count is aligned to groups of 32 descriptors. A
* count of 0 means the maximum 4096 descriptors. Descriptor
* size is aligned to 16 bytes.
*/
unsigned int count_align = 32;
unsigned int desc_align = 16;
ring->base_align = 512;
if (desc_count == 0)
desc_count = 4096;
ring->desc_count = ALIGN(desc_count, count_align);
ring->desc_size = ALIGN(desc_size, desc_align);
ring->size = ring->desc_count * ring->desc_size;
ring->size_unaligned = ring->size + ring->base_align;
return ring->size_unaligned;
}
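/*
* Worked example: a request of desc_count = 100, desc_size = 16 sizes
* the ring as
*
*	desc_count     = ALIGN(100, 32) = 128
*	desc_size      = ALIGN(16, 16)  = 16
*	size           = 128 * 16       = 2048 bytes
*	size_unaligned = 2048 + 512     = 2560 bytes
*
* The extra base_align bytes give vnic_dev_alloc_desc_ring() room to
* round the DMA base address up to the required 512-byte boundary.
*/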
void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
memset(ring->descs, 0, ring->size);
}
int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
unsigned int desc_count, unsigned int desc_size)
{
vnic_dev_desc_ring_size(ring, desc_count, desc_size);
ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
ring->size_unaligned,
&ring->base_addr_unaligned);
if (!ring->descs_unaligned) {
printk(KERN_ERR
"Failed to allocate ring (size=%d), aborting\n",
(int)ring->size);
return -ENOMEM;
}
ring->base_addr = ALIGN(ring->base_addr_unaligned,
ring->base_align);
ring->descs = (u8 *)ring->descs_unaligned +
(ring->base_addr - ring->base_addr_unaligned);
vnic_dev_clear_desc_ring(ring);
ring->desc_avail = ring->desc_count - 1;
return 0;
}
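/*
* Standard over-allocate-and-round-up alignment: the block is
* allocated base_align bytes larger than needed, base_addr is rounded
* up to the next 512-byte boundary, and the CPU-side descs pointer is
* advanced by the same offset so both views stay in sync. For example,
* if pci_alloc_consistent() happened to return base_addr_unaligned =
* 0x1234, then base_addr = ALIGN(0x1234, 512) = 0x1400 and descs is
* offset 0x1cc bytes into the unaligned block.
*/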
void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
if (ring->descs) {
pci_free_consistent(vdev->pdev,
ring->size_unaligned,
ring->descs_unaligned,
ring->base_addr_unaligned);
ring->descs = NULL;
}
}
int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
u64 *a0, u64 *a1, int wait)
{
struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
int delay;
u32 status;
int dev_cmd_err[] = {
/* convert from fw's version of error.h to host's version */
0, /* ERR_SUCCESS */
EINVAL, /* ERR_EINVAL */
EFAULT, /* ERR_EFAULT */
EPERM, /* ERR_EPERM */
EBUSY, /* ERR_EBUSY */
};
int err;
status = ioread32(&devcmd->status);
if (status & STAT_BUSY) {
printk(KERN_ERR "Busy devcmd %d\n", _CMD_N(cmd));
return -EBUSY;
}
if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
writeq(*a0, &devcmd->args[0]);
writeq(*a1, &devcmd->args[1]);
wmb();
}
iowrite32(cmd, &devcmd->cmd);
if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
return 0;
for (delay = 0; delay < wait; delay++) {
udelay(100);
status = ioread32(&devcmd->status);
if (!(status & STAT_BUSY)) {
if (status & STAT_ERROR) {
err = dev_cmd_err[(int)readq(&devcmd->args[0])];
printk(KERN_ERR "Error %d devcmd %d\n",
err, _CMD_N(cmd));
return -err;
}
if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
rmb();
*a0 = readq(&devcmd->args[0]);
*a1 = readq(&devcmd->args[1]);
}
return 0;
}
}
printk(KERN_ERR "Timedout devcmd %d\n", _CMD_N(cmd));
return -ETIMEDOUT;
}
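/*
* The devcmd handshake above: post a0/a1 into the args registers,
* write the command number, then poll STAT_BUSY at 100us intervals for
* up to "wait" iterations (wait = 1000, used throughout this file,
* gives a ~100ms timeout). A typical caller looks like the wrappers
* further down, e.g.:
*
*	u64 a0 = 0, a1 = 0;
*	int wait = 1000;
*	int err = vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
*
* Note that dev_cmd_err[] only translates firmware codes 0..4
* (ERR_SUCCESS..ERR_EBUSY); the later codes in enum vnic_devcmd_error
* have no host-side mapping in this version.
*/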
int vnic_dev_fw_info(struct vnic_dev *vdev,
struct vnic_devcmd_fw_info **fw_info)
{
u64 a0, a1 = 0;
int wait = 1000;
int err = 0;
if (!vdev->fw_info) {
vdev->fw_info = pci_alloc_consistent(vdev->pdev,
sizeof(struct vnic_devcmd_fw_info),
&vdev->fw_info_pa);
if (!vdev->fw_info)
return -ENOMEM;
a0 = vdev->fw_info_pa;
/* only get fw_info once and cache it */
err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
}
*fw_info = vdev->fw_info;
return err;
}
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
void *value)
{
u64 a0, a1;
int wait = 1000;
int err;
a0 = offset;
a1 = size;
err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);
switch (size) {
case 1: *(u8 *)value = (u8)a0; break;
case 2: *(u16 *)value = (u16)a0; break;
case 4: *(u32 *)value = (u32)a0; break;
case 8: *(u64 *)value = a0; break;
default: BUG(); break;
}
return err;
}
int vnic_dev_stats_clear(struct vnic_dev *vdev)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
}
int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
u64 a0, a1;
int wait = 1000;
if (!vdev->stats) {
vdev->stats = pci_alloc_consistent(vdev->pdev,
sizeof(struct vnic_stats), &vdev->stats_pa);
if (!vdev->stats)
return -ENOMEM;
}
*stats = vdev->stats;
a0 = vdev->stats_pa;
a1 = sizeof(struct vnic_stats);
return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}
int vnic_dev_close(struct vnic_dev *vdev)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}
int vnic_dev_enable(struct vnic_dev *vdev)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
}
int vnic_dev_disable(struct vnic_dev *vdev)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}
int vnic_dev_open(struct vnic_dev *vdev, int arg)
{
u64 a0 = (u32)arg, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}
int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
int err;
*done = 0;
err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
if (err)
return err;
*done = (a0 == 0);
return 0;
}
int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
{
u64 a0 = (u32)arg, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
}
int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
int err;
*done = 0;
err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
if (err)
return err;
*done = (a0 == 0);
return 0;
}
int vnic_dev_hang_notify(struct vnic_dev *vdev)
{
u64 a0, a1;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
}
int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
u64 a0, a1;
int wait = 1000;
int err, i;
for (i = 0; i < ETH_ALEN; i++)
mac_addr[i] = 0;
err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
if (err)
return err;
for (i = 0; i < ETH_ALEN; i++)
mac_addr[i] = ((u8 *)&a0)[i];
return 0;
}
void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
int broadcast, int promisc, int allmulti)
{
u64 a0, a1 = 0;
int wait = 1000;
int err;
a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
(multicast ? CMD_PFILTER_MULTICAST : 0) |
(broadcast ? CMD_PFILTER_BROADCAST : 0) |
(promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
(allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);
err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
if (err)
printk(KERN_ERR "Can't set packet filter\n");
}
void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
int err;
int i;
for (i = 0; i < ETH_ALEN; i++)
((u8 *)&a0)[i] = addr[i];
err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
if (err)
printk(KERN_ERR
"Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
err);
}
void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
int err;
int i;
for (i = 0; i < ETH_ALEN; i++)
((u8 *)&a0)[i] = addr[i];
err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
if (err)
printk(KERN_ERR
"Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
err);
}
int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
u64 a0, a1;
int wait = 1000;
if (!vdev->notify) {
vdev->notify = pci_alloc_consistent(vdev->pdev,
sizeof(struct vnic_devcmd_notify),
&vdev->notify_pa);
if (!vdev->notify)
return -ENOMEM;
}
a0 = vdev->notify_pa;
a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
a1 += sizeof(struct vnic_devcmd_notify);
return vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
}
void vnic_dev_notify_unset(struct vnic_dev *vdev)
{
u64 a0, a1;
int wait = 1000;
a0 = 0; /* paddr = 0 to unset notify buffer */
a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
a1 += sizeof(struct vnic_devcmd_notify);
vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
}
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
u32 *words;
unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4;
unsigned int i;
u32 csum;
if (!vdev->notify)
return 0;
do {
csum = 0;
memcpy(&vdev->notify_copy, vdev->notify,
sizeof(struct vnic_devcmd_notify));
words = (u32 *)&vdev->notify_copy;
for (i = 1; i < nwords; i++)
csum += words[i];
} while (csum != words[0]);
return 1;
}
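/*
* Lock-free consistent snapshot of the firmware-written notify block:
* word 0 is a checksum of words 1..n, so the loop above retries the
* copy until the sum of the copied payload words matches the copied
* csum, i.e. until it observes a state in which firmware was not
* mid-update. The checksum plays the role a sequence counter plays in
* a seqlock.
*/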
int vnic_dev_init(struct vnic_dev *vdev, int arg)
{
u64 a0 = (u32)arg, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
}
int vnic_dev_link_status(struct vnic_dev *vdev)
{
if (vdev->linkstatus)
return *vdev->linkstatus;
if (!vnic_dev_notify_ready(vdev))
return 0;
return vdev->notify_copy.link_state;
}
u32 vnic_dev_port_speed(struct vnic_dev *vdev)
{
if (!vnic_dev_notify_ready(vdev))
return 0;
return vdev->notify_copy.port_speed;
}
u32 vnic_dev_msg_lvl(struct vnic_dev *vdev)
{
if (!vnic_dev_notify_ready(vdev))
return 0;
return vdev->notify_copy.msglvl;
}
u32 vnic_dev_mtu(struct vnic_dev *vdev)
{
if (!vnic_dev_notify_ready(vdev))
return 0;
return vdev->notify_copy.mtu;
}
void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
enum vnic_dev_intr_mode intr_mode)
{
vdev->intr_mode = intr_mode;
}
enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
struct vnic_dev *vdev)
{
return vdev->intr_mode;
}
void vnic_dev_unregister(struct vnic_dev *vdev)
{
if (vdev) {
if (vdev->notify)
pci_free_consistent(vdev->pdev,
sizeof(struct vnic_devcmd_notify),
vdev->notify,
vdev->notify_pa);
if (vdev->linkstatus)
pci_free_consistent(vdev->pdev,
sizeof(u32),
vdev->linkstatus,
vdev->linkstatus_pa);
if (vdev->stats)
pci_free_consistent(vdev->pdev,
sizeof(struct vnic_stats),
vdev->stats, vdev->stats_pa);
if (vdev->fw_info)
pci_free_consistent(vdev->pdev,
sizeof(struct vnic_devcmd_fw_info),
vdev->fw_info, vdev->fw_info_pa);
kfree(vdev);
}
}
struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar)
{
if (!vdev) {
vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC);
if (!vdev)
return NULL;
}
vdev->priv = priv;
vdev->pdev = pdev;
if (vnic_dev_discover_res(vdev, bar))
goto err_out;
vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
if (!vdev->devcmd)
goto err_out;
return vdev;
err_out:
vnic_dev_unregister(vdev);
return NULL;
}

drivers/net/enic/vnic_dev.h Normal file
@@ -0,0 +1,106 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _VNIC_DEV_H_
#define _VNIC_DEV_H_
#include "vnic_resource.h"
#include "vnic_devcmd.h"
#ifndef VNIC_PADDR_TARGET
#define VNIC_PADDR_TARGET 0x0000000000000000ULL
#endif
enum vnic_dev_intr_mode {
VNIC_DEV_INTR_MODE_UNKNOWN,
VNIC_DEV_INTR_MODE_INTX,
VNIC_DEV_INTR_MODE_MSI,
VNIC_DEV_INTR_MODE_MSIX,
};
struct vnic_dev_bar {
void __iomem *vaddr;
dma_addr_t bus_addr;
unsigned long len;
};
struct vnic_dev_ring {
void *descs;
size_t size;
dma_addr_t base_addr;
size_t base_align;
void *descs_unaligned;
size_t size_unaligned;
dma_addr_t base_addr_unaligned;
unsigned int desc_size;
unsigned int desc_count;
unsigned int desc_avail;
};
struct vnic_dev;
struct vnic_stats;
void *vnic_dev_priv(struct vnic_dev *vdev);
unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
enum vnic_res_type type);
void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
unsigned int index);
unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
unsigned int desc_count, unsigned int desc_size);
void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring);
int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
unsigned int desc_count, unsigned int desc_size);
void vnic_dev_free_desc_ring(struct vnic_dev *vdev,
struct vnic_dev_ring *ring);
int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
u64 *a0, u64 *a1, int wait);
int vnic_dev_fw_info(struct vnic_dev *vdev,
struct vnic_devcmd_fw_info **fw_info);
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
void *value);
int vnic_dev_stats_clear(struct vnic_dev *vdev);
int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
int vnic_dev_hang_notify(struct vnic_dev *vdev);
void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
int broadcast, int promisc, int allmulti);
void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr);
void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr);
int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
void vnic_dev_notify_unset(struct vnic_dev *vdev);
int vnic_dev_link_status(struct vnic_dev *vdev);
u32 vnic_dev_port_speed(struct vnic_dev *vdev);
u32 vnic_dev_msg_lvl(struct vnic_dev *vdev);
u32 vnic_dev_mtu(struct vnic_dev *vdev);
int vnic_dev_close(struct vnic_dev *vdev);
int vnic_dev_enable(struct vnic_dev *vdev);
int vnic_dev_disable(struct vnic_dev *vdev);
int vnic_dev_open(struct vnic_dev *vdev, int arg);
int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
int vnic_dev_init(struct vnic_dev *vdev, int arg);
int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
enum vnic_dev_intr_mode intr_mode);
enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev);
void vnic_dev_unregister(struct vnic_dev *vdev);
struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar);
#endif /* _VNIC_DEV_H_ */

drivers/net/enic/vnic_devcmd.h Normal file
@@ -0,0 +1,282 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _VNIC_DEVCMD_H_
#define _VNIC_DEVCMD_H_
#define _CMD_NBITS 14
#define _CMD_VTYPEBITS 10
#define _CMD_FLAGSBITS 6
#define _CMD_DIRBITS 2
#define _CMD_NMASK ((1 << _CMD_NBITS)-1)
#define _CMD_VTYPEMASK ((1 << _CMD_VTYPEBITS)-1)
#define _CMD_FLAGSMASK ((1 << _CMD_FLAGSBITS)-1)
#define _CMD_DIRMASK ((1 << _CMD_DIRBITS)-1)
#define _CMD_NSHIFT 0
#define _CMD_VTYPESHIFT (_CMD_NSHIFT+_CMD_NBITS)
#define _CMD_FLAGSSHIFT (_CMD_VTYPESHIFT+_CMD_VTYPEBITS)
#define _CMD_DIRSHIFT (_CMD_FLAGSSHIFT+_CMD_FLAGSBITS)
/*
* Direction bits (from host perspective).
*/
#define _CMD_DIR_NONE 0U
#define _CMD_DIR_WRITE 1U
#define _CMD_DIR_READ 2U
#define _CMD_DIR_RW (_CMD_DIR_WRITE | _CMD_DIR_READ)
/*
* Flag bits.
*/
#define _CMD_FLAGS_NONE 0U
#define _CMD_FLAGS_NOWAIT 1U
/*
* vNIC type bits.
*/
#define _CMD_VTYPE_NONE 0U
#define _CMD_VTYPE_ENET 1U
#define _CMD_VTYPE_FC 2U
#define _CMD_VTYPE_SCSI 4U
#define _CMD_VTYPE_ALL (_CMD_VTYPE_ENET | _CMD_VTYPE_FC | _CMD_VTYPE_SCSI)
/*
* Used to create cmds.
*/
#define _CMDCF(dir, flags, vtype, nr) \
(((dir) << _CMD_DIRSHIFT) | \
((flags) << _CMD_FLAGSSHIFT) | \
((vtype) << _CMD_VTYPESHIFT) | \
((nr) << _CMD_NSHIFT))
#define _CMDC(dir, vtype, nr) _CMDCF(dir, 0, vtype, nr)
#define _CMDCNW(dir, vtype, nr) _CMDCF(dir, _CMD_FLAGS_NOWAIT, vtype, nr)
/*
* Used to decode cmds.
*/
#define _CMD_DIR(cmd) (((cmd) >> _CMD_DIRSHIFT) & _CMD_DIRMASK)
#define _CMD_FLAGS(cmd) (((cmd) >> _CMD_FLAGSSHIFT) & _CMD_FLAGSMASK)
#define _CMD_VTYPE(cmd) (((cmd) >> _CMD_VTYPESHIFT) & _CMD_VTYPEMASK)
#define _CMD_N(cmd) (((cmd) >> _CMD_NSHIFT) & _CMD_NMASK)
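/*
* Worked example: CMD_ENABLE below is _CMDCNW(_CMD_DIR_WRITE,
* _CMD_VTYPE_ALL, 28), which packs to
*
*	(1 << 30) | (1 << 24) | (7 << 14) | 28 = 0x4101c01c
*
* i.e. dir = WRITE, flags = NOWAIT, vtype = ENET|FC|SCSI, command
* number 28. _CMD_N() recovers the low 14-bit number, which is what
* the driver prints in devcmd error messages.
*/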
enum vnic_devcmd_cmd {
CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0),
/* mcpu fw info in mem: (u64)a0=paddr to struct vnic_devcmd_fw_info */
CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1),
/* dev-specific block member:
* in: (u16)a0=offset,(u8)a1=size
* out: a0=value */
CMD_DEV_SPEC = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2),
/* stats clear */
CMD_STATS_CLEAR = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 3),
/* stats dump in mem: (u64)a0=paddr to stats area,
* (u16)a1=sizeof stats area */
CMD_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4),
/* set Rx packet filter: (u32)a0=filters (see CMD_PFILTER_*) */
CMD_PACKET_FILTER = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 7),
/* hang detection notification */
CMD_HANG_NOTIFY = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8),
/* MAC address in (u48)a0 */
CMD_MAC_ADDR = _CMDC(_CMD_DIR_READ,
_CMD_VTYPE_ENET | _CMD_VTYPE_FC, 9),
/* disable/enable promisc mode: (u8)a0=0/1 */
/***** XXX DEPRECATED *****/
CMD_PROMISC_MODE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 10),
/* disable/enable all-multi mode: (u8)a0=0/1 */
/***** XXX DEPRECATED *****/
CMD_ALLMULTI_MODE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 11),
/* add addr from (u48)a0 */
CMD_ADDR_ADD = _CMDCNW(_CMD_DIR_WRITE,
_CMD_VTYPE_ENET | _CMD_VTYPE_FC, 12),
/* del addr from (u48)a0 */
CMD_ADDR_DEL = _CMDCNW(_CMD_DIR_WRITE,
_CMD_VTYPE_ENET | _CMD_VTYPE_FC, 13),
/* add VLAN id in (u16)a0 */
CMD_VLAN_ADD = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 14),
/* del VLAN id in (u16)a0 */
CMD_VLAN_DEL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 15),
/* nic_cfg in (u32)a0 */
CMD_NIC_CFG = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16),
/* union vnic_rss_key in mem: (u64)a0=paddr, (u16)a1=len */
CMD_RSS_KEY = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 17),
/* union vnic_rss_cpu in mem: (u64)a0=paddr, (u16)a1=len */
CMD_RSS_CPU = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 18),
/* initiate softreset */
CMD_SOFT_RESET = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 19),
/* softreset status:
* out: a0=0 reset complete, a0=1 reset in progress */
CMD_SOFT_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 20),
/* set struct vnic_devcmd_notify buffer in mem:
* in:
* (u64)a0=paddr to notify (set paddr=0 to unset)
* (u32)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify)
* (u16)a1 & 0x0000ffff00000000=intr num (-1 for no intr)
* out:
* (u32)a1 = effective size
*/
CMD_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 21),
/* UNDI API: (u64)a0=paddr to s_PXENV_UNDI_ struct,
* (u8)a1=PXENV_UNDI_xxx */
CMD_UNDI = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 22),
/* initiate open sequence (u32)a0=flags (see CMD_OPENF_*) */
CMD_OPEN = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 23),
/* open status:
* out: a0=0 open complete, a0=1 open in progress */
CMD_OPEN_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 24),
/* close vnic */
CMD_CLOSE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 25),
/* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */
CMD_INIT = _CMDCNW(_CMD_DIR_READ, _CMD_VTYPE_ALL, 26),
/* variant of CMD_INIT, with provisioning info
* (u64)a0=paddr of vnic_devcmd_provinfo
* (u32)a1=sizeof provision info */
CMD_INIT_PROV_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 27),
/* enable virtual link */
CMD_ENABLE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28),
/* disable virtual link */
CMD_DISABLE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 29),
/* stats dump all vnics on uplink in mem: (u64)a0=paddr (u32)a1=uif */
CMD_STATS_DUMP_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 30),
/* init status:
* out: a0=0 init complete, a0=1 init in progress
* if a0=0, a1=errno */
CMD_INIT_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 31),
/* INT13 API: (u64)a0=paddr to vnic_int13_params struct
* (u8)a1=INT13_CMD_xxx */
CMD_INT13 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_FC, 32),
/* logical uplink enable/disable: (u64)a0: 0/1=disable/enable */
CMD_LOGICAL_UPLINK = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 33),
/* undo initialize of virtual link */
CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34),
};
/* flags for CMD_OPEN */
#define CMD_OPENF_OPROM 0x1 /* open coming from option rom */
/* flags for CMD_INIT */
#define CMD_INITF_DEFAULT_MAC 0x1 /* init with default mac addr */
/* flags for CMD_PACKET_FILTER */
#define CMD_PFILTER_DIRECTED 0x01
#define CMD_PFILTER_MULTICAST 0x02
#define CMD_PFILTER_BROADCAST 0x04
#define CMD_PFILTER_PROMISCUOUS 0x08
#define CMD_PFILTER_ALL_MULTICAST 0x10
enum vnic_devcmd_status {
STAT_NONE = 0,
STAT_BUSY = 1 << 0, /* cmd in progress */
STAT_ERROR = 1 << 1, /* last cmd caused error (code in a0) */
};
enum vnic_devcmd_error {
ERR_SUCCESS = 0,
ERR_EINVAL = 1,
ERR_EFAULT = 2,
ERR_EPERM = 3,
ERR_EBUSY = 4,
ERR_ECMDUNKNOWN = 5,
ERR_EBADSTATE = 6,
ERR_ENOMEM = 7,
ERR_ETIMEDOUT = 8,
ERR_ELINKDOWN = 9,
};
struct vnic_devcmd_fw_info {
char fw_version[32];
char fw_build[32];
char hw_version[32];
char hw_serial_number[32];
};
struct vnic_devcmd_notify {
u32 csum; /* checksum over following words */
u32 link_state; /* link up == 1 */
u32 port_speed; /* effective port speed (rate limit) */
u32 mtu; /* MTU */
u32 msglvl; /* requested driver msg lvl */
u32 uif; /* uplink interface */
u32 status; /* status bits (see VNIC_STF_*) */
u32 error; /* error code (see ERR_*) for first ERR */
};
#define VNIC_STF_FATAL_ERR 0x0001 /* fatal fw error */
struct vnic_devcmd_provinfo {
u8 oui[3];
u8 type;
u8 data[0];
};
/*
* Writing cmd register causes STAT_BUSY to get set in status register.
* When cmd completes, STAT_BUSY will be cleared.
*
* If cmd completed successfully STAT_ERROR will be clear
* and args registers contain cmd-specific results.
*
* If cmd error, STAT_ERROR will be set and args[0] contains error code.
*
* status register is read-only. While STAT_BUSY is set,
* all other register contents are read-only.
*/
/* Make sizeof(vnic_devcmd) a power-of-2 for I/O BAR. */
#define VNIC_DEVCMD_NARGS 15
struct vnic_devcmd {
u32 status; /* RO */
u32 cmd; /* RW */
u64 args[VNIC_DEVCMD_NARGS]; /* RW cmd args (little-endian) */
};
#endif /* _VNIC_DEVCMD_H_ */

drivers/net/enic/vnic_enic.h Normal file
@@ -0,0 +1,47 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _VNIC_ENIC_H_
#define _VNIC_ENIC_H_
/* Device-specific region: enet configuration */
struct vnic_enet_config {
u32 flags;
u32 wq_desc_count;
u32 rq_desc_count;
u16 mtu;
u16 intr_timer;
u8 intr_timer_type;
u8 intr_mode;
char devname[16];
};
#define VENETF_TSO 0x1 /* TSO enabled */
#define VENETF_LRO 0x2 /* LRO enabled */
#define VENETF_RXCSUM 0x4 /* RX csum enabled */
#define VENETF_TXCSUM 0x8 /* TX csum enabled */
#define VENETF_RSS 0x10 /* RSS enabled */
#define VENETF_RSSHASH_IPV4 0x20 /* Hash on IPv4 fields */
#define VENETF_RSSHASH_TCPIPV4 0x40 /* Hash on TCP + IPv4 fields */
#define VENETF_RSSHASH_IPV6 0x80 /* Hash on IPv6 fields */
#define VENETF_RSSHASH_TCPIPV6 0x100 /* Hash on TCP + IPv6 fields */
#define VENETF_RSSHASH_IPV6_EX 0x200 /* Hash on IPv6 extended fields */
#define VENETF_RSSHASH_TCPIPV6_EX 0x400 /* Hash on TCP + IPv6 ext. fields */
#endif /* _VNIC_ENIC_H_ */

drivers/net/enic/vnic_intr.c Normal file
@@ -0,0 +1,62 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include "vnic_dev.h"
#include "vnic_intr.h"
void vnic_intr_free(struct vnic_intr *intr)
{
intr->ctrl = NULL;
}
int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
unsigned int index)
{
intr->index = index;
intr->vdev = vdev;
intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index);
if (!intr->ctrl) {
printk(KERN_ERR "Failed to hook INTR[%d].ctrl resource\n",
index);
return -EINVAL;
}
return 0;
}
void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
unsigned int coalescing_type, unsigned int mask_on_assertion)
{
iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer);
iowrite32(coalescing_type, &intr->ctrl->coalescing_type);
iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion);
iowrite32(0, &intr->ctrl->int_credits);
}
void vnic_intr_clean(struct vnic_intr *intr)
{
iowrite32(0, &intr->ctrl->int_credits);
}

drivers/net/enic/vnic_intr.h Normal file
@@ -0,0 +1,92 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _VNIC_INTR_H_
#define _VNIC_INTR_H_
#include <linux/pci.h>
#include "vnic_dev.h"
#define VNIC_INTR_TIMER_MAX 0xffff
#define VNIC_INTR_TIMER_TYPE_ABS 0
#define VNIC_INTR_TIMER_TYPE_QUIET 1
/* Interrupt control */
struct vnic_intr_ctrl {
u32 coalescing_timer; /* 0x00 */
u32 pad0;
u32 coalescing_value; /* 0x08 */
u32 pad1;
u32 coalescing_type; /* 0x10 */
u32 pad2;
u32 mask_on_assertion; /* 0x18 */
u32 pad3;
u32 mask; /* 0x20 */
u32 pad4;
u32 int_credits; /* 0x28 */
u32 pad5;
u32 int_credit_return; /* 0x30 */
u32 pad6;
};
struct vnic_intr {
unsigned int index;
struct vnic_dev *vdev;
struct vnic_intr_ctrl __iomem *ctrl; /* memory-mapped */
};
static inline void vnic_intr_unmask(struct vnic_intr *intr)
{
iowrite32(0, &intr->ctrl->mask);
}
static inline void vnic_intr_mask(struct vnic_intr *intr)
{
iowrite32(1, &intr->ctrl->mask);
}
static inline void vnic_intr_return_credits(struct vnic_intr *intr,
unsigned int credits, int unmask, int reset_timer)
{
#define VNIC_INTR_UNMASK_SHIFT 16
#define VNIC_INTR_RESET_TIMER_SHIFT 17
u32 int_credit_return = (credits & 0xffff) |
(unmask ? (1 << VNIC_INTR_UNMASK_SHIFT) : 0) |
(reset_timer ? (1 << VNIC_INTR_RESET_TIMER_SHIFT) : 0);
iowrite32(int_credit_return, &intr->ctrl->int_credit_return);
}
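/*
* Credit return folds three operations into one 32-bit write: the
* event count in bits 15:0, unmask in bit 16 and coalescing-timer
* reset in bit 17. Acking N events and re-enabling the interrupt at
* the end of a poll cycle is therefore a single atomic doorbell rather
* than two separate writes that new events could race between.
*/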
static inline u32 vnic_intr_legacy_pba(u32 __iomem *legacy_pba)
{
/* get and ack interrupt in one read (clear-and-ack-on-read) */
return ioread32(legacy_pba);
}
void vnic_intr_free(struct vnic_intr *intr);
int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
unsigned int index);
void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
unsigned int coalescing_type, unsigned int mask_on_assertion);
void vnic_intr_clean(struct vnic_intr *intr);
#endif /* _VNIC_INTR_H_ */

drivers/net/enic/vnic_nic.h Normal file
@@ -0,0 +1,65 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _VNIC_NIC_H_
#define _VNIC_NIC_H_
#define NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD 0xffUL
#define NIC_CFG_RSS_DEFAULT_CPU_SHIFT 0
#define NIC_CFG_RSS_HASH_TYPE (0xffUL << 8)
#define NIC_CFG_RSS_HASH_TYPE_MASK_FIELD 0xffUL
#define NIC_CFG_RSS_HASH_TYPE_SHIFT 8
#define NIC_CFG_RSS_HASH_BITS (7UL << 16)
#define NIC_CFG_RSS_HASH_BITS_MASK_FIELD 7UL
#define NIC_CFG_RSS_HASH_BITS_SHIFT 16
#define NIC_CFG_RSS_BASE_CPU (7UL << 19)
#define NIC_CFG_RSS_BASE_CPU_MASK_FIELD 7UL
#define NIC_CFG_RSS_BASE_CPU_SHIFT 19
#define NIC_CFG_RSS_ENABLE (1UL << 22)
#define NIC_CFG_RSS_ENABLE_MASK_FIELD 1UL
#define NIC_CFG_RSS_ENABLE_SHIFT 22
#define NIC_CFG_TSO_IPID_SPLIT_EN (1UL << 23)
#define NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD 1UL
#define NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT 23
#define NIC_CFG_IG_VLAN_STRIP_EN (1UL << 24)
#define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL
#define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24
static inline void vnic_set_nic_cfg(u32 *nic_cfg,
u8 rss_default_cpu, u8 rss_hash_type,
u8 rss_hash_bits, u8 rss_base_cpu,
u8 rss_enable, u8 tso_ipid_split_en,
u8 ig_vlan_strip_en)
{
*nic_cfg = (rss_default_cpu & NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD) |
((rss_hash_type & NIC_CFG_RSS_HASH_TYPE_MASK_FIELD)
<< NIC_CFG_RSS_HASH_TYPE_SHIFT) |
((rss_hash_bits & NIC_CFG_RSS_HASH_BITS_MASK_FIELD)
<< NIC_CFG_RSS_HASH_BITS_SHIFT) |
((rss_base_cpu & NIC_CFG_RSS_BASE_CPU_MASK_FIELD)
<< NIC_CFG_RSS_BASE_CPU_SHIFT) |
((rss_enable & NIC_CFG_RSS_ENABLE_MASK_FIELD)
<< NIC_CFG_RSS_ENABLE_SHIFT) |
((tso_ipid_split_en & NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD)
<< NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT) |
((ig_vlan_strip_en & NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD)
<< NIC_CFG_IG_VLAN_STRIP_EN_SHIFT);
}
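/*
* Example (with an illustrative rss_hash_type of 1): enabling RSS with
* a 64-entry indirection table (rss_hash_bits = 6) and ingress VLAN
* stripping,
*
*	u32 nic_cfg;
*	vnic_set_nic_cfg(&nic_cfg, 0, 1, 6, 0, 1, 0, 1);
*
* yields nic_cfg = (1 << 8) | (6 << 16) | (1 << 22) | (1 << 24) =
* 0x01460100, ready to be posted with CMD_NIC_CFG.
*/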
#endif /* _VNIC_NIC_H_ */

drivers/net/enic/vnic_resource.h Normal file
@@ -0,0 +1,63 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _VNIC_RESOURCE_H_
#define _VNIC_RESOURCE_H_
#define VNIC_RES_MAGIC 0x766E6963L /* 'vnic' */
#define VNIC_RES_VERSION 0x00000000L
/* vNIC resource types */
enum vnic_res_type {
RES_TYPE_EOL, /* End-of-list */
RES_TYPE_WQ, /* Work queues */
RES_TYPE_RQ, /* Receive queues */
RES_TYPE_CQ, /* Completion queues */
RES_TYPE_RSVD1,
RES_TYPE_NIC_CFG, /* Enet NIC config registers */
RES_TYPE_RSVD2,
RES_TYPE_RSVD3,
RES_TYPE_RSVD4,
RES_TYPE_RSVD5,
RES_TYPE_INTR_CTRL, /* Interrupt ctrl table */
RES_TYPE_INTR_TABLE, /* MSI/MSI-X Interrupt table */
RES_TYPE_INTR_PBA, /* MSI/MSI-X PBA table */
RES_TYPE_INTR_PBA_LEGACY, /* Legacy intr status, r2c */
RES_TYPE_RSVD6,
RES_TYPE_RSVD7,
RES_TYPE_DEVCMD, /* Device command region */
RES_TYPE_PASS_THRU_PAGE, /* Pass-thru page */
RES_TYPE_MAX, /* Count of resource types */
};
struct vnic_resource_header {
u32 magic;
u32 version;
};
struct vnic_resource {
u8 type;
u8 bar;
u8 pad[2];
u32 bar_offset;
u32 count;
};
#endif /* _VNIC_RESOURCE_H_ */

drivers/net/enic/vnic_rq.c Normal file
@@ -0,0 +1,199 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include "vnic_dev.h"
#include "vnic_rq.h"
static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
{
struct vnic_rq_buf *buf;
struct vnic_dev *vdev;
unsigned int i, j, count = rq->ring.desc_count;
unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);
vdev = rq->vdev;
for (i = 0; i < blks; i++) {
rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC);
if (!rq->bufs[i]) {
printk(KERN_ERR "Failed to alloc rq_bufs\n");
return -ENOMEM;
}
}
for (i = 0; i < blks; i++) {
buf = rq->bufs[i];
for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES; j++) {
buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES + j;
buf->desc = (u8 *)rq->ring.descs +
rq->ring.desc_size * buf->index;
if (buf->index + 1 == count) {
buf->next = rq->bufs[0];
break;
} else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES) {
buf->next = rq->bufs[i + 1];
} else {
buf->next = buf + 1;
buf++;
}
}
}
rq->to_use = rq->to_clean = rq->bufs[0];
rq->buf_index = 0;
return 0;
}
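/*
* The rq_buf bookkeeping is allocated in 64-entry blocks rather than
* one large array, and the entries are threaded into a singly linked
* ring: the last entry of each block points at the first entry of the
* next, and the entry for the final descriptor points back at bufs[0].
* to_use/to_clean then walk the ring by pointer chasing, with no
* modulo arithmetic in the fast path.
*/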
void vnic_rq_free(struct vnic_rq *rq)
{
struct vnic_dev *vdev;
unsigned int i;
vdev = rq->vdev;
vnic_dev_free_desc_ring(vdev, &rq->ring);
for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) {
kfree(rq->bufs[i]);
rq->bufs[i] = NULL;
}
rq->ctrl = NULL;
}
int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
unsigned int desc_count, unsigned int desc_size)
{
int err;
rq->index = index;
rq->vdev = vdev;
rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
if (!rq->ctrl) {
printk(KERN_ERR "Failed to hook RQ[%d] resource\n", index);
return -EINVAL;
}
vnic_rq_disable(rq);
err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size);
if (err)
return err;
err = vnic_rq_alloc_bufs(rq);
if (err) {
vnic_rq_free(rq);
return err;
}
return 0;
}
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset)
{
u64 paddr;
u32 fetch_index;
paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
writeq(paddr, &rq->ctrl->ring_base);
iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size);
iowrite32(cq_index, &rq->ctrl->cq_index);
iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
iowrite32(0, &rq->ctrl->dropped_packet_count);
iowrite32(0, &rq->ctrl->error_status);
/* Use current fetch_index as the ring starting point */
fetch_index = ioread32(&rq->ctrl->fetch_index);
rq->to_use = rq->to_clean =
&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
iowrite32(fetch_index, &rq->ctrl->posted_index);
rq->buf_index = 0;
}
unsigned int vnic_rq_error_status(struct vnic_rq *rq)
{
return ioread32(&rq->ctrl->error_status);
}
void vnic_rq_enable(struct vnic_rq *rq)
{
iowrite32(1, &rq->ctrl->enable);
}
int vnic_rq_disable(struct vnic_rq *rq)
{
unsigned int wait;
iowrite32(0, &rq->ctrl->enable);
/* Wait for HW to ACK disable request */
for (wait = 0; wait < 100; wait++) {
if (!(ioread32(&rq->ctrl->running)))
return 0;
udelay(1);
}
printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index);
return -ETIMEDOUT;
}
void vnic_rq_clean(struct vnic_rq *rq,
void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
{
struct vnic_rq_buf *buf;
u32 fetch_index;
BUG_ON(ioread32(&rq->ctrl->enable));
buf = rq->to_clean;
while (vnic_rq_desc_used(rq) > 0) {
(*buf_clean)(rq, buf);
buf = rq->to_clean = buf->next;
rq->ring.desc_avail++;
}
/* Use current fetch_index as the ring starting point */
fetch_index = ioread32(&rq->ctrl->fetch_index);
rq->to_use = rq->to_clean =
&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
iowrite32(fetch_index, &rq->ctrl->posted_index);
rq->buf_index = 0;
vnic_dev_clear_desc_ring(&rq->ring);
}

drivers/net/enic/vnic_rq.h Normal file
@@ -0,0 +1,204 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _VNIC_RQ_H_
#define _VNIC_RQ_H_
#include <linux/pci.h>
#include "vnic_dev.h"
#include "vnic_cq.h"
/* Receive queue control */
struct vnic_rq_ctrl {
u64 ring_base; /* 0x00 */
u32 ring_size; /* 0x08 */
u32 pad0;
u32 posted_index; /* 0x10 */
u32 pad1;
u32 cq_index; /* 0x18 */
u32 pad2;
u32 enable; /* 0x20 */
u32 pad3;
u32 running; /* 0x28 */
u32 pad4;
u32 fetch_index; /* 0x30 */
u32 pad5;
u32 error_interrupt_enable; /* 0x38 */
u32 pad6;
u32 error_interrupt_offset; /* 0x40 */
u32 pad7;
u32 error_status; /* 0x48 */
u32 pad8;
u32 dropped_packet_count; /* 0x50 */
u32 pad9;
u32 dropped_packet_count_rc; /* 0x58 */
u32 pad10;
};
/* Break the vnic_rq_buf allocations into blocks of 64 entries */
#define VNIC_RQ_BUF_BLK_ENTRIES 64
#define VNIC_RQ_BUF_BLK_SZ \
(VNIC_RQ_BUF_BLK_ENTRIES * sizeof(struct vnic_rq_buf))
#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES)
#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
struct vnic_rq_buf {
struct vnic_rq_buf *next;
dma_addr_t dma_addr;
void *os_buf;
unsigned int os_buf_index;
unsigned int len;
unsigned int index;
void *desc;
};
struct vnic_rq {
unsigned int index;
struct vnic_dev *vdev;
struct vnic_rq_ctrl __iomem *ctrl; /* memory-mapped */
struct vnic_dev_ring ring;
struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX];
struct vnic_rq_buf *to_use;
struct vnic_rq_buf *to_clean;
void *os_buf_head;
unsigned int buf_index;
unsigned int pkts_outstanding;
};
static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
{
/* how many does SW own? */
return rq->ring.desc_avail;
}
static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
{
/* how many does HW own? */
return rq->ring.desc_count - rq->ring.desc_avail - 1;
}
static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
{
return rq->to_use->desc;
}
static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
{
return rq->to_use->index;
}
static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq)
{
return rq->buf_index++;
}
static inline void vnic_rq_post(struct vnic_rq *rq,
void *os_buf, unsigned int os_buf_index,
dma_addr_t dma_addr, unsigned int len)
{
struct vnic_rq_buf *buf = rq->to_use;
buf->os_buf = os_buf;
buf->os_buf_index = os_buf_index;
buf->dma_addr = dma_addr;
buf->len = len;
buf = buf->next;
rq->to_use = buf;
rq->ring.desc_avail--;
/* Move the posted_index every nth descriptor
*/
#ifndef VNIC_RQ_RETURN_RATE
#define VNIC_RQ_RETURN_RATE 0xf /* keep 2^n - 1 */
#endif
if ((buf->index & VNIC_RQ_RETURN_RATE) == 0)
iowrite32(buf->index, &rq->ctrl->posted_index);
}
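/*
* The posted_index doorbell is rung only once every
* VNIC_RQ_RETURN_RATE + 1 = 16 buffers (the mask must stay 2^n - 1),
* so a burst of posts costs one MMIO write per 16 descriptors instead
* of one per descriptor. The trade-off: up to 15 filled buffers can
* sit unannounced until the next multiple-of-16 index comes around.
*/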
static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
{
rq->ring.desc_avail += count;
}
enum desc_return_options {
VNIC_RQ_RETURN_DESC,
VNIC_RQ_DEFER_RETURN_DESC,
};
static inline void vnic_rq_service(struct vnic_rq *rq,
struct cq_desc *cq_desc, u16 completed_index,
int desc_return, void (*buf_service)(struct vnic_rq *rq,
struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
int skipped, void *opaque), void *opaque)
{
struct vnic_rq_buf *buf;
int skipped;
buf = rq->to_clean;
while (1) {
skipped = (buf->index != completed_index);
(*buf_service)(rq, cq_desc, buf, skipped, opaque);
if (desc_return == VNIC_RQ_RETURN_DESC)
rq->ring.desc_avail++;
rq->to_clean = buf->next;
if (!skipped)
break;
buf = rq->to_clean;
}
}
static inline int vnic_rq_fill(struct vnic_rq *rq,
int (*buf_fill)(struct vnic_rq *rq))
{
int err;
while (vnic_rq_desc_avail(rq) > 1) {
err = (*buf_fill)(rq);
if (err)
return err;
}
return 0;
}
void vnic_rq_free(struct vnic_rq *rq);
int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
unsigned int desc_count, unsigned int desc_size);
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset);
unsigned int vnic_rq_error_status(struct vnic_rq *rq);
void vnic_rq_enable(struct vnic_rq *rq);
int vnic_rq_disable(struct vnic_rq *rq);
void vnic_rq_clean(struct vnic_rq *rq,
void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));
#endif /* _VNIC_RQ_H_ */

drivers/net/enic/vnic_rss.h Normal file
@@ -0,0 +1,32 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _VNIC_RSS_H_
#define _VNIC_RSS_H_
/* RSS key array */
union vnic_rss_key {
struct {
u8 b[10];
u8 b_pad[6];
} key[4];
u64 raw[8];
};
/* RSS cpu array */
union vnic_rss_cpu {
struct {
u8 b[4];
u8 b_pad[4];
} cpu[32];
u64 raw[32];
};
void vnic_set_rss_key(union vnic_rss_key *rss_key, u8 *key);
void vnic_set_rss_cpu(union vnic_rss_cpu *rss_cpu, u8 *cpu);
void vnic_get_rss_key(union vnic_rss_key *rss_key, u8 *key);
void vnic_get_rss_cpu(union vnic_rss_cpu *rss_cpu, u8 *cpu);
#endif /* _VNIC_RSS_H_ */

drivers/net/enic/vnic_stats.h Normal file
@@ -0,0 +1,70 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _VNIC_STATS_H_
#define _VNIC_STATS_H_
/* Tx statistics */
struct vnic_tx_stats {
u64 tx_frames_ok;
u64 tx_unicast_frames_ok;
u64 tx_multicast_frames_ok;
u64 tx_broadcast_frames_ok;
u64 tx_bytes_ok;
u64 tx_unicast_bytes_ok;
u64 tx_multicast_bytes_ok;
u64 tx_broadcast_bytes_ok;
u64 tx_drops;
u64 tx_errors;
u64 tx_tso;
u64 rsvd[16];
};
/* Rx statistics */
struct vnic_rx_stats {
u64 rx_frames_ok;
u64 rx_frames_total;
u64 rx_unicast_frames_ok;
u64 rx_multicast_frames_ok;
u64 rx_broadcast_frames_ok;
u64 rx_bytes_ok;
u64 rx_unicast_bytes_ok;
u64 rx_multicast_bytes_ok;
u64 rx_broadcast_bytes_ok;
u64 rx_drop;
u64 rx_no_bufs;
u64 rx_errors;
u64 rx_rss;
u64 rx_crc_errors;
u64 rx_frames_64;
u64 rx_frames_127;
u64 rx_frames_255;
u64 rx_frames_511;
u64 rx_frames_1023;
u64 rx_frames_1518;
u64 rx_frames_to_max;
u64 rsvd[16];
};
struct vnic_stats {
struct vnic_tx_stats tx;
struct vnic_rx_stats rx;
};
#endif /* _VNIC_STATS_H_ */

drivers/net/enic/vnic_wq.c Normal file
@@ -0,0 +1,184 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include "vnic_dev.h"
#include "vnic_wq.h"
static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
{
struct vnic_wq_buf *buf;
struct vnic_dev *vdev;
unsigned int i, j, count = wq->ring.desc_count;
unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
vdev = wq->vdev;
for (i = 0; i < blks; i++) {
wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC);
if (!wq->bufs[i]) {
printk(KERN_ERR "Failed to alloc wq_bufs\n");
return -ENOMEM;
}
}
for (i = 0; i < blks; i++) {
buf = wq->bufs[i];
for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES; j++) {
buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES + j;
buf->desc = (u8 *)wq->ring.descs +
wq->ring.desc_size * buf->index;
if (buf->index + 1 == count) {
buf->next = wq->bufs[0];
break;
} else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES) {
buf->next = wq->bufs[i + 1];
} else {
buf->next = buf + 1;
buf++;
}
}
}
wq->to_use = wq->to_clean = wq->bufs[0];
return 0;
}
void vnic_wq_free(struct vnic_wq *wq)
{
struct vnic_dev *vdev;
unsigned int i;
vdev = wq->vdev;
vnic_dev_free_desc_ring(vdev, &wq->ring);
for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) {
kfree(wq->bufs[i]);
wq->bufs[i] = NULL;
}
wq->ctrl = NULL;
}
int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
unsigned int desc_count, unsigned int desc_size)
{
int err;
wq->index = index;
wq->vdev = vdev;
wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
if (!wq->ctrl) {
printk(KERN_ERR "Failed to hook WQ[%d] resource\n", index);
return -EINVAL;
}
vnic_wq_disable(wq);
err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
if (err)
return err;
err = vnic_wq_alloc_bufs(wq);
if (err) {
vnic_wq_free(wq);
return err;
}
return 0;
}
void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset)
{
u64 paddr;
paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
writeq(paddr, &wq->ctrl->ring_base);
iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size);
iowrite32(0, &wq->ctrl->fetch_index);
iowrite32(0, &wq->ctrl->posted_index);
iowrite32(cq_index, &wq->ctrl->cq_index);
iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
iowrite32(0, &wq->ctrl->error_status);
}
unsigned int vnic_wq_error_status(struct vnic_wq *wq)
{
return ioread32(&wq->ctrl->error_status);
}
void vnic_wq_enable(struct vnic_wq *wq)
{
iowrite32(1, &wq->ctrl->enable);
}
int vnic_wq_disable(struct vnic_wq *wq)
{
unsigned int wait;
iowrite32(0, &wq->ctrl->enable);
/* Wait for HW to ACK disable request */
for (wait = 0; wait < 100; wait++) {
if (!(ioread32(&wq->ctrl->running)))
return 0;
udelay(1);
}
printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index);
return -ETIMEDOUT;
}
void vnic_wq_clean(struct vnic_wq *wq,
void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
{
struct vnic_wq_buf *buf;
BUG_ON(ioread32(&wq->ctrl->enable));
buf = wq->to_clean;
while (vnic_wq_desc_used(wq) > 0) {
(*buf_clean)(wq, buf);
buf = wq->to_clean = buf->next;
wq->ring.desc_avail++;
}
wq->to_use = wq->to_clean = wq->bufs[0];
iowrite32(0, &wq->ctrl->fetch_index);
iowrite32(0, &wq->ctrl->posted_index);
iowrite32(0, &wq->ctrl->error_status);
vnic_dev_clear_desc_ring(&wq->ring);
}

drivers/net/enic/vnic_wq.h Normal file
@@ -0,0 +1,154 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _VNIC_WQ_H_
#define _VNIC_WQ_H_
#include <linux/pci.h>
#include "vnic_dev.h"
#include "vnic_cq.h"
/* Work queue control */
struct vnic_wq_ctrl {
u64 ring_base; /* 0x00 */
u32 ring_size; /* 0x08 */
u32 pad0;
u32 posted_index; /* 0x10 */
u32 pad1;
u32 cq_index; /* 0x18 */
u32 pad2;
u32 enable; /* 0x20 */
u32 pad3;
u32 running; /* 0x28 */
u32 pad4;
u32 fetch_index; /* 0x30 */
u32 pad5;
u32 dca_value; /* 0x38 */
u32 pad6;
u32 error_interrupt_enable; /* 0x40 */
u32 pad7;
u32 error_interrupt_offset; /* 0x48 */
u32 pad8;
u32 error_status; /* 0x50 */
u32 pad9;
};
struct vnic_wq_buf {
struct vnic_wq_buf *next;
dma_addr_t dma_addr;
void *os_buf;
unsigned int len;
unsigned int index;
int sop;
void *desc;
};
/* Break the vnic_wq_buf allocations into blocks of 64 entries */
#define VNIC_WQ_BUF_BLK_ENTRIES 64
#define VNIC_WQ_BUF_BLK_SZ \
(VNIC_WQ_BUF_BLK_ENTRIES * sizeof(struct vnic_wq_buf))
#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES)
#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
struct vnic_wq {
unsigned int index;
struct vnic_dev *vdev;
struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */
struct vnic_dev_ring ring;
struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX];
struct vnic_wq_buf *to_use;
struct vnic_wq_buf *to_clean;
unsigned int pkts_outstanding;
};
static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
{
/* how many does SW own? */
return wq->ring.desc_avail;
}
static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq)
{
/* how many does HW own? */
return wq->ring.desc_count - wq->ring.desc_avail - 1;
}
static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
{
return wq->to_use->desc;
}
static inline void vnic_wq_post(struct vnic_wq *wq,
void *os_buf, dma_addr_t dma_addr,
unsigned int len, int sop, int eop)
{
struct vnic_wq_buf *buf = wq->to_use;
buf->sop = sop;
buf->os_buf = eop ? os_buf : NULL;
buf->dma_addr = dma_addr;
buf->len = len;
buf = buf->next;
if (eop)
iowrite32(buf->index, &wq->ctrl->posted_index);
wq->to_use = buf;
wq->ring.desc_avail--;
}
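/*
* Multi-fragment sends post one descriptor per fragment and ring the
* posted_index doorbell only on the fragment with eop set. Since the
* write happens after buf = buf->next, hardware is handed the index of
* the first not-yet-posted slot. os_buf is recorded only on the eop
* fragment, so the completion path frees the packet exactly once.
*/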
static inline void vnic_wq_service(struct vnic_wq *wq,
struct cq_desc *cq_desc, u16 completed_index,
void (*buf_service)(struct vnic_wq *wq,
struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque),
void *opaque)
{
struct vnic_wq_buf *buf;
buf = wq->to_clean;
while (1) {
(*buf_service)(wq, cq_desc, buf, opaque);
wq->ring.desc_avail++;
wq->to_clean = buf->next;
if (buf->index == completed_index)
break;
buf = wq->to_clean;
}
}
void vnic_wq_free(struct vnic_wq *wq);
int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
unsigned int desc_count, unsigned int desc_size);
void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset);
unsigned int vnic_wq_error_status(struct vnic_wq *wq);
void vnic_wq_enable(struct vnic_wq *wq);
int vnic_wq_disable(struct vnic_wq *wq);
void vnic_wq_clean(struct vnic_wq *wq,
void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
#endif /* _VNIC_WQ_H_ */

drivers/net/enic/wq_enet_desc.h Normal file
@@ -0,0 +1,98 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _WQ_ENET_DESC_H_
#define _WQ_ENET_DESC_H_
/* Ethernet work queue descriptor: 16B */
struct wq_enet_desc {
__le64 address;
__le16 length;
__le16 mss_loopback;
__le16 header_length_flags;
__le16 vlan_tag;
};
#define WQ_ENET_ADDR_BITS 64
#define WQ_ENET_LEN_BITS 14
#define WQ_ENET_LEN_MASK ((1 << WQ_ENET_LEN_BITS) - 1)
#define WQ_ENET_MSS_BITS 14
#define WQ_ENET_MSS_MASK ((1 << WQ_ENET_MSS_BITS) - 1)
#define WQ_ENET_MSS_SHIFT 2
#define WQ_ENET_LOOPBACK_SHIFT 1
#define WQ_ENET_HDRLEN_BITS 10
#define WQ_ENET_HDRLEN_MASK ((1 << WQ_ENET_HDRLEN_BITS) - 1)
#define WQ_ENET_FLAGS_OM_BITS 2
#define WQ_ENET_FLAGS_OM_MASK ((1 << WQ_ENET_FLAGS_OM_BITS) - 1)
#define WQ_ENET_FLAGS_EOP_SHIFT 12
#define WQ_ENET_FLAGS_CQ_ENTRY_SHIFT 13
#define WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT 14
#define WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT 15
#define WQ_ENET_OFFLOAD_MODE_CSUM 0
#define WQ_ENET_OFFLOAD_MODE_RESERVED 1
#define WQ_ENET_OFFLOAD_MODE_CSUM_L4 2
#define WQ_ENET_OFFLOAD_MODE_TSO 3
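/*
* Layout of the little-endian header_length_flags word assembled by
* wq_enet_desc_enc() below:
*
*	bits  9:0	header length (used for TSO)
*	bits 11:10	offload mode (CSUM, CSUM_L4 or TSO)
*	bit  12		end of packet
*	bit  13		generate a completion queue entry
*	bit  14		FCoE encapsulation
*	bit  15		insert vlan_tag on transmit
*/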
static inline void wq_enet_desc_enc(struct wq_enet_desc *desc,
u64 address, u16 length, u16 mss, u16 header_length,
u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap,
u8 vlan_tag_insert, u16 vlan_tag, u8 loopback)
{
desc->address = cpu_to_le64(address);
desc->length = cpu_to_le16(length & WQ_ENET_LEN_MASK);
desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) <<
WQ_ENET_MSS_SHIFT | (loopback & 1) << WQ_ENET_LOOPBACK_SHIFT);
desc->header_length_flags = cpu_to_le16(
(header_length & WQ_ENET_HDRLEN_MASK) |
(offload_mode & WQ_ENET_FLAGS_OM_MASK) << WQ_ENET_HDRLEN_BITS |
(eop & 1) << WQ_ENET_FLAGS_EOP_SHIFT |
(cq_entry & 1) << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT |
(fcoe_encap & 1) << WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT |
(vlan_tag_insert & 1) << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT);
desc->vlan_tag = cpu_to_le16(vlan_tag);
}
static inline void wq_enet_desc_dec(struct wq_enet_desc *desc,
u64 *address, u16 *length, u16 *mss, u16 *header_length,
u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap,
u8 *vlan_tag_insert, u16 *vlan_tag, u8 *loopback)
{
*address = le64_to_cpu(desc->address);
*length = le16_to_cpu(desc->length) & WQ_ENET_LEN_MASK;
*mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) &
WQ_ENET_MSS_MASK;
*loopback = (u8)((le16_to_cpu(desc->mss_loopback) >>
WQ_ENET_LOOPBACK_SHIFT) & 1);
*header_length = le16_to_cpu(desc->header_length_flags) &
WQ_ENET_HDRLEN_MASK;
*offload_mode = (u8)((le16_to_cpu(desc->header_length_flags) >>
WQ_ENET_HDRLEN_BITS) & WQ_ENET_FLAGS_OM_MASK);
*eop = (u8)((le16_to_cpu(desc->header_length_flags) >>
WQ_ENET_FLAGS_EOP_SHIFT) & 1);
*cq_entry = (u8)((le16_to_cpu(desc->header_length_flags) >>
WQ_ENET_FLAGS_CQ_ENTRY_SHIFT) & 1);
*fcoe_encap = (u8)((le16_to_cpu(desc->header_length_flags) >>
WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT) & 1);
*vlan_tag_insert = (u8)((le16_to_cpu(desc->header_length_flags) >>
WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT) & 1);
*vlan_tag = le16_to_cpu(desc->vlan_tag);
}
#endif /* _WQ_ENET_DESC_H_ */
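As a usage illustration (a hypothetical helper, not part of this commit), encoding a single-fragment frame with plain checksum offload sets eop and cq_entry and leaves the TSO-only fields zero; dma_addr and len are assumed to come from the caller:

static inline void example_post_csum_frame(struct wq_enet_desc *desc,
					   u64 dma_addr, u16 len)
{
	wq_enet_desc_enc(desc, dma_addr, len,
			 0,	/* mss: unused in CSUM mode */
			 0,	/* header_length: unused in CSUM mode */
			 WQ_ENET_OFFLOAD_MODE_CSUM,
			 1,	/* eop: sole descriptor of the frame */
			 1,	/* cq_entry: request a completion */
			 0,	/* fcoe_encap */
			 0, 0,	/* no VLAN tag insert */
			 0);	/* loopback */
}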

3019
drivers/net/jme.c Normal file

File diff suppressed because it is too large.

1199
drivers/net/jme.h Normal file

File diff suppressed because it is too large.

7
drivers/net/qlge/Makefile Normal file
View File

@ -0,0 +1,7 @@
#
# Makefile for the QLogic 10GbE PCI Express Ethernet driver
#
obj-$(CONFIG_QLGE) += qlge.o
qlge-objs := qlge_main.o qlge_dbg.o qlge_mpi.o qlge_ethtool.o

1593
drivers/net/qlge/qlge.h Normal file

File diff suppressed because it is too large.

858
drivers/net/qlge/qlge_dbg.c Normal file
View File

@ -0,0 +1,858 @@
#include "qlge.h"
#ifdef QL_REG_DUMP
static void ql_dump_intr_states(struct ql_adapter *qdev)
{
int i;
u32 value;
for (i = 0; i < qdev->intr_count; i++) {
ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask);
value = ql_read32(qdev, INTR_EN);
printk(KERN_ERR PFX
"%s: Interrupt %d is %s.\n",
qdev->ndev->name, i,
(value & INTR_EN_EN ? "enabled" : "disabled"));
}
}
void ql_dump_xgmac_control_regs(struct ql_adapter *qdev)
{
u32 data;
if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
printk(KERN_ERR "%s: Couldn't get xgmac sem.\n", __func__);
return;
}
ql_read_xgmac_reg(qdev, PAUSE_SRC_LO, &data);
printk(KERN_ERR PFX "%s: PAUSE_SRC_LO = 0x%.08x.\n", qdev->ndev->name,
data);
ql_read_xgmac_reg(qdev, PAUSE_SRC_HI, &data);
printk(KERN_ERR PFX "%s: PAUSE_SRC_HI = 0x%.08x.\n", qdev->ndev->name,
data);
ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
printk(KERN_ERR PFX "%s: GLOBAL_CFG = 0x%.08x.\n", qdev->ndev->name,
data);
ql_read_xgmac_reg(qdev, TX_CFG, &data);
printk(KERN_ERR PFX "%s: TX_CFG = 0x%.08x.\n", qdev->ndev->name, data);
ql_read_xgmac_reg(qdev, RX_CFG, &data);
printk(KERN_ERR PFX "%s: RX_CFG = 0x%.08x.\n", qdev->ndev->name, data);
ql_read_xgmac_reg(qdev, FLOW_CTL, &data);
printk(KERN_ERR PFX "%s: FLOW_CTL = 0x%.08x.\n", qdev->ndev->name,
data);
ql_read_xgmac_reg(qdev, PAUSE_OPCODE, &data);
printk(KERN_ERR PFX "%s: PAUSE_OPCODE = 0x%.08x.\n", qdev->ndev->name,
data);
ql_read_xgmac_reg(qdev, PAUSE_TIMER, &data);
printk(KERN_ERR PFX "%s: PAUSE_TIMER = 0x%.08x.\n", qdev->ndev->name,
data);
ql_read_xgmac_reg(qdev, PAUSE_FRM_DEST_LO, &data);
printk(KERN_ERR PFX "%s: PAUSE_FRM_DEST_LO = 0x%.08x.\n",
qdev->ndev->name, data);
ql_read_xgmac_reg(qdev, PAUSE_FRM_DEST_HI, &data);
printk(KERN_ERR PFX "%s: PAUSE_FRM_DEST_HI = 0x%.08x.\n",
qdev->ndev->name, data);
ql_read_xgmac_reg(qdev, MAC_TX_PARAMS, &data);
printk(KERN_ERR PFX "%s: MAC_TX_PARAMS = 0x%.08x.\n", qdev->ndev->name,
data);
ql_read_xgmac_reg(qdev, MAC_RX_PARAMS, &data);
printk(KERN_ERR PFX "%s: MAC_RX_PARAMS = 0x%.08x.\n", qdev->ndev->name,
data);
ql_read_xgmac_reg(qdev, MAC_SYS_INT, &data);
printk(KERN_ERR PFX "%s: MAC_SYS_INT = 0x%.08x.\n", qdev->ndev->name,
data);
ql_read_xgmac_reg(qdev, MAC_SYS_INT_MASK, &data);
printk(KERN_ERR PFX "%s: MAC_SYS_INT_MASK = 0x%.08x.\n",
qdev->ndev->name, data);
ql_read_xgmac_reg(qdev, MAC_MGMT_INT, &data);
printk(KERN_ERR PFX "%s: MAC_MGMT_INT = 0x%.08x.\n", qdev->ndev->name,
data);
ql_read_xgmac_reg(qdev, MAC_MGMT_IN_MASK, &data);
printk(KERN_ERR PFX "%s: MAC_MGMT_IN_MASK = 0x%.08x.\n",
qdev->ndev->name, data);
ql_read_xgmac_reg(qdev, EXT_ARB_MODE, &data);
printk(KERN_ERR PFX "%s: EXT_ARB_MODE = 0x%.08x.\n", qdev->ndev->name,
data);
ql_sem_unlock(qdev, qdev->xg_sem_mask);
}
static void ql_dump_ets_regs(struct ql_adapter *qdev)
{
}
static void ql_dump_cam_entries(struct ql_adapter *qdev)
{
int i;
u32 value[3];
for (i = 0; i < 4; i++) {
if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) {
printk(KERN_ERR PFX
"%s: Failed read of mac index register.\n",
__func__);
return;
} else {
if (value[0])
printk(KERN_ERR PFX
"%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x.\n",
qdev->ndev->name, i, value[1], value[0],
value[2]);
}
}
for (i = 0; i < 32; i++) {
if (ql_get_mac_addr_reg
(qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) {
printk(KERN_ERR PFX
"%s: Failed read of mac index register.\n",
__func__);
return;
} else {
if (value[0])
printk(KERN_ERR PFX
"%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x.\n",
qdev->ndev->name, i, value[1], value[0]);
}
}
}
void ql_dump_routing_entries(struct ql_adapter *qdev)
{
int i;
u32 value;
for (i = 0; i < 16; i++) {
value = 0;
if (ql_get_routing_reg(qdev, i, &value)) {
printk(KERN_ERR PFX
"%s: Failed read of routing index register.\n",
__func__);
return;
} else {
if (value)
printk(KERN_ERR PFX
"%s: Routing Mask %d = 0x%.08x.\n",
qdev->ndev->name, i, value);
}
}
}
void ql_dump_regs(struct ql_adapter *qdev)
{
printk(KERN_ERR PFX "reg dump for function #%d.\n", qdev->func);
printk(KERN_ERR PFX "SYS = 0x%x.\n",
ql_read32(qdev, SYS));
printk(KERN_ERR PFX "RST_FO = 0x%x.\n",
ql_read32(qdev, RST_FO));
printk(KERN_ERR PFX "FSC = 0x%x.\n",
ql_read32(qdev, FSC));
printk(KERN_ERR PFX "CSR = 0x%x.\n",
ql_read32(qdev, CSR));
printk(KERN_ERR PFX "ICB_RID = 0x%x.\n",
ql_read32(qdev, ICB_RID));
printk(KERN_ERR PFX "ICB_L = 0x%x.\n",
ql_read32(qdev, ICB_L));
printk(KERN_ERR PFX "ICB_H = 0x%x.\n",
ql_read32(qdev, ICB_H));
printk(KERN_ERR PFX "CFG = 0x%x.\n",
ql_read32(qdev, CFG));
printk(KERN_ERR PFX "BIOS_ADDR = 0x%x.\n",
ql_read32(qdev, BIOS_ADDR));
printk(KERN_ERR PFX "STS = 0x%x.\n",
ql_read32(qdev, STS));
printk(KERN_ERR PFX "INTR_EN = 0x%x.\n",
ql_read32(qdev, INTR_EN));
printk(KERN_ERR PFX "INTR_MASK = 0x%x.\n",
ql_read32(qdev, INTR_MASK));
printk(KERN_ERR PFX "ISR1 = 0x%x.\n",
ql_read32(qdev, ISR1));
printk(KERN_ERR PFX "ISR2 = 0x%x.\n",
ql_read32(qdev, ISR2));
printk(KERN_ERR PFX "ISR3 = 0x%x.\n",
ql_read32(qdev, ISR3));
printk(KERN_ERR PFX "ISR4 = 0x%x.\n",
ql_read32(qdev, ISR4));
printk(KERN_ERR PFX "REV_ID = 0x%x.\n",
ql_read32(qdev, REV_ID));
printk(KERN_ERR PFX "FRC_ECC_ERR = 0x%x.\n",
ql_read32(qdev, FRC_ECC_ERR));
printk(KERN_ERR PFX "ERR_STS = 0x%x.\n",
ql_read32(qdev, ERR_STS));
printk(KERN_ERR PFX "RAM_DBG_ADDR = 0x%x.\n",
ql_read32(qdev, RAM_DBG_ADDR));
printk(KERN_ERR PFX "RAM_DBG_DATA = 0x%x.\n",
ql_read32(qdev, RAM_DBG_DATA));
printk(KERN_ERR PFX "ECC_ERR_CNT = 0x%x.\n",
ql_read32(qdev, ECC_ERR_CNT));
printk(KERN_ERR PFX "SEM = 0x%x.\n",
ql_read32(qdev, SEM));
printk(KERN_ERR PFX "GPIO_1 = 0x%x.\n",
ql_read32(qdev, GPIO_1));
printk(KERN_ERR PFX "GPIO_2 = 0x%x.\n",
ql_read32(qdev, GPIO_2));
printk(KERN_ERR PFX "GPIO_3 = 0x%x.\n",
ql_read32(qdev, GPIO_3));
printk(KERN_ERR PFX "XGMAC_ADDR = 0x%x.\n",
ql_read32(qdev, XGMAC_ADDR));
printk(KERN_ERR PFX "XGMAC_DATA = 0x%x.\n",
ql_read32(qdev, XGMAC_DATA));
printk(KERN_ERR PFX "NIC_ETS = 0x%x.\n",
ql_read32(qdev, NIC_ETS));
printk(KERN_ERR PFX "CNA_ETS = 0x%x.\n",
ql_read32(qdev, CNA_ETS));
printk(KERN_ERR PFX "FLASH_ADDR = 0x%x.\n",
ql_read32(qdev, FLASH_ADDR));
printk(KERN_ERR PFX "FLASH_DATA = 0x%x.\n",
ql_read32(qdev, FLASH_DATA));
printk(KERN_ERR PFX "CQ_STOP = 0x%x.\n",
ql_read32(qdev, CQ_STOP));
printk(KERN_ERR PFX "PAGE_TBL_RID = 0x%x.\n",
ql_read32(qdev, PAGE_TBL_RID));
printk(KERN_ERR PFX "WQ_PAGE_TBL_LO = 0x%x.\n",
ql_read32(qdev, WQ_PAGE_TBL_LO));
printk(KERN_ERR PFX "WQ_PAGE_TBL_HI = 0x%x.\n",
ql_read32(qdev, WQ_PAGE_TBL_HI));
printk(KERN_ERR PFX "CQ_PAGE_TBL_LO = 0x%x.\n",
ql_read32(qdev, CQ_PAGE_TBL_LO));
printk(KERN_ERR PFX "CQ_PAGE_TBL_HI = 0x%x.\n",
ql_read32(qdev, CQ_PAGE_TBL_HI));
printk(KERN_ERR PFX "COS_DFLT_CQ1 = 0x%x.\n",
ql_read32(qdev, COS_DFLT_CQ1));
printk(KERN_ERR PFX "COS_DFLT_CQ2 = 0x%x.\n",
ql_read32(qdev, COS_DFLT_CQ2));
printk(KERN_ERR PFX "SPLT_HDR = 0x%x.\n",
ql_read32(qdev, SPLT_HDR));
printk(KERN_ERR PFX "FC_PAUSE_THRES = 0x%x.\n",
ql_read32(qdev, FC_PAUSE_THRES));
printk(KERN_ERR PFX "NIC_PAUSE_THRES = 0x%x.\n",
ql_read32(qdev, NIC_PAUSE_THRES));
printk(KERN_ERR PFX "FC_ETHERTYPE = 0x%x.\n",
ql_read32(qdev, FC_ETHERTYPE));
printk(KERN_ERR PFX "FC_RCV_CFG = 0x%x.\n",
ql_read32(qdev, FC_RCV_CFG));
printk(KERN_ERR PFX "NIC_RCV_CFG = 0x%x.\n",
ql_read32(qdev, NIC_RCV_CFG));
printk(KERN_ERR PFX "FC_COS_TAGS = 0x%x.\n",
ql_read32(qdev, FC_COS_TAGS));
printk(KERN_ERR PFX "NIC_COS_TAGS = 0x%x.\n",
ql_read32(qdev, NIC_COS_TAGS));
printk(KERN_ERR PFX "MGMT_RCV_CFG = 0x%x.\n",
ql_read32(qdev, MGMT_RCV_CFG));
printk(KERN_ERR PFX "XG_SERDES_ADDR = 0x%x.\n",
ql_read32(qdev, XG_SERDES_ADDR));
printk(KERN_ERR PFX "XG_SERDES_DATA = 0x%x.\n",
ql_read32(qdev, XG_SERDES_DATA));
printk(KERN_ERR PFX "PRB_MX_ADDR = 0x%x.\n",
ql_read32(qdev, PRB_MX_ADDR));
printk(KERN_ERR PFX "PRB_MX_DATA = 0x%x.\n",
ql_read32(qdev, PRB_MX_DATA));
ql_dump_intr_states(qdev);
ql_dump_xgmac_control_regs(qdev);
ql_dump_ets_regs(qdev);
ql_dump_cam_entries(qdev);
ql_dump_routing_entries(qdev);
}
#endif
#ifdef QL_STAT_DUMP
void ql_dump_stat(struct ql_adapter *qdev)
{
printk(KERN_ERR "%s: Enter.\n", __func__);
printk(KERN_ERR "tx_pkts = %ld\n",
(unsigned long)qdev->nic_stats.tx_pkts);
printk(KERN_ERR "tx_bytes = %ld\n",
(unsigned long)qdev->nic_stats.tx_bytes);
printk(KERN_ERR "tx_mcast_pkts = %ld.\n",
(unsigned long)qdev->nic_stats.tx_mcast_pkts);
printk(KERN_ERR "tx_bcast_pkts = %ld.\n",
(unsigned long)qdev->nic_stats.tx_bcast_pkts);
printk(KERN_ERR "tx_ucast_pkts = %ld.\n",
(unsigned long)qdev->nic_stats.tx_ucast_pkts);
printk(KERN_ERR "tx_ctl_pkts = %ld.\n",
(unsigned long)qdev->nic_stats.tx_ctl_pkts);
printk(KERN_ERR "tx_pause_pkts = %ld.\n",
(unsigned long)qdev->nic_stats.tx_pause_pkts);
printk(KERN_ERR "tx_64_pkt = %ld.\n",
(unsigned long)qdev->nic_stats.tx_64_pkt);
printk(KERN_ERR "tx_65_to_127_pkt = %ld.\n",
(unsigned long)qdev->nic_stats.tx_65_to_127_pkt);
printk(KERN_ERR "tx_128_to_255_pkt = %ld.\n",
(unsigned long)qdev->nic_stats.tx_128_to_255_pkt);
printk(KERN_ERR "tx_256_511_pkt = %ld.\n",
(unsigned long)qdev->nic_stats.tx_256_511_pkt);
printk(KERN_ERR "tx_512_to_1023_pkt = %ld.\n",
(unsigned long)qdev->nic_stats.tx_512_to_1023_pkt);
printk(KERN_ERR "tx_1024_to_1518_pkt = %ld.\n",
(unsigned long)qdev->nic_stats.tx_1024_to_1518_pkt);
printk(KERN_ERR "tx_1519_to_max_pkt = %ld.\n",
(unsigned long)qdev->nic_stats.tx_1519_to_max_pkt);
printk(KERN_ERR "tx_undersize_pkt = %ld.\n",
(unsigned long)qdev->nic_stats.tx_undersize_pkt);
printk(KERN_ERR "tx_oversize_pkt = %ld.\n",
(unsigned long)qdev->nic_stats.tx_oversize_pkt);
printk(KERN_ERR "rx_bytes = %ld.\n",
(unsigned long)qdev->nic_stats.rx_bytes);
printk(KERN_ERR "rx_bytes_ok = %ld.\n",
(unsigned long)qdev->nic_stats.rx_bytes_ok);
printk(KERN_ERR "rx_pkts = %ld.\n",
(unsigned long)qdev->nic_stats.rx_pkts);
printk(KERN_ERR "rx_pkts_ok = %ld.\n",
(unsigned long)qdev->nic_stats.rx_pkts_ok);
printk(KERN_ERR "rx_bcast_pkts = %ld.\n",
(unsigned long)qdev->nic_stats.rx_bcast_pkts);
printk(KERN_ERR "rx_mcast_pkts = %ld.\n",
(unsigned long)qdev->nic_stats.rx_mcast_pkts);
printk(KERN_ERR "rx_ucast_pkts = %ld.\n",
(unsigned long)qdev->nic_stats.rx_ucast_pkts);
printk(KERN_ERR "rx_undersize_pkts = %ld.\n",
(unsigned long)qdev->nic_stats.rx_undersize_pkts);
printk(KERN_ERR "rx_oversize_pkts = %ld.\n",
(unsigned long)qdev->nic_stats.rx_oversize_pkts);
printk(KERN_ERR "rx_jabber_pkts = %ld.\n",
(unsigned long)qdev->nic_stats.rx_jabber_pkts);
printk(KERN_ERR "rx_undersize_fcerr_pkts = %ld.\n",
(unsigned long)qdev->nic_stats.rx_undersize_fcerr_pkts);
printk(KERN_ERR "rx_drop_events = %ld.\n",
(unsigned long)qdev->nic_stats.rx_drop_events);
printk(KERN_ERR "rx_fcerr_pkts = %ld.\n",
(unsigned long)qdev->nic_stats.rx_fcerr_pkts);
printk(KERN_ERR "rx_align_err = %ld.\n",
(unsigned long)qdev->nic_stats.rx_align_err);
printk(KERN_ERR "rx_symbol_err = %ld.\n",
(unsigned long)qdev->nic_stats.rx_symbol_err);
printk(KERN_ERR "rx_mac_err = %ld.\n",
(unsigned long)qdev->nic_stats.rx_mac_err);
printk(KERN_ERR "rx_ctl_pkts = %ld.\n",
(unsigned long)qdev->nic_stats.rx_ctl_pkts);
printk(KERN_ERR "rx_pause_pkts = %ld.\n",
(unsigned long)qdev->nic_stats.rx_pause_pkts);
printk(KERN_ERR "rx_64_pkts = %ld.\n",
(unsigned long)qdev->nic_stats.rx_64_pkts);
printk(KERN_ERR "rx_65_to_127_pkts = %ld.\n",
(unsigned long)qdev->nic_stats.rx_65_to_127_pkts);
printk(KERN_ERR "rx_128_255_pkts = %ld.\n",
(unsigned long)qdev->nic_stats.rx_128_255_pkts);
printk(KERN_ERR "rx_256_511_pkts = %ld.\n",
(unsigned long)qdev->nic_stats.rx_256_511_pkts);
printk(KERN_ERR "rx_512_to_1023_pkts = %ld.\n",
(unsigned long)qdev->nic_stats.rx_512_to_1023_pkts);
printk(KERN_ERR "rx_1024_to_1518_pkts = %ld.\n",
(unsigned long)qdev->nic_stats.rx_1024_to_1518_pkts);
printk(KERN_ERR "rx_1519_to_max_pkts = %ld.\n",
(unsigned long)qdev->nic_stats.rx_1519_to_max_pkts);
printk(KERN_ERR "rx_len_err_pkts = %ld.\n",
(unsigned long)qdev->nic_stats.rx_len_err_pkts);
}
#endif
#ifdef QL_DEV_DUMP
void ql_dump_qdev(struct ql_adapter *qdev)
{
int i;
printk(KERN_ERR PFX "qdev->flags = %lx.\n",
qdev->flags);
printk(KERN_ERR PFX "qdev->vlgrp = %p.\n",
qdev->vlgrp);
printk(KERN_ERR PFX "qdev->pdev = %p.\n",
qdev->pdev);
printk(KERN_ERR PFX "qdev->ndev = %p.\n",
qdev->ndev);
printk(KERN_ERR PFX "qdev->chip_rev_id = %d.\n",
qdev->chip_rev_id);
printk(KERN_ERR PFX "qdev->reg_base = %p.\n",
qdev->reg_base);
printk(KERN_ERR PFX "qdev->doorbell_area = %p.\n",
qdev->doorbell_area);
printk(KERN_ERR PFX "qdev->doorbell_area_size = %d.\n",
qdev->doorbell_area_size);
printk(KERN_ERR PFX "msg_enable = %x.\n",
qdev->msg_enable);
printk(KERN_ERR PFX "qdev->rx_ring_shadow_reg_area = %p.\n",
qdev->rx_ring_shadow_reg_area);
printk(KERN_ERR PFX "qdev->rx_ring_shadow_reg_dma = %p.\n",
(void *)qdev->rx_ring_shadow_reg_dma);
printk(KERN_ERR PFX "qdev->tx_ring_shadow_reg_area = %p.\n",
qdev->tx_ring_shadow_reg_area);
printk(KERN_ERR PFX "qdev->tx_ring_shadow_reg_dma = %p.\n",
(void *)qdev->tx_ring_shadow_reg_dma);
printk(KERN_ERR PFX "qdev->intr_count = %d.\n",
qdev->intr_count);
if (qdev->msi_x_entry)
for (i = 0; i < qdev->intr_count; i++) {
printk(KERN_ERR PFX
"msi_x_entry.[%d]vector = %d.\n", i,
qdev->msi_x_entry[i].vector);
printk(KERN_ERR PFX
"msi_x_entry.[%d]entry = %d.\n", i,
qdev->msi_x_entry[i].entry);
}
for (i = 0; i < qdev->intr_count; i++) {
printk(KERN_ERR PFX
"intr_context[%d].qdev = %p.\n", i,
qdev->intr_context[i].qdev);
printk(KERN_ERR PFX
"intr_context[%d].intr = %d.\n", i,
qdev->intr_context[i].intr);
printk(KERN_ERR PFX
"intr_context[%d].hooked = %d.\n", i,
qdev->intr_context[i].hooked);
printk(KERN_ERR PFX
"intr_context[%d].intr_en_mask = 0x%08x.\n", i,
qdev->intr_context[i].intr_en_mask);
printk(KERN_ERR PFX
"intr_context[%d].intr_dis_mask = 0x%08x.\n", i,
qdev->intr_context[i].intr_dis_mask);
printk(KERN_ERR PFX
"intr_context[%d].intr_read_mask = 0x%08x.\n", i,
qdev->intr_context[i].intr_read_mask);
}
printk(KERN_ERR PFX "qdev->tx_ring_count = %d.\n", qdev->tx_ring_count);
printk(KERN_ERR PFX "qdev->rx_ring_count = %d.\n", qdev->rx_ring_count);
printk(KERN_ERR PFX "qdev->ring_mem_size = %d.\n", qdev->ring_mem_size);
printk(KERN_ERR PFX "qdev->ring_mem = %p.\n", qdev->ring_mem);
printk(KERN_ERR PFX "qdev->intr_count = %d.\n", qdev->intr_count);
printk(KERN_ERR PFX "qdev->tx_ring = %p.\n",
qdev->tx_ring);
printk(KERN_ERR PFX "qdev->rss_ring_first_cq_id = %d.\n",
qdev->rss_ring_first_cq_id);
printk(KERN_ERR PFX "qdev->rss_ring_count = %d.\n",
qdev->rss_ring_count);
printk(KERN_ERR PFX "qdev->rx_ring = %p.\n", qdev->rx_ring);
printk(KERN_ERR PFX "qdev->default_rx_queue = %d.\n",
qdev->default_rx_queue);
printk(KERN_ERR PFX "qdev->xg_sem_mask = 0x%08x.\n",
qdev->xg_sem_mask);
printk(KERN_ERR PFX "qdev->port_link_up = 0x%08x.\n",
qdev->port_link_up);
printk(KERN_ERR PFX "qdev->port_init = 0x%08x.\n",
qdev->port_init);
}
#endif
#ifdef QL_CB_DUMP
void ql_dump_wqicb(struct wqicb *wqicb)
{
printk(KERN_ERR PFX "Dumping wqicb stuff...\n");
printk(KERN_ERR PFX "wqicb->len = 0x%x.\n", le16_to_cpu(wqicb->len));
printk(KERN_ERR PFX "wqicb->flags = %x.\n", le16_to_cpu(wqicb->flags));
printk(KERN_ERR PFX "wqicb->cq_id_rss = %d.\n",
le16_to_cpu(wqicb->cq_id_rss));
printk(KERN_ERR PFX "wqicb->rid = 0x%x.\n", le16_to_cpu(wqicb->rid));
printk(KERN_ERR PFX "wqicb->wq_addr_lo = 0x%.08x.\n",
le32_to_cpu(wqicb->addr_lo));
printk(KERN_ERR PFX "wqicb->wq_addr_hi = 0x%.08x.\n",
le32_to_cpu(wqicb->addr_hi));
printk(KERN_ERR PFX "wqicb->wq_cnsmr_idx_addr_lo = 0x%.08x.\n",
le32_to_cpu(wqicb->cnsmr_idx_addr_lo));
printk(KERN_ERR PFX "wqicb->wq_cnsmr_idx_addr_hi = 0x%.08x.\n",
le32_to_cpu(wqicb->cnsmr_idx_addr_hi));
}
void ql_dump_tx_ring(struct tx_ring *tx_ring)
{
if (tx_ring == NULL)
return;
printk(KERN_ERR PFX
"===================== Dumping tx_ring %d ===============.\n",
tx_ring->wq_id);
printk(KERN_ERR PFX "tx_ring->base = %p.\n", tx_ring->wq_base);
printk(KERN_ERR PFX "tx_ring->base_dma = 0x%llx.\n",
(u64) tx_ring->wq_base_dma);
printk(KERN_ERR PFX "tx_ring->cnsmr_idx_sh_reg = %p.\n",
tx_ring->cnsmr_idx_sh_reg);
printk(KERN_ERR PFX "tx_ring->cnsmr_idx_sh_reg_dma = 0x%llx.\n",
(u64) tx_ring->cnsmr_idx_sh_reg_dma);
printk(KERN_ERR PFX "tx_ring->size = %d.\n", tx_ring->wq_size);
printk(KERN_ERR PFX "tx_ring->len = %d.\n", tx_ring->wq_len);
printk(KERN_ERR PFX "tx_ring->prod_idx_db_reg = %p.\n",
tx_ring->prod_idx_db_reg);
printk(KERN_ERR PFX "tx_ring->valid_db_reg = %p.\n",
tx_ring->valid_db_reg);
printk(KERN_ERR PFX "tx_ring->prod_idx = %d.\n", tx_ring->prod_idx);
printk(KERN_ERR PFX "tx_ring->cq_id = %d.\n", tx_ring->cq_id);
printk(KERN_ERR PFX "tx_ring->wq_id = %d.\n", tx_ring->wq_id);
printk(KERN_ERR PFX "tx_ring->q = %p.\n", tx_ring->q);
printk(KERN_ERR PFX "tx_ring->tx_count = %d.\n",
atomic_read(&tx_ring->tx_count));
}
void ql_dump_ricb(struct ricb *ricb)
{
int i;
printk(KERN_ERR PFX
"===================== Dumping ricb ===============.\n");
printk(KERN_ERR PFX "Dumping ricb stuff...\n");
printk(KERN_ERR PFX "ricb->base_cq = %d.\n", ricb->base_cq & 0x1f);
printk(KERN_ERR PFX "ricb->flags = %s%s%s%s%s%s%s%s%s.\n",
ricb->base_cq & RSS_L4K ? "RSS_L4K " : "",
ricb->flags & RSS_L6K ? "RSS_L6K " : "",
ricb->flags & RSS_LI ? "RSS_LI " : "",
ricb->flags & RSS_LB ? "RSS_LB " : "",
ricb->flags & RSS_LM ? "RSS_LM " : "",
ricb->flags & RSS_RI4 ? "RSS_RI4 " : "",
ricb->flags & RSS_RT4 ? "RSS_RT4 " : "",
ricb->flags & RSS_RI6 ? "RSS_RI6 " : "",
ricb->flags & RSS_RT6 ? "RSS_RT6 " : "");
printk(KERN_ERR PFX "ricb->mask = 0x%.04x.\n", le16_to_cpu(ricb->mask));
for (i = 0; i < 16; i++)
printk(KERN_ERR PFX "ricb->hash_cq_id[%d] = 0x%.08x.\n", i,
le32_to_cpu(ricb->hash_cq_id[i]));
for (i = 0; i < 10; i++)
printk(KERN_ERR PFX "ricb->ipv6_hash_key[%d] = 0x%.08x.\n", i,
le32_to_cpu(ricb->ipv6_hash_key[i]));
for (i = 0; i < 4; i++)
printk(KERN_ERR PFX "ricb->ipv4_hash_key[%d] = 0x%.08x.\n", i,
le32_to_cpu(ricb->ipv4_hash_key[i]));
}
void ql_dump_cqicb(struct cqicb *cqicb)
{
printk(KERN_ERR PFX "Dumping cqicb stuff...\n");
printk(KERN_ERR PFX "cqicb->msix_vect = %d.\n", cqicb->msix_vect);
printk(KERN_ERR PFX "cqicb->flags = %x.\n", cqicb->flags);
printk(KERN_ERR PFX "cqicb->len = %d.\n", le16_to_cpu(cqicb->len));
printk(KERN_ERR PFX "cqicb->addr_lo = %x.\n",
le32_to_cpu(cqicb->addr_lo));
printk(KERN_ERR PFX "cqicb->addr_hi = %x.\n",
le32_to_cpu(cqicb->addr_hi));
printk(KERN_ERR PFX "cqicb->prod_idx_addr_lo = %x.\n",
le32_to_cpu(cqicb->prod_idx_addr_lo));
printk(KERN_ERR PFX "cqicb->prod_idx_addr_hi = %x.\n",
le32_to_cpu(cqicb->prod_idx_addr_hi));
printk(KERN_ERR PFX "cqicb->pkt_delay = 0x%.04x.\n",
le16_to_cpu(cqicb->pkt_delay));
printk(KERN_ERR PFX "cqicb->irq_delay = 0x%.04x.\n",
le16_to_cpu(cqicb->irq_delay));
printk(KERN_ERR PFX "cqicb->lbq_addr_lo = %x.\n",
le32_to_cpu(cqicb->lbq_addr_lo));
printk(KERN_ERR PFX "cqicb->lbq_addr_hi = %x.\n",
le32_to_cpu(cqicb->lbq_addr_hi));
printk(KERN_ERR PFX "cqicb->lbq_buf_size = 0x%.04x.\n",
le16_to_cpu(cqicb->lbq_buf_size));
printk(KERN_ERR PFX "cqicb->lbq_len = 0x%.04x.\n",
le16_to_cpu(cqicb->lbq_len));
printk(KERN_ERR PFX "cqicb->sbq_addr_lo = %x.\n",
le32_to_cpu(cqicb->sbq_addr_lo));
printk(KERN_ERR PFX "cqicb->sbq_addr_hi = %x.\n",
le32_to_cpu(cqicb->sbq_addr_hi));
printk(KERN_ERR PFX "cqicb->sbq_buf_size = 0x%.04x.\n",
le16_to_cpu(cqicb->sbq_buf_size));
printk(KERN_ERR PFX "cqicb->sbq_len = 0x%.04x.\n",
le16_to_cpu(cqicb->sbq_len));
}
void ql_dump_rx_ring(struct rx_ring *rx_ring)
{
if (rx_ring == NULL)
return;
printk(KERN_ERR PFX
"===================== Dumping rx_ring %d ===============.\n",
rx_ring->cq_id);
printk(KERN_ERR PFX "Dumping rx_ring %d, type = %s%s%s.\n",
rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "",
rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "",
rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : "");
printk(KERN_ERR PFX "rx_ring->cqicb = %p.\n", &rx_ring->cqicb);
printk(KERN_ERR PFX "rx_ring->cq_base = %p.\n", rx_ring->cq_base);
printk(KERN_ERR PFX "rx_ring->cq_base_dma = %llx.\n",
(u64) rx_ring->cq_base_dma);
printk(KERN_ERR PFX "rx_ring->cq_size = %d.\n", rx_ring->cq_size);
printk(KERN_ERR PFX "rx_ring->cq_len = %d.\n", rx_ring->cq_len);
printk(KERN_ERR PFX
"rx_ring->prod_idx_sh_reg, addr = %p, value = %d.\n",
rx_ring->prod_idx_sh_reg,
rx_ring->prod_idx_sh_reg ? *(rx_ring->prod_idx_sh_reg) : 0);
printk(KERN_ERR PFX "rx_ring->prod_idx_sh_reg_dma = %llx.\n",
(u64) rx_ring->prod_idx_sh_reg_dma);
printk(KERN_ERR PFX "rx_ring->cnsmr_idx_db_reg = %p.\n",
rx_ring->cnsmr_idx_db_reg);
printk(KERN_ERR PFX "rx_ring->cnsmr_idx = %d.\n", rx_ring->cnsmr_idx);
printk(KERN_ERR PFX "rx_ring->curr_entry = %p.\n", rx_ring->curr_entry);
printk(KERN_ERR PFX "rx_ring->valid_db_reg = %p.\n",
rx_ring->valid_db_reg);
printk(KERN_ERR PFX "rx_ring->lbq_base = %p.\n", rx_ring->lbq_base);
printk(KERN_ERR PFX "rx_ring->lbq_base_dma = %llx.\n",
(u64) rx_ring->lbq_base_dma);
printk(KERN_ERR PFX "rx_ring->lbq_base_indirect = %p.\n",
rx_ring->lbq_base_indirect);
printk(KERN_ERR PFX "rx_ring->lbq_base_indirect_dma = %llx.\n",
(u64) rx_ring->lbq_base_indirect_dma);
printk(KERN_ERR PFX "rx_ring->lbq = %p.\n", rx_ring->lbq);
printk(KERN_ERR PFX "rx_ring->lbq_len = %d.\n", rx_ring->lbq_len);
printk(KERN_ERR PFX "rx_ring->lbq_size = %d.\n", rx_ring->lbq_size);
printk(KERN_ERR PFX "rx_ring->lbq_prod_idx_db_reg = %p.\n",
rx_ring->lbq_prod_idx_db_reg);
printk(KERN_ERR PFX "rx_ring->lbq_prod_idx = %d.\n",
rx_ring->lbq_prod_idx);
printk(KERN_ERR PFX "rx_ring->lbq_curr_idx = %d.\n",
rx_ring->lbq_curr_idx);
printk(KERN_ERR PFX "rx_ring->lbq_clean_idx = %d.\n",
rx_ring->lbq_clean_idx);
printk(KERN_ERR PFX "rx_ring->lbq_free_cnt = %d.\n",
rx_ring->lbq_free_cnt);
printk(KERN_ERR PFX "rx_ring->lbq_buf_size = %d.\n",
rx_ring->lbq_buf_size);
printk(KERN_ERR PFX "rx_ring->sbq_base = %p.\n", rx_ring->sbq_base);
printk(KERN_ERR PFX "rx_ring->sbq_base_dma = %llx.\n",
(u64) rx_ring->sbq_base_dma);
printk(KERN_ERR PFX "rx_ring->sbq_base_indirect = %p.\n",
rx_ring->sbq_base_indirect);
printk(KERN_ERR PFX "rx_ring->sbq_base_indirect_dma = %llx.\n",
(u64) rx_ring->sbq_base_indirect_dma);
printk(KERN_ERR PFX "rx_ring->sbq = %p.\n", rx_ring->sbq);
printk(KERN_ERR PFX "rx_ring->sbq_len = %d.\n", rx_ring->sbq_len);
printk(KERN_ERR PFX "rx_ring->sbq_size = %d.\n", rx_ring->sbq_size);
printk(KERN_ERR PFX "rx_ring->sbq_prod_idx_db_reg addr = %p.\n",
rx_ring->sbq_prod_idx_db_reg);
printk(KERN_ERR PFX "rx_ring->sbq_prod_idx = %d.\n",
rx_ring->sbq_prod_idx);
printk(KERN_ERR PFX "rx_ring->sbq_curr_idx = %d.\n",
rx_ring->sbq_curr_idx);
printk(KERN_ERR PFX "rx_ring->sbq_clean_idx = %d.\n",
rx_ring->sbq_clean_idx);
printk(KERN_ERR PFX "rx_ring->sbq_free_cnt = %d.\n",
rx_ring->sbq_free_cnt);
printk(KERN_ERR PFX "rx_ring->sbq_buf_size = %d.\n",
rx_ring->sbq_buf_size);
printk(KERN_ERR PFX "rx_ring->cq_id = %d.\n", rx_ring->cq_id);
printk(KERN_ERR PFX "rx_ring->irq = %d.\n", rx_ring->irq);
printk(KERN_ERR PFX "rx_ring->cpu = %d.\n", rx_ring->cpu);
printk(KERN_ERR PFX "rx_ring->qdev = %p.\n", rx_ring->qdev);
}
void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
{
void *ptr;
printk(KERN_ERR PFX "%s: Enter.\n", __func__);
ptr = kmalloc(size, GFP_ATOMIC);
if (ptr == NULL) {
printk(KERN_ERR PFX "%s: Couldn't allocate a buffer.\n",
__func__);
return;
}
if (ql_write_cfg(qdev, ptr, size, bit, q_id)) {
printk(KERN_ERR "%s: Failed to upload control block!\n",
__func__);
goto fail_it;
}
switch (bit) {
case CFG_DRQ:
ql_dump_wqicb((struct wqicb *)ptr);
break;
case CFG_DCQ:
ql_dump_cqicb((struct cqicb *)ptr);
break;
case CFG_DR:
ql_dump_ricb((struct ricb *)ptr);
break;
default:
printk(KERN_ERR PFX "%s: Invalid bit value = %x.\n",
__func__, bit);
break;
}
fail_it:
kfree(ptr);
}
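A hedged example of invoking this dump (hypothetical call site; CFG_DRQ selects the work-queue control-block type handled in the switch above):

static inline void example_dump_tx_cb(struct ql_adapter *qdev,
				      struct tx_ring *tx_ring)
{
	ql_dump_hw_cb(qdev, sizeof(struct wqicb), CFG_DRQ, tx_ring->wq_id);
}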
#endif
#ifdef QL_OB_DUMP
void ql_dump_tx_desc(struct tx_buf_desc *tbd)
{
int i;
/* Dump the first three buffer descriptors of the IOCB.  The len field
 * is little-endian and also carries the C/E flag bits, so convert it
 * before masking. */
for (i = 0; i < 3; i++, tbd++) {
printk(KERN_ERR PFX "tbd->addr = 0x%llx\n",
le64_to_cpu((u64) tbd->addr));
printk(KERN_ERR PFX "tbd->len = %d\n",
le32_to_cpu(tbd->len) & TX_DESC_LEN_MASK);
printk(KERN_ERR PFX "tbd->flags = %s %s\n",
le32_to_cpu(tbd->len) & TX_DESC_C ? "C" : ".",
le32_to_cpu(tbd->len) & TX_DESC_E ? "E" : ".");
}
}
void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
{
struct ob_mac_tso_iocb_req *ob_mac_tso_iocb =
(struct ob_mac_tso_iocb_req *)ob_mac_iocb;
struct tx_buf_desc *tbd;
u16 frame_len;
printk(KERN_ERR PFX "%s\n", __func__);
printk(KERN_ERR PFX "opcode = %s\n",
(ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO");
printk(KERN_ERR PFX "flags1 = %s %s %s %s %s\n",
ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "",
ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "",
ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "",
ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "",
ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : "");
printk(KERN_ERR PFX "flags2 = %s %s %s\n",
ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "",
ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "",
ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : "");
printk(KERN_ERR PFX "flags3 = %s %s %s \n",
ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "",
ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "",
ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : "");
printk(KERN_ERR PFX "tid = %x\n", ob_mac_iocb->tid);
printk(KERN_ERR PFX "txq_idx = %d\n", ob_mac_iocb->txq_idx);
printk(KERN_ERR PFX "vlan_tci = %x\n", ob_mac_tso_iocb->vlan_tci);
if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) {
printk(KERN_ERR PFX "frame_len = %d\n",
le32_to_cpu(ob_mac_tso_iocb->frame_len));
printk(KERN_ERR PFX "mss = %d\n",
le16_to_cpu(ob_mac_tso_iocb->mss));
printk(KERN_ERR PFX "prot_hdr_len = %d\n",
le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len));
printk(KERN_ERR PFX "hdr_offset = 0x%.04x\n",
le16_to_cpu(ob_mac_tso_iocb->net_trans_offset));
frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len);
} else {
printk(KERN_ERR PFX "frame_len = %d\n",
le16_to_cpu(ob_mac_iocb->frame_len));
frame_len = le16_to_cpu(ob_mac_iocb->frame_len);
}
tbd = &ob_mac_iocb->tbd[0];
ql_dump_tx_desc(tbd);
}
void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp)
{
printk(KERN_ERR PFX "%s\n", __func__);
printk(KERN_ERR PFX "opcode = %d\n", ob_mac_rsp->opcode);
printk(KERN_ERR PFX "flags = %s %s %s %s %s %s %s\n",
ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? "OI" : ".",
ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".",
ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".",
ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_S ? "S" : ".",
ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".",
ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".",
ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : ".");
printk(KERN_ERR PFX "tid = %x\n", ob_mac_rsp->tid);
}
#endif
#ifdef QL_IB_DUMP
void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
{
printk(KERN_ERR PFX "%s\n", __func__);
printk(KERN_ERR PFX "opcode = 0x%x\n", ib_mac_rsp->opcode);
printk(KERN_ERR PFX "flags1 = %s%s%s%s%s%s\n",
ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "",
ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "",
ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "",
ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU ? "NU " : "",
ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_IE ? "IE " : "",
ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? "B " : "");
if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK)
printk(KERN_ERR PFX "%s%s%s Multicast.\n",
(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
printk(KERN_ERR PFX "flags2 = %s%s%s%s%s\n",
(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "",
(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "",
(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "",
(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ? "T " : "",
(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : "");
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK)
printk(KERN_ERR PFX "%s%s%s%s%s error.\n",
(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "",
(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
IB_MAC_IOCB_RSP_ERR_UNDERSIZE ? "undersize" : "",
(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
IB_MAC_IOCB_RSP_ERR_PREAMBLE ? "preamble" : "",
(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
IB_MAC_IOCB_RSP_ERR_FRAME_LEN ? "frame length" : "",
(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : "");
printk(KERN_ERR PFX "flags3 = %s%s.\n",
ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "",
ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : "");
if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
printk(KERN_ERR PFX "RSS flags = %s%s%s%s.\n",
((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "",
((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
IB_MAC_IOCB_RSP_M_IPV6) ? "IPv6 RSS " : "",
((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
IB_MAC_IOCB_RSP_M_TCP_V4) ? "TCP/IPv4 RSS" : "",
((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : "");
printk(KERN_ERR PFX "data_len = %d\n",
le32_to_cpu(ib_mac_rsp->data_len));
printk(KERN_ERR PFX "data_addr_hi = 0x%x\n",
le32_to_cpu(ib_mac_rsp->data_addr_hi));
printk(KERN_ERR PFX "data_addr_lo = 0x%x\n",
le32_to_cpu(ib_mac_rsp->data_addr_lo));
if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
printk(KERN_ERR PFX "rss = %x\n",
le32_to_cpu(ib_mac_rsp->rss));
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)
printk(KERN_ERR PFX "vlan_id = %x\n",
le16_to_cpu(ib_mac_rsp->vlan_id));
printk(KERN_ERR PFX "flags4 = %s%s%s.\n",
le32_to_cpu(ib_mac_rsp->
flags4) & IB_MAC_IOCB_RSP_HV ? "HV " : "",
le32_to_cpu(ib_mac_rsp->
flags4) & IB_MAC_IOCB_RSP_HS ? "HS " : "",
le32_to_cpu(ib_mac_rsp->
flags4) & IB_MAC_IOCB_RSP_HL ? "HL " : "");
if (le32_to_cpu(ib_mac_rsp->flags4) & IB_MAC_IOCB_RSP_HV) {
printk(KERN_ERR PFX "hdr length = %d.\n",
le32_to_cpu(ib_mac_rsp->hdr_len));
printk(KERN_ERR PFX "hdr addr_hi = 0x%x.\n",
le32_to_cpu(ib_mac_rsp->hdr_addr_hi));
printk(KERN_ERR PFX "hdr addr_lo = 0x%x.\n",
le32_to_cpu(ib_mac_rsp->hdr_addr_lo));
}
}
#endif
#ifdef QL_ALL_DUMP
void ql_dump_all(struct ql_adapter *qdev)
{
int i;
QL_DUMP_REGS(qdev);
QL_DUMP_QDEV(qdev);
for (i = 0; i < qdev->tx_ring_count; i++) {
QL_DUMP_TX_RING(&qdev->tx_ring[i]);
QL_DUMP_WQICB((struct wqicb *)&qdev->tx_ring[i]);
}
for (i = 0; i < qdev->rx_ring_count; i++) {
QL_DUMP_RX_RING(&qdev->rx_ring[i]);
QL_DUMP_CQICB((struct cqicb *)&qdev->rx_ring[i]);
}
}
#endif

415
drivers/net/qlge/qlge_ethtool.c Normal file
View File

@ -0,0 +1,415 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/version.h>
#include "qlge.h"
static int ql_update_ring_coalescing(struct ql_adapter *qdev)
{
int i, status = 0;
struct rx_ring *rx_ring;
struct cqicb *cqicb;
if (!netif_running(qdev->ndev))
return status;
spin_lock(&qdev->hw_lock);
/* Skip the default queue, and update the outbound handler
* queues if they changed.
*/
cqicb = (struct cqicb *)&qdev->rx_ring[1];
if (le16_to_cpu(cqicb->irq_delay) != qdev->tx_coalesce_usecs ||
le16_to_cpu(cqicb->pkt_delay) != qdev->tx_max_coalesced_frames) {
for (i = 1; i < qdev->rss_ring_first_cq_id; i++) {
rx_ring = &qdev->rx_ring[i];
cqicb = (struct cqicb *)rx_ring;
cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
cqicb->pkt_delay =
cpu_to_le16(qdev->tx_max_coalesced_frames);
cqicb->flags = FLAGS_LI;
status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
CFG_LCQ, rx_ring->cq_id);
if (status) {
QPRINTK(qdev, IFUP, ERR,
"Failed to load CQICB.\n");
goto exit;
}
}
}
/* Update the inbound (RSS) handler queues if they changed. */
cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_first_cq_id];
if (le16_to_cpu(cqicb->irq_delay) != qdev->rx_coalesce_usecs ||
le16_to_cpu(cqicb->pkt_delay) != qdev->rx_max_coalesced_frames) {
for (i = qdev->rss_ring_first_cq_id;
i < qdev->rss_ring_first_cq_id + qdev->rss_ring_count;
i++) {
rx_ring = &qdev->rx_ring[i];
cqicb = (struct cqicb *)rx_ring;
cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
cqicb->pkt_delay =
cpu_to_le16(qdev->rx_max_coalesced_frames);
cqicb->flags = FLAGS_LI;
status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
CFG_LCQ, rx_ring->cq_id);
if (status) {
QPRINTK(qdev, IFUP, ERR,
"Failed to load CQICB.\n");
goto exit;
}
}
}
exit:
spin_unlock(&qdev->hw_lock);
return status;
}
void ql_update_stats(struct ql_adapter *qdev)
{
u32 i;
u64 data;
u64 *iter = &qdev->nic_stats.tx_pkts;
spin_lock(&qdev->stats_lock);
if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
QPRINTK(qdev, DRV, ERR,
"Couldn't get xgmac sem.\n");
goto quit;
}
/*
* Get TX statistics.
*/
for (i = 0x200; i < 0x280; i += 8) {
if (ql_read_xgmac_reg64(qdev, i, &data)) {
QPRINTK(qdev, DRV, ERR,
"Error reading status register 0x%.04x.\n", i);
goto end;
} else
*iter = data;
iter++;
}
/*
* Get RX statistics.
*/
for (i = 0x300; i < 0x3d0; i += 8) {
if (ql_read_xgmac_reg64(qdev, i, &data)) {
QPRINTK(qdev, DRV, ERR,
"Error reading status register 0x%.04x.\n", i);
goto end;
} else
*iter = data;
iter++;
}
end:
ql_sem_unlock(qdev, qdev->xg_sem_mask);
quit:
spin_unlock(&qdev->stats_lock);
QL_DUMP_STAT(qdev);
return;
}
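The loop bounds above encode the counter layout: (0x280 - 0x200) / 8 = 16 TX counters and (0x3d0 - 0x300) / 8 = 26 RX counters, exactly the number of tx_* and rx_* u64 fields that 'iter' walks. A hedged compile-time cross-check, assuming struct nic_stats packs those counters contiguously starting at tx_pkts (illustrative only; it would need to sit inside a function):

/* Not part of the driver; sanity-checks the 16-counter TX block. */
BUILD_BUG_ON(offsetof(struct nic_stats, rx_bytes) !=
	     offsetof(struct nic_stats, tx_pkts) + 16 * sizeof(u64));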
static char ql_stats_str_arr[][ETH_GSTRING_LEN] = {
{"tx_pkts"},
{"tx_bytes"},
{"tx_mcast_pkts"},
{"tx_bcast_pkts"},
{"tx_ucast_pkts"},
{"tx_ctl_pkts"},
{"tx_pause_pkts"},
{"tx_64_pkts"},
{"tx_65_to_127_pkts"},
{"tx_128_to_255_pkts"},
{"tx_256_511_pkts"},
{"tx_512_to_1023_pkts"},
{"tx_1024_to_1518_pkts"},
{"tx_1519_to_max_pkts"},
{"tx_undersize_pkts"},
{"tx_oversize_pkts"},
{"rx_bytes"},
{"rx_bytes_ok"},
{"rx_pkts"},
{"rx_pkts_ok"},
{"rx_bcast_pkts"},
{"rx_mcast_pkts"},
{"rx_ucast_pkts"},
{"rx_undersize_pkts"},
{"rx_oversize_pkts"},
{"rx_jabber_pkts"},
{"rx_undersize_fcerr_pkts"},
{"rx_drop_events"},
{"rx_fcerr_pkts"},
{"rx_align_err"},
{"rx_symbol_err"},
{"rx_mac_err"},
{"rx_ctl_pkts"},
{"rx_pause_pkts"},
{"rx_64_pkts"},
{"rx_65_to_127_pkts"},
{"rx_128_255_pkts"},
{"rx_256_511_pkts"},
{"rx_512_to_1023_pkts"},
{"rx_1024_to_1518_pkts"},
{"rx_1519_to_max_pkts"},
{"rx_len_err_pkts"},
};
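/* NB: ql_get_ethtool_stats() below must copy counters out in exactly
 * this order; keep the two tables in sync. */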
static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
switch (stringset) {
case ETH_SS_STATS:
memcpy(buf, ql_stats_str_arr, sizeof(ql_stats_str_arr));
break;
}
}
static int ql_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
return ARRAY_SIZE(ql_stats_str_arr);
default:
return -EOPNOTSUPP;
}
}
static void
ql_get_ethtool_stats(struct net_device *ndev,
struct ethtool_stats *stats, u64 *data)
{
struct ql_adapter *qdev = netdev_priv(ndev);
struct nic_stats *s = &qdev->nic_stats;
ql_update_stats(qdev);
*data++ = s->tx_pkts;
*data++ = s->tx_bytes;
*data++ = s->tx_mcast_pkts;
*data++ = s->tx_bcast_pkts;
*data++ = s->tx_ucast_pkts;
*data++ = s->tx_ctl_pkts;
*data++ = s->tx_pause_pkts;
*data++ = s->tx_64_pkt;
*data++ = s->tx_65_to_127_pkt;
*data++ = s->tx_128_to_255_pkt;
*data++ = s->tx_256_511_pkt;
*data++ = s->tx_512_to_1023_pkt;
*data++ = s->tx_1024_to_1518_pkt;
*data++ = s->tx_1519_to_max_pkt;
*data++ = s->tx_undersize_pkt;
*data++ = s->tx_oversize_pkt;
*data++ = s->rx_bytes;
*data++ = s->rx_bytes_ok;
*data++ = s->rx_pkts;
*data++ = s->rx_pkts_ok;
*data++ = s->rx_bcast_pkts;
*data++ = s->rx_mcast_pkts;
*data++ = s->rx_ucast_pkts;
*data++ = s->rx_undersize_pkts;
*data++ = s->rx_oversize_pkts;
*data++ = s->rx_jabber_pkts;
*data++ = s->rx_undersize_fcerr_pkts;
*data++ = s->rx_drop_events;
*data++ = s->rx_fcerr_pkts;
*data++ = s->rx_align_err;
*data++ = s->rx_symbol_err;
*data++ = s->rx_mac_err;
*data++ = s->rx_ctl_pkts;
*data++ = s->rx_pause_pkts;
*data++ = s->rx_64_pkts;
*data++ = s->rx_65_to_127_pkts;
*data++ = s->rx_128_255_pkts;
*data++ = s->rx_256_511_pkts;
*data++ = s->rx_512_to_1023_pkts;
*data++ = s->rx_1024_to_1518_pkts;
*data++ = s->rx_1519_to_max_pkts;
*data++ = s->rx_len_err_pkts;
}
static int ql_get_settings(struct net_device *ndev,
struct ethtool_cmd *ecmd)
{
struct ql_adapter *qdev = netdev_priv(ndev);
ecmd->supported = SUPPORTED_10000baseT_Full;
ecmd->advertising = ADVERTISED_10000baseT_Full;
ecmd->autoneg = AUTONEG_ENABLE;
ecmd->transceiver = XCVR_EXTERNAL;
if ((qdev->link_status & LINK_TYPE_MASK) == LINK_TYPE_10GBASET) {
ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg);
ecmd->port = PORT_TP;
} else {
ecmd->supported |= SUPPORTED_FIBRE;
ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_FIBRE;
}
ecmd->speed = SPEED_10000;
ecmd->duplex = DUPLEX_FULL;
return 0;
}
static void ql_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *drvinfo)
{
struct ql_adapter *qdev = netdev_priv(ndev);
strncpy(drvinfo->driver, qlge_driver_name, 32);
strncpy(drvinfo->version, qlge_driver_version, 32);
strncpy(drvinfo->fw_version, "N/A", 32);
strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
drvinfo->n_stats = 0;
drvinfo->testinfo_len = 0;
drvinfo->regdump_len = 0;
drvinfo->eedump_len = 0;
}
static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
struct ql_adapter *qdev = netdev_priv(dev);
c->rx_coalesce_usecs = qdev->rx_coalesce_usecs;
c->tx_coalesce_usecs = qdev->tx_coalesce_usecs;
/* This chip coalesces as follows:
* If a packet arrives, hold off interrupts until
* cqicb->int_delay expires, but if no other packets arrive don't
* wait longer than cqicb->pkt_int_delay. But ethtool doesn't use a
* timer to coalesce on a frame basis. So, we have to take ethtool's
* max_coalesced_frames value and convert it to a delay in microseconds.
* We do this by using a basic throughput of 1,000,000 frames per
* second @ (1024 bytes). This means one frame per usec. So it's a
* simple one to one ratio.
*/
c->rx_max_coalesced_frames = qdev->rx_max_coalesced_frames;
c->tx_max_coalesced_frames = qdev->tx_max_coalesced_frames;
return 0;
}
static int ql_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c)
{
struct ql_adapter *qdev = netdev_priv(ndev);
/* Validate user parameters. */
if (c->rx_coalesce_usecs > qdev->rx_ring_size / 2)
return -EINVAL;
/* Don't wait more than 10 usec. */
if (c->rx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
return -EINVAL;
if (c->tx_coalesce_usecs > qdev->tx_ring_size / 2)
return -EINVAL;
if (c->tx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
return -EINVAL;
/* Verify a change took place before updating the hardware. */
if (qdev->rx_coalesce_usecs == c->rx_coalesce_usecs &&
qdev->tx_coalesce_usecs == c->tx_coalesce_usecs &&
qdev->rx_max_coalesced_frames == c->rx_max_coalesced_frames &&
qdev->tx_max_coalesced_frames == c->tx_max_coalesced_frames)
return 0;
qdev->rx_coalesce_usecs = c->rx_coalesce_usecs;
qdev->tx_coalesce_usecs = c->tx_coalesce_usecs;
qdev->rx_max_coalesced_frames = c->rx_max_coalesced_frames;
qdev->tx_max_coalesced_frames = c->tx_max_coalesced_frames;
return ql_update_ring_coalescing(qdev);
}
static u32 ql_get_rx_csum(struct net_device *netdev)
{
struct ql_adapter *qdev = netdev_priv(netdev);
return qdev->rx_csum;
}
static int ql_set_rx_csum(struct net_device *netdev, uint32_t data)
{
struct ql_adapter *qdev = netdev_priv(netdev);
qdev->rx_csum = data;
return 0;
}
static int ql_set_tso(struct net_device *ndev, uint32_t data)
{
if (data) {
ndev->features |= NETIF_F_TSO;
ndev->features |= NETIF_F_TSO6;
} else {
ndev->features &= ~NETIF_F_TSO;
ndev->features &= ~NETIF_F_TSO6;
}
return 0;
}
static u32 ql_get_msglevel(struct net_device *ndev)
{
struct ql_adapter *qdev = netdev_priv(ndev);
return qdev->msg_enable;
}
static void ql_set_msglevel(struct net_device *ndev, u32 value)
{
struct ql_adapter *qdev = netdev_priv(ndev);
qdev->msg_enable = value;
}
const struct ethtool_ops qlge_ethtool_ops = {
.get_settings = ql_get_settings,
.get_drvinfo = ql_get_drvinfo,
.get_msglevel = ql_get_msglevel,
.set_msglevel = ql_set_msglevel,
.get_link = ethtool_op_get_link,
.get_rx_csum = ql_get_rx_csum,
.set_rx_csum = ql_set_rx_csum,
.get_tx_csum = ethtool_op_get_tx_csum,
.get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
.get_tso = ethtool_op_get_tso,
.set_tso = ql_set_tso,
.get_coalesce = ql_get_coalesce,
.set_coalesce = ql_set_coalesce,
.get_sset_count = ql_get_sset_count,
.get_strings = ql_get_strings,
.get_ethtool_stats = ql_get_ethtool_stats,
};
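The hookup of this ops table is not visible here (qlge_main.c is suppressed above); a hedged sketch of the usual probe-time attachment for kernels of this era:

/* Hypothetical: called from the PCI probe path in qlge_main.c. */
static void example_attach_ethtool(struct net_device *ndev)
{
	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
}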

3954
drivers/net/qlge/qlge_main.c Normal file

File diff suppressed because it is too large.

150
drivers/net/qlge/qlge_mpi.c Normal file
View File

@ -0,0 +1,150 @@
#include "qlge.h"
static int ql_read_mbox_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
int status;
/* wait for reg to come ready */
status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
if (status)
goto exit;
/* set up for reg read */
ql_write32(qdev, PROC_ADDR, reg | PROC_ADDR_R);
/* wait for reg to come ready */
status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
if (status)
goto exit;
/* get the data */
*data = ql_read32(qdev, PROC_DATA);
exit:
return status;
}
int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
{
int i, status;
status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
if (status)
return -EBUSY;
for (i = 0; i < mbcp->out_count; i++) {
status =
ql_read_mbox_reg(qdev, qdev->mailbox_out + i,
&mbcp->mbox_out[i]);
if (status) {
QPRINTK(qdev, DRV, ERR, "Failed mailbox read.\n");
break;
}
}
ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
return status;
}
static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
{
mbcp->out_count = 2;
if (ql_get_mb_sts(qdev, mbcp))
goto exit;
qdev->link_status = mbcp->mbox_out[1];
QPRINTK(qdev, DRV, ERR, "Link Up.\n");
QPRINTK(qdev, DRV, INFO, "Link Status = 0x%.08x.\n", mbcp->mbox_out[1]);
if (!netif_carrier_ok(qdev->ndev)) {
QPRINTK(qdev, LINK, INFO, "Link is Up.\n");
netif_carrier_on(qdev->ndev);
netif_wake_queue(qdev->ndev);
}
exit:
/* Clear the MPI firmware status. */
ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
}
static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp)
{
mbcp->out_count = 3;
if (ql_get_mb_sts(qdev, mbcp)) {
QPRINTK(qdev, DRV, ERR, "Firmware did not initialize!\n");
goto exit;
}
if (netif_carrier_ok(qdev->ndev)) {
QPRINTK(qdev, LINK, INFO, "Link is Down.\n");
netif_carrier_off(qdev->ndev);
netif_stop_queue(qdev->ndev);
}
QPRINTK(qdev, DRV, ERR, "Link Down.\n");
QPRINTK(qdev, DRV, ERR, "Link Status = 0x%.08x.\n", mbcp->mbox_out[1]);
exit:
/* Clear the MPI firmware status. */
ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
}
static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp)
{
mbcp->out_count = 2;
if (ql_get_mb_sts(qdev, mbcp)) {
QPRINTK(qdev, DRV, ERR, "Firmware did not initialize!\n");
goto exit;
}
QPRINTK(qdev, DRV, ERR, "Firmware initialized!\n");
QPRINTK(qdev, DRV, ERR, "Firmware status = 0x%.08x.\n",
mbcp->mbox_out[0]);
QPRINTK(qdev, DRV, ERR, "Firmware Revision = 0x%.08x.\n",
mbcp->mbox_out[1]);
exit:
/* Clear the MPI firmware status. */
ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
}
void ql_mpi_work(struct work_struct *work)
{
struct ql_adapter *qdev =
container_of(work, struct ql_adapter, mpi_work.work);
struct mbox_params mbc;
struct mbox_params *mbcp = &mbc;
mbcp->out_count = 1;
while (ql_read32(qdev, STS) & STS_PI) {
if (ql_get_mb_sts(qdev, mbcp)) {
QPRINTK(qdev, DRV, ERR,
"Could not read MPI, resetting ASIC!\n");
ql_queue_asic_error(qdev);
}
switch (mbcp->mbox_out[0]) {
case AEN_LINK_UP:
ql_link_up(qdev, mbcp);
break;
case AEN_LINK_DOWN:
ql_link_down(qdev, mbcp);
break;
case AEN_FW_INIT_DONE:
ql_init_fw_done(qdev, mbcp);
break;
case MB_CMD_STS_GOOD:
break;
case AEN_FW_INIT_FAIL:
case AEN_SYS_ERR:
case MB_CMD_STS_ERR:
ql_queue_fw_error(qdev);
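/* Fall through so the MPI firmware status is cleared below. */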
default:
/* Clear the MPI firmware status. */
ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
break;
}
}
ql_enable_completion_interrupt(qdev, 0);
}
void ql_mpi_reset_work(struct work_struct *work)
{
struct ql_adapter *qdev =
container_of(work, struct ql_adapter, mpi_reset_work.work);
QPRINTK(qdev, DRV, ERR,
"Enter, qdev = %p..\n", qdev);
ql_write32(qdev, CSR, CSR_CMD_SET_RST);
msleep(50);
ql_write32(qdev, CSR, CSR_CMD_CLR_RST);
}
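The arming of these work items happens in qlge_main.c (suppressed above); as a hedged sketch, the interrupt handler would typically defer MPI processing like this, where qdev->workqueue is an assumption about the driver's private workqueue:

/* Hypothetical kick from the ISR when STS_PI is set. */
static inline void example_kick_mpi_work(struct ql_adapter *qdev)
{
	/* mpi_work is the delayed_work used in container_of() above. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_work, 0);
}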

include/linux/pci_ids.h
View File

@ -1411,6 +1411,8 @@
#define PCI_DEVICE_ID_EICON_MAESTRAQ_U 0xe013
#define PCI_DEVICE_ID_EICON_MAESTRAP 0xe014
#define PCI_VENDOR_ID_CISCO 0x1137
#define PCI_VENDOR_ID_ZIATECH 0x1138
#define PCI_DEVICE_ID_ZIATECH_5550_HC 0x5550
@ -2213,6 +2215,7 @@
#define PCI_VENDOR_ID_ATTANSIC 0x1969
#define PCI_DEVICE_ID_ATTANSIC_L1 0x1048
#define PCI_DEVICE_ID_ATTANSIC_L2 0x2048
#define PCI_VENDOR_ID_JMICRON 0x197B
#define PCI_DEVICE_ID_JMICRON_JMB360 0x2360