commit 1561747ddf
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/jkirsher/net-next-2.6
@@ -64,7 +64,14 @@ static s32 igb_reset_init_script_82575(struct e1000_hw *);
static s32 igb_read_mac_addr_82575(struct e1000_hw *);
static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw);
static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw);

static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw);
static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw,
u16 offset);
static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
u16 offset);
static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
static const u16 e1000_82580_rxpbs_table[] =
{ 36, 72, 144, 1, 2, 4, 8, 16,
35, 70, 140 };

@@ -195,7 +202,11 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
mac->arc_subsystem_valid =
(rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
? true : false;

/* enable EEE on i350 parts */
if (mac->type == e1000_i350)
dev_spec->eee_disable = false;
else
dev_spec->eee_disable = true;
/* physical interface link setup */
mac->ops.setup_physical_interface =
(hw->phy.media_type == e1000_media_type_copper)

@@ -233,10 +244,32 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
*/
size += NVM_WORD_SIZE_BASE_SHIFT;

/* EEPROM access above 16k is unsupported */
if (size > 14)
size = 14;
nvm->word_size = 1 << size;
if (nvm->word_size == (1 << 15))
nvm->page_size = 128;

/* NVM Function Pointers */
nvm->ops.acquire = igb_acquire_nvm_82575;
if (nvm->word_size < (1 << 15))
nvm->ops.read = igb_read_nvm_eerd;
else
nvm->ops.read = igb_read_nvm_spi;

nvm->ops.release = igb_release_nvm_82575;
switch (hw->mac.type) {
case e1000_82580:
nvm->ops.validate = igb_validate_nvm_checksum_82580;
nvm->ops.update = igb_update_nvm_checksum_82580;
break;
case e1000_i350:
nvm->ops.validate = igb_validate_nvm_checksum_i350;
nvm->ops.update = igb_update_nvm_checksum_i350;
break;
default:
nvm->ops.validate = igb_validate_nvm_checksum;
nvm->ops.update = igb_update_nvm_checksum;
}
nvm->ops.write = igb_write_nvm_spi;

/* if part supports SR-IOV then initialize mailbox parameters */
switch (mac->type) {

@@ -1754,6 +1787,248 @@ u16 igb_rxpbs_adjust_82580(u32 data)
return ret_val;
}

/**
* igb_validate_nvm_checksum_with_offset - Validate EEPROM
* checksum
* @hw: pointer to the HW structure
* @offset: offset in words of the checksum protected region
*
* Calculates the EEPROM checksum by reading/adding each word of the EEPROM
* and then verifies that the sum of the EEPROM is equal to 0xBABA.
**/
s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
{
s32 ret_val = 0;
u16 checksum = 0;
u16 i, nvm_data;

for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
if (ret_val) {
hw_dbg("NVM Read Error\n");
goto out;
}
checksum += nvm_data;
}

if (checksum != (u16) NVM_SUM) {
hw_dbg("NVM Checksum Invalid\n");
ret_val = -E1000_ERR_NVM;
goto out;
}

out:
return ret_val;
}

/**
* igb_update_nvm_checksum_with_offset - Update EEPROM
* checksum
* @hw: pointer to the HW structure
* @offset: offset in words of the checksum protected region
*
* Updates the EEPROM checksum by reading/adding each word of the EEPROM
* up to the checksum. Then calculates the EEPROM checksum and writes the
* value to the EEPROM.
**/
s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
{
s32 ret_val;
u16 checksum = 0;
u16 i, nvm_data;

for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
if (ret_val) {
hw_dbg("NVM Read Error while updating checksum.\n");
goto out;
}
checksum += nvm_data;
}
checksum = (u16) NVM_SUM - checksum;
ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
&checksum);
if (ret_val)
hw_dbg("NVM Write Error while updating checksum.\n");

out:
return ret_val;
}
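The two helpers above rely on the same invariant: every word from the region offset up to and including the checksum word must sum to 0xBABA (NVM_SUM), so the stored checksum word is NVM_SUM minus the sum of the words before it. A minimal standalone sketch of that arithmetic, not driver code (the in-memory image array is a hypothetical stand-in for hw->nvm.ops.read):

#include <stdint.h>

#define NVM_CHECKSUM_REG 0x003F
#define NVM_SUM          0xBABA

/* What igb_update_nvm_checksum_with_offset() would store: NVM_SUM minus
 * the sum of the words that precede the checksum word of the region. */
static uint16_t nvm_region_checksum(const uint16_t *image, uint16_t offset)
{
	uint16_t sum = 0;
	uint16_t i;

	for (i = offset; i < (uint16_t)(NVM_CHECKSUM_REG + offset); i++)
		sum += image[i];
	return (uint16_t)(NVM_SUM - sum);
}

/* Validation then just adds every word including the checksum word and
 * checks the total wraps to NVM_SUM, as in the validate helper above. */
static int nvm_region_checksum_ok(const uint16_t *image, uint16_t offset)
{
	uint16_t sum = 0;
	uint16_t i;

	for (i = offset; i < (uint16_t)(NVM_CHECKSUM_REG + offset + 1); i++)
		sum += image[i];
	return sum == (uint16_t)NVM_SUM;
}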
/**
* igb_validate_nvm_checksum_82580 - Validate EEPROM checksum
* @hw: pointer to the HW structure
*
* Calculates the EEPROM section checksum by reading/adding each word of
* the EEPROM and then verifies that the sum of the EEPROM is
* equal to 0xBABA.
**/
static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw)
{
s32 ret_val = 0;
u16 eeprom_regions_count = 1;
u16 j, nvm_data;
u16 nvm_offset;

ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
if (ret_val) {
hw_dbg("NVM Read Error\n");
goto out;
}

if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
/* if chekcsums compatibility bit is set validate checksums
* for all 4 ports. */
eeprom_regions_count = 4;
}

for (j = 0; j < eeprom_regions_count; j++) {
nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
ret_val = igb_validate_nvm_checksum_with_offset(hw,
nvm_offset);
if (ret_val != 0)
goto out;
}

out:
return ret_val;
}

/**
* igb_update_nvm_checksum_82580 - Update EEPROM checksum
* @hw: pointer to the HW structure
*
* Updates the EEPROM section checksums for all 4 ports by reading/adding
* each word of the EEPROM up to the checksum. Then calculates the EEPROM
* checksum and writes the value to the EEPROM.
**/
static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
{
s32 ret_val;
u16 j, nvm_data;
u16 nvm_offset;

ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
if (ret_val) {
hw_dbg("NVM Read Error while updating checksum"
" compatibility bit.\n");
goto out;
}

if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) {
/* set compatibility bit to validate checksums appropriately */
nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK;
ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
&nvm_data);
if (ret_val) {
hw_dbg("NVM Write Error while updating checksum"
" compatibility bit.\n");
goto out;
}
}

for (j = 0; j < 4; j++) {
nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
if (ret_val)
goto out;
}

out:
return ret_val;
}

/**
* igb_validate_nvm_checksum_i350 - Validate EEPROM checksum
* @hw: pointer to the HW structure
*
* Calculates the EEPROM section checksum by reading/adding each word of
* the EEPROM and then verifies that the sum of the EEPROM is
* equal to 0xBABA.
**/
static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw)
{
s32 ret_val = 0;
u16 j;
u16 nvm_offset;

for (j = 0; j < 4; j++) {
nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
ret_val = igb_validate_nvm_checksum_with_offset(hw,
nvm_offset);
if (ret_val != 0)
goto out;
}

out:
return ret_val;
}

/**
* igb_update_nvm_checksum_i350 - Update EEPROM checksum
* @hw: pointer to the HW structure
*
* Updates the EEPROM section checksums for all 4 ports by reading/adding
* each word of the EEPROM up to the checksum. Then calculates the EEPROM
* checksum and writes the value to the EEPROM.
**/
static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw)
{
s32 ret_val = 0;
u16 j;
u16 nvm_offset;

for (j = 0; j < 4; j++) {
nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
if (ret_val != 0)
goto out;
}

out:
return ret_val;
}
/**
* igb_set_eee_i350 - Enable/disable EEE support
* @hw: pointer to the HW structure
*
* Enable/disable EEE based on setting in dev_spec structure.
*
**/
s32 igb_set_eee_i350(struct e1000_hw *hw)
{
s32 ret_val = 0;
u32 ipcnfg, eeer, ctrl_ext;

ctrl_ext = rd32(E1000_CTRL_EXT);
if ((hw->mac.type != e1000_i350) ||
(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK))
goto out;
ipcnfg = rd32(E1000_IPCNFG);
eeer = rd32(E1000_EEER);

/* enable or disable per user setting */
if (!(hw->dev_spec._82575.eee_disable)) {
ipcnfg |= (E1000_IPCNFG_EEE_1G_AN |
E1000_IPCNFG_EEE_100M_AN);
eeer |= (E1000_EEER_TX_LPI_EN |
E1000_EEER_RX_LPI_EN |
E1000_EEER_LPI_FC);

} else {
ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
E1000_IPCNFG_EEE_100M_AN);
eeer &= ~(E1000_EEER_TX_LPI_EN |
E1000_EEER_RX_LPI_EN |
E1000_EEER_LPI_FC);
}
wr32(E1000_IPCNFG, ipcnfg);
wr32(E1000_EEER, eeer);
out:

return ret_val;
}

static struct e1000_mac_operations e1000_mac_ops_82575 = {
.init_hw = igb_init_hw_82575,
.check_for_link = igb_check_for_link_82575,

@@ -251,5 +251,6 @@ void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *, bool, int);
void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool);
void igb_vmdq_set_replication_pf(struct e1000_hw *, bool);
u16 igb_rxpbs_adjust_82580(u32 data);
s32 igb_set_eee_i350(struct e1000_hw *);

#endif

@@ -287,7 +287,34 @@
#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */

/* Transmit Arbitration Count */
/* DMA Coalescing register fields */
#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coalescing
* Watchdog Timer */
#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coalescing Receive
* Threshold */
#define E1000_DMACR_DMACTHR_SHIFT 16
#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe
* transactions */
#define E1000_DMACR_DMAC_LX_SHIFT 28
#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */

#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coalescing Transmit
* Threshold */

#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */

#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Receive Traffic Rate
* Threshold */
#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rcv packet rate in
* current window */

#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rcv Traffic
* Current Cnt */

#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* Flow ctrl Rcv Threshold
* High val */
#define E1000_FCRTC_RTH_COAL_SHIFT 4
#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */

/* SerDes Control */
#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
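For reference, a sketch of how a DMA-coalescing control word could be assembled from the fields defined above (standalone C mirroring the masks; the argument values are only an illustration, not recommended settings):

#include <stdint.h>

#define E1000_DMACR_DMACWT_MASK   0x00003FFF
#define E1000_DMACR_DMACTHR_MASK  0x00FF0000
#define E1000_DMACR_DMACTHR_SHIFT 16
#define E1000_DMACR_DMAC_LX_MASK  0x30000000
#define E1000_DMACR_DMAC_EN       0x80000000

/* Pack an illustrative DMACR value: watchdog timer in the low 14 bits,
 * receive threshold in bits 23:16, Lx selection and enable in the top bits. */
static uint32_t dmacr_pack(uint32_t watchdog, uint32_t rx_thresh)
{
	uint32_t reg = watchdog & E1000_DMACR_DMACWT_MASK;

	reg |= (rx_thresh << E1000_DMACR_DMACTHR_SHIFT) & E1000_DMACR_DMACTHR_MASK;
	reg |= E1000_DMACR_DMAC_LX_MASK | E1000_DMACR_DMAC_EN;
	return reg;
}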
@@ -566,6 +593,8 @@
#define NVM_INIT_CONTROL3_PORT_A 0x0024
#define NVM_ALT_MAC_ADDR_PTR 0x0037
#define NVM_CHECKSUM_REG 0x003F
#define NVM_COMPATIBILITY_REG_3 0x0003
#define NVM_COMPATIBILITY_BIT_MASK 0x8000

#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */
#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */

@@ -600,6 +629,7 @@
/* NVM Commands - SPI */
#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */
#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */
#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */
#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */
#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */
#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */

@@ -758,6 +788,13 @@
#define E1000_MDIC_ERROR 0x40000000
#define E1000_MDIC_DEST 0x80000000

/* Energy Efficient Ethernet */
#define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* EEE Enable 1G AN */
#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* EEE Enable 100M AN */
#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEE Tx LPI Enable */
#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEE Rx LPI Enable */
#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */

/* SerDes Control */
#define E1000_GEN_CTL_READY 0x80000000
#define E1000_GEN_CTL_ADDRESS_SHIFT 8

@@ -336,6 +336,8 @@ struct e1000_nvm_operations {
s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
void (*release)(struct e1000_hw *);
s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
s32 (*update)(struct e1000_hw *);
s32 (*validate)(struct e1000_hw *);
};

struct e1000_info {

@@ -422,7 +424,6 @@ struct e1000_phy_info {

struct e1000_nvm_info {
struct e1000_nvm_operations ops;

enum e1000_nvm_type type;
enum e1000_nvm_override override;

@@ -488,6 +489,7 @@ struct e1000_mbx_info {
struct e1000_dev_spec_82575 {
bool sgmii_active;
bool global_device_reset;
bool eee_disable;
};

struct e1000_hw {

@@ -317,6 +317,68 @@ out:
return ret_val;
}

/**
* igb_read_nvm_spi - Read EEPROM's using SPI
* @hw: pointer to the HW structure
* @offset: offset of word in the EEPROM to read
* @words: number of words to read
* @data: word read from the EEPROM
*
* Reads a 16 bit word from the EEPROM.
**/
s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
struct e1000_nvm_info *nvm = &hw->nvm;
u32 i = 0;
s32 ret_val;
u16 word_in;
u8 read_opcode = NVM_READ_OPCODE_SPI;

/*
* A check for invalid values: offset too large, too many words,
* and not enough words.
*/
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
(words == 0)) {
hw_dbg("nvm parameter(s) out of bounds\n");
ret_val = -E1000_ERR_NVM;
goto out;
}

ret_val = nvm->ops.acquire(hw);
if (ret_val)
goto out;

ret_val = igb_ready_nvm_eeprom(hw);
if (ret_val)
goto release;

igb_standby_nvm(hw);

if ((nvm->address_bits == 8) && (offset >= 128))
read_opcode |= NVM_A8_OPCODE_SPI;

/* Send the READ command (opcode + addr) */
igb_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
igb_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);

/*
* Read the data. SPI NVMs increment the address with each byte
* read and will roll over if reading beyond the end. This allows
* us to read the whole NVM from any offset
*/
for (i = 0; i < words; i++) {
word_in = igb_shift_in_eec_bits(hw, 16);
data[i] = (word_in >> 8) | (word_in << 8);
}

release:
nvm->ops.release(hw);

out:
return ret_val;
}

/**
* igb_read_nvm_eerd - Reads EEPROM using EERD register
* @hw: pointer to the HW structure

@@ -353,7 +415,7 @@ s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
break;

data[i] = (rd32(E1000_EERD) >>
E1000_NVM_RW_REG_DATA);
E1000_NVM_RW_REG_DATA);
}

out:

@@ -35,6 +35,7 @@ s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num);
s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num,
u32 part_num_size);
s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
s32 igb_validate_nvm_checksum(struct e1000_hw *hw);
s32 igb_update_nvm_checksum(struct e1000_hw *hw);

@@ -106,6 +106,15 @@

#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40))

/* DMA Coalescing registers */
#define E1000_DMACR 0x02508 /* Control Register */
#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */
#define E1000_DMCTLX 0x02514 /* Time to Lx Request */
#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */
#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */
#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */
#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */

/* TX Rate Limit Registers */
#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */
#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */

@@ -329,6 +338,10 @@
/* DMA Coalescing registers */
#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */

/* Energy Efficient Ethernet "EEE" register */
#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */
#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet */

/* OS2BMC Registers */
#define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */
#define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */

@@ -333,6 +333,12 @@ struct igb_adapter {
#define IGB_FLAG_DCA_ENABLED (1 << 1)
#define IGB_FLAG_QUAD_PORT_A (1 << 2)
#define IGB_FLAG_QUEUE_PAIRS (1 << 3)
#define IGB_FLAG_DMAC (1 << 4)

/* DMA Coalescing defines */
#define IGB_MIN_TXPBSIZE 20408
#define IGB_TX_BUF_4096 4096
#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */

#define IGB_82576_TSYNC_SHIFT 19
#define IGB_82580_TSYNC_SHIFT 24

@@ -721,7 +721,7 @@ static int igb_set_eeprom(struct net_device *netdev,
/* Update the checksum over the first part of the EEPROM if needed
* and flush shadow RAM for 82573 controllers */
if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG)))
igb_update_nvm_checksum(hw);
hw->nvm.ops.update(hw);

kfree(eeprom_buff);
return ret_val;

@@ -2009,6 +2009,12 @@ static int igb_set_coalesce(struct net_device *netdev,
if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs)
return -EINVAL;

/* If ITR is disabled, disable DMAC */
if (ec->rx_coalesce_usecs == 0) {
if (adapter->flags & IGB_FLAG_DMAC)
adapter->flags &= ~IGB_FLAG_DMAC;
}

/* convert to rate of irq's per second */
if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3)
adapter->rx_itr_setting = ec->rx_coalesce_usecs;

@@ -50,7 +50,12 @@
#endif
#include "igb.h"

#define DRV_VERSION "2.4.13-k2"
#define MAJ 3
#define MIN 0
#define BUILD 6
#define KFIX 2
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k" __stringify(KFIX)
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =

@@ -1674,7 +1679,58 @@ void igb_reset(struct igb_adapter *adapter)

if (hw->mac.ops.init_hw(hw))
dev_err(&pdev->dev, "Hardware Error\n");
if (hw->mac.type > e1000_82580) {
if (adapter->flags & IGB_FLAG_DMAC) {
u32 reg;

/*
* DMA Coalescing high water mark needs to be higher
* than * the * Rx threshold. The Rx threshold is
* currently * pba - 6, so we * should use a high water
* mark of pba * - 4. */
hwm = (pba - 4) << 10;

reg = (((pba-6) << E1000_DMACR_DMACTHR_SHIFT)
& E1000_DMACR_DMACTHR_MASK);

/* transition to L0x or L1 if available..*/
reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);

/* watchdog timer= +-1000 usec in 32usec intervals */
reg |= (1000 >> 5);
wr32(E1000_DMACR, reg);

/* no lower threshold to disable coalescing(smart fifb)
* -UTRESH=0*/
wr32(E1000_DMCRTRH, 0);

/* set hwm to PBA - 2 * max frame size */
wr32(E1000_FCRTC, hwm);

/*
* This sets the time to wait before requesting tran-
* sition to * low power state to number of usecs needed
* to receive 1 512 * byte frame at gigabit line rate
*/
reg = rd32(E1000_DMCTLX);
reg |= IGB_DMCTLX_DCFLUSH_DIS;

/* Delay 255 usec before entering Lx state. */
reg |= 0xFF;
wr32(E1000_DMCTLX, reg);

/* free space in Tx packet buffer to wake from DMAC */
wr32(E1000_DMCTXTH,
(IGB_MIN_TXPBSIZE -
(IGB_TX_BUF_4096 + adapter->max_frame_size))
>> 6);

/* make low power state decision controlled by DMAC */
reg = rd32(E1000_PCIEMISC);
reg |= E1000_PCIEMISC_LX_DECISION;
wr32(E1000_PCIEMISC, reg);
} /* end if IGB_FLAG_DMAC set */
}
if (hw->mac.type == e1000_82580) {
u32 reg = rd32(E1000_PCIEMISC);
wr32(E1000_PCIEMISC,
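A worked example of the watermark arithmetic in the DMA-coalescing block above, assuming purely for illustration a packet buffer of pba = 34 (KB): the Rx threshold programmed into DMACR is pba - 6 = 28, the high water mark is (pba - 4) << 10 = 30720, and the watchdog field 1000 >> 5 = 31 encodes roughly 1000 usec in 32 usec units. A standalone check of those numbers:

#include <assert.h>
#include <stdint.h>

#define E1000_DMACR_DMACTHR_SHIFT 16
#define E1000_DMACR_DMACTHR_MASK  0x00FF0000

int main(void)
{
	uint32_t pba = 34;			/* illustrative packet-buffer size */
	uint32_t hwm = (pba - 4) << 10;		/* high water mark, as in igb_reset() */
	uint32_t reg = ((pba - 6) << E1000_DMACR_DMACTHR_SHIFT)
			& E1000_DMACR_DMACTHR_MASK;
	uint32_t wdt = 1000 >> 5;		/* ~1000 usec in 32 usec intervals */

	assert(hwm == 30720);
	assert((reg >> E1000_DMACR_DMACTHR_SHIFT) == 28);
	assert(wdt == 31);
	return 0;
}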
@@ -1884,7 +1940,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
hw->mac.ops.reset_hw(hw);

/* make sure the NVM is good */
if (igb_validate_nvm_checksum(hw) < 0) {
if (hw->nvm.ops.validate(hw) < 0) {
dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
err = -EIO;
goto err_eeprom;

@@ -2014,7 +2070,13 @@ static int __devinit igb_probe(struct pci_dev *pdev,
adapter->msix_entries ? "MSI-X" :
(adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
adapter->num_rx_queues, adapter->num_tx_queues);

switch (hw->mac.type) {
case e1000_i350:
igb_set_eee_i350(hw);
break;
default:
break;
}
return 0;

err_register:

@@ -2151,6 +2213,9 @@ static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
random_ether_addr(mac_addr);
igb_set_vf_mac(adapter, i, mac_addr);
}
/* DMA Coalescing is not supported in IOV mode. */
if (adapter->flags & IGB_FLAG_DMAC)
adapter->flags &= ~IGB_FLAG_DMAC;
}
#endif /* CONFIG_PCI_IOV */
}

@@ -2325,6 +2390,9 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
/* Explicitly disable IRQ since the NIC can be in any state. */
igb_irq_disable(adapter);

if (hw->mac.type == e1000_i350)
adapter->flags &= ~IGB_FLAG_DMAC;

set_bit(__IGB_DOWN, &adapter->state);
return 0;
}

@@ -118,6 +118,7 @@ struct vf_data_storage {
bool pf_set_mac;
u16 pf_vlan; /* When set, guest VLAN config not allowed. */
u16 pf_qos;
u16 tx_rate;
};

/* wrapper around a pointer to a socket buffer,

@@ -209,6 +210,7 @@ struct ixgbe_ring {
* associated with this ring, which is
* different for DCB and RSS modes
*/
u8 dcb_tc;

u16 work_limit; /* max work per interrupt */

@@ -243,7 +245,7 @@ enum ixgbe_ring_f_enum {
RING_F_ARRAY_SIZE /* must be last in enum set */
};

#define IXGBE_MAX_DCB_INDICES 8
#define IXGBE_MAX_DCB_INDICES 64
#define IXGBE_MAX_RSS_INDICES 16
#define IXGBE_MAX_VMDQ_INDICES 64
#define IXGBE_MAX_FDIR_INDICES 64

@@ -341,6 +343,7 @@ struct ixgbe_adapter {
struct ixgbe_dcb_config dcb_cfg;
struct ixgbe_dcb_config temp_dcb_cfg;
u8 dcb_set_bitmap;
u8 dcbx_cap;
enum ixgbe_fc_mode last_lfc_mode;

/* Interrupt Throttle Rate */

@@ -466,6 +469,7 @@ struct ixgbe_adapter {
DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
unsigned int num_vfs;
struct vf_data_storage *vfinfo;
int vf_rate_link_speed;
};

enum ixbge_state_t {

@@ -541,6 +545,7 @@ extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring);
extern void ixgbe_set_rx_mode(struct net_device *netdev);
extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
#ifdef IXGBE_FCOE
extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
extern int ixgbe_fso(struct ixgbe_adapter *adapter,

@@ -158,6 +158,7 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)

switch (hw->phy.type) {
case ixgbe_phy_tn:
phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
phy->ops.check_link = &ixgbe_check_phy_link_tnx;
phy->ops.get_firmware_version =
&ixgbe_get_phy_firmware_version_tnx;

@@ -64,7 +64,7 @@ s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame)
val = min_credit;
refill[i] = val;

max[i] = (bw[i] * MAX_CREDIT)/100;
max[i] = bw[i] ? (bw[i] * MAX_CREDIT)/100 : min_credit;
}
return 0;
}
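The one-line change in ixgbe_ieee_credits() above keeps a traffic class configured with 0% bandwidth from ending up with a zero max credit: it now falls back to min_credit instead of (0 * MAX_CREDIT)/100. A small standalone illustration (the MAX_CREDIT and min_credit values here are placeholders, not the driver's):

#include <assert.h>

#define MAX_CREDIT 4095		/* placeholder credit scale */

int main(void)
{
	int min_credit = 128;	/* placeholder minimum, e.g. derived from max_frame */
	int bw[2] = { 25, 0 };	/* 25% and 0% bandwidth shares */
	int max[2];

	for (int i = 0; i < 2; i++)
		max[i] = bw[i] ? (bw[i] * MAX_CREDIT) / 100 : min_credit;

	assert(max[0] == 1023);	/* 25% of the credit scale */
	assert(max[1] == 128);	/* 0% no longer collapses to zero */
	return 0;
}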
@@ -246,6 +246,8 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
u8 bwgid[MAX_TRAFFIC_CLASS];
u16 refill[MAX_TRAFFIC_CLASS];
u16 max[MAX_TRAFFIC_CLASS];
/* CEE does not define a priority to tc mapping so map 1:1 */
u8 prio_tc[MAX_TRAFFIC_CLASS] = {0, 1, 2, 3, 4, 5, 6, 7};

/* Unpack CEE standard containers */
ixgbe_dcb_unpack_pfc(dcb_config, &pfc_en);

@@ -264,7 +266,7 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
case ixgbe_mac_X540:
ret = ixgbe_dcb_hw_config_82599(hw, dcb_config->rx_pba_cfg,
pfc_en, refill, max, bwgid,
ptype);
ptype, prio_tc);
break;
default:
break;

@@ -292,30 +294,9 @@ s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en)
}

s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa)
u16 *refill, u16 *max, u8 *bwg_id,
u8 *prio_type, u8 *prio_tc)
{
int i;
u8 prio_type[IEEE_8021QAZ_MAX_TCS];

/* Map TSA onto CEE prio type */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
switch (tsa[i]) {
case IEEE_8021QAZ_TSA_STRICT:
prio_type[i] = 2;
break;
case IEEE_8021QAZ_TSA_ETS:
prio_type[i] = 0;
break;
default:
/* Hardware only supports priority strict or
* ETS transmission selection algorithms if
* we receive some other value from dcbnl
* throw an error
*/
return -EINVAL;
}
}

switch (hw->mac.type) {
case ixgbe_mac_82598EB:
ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max,

@@ -328,11 +309,11 @@ s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max,
bwg_id, prio_type);
bwg_id, prio_type, prio_tc);
ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
bwg_id, prio_type);
ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max,
bwg_id, prio_type);
ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,
prio_type, prio_tc);
break;
default:
break;

@@ -159,8 +159,8 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
struct ixgbe_dcb_config *, int, u8);

/* DCB hw initialization */
s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
u16 *refill, u16 *max, u8 *bwg_id, u8 *prio_type);
s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max,
u8 *bwg_id, u8 *prio_type, u8 *tc_prio);
s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en);
s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);

@@ -233,21 +233,27 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
u32 reg, rx_pba_size;
u8 i;

if (!pfc_en)
goto out;
if (pfc_en) {
/* Enable Transmit Priority Flow Control */
reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
reg &= ~IXGBE_RMCS_TFCE_802_3X;
/* correct the reporting of our flow control status */
reg |= IXGBE_RMCS_TFCE_PRIORITY;
IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);

/* Enable Transmit Priority Flow Control */
reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
reg &= ~IXGBE_RMCS_TFCE_802_3X;
/* correct the reporting of our flow control status */
reg |= IXGBE_RMCS_TFCE_PRIORITY;
IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
/* Enable Receive Priority Flow Control */
reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
reg &= ~IXGBE_FCTRL_RFCE;
reg |= IXGBE_FCTRL_RPFCE;
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);

/* Enable Receive Priority Flow Control */
reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
reg &= ~IXGBE_FCTRL_RFCE;
reg |= IXGBE_FCTRL_RPFCE;
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
/* Configure pause time */
for (i = 0; i < (MAX_TRAFFIC_CLASS >> 1); i++)
IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), 0x68006800);

/* Configure flow control refresh threshold value */
IXGBE_WRITE_REG(hw, IXGBE_FCRTV, 0x3400);
}

/*
* Configure flow control thresholds and enable priority flow control

@@ -273,14 +279,6 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
}

/* Configure pause time */
for (i = 0; i < (MAX_TRAFFIC_CLASS >> 1); i++)
IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), 0x68006800);

/* Configure flow control refresh threshold value */
IXGBE_WRITE_REG(hw, IXGBE_FCRTV, 0x3400);

out:
return 0;
}

@@ -85,7 +85,8 @@ s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *bwg_id,
u8 *prio_type)
u8 *prio_type,
u8 *prio_tc)
{
u32 reg = 0;
u32 credit_refill = 0;

@@ -102,7 +103,7 @@ s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
/* Map all traffic classes to their UP, 1 to 1 */
reg = 0;
for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
reg |= (i << (i * IXGBE_RTRUP2TC_UP_SHIFT));
reg |= (prio_tc[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT));
IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);

/* Configure traffic class credits and priority */

@@ -194,7 +195,8 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *bwg_id,
u8 *prio_type)
u8 *prio_type,
u8 *prio_tc)
{
u32 reg;
u8 i;

@@ -211,7 +213,7 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
/* Map all traffic classes to their UP, 1 to 1 */
reg = 0;
for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
reg |= (i << (i * IXGBE_RTTUP2TC_UP_SHIFT));
reg |= (prio_tc[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT));
IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg);

/* Configure traffic class credits and priority */

@@ -251,13 +253,6 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en)
{
u32 i, reg, rx_pba_size;

/* If PFC is disabled globally then fall back to LFC. */
if (!pfc_en) {
for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
hw->mac.ops.fc_enable(hw, i);
goto out;
}

/* Configure PFC Tx thresholds per TC */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
int enabled = pfc_en & (1 << i);

@@ -276,28 +271,33 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en)
IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
}

/* Configure pause time (2 TCs per register) */
reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
if (pfc_en) {
/* Configure pause time (2 TCs per register) */
reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

/* Configure flow control refresh threshold value */
IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
/* Configure flow control refresh threshold value */
IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

/* Enable Transmit PFC */
reg = IXGBE_FCCFG_TFCE_PRIORITY;
IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg);

/*
* Enable Receive PFC
* We will always honor XOFF frames we receive when
* we are in PFC mode.
*/
reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
reg &= ~IXGBE_MFLCN_RFCE;
reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF;
IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
out:
reg = IXGBE_FCCFG_TFCE_PRIORITY;
IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg);
/*
* Enable Receive PFC
* We will always honor XOFF frames we receive when
* we are in PFC mode.
*/
reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
reg &= ~IXGBE_MFLCN_RFCE;
reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF;
IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);

} else {
for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
hw->mac.ops.fc_enable(hw, i);
}

return 0;
}

@@ -424,15 +424,16 @@ static s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw)
*/
s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
u8 rx_pba, u8 pfc_en, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type)
u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc)
{
ixgbe_dcb_config_packet_buffers_82599(hw, rx_pba);
ixgbe_dcb_config_82599(hw);
ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, prio_type);
ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
prio_type, prio_tc);
ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
bwg_id, prio_type);
ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max,
bwg_id, prio_type);
bwg_id, prio_type, prio_tc);
ixgbe_dcb_config_pfc_82599(hw, pfc_en);
ixgbe_dcb_config_tc_stats_82599(hw);
@@ -109,7 +109,8 @@ s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *bwg_id,
u8 *prio_type);
u8 *prio_type,
u8 *prio_tc);

s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
u16 *refill,

@@ -121,10 +122,12 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *bwg_id,
u8 *prio_type);
u8 *prio_type,
u8 *prio_tc);

s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
u8 rx_pba, u8 pfc_en, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type);
u16 *max, u8 *bwg_id, u8 *prio_type,
u8 *prio_tc);

#endif /* _DCB_82599_CONFIG_H */

@@ -129,7 +129,6 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
netdev->netdev_ops->ndo_stop(netdev);
ixgbe_clear_interrupt_scheme(adapter);

adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
switch (adapter->hw.mac.type) {
case ixgbe_mac_82598EB:
adapter->last_lfc_mode = adapter->hw.fc.current_mode;

@@ -145,6 +144,9 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
}

adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
if (!netdev_get_num_tc(netdev))
ixgbe_setup_tc(netdev, MAX_TRAFFIC_CLASS);

ixgbe_init_interrupt_scheme(adapter);
if (netif_running(netdev))
netdev->netdev_ops->ndo_open(netdev);

@@ -159,7 +161,6 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
adapter->temp_dcb_cfg.pfc_mode_enable = false;
adapter->dcb_cfg.pfc_mode_enable = false;
adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:

@@ -169,6 +170,8 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
break;
}

ixgbe_setup_tc(netdev, 0);

ixgbe_init_interrupt_scheme(adapter);
if (netif_running(netdev))
netdev->netdev_ops->ndo_open(netdev);

@@ -346,11 +349,12 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
struct ixgbe_adapter *adapter = netdev_priv(netdev);
int ret;

if (!adapter->dcb_set_bitmap)
if (!adapter->dcb_set_bitmap ||
!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return DCB_NO_HW_CHG;

ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
adapter->ring_feature[RING_F_DCB].indices);
MAX_TRAFFIC_CLASS);

if (ret)
return DCB_NO_HW_CHG;

@@ -412,6 +416,8 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) {
u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS];
/* Priority to TC mapping in CEE case default to 1:1 */
u8 prio_tc[MAX_TRAFFIC_CLASS] = {0, 1, 2, 3, 4, 5, 6, 7};
int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;

#ifdef CONFIG_FCOE

@@ -433,7 +439,7 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
DCB_TX_CONFIG, prio_type);

ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max,
bwg_id, prio_type);
bwg_id, prio_type, prio_tc);
}

if (adapter->dcb_cfg.pfc_mode_enable)

@@ -448,40 +454,38 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
u8 rval = 0;

if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
switch (capid) {
case DCB_CAP_ATTR_PG:
*cap = true;
break;
case DCB_CAP_ATTR_PFC:
*cap = true;
break;
case DCB_CAP_ATTR_UP2TC:
*cap = false;
break;
case DCB_CAP_ATTR_PG_TCS:
*cap = 0x80;
break;
case DCB_CAP_ATTR_PFC_TCS:
*cap = 0x80;
break;
case DCB_CAP_ATTR_GSP:
*cap = true;
break;
case DCB_CAP_ATTR_BCN:
*cap = false;
break;
default:
rval = -EINVAL;
break;
}
} else {
rval = -EINVAL;
switch (capid) {
case DCB_CAP_ATTR_PG:
*cap = true;
break;
case DCB_CAP_ATTR_PFC:
*cap = true;
break;
case DCB_CAP_ATTR_UP2TC:
*cap = false;
break;
case DCB_CAP_ATTR_PG_TCS:
*cap = 0x80;
break;
case DCB_CAP_ATTR_PFC_TCS:
*cap = 0x80;
break;
case DCB_CAP_ATTR_GSP:
*cap = true;
break;
case DCB_CAP_ATTR_BCN:
*cap = false;
break;
case DCB_CAP_ATTR_DCBX:
*cap = adapter->dcbx_cap;
break;
default:
*cap = false;
break;
}

return rval;
return 0;
}

static u8 ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)

@@ -542,21 +546,16 @@ static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
*/
static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
{
u8 rval = 0;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct dcb_app app = {
.selector = idtype,
.protocol = id,
};

switch (idtype) {
case DCB_APP_IDTYPE_ETHTYPE:
#ifdef IXGBE_FCOE
if (id == ETH_P_FCOE)
rval = ixgbe_fcoe_getapp(netdev_priv(netdev));
#endif
break;
case DCB_APP_IDTYPE_PORTNUM:
break;
default:
break;
}
return rval;
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return 0;

return dcb_getapp(netdev, &app);
}

/**

@@ -571,14 +570,24 @@ static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
static u8 ixgbe_dcbnl_setapp(struct net_device *netdev,
u8 idtype, u16 id, u8 up)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
u8 rval = 1;
struct dcb_app app = {
.selector = idtype,
.protocol = id,
.priority = up
};

if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return rval;

rval = dcb_setapp(netdev, &app);

switch (idtype) {
case DCB_APP_IDTYPE_ETHTYPE:
#ifdef IXGBE_FCOE
if (id == ETH_P_FCOE) {
u8 old_tc;
struct ixgbe_adapter *adapter = netdev_priv(netdev);

/* Get current programmed tc */
old_tc = adapter->fcoe.tc;

@@ -635,11 +644,16 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
__u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS];
__u8 prio_type[IEEE_8021QAZ_MAX_TCS];
int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
int err;
int i, err;
__u64 *p = (__u64 *) ets->prio_tc;
/* naively give each TC a bwg to map onto CEE hardware */
__u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7};

if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
return -EINVAL;

if (!adapter->ixgbe_ieee_ets) {
adapter->ixgbe_ieee_ets = kmalloc(sizeof(struct ieee_ets),
GFP_KERNEL);

@@ -647,12 +661,35 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
return -ENOMEM;
}

memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets));

/* Map TSA onto CEE prio type */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
switch (ets->tc_tsa[i]) {
case IEEE_8021QAZ_TSA_STRICT:
prio_type[i] = 2;
break;
case IEEE_8021QAZ_TSA_ETS:
prio_type[i] = 0;
break;
default:
/* Hardware only supports priority strict or
* ETS transmission selection algorithms if
* we receive some other value from dcbnl
* throw an error
*/
return -EINVAL;
}
}

if (*p)
ixgbe_dcbnl_set_state(dev, 1);
else
ixgbe_dcbnl_set_state(dev, 0);

ixgbe_ieee_credits(ets->tc_tx_bw, refill, max, max_frame);
err = ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max,
bwg_id, ets->tc_tsa);
bwg_id, prio_type, ets->prio_tc);
return err;
}

@@ -686,6 +723,9 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
struct ixgbe_adapter *adapter = netdev_priv(dev);
int err;

if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
return -EINVAL;

if (!adapter->ixgbe_ieee_pfc) {
adapter->ixgbe_ieee_pfc = kmalloc(sizeof(struct ieee_pfc),
GFP_KERNEL);

@@ -698,11 +738,86 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
return err;
}

static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
struct dcb_app *app)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);

if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
return -EINVAL;
#ifdef IXGBE_FCOE
if (app->selector == 1 && app->protocol == ETH_P_FCOE) {
if (adapter->fcoe.tc == app->priority)
goto setapp;

/* In IEEE mode map up to tc 1:1 */
adapter->fcoe.tc = app->priority;
adapter->fcoe.up = app->priority;

/* Force hardware reset required to push FCoE
* setup on {tx|rx}_rings
*/
adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
ixgbe_dcbnl_set_all(dev);
}

setapp:
#endif
dcb_setapp(dev, app);
return 0;
}

static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
return adapter->dcbx_cap;
}

static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ieee_ets ets = {0};
struct ieee_pfc pfc = {0};

/* no support for LLD_MANAGED modes or CEE+IEEE */
if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) ||
!(mode & DCB_CAP_DCBX_HOST))
return 1;

if (mode == adapter->dcbx_cap)
return 0;

adapter->dcbx_cap = mode;

/* ETS and PFC defaults */
ets.ets_cap = 8;
pfc.pfc_cap = 8;

if (mode & DCB_CAP_DCBX_VER_IEEE) {
ixgbe_dcbnl_ieee_setets(dev, &ets);
ixgbe_dcbnl_ieee_setpfc(dev, &pfc);
} else if (mode & DCB_CAP_DCBX_VER_CEE) {
adapter->dcb_set_bitmap |= (BIT_PFC & BIT_PG_TX & BIT_PG_RX);
ixgbe_dcbnl_set_all(dev);
} else {
/* Drop into single TC mode strict priority as this
* indicates CEE and IEEE versions are disabled
*/
ixgbe_dcbnl_ieee_setets(dev, &ets);
ixgbe_dcbnl_ieee_setpfc(dev, &pfc);
ixgbe_dcbnl_set_state(dev, 0);
}

return 0;
}

const struct dcbnl_rtnl_ops dcbnl_ops = {
.ieee_getets = ixgbe_dcbnl_ieee_getets,
.ieee_setets = ixgbe_dcbnl_ieee_setets,
.ieee_getpfc = ixgbe_dcbnl_ieee_getpfc,
.ieee_setpfc = ixgbe_dcbnl_ieee_setpfc,
.ieee_setapp = ixgbe_dcbnl_ieee_setapp,
.getstate = ixgbe_dcbnl_get_state,
.setstate = ixgbe_dcbnl_set_state,
.getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr,

@@ -724,5 +839,6 @@ const struct dcbnl_rtnl_ops dcbnl_ops = {
.setpfcstate = ixgbe_dcbnl_setpfcstate,
.getapp = ixgbe_dcbnl_getapp,
.setapp = ixgbe_dcbnl_setapp,
.getdcbx = ixgbe_dcbnl_getdcbx,
.setdcbx = ixgbe_dcbnl_setdcbx,
};

@@ -812,21 +812,6 @@ out_disable:
}

#ifdef CONFIG_IXGBE_DCB
/**
* ixgbe_fcoe_getapp - retrieves current user priority bitmap for FCoE
* @adapter : ixgbe adapter
*
* Finds out the corresponding user priority bitmap from the current
* traffic class that FCoE belongs to. Returns 0 as the invalid user
* priority bitmap to indicate an error.
*
* Returns : 802.1p user priority bitmap for FCoE
*/
u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter)
{
return 1 << adapter->fcoe.up;
}

/**
* ixgbe_fcoe_setapp - sets the user priority bitmap for FCoE
* @adapter : ixgbe adapter
@ -652,7 +652,7 @@ void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
|
||||
static u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx)
|
||||
{
|
||||
int tc = -1;
|
||||
int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
|
||||
int dcb_i = netdev_get_num_tc(adapter->netdev);
|
||||
|
||||
/* if DCB is not enabled the queues have no TC */
|
||||
if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
|
||||
@ -2892,17 +2892,20 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
|
||||
);
|
||||
|
||||
switch (mask) {
|
||||
#ifdef CONFIG_IXGBE_DCB
|
||||
case (IXGBE_FLAG_DCB_ENABLED | IXGBE_FLAG_RSS_ENABLED):
|
||||
mrqc = IXGBE_MRQC_RTRSS8TCEN;
|
||||
break;
|
||||
case (IXGBE_FLAG_DCB_ENABLED):
|
||||
mrqc = IXGBE_MRQC_RT8TCEN;
|
||||
break;
|
||||
#endif /* CONFIG_IXGBE_DCB */
|
||||
case (IXGBE_FLAG_RSS_ENABLED):
|
||||
mrqc = IXGBE_MRQC_RSSEN;
|
||||
break;
|
||||
case (IXGBE_FLAG_SRIOV_ENABLED):
|
||||
mrqc = IXGBE_MRQC_VMDQEN;
|
||||
break;
|
||||
#ifdef CONFIG_IXGBE_DCB
|
||||
case (IXGBE_FLAG_DCB_ENABLED):
|
||||
mrqc = IXGBE_MRQC_RT8TCEN;
|
||||
break;
|
||||
#endif /* CONFIG_IXGBE_DCB */
|
||||
default:
|
||||
break;
|
||||
}
|
||||
@ -3655,15 +3658,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
|
||||
if (hw->mac.type == ixgbe_mac_82598EB)
|
||||
netif_set_gso_max_size(adapter->netdev, 32768);
|
||||
|
||||
#ifdef CONFIG_FCOE
|
||||
if (adapter->netdev->features & NETIF_F_FCOE_MTU)
|
||||
max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
|
||||
#endif
|
||||
|
||||
ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
|
||||
DCB_TX_CONFIG);
|
||||
ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
|
||||
DCB_RX_CONFIG);
|
||||
|
||||
/* Enable VLAN tag insert/strip */
|
||||
adapter->netdev->features |= NETIF_F_HW_VLAN_RX;
|
||||
@ -3671,7 +3665,43 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
|
||||
hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
|
||||
|
||||
/* reconfigure the hardware */
|
||||
ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
|
||||
if (adapter->dcbx_cap & (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE)) {
|
||||
#ifdef CONFIG_FCOE
|
||||
if (adapter->netdev->features & NETIF_F_FCOE_MTU)
|
||||
max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
|
||||
#endif
|
||||
ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
|
||||
DCB_TX_CONFIG);
|
||||
ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
|
||||
DCB_RX_CONFIG);
|
||||
ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
|
||||
} else {
|
||||
struct net_device *dev = adapter->netdev;
|
||||
|
||||
if (adapter->ixgbe_ieee_ets)
|
||||
dev->dcbnl_ops->ieee_setets(dev,
|
||||
adapter->ixgbe_ieee_ets);
|
||||
if (adapter->ixgbe_ieee_pfc)
|
||||
dev->dcbnl_ops->ieee_setpfc(dev,
|
||||
adapter->ixgbe_ieee_pfc);
|
||||
}
|
||||
|
||||
/* Enable RSS Hash per TC */
|
||||
if (hw->mac.type != ixgbe_mac_82598EB) {
|
||||
int i;
|
||||
u32 reg = 0;
|
||||
|
||||
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
|
||||
u8 msb = 0;
|
||||
u8 cnt = adapter->netdev->tc_to_txq[i].count;
|
||||
|
||||
while (cnt >>= 1)
|
||||
msb++;
|
||||
|
||||
reg |= msb << IXGBE_RQTC_SHIFT_TC(i);
|
||||
}
|
||||
IXGBE_WRITE_REG(hw, IXGBE_RQTC, reg);
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
@ -4258,24 +4288,6 @@ static void ixgbe_reset_task(struct work_struct *work)
|
||||
ixgbe_reinit_locked(adapter);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_IXGBE_DCB
|
||||
static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
|
||||
{
|
||||
bool ret = false;
|
||||
struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];
|
||||
|
||||
if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
|
||||
return ret;
|
||||
|
||||
f->mask = 0x7 << 3;
|
||||
adapter->num_rx_queues = f->indices;
|
||||
adapter->num_tx_queues = f->indices;
|
||||
ret = true;
|
||||
|
||||
return ret;
|
||||
}
|
||||
#endif
|
||||
|
||||
/**
|
||||
* ixgbe_set_rss_queues: Allocate queues for RSS
|
||||
* @adapter: board private structure to initialize
|
||||
@ -4346,19 +4358,26 @@ static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
|
||||
**/
|
||||
static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
|
||||
{
|
||||
bool ret = false;
|
||||
struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];

f->indices = min((int)num_online_cpus(), f->indices);
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
return false;

if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
#ifdef CONFIG_IXGBE_DCB
int tc;
struct net_device *dev = adapter->netdev;

tc = netdev_get_prio_tc_map(dev, adapter->fcoe.up);
f->indices = dev->tc_to_txq[tc].count;
f->mask = dev->tc_to_txq[tc].offset;
#endif
} else {
f->indices = min((int)num_online_cpus(), f->indices);

adapter->num_rx_queues = 1;
adapter->num_tx_queues = 1;
#ifdef CONFIG_IXGBE_DCB
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
e_info(probe, "FCoE enabled with DCB\n");
ixgbe_set_dcb_queues(adapter);
}
#endif

if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
e_info(probe, "FCoE enabled with RSS\n");
if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
@ -4371,14 +4390,45 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
f->mask = adapter->num_rx_queues;
adapter->num_rx_queues += f->indices;
adapter->num_tx_queues += f->indices;

ret = true;
}

return true;
}
#endif /* IXGBE_FCOE */

#ifdef CONFIG_IXGBE_DCB
static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
bool ret = false;
struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];
int i, q;

if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
return ret;

f->indices = 0;
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
q = min((int)num_online_cpus(), MAX_TRAFFIC_CLASS);
f->indices += q;
}

f->mask = 0x7 << 3;
adapter->num_rx_queues = f->indices;
adapter->num_tx_queues = f->indices;
ret = true;

#ifdef IXGBE_FCOE
/* FCoE enabled queues require special configuration done through
* configure_fcoe() and others. Here we map FCoE indices onto the
* DCB queue pairs allowing FCoE to own configuration later.
*/
ixgbe_set_fcoe_queues(adapter);
#endif

return ret;
}
#endif

#endif /* IXGBE_FCOE */
/**
* ixgbe_set_sriov_queues: Allocate queues for IOV use
* @adapter: board private structure to initialize
@ -4414,16 +4464,16 @@ static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
if (ixgbe_set_sriov_queues(adapter))
goto done;

#ifdef IXGBE_FCOE
if (ixgbe_set_fcoe_queues(adapter))
goto done;

#endif /* IXGBE_FCOE */
#ifdef CONFIG_IXGBE_DCB
if (ixgbe_set_dcb_queues(adapter))
goto done;

#endif
#ifdef IXGBE_FCOE
if (ixgbe_set_fcoe_queues(adapter))
goto done;

#endif /* IXGBE_FCOE */
if (ixgbe_set_fdir_queues(adapter))
goto done;

@ -4515,6 +4565,110 @@ static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
}

#ifdef CONFIG_IXGBE_DCB

/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
unsigned int *tx, unsigned int *rx)
{
struct net_device *dev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
u8 num_tcs = netdev_get_num_tc(dev);

*tx = 0;
*rx = 0;

switch (hw->mac.type) {
case ixgbe_mac_82598EB:
*tx = tc << 3;
*rx = tc << 2;
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
if (num_tcs == 8) {
if (tc < 3) {
*tx = tc << 5;
*rx = tc << 4;
} else if (tc < 5) {
*tx = ((tc + 2) << 4);
*rx = tc << 4;
} else if (tc < num_tcs) {
*tx = ((tc + 8) << 3);
*rx = tc << 4;
}
} else if (num_tcs == 4) {
*rx = tc << 5;
switch (tc) {
case 0:
*tx = 0;
break;
case 1:
*tx = 64;
break;
case 2:
*tx = 96;
break;
case 3:
*tx = 112;
break;
default:
break;
}
}
break;
default:
break;
}
}

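The register-index arithmetic in ixgbe_get_first_reg_idx() above can be spot-checked against the descriptor-queue tables quoted later in this patch; the snippet below is an illustrative sketch only and assumes an 82599 adapter already configured for 8 traffic classes.

/* Illustrative spot-check, not part of the patch: on an 82599 in 8-TC mode,
 * TC4 falls into the "tc < 5" branch, so tx = (4 + 2) << 4 = 96 and
 * rx = 4 << 4 = 64, matching "Tx TC4 starts at: descriptor queue 96" and
 * the "Rx TC0-TC7 are offset by 16 queues each" comment further down.
 */
unsigned int first_tx, first_rx;

ixgbe_get_first_reg_idx(adapter, 4, &first_tx, &first_rx);
/* first_tx == 96, first_rx == 64 */
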
#define IXGBE_MAX_Q_PER_TC (IXGBE_MAX_DCB_INDICES / MAX_TRAFFIC_CLASS)

/* ixgbe_setup_tc - routine to configure net_device for multiple traffic
* classes.
*
* @netdev: net device to configure
* @tc: number of traffic classes to enable
*/
int ixgbe_setup_tc(struct net_device *dev, u8 tc)
{
int i;
unsigned int q, offset = 0;

if (!tc) {
netdev_reset_tc(dev);
} else {
struct ixgbe_adapter *adapter = netdev_priv(dev);

/* Hardware supports up to 8 traffic classes */
if (tc > MAX_TRAFFIC_CLASS || netdev_set_num_tc(dev, tc))
return -EINVAL;

/* Partition Tx queues evenly amongst traffic classes */
for (i = 0; i < tc; i++) {
q = min((int)num_online_cpus(), IXGBE_MAX_Q_PER_TC);
netdev_set_prio_tc_map(dev, i, i);
netdev_set_tc_queue(dev, i, q, offset);
offset += q;
}

/* This enables multiple traffic class support in the hardware
* which defaults to strict priority transmission by default.
* If traffic classes are already enabled perhaps through DCB
* code path then existing configuration will be used.
*/
if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
dev->dcbnl_ops && dev->dcbnl_ops->setdcbx) {
struct ieee_ets ets = {
.prio_tc = {0, 1, 2, 3, 4, 5, 6, 7},
};
u8 mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;

dev->dcbnl_ops->setdcbx(dev, mode);
dev->dcbnl_ops->ieee_setets(dev, &ets);
}
}
return 0;
}

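As a hedged walk-through of the partitioning loop above (the per-class queue count depends on IXGBE_MAX_Q_PER_TC and the number of online CPUs, so the figures below are only an assumed example):

/* Assumed example, not part of the patch: if ixgbe_setup_tc(dev, 4) runs on
 * a system where q = min(num_online_cpus(), IXGBE_MAX_Q_PER_TC) evaluates
 * to 8, the loop maps priority i to TC i and hands each class 8 Tx queues:
 *   TC0 -> queues  0..7   (offset  0)
 *   TC1 -> queues  8..15  (offset  8)
 *   TC2 -> queues 16..23  (offset 16)
 *   TC3 -> queues 24..31  (offset 24)
 * via netdev_set_prio_tc_map() and netdev_set_tc_queue().
 */
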
/**
* ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
* @adapter: board private structure to initialize
@ -4524,72 +4678,27 @@ static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
**/
static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
int i;
bool ret = false;
int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
struct net_device *dev = adapter->netdev;
int i, j, k;
u8 num_tcs = netdev_get_num_tc(dev);

if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
return false;

/* the number of queues is assumed to be symmetric */
switch (adapter->hw.mac.type) {
case ixgbe_mac_82598EB:
for (i = 0; i < dcb_i; i++) {
adapter->rx_ring[i]->reg_idx = i << 3;
adapter->tx_ring[i]->reg_idx = i << 2;
for (i = 0, k = 0; i < num_tcs; i++) {
unsigned int tx_s, rx_s;
u16 count = dev->tc_to_txq[i].count;

ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s);
for (j = 0; j < count; j++, k++) {
adapter->tx_ring[k]->reg_idx = tx_s + j;
adapter->rx_ring[k]->reg_idx = rx_s + j;
adapter->tx_ring[k]->dcb_tc = i;
adapter->rx_ring[k]->dcb_tc = i;
}
ret = true;
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
if (dcb_i == 8) {
/*
* Tx TC0 starts at: descriptor queue 0
* Tx TC1 starts at: descriptor queue 32
* Tx TC2 starts at: descriptor queue 64
* Tx TC3 starts at: descriptor queue 80
* Tx TC4 starts at: descriptor queue 96
* Tx TC5 starts at: descriptor queue 104
* Tx TC6 starts at: descriptor queue 112
* Tx TC7 starts at: descriptor queue 120
*
* Rx TC0-TC7 are offset by 16 queues each
*/
for (i = 0; i < 3; i++) {
adapter->tx_ring[i]->reg_idx = i << 5;
adapter->rx_ring[i]->reg_idx = i << 4;
}
for ( ; i < 5; i++) {
adapter->tx_ring[i]->reg_idx = ((i + 2) << 4);
adapter->rx_ring[i]->reg_idx = i << 4;
}
for ( ; i < dcb_i; i++) {
adapter->tx_ring[i]->reg_idx = ((i + 8) << 3);
adapter->rx_ring[i]->reg_idx = i << 4;
}
ret = true;
} else if (dcb_i == 4) {
/*
* Tx TC0 starts at: descriptor queue 0
* Tx TC1 starts at: descriptor queue 64
* Tx TC2 starts at: descriptor queue 96
* Tx TC3 starts at: descriptor queue 112
*
* Rx TC0-TC3 are offset by 32 queues each
*/
adapter->tx_ring[0]->reg_idx = 0;
adapter->tx_ring[1]->reg_idx = 64;
adapter->tx_ring[2]->reg_idx = 96;
adapter->tx_ring[3]->reg_idx = 112;
for (i = 0 ; i < dcb_i; i++)
adapter->rx_ring[i]->reg_idx = i << 5;
ret = true;
}
break;
default:
break;
}
return ret;

return true;
}
#endif

@ -4635,33 +4744,6 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
return false;

#ifdef CONFIG_IXGBE_DCB
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
struct ixgbe_fcoe *fcoe = &adapter->fcoe;

ixgbe_cache_ring_dcb(adapter);
/* find out queues in TC for FCoE */
fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
/*
* In 82599, the number of Tx queues for each traffic
* class for both 8-TC and 4-TC modes are:
* TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
* 8 TCs: 32 32 16 16 8 8 8 8
* 4 TCs: 64 64 32 32
* We have max 8 queues for FCoE, where 8 the is
* FCoE redirection table size. If TC for FCoE is
* less than or equal to TC3, we have enough queues
* to add max of 8 queues for FCoE, so we start FCoE
* Tx queue from the next one, i.e., reg_idx + 1.
* If TC for FCoE is above TC3, implying 8 TC mode,
* and we need 8 for FCoE, we have to take all queues
* in that traffic class for FCoE.
*/
if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
fcoe_tx_i--;
}
#endif /* CONFIG_IXGBE_DCB */
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
@ -4718,16 +4800,16 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
if (ixgbe_cache_ring_sriov(adapter))
return;

#ifdef IXGBE_FCOE
if (ixgbe_cache_ring_fcoe(adapter))
return;

#endif /* IXGBE_FCOE */
#ifdef CONFIG_IXGBE_DCB
if (ixgbe_cache_ring_dcb(adapter))
return;

#endif

#ifdef IXGBE_FCOE
if (ixgbe_cache_ring_fcoe(adapter))
return;
#endif /* IXGBE_FCOE */

if (ixgbe_cache_ring_fdir(adapter))
return;

@ -5190,8 +5272,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->dcb_cfg.rx_pba_cfg = pba_equal;
adapter->dcb_cfg.pfc_mode_enable = false;
adapter->dcb_set_bitmap = 0x00;
adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
adapter->ring_feature[RING_F_DCB].indices);
MAX_TRAFFIC_CLASS);

#endif

@ -6134,6 +6217,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
(flow_tx ? "TX" : "None"))));

netif_carrier_on(netdev);
ixgbe_check_vf_rate_limit(adapter);
} else {
/* Force detection of hung controller */
for (i = 0; i < adapter->num_tx_queues; i++) {
@ -6663,18 +6747,12 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)

protocol = vlan_get_protocol(skb);

if ((protocol == htons(ETH_P_FCOE)) ||
(protocol == htons(ETH_P_FIP))) {
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
txq += adapter->ring_feature[RING_F_FCOE].mask;
return txq;
#ifdef CONFIG_IXGBE_DCB
} else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
txq = adapter->fcoe.up;
return txq;
#endif
}
if (((protocol == htons(ETH_P_FCOE)) ||
(protocol == htons(ETH_P_FIP))) &&
(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
txq += adapter->ring_feature[RING_F_FCOE].mask;
return txq;
}
#endif

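The FCoE branch of ixgbe_select_queue() above relies on RING_F_FCOE.indices being a power of two; below is a small worked example of the masking, with values assumed purely for illustration.

/* Assumed values for illustration only: with indices = 8 FCoE queues and
 * mask holding the FCoE ring offset (say 64), an initial hash of txq = 29
 * becomes (29 & (8 - 1)) + 64 = 5 + 64 = 69, i.e. one of queues 64..71.
 */
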
@ -6684,15 +6762,6 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
return txq;
}

if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
if (skb->priority == TC_PRIO_CONTROL)
txq = adapter->ring_feature[RING_F_DCB].indices-1;
else
txq = (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK)
>> 13;
return txq;
}

return skb_tx_hash(dev, skb);
}

@ -6714,13 +6783,13 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
tx_flags |= vlan_tx_tag_get(skb);
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
tx_flags |= ((skb->queue_mapping & 0x7) << 13);
tx_flags |= tx_ring->dcb_tc << 13;
}
tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_VLAN;
} else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED &&
skb->priority != TC_PRIO_CONTROL) {
tx_flags |= ((skb->queue_mapping & 0x7) << 13);
tx_flags |= tx_ring->dcb_tc << 13;
tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_VLAN;
}
@ -6729,20 +6798,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
/* for FCoE with DCB, we force the priority to what
* was specified by the switch */
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
(protocol == htons(ETH_P_FCOE) ||
protocol == htons(ETH_P_FIP))) {
#ifdef CONFIG_IXGBE_DCB
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
<< IXGBE_TX_FLAGS_VLAN_SHIFT);
tx_flags |= ((adapter->fcoe.up << 13)
<< IXGBE_TX_FLAGS_VLAN_SHIFT);
}
#endif
/* flag for FCoE offloads */
if (protocol == htons(ETH_P_FCOE))
tx_flags |= IXGBE_TX_FLAGS_FCOE;
}
(protocol == htons(ETH_P_FCOE)))
tx_flags |= IXGBE_TX_FLAGS_FCOE;
#endif

/* four things can cause us to need a context descriptor */
@ -7015,6 +7072,9 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw,
.ndo_get_vf_config = ixgbe_ndo_get_vf_config,
.ndo_get_stats64 = ixgbe_get_stats64,
#ifdef CONFIG_IXGBE_DCB
.ndo_setup_tc = ixgbe_setup_tc,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ixgbe_netpoll,
#endif
@ -7156,8 +7216,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
else
indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);

#if defined(CONFIG_DCB)
indices = max_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES);
#ifdef IXGBE_FCOE
#elif defined(IXGBE_FCOE)
indices += min_t(unsigned int, num_possible_cpus(),
IXGBE_MAX_FCOE_INDICES);
#endif
@ -7313,8 +7374,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
IXGBE_FLAG_DCB_ENABLED);
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;

#ifdef CONFIG_IXGBE_DCB
netdev->dcbnl_ops = &dcbnl_ops;

@ -154,9 +154,6 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
udelay(mbx->usec_delay);
}

/* if we failed, all future posted messages fail until reset */
if (!countdown)
mbx->timeout = 0;
out:
return countdown ? 0 : IXGBE_ERR_MBX;
}
@ -183,9 +180,6 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
udelay(mbx->usec_delay);
}

/* if we failed, all future posted messages fail until reset */
if (!countdown)
mbx->timeout = 0;
out:
return countdown ? 0 : IXGBE_ERR_MBX;
}

@ -402,49 +402,89 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
**/
s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
{
s32 status = IXGBE_NOT_IMPLEMENTED;
s32 status = 0;
u32 time_out;
u32 max_time_out = 10;
u16 autoneg_reg;
u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
bool autoneg = false;
ixgbe_link_speed speed;

/*
* Set advertisement settings in PHY based on autoneg_advertised
* settings. If autoneg_advertised = 0, then advertise default values
* tnx devices cannot be "forced" to a autoneg 10G and fail. But can
* for a 1G.
*/
hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, &autoneg_reg);
ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);

if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
/* Set or unset auto-negotiation 10G advertisement */
hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL,
MDIO_MMD_AN,
&autoneg_reg);

if (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_1GB_FULL)
autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G;
else
autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;

hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, autoneg_reg);
hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL,
MDIO_MMD_AN,
autoneg_reg);
}

if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
/* Set or unset auto-negotiation 1G advertisement */
hw->phy.ops.read_reg(hw,
IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
MDIO_MMD_AN,
&autoneg_reg);

autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE;

hw->phy.ops.write_reg(hw,
IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
MDIO_MMD_AN,
autoneg_reg);
}

if (speed & IXGBE_LINK_SPEED_100_FULL) {
/* Set or unset auto-negotiation 100M advertisement */
hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
MDIO_MMD_AN,
&autoneg_reg);

autoneg_reg &= ~ADVERTISE_100FULL;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
autoneg_reg |= ADVERTISE_100FULL;

hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
MDIO_MMD_AN,
autoneg_reg);
}

/* Restart PHY autonegotiation and wait for completion */
hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_AN, &autoneg_reg);
hw->phy.ops.read_reg(hw, MDIO_CTRL1,
MDIO_MMD_AN, &autoneg_reg);

autoneg_reg |= MDIO_AN_CTRL1_RESTART;

hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_AN, autoneg_reg);
hw->phy.ops.write_reg(hw, MDIO_CTRL1,
MDIO_MMD_AN, autoneg_reg);

/* Wait for autonegotiation to finish */
for (time_out = 0; time_out < max_time_out; time_out++) {
udelay(10);
/* Restart PHY autonegotiation and wait for completion */
status = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN,
&autoneg_reg);
status = hw->phy.ops.read_reg(hw, MDIO_STAT1,
MDIO_MMD_AN,
&autoneg_reg);

autoneg_reg &= MDIO_AN_STAT1_COMPLETE;
if (autoneg_reg == MDIO_AN_STAT1_COMPLETE) {
status = 0;
break;
}
}

if (time_out == max_time_out)
if (time_out == max_time_out) {
status = IXGBE_ERR_LINK_SETUP;
hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out");
}

return status;
}
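Each per-speed block in ixgbe_setup_phy_link_generic() above is the same read-modify-write of an MDIO autoneg register; purely as an illustration (the helper below is hypothetical and not part of the driver), the pattern could be factored as:

static void ixgbe_adv_bit_rmw(struct ixgbe_hw *hw, u32 reg, u16 bit, bool set)
{
	u16 val;

	/* read the advertisement register, flip one bit, write it back */
	hw->phy.ops.read_reg(hw, reg, MDIO_MMD_AN, &val);
	if (set)
		val |= bit;
	else
		val &= ~bit;
	hw->phy.ops.write_reg(hw, reg, MDIO_MMD_AN, val);
}

/* e.g. the 10G block above is roughly equivalent to:
 * ixgbe_adv_bit_rmw(hw, MDIO_AN_10GBT_CTRL, MDIO_AN_10GBT_CTRL_ADV10G,
 *		     hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL);
 */
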
@ -473,6 +513,9 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
if (speed & IXGBE_LINK_SPEED_1GB_FULL)
hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

if (speed & IXGBE_LINK_SPEED_100_FULL)
hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;

/* Setup link based on the new speed settings */
hw->phy.ops.setup_link(hw);

@ -512,6 +555,180 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
return status;
}

/**
* ixgbe_check_phy_link_tnx - Determine link and speed status
* @hw: pointer to hardware structure
*
* Reads the VS1 register to determine if link is up and the current speed for
* the PHY.
**/
s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up)
{
s32 status = 0;
u32 time_out;
u32 max_time_out = 10;
u16 phy_link = 0;
u16 phy_speed = 0;
u16 phy_data = 0;

/* Initialize speed and link to default case */
*link_up = false;
*speed = IXGBE_LINK_SPEED_10GB_FULL;

/*
* Check current speed and link status of the PHY register.
* This is a vendor specific register and may have to
* be changed for other copper PHYs.
*/
for (time_out = 0; time_out < max_time_out; time_out++) {
udelay(10);
status = hw->phy.ops.read_reg(hw,
MDIO_STAT1,
MDIO_MMD_VEND1,
&phy_data);
phy_link = phy_data &
IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
phy_speed = phy_data &
IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
*link_up = true;
if (phy_speed ==
IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
*speed = IXGBE_LINK_SPEED_1GB_FULL;
break;
}
}

return status;
}

/**
* ixgbe_setup_phy_link_tnx - Set and restart autoneg
* @hw: pointer to hardware structure
*
* Restart autonegotiation and PHY and waits for completion.
**/
s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
{
s32 status = 0;
u32 time_out;
u32 max_time_out = 10;
u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
bool autoneg = false;
ixgbe_link_speed speed;

ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);

if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
/* Set or unset auto-negotiation 10G advertisement */
hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL,
MDIO_MMD_AN,
&autoneg_reg);

autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;

hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL,
MDIO_MMD_AN,
autoneg_reg);
}

if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
/* Set or unset auto-negotiation 1G advertisement */
hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
MDIO_MMD_AN,
&autoneg_reg);

autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;

hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
MDIO_MMD_AN,
autoneg_reg);
}

if (speed & IXGBE_LINK_SPEED_100_FULL) {
/* Set or unset auto-negotiation 100M advertisement */
hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
MDIO_MMD_AN,
&autoneg_reg);

autoneg_reg &= ~ADVERTISE_100FULL;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
autoneg_reg |= ADVERTISE_100FULL;

hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
MDIO_MMD_AN,
autoneg_reg);
}

/* Restart PHY autonegotiation and wait for completion */
hw->phy.ops.read_reg(hw, MDIO_CTRL1,
MDIO_MMD_AN, &autoneg_reg);

autoneg_reg |= MDIO_AN_CTRL1_RESTART;

hw->phy.ops.write_reg(hw, MDIO_CTRL1,
MDIO_MMD_AN, autoneg_reg);

/* Wait for autonegotiation to finish */
for (time_out = 0; time_out < max_time_out; time_out++) {
udelay(10);
/* Restart PHY autonegotiation and wait for completion */
status = hw->phy.ops.read_reg(hw, MDIO_STAT1,
MDIO_MMD_AN,
&autoneg_reg);

autoneg_reg &= MDIO_AN_STAT1_COMPLETE;
if (autoneg_reg == MDIO_AN_STAT1_COMPLETE)
break;
}

if (time_out == max_time_out) {
status = IXGBE_ERR_LINK_SETUP;
hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out");
}

return status;
}

/**
* ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version
* @hw: pointer to hardware structure
* @firmware_version: pointer to the PHY Firmware Version
**/
s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
u16 *firmware_version)
{
s32 status = 0;

status = hw->phy.ops.read_reg(hw, TNX_FW_REV,
MDIO_MMD_VEND1,
firmware_version);

return status;
}

/**
* ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version
* @hw: pointer to hardware structure
* @firmware_version: pointer to the PHY Firmware Version
**/
s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
u16 *firmware_version)
{
s32 status = 0;

status = hw->phy.ops.read_reg(hw, AQ_FW_REV,
MDIO_MMD_VEND1,
firmware_version);

return status;
}

/**
* ixgbe_reset_phy_nl - Performs a PHY reset
* @hw: pointer to hardware structure
@ -1476,86 +1693,6 @@ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
ixgbe_i2c_stop(hw);
}

/**
* ixgbe_check_phy_link_tnx - Determine link and speed status
* @hw: pointer to hardware structure
*
* Reads the VS1 register to determine if link is up and the current speed for
* the PHY.
**/
s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up)
{
s32 status = 0;
u32 time_out;
u32 max_time_out = 10;
u16 phy_link = 0;
u16 phy_speed = 0;
u16 phy_data = 0;

/* Initialize speed and link to default case */
*link_up = false;
*speed = IXGBE_LINK_SPEED_10GB_FULL;

/*
* Check current speed and link status of the PHY register.
* This is a vendor specific register and may have to
* be changed for other copper PHYs.
*/
for (time_out = 0; time_out < max_time_out; time_out++) {
udelay(10);
status = hw->phy.ops.read_reg(hw,
IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS,
MDIO_MMD_VEND1,
&phy_data);
phy_link = phy_data &
IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
phy_speed = phy_data &
IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
*link_up = true;
if (phy_speed ==
IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
*speed = IXGBE_LINK_SPEED_1GB_FULL;
break;
}
}

return status;
}

/**
* ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version
* @hw: pointer to hardware structure
* @firmware_version: pointer to the PHY Firmware Version
**/
s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
u16 *firmware_version)
{
s32 status = 0;

status = hw->phy.ops.read_reg(hw, TNX_FW_REV, MDIO_MMD_VEND1,
firmware_version);

return status;
}

/**
* ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version
* @hw: pointer to hardware structure
* @firmware_version: pointer to the PHY Firmware Version
**/
s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
u16 *firmware_version)
{
s32 status = 0;

status = hw->phy.ops.read_reg(hw, AQ_FW_REV, MDIO_MMD_VEND1,
firmware_version);

return status;
}

/**
* ixgbe_tn_check_overtemp - Checks if an overtemp occured.
* @hw: pointer to hardware structure

@ -108,6 +108,7 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *link_up);
s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
u16 *firmware_version);
s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,

@ -478,9 +478,90 @@ out:
return err;
}

static int ixgbe_link_mbps(int internal_link_speed)
{
switch (internal_link_speed) {
case IXGBE_LINK_SPEED_100_FULL:
return 100;
case IXGBE_LINK_SPEED_1GB_FULL:
return 1000;
case IXGBE_LINK_SPEED_10GB_FULL:
return 10000;
default:
return 0;
}
}

static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate,
int link_speed)
{
int rf_dec, rf_int;
u32 bcnrc_val;

if (tx_rate != 0) {
/* Calculate the rate factor values to set */
rf_int = link_speed / tx_rate;
rf_dec = (link_speed - (rf_int * tx_rate));
rf_dec = (rf_dec * (1<<IXGBE_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;

bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
bcnrc_val |= ((rf_int<<IXGBE_RTTBCNRC_RF_INT_SHIFT) &
IXGBE_RTTBCNRC_RF_INT_MASK);
bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
} else {
bcnrc_val = 0;
}

IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, 2*vf); /* vf Y uses queue 2*Y */
IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
}

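The rate-factor arithmetic in ixgbe_set_vf_rate_limit() above divides the line rate by the requested VF rate in fixed point with a 14-bit fractional part; a worked example follows (the 300 Mbps figure is only an assumed input).

/* Assumed example, not part of the patch: link_speed = 10000 Mbps,
 * tx_rate = 300 Mbps:
 *   rf_int = 10000 / 300                      = 33
 *   rf_dec = (10000 - 33 * 300) * 16384 / 300 = 100 * 16384 / 300 = 5461
 * RTTBCNRC then holds RS_ENA | (33 << 14) | 5461, a rate factor of about
 * 33.33, which throttles the VF's queue to roughly 10000 / 33.33 = 300 Mbps.
 */
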
void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
{
int actual_link_speed, i;
bool reset_rate = false;

/* VF Tx rate limit was not set */
if (adapter->vf_rate_link_speed == 0)
return;

actual_link_speed = ixgbe_link_mbps(adapter->link_speed);
if (actual_link_speed != adapter->vf_rate_link_speed) {
reset_rate = true;
adapter->vf_rate_link_speed = 0;
dev_info(&adapter->pdev->dev,
"Link speed has been changed. VF Transmit rate "
"is disabled\n");
}

for (i = 0; i < adapter->num_vfs; i++) {
if (reset_rate)
adapter->vfinfo[i].tx_rate = 0;

ixgbe_set_vf_rate_limit(&adapter->hw, i,
adapter->vfinfo[i].tx_rate,
actual_link_speed);
}
}

int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
{
return -EOPNOTSUPP;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
int actual_link_speed;

actual_link_speed = ixgbe_link_mbps(adapter->link_speed);
if ((vf >= adapter->num_vfs) || (!adapter->link_up) ||
(tx_rate > actual_link_speed) || (actual_link_speed != 10000) ||
((tx_rate != 0) && (tx_rate <= 10)))
/* rate limit cannot be set to 10Mb or less in 10Gb adapters */
return -EINVAL;

adapter->vf_rate_link_speed = actual_link_speed;
adapter->vfinfo[vf].tx_rate = (u16)tx_rate;
ixgbe_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);

return 0;
}

int ixgbe_ndo_get_vf_config(struct net_device *netdev,
@ -491,7 +572,7 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev,
return -EINVAL;
ivi->vf = vf;
memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
ivi->tx_rate = 0;
ivi->tx_rate = adapter->vfinfo[vf].tx_rate;
ivi->vlan = adapter->vfinfo[vf].pf_vlan;
ivi->qos = adapter->vfinfo[vf].pf_qos;
return 0;

@ -40,6 +40,7 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
int ixgbe_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi);
void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);

#endif /* _IXGBE_SRIOV_H_ */

@ -533,6 +533,12 @@
#define IXGBE_RTTDTECC 0x04990
#define IXGBE_RTTDTECC_NO_BCN 0x00000100
#define IXGBE_RTTBCNRC 0x04984
#define IXGBE_RTTBCNRC_RS_ENA 0x80000000
#define IXGBE_RTTBCNRC_RF_DEC_MASK 0x00003FFF
#define IXGBE_RTTBCNRC_RF_INT_SHIFT 14
#define IXGBE_RTTBCNRC_RF_INT_MASK \
(IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT)


/* FCoE registers */
#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */
@ -1009,6 +1015,13 @@
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */

/* MII clause 22/28 definitions */
#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */
#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */
#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/
#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/
#define IXGBE_MII_AUTONEG_REG 0x0

#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0
#define IXGBE_MAX_PHY_ADDR 32

@ -172,7 +172,7 @@ static char *ixgbevf_reg_names[] = {
"IXGBE_VFSTATUS",
"IXGBE_VFLINKS",
"IXGBE_VFRXMEMWRAP",
"IXGBE_VFRTIMER",
"IXGBE_VFFRTIMER",
"IXGBE_VTEICR",
"IXGBE_VTEICS",
"IXGBE_VTEIMS",
@ -240,7 +240,7 @@ static void ixgbevf_get_regs(struct net_device *netdev,
regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS);
regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP);
regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFRTIMER);
regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFFRTIMER);

/* Interrupt */
/* don't read EICR because it can clear interrupt causes, instead

@ -49,9 +49,9 @@

char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
"Intel(R) 82599 Virtual Function";
"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "1.1.0-k0"
#define DRV_VERSION "2.0.0-k2"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
"Copyright (c) 2009 - 2010 Intel Corporation.";

@ -31,7 +31,7 @@
#define IXGBE_VFCTRL 0x00000
#define IXGBE_VFSTATUS 0x00008
#define IXGBE_VFLINKS 0x00010
#define IXGBE_VFRTIMER 0x00048
#define IXGBE_VFFRTIMER 0x00048
#define IXGBE_VFRXMEMWRAP 0x03190
#define IXGBE_VTEICR 0x00100
#define IXGBE_VTEICS 0x00104