Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Davem says:

1) Fix JIT code generation on x86-64 for divide by zero, from Eric Dumazet.

2) tg3 header length computation correction from Eric Dumazet.

3) More build and reference counting fixes for socket memory cgroup
   code from Glauber Costa.

4) module.h snuck back into a core header after all the hard work we
   did to remove that, from Paul Gortmaker and Jesper Dangaard Brouer.

5) Fix PHY naming regression and add some new PCI IDs in stmmac, from
   Alessandro Rubini.

6) Netlink message generation fix in the new team driver, which should only
   advertise the entries that changed during events, from Jiri Pirko.

7) SRIOV VF registration and unregistration fixes, plus a missing PCI ID
   addition, from Roopa Prabhu.

8) Fix infinite loop in tx queue flush code of brcmsmac, from Stanislaw Gruszka.

9) ftgmac100/ftmac100 build fix, missing interrupt.h include.

10) Memory leak fix in net/hyperv do_set_multicast() handling, from Wei Yongjun.

11) Off by one fix in netem packet scheduler, from Vijay Subramanian.

12) TCP loss detection fix from Yuchung Cheng.

13) TCP reset packet MD5 calculation uses wrong address, fix from Shawn Lu.

14) skge carrier assertion and DMA mapping fixes from Stephen Hemminger.

15) Congestion recovery undo performed at the wrong spot in BIC and CUBIC
    congestion control modules, fix from Neal Cardwell.

16) Ethtool ETHTOOL_GSSET_INFO is unnecessarily restrictive, from Michał Mirosław.

17) Fix triggerable race in ipv6 sysctl handling, from Francesco Ruggeri.

18) Statistics bug fixes in mlx4 from Eugenia Emantayev.

19) rds locking bug fix during info dumps, from yours truly.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (67 commits)
  rds: Make rds_sock_lock BH rather than IRQ safe.
  netprio_cgroup.h: dont include module.h from other includes
  net: flow_dissector.c missing include linux/export.h
  team: send only changed options/ports via netlink
  net/hyperv: fix possible memory leak in do_set_multicast()
  drivers/net: dsa/mv88e6xxx.c files need linux/module.h
  stmmac: added PCI identifiers
  llc: Fix race condition in llc_ui_recvmsg
  stmmac: fix phy naming inconsistency
  dsa: Add reporting of silicon revision for Marvell 88E6123/88E6161/88E6165 switches.
  tg3: fix ipv6 header length computation
  skge: add byte queue limit support
  mv643xx_eth: Add Rx Discard and Rx Overrun statistics
  bnx2x: fix compilation error with SOE in fw_dump
  bnx2x: handle CHIP_REVISION during init_one
  bnx2x: allow user to change ring size in ISCSI SD mode
  bnx2x: fix Big-Endianess in ethtool -t
  bnx2x: fixed ethtool statistics for MF modes
  bnx2x: credit-leakage fixup on vlan_mac_del_all
  macvlan: fix a possible use after free
  ...
Linus Torvalds 2012-01-24 15:51:40 -08:00
commit 701b259f44
102 changed files with 909 additions and 517 deletions


@ -151,17 +151,18 @@ void bpf_jit_compile(struct sk_filter *fp)
cleanup_addr = proglen; /* epilogue address */
for (pass = 0; pass < 10; pass++) {
u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
/* no prologue/epilogue for trivial filters (RET something) */
proglen = 0;
prog = temp;
if (seen) {
if (seen_or_pass0) {
EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */
EMIT4(0x48, 0x83, 0xec, 96); /* subq $96,%rsp */
/* note : must save %rbx in case bpf_error is hit */
if (seen & (SEEN_XREG | SEEN_DATAREF))
if (seen_or_pass0 & (SEEN_XREG | SEEN_DATAREF))
EMIT4(0x48, 0x89, 0x5d, 0xf8); /* mov %rbx, -8(%rbp) */
if (seen & SEEN_XREG)
if (seen_or_pass0 & SEEN_XREG)
CLEAR_X(); /* make sure we dont leek kernel memory */
/*
@ -170,7 +171,7 @@ void bpf_jit_compile(struct sk_filter *fp)
* r9 = skb->len - skb->data_len
* r8 = skb->data
*/
if (seen & SEEN_DATAREF) {
if (seen_or_pass0 & SEEN_DATAREF) {
if (offsetof(struct sk_buff, len) <= 127)
/* mov off8(%rdi),%r9d */
EMIT4(0x44, 0x8b, 0x4f, offsetof(struct sk_buff, len));
@ -260,9 +261,14 @@ void bpf_jit_compile(struct sk_filter *fp)
case BPF_S_ALU_DIV_X: /* A /= X; */
seen |= SEEN_XREG;
EMIT2(0x85, 0xdb); /* test %ebx,%ebx */
if (pc_ret0 != -1)
EMIT_COND_JMP(X86_JE, addrs[pc_ret0] - (addrs[i] - 4));
else {
if (pc_ret0 > 0) {
/* addrs[pc_ret0 - 1] is start address of target
* (addrs[i] - 4) is the address following this jmp
* ("xor %edx,%edx; div %ebx" being 4 bytes long)
*/
EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
(addrs[i] - 4));
} else {
EMIT_COND_JMP(X86_JNE, 2 + 5);
CLEAR_A();
EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */
@ -335,12 +341,12 @@ void bpf_jit_compile(struct sk_filter *fp)
}
/* fallinto */
case BPF_S_RET_A:
if (seen) {
if (seen_or_pass0) {
if (i != flen - 1) {
EMIT_JMP(cleanup_addr - addrs[i]);
break;
}
if (seen & SEEN_XREG)
if (seen_or_pass0 & SEEN_XREG)
EMIT4(0x48, 0x8b, 0x5d, 0xf8); /* mov -8(%rbp),%rbx */
EMIT1(0xc9); /* leaveq */
}
@ -483,8 +489,9 @@ common_load: seen |= SEEN_DATAREF;
goto common_load;
case BPF_S_LDX_B_MSH:
if ((int)K < 0) {
if (pc_ret0 != -1) {
EMIT_JMP(addrs[pc_ret0] - addrs[i]);
if (pc_ret0 > 0) {
/* addrs[pc_ret0 - 1] is the start address */
EMIT_JMP(addrs[pc_ret0 - 1] - addrs[i]);
break;
}
CLEAR_A();
@ -599,13 +606,14 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
* use it to give the cleanup instruction(s) addr
*/
cleanup_addr = proglen - 1; /* ret */
if (seen)
if (seen_or_pass0)
cleanup_addr -= 1; /* leaveq */
if (seen & SEEN_XREG)
if (seen_or_pass0 & SEEN_XREG)
cleanup_addr -= 4; /* mov -8(%rbp),%rbx */
if (image) {
WARN_ON(proglen != oldproglen);
if (proglen != oldproglen)
pr_err("bpb_jit_compile proglen=%u != oldproglen=%u\n", proglen, oldproglen);
break;
}
if (proglen == oldproglen) {
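
The hunk above leans on two facts worth spelling out: classic BPF defines A / X
with X == 0 as terminating the filter with a return value of 0, and the JIT's
addrs[] array records the address *following* each instruction, so
addrs[pc_ret0 - 1] is the start of the ret-0 target. A minimal C sketch of the
semantics the emitted "test %ebx,%ebx; je" guard preserves (the helper is
hypothetical, not kernel code):

static unsigned int classic_bpf_div_x(unsigned int a, unsigned int x,
				      int *filter_done)
{
	if (x == 0) {		/* interpreter rule: whole filter returns 0 */
		*filter_done = 1;	/* i.e. the packet is dropped */
		return 0;
	}
	*filter_done = 0;
	return a / x;		/* JIT emits: xor %edx,%edx; div %ebx */
}

Without the guard, a zero X would reach the div instruction and raise a
divide-error (#DE) in kernel mode, which is exactly what this fix prevents.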


@ -909,16 +909,12 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
}
}
/* hw is a boolean parameter that determines whether we should try and
* set the hw address of the device as well as the hw address of the
* net_device
*/
static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[], int hw)
static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[])
{
struct net_device *dev = slave->dev;
struct sockaddr s_addr;
if (!hw) {
if (slave->bond->params.mode == BOND_MODE_TLB) {
memcpy(dev->dev_addr, addr, dev->addr_len);
return 0;
}
@ -948,8 +944,8 @@ static void alb_swap_mac_addr(struct bonding *bond, struct slave *slave1, struct
u8 tmp_mac_addr[ETH_ALEN];
memcpy(tmp_mac_addr, slave1->dev->dev_addr, ETH_ALEN);
alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr, bond->alb_info.rlb_enabled);
alb_set_slave_mac_addr(slave2, tmp_mac_addr, bond->alb_info.rlb_enabled);
alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr);
alb_set_slave_mac_addr(slave2, tmp_mac_addr);
}
@ -1096,8 +1092,7 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
/* Try setting slave mac to bond address and fall-through
to code handling that situation below... */
alb_set_slave_mac_addr(slave, bond->dev->dev_addr,
bond->alb_info.rlb_enabled);
alb_set_slave_mac_addr(slave, bond->dev->dev_addr);
}
/* The slave's address is equal to the address of the bond.
@ -1133,8 +1128,7 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
}
if (free_mac_slave) {
alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr,
bond->alb_info.rlb_enabled);
alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr);
pr_warning("%s: Warning: the hw address of slave %s is in use by the bond; giving it the hw address of %s\n",
bond->dev->name, slave->dev->name,
@ -1491,8 +1485,7 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
{
int res;
res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr,
bond->alb_info.rlb_enabled);
res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr);
if (res) {
return res;
}
@ -1643,8 +1636,7 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
alb_swap_mac_addr(bond, swap_slave, new_slave);
} else {
/* set the new_slave to the bond mac address */
alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr,
bond->alb_info.rlb_enabled);
alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
}
if (swap_slave) {
@ -1704,8 +1696,7 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
alb_swap_mac_addr(bond, swap_slave, bond->curr_active_slave);
alb_fasten_mac_swap(bond, swap_slave, bond->curr_active_slave);
} else {
alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr,
bond->alb_info.rlb_enabled);
alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr);
read_lock(&bond->lock);
alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr);


@ -9,6 +9,7 @@
*/
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <net/dsa.h>


@ -9,6 +9,7 @@
*/
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <net/dsa.h>
@ -20,12 +21,25 @@ static char *mv88e6123_61_65_probe(struct mii_bus *bus, int sw_addr)
ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03);
if (ret >= 0) {
ret &= 0xfff0;
if (ret == 0x1210)
if (ret == 0x1212)
return "Marvell 88E6123 (A1)";
if (ret == 0x1213)
return "Marvell 88E6123 (A2)";
if ((ret & 0xfff0) == 0x1210)
return "Marvell 88E6123";
if (ret == 0x1610)
if (ret == 0x1612)
return "Marvell 88E6161 (A1)";
if (ret == 0x1613)
return "Marvell 88E6161 (A2)";
if ((ret & 0xfff0) == 0x1610)
return "Marvell 88E6161";
if (ret == 0x1650)
if (ret == 0x1652)
return "Marvell 88E6165 (A1)";
if (ret == 0x1653)
return "Marvell 88e6165 (A2)";
if ((ret & 0xfff0) == 0x1650)
return "Marvell 88E6165";
}
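
A hedged aside on the ID decode above (the helpers are hypothetical, and the
register layout is stated from my reading of the 88E6xxx parts): port 0's
switch register 0x03 holds the product number in bits 15:4 and the silicon
revision in bits 3:0, which is why 0x1212 is 88E6123 rev A1 while the masked
compare still catches unknown revisions.

static unsigned int mv88e6xxx_product(unsigned int id)
{
	return id & 0xfff0;	/* e.g. 0x1210 for the 88E6123 family */
}

static unsigned int mv88e6xxx_revision(unsigned int id)
{
	return id & 0x000f;	/* 0x2 -> A1, 0x3 -> A2 in the hunk above */
}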


@ -9,6 +9,7 @@
*/
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <net/dsa.h>


@ -9,6 +9,7 @@
*/
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <net/dsa.h>


@ -3117,7 +3117,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
int rx_ring_size = 0;
#ifdef BCM_CNIC
if (IS_MF_ISCSI_SD(bp)) {
if (!bp->rx_ring_size && IS_MF_ISCSI_SD(bp)) {
rx_ring_size = MIN_RX_SIZE_NONTPA;
bp->rx_ring_size = rx_ring_size;
} else


@ -1738,7 +1738,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
struct bnx2x_fp_txdata *txdata = &fp_tx->txdata[0];
u16 tx_start_idx, tx_idx;
u16 rx_start_idx, rx_idx;
u16 pkt_prod, bd_prod, rx_comp_cons;
u16 pkt_prod, bd_prod;
struct sw_tx_bd *tx_buf;
struct eth_tx_start_bd *tx_start_bd;
struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
@ -1873,8 +1873,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
if (rx_idx != rx_start_idx + num_pkts)
goto test_loopback_exit;
rx_comp_cons = le16_to_cpu(fp_rx->rx_comp_cons);
cqe = &fp_rx->rx_comp_ring[RCQ_BD(rx_comp_cons)];
cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
if (!CQE_TYPE_FAST(cqe_fp_type) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
@ -2121,18 +2120,16 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
case ETH_SS_STATS:
if (is_multi(bp)) {
num_stats = bnx2x_num_stat_queues(bp) *
BNX2X_NUM_Q_STATS;
if (!IS_MF_MODE_STAT(bp))
num_stats += BNX2X_NUM_STATS;
} else {
if (IS_MF_MODE_STAT(bp)) {
num_stats = 0;
for (i = 0; i < BNX2X_NUM_STATS; i++)
if (IS_FUNC_STAT(i))
num_stats++;
} else
num_stats = BNX2X_NUM_STATS;
}
BNX2X_NUM_Q_STATS;
} else
num_stats = 0;
if (IS_MF_MODE_STAT(bp)) {
for (i = 0; i < BNX2X_NUM_STATS; i++)
if (IS_FUNC_STAT(i))
num_stats++;
} else
num_stats += BNX2X_NUM_STATS;
return num_stats;
case ETH_SS_TEST:
@ -2151,8 +2148,8 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
switch (stringset) {
case ETH_SS_STATS:
k = 0;
if (is_multi(bp)) {
k = 0;
for_each_eth_queue(bp, i) {
memset(queue_name, 0, sizeof(queue_name));
sprintf(queue_name, "%d", i);
@ -2163,20 +2160,17 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
queue_name);
k += BNX2X_NUM_Q_STATS;
}
if (IS_MF_MODE_STAT(bp))
break;
for (j = 0; j < BNX2X_NUM_STATS; j++)
strcpy(buf + (k + j)*ETH_GSTRING_LEN,
bnx2x_stats_arr[j].string);
} else {
for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
continue;
strcpy(buf + j*ETH_GSTRING_LEN,
bnx2x_stats_arr[i].string);
j++;
}
}
for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
continue;
strcpy(buf + (k + j)*ETH_GSTRING_LEN,
bnx2x_stats_arr[i].string);
j++;
}
break;
case ETH_SS_TEST:
@ -2190,10 +2184,9 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
{
struct bnx2x *bp = netdev_priv(dev);
u32 *hw_stats, *offset;
int i, j, k;
int i, j, k = 0;
if (is_multi(bp)) {
k = 0;
for_each_eth_queue(bp, i) {
hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
@ -2214,46 +2207,28 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
}
k += BNX2X_NUM_Q_STATS;
}
if (IS_MF_MODE_STAT(bp))
return;
hw_stats = (u32 *)&bp->eth_stats;
for (j = 0; j < BNX2X_NUM_STATS; j++) {
if (bnx2x_stats_arr[j].size == 0) {
/* skip this counter */
buf[k + j] = 0;
continue;
}
offset = (hw_stats + bnx2x_stats_arr[j].offset);
if (bnx2x_stats_arr[j].size == 4) {
/* 4-byte counter */
buf[k + j] = (u64) *offset;
continue;
}
/* 8-byte counter */
buf[k + j] = HILO_U64(*offset, *(offset + 1));
}
} else {
hw_stats = (u32 *)&bp->eth_stats;
for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
continue;
if (bnx2x_stats_arr[i].size == 0) {
/* skip this counter */
buf[j] = 0;
j++;
continue;
}
offset = (hw_stats + bnx2x_stats_arr[i].offset);
if (bnx2x_stats_arr[i].size == 4) {
/* 4-byte counter */
buf[j] = (u64) *offset;
j++;
continue;
}
/* 8-byte counter */
buf[j] = HILO_U64(*offset, *(offset + 1));
}
hw_stats = (u32 *)&bp->eth_stats;
for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
continue;
if (bnx2x_stats_arr[i].size == 0) {
/* skip this counter */
buf[k + j] = 0;
j++;
continue;
}
offset = (hw_stats + bnx2x_stats_arr[i].offset);
if (bnx2x_stats_arr[i].size == 4) {
/* 4-byte counter */
buf[k + j] = (u64) *offset;
j++;
continue;
}
/* 8-byte counter */
buf[k + j] = HILO_U64(*offset, *(offset + 1));
j++;
}
}


@ -941,7 +941,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
i, j, rx_bd[1], rx_bd[0], sw_bd->data);
}
start = RX_SGE(fp->rx_sge_prod);
@ -10536,6 +10536,9 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
{
struct bnx2x *bp;
int rc;
bool chip_is_e1x = (board_type == BCM57710 ||
board_type == BCM57711 ||
board_type == BCM57711E);
SET_NETDEV_DEV(dev, &pdev->dev);
bp = netdev_priv(dev);
@ -10624,7 +10627,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
if (CHIP_IS_E1x(bp)) {
if (chip_is_e1x) {
REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
@ -10635,9 +10638,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
* Enable internal target-read (in case we are probed after PF FLR).
* Must be done prior to any BAR read access. Only for 57712 and up
*/
if (board_type != BCM57710 &&
board_type != BCM57711 &&
board_type != BCM57711E)
if (!chip_is_e1x)
REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
/* Reset the load counter */


@ -50,6 +50,7 @@ static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
int exe_len,
union bnx2x_qable_obj *owner,
exe_q_validate validate,
exe_q_remove remove,
exe_q_optimize optimize,
exe_q_execute exec,
exe_q_get get)
@ -66,6 +67,7 @@ static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
/* Owner specific callbacks */
o->validate = validate;
o->remove = remove;
o->optimize = optimize;
o->execute = exec;
o->get = get;
@ -1340,6 +1342,35 @@ static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
}
}
static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
union bnx2x_qable_obj *qo,
struct bnx2x_exeq_elem *elem)
{
int rc = 0;
/* If consumption wasn't required, nothing to do */
if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
&elem->cmd_data.vlan_mac.vlan_mac_flags))
return 0;
switch (elem->cmd_data.vlan_mac.cmd) {
case BNX2X_VLAN_MAC_ADD:
case BNX2X_VLAN_MAC_MOVE:
rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
break;
case BNX2X_VLAN_MAC_DEL:
rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
break;
default:
return -EINVAL;
}
if (rc != true)
return -EINVAL;
return 0;
}
/**
* bnx2x_wait_vlan_mac - passivly wait for 5 seconds until all work completes.
*
@ -1801,8 +1832,14 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
*vlan_mac_flags)
*vlan_mac_flags) {
rc = exeq->remove(bp, exeq->owner, exeq_pos);
if (rc) {
BNX2X_ERR("Failed to remove command\n");
return rc;
}
list_del(&exeq_pos->link);
}
}
spin_unlock_bh(&exeq->lock);
@ -1908,6 +1945,7 @@ void bnx2x_init_mac_obj(struct bnx2x *bp,
bnx2x_exe_queue_init(bp,
&mac_obj->exe_queue, 1, qable_obj,
bnx2x_validate_vlan_mac,
bnx2x_remove_vlan_mac,
bnx2x_optimize_vlan_mac,
bnx2x_execute_vlan_mac,
bnx2x_exeq_get_mac);
@ -1924,6 +1962,7 @@ void bnx2x_init_mac_obj(struct bnx2x *bp,
bnx2x_exe_queue_init(bp,
&mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
qable_obj, bnx2x_validate_vlan_mac,
bnx2x_remove_vlan_mac,
bnx2x_optimize_vlan_mac,
bnx2x_execute_vlan_mac,
bnx2x_exeq_get_mac);
@ -1963,6 +2002,7 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp,
bnx2x_exe_queue_init(bp,
&vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
qable_obj, bnx2x_validate_vlan_mac,
bnx2x_remove_vlan_mac,
bnx2x_optimize_vlan_mac,
bnx2x_execute_vlan_mac,
bnx2x_exeq_get_vlan);
@ -2009,6 +2049,7 @@ void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
bnx2x_exe_queue_init(bp,
&vlan_mac_obj->exe_queue, 1, qable_obj,
bnx2x_validate_vlan_mac,
bnx2x_remove_vlan_mac,
bnx2x_optimize_vlan_mac,
bnx2x_execute_vlan_mac,
bnx2x_exeq_get_vlan_mac);
@ -2025,6 +2066,7 @@ void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
&vlan_mac_obj->exe_queue,
CLASSIFY_RULES_COUNT,
qable_obj, bnx2x_validate_vlan_mac,
bnx2x_remove_vlan_mac,
bnx2x_optimize_vlan_mac,
bnx2x_execute_vlan_mac,
bnx2x_exeq_get_vlan_mac);


@ -161,6 +161,10 @@ typedef int (*exe_q_validate)(struct bnx2x *bp,
union bnx2x_qable_obj *o,
struct bnx2x_exeq_elem *elem);
typedef int (*exe_q_remove)(struct bnx2x *bp,
union bnx2x_qable_obj *o,
struct bnx2x_exeq_elem *elem);
/**
* @return positive is entry was optimized, 0 - if not, negative
* in case of an error.
@ -203,11 +207,18 @@ struct bnx2x_exe_queue_obj {
*/
exe_q_validate validate;
/**
* Called before removing pending commands, cleaning allocated
* resources (e.g., credits from validate)
*/
exe_q_remove remove;
/**
* This will try to cancel the current pending commands list
* considering the new command.
*
* Returns the number of optimized commands or a negative error code
*
* Must run under exe_queue->lock
*/
exe_q_optimize optimize;


@ -6667,14 +6667,9 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
iph = ip_hdr(skb);
tcp_opt_len = tcp_optlen(skb);
if (skb_is_gso_v6(skb)) {
hdr_len = skb_headlen(skb) - ETH_HLEN;
} else {
u32 ip_tcp_len;
ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
hdr_len = ip_tcp_len + tcp_opt_len;
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
if (!skb_is_gso_v6(skb)) {
iph->check = 0;
iph->tot_len = htons(mss + hdr_len);
}
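
The one-liner above works for IPv4 and IPv6 alike because
skb_transport_offset() measures from skb->data (the MAC header on transmit) to
the start of the TCP header, covering the full network header including any
IPv6 extension headers; subtracting ETH_HLEN leaves exactly the L3+L4 header
bytes the hardware needs. A hedged restatement (helper name hypothetical):

#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <linux/if_ether.h>

static unsigned int tso_l3l4_hdr_len(const struct sk_buff *skb)
{
	/* (MAC + network header span) + TCP header length - MAC header */
	return skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
}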


@ -32,7 +32,7 @@
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
#define DRV_VERSION "2.1.1.28"
#define DRV_VERSION "2.1.1.31"
#define DRV_COPYRIGHT "Copyright 2008-2011 Cisco Systems, Inc"
#define ENIC_BARS_MAX 6


@ -57,11 +57,13 @@
#define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN 0x0044 /* enet dynamic vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF 0x0071 /* enet SRIOV VF */
/* Supported devices */
static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
{ 0, } /* end of table */
};
@ -132,6 +134,11 @@ int enic_sriov_enabled(struct enic *enic)
return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0;
}
static int enic_is_sriov_vf(struct enic *enic)
{
return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
}
int enic_is_valid_vf(struct enic *enic, int vf)
{
#ifdef CONFIG_PCI_IOV
@ -437,7 +444,7 @@ static void enic_mtu_check(struct enic *enic)
if (mtu && mtu != enic->port_mtu) {
enic->port_mtu = mtu;
if (enic_is_dynamic(enic)) {
if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
mtu = max_t(int, ENIC_MIN_MTU,
min_t(int, ENIC_MAX_MTU, mtu));
if (mtu != netdev->mtu)
@ -849,7 +856,7 @@ static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
struct enic *enic = netdev_priv(netdev);
if (enic_is_dynamic(enic)) {
if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
return -EADDRNOTAVAIL;
} else {
@ -1608,7 +1615,7 @@ static int enic_open(struct net_device *netdev)
for (i = 0; i < enic->rq_count; i++)
vnic_rq_enable(&enic->rq[i]);
if (!enic_is_dynamic(enic))
if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
enic_dev_add_station_addr(enic);
enic_set_rx_mode(netdev);
@ -1659,7 +1666,7 @@ static int enic_stop(struct net_device *netdev)
netif_carrier_off(netdev);
netif_tx_disable(netdev);
if (!enic_is_dynamic(enic))
if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
enic_dev_del_station_addr(enic);
for (i = 0; i < enic->wq_count; i++) {
@ -1696,7 +1703,7 @@ static int enic_change_mtu(struct net_device *netdev, int new_mtu)
if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
return -EINVAL;
if (enic_is_dynamic(enic))
if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
return -EOPNOTSUPP;
if (running)
@ -2263,10 +2270,10 @@ static int __devinit enic_probe(struct pci_dev *pdev,
int using_dac = 0;
unsigned int i;
int err;
int num_pps = 1;
#ifdef CONFIG_PCI_IOV
int pos = 0;
#endif
int num_pps = 1;
/* Allocate net device structure and initialize. Private
* instance data is initialized to zero.
@ -2376,14 +2383,14 @@ static int __devinit enic_probe(struct pci_dev *pdev,
num_pps = enic->num_vfs;
}
}
#endif
/* Allocate structure for port profiles */
enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL);
if (!enic->pp) {
pr_err("port profile alloc failed, aborting\n");
err = -ENOMEM;
goto err_out_disable_sriov;
goto err_out_disable_sriov_pp;
}
/* Issue device open to get device in known state
@ -2392,7 +2399,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
err = enic_dev_open(enic);
if (err) {
dev_err(dev, "vNIC dev open failed, aborting\n");
goto err_out_free_pp;
goto err_out_disable_sriov;
}
/* Setup devcmd lock
@ -2426,7 +2433,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
* called later by an upper layer.
*/
if (!enic_is_dynamic(enic)) {
if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic)) {
err = vnic_dev_init(enic->vdev, 0);
if (err) {
dev_err(dev, "vNIC dev init failed, aborting\n");
@ -2460,8 +2467,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
(void)enic_change_mtu(netdev, enic->port_mtu);
#ifdef CONFIG_PCI_IOV
if (enic_is_dynamic(enic) && pdev->is_virtfn &&
is_zero_ether_addr(enic->mac_addr))
if (enic_is_sriov_vf(enic) && is_zero_ether_addr(enic->mac_addr))
random_ether_addr(enic->mac_addr);
#endif
@ -2474,7 +2480,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;
if (enic_is_dynamic(enic))
if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
netdev->netdev_ops = &enic_netdev_dynamic_ops;
else
netdev->netdev_ops = &enic_netdev_ops;
@ -2516,17 +2522,17 @@ err_out_dev_deinit:
enic_dev_deinit(enic);
err_out_dev_close:
vnic_dev_close(enic->vdev);
err_out_free_pp:
kfree(enic->pp);
err_out_disable_sriov:
kfree(enic->pp);
err_out_disable_sriov_pp:
#ifdef CONFIG_PCI_IOV
if (enic_sriov_enabled(enic)) {
pci_disable_sriov(pdev);
enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
}
err_out_vnic_unregister:
vnic_dev_unregister(enic->vdev);
#endif
vnic_dev_unregister(enic->vdev);
err_out_iounmap:
enic_iounmap(enic);
err_out_release_regions:


@ -1786,8 +1786,7 @@ static void be_rx_queues_destroy(struct be_adapter *adapter)
static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
!sriov_enabled(adapter) && be_physfn(adapter) &&
!be_is_mc(adapter)) {
!sriov_enabled(adapter) && be_physfn(adapter)) {
return 1 + MAX_RSS_QS; /* one default non-RSS queue */
} else {
dev_warn(&adapter->pdev->dev,


@ -25,6 +25,7 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/netdevice.h>


@ -25,6 +25,7 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mii.h>
#include <linux/module.h>


@ -1,7 +1,7 @@
################################################################################
#
# Intel 82575 PCI-Express Ethernet Linux driver
# Copyright(c) 1999 - 2011 Intel Corporation.
# Copyright(c) 1999 - 2012 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,


@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
Copyright(c) 2007-2011 Intel Corporation.
Copyright(c) 2007-2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,


@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
Copyright(c) 2007-2011 Intel Corporation.
Copyright(c) 2007-2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,


@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
Copyright(c) 2007-2011 Intel Corporation.
Copyright(c) 2007-2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,


@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
Copyright(c) 2007-2011 Intel Corporation.
Copyright(c) 2007-2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,


@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
Copyright(c) 2007-2011 Intel Corporation.
Copyright(c) 2007-2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@ -151,7 +151,7 @@ void igb_clear_vfta_i350(struct e1000_hw *hw)
* Writes value at the given offset in the register array which stores
* the VLAN filter table.
**/
void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
{
int i;


@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
Copyright(c) 2007-2011 Intel Corporation.
Copyright(c) 2007-2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,


@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
Copyright(c) 2007-2011 Intel Corporation.
Copyright(c) 2007-2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,


@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
Copyright(c) 2007-2011 Intel Corporation.
Copyright(c) 2007-2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,


@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
Copyright(c) 2007-2011 Intel Corporation.
Copyright(c) 2007-2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,


@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
Copyright(c) 2011 Intel Corporation.
Copyright(c) 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,


@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
Copyright(c) 2007-2011 Intel Corporation.
Copyright(c) 2007-2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,


@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
Copyright(c) 2007-2011 Intel Corporation.
Copyright(c) 2007-2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,


@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
Copyright(c) 2007-2011 Intel Corporation.
Copyright(c) 2007-2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,


@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
Copyright(c) 2007-2011 Intel Corporation.
Copyright(c) 2007-2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,


@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
Copyright(c) 2007-2011 Intel Corporation.
Copyright(c) 2007-2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,


@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
Copyright(c) 2007-2011 Intel Corporation.
Copyright(c) 2007-2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@ -68,7 +68,7 @@ char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation.";
static const char igb_copyright[] = "Copyright (c) 2007-2012 Intel Corporation.";
static const struct e1000_info *igb_info_tbl[] = {
[board_82575] = &e1000_82575_info,
@ -4003,8 +4003,8 @@ set_itr_now:
}
}
void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
u32 type_tucmd, u32 mss_l4len_idx)
static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
u32 type_tucmd, u32 mss_l4len_idx)
{
struct e1000_adv_tx_context_desc *context_desc;
u16 i = tx_ring->next_to_use;
@ -5623,7 +5623,7 @@ static irqreturn_t igb_intr(int irq, void *data)
return IRQ_HANDLED;
}
void igb_ring_irq_enable(struct igb_q_vector *q_vector)
static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
{
struct igb_adapter *adapter = q_vector->adapter;
struct e1000_hw *hw = &adapter->hw;


@ -468,6 +468,5 @@ static const struct ethtool_ops igbvf_ethtool_ops = {
void igbvf_set_ethtool_ops(struct net_device *netdev)
{
/* have to "undeclare" const on this struct to remove warnings */
SET_ETHTOOL_OPS(netdev, (struct ethtool_ops *)&igbvf_ethtool_ops);
SET_ETHTOOL_OPS(netdev, &igbvf_ethtool_ops);
}


@ -1194,11 +1194,6 @@ static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
igbvf_irq_disable(adapter);
if (!test_bit(__IGBVF_DOWN, &adapter->state))
igbvf_irq_enable(adapter);
if (hw->mac.ops.set_vfta(hw, vid, false)) {
dev_err(&adapter->pdev->dev,
"Failed to remove vlan id %d\n", vid);


@ -161,19 +161,19 @@
/* Receive DMA Registers */
#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \
(0x0D000 + ((_i - 64) * 0x40)))
(0x0D000 + (((_i) - 64) * 0x40)))
#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \
(0x0D004 + ((_i - 64) * 0x40)))
(0x0D004 + (((_i) - 64) * 0x40)))
#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \
(0x0D008 + ((_i - 64) * 0x40)))
(0x0D008 + (((_i) - 64) * 0x40)))
#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \
(0x0D010 + ((_i - 64) * 0x40)))
(0x0D010 + (((_i) - 64) * 0x40)))
#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \
(0x0D018 + ((_i - 64) * 0x40)))
(0x0D018 + (((_i) - 64) * 0x40)))
#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \
(0x0D028 + ((_i - 64) * 0x40)))
(0x0D028 + (((_i) - 64) * 0x40)))
#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
(0x0D02C + ((_i - 64) * 0x40)))
(0x0D02C + (((_i) - 64) * 0x40)))
#define IXGBE_RSCDBU 0x03028
#define IXGBE_RDDCC 0x02F20
#define IXGBE_RXMEMWRAP 0x03190
@ -186,7 +186,7 @@
*/
#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
(((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
(0x0D014 + ((_i - 64) * 0x40))))
(0x0D014 + (((_i) - 64) * 0x40))))
/*
* Rx DCA Control Register:
* 00-15 : 0x02200 + n*4
@ -195,7 +195,7 @@
*/
#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \
(((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
(0x0D00C + ((_i - 64) * 0x40))))
(0x0D00C + (((_i) - 64) * 0x40))))
#define IXGBE_RDRXCTL 0x02F00
#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4))
/* 8 of these 0x03C00 - 0x03C1C */
@ -344,9 +344,9 @@
#define IXGBE_WUPL 0x05900
#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */
#define IXGBE_FHFT(_n) (0x09000 + (_n * 0x100)) /* Flex host filter table */
#define IXGBE_FHFT_EXT(_n) (0x09800 + (_n * 0x100)) /* Ext Flexible Host
* Filter Table */
#define IXGBE_FHFT(_n) (0x09000 + ((_n) * 0x100)) /* Flex host filter table */
#define IXGBE_FHFT_EXT(_n) (0x09800 + ((_n) * 0x100)) /* Ext Flexible Host
* Filter Table */
#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4
#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2
@ -1485,7 +1485,7 @@ enum {
#define IXGBE_LED_BLINK_BASE 0x00000080
#define IXGBE_LED_MODE_MASK_BASE 0x0000000F
#define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i)))
#define IXGBE_LED_MODE_SHIFT(_i) (8*(_i))
#define IXGBE_LED_MODE_SHIFT(_i) (8 * (_i))
#define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i)
#define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i)
#define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i)
@ -2068,9 +2068,9 @@ enum {
/* SR-IOV specific macros */
#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4)
#define IXGBE_MBVFICR(_i) (0x00710 + (_i * 4))
#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600))
#define IXGBE_VFLREC(_i) (0x00700 + (_i * 4))
#define IXGBE_MBVFICR(_i) (0x00710 + ((_i) * 4))
#define IXGBE_VFLRE(_i) ((((_i) & 1) ? 0x001C0 : 0x00600))
#define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4))
enum ixgbe_fdir_pballoc_type {
IXGBE_FDIR_PBALLOC_NONE = 0,


@ -56,7 +56,8 @@ struct ixgbe_stats {
offsetof(struct ixgbevf_adapter, m), \
offsetof(struct ixgbevf_adapter, b), \
offsetof(struct ixgbevf_adapter, r)
static struct ixgbe_stats ixgbe_gstrings_stats[] = {
static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
{"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc,
stats.saved_reset_vfgprc)},
{"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc,
@ -671,7 +672,7 @@ static int ixgbevf_nway_reset(struct net_device *netdev)
return 0;
}
static struct ethtool_ops ixgbevf_ethtool_ops = {
static const struct ethtool_ops ixgbevf_ethtool_ops = {
.get_settings = ixgbevf_get_settings,
.get_drvinfo = ixgbevf_get_drvinfo,
.get_regs_len = ixgbevf_get_regs_len,

View File

@ -279,12 +279,12 @@ enum ixgbevf_boards {
board_X540_vf,
};
extern struct ixgbevf_info ixgbevf_82599_vf_info;
extern struct ixgbevf_info ixgbevf_X540_vf_info;
extern struct ixgbe_mbx_operations ixgbevf_mbx_ops;
extern const struct ixgbevf_info ixgbevf_82599_vf_info;
extern const struct ixgbevf_info ixgbevf_X540_vf_info;
extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
/* needed by ethtool.c */
extern char ixgbevf_driver_name[];
extern const char ixgbevf_driver_name[];
extern const char ixgbevf_driver_version[];
extern int ixgbevf_up(struct ixgbevf_adapter *adapter);


@ -53,7 +53,7 @@
#include "ixgbevf.h"
char ixgbevf_driver_name[] = "ixgbevf";
const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
@ -917,31 +917,34 @@ static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
struct ixgbe_hw *hw = &adapter->hw;
u32 eicr;
u32 msg;
bool got_ack = false;
eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);
if (!hw->mbx.ops.check_for_ack(hw)) {
/*
* checking for the ack clears the PFACK bit. Place
* it back in the v2p_mailbox cache so that anyone
* polling for an ack will not miss it. Also
* avoid the read below because the code to read
* the mailbox will also clear the ack bit. This was
* causing lost acks. Just cache the bit and exit
* the IRQ handler.
*/
hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
if (!hw->mbx.ops.check_for_ack(hw))
got_ack = true;
if (!hw->mbx.ops.check_for_msg(hw)) {
hw->mbx.ops.read(hw, &msg, 1);
if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
mod_timer(&adapter->watchdog_timer,
round_jiffies(jiffies + 1));
if (msg & IXGBE_VT_MSGTYPE_NACK)
pr_warn("Last Request of type %2.2x to PF Nacked\n",
msg & 0xFF);
goto out;
}
/* Not an ack interrupt, go ahead and read the message */
hw->mbx.ops.read(hw, &msg, 1);
if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
mod_timer(&adapter->watchdog_timer,
round_jiffies(jiffies + 1));
/*
* checking for the ack clears the PFACK bit. Place
* it back in the v2p_mailbox cache so that anyone
* polling for an ack will not miss it
*/
if (got_ack)
hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
out:
return IRQ_HANDLED;
}


@ -26,6 +26,7 @@
*******************************************************************************/
#include "mbx.h"
#include "ixgbevf.h"
/**
* ixgbevf_poll_for_msg - Wait for message notification
@ -328,7 +329,7 @@ static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw)
return 0;
}
struct ixgbe_mbx_operations ixgbevf_mbx_ops = {
const struct ixgbe_mbx_operations ixgbevf_mbx_ops = {
.init_params = ixgbevf_init_mbx_params_vf,
.read = ixgbevf_read_mbx_vf,
.write = ixgbevf_write_mbx_vf,


@ -26,6 +26,7 @@
*******************************************************************************/
#include "vf.h"
#include "ixgbevf.h"
/**
* ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
@ -401,7 +402,7 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
return 0;
}
static struct ixgbe_mac_operations ixgbevf_mac_ops = {
static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
.init_hw = ixgbevf_init_hw_vf,
.reset_hw = ixgbevf_reset_hw_vf,
.start_hw = ixgbevf_start_hw_vf,
@ -415,12 +416,12 @@ static struct ixgbe_mac_operations ixgbevf_mac_ops = {
.set_vfta = ixgbevf_set_vfta_vf,
};
struct ixgbevf_info ixgbevf_82599_vf_info = {
const struct ixgbevf_info ixgbevf_82599_vf_info = {
.mac = ixgbe_mac_82599_vf,
.mac_ops = &ixgbevf_mac_ops,
};
struct ixgbevf_info ixgbevf_X540_vf_info = {
const struct ixgbevf_info ixgbevf_X540_vf_info = {
.mac = ixgbe_mac_X540_vf,
.mac_ops = &ixgbevf_mac_ops,
};


@ -167,7 +167,7 @@ struct ixgbevf_hw_stats {
struct ixgbevf_info {
enum ixgbe_mac_type mac;
struct ixgbe_mac_operations *mac_ops;
const struct ixgbe_mac_operations *mac_ops;
};
#endif /* __IXGBE_VF_H__ */


@ -136,6 +136,8 @@ static char mv643xx_eth_driver_version[] = "1.4";
#define INT_MASK 0x0068
#define INT_MASK_EXT 0x006c
#define TX_FIFO_URGENT_THRESHOLD 0x0074
#define RX_DISCARD_FRAME_CNT 0x0084
#define RX_OVERRUN_FRAME_CNT 0x0088
#define TXQ_FIX_PRIO_CONF_MOVED 0x00dc
#define TX_BW_RATE_MOVED 0x00e0
#define TX_BW_MTU_MOVED 0x00e8
@ -334,6 +336,9 @@ struct mib_counters {
u32 bad_crc_event;
u32 collision;
u32 late_collision;
/* Non MIB hardware counters */
u32 rx_discard;
u32 rx_overrun;
};
struct lro_counters {
@ -1225,6 +1230,10 @@ static void mib_counters_clear(struct mv643xx_eth_private *mp)
for (i = 0; i < 0x80; i += 4)
mib_read(mp, i);
/* Clear non MIB hw counters also */
rdlp(mp, RX_DISCARD_FRAME_CNT);
rdlp(mp, RX_OVERRUN_FRAME_CNT);
}
static void mib_counters_update(struct mv643xx_eth_private *mp)
@ -1262,6 +1271,9 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
p->bad_crc_event += mib_read(mp, 0x74);
p->collision += mib_read(mp, 0x78);
p->late_collision += mib_read(mp, 0x7c);
/* Non MIB hardware counters */
p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT);
p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT);
spin_unlock_bh(&mp->mib_counters_lock);
mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
@ -1413,6 +1425,8 @@ static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
MIBSTAT(bad_crc_event),
MIBSTAT(collision),
MIBSTAT(late_collision),
MIBSTAT(rx_discard),
MIBSTAT(rx_overrun),
LROSTAT(lro_aggregated),
LROSTAT(lro_flushed),
LROSTAT(lro_no_desc),


@ -931,17 +931,20 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
}
/* Allocate and setup a new buffer for receiving */
static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
struct sk_buff *skb, unsigned int bufsize)
static int skge_rx_setup(struct pci_dev *pdev,
struct skge_element *e,
struct sk_buff *skb, unsigned int bufsize)
{
struct skge_rx_desc *rd = e->desc;
u64 map;
dma_addr_t map;
map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
map = pci_map_single(pdev, skb->data, bufsize,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(pdev, map))
goto mapping_error;
rd->dma_lo = map;
rd->dma_hi = map >> 32;
rd->dma_lo = lower_32_bits(map);
rd->dma_hi = upper_32_bits(map);
e->skb = skb;
rd->csum1_start = ETH_HLEN;
rd->csum2_start = ETH_HLEN;
@ -953,6 +956,13 @@ static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
dma_unmap_addr_set(e, mapaddr, map);
dma_unmap_len_set(e, maplen, bufsize);
return 0;
mapping_error:
if (net_ratelimit())
dev_warn(&pdev->dev, "%s: rx mapping error\n",
skb->dev->name);
return -EIO;
}
/* Resume receiving using existing skb,
@ -1014,7 +1024,11 @@ static int skge_rx_fill(struct net_device *dev)
return -ENOMEM;
skb_reserve(skb, NET_IP_ALIGN);
skge_rx_setup(skge, e, skb, skge->rx_buf_size);
if (skge_rx_setup(skge->hw->pdev, e, skb, skge->rx_buf_size)) {
kfree_skb(skb);
return -ENOMEM;
}
} while ((e = e->next) != ring->start);
ring->to_clean = ring->start;
@ -2576,6 +2590,7 @@ static int skge_up(struct net_device *dev)
}
/* Initialize MAC */
netif_carrier_off(dev);
spin_lock_bh(&hw->phy_lock);
if (is_genesis(hw))
genesis_mac_init(hw, port);
@ -2728,7 +2743,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
struct skge_tx_desc *td;
int i;
u32 control, len;
u64 map;
dma_addr_t map;
if (skb_padto(skb, ETH_ZLEN))
return NETDEV_TX_OK;
@ -2742,11 +2757,14 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
e->skb = skb;
len = skb_headlen(skb);
map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(hw->pdev, map))
goto mapping_error;
dma_unmap_addr_set(e, mapaddr, map);
dma_unmap_len_set(e, maplen, len);
td->dma_lo = map;
td->dma_hi = map >> 32;
td->dma_lo = lower_32_bits(map);
td->dma_hi = upper_32_bits(map);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
const int offset = skb_checksum_start_offset(skb);
@ -2777,14 +2795,16 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
map = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
skb_frag_size(frag), DMA_TO_DEVICE);
if (dma_mapping_error(&hw->pdev->dev, map))
goto mapping_unwind;
e = e->next;
e->skb = skb;
tf = e->desc;
BUG_ON(tf->control & BMU_OWN);
tf->dma_lo = map;
tf->dma_hi = (u64) map >> 32;
tf->dma_lo = lower_32_bits(map);
tf->dma_hi = upper_32_bits(map);
dma_unmap_addr_set(e, mapaddr, map);
dma_unmap_len_set(e, maplen, skb_frag_size(frag));
@ -2797,6 +2817,8 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
td->control = BMU_OWN | BMU_SW | BMU_STF | control | len;
wmb();
netdev_sent_queue(dev, skb->len);
skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);
netif_printk(skge, tx_queued, KERN_DEBUG, skge->netdev,
@ -2812,15 +2834,35 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
}
return NETDEV_TX_OK;
mapping_unwind:
/* unroll any pages that were already mapped. */
if (e != skge->tx_ring.to_use) {
struct skge_element *u;
for (u = skge->tx_ring.to_use->next; u != e; u = u->next)
pci_unmap_page(hw->pdev, dma_unmap_addr(u, mapaddr),
dma_unmap_len(u, maplen),
PCI_DMA_TODEVICE);
e = skge->tx_ring.to_use;
}
/* undo the mapping for the skb header */
pci_unmap_single(hw->pdev, dma_unmap_addr(e, mapaddr),
dma_unmap_len(e, maplen),
PCI_DMA_TODEVICE);
mapping_error:
/* mapping error causes error message and packet to be discarded. */
if (net_ratelimit())
dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
/* Free resources associated with this reing element */
static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
u32 control)
static inline void skge_tx_unmap(struct pci_dev *pdev, struct skge_element *e,
u32 control)
{
struct pci_dev *pdev = skge->hw->pdev;
/* skb header vs. fragment */
if (control & BMU_STF)
pci_unmap_single(pdev, dma_unmap_addr(e, mapaddr),
@ -2830,13 +2872,6 @@ static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
pci_unmap_page(pdev, dma_unmap_addr(e, mapaddr),
dma_unmap_len(e, maplen),
PCI_DMA_TODEVICE);
if (control & BMU_EOF) {
netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev,
"tx done slot %td\n", e - skge->tx_ring.start);
dev_kfree_skb(e->skb);
}
}
/* Free all buffers in transmit ring */
@ -2847,10 +2882,15 @@ static void skge_tx_clean(struct net_device *dev)
for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
struct skge_tx_desc *td = e->desc;
skge_tx_free(skge, e, td->control);
skge_tx_unmap(skge->hw->pdev, e, td->control);
if (td->control & BMU_EOF)
dev_kfree_skb(e->skb);
td->control = 0;
}
netdev_reset_queue(dev);
skge->tx_ring.to_clean = e;
}
@ -3059,13 +3099,17 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
if (!nskb)
goto resubmit;
if (unlikely(skge_rx_setup(skge->hw->pdev, e, nskb, skge->rx_buf_size))) {
dev_kfree_skb(nskb);
goto resubmit;
}
pci_unmap_single(skge->hw->pdev,
dma_unmap_addr(e, mapaddr),
dma_unmap_len(e, maplen),
PCI_DMA_FROMDEVICE);
skb = e->skb;
prefetch(skb->data);
skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
}
skb_put(skb, len);
@ -3111,6 +3155,7 @@ static void skge_tx_done(struct net_device *dev)
struct skge_port *skge = netdev_priv(dev);
struct skge_ring *ring = &skge->tx_ring;
struct skge_element *e;
unsigned int bytes_compl = 0, pkts_compl = 0;
skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
@ -3120,8 +3165,20 @@ static void skge_tx_done(struct net_device *dev)
if (control & BMU_OWN)
break;
skge_tx_free(skge, e, control);
skge_tx_unmap(skge->hw->pdev, e, control);
if (control & BMU_EOF) {
netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev,
"tx done slot %td\n",
e - skge->tx_ring.start);
pkts_compl++;
bytes_compl += e->skb->len;
dev_kfree_skb(e->skb);
}
}
netdev_completed_queue(dev, pkts_compl, bytes_compl);
skge->tx_ring.to_clean = e;
/* Can run lockless until we need to synchronize to restart queue. */
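
The netdev_sent_queue()/netdev_completed_queue()/netdev_reset_queue() calls
threaded through the hunks above are the standard byte queue limits (BQL)
hooks from the "skge: add byte queue limit support" commit in this merge. A
minimal sketch of the pattern for a generic driver (all mydrv_* names are
hypothetical):

#include <linux/netdevice.h>

static netdev_tx_t mydrv_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ... map buffers and hand descriptors to the NIC ... */
	netdev_sent_queue(dev, skb->len);	/* account bytes queued to HW */
	return NETDEV_TX_OK;
}

static void mydrv_tx_done(struct net_device *dev)
{
	unsigned int bytes_compl = 0, pkts_compl = 0;

	/* ... walk completed descriptors, summing pkts_compl/bytes_compl ... */
	netdev_completed_queue(dev, pkts_compl, bytes_compl);
}

static void mydrv_tx_clean(struct net_device *dev)
{
	/* ... free every in-flight buffer ... */
	netdev_reset_queue(dev);	/* ring flushed: reset BQL accounting */
}

BQL uses the sent/completed byte counts to cap how much data may sit in the
hardware queue, keeping latency down without starving the NIC.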


@ -1247,6 +1247,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
u32 reply;
u32 slave_status = 0;
u8 is_going_down = 0;
int i;
slave_state[slave].comm_toggle ^= 1;
reply = (u32) slave_state[slave].comm_toggle << 31;
@ -1258,6 +1259,10 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
if (cmd == MLX4_COMM_CMD_RESET) {
mlx4_warn(dev, "Received reset from slave:%d\n", slave);
slave_state[slave].active = false;
for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
slave_state[slave].event_eq[i].eqn = -1;
slave_state[slave].event_eq[i].token = 0;
}
/*check if we are in the middle of FLR process,
if so return "retry" status to the slave*/
if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
@ -1452,7 +1457,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_slave_state *s_state;
int i, err, port;
int i, j, err, port;
priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
&priv->mfunc.vhcr_dma,
@ -1485,6 +1490,8 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
for (i = 0; i < dev->num_slaves; ++i) {
s_state = &priv->mfunc.master.slave_state[i];
s_state->last_cmd = MLX4_COMM_CMD_RESET;
for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
s_state->event_eq[j].eqn = -1;
__raw_writel((__force u32) 0,
&priv->mfunc.comm[i].slave_write);
__raw_writel((__force u32) 0,


@ -96,7 +96,7 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int cq_num)
{
return mlx4_cmd(dev, mailbox->dma | dev->caps.function, cq_num, 0,
return mlx4_cmd(dev, mailbox->dma, cq_num, 0,
MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_WRAPPED);
}
@ -111,7 +111,7 @@ static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int cq_num)
{
return mlx4_cmd_box(dev, dev->caps.function, mailbox ? mailbox->dma : 0,
return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0,
cq_num, mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

View File

@ -183,10 +183,11 @@ static int mlx4_en_set_wol(struct net_device *netdev,
static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int bit_count = hweight64(priv->stats_bitmap);
switch (sset) {
case ETH_SS_STATS:
return NUM_ALL_STATS +
return (priv->stats_bitmap ? bit_count : NUM_ALL_STATS) +
(priv->tx_ring_num + priv->rx_ring_num) * 2;
case ETH_SS_TEST:
return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
@ -201,14 +202,34 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int index = 0;
int i;
int i, j = 0;
spin_lock_bh(&priv->stats_lock);
for (i = 0; i < NUM_MAIN_STATS; i++)
data[index++] = ((unsigned long *) &priv->stats)[i];
for (i = 0; i < NUM_PORT_STATS; i++)
data[index++] = ((unsigned long *) &priv->port_stats)[i];
if (!(priv->stats_bitmap)) {
for (i = 0; i < NUM_MAIN_STATS; i++)
data[index++] =
((unsigned long *) &priv->stats)[i];
for (i = 0; i < NUM_PORT_STATS; i++)
data[index++] =
((unsigned long *) &priv->port_stats)[i];
for (i = 0; i < NUM_PKT_STATS; i++)
data[index++] =
((unsigned long *) &priv->pkstats)[i];
} else {
for (i = 0; i < NUM_MAIN_STATS; i++) {
if ((priv->stats_bitmap >> j) & 1)
data[index++] =
((unsigned long *) &priv->stats)[i];
j++;
}
for (i = 0; i < NUM_PORT_STATS; i++) {
if ((priv->stats_bitmap >> j) & 1)
data[index++] =
((unsigned long *) &priv->port_stats)[i];
j++;
}
}
for (i = 0; i < priv->tx_ring_num; i++) {
data[index++] = priv->tx_ring[i].packets;
data[index++] = priv->tx_ring[i].bytes;
@ -217,8 +238,6 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
data[index++] = priv->rx_ring[i].packets;
data[index++] = priv->rx_ring[i].bytes;
}
for (i = 0; i < NUM_PKT_STATS; i++)
data[index++] = ((unsigned long *) &priv->pkstats)[i];
spin_unlock_bh(&priv->stats_lock);
}
@ -247,11 +266,29 @@ static void mlx4_en_get_strings(struct net_device *dev,
case ETH_SS_STATS:
/* Add main counters */
for (i = 0; i < NUM_MAIN_STATS; i++)
strcpy(data + (index++) * ETH_GSTRING_LEN, main_strings[i]);
for (i = 0; i< NUM_PORT_STATS; i++)
strcpy(data + (index++) * ETH_GSTRING_LEN,
main_strings[i + NUM_MAIN_STATS]);
if (!priv->stats_bitmap) {
for (i = 0; i < NUM_MAIN_STATS; i++)
strcpy(data + (index++) * ETH_GSTRING_LEN,
main_strings[i]);
for (i = 0; i < NUM_PORT_STATS; i++)
strcpy(data + (index++) * ETH_GSTRING_LEN,
main_strings[i +
NUM_MAIN_STATS]);
for (i = 0; i < NUM_PKT_STATS; i++)
strcpy(data + (index++) * ETH_GSTRING_LEN,
main_strings[i +
NUM_MAIN_STATS +
NUM_PORT_STATS]);
} else
for (i = 0; i < NUM_MAIN_STATS + NUM_PORT_STATS; i++) {
if ((priv->stats_bitmap >> i) & 1) {
strcpy(data +
(index++) * ETH_GSTRING_LEN,
main_strings[i]);
}
if (!(priv->stats_bitmap >> i))
break;
}
for (i = 0; i < priv->tx_ring_num; i++) {
sprintf(data + (index++) * ETH_GSTRING_LEN,
"tx%d_packets", i);
@ -264,9 +301,6 @@ static void mlx4_en_get_strings(struct net_device *dev,
sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_bytes", i);
}
for (i = 0; i< NUM_PKT_STATS; i++)
strcpy(data + (index++) * ETH_GSTRING_LEN,
main_strings[i + NUM_MAIN_STATS + NUM_PORT_STATS]);
break;
}
}
@ -479,6 +513,95 @@ static void mlx4_en_get_ringparam(struct net_device *dev,
param->tx_pending = priv->tx_ring[0].size;
}
static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
return priv->rx_ring_num;
}
static int mlx4_en_get_rxfh_indir(struct net_device *dev, u32 *ring_index)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_rss_map *rss_map = &priv->rss_map;
int rss_rings;
size_t n = priv->rx_ring_num;
int err = 0;
rss_rings = priv->prof->rss_rings ?: priv->rx_ring_num;
while (n--) {
ring_index[n] = rss_map->qps[n % rss_rings].qpn -
rss_map->base_qpn;
}
return err;
}
static int mlx4_en_set_rxfh_indir(struct net_device *dev,
const u32 *ring_index)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int port_up = 0;
int err = 0;
int i;
int rss_rings = 0;
/* Calculate RSS table size and make sure flows are spread evenly
* between rings
*/
for (i = 0; i < priv->rx_ring_num; i++) {
if (i > 0 && !ring_index[i] && !rss_rings)
rss_rings = i;
if (ring_index[i] != (i % (rss_rings ?: priv->rx_ring_num)))
return -EINVAL;
}
if (!rss_rings)
rss_rings = priv->rx_ring_num;
/* RSS table size must be an order of 2 */
if (!is_power_of_2(rss_rings))
return -EINVAL;
mutex_lock(&mdev->state_lock);
if (priv->port_up) {
port_up = 1;
mlx4_en_stop_port(dev);
}
priv->prof->rss_rings = rss_rings;
if (port_up) {
err = mlx4_en_start_port(dev);
if (err)
en_err(priv, "Failed starting port\n");
}
mutex_unlock(&mdev->state_lock);
return err;
}
static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int err = 0;
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
cmd->data = priv->rx_ring_num;
break;
default:
err = -EOPNOTSUPP;
break;
}
return err;
}
const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_drvinfo = mlx4_en_get_drvinfo,
.get_settings = mlx4_en_get_settings,
@ -498,6 +621,10 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
.set_pauseparam = mlx4_en_set_pauseparam,
.get_ringparam = mlx4_en_get_ringparam,
.set_ringparam = mlx4_en_set_ringparam,
.get_rxnfc = mlx4_en_get_rxnfc,
.get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
.get_rxfh_indir = mlx4_en_get_rxfh_indir,
.set_rxfh_indir = mlx4_en_set_rxfh_indir,
};


@ -62,10 +62,6 @@ static const char mlx4_en_version[] =
* Device scope module parameters
*/
/* Enable RSS TCP traffic */
MLX4_EN_PARM_INT(tcp_rss, 1,
"Enable RSS for incomming TCP traffic or disabled (0)");
/* Enable RSS UDP traffic */
MLX4_EN_PARM_INT(udp_rss, 1,
"Enable RSS for incomming UDP traffic or disabled (0)");
@ -104,7 +100,6 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
struct mlx4_en_profile *params = &mdev->profile;
int i;
params->tcp_rss = tcp_rss;
params->udp_rss = udp_rss;
if (params->udp_rss && !(mdev->dev->caps.flags
& MLX4_DEV_CAP_FLAG_UDP_RSS)) {
@ -120,6 +115,7 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
params->prof[i].tx_ring_num = MLX4_EN_NUM_TX_RINGS +
(!!pfcrx) * MLX4_EN_NUM_PPP_RINGS;
params->prof[i].rss_rings = 0;
}
return 0;


@ -702,6 +702,8 @@ int mlx4_en_start_port(struct net_device *dev)
/* Schedule multicast task to populate multicast list */
queue_work(mdev->workqueue, &priv->mcast_task);
mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
priv->port_up = true;
netif_tx_start_all_queues(dev);
return 0;
@ -807,12 +809,37 @@ static void mlx4_en_restart(struct work_struct *work)
mutex_unlock(&mdev->state_lock);
}
static void mlx4_en_clear_stats(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int i;
if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
en_dbg(HW, priv, "Failed dumping statistics\n");
memset(&priv->stats, 0, sizeof(priv->stats));
memset(&priv->pstats, 0, sizeof(priv->pstats));
memset(&priv->pkstats, 0, sizeof(priv->pkstats));
memset(&priv->port_stats, 0, sizeof(priv->port_stats));
for (i = 0; i < priv->tx_ring_num; i++) {
priv->tx_ring[i].bytes = 0;
priv->tx_ring[i].packets = 0;
priv->tx_ring[i].tx_csum = 0;
}
for (i = 0; i < priv->rx_ring_num; i++) {
priv->rx_ring[i].bytes = 0;
priv->rx_ring[i].packets = 0;
priv->rx_ring[i].csum_ok = 0;
priv->rx_ring[i].csum_none = 0;
}
}
static int mlx4_en_open(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int i;
int err = 0;
mutex_lock(&mdev->state_lock);
@ -823,21 +850,8 @@ static int mlx4_en_open(struct net_device *dev)
goto out;
}
/* Reset HW statistics and performance counters */
if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
en_dbg(HW, priv, "Failed dumping statistics\n");
memset(&priv->stats, 0, sizeof(priv->stats));
memset(&priv->pstats, 0, sizeof(priv->pstats));
for (i = 0; i < priv->tx_ring_num; i++) {
priv->tx_ring[i].bytes = 0;
priv->tx_ring[i].packets = 0;
}
for (i = 0; i < priv->rx_ring_num; i++) {
priv->rx_ring[i].bytes = 0;
priv->rx_ring[i].packets = 0;
}
/* Reset HW statistics and SW counters */
mlx4_en_clear_stats(dev);
err = mlx4_en_start_port(dev);
if (err)


@ -853,6 +853,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
struct mlx4_en_rss_map *rss_map = &priv->rss_map;
struct mlx4_qp_context context;
struct mlx4_rss_context *rss_context;
int rss_rings;
void *ptr;
u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
MLX4_RSS_TCP_IPV6);
@ -893,10 +894,15 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
priv->rx_ring[0].cqn, &context);
if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
rss_rings = priv->rx_ring_num;
else
rss_rings = priv->prof->rss_rings;
ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path)
+ MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
rss_context = ptr;
rss_context->base_qpn = cpu_to_be32(ilog2(priv->rx_ring_num) << 24 |
rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 |
(rss_map->base_qpn));
rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
if (priv->mdev->profile.udp_rss) {
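
The hunk above is why the ethtool path insists on a power of two: the RSS context packs log2 of the ring count into the top byte next to the base QPN. A sketch of that encoding (field layout assumed from the shift above):

	/* bits 31:24: log2(#rings); low bits: base QPN (host order here). */
	static unsigned int rss_qpn_field(unsigned int rss_rings, unsigned int base_qpn)
	{
		unsigned int log = 0;

		while ((1u << log) < rss_rings)
			log++;				/* ilog2() for a power of two */
		return (log << 24) | base_qpn;
	}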


@ -513,25 +513,22 @@ int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_slave_event_eq_info *event_eq =
&priv->mfunc.master.slave_state[slave].event_eq;
priv->mfunc.master.slave_state[slave].event_eq;
u32 in_modifier = vhcr->in_modifier;
u32 eqn = in_modifier & 0x1FF;
u64 in_param = vhcr->in_param;
int err = 0;
int i;
if (slave == dev->caps.function)
err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn,
0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_NATIVE);
if (!err) {
if (in_modifier >> 31) {
/* unmap */
event_eq->event_type &= ~in_param;
} else {
event_eq->eqn = eqn;
event_eq->event_type = in_param;
}
}
if (!err)
for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i)
if (in_param & (1LL << i))
event_eq[i].eqn = in_modifier >> 31 ? -1 : eqn;
return err;
}
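
The rewritten wrapper drops the single event_eq record in favour of one slot per event type: each set bit in the 64-bit in_param selects a slot, and the top bit of in_modifier turns the request into an unmap (eqn = -1). A toy version of that bookkeeping (names are illustrative):

	#define EVENT_TYPES 64

	struct event_slot { int eqn; };		/* -1 means "not mapped" */

	static void map_events(struct event_slot *slots, unsigned long long mask,
			       int eqn, int unmap)
	{
		int i;

		for (i = 0; i < EVENT_TYPES; i++)
			if (mask & (1ULL << i))
				slots[i].eqn = unmap ? -1 : eqn;
	}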
@ -546,7 +543,7 @@ static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int eq_num)
{
return mlx4_cmd(dev, mailbox->dma | dev->caps.function, eq_num, 0,
return mlx4_cmd(dev, mailbox->dma, eq_num, 0,
MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_WRAPPED);
}
@ -554,7 +551,7 @@ static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int eq_num)
{
return mlx4_cmd_box(dev, dev->caps.function, mailbox->dma, eq_num,
return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num,
0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_WRAPPED);
}


@ -158,7 +158,6 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
#define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0
#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1
#define QUERY_FUNC_CAP_FUNCTION_OFFSET 0x3
#define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4
#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x10
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x14
@ -182,9 +181,6 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
field = 1 << 7; /* enable only ethernet interface */
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
field = slave;
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FUNCTION_OFFSET);
field = dev->caps.num_ports;
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
@ -249,9 +245,6 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap)
goto out;
}
MLX4_GET(field, outbox, QUERY_FUNC_CAP_FUNCTION_OFFSET);
func_cap->function = field;
MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
func_cap->num_ports = field;


@ -119,7 +119,6 @@ struct mlx4_dev_cap {
};
struct mlx4_func_cap {
u8 function;
u8 num_ports;
u8 flags;
u32 pf_context_behaviour;


@ -108,7 +108,7 @@ static struct mlx4_profile default_profile = {
.num_cq = 1 << 16,
.num_mcg = 1 << 13,
.num_mpt = 1 << 19,
.num_mtt = 1 << 20,
.num_mtt = 1 << 20, /* It is really num mtt segments */
};
static int log_num_mac = 7;
@ -471,7 +471,6 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
return -ENOSYS;
}
dev->caps.function = func_cap.function;
dev->caps.num_ports = func_cap.num_ports;
dev->caps.num_qps = func_cap.qp_quota;
dev->caps.num_srqs = func_cap.srq_quota;


@ -388,9 +388,8 @@ struct mlx4_slave_eqe {
};
struct mlx4_slave_event_eq_info {
u32 eqn;
int eqn;
u16 token;
u64 event_type;
};
struct mlx4_profile {
@ -449,6 +448,8 @@ struct mlx4_steer_index {
struct list_head duplicates;
};
#define MLX4_EVENT_TYPES_NUM 64
struct mlx4_slave_state {
u8 comm_toggle;
u8 last_cmd;
@ -461,7 +462,8 @@ struct mlx4_slave_state {
struct mlx4_slave_eqe eq[MLX4_MFUNC_MAX_EQES];
struct list_head mcast_filters[MLX4_MAX_PORTS + 1];
struct mlx4_vlan_fltr *vlan_filter[MLX4_MAX_PORTS + 1];
struct mlx4_slave_event_eq_info event_eq;
/* event type to eq number lookup */
struct mlx4_slave_event_eq_info event_eq[MLX4_EVENT_TYPES_NUM];
u16 eq_pi;
u16 eq_ci;
spinlock_t lock;


@ -325,11 +325,11 @@ struct mlx4_en_port_profile {
u8 rx_ppp;
u8 tx_pause;
u8 tx_ppp;
int rss_rings;
};
struct mlx4_en_profile {
int rss_xor;
int tcp_rss;
int udp_rss;
u8 rss_mask;
u32 active_ports;
@ -476,6 +476,7 @@ struct mlx4_en_priv {
struct mlx4_en_perf_stats pstats;
struct mlx4_en_pkt_stats pkstats;
struct mlx4_en_port_stats port_stats;
u64 stats_bitmap;
char *mc_addrs;
int mc_addrs_cnt;
struct mlx4_en_stat_out_mbox hw_stats;


@ -291,7 +291,7 @@ static u32 key_to_hw_index(u32 key)
static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int mpt_index)
{
return mlx4_cmd(dev, mailbox->dma | dev->caps.function , mpt_index,
return mlx4_cmd(dev, mailbox->dma, mpt_index,
0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_WRAPPED);
}


@ -52,8 +52,7 @@ int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn)
*pdn = mlx4_bitmap_alloc(&priv->pd_bitmap);
if (*pdn == -1)
return -ENOMEM;
if (mlx4_is_mfunc(dev))
*pdn |= (dev->caps.function + 1) << NOT_MASKED_PD_BITS;
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_pd_alloc);


@ -44,6 +44,11 @@
#define MLX4_VLAN_VALID (1u << 31)
#define MLX4_VLAN_MASK 0xfff
#define MLX4_STATS_TRAFFIC_COUNTERS_MASK 0xfULL
#define MLX4_STATS_TRAFFIC_DROPS_MASK 0xc0ULL
#define MLX4_STATS_ERROR_COUNTERS_MASK 0x1ffc30ULL
#define MLX4_STATS_PORT_COUNTERS_MASK 0x1fe00000ULL
void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
{
int i;
@ -898,6 +903,24 @@ int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
if (slave != dev->caps.function)
return 0;
return mlx4_common_dump_eth_stats(dev, slave,
vhcr->in_modifier, outbox);
}
void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap)
{
if (!mlx4_is_mfunc(dev)) {
*stats_bitmap = 0;
return;
}
*stats_bitmap = (MLX4_STATS_TRAFFIC_COUNTERS_MASK |
MLX4_STATS_TRAFFIC_DROPS_MASK |
MLX4_STATS_PORT_COUNTERS_MASK);
if (mlx4_is_master(dev))
*stats_bitmap |= MLX4_STATS_ERROR_COUNTERS_MASK;
}
EXPORT_SYMBOL(mlx4_set_stats_bitmap);
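
Composed, the masks defined above give a slave bitmap of 0xf | 0xc0 | 0x1fe00000 = 0x1fe000cf, and the master additionally ORs in the error-counter mask:

	/* Worked values for the bitmap composed above: */
	unsigned long long slave  = 0xfULL | 0xc0ULL | 0x1fe00000ULL;	/* = 0x1fe000cf */
	unsigned long long master = slave | 0x1ffc30ULL;		/* = 0x1ffffcff */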


@ -110,7 +110,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs, MAX_MSIX);
profile[MLX4_RES_DMPT].num = request->num_mpt;
profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS;
profile[MLX4_RES_MTT].num = request->num_mtt;
profile[MLX4_RES_MTT].num = request->num_mtt * (1 << log_mtts_per_seg);
profile[MLX4_RES_MCG].num = request->num_mcg;
for (i = 0; i < MLX4_RES_NUM; ++i) {


@ -162,7 +162,7 @@ static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
cpu_to_be32(qp->qpn);
ret = mlx4_cmd(dev, mailbox->dma | dev->caps.function,
ret = mlx4_cmd(dev, mailbox->dma,
qp->qpn | (!!sqd_event << 31),
new_state == MLX4_QP_STATE_RST ? 2 : 0,
op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native);


@ -1561,11 +1561,6 @@ static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
return be32_to_cpu(mpt->mtt_sz);
}
static int mr_get_pdn(struct mlx4_mpt_entry *mpt)
{
return be32_to_cpu(mpt->pd_flags) & 0xffffff;
}
static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
@ -1602,16 +1597,6 @@ static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
return total_pages;
}
static int qp_get_pdn(struct mlx4_qp_context *qpc)
{
return be32_to_cpu(qpc->pd) & 0xffffff;
}
static int pdn2slave(int pdn)
{
return (pdn >> NOT_MASKED_PD_BITS) - 1;
}
static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
int size, struct res_mtt *mtt)
{
@ -1656,11 +1641,6 @@ int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
mpt->mtt = mtt;
}
if (pdn2slave(mr_get_pdn(inbox->buf)) != slave) {
err = -EPERM;
goto ex_put;
}
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
if (err)
goto ex_put;
@ -1792,11 +1772,6 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
if (err)
goto ex_put_mtt;
if (pdn2slave(qp_get_pdn(qpc)) != slave) {
err = -EPERM;
goto ex_put_mtt;
}
err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
if (err)
goto ex_put_mtt;
@ -2048,10 +2023,10 @@ int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
if (!priv->mfunc.master.slave_state)
return -EINVAL;
event_eq = &priv->mfunc.master.slave_state[slave].event_eq;
event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
/* Create the event only if the slave is registered */
if ((event_eq->event_type & (1 << eqe->type)) == 0)
if (event_eq->eqn < 0)
return 0;
mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
@ -2289,11 +2264,6 @@ ex_put:
return err;
}
static int srq_get_pdn(struct mlx4_srq_context *srqc)
{
return be32_to_cpu(srqc->pd) & 0xffffff;
}
static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
{
int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
@ -2333,11 +2303,6 @@ int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
if (err)
goto ex_put_mtt;
if (pdn2slave(srq_get_pdn(srqc)) != slave) {
err = -EPERM;
goto ex_put_mtt;
}
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
if (err)
goto ex_put_mtt;


@ -67,7 +67,7 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
static int mlx4_SW2HW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int srq_num)
{
return mlx4_cmd(dev, mailbox->dma | dev->caps.function, srq_num, 0,
return mlx4_cmd(dev, mailbox->dma, srq_num, 0,
MLX4_CMD_SW2HW_SRQ, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_WRAPPED);
}


@ -1745,6 +1745,12 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
int err;
/* Ensure we have a valid MAC */
if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
pr_err("Error: Invalid MAC address\n");
return -EINVAL;
}
/* hardware has been reset, we need to reload some things */
pch_gbe_set_multi(netdev);
@ -2468,9 +2474,14 @@ static int pch_gbe_probe(struct pci_dev *pdev,
memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
if (!is_valid_ether_addr(netdev->dev_addr)) {
dev_err(&pdev->dev, "Invalid MAC Address\n");
ret = -EIO;
goto err_free_adapter;
/*
* If the MAC is invalid (or just missing), display a warning
* but do not abort setting up the device. pch_gbe_up will
* prevent the interface from being brought up until a valid MAC
* is set.
*/
dev_err(&pdev->dev, "Invalid MAC address, "
"interface disabled.\n");
}
setup_timer(&adapter->watchdog_timer, pch_gbe_watchdog,
(unsigned long)adapter);


@ -154,7 +154,7 @@ int stmmac_mdio_register(struct net_device *ndev)
else
irqlist = priv->mii_irq;
new_bus->name = "STMMAC MII Bus";
new_bus->name = "stmmac";
new_bus->read = &stmmac_mdio_read;
new_bus->write = &stmmac_mdio_write;
new_bus->reset = &stmmac_mdio_reset;


@ -170,9 +170,9 @@ static int stmmac_pci_resume(struct pci_dev *pdev)
#define STMMAC_DEVICE_ID 0x1108
static DEFINE_PCI_DEVICE_TABLE(stmmac_id_table) = {
{
PCI_DEVICE(STMMAC_VENDOR_ID, STMMAC_DEVICE_ID)}, {
}
{PCI_DEVICE(STMMAC_VENDOR_ID, STMMAC_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_MAC)},
{}
};
MODULE_DEVICE_TABLE(pci, stmmac_id_table);


@ -68,11 +68,11 @@ static void do_set_multicast(struct work_struct *w)
nvdev = hv_get_drvdata(ndevctx->device_ctx);
if (nvdev == NULL)
return;
goto out;
rdev = nvdev->extension;
if (rdev == NULL)
return;
goto out;
if (net->flags & IFF_PROMISC)
rndis_filter_set_packet_filter(rdev,
@ -83,6 +83,7 @@ static void do_set_multicast(struct work_struct *w)
NDIS_PACKET_TYPE_ALL_MULTICAST |
NDIS_PACKET_TYPE_DIRECTED);
out:
kfree(w);
}


@ -173,6 +173,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN);
if (!skb)
return RX_HANDLER_CONSUMED;
eth = eth_hdr(skb);
src = macvlan_hash_lookup(port, eth->h_source);
if (!src)
/* frame comes from an external address */


@ -38,12 +38,11 @@
/**
* mdiobus_alloc_size - allocate a mii_bus structure
* @size: extra amount of memory to allocate for private storage.
* If non-zero, then bus->priv points to that memory.
*
* Description: called by a bus driver to allocate an mii_bus
* structure to fill in.
*
* 'size' is an an extra amount of memory to allocate for private storage.
* If non-zero, then bus->priv is points to that memory.
*/
struct mii_bus *mdiobus_alloc_size(size_t size)
{


@ -92,9 +92,9 @@ struct team_option *__team_find_option(struct team *team, const char *opt_name)
return NULL;
}
int team_options_register(struct team *team,
const struct team_option *option,
size_t option_count)
int __team_options_register(struct team *team,
const struct team_option *option,
size_t option_count)
{
int i;
struct team_option **dst_opts;
@ -116,8 +116,11 @@ int team_options_register(struct team *team,
}
}
for (i = 0; i < option_count; i++)
for (i = 0; i < option_count; i++) {
dst_opts[i]->changed = true;
dst_opts[i]->removed = false;
list_add_tail(&dst_opts[i]->list, &team->option_list);
}
kfree(dst_opts);
return 0;
@ -130,10 +133,22 @@ rollback:
return err;
}
EXPORT_SYMBOL(team_options_register);
static void __team_options_mark_removed(struct team *team,
const struct team_option *option,
size_t option_count)
{
int i;
static void __team_options_change_check(struct team *team,
struct team_option *changed_option);
for (i = 0; i < option_count; i++, option++) {
struct team_option *del_opt;
del_opt = __team_find_option(team, option->name);
if (del_opt) {
del_opt->changed = true;
del_opt->removed = true;
}
}
}
static void __team_options_unregister(struct team *team,
const struct team_option *option,
@ -152,12 +167,29 @@ static void __team_options_unregister(struct team *team,
}
}
static void __team_options_change_check(struct team *team);
int team_options_register(struct team *team,
const struct team_option *option,
size_t option_count)
{
int err;
err = __team_options_register(team, option, option_count);
if (err)
return err;
__team_options_change_check(team);
return 0;
}
EXPORT_SYMBOL(team_options_register);
void team_options_unregister(struct team *team,
const struct team_option *option,
size_t option_count)
{
__team_options_mark_removed(team, option, option_count);
__team_options_change_check(team);
__team_options_unregister(team, option, option_count);
__team_options_change_check(team, NULL);
}
EXPORT_SYMBOL(team_options_unregister);
@ -176,7 +208,8 @@ static int team_option_set(struct team *team, struct team_option *option,
if (err)
return err;
__team_options_change_check(team, option);
option->changed = true;
__team_options_change_check(team);
return err;
}
@ -653,6 +686,7 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
return -ENOENT;
}
port->removed = true;
__team_port_change_check(port, false);
team_port_list_del_port(team, port);
team_adjust_ops(team);
@ -1200,10 +1234,9 @@ err_fill:
return err;
}
static int team_nl_fill_options_get_changed(struct sk_buff *skb,
u32 pid, u32 seq, int flags,
struct team *team,
struct team_option *changed_option)
static int team_nl_fill_options_get(struct sk_buff *skb,
u32 pid, u32 seq, int flags,
struct team *team, bool fillall)
{
struct nlattr *option_list;
void *hdr;
@ -1223,12 +1256,19 @@ static int team_nl_fill_options_get_changed(struct sk_buff *skb,
struct nlattr *option_item;
long arg;
/* Include only changed options if fill all mode is not on */
if (!fillall && !option->changed)
continue;
option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
if (!option_item)
goto nla_put_failure;
NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_NAME, option->name);
if (option == changed_option)
if (option->changed) {
NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_CHANGED);
option->changed = false;
}
if (option->removed)
NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_REMOVED);
switch (option->type) {
case TEAM_OPTION_TYPE_U32:
NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32);
@ -1255,13 +1295,13 @@ nla_put_failure:
return -EMSGSIZE;
}
static int team_nl_fill_options_get(struct sk_buff *skb,
struct genl_info *info, int flags,
struct team *team)
static int team_nl_fill_options_get_all(struct sk_buff *skb,
struct genl_info *info, int flags,
struct team *team)
{
return team_nl_fill_options_get_changed(skb, info->snd_pid,
info->snd_seq, NLM_F_ACK,
team, NULL);
return team_nl_fill_options_get(skb, info->snd_pid,
info->snd_seq, NLM_F_ACK,
team, true);
}
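
The fillall flag is what lets one fill routine serve both a full GET and a change event: event callers pass fillall = false, so only entries whose changed flag is set are serialized, and the flag is cleared as part of the dump. A toy model of that protocol (types reduced to the essentials):

	#include <stdbool.h>

	struct opt { const char *name; bool changed; };

	/* Returns how many entries were serialized; mirrors the fill loop above. */
	static int fill(struct opt *opts, int n, bool fillall)
	{
		int i, sent = 0;

		for (i = 0; i < n; i++) {
			if (!fillall && !opts[i].changed)
				continue;		/* event mode: skip unchanged */
			opts[i].changed = false;	/* consumed by this dump */
			sent++;
		}
		return sent;
	}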
static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
@ -1273,7 +1313,7 @@ static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
if (!team)
return -EINVAL;
err = team_nl_send_generic(info, team, team_nl_fill_options_get);
err = team_nl_send_generic(info, team, team_nl_fill_options_get_all);
team_nl_team_put(team);
@ -1365,10 +1405,10 @@ team_put:
return err;
}
static int team_nl_fill_port_list_get_changed(struct sk_buff *skb,
u32 pid, u32 seq, int flags,
struct team *team,
struct team_port *changed_port)
static int team_nl_fill_port_list_get(struct sk_buff *skb,
u32 pid, u32 seq, int flags,
struct team *team,
bool fillall)
{
struct nlattr *port_list;
void *hdr;
@ -1387,12 +1427,19 @@ static int team_nl_fill_port_list_get_changed(struct sk_buff *skb,
list_for_each_entry(port, &team->port_list, list) {
struct nlattr *port_item;
/* Include only changed ports if fill all mode is not on */
if (!fillall && !port->changed)
continue;
port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
if (!port_item)
goto nla_put_failure;
NLA_PUT_U32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex);
if (port == changed_port)
if (port->changed) {
NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_CHANGED);
port->changed = false;
}
if (port->removed)
NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_REMOVED);
if (port->linkup)
NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_LINKUP);
NLA_PUT_U32(skb, TEAM_ATTR_PORT_SPEED, port->speed);
@ -1408,13 +1455,13 @@ nla_put_failure:
return -EMSGSIZE;
}
static int team_nl_fill_port_list_get(struct sk_buff *skb,
struct genl_info *info, int flags,
struct team *team)
static int team_nl_fill_port_list_get_all(struct sk_buff *skb,
struct genl_info *info, int flags,
struct team *team)
{
return team_nl_fill_port_list_get_changed(skb, info->snd_pid,
info->snd_seq, NLM_F_ACK,
team, NULL);
return team_nl_fill_port_list_get(skb, info->snd_pid,
info->snd_seq, NLM_F_ACK,
team, true);
}
static int team_nl_cmd_port_list_get(struct sk_buff *skb,
@ -1427,7 +1474,7 @@ static int team_nl_cmd_port_list_get(struct sk_buff *skb,
if (!team)
return -EINVAL;
err = team_nl_send_generic(info, team, team_nl_fill_port_list_get);
err = team_nl_send_generic(info, team, team_nl_fill_port_list_get_all);
team_nl_team_put(team);
@ -1464,8 +1511,7 @@ static struct genl_multicast_group team_change_event_mcgrp = {
.name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME,
};
static int team_nl_send_event_options_get(struct team *team,
struct team_option *changed_option)
static int team_nl_send_event_options_get(struct team *team)
{
struct sk_buff *skb;
int err;
@ -1475,8 +1521,7 @@ static int team_nl_send_event_options_get(struct team *team,
if (!skb)
return -ENOMEM;
err = team_nl_fill_options_get_changed(skb, 0, 0, 0, team,
changed_option);
err = team_nl_fill_options_get(skb, 0, 0, 0, team, false);
if (err < 0)
goto err_fill;
@ -1489,18 +1534,17 @@ err_fill:
return err;
}
static int team_nl_send_event_port_list_get(struct team_port *port)
static int team_nl_send_event_port_list_get(struct team *team)
{
struct sk_buff *skb;
int err;
struct net *net = dev_net(port->team->dev);
struct net *net = dev_net(team->dev);
skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (!skb)
return -ENOMEM;
err = team_nl_fill_port_list_get_changed(skb, 0, 0, 0,
port->team, port);
err = team_nl_fill_port_list_get(skb, 0, 0, 0, team, false);
if (err < 0)
goto err_fill;
@ -1544,12 +1588,11 @@ static void team_nl_fini(void)
* Change checkers
******************/
static void __team_options_change_check(struct team *team,
struct team_option *changed_option)
static void __team_options_change_check(struct team *team)
{
int err;
err = team_nl_send_event_options_get(team, changed_option);
err = team_nl_send_event_options_get(team);
if (err)
netdev_warn(team->dev, "Failed to send options change via netlink\n");
}
@ -1559,9 +1602,10 @@ static void __team_port_change_check(struct team_port *port, bool linkup)
{
int err;
if (port->linkup == linkup)
if (!port->removed && port->linkup == linkup)
return;
port->changed = true;
port->linkup = linkup;
if (linkup) {
struct ethtool_cmd ecmd;
@ -1577,7 +1621,7 @@ static void __team_port_change_check(struct team_port *port, bool linkup)
port->duplex = 0;
send_event:
err = team_nl_send_event_port_list_get(port);
err = team_nl_send_event_port_list_get(port->team);
if (err)
netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n",
port->dev->name);


@ -31,6 +31,12 @@ config B43_BCMA
depends on B43 && BCMA
default y
config B43_BCMA_EXTRA
bool "Hardware support that overlaps with the brcmsmac driver"
depends on B43_BCMA
default n if BRCMSMAC || BRCMSMAC_MODULE
default y
config B43_SSB
bool
depends on B43 && SSB


@ -116,8 +116,10 @@ MODULE_PARM_DESC(pio, "Use PIO accesses by default: 0=DMA, 1=PIO");
#ifdef CONFIG_B43_BCMA
static const struct bcma_device_id b43_bcma_tbl[] = {
BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x11, BCMA_ANY_CLASS),
#ifdef CONFIG_B43_BCMA_EXTRA
BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x17, BCMA_ANY_CLASS),
BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x18, BCMA_ANY_CLASS),
#endif
BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x1D, BCMA_ANY_CLASS),
BCMA_CORETABLE_END
};


@ -7981,13 +7981,21 @@ int brcms_c_get_curband(struct brcms_c_info *wlc)
void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, bool drop)
{
int timeout = 20;
/* flush packet queue when requested */
if (drop)
brcmu_pktq_flush(&wlc->pkt_queue->q, false, NULL, NULL);
/* wait for queue and DMA fifos to run dry */
while (!pktq_empty(&wlc->pkt_queue->q) || brcms_txpktpendtot(wlc) > 0)
while (!pktq_empty(&wlc->pkt_queue->q) || brcms_txpktpendtot(wlc) > 0) {
brcms_msleep(wlc->wl, 1);
if (--timeout == 0)
break;
}
WARN_ON_ONCE(timeout == 0);
}
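
The fix bounds what used to be an unbounded drain loop: at most 20 one-millisecond sleeps, then give up with a one-time warning instead of hanging the caller. The general shape, stubbed out of the driver:

	#include <stdbool.h>

	/* Poll a condition with a hard bound; returns 0 on success, -1 on timeout. */
	static int wait_drained(bool (*drained)(void *), void *ctx)
	{
		int timeout = 20;			/* ~20 x 1 ms, as in the driver */

		while (!drained(ctx)) {
			/* the real code sleeps 1 ms here via brcms_msleep() */
			if (--timeout == 0)
				return -1;
		}
		return 0;
	}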
void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval)


@ -46,6 +46,10 @@ struct team_port {
u32 speed;
u8 duplex;
/* Custom gennetlink interface related flags */
bool changed;
bool removed;
struct rcu_head rcu;
};
@ -72,6 +76,10 @@ struct team_option {
enum team_option_type type;
int (*getter)(struct team *team, void *arg);
int (*setter)(struct team *team, void *arg);
/* Custom gennetlink interface related flags */
bool changed;
bool removed;
};
struct team_mode {
@ -207,6 +215,7 @@ enum {
TEAM_ATTR_OPTION_CHANGED, /* flag */
TEAM_ATTR_OPTION_TYPE, /* u8 */
TEAM_ATTR_OPTION_DATA, /* dynamic */
TEAM_ATTR_OPTION_REMOVED, /* flag */
__TEAM_ATTR_OPTION_MAX,
TEAM_ATTR_OPTION_MAX = __TEAM_ATTR_OPTION_MAX - 1,
@ -227,6 +236,7 @@ enum {
TEAM_ATTR_PORT_LINKUP, /* flag */
TEAM_ATTR_PORT_SPEED, /* u32 */
TEAM_ATTR_PORT_DUPLEX, /* u8 */
TEAM_ATTR_PORT_REMOVED, /* flag */
__TEAM_ATTR_PORT_MAX,
TEAM_ATTR_PORT_MAX = __TEAM_ATTR_PORT_MAX - 1,


@ -621,6 +621,7 @@ void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn);
void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn);
void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap);
int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);


@ -109,12 +109,18 @@ void res_counter_init(struct res_counter *counter, struct res_counter *parent);
*
* returns 0 on success and <0 if the counter->usage will exceed the
* counter->limit; the _locked variant expects the counter->lock to be taken
*
* charge_nofail works the same, except that it charges the resource
* counter unconditionally, and returns < 0 if after the current
* charge we are over limit.
*/
int __must_check res_counter_charge_locked(struct res_counter *counter,
unsigned long val);
int __must_check res_counter_charge(struct res_counter *counter,
unsigned long val, struct res_counter **limit_fail_at);
int __must_check res_counter_charge_nofail(struct res_counter *counter,
unsigned long val, struct res_counter **limit_fail_at);
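
A plausible usage shape, mirroring how the socket-memory accounting later in this diff consumes it: the charge always sticks, and the negative return merely reports that a limit was crossed (OVER_LIMIT, parent_status and the counter pointer are taken from the memcg hunk below):

	struct res_counter *fail;

	if (res_counter_charge_nofail(prot->memory_allocated,
				      amt << PAGE_SHIFT, &fail) < 0)
		*parent_status = OVER_LIMIT;	/* charged anyway; just flag it */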
/*
* uncharge - tell that some portion of the resource is released
@ -142,7 +148,10 @@ static inline unsigned long long res_counter_margin(struct res_counter *cnt)
unsigned long flags;
spin_lock_irqsave(&cnt->lock, flags);
margin = cnt->limit - cnt->usage;
if (cnt->limit > cnt->usage)
margin = cnt->limit - cnt->usage;
else
margin = 0;
spin_unlock_irqrestore(&cnt->lock, flags);
return margin;
}


@ -192,7 +192,6 @@ enum
LINUX_MIB_TCPPARTIALUNDO, /* TCPPartialUndo */
LINUX_MIB_TCPDSACKUNDO, /* TCPDSACKUndo */
LINUX_MIB_TCPLOSSUNDO, /* TCPLossUndo */
LINUX_MIB_TCPLOSS, /* TCPLoss */
LINUX_MIB_TCPLOSTRETRANSMIT, /* TCPLostRetransmit */
LINUX_MIB_TCPRENOFAILURES, /* TCPRenoFailures */
LINUX_MIB_TCPSACKFAILURES, /* TCPSackFailures */


@ -1388,6 +1388,6 @@ struct hci_inquiry_req {
};
#define IREQ_CACHE_FLUSH 0x0001
extern int enable_hs;
extern bool enable_hs;
#endif /* __HCI_H */


@ -13,7 +13,6 @@
#ifndef _NETPRIO_CGROUP_H
#define _NETPRIO_CGROUP_H
#include <linux/module.h>
#include <linux/cgroup.h>
#include <linux/hardirq.h>
#include <linux/rcupdate.h>


@ -226,6 +226,7 @@ struct cg_proto;
* @sk_ack_backlog: current listen backlog
* @sk_max_ack_backlog: listen backlog set in listen()
* @sk_priority: %SO_PRIORITY setting
* @sk_cgrp_prioidx: socket group's priority map index
* @sk_type: socket type (%SOCK_STREAM, etc)
* @sk_protocol: which protocol this socket belongs in this network family
* @sk_peer_pid: &struct pid for this socket's peer
@ -921,7 +922,7 @@ inline void sk_refcnt_debug_release(const struct sock *sk)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif /* SOCK_REFCNT_DEBUG */
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
#if defined(CONFIG_CGROUP_MEM_RES_CTLR_KMEM) && defined(CONFIG_NET)
extern struct jump_label_key memcg_socket_limit_enabled;
static inline struct cg_proto *parent_cg_proto(struct proto *proto,
struct cg_proto *cg_proto)
@ -1007,9 +1008,8 @@ static inline void memcg_memory_allocated_add(struct cg_proto *prot,
struct res_counter *fail;
int ret;
ret = res_counter_charge(prot->memory_allocated,
amt << PAGE_SHIFT, &fail);
ret = res_counter_charge_nofail(prot->memory_allocated,
amt << PAGE_SHIFT, &fail);
if (ret < 0)
*parent_status = OVER_LIMIT;
}
@ -1053,12 +1053,11 @@ sk_memory_allocated_add(struct sock *sk, int amt, int *parent_status)
}
static inline void
sk_memory_allocated_sub(struct sock *sk, int amt, int parent_status)
sk_memory_allocated_sub(struct sock *sk, int amt)
{
struct proto *prot = sk->sk_prot;
if (mem_cgroup_sockets_enabled && sk->sk_cgrp &&
parent_status != OVER_LIMIT) /* Otherwise was uncharged already */
if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
memcg_memory_allocated_sub(sk->sk_cgrp, amt);
atomic_long_sub(amt, prot->memory_allocated);


@ -66,6 +66,31 @@ done:
return ret;
}
int res_counter_charge_nofail(struct res_counter *counter, unsigned long val,
struct res_counter **limit_fail_at)
{
int ret, r;
unsigned long flags;
struct res_counter *c;
r = ret = 0;
*limit_fail_at = NULL;
local_irq_save(flags);
for (c = counter; c != NULL; c = c->parent) {
spin_lock(&c->lock);
r = res_counter_charge_locked(c, val);
if (r)
c->usage += val;
spin_unlock(&c->lock);
if (r < 0 && ret == 0) {
*limit_fail_at = c;
ret = r;
}
}
local_irq_restore(flags);
return ret;
}
void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
{
if (WARN_ON(counter->usage < val))


@ -379,7 +379,7 @@ static void mem_cgroup_put(struct mem_cgroup *memcg);
static bool mem_cgroup_is_root(struct mem_cgroup *memcg);
void sock_update_memcg(struct sock *sk)
{
if (static_branch(&memcg_socket_limit_enabled)) {
if (mem_cgroup_sockets_enabled) {
struct mem_cgroup *memcg;
BUG_ON(!sk->sk_prot->proto_cgroup);
@ -411,7 +411,7 @@ EXPORT_SYMBOL(sock_update_memcg);
void sock_release_memcg(struct sock *sk)
{
if (static_branch(&memcg_socket_limit_enabled) && sk->sk_cgrp) {
if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
struct mem_cgroup *memcg;
WARN_ON(!sk->sk_cgrp->memcg);
memcg = sk->sk_cgrp->memcg;


@ -55,7 +55,7 @@
#define AUTO_OFF_TIMEOUT 2000
int enable_hs;
bool enable_hs;
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);


@ -1311,6 +1311,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_GRXCSUM:
case ETHTOOL_GTXCSUM:
case ETHTOOL_GSG:
case ETHTOOL_GSSET_INFO:
case ETHTOOL_GSTRINGS:
case ETHTOOL_GTSO:
case ETHTOOL_GPERMADDR:


@ -1,4 +1,5 @@
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>


@ -767,8 +767,8 @@ done:
return i;
}
static unsigned long num_arg(const char __user * user_buffer,
unsigned long maxlen, unsigned long *num)
static long num_arg(const char __user *user_buffer, unsigned long maxlen,
unsigned long *num)
{
int i;
*num = 0;
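
The signed return type matters because the helper can hand back an error code; stored in an unsigned long, a negative errno turns into a huge positive value and the caller's "< 0" check can never fire. A two-line illustration:

	unsigned long bad = (unsigned long)-14;	/* 0xfff...ff2: (bad < 0) is never true */
	long good = -14;			/* (good < 0) works as intended */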


@ -1827,7 +1827,7 @@ suppress_allocation:
/* Alas. Undo changes. */
sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
sk_memory_allocated_sub(sk, amt, parent_status);
sk_memory_allocated_sub(sk, amt);
return 0;
}
@ -1840,7 +1840,7 @@ EXPORT_SYMBOL(__sk_mem_schedule);
void __sk_mem_reclaim(struct sock *sk)
{
sk_memory_allocated_sub(sk,
sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT, 0);
sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
if (sk_under_memory_pressure(sk) &&


@ -216,7 +216,6 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TCPPartialUndo", LINUX_MIB_TCPPARTIALUNDO),
SNMP_MIB_ITEM("TCPDSACKUndo", LINUX_MIB_TCPDSACKUNDO),
SNMP_MIB_ITEM("TCPLossUndo", LINUX_MIB_TCPLOSSUNDO),
SNMP_MIB_ITEM("TCPLoss", LINUX_MIB_TCPLOSS),
SNMP_MIB_ITEM("TCPLostRetransmit", LINUX_MIB_TCPLOSTRETRANSMIT),
SNMP_MIB_ITEM("TCPRenoFailures", LINUX_MIB_TCPRENOFAILURES),
SNMP_MIB_ITEM("TCPSackFailures", LINUX_MIB_TCPSACKFAILURES),


@ -63,7 +63,6 @@ static inline void bictcp_reset(struct bictcp *ca)
{
ca->cnt = 0;
ca->last_max_cwnd = 0;
ca->loss_cwnd = 0;
ca->last_cwnd = 0;
ca->last_time = 0;
ca->epoch_start = 0;
@ -72,7 +71,11 @@ static inline void bictcp_reset(struct bictcp *ca)
static void bictcp_init(struct sock *sk)
{
bictcp_reset(inet_csk_ca(sk));
struct bictcp *ca = inet_csk_ca(sk);
bictcp_reset(ca);
ca->loss_cwnd = 0;
if (initial_ssthresh)
tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
}
@ -127,7 +130,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
}
/* if in slow start or link utilization is very low */
if (ca->loss_cwnd == 0) {
if (ca->last_max_cwnd == 0) {
if (ca->cnt > 20) /* increase cwnd 5% per RTT */
ca->cnt = 20;
}
@ -185,7 +188,7 @@ static u32 bictcp_undo_cwnd(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
const struct bictcp *ca = inet_csk_ca(sk);
return max(tp->snd_cwnd, ca->last_max_cwnd);
return max(tp->snd_cwnd, ca->loss_cwnd);
}
static void bictcp_state(struct sock *sk, u8 new_state)


@ -107,7 +107,6 @@ static inline void bictcp_reset(struct bictcp *ca)
{
ca->cnt = 0;
ca->last_max_cwnd = 0;
ca->loss_cwnd = 0;
ca->last_cwnd = 0;
ca->last_time = 0;
ca->bic_origin_point = 0;
@ -142,7 +141,10 @@ static inline void bictcp_hystart_reset(struct sock *sk)
static void bictcp_init(struct sock *sk)
{
bictcp_reset(inet_csk_ca(sk));
struct bictcp *ca = inet_csk_ca(sk);
bictcp_reset(ca);
ca->loss_cwnd = 0;
if (hystart)
bictcp_hystart_reset(sk);
@ -275,7 +277,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
* The initial growth of cubic function may be too conservative
* when the available bandwidth is still unknown.
*/
if (ca->loss_cwnd == 0 && ca->cnt > 20)
if (ca->last_max_cwnd == 0 && ca->cnt > 20)
ca->cnt = 20; /* increase cwnd 5% per RTT */
/* TCP Friendly */
@ -342,7 +344,7 @@ static u32 bictcp_undo_cwnd(struct sock *sk)
{
struct bictcp *ca = inet_csk_ca(sk);
return max(tcp_sk(sk)->snd_cwnd, ca->last_max_cwnd);
return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
}
static void bictcp_state(struct sock *sk, u8 new_state)
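
The point of the BIC and CUBIC changes: last_max_cwnd is cleared on every reset and now only steers the growth heuristics, while loss_cwnd, snapshotted when loss is declared, is what a spurious-loss undo must restore. A toy model of the corrected pair (the 717/1024 factor is CUBIC's beta, used here purely for illustration):

	struct cc { unsigned int cwnd, loss_cwnd, last_max_cwnd; };

	static unsigned int on_loss(struct cc *ca)	/* ssthresh-time snapshot */
	{
		ca->loss_cwnd = ca->cwnd;
		return (ca->cwnd * 717) / 1024;
	}

	static unsigned int undo(const struct cc *ca)	/* spurious loss: restore */
	{
		return ca->cwnd > ca->loss_cwnd ? ca->cwnd : ca->loss_cwnd;
	}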


@ -105,7 +105,6 @@ int sysctl_tcp_abc __read_mostly;
#define FLAG_SYN_ACKED 0x10 /* This ACK acknowledged SYN. */
#define FLAG_DATA_SACKED 0x20 /* New SACK. */
#define FLAG_ECE 0x40 /* ECE in this ACK */
#define FLAG_DATA_LOST 0x80 /* SACK detected data lossage. */
#define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/
#define FLAG_ONLY_ORIG_SACKED 0x200 /* SACKs only non-rexmit sent before RTO */
#define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
@ -1040,13 +1039,11 @@ static void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp,
* These 6 states form finite state machine, controlled by the following events:
* 1. New ACK (+SACK) arrives. (tcp_sacktag_write_queue())
* 2. Retransmission. (tcp_retransmit_skb(), tcp_xmit_retransmit_queue())
* 3. Loss detection event of one of three flavors:
* 3. Loss detection event of two flavors:
* A. Scoreboard estimator decided the packet is lost.
* A'. Reno "three dupacks" marks head of queue lost.
* A''. Its FACK modfication, head until snd.fack is lost.
* B. SACK arrives sacking data transmitted after never retransmitted
* hole was sent out.
* C. SACK arrives sacking SND.NXT at the moment, when the
* A''. Its FACK modification, head until snd.fack is lost.
* B. SACK arrives sacking SND.NXT at the moment, when the
* segment was retransmitted.
* 4. D-SACK added new rule: D-SACK changes any tag to S.
*
@ -1153,7 +1150,7 @@ static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
}
/* Check for lost retransmit. This superb idea is borrowed from "ratehalving".
* Event "C". Later note: FACK people cheated me again 8), we have to account
* Event "B". Later note: FACK people cheated me again 8), we have to account
* for reordering! Ugly, but should help.
*
* Search retransmitted skbs from write_queue that were sent when snd_nxt was
@ -1844,10 +1841,6 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
if (found_dup_sack && ((i + 1) == first_sack_index))
next_dup = &sp[i + 1];
/* Event "B" in the comment above. */
if (after(end_seq, tp->high_seq))
state.flag |= FLAG_DATA_LOST;
/* Skip too early cached blocks */
while (tcp_sack_cache_ok(tp, cache) &&
!before(start_seq, cache->end_seq))
@ -2515,8 +2508,11 @@ static void tcp_timeout_skbs(struct sock *sk)
tcp_verify_left_out(tp);
}
/* Mark head of queue up as lost. With RFC3517 SACK, the packets is
* is against sacked "cnt", otherwise it's against facked "cnt"
/* Detect loss in event "A" above by marking head of queue up as lost.
* For FACK or non-SACK(Reno) senders, the first "packets" number of segments
* are considered lost. For RFC3517 SACK, a segment is considered lost if it
* has at least tp->reordering SACKed segments above it; "packets" refers to
* the maximum SACKed segments to pass before reaching this limit.
*/
static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
{
@ -2525,6 +2521,8 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
int cnt, oldcnt;
int err;
unsigned int mss;
/* Use SACK to deduce losses of new sequences sent during recovery */
const u32 loss_high = tcp_is_sack(tp) ? tp->snd_nxt : tp->high_seq;
WARN_ON(packets > tp->packets_out);
if (tp->lost_skb_hint) {
@ -2546,7 +2544,7 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
tp->lost_skb_hint = skb;
tp->lost_cnt_hint = cnt;
if (after(TCP_SKB_CB(skb)->end_seq, tp->high_seq))
if (after(TCP_SKB_CB(skb)->end_seq, loss_high))
break;
oldcnt = cnt;
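
A worked example of the new bound (values invented, in kernel style; is_sack stands in for tcp_is_sack(tp)): with high_seq = 1000 at recovery start and snd_nxt = 3000, a SACK sender may now mark segments up to 3000 lost, where the old code stopped at 1000; non-SACK (Reno) senders keep the conservative bound:

	u32 high_seq = 1000, snd_nxt = 3000;
	u32 loss_high = is_sack ? snd_nxt : high_seq;	/* 3000 vs 1000 */
	/* marking stops at the first skb whose end_seq is after loss_high */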
@ -3033,19 +3031,10 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
if (tcp_check_sack_reneging(sk, flag))
return;
/* C. Process data loss notification, provided it is valid. */
if (tcp_is_fack(tp) && (flag & FLAG_DATA_LOST) &&
before(tp->snd_una, tp->high_seq) &&
icsk->icsk_ca_state != TCP_CA_Open &&
tp->fackets_out > tp->reordering) {
tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering, 0);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSS);
}
/* D. Check consistency of the current state. */
/* C. Check consistency of the current state. */
tcp_verify_left_out(tp);
/* E. Check state exit conditions. State can be terminated
/* D. Check state exit conditions. State can be terminated
* when high_seq is ACKed. */
if (icsk->icsk_ca_state == TCP_CA_Open) {
WARN_ON(tp->retrans_out != 0);
@ -3077,7 +3066,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
}
}
/* F. Process state. */
/* E. Process state. */
switch (icsk->icsk_ca_state) {
case TCP_CA_Recovery:
if (!(flag & FLAG_SND_UNA_ADVANCED)) {


@ -631,7 +631,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
arg.iov[0].iov_len = sizeof(rep.th);
#ifdef CONFIG_TCP_MD5SIG
key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL;
key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->saddr) : NULL;
if (key) {
rep.opt[0] = htonl((TCPOPT_NOP << 24) |
(TCPOPT_NOP << 16) |


@ -502,29 +502,31 @@ static void addrconf_forward_change(struct net *net, __s32 newf)
rcu_read_unlock();
}
static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
{
struct net *net;
int old;
if (!rtnl_trylock())
return restart_syscall();
net = (struct net *)table->extra2;
if (p == &net->ipv6.devconf_dflt->forwarding)
return 0;
old = *p;
*p = newf;
if (!rtnl_trylock()) {
/* Restore the original values before restarting */
*p = old;
return restart_syscall();
if (p == &net->ipv6.devconf_dflt->forwarding) {
rtnl_unlock();
return 0;
}
if (p == &net->ipv6.devconf_all->forwarding) {
__s32 newf = net->ipv6.devconf_all->forwarding;
net->ipv6.devconf_dflt->forwarding = newf;
addrconf_forward_change(net, newf);
} else if ((!*p) ^ (!old))
} else if ((!newf) ^ (!old))
dev_forward_change((struct inet6_dev *)table->extra1);
rtnl_unlock();
if (*p)
if (newf)
rt6_purge_dflt_routers(net);
return 1;
}
@ -4260,9 +4262,17 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write,
int *valp = ctl->data;
int val = *valp;
loff_t pos = *ppos;
ctl_table lctl;
int ret;
ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
/*
* ctl->data points to idev->cnf.forwarding, we should
* not modify it until we get the rtnl lock.
*/
lctl = *ctl;
lctl.data = &val;
ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
if (write)
ret = addrconf_fixup_forwarding(ctl, valp, val);
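
Both sysctl handlers now follow the same race-free pattern: copy the ctl_table, point the copy at a stack variable so proc_dointvec() never touches the live idev field, and publish the parsed value only under the RTNL lock. The general shape (apply_under_rtnl() is a stand-in for addrconf_fixup_forwarding()/addrconf_disable_ipv6()):

	static int sysctl_handler(struct ctl_table *ctl, int write,
				  void __user *buffer, size_t *lenp, loff_t *ppos)
	{
		int val = *(int *)ctl->data;	/* snapshot of the live value */
		struct ctl_table lctl = *ctl;	/* shallow copy of the entry */
		int ret;

		lctl.data = &val;		/* proc_dointvec() writes only here */
		ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
		if (write)
			ret = apply_under_rtnl(ctl->data, val);	/* takes rtnl */
		return ret;
	}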
@ -4300,26 +4310,27 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
rcu_read_unlock();
}
static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int old)
static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
{
struct net *net;
int old;
if (!rtnl_trylock())
return restart_syscall();
net = (struct net *)table->extra2;
old = *p;
*p = newf;
if (p == &net->ipv6.devconf_dflt->disable_ipv6)
if (p == &net->ipv6.devconf_dflt->disable_ipv6) {
rtnl_unlock();
return 0;
if (!rtnl_trylock()) {
/* Restore the original values before restarting */
*p = old;
return restart_syscall();
}
if (p == &net->ipv6.devconf_all->disable_ipv6) {
__s32 newf = net->ipv6.devconf_all->disable_ipv6;
net->ipv6.devconf_dflt->disable_ipv6 = newf;
addrconf_disable_change(net, newf);
} else if ((!*p) ^ (!old))
} else if ((!newf) ^ (!old))
dev_disable_change((struct inet6_dev *)table->extra1);
rtnl_unlock();
@ -4333,9 +4344,17 @@ int addrconf_sysctl_disable(ctl_table *ctl, int write,
int *valp = ctl->data;
int val = *valp;
loff_t pos = *ppos;
ctl_table lctl;
int ret;
ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
/*
* ctl->data points to idev->cnf.disable_ipv6, we should
* not modify it until we get the rtnl lock.
*/
lctl = *ctl;
lctl.data = &val;
ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
if (write)
ret = addrconf_disable_ipv6(ctl, valp, val);


@ -1083,7 +1083,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
#ifdef CONFIG_TCP_MD5SIG
if (sk)
key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr);
#endif
if (th->ack)


@ -713,6 +713,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
struct sk_buff *skb = NULL;
struct sock *sk = sock->sk;
struct llc_sock *llc = llc_sk(sk);
unsigned long cpu_flags;
size_t copied = 0;
u32 peek_seq = 0;
u32 *seq;
@ -838,7 +839,9 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
goto copy_uaddr;
if (!(flags & MSG_PEEK)) {
spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
sk_eat_skb(sk, skb, 0);
spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
*seq = 0;
}
@ -859,7 +862,9 @@ copy_uaddr:
llc_cmsg_rcv(msg, skb);
if (!(flags & MSG_PEEK)) {
spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
sk_eat_skb(sk, skb, 0);
spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
*seq = 0;
}


@ -225,9 +225,9 @@ KEY_OPS(key);
key, &key_##name##_ops);
void ieee80211_debugfs_key_add(struct ieee80211_key *key)
{
static int keycount;
char buf[50];
char buf[100];
struct sta_info *sta;
if (!key->local->debugfs.keys)
@ -244,7 +244,8 @@ void ieee80211_debugfs_key_add(struct ieee80211_key *key)
sta = key->sta;
if (sta) {
sprintf(buf, "../../stations/%pM", sta->sta.addr);
sprintf(buf, "../../netdev:%s/stations/%pM",
sta->sdata->name, sta->sta.addr);
key->debugfs.stalink =
debugfs_create_symlink("station", key->debugfs.dir, buf);
}


@ -119,12 +119,12 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) +
sizeof(mgmt->u.action.u.mesh_action);
skb = dev_alloc_skb(local->hw.extra_tx_headroom +
skb = dev_alloc_skb(local->tx_headroom +
hdr_len +
2 + 37); /* max HWMP IE */
if (!skb)
return -1;
skb_reserve(skb, local->hw.extra_tx_headroom);
skb_reserve(skb, local->tx_headroom);
mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
memset(mgmt, 0, hdr_len);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
@ -250,12 +250,12 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
if (time_before(jiffies, ifmsh->next_perr))
return -EAGAIN;
skb = dev_alloc_skb(local->hw.extra_tx_headroom +
skb = dev_alloc_skb(local->tx_headroom +
hdr_len +
2 + 15 /* PERR IE */);
if (!skb)
return -1;
skb_reserve(skb, local->tx_headroom + local->hw.extra_tx_headroom);
skb_reserve(skb, local->tx_headroom);
mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
memset(mgmt, 0, hdr_len);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |


@ -172,7 +172,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.self_prot) +
sizeof(mgmt->u.action.u.self_prot);
skb = dev_alloc_skb(local->hw.extra_tx_headroom +
skb = dev_alloc_skb(local->tx_headroom +
hdr_len +
2 + /* capability info */
2 + /* AID */
@ -186,7 +186,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
sdata->u.mesh.ie_len);
if (!skb)
return -1;
skb_reserve(skb, local->hw.extra_tx_headroom);
skb_reserve(skb, local->tx_headroom);
mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
memset(mgmt, 0, hdr_len);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |


@ -2750,7 +2750,6 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_work *wk;
u8 bssid[ETH_ALEN];
bool assoc_bss = false;
@ -2763,30 +2762,47 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
assoc_bss = true;
} else {
bool not_auth_yet = false;
struct ieee80211_work *tmp, *wk = NULL;
mutex_unlock(&ifmgd->mtx);
mutex_lock(&local->mtx);
list_for_each_entry(wk, &local->work_list, list) {
if (wk->sdata != sdata)
list_for_each_entry(tmp, &local->work_list, list) {
if (tmp->sdata != sdata)
continue;
if (wk->type != IEEE80211_WORK_DIRECT_PROBE &&
wk->type != IEEE80211_WORK_AUTH &&
wk->type != IEEE80211_WORK_ASSOC &&
wk->type != IEEE80211_WORK_ASSOC_BEACON_WAIT)
if (tmp->type != IEEE80211_WORK_DIRECT_PROBE &&
tmp->type != IEEE80211_WORK_AUTH &&
tmp->type != IEEE80211_WORK_ASSOC &&
tmp->type != IEEE80211_WORK_ASSOC_BEACON_WAIT)
continue;
if (memcmp(req->bss->bssid, wk->filter_ta, ETH_ALEN))
if (memcmp(req->bss->bssid, tmp->filter_ta, ETH_ALEN))
continue;
not_auth_yet = wk->type == IEEE80211_WORK_DIRECT_PROBE;
list_del_rcu(&wk->list);
free_work(wk);
not_auth_yet = tmp->type == IEEE80211_WORK_DIRECT_PROBE;
list_del_rcu(&tmp->list);
synchronize_rcu();
wk = tmp;
break;
}
mutex_unlock(&local->mtx);
if (wk && wk->type == IEEE80211_WORK_ASSOC) {
/* clean up dummy sta & TX sync */
sta_info_destroy_addr(wk->sdata, wk->filter_ta);
if (wk->assoc.synced)
drv_finish_tx_sync(local, wk->sdata,
wk->filter_ta,
IEEE80211_TX_SYNC_ASSOC);
} else if (wk && wk->type == IEEE80211_WORK_AUTH) {
if (wk->probe_auth.synced)
drv_finish_tx_sync(local, wk->sdata,
wk->filter_ta,
IEEE80211_TX_SYNC_AUTH);
}
kfree(wk);
/*
* If somebody requests authentication and we haven't
* sent out an auth frame yet there's no need to send

Some files were not shown because too many files have changed in this diff.