2009-10-13 15:15:51 +08:00
|
|
|
/*
|
|
|
|
* Linux driver for VMware's vmxnet3 ethernet NIC.
|
|
|
|
*
|
2016-06-17 01:51:53 +08:00
|
|
|
* Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved.
|
2009-10-13 15:15:51 +08:00
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify it
|
|
|
|
* under the terms of the GNU General Public License as published by the
|
|
|
|
* Free Software Foundation; version 2 of the License and no later version.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful, but
|
|
|
|
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
|
|
* NON INFRINGEMENT. See the GNU General Public License for more
|
|
|
|
* details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License
|
|
|
|
* along with this program; if not, write to the Free Software
|
|
|
|
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
|
|
|
*
|
|
|
|
* The full GNU General Public License is included in this distribution in
|
|
|
|
* the file called "COPYING".
|
|
|
|
*
|
2016-06-17 01:51:53 +08:00
|
|
|
* Maintained by: pv-drivers@vmware.com
|
2009-10-13 15:15:51 +08:00
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
2011-07-04 03:21:01 +08:00
|
|
|
#include <linux/module.h>
|
2009-11-18 15:04:59 +08:00
|
|
|
#include <net/ip6_checksum.h>
|
|
|
|
|
2009-10-13 15:15:51 +08:00
|
|
|
#include "vmxnet3_int.h"
|
|
|
|
|
|
|
|
char vmxnet3_driver_name[] = "vmxnet3";
|
|
|
|
#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
|
|
|
|
|
|
|
|
/*
|
|
|
|
* PCI Device ID Table
|
|
|
|
* Last entry must be all 0s
|
|
|
|
*/
|
2014-08-08 21:56:03 +08:00
|
|
|
static const struct pci_device_id vmxnet3_pciid_table[] = {
|
2009-10-13 15:15:51 +08:00
|
|
|
{PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
|
|
|
|
{0}
|
|
|
|
};
|
|
|
|
|
|
|
|
MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
|
|
|
|
|
2010-11-19 18:55:24 +08:00
|
|
|
static int enable_mq = 1;
|
2009-10-13 15:15:51 +08:00
|
|
|
|
2011-01-14 22:59:31 +08:00
|
|
|
static void
|
|
|
|
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);
|
|
|
|
|
2009-10-13 15:15:51 +08:00
|
|
|
/*
|
|
|
|
* Enable/Disable the given intr
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
|
|
|
|
{
|
|
|
|
VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
|
|
|
|
{
|
|
|
|
VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Enable/Disable all intrs used by the device
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < adapter->intr.num_intrs; i++)
|
|
|
|
vmxnet3_enable_intr(adapter, i);
|
2010-07-16 13:18:47 +08:00
|
|
|
adapter->shared->devRead.intrConf.intrCtrl &=
|
|
|
|
cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
|
2009-10-13 15:15:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
2010-07-16 13:18:47 +08:00
|
|
|
adapter->shared->devRead.intrConf.intrCtrl |=
|
|
|
|
cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
|
2009-10-13 15:15:51 +08:00
|
|
|
for (i = 0; i < adapter->intr.num_intrs; i++)
|
|
|
|
vmxnet3_disable_intr(adapter, i);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
|
|
|
|
{
|
|
|
|
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static bool
|
|
|
|
vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
|
|
|
|
{
|
2010-11-19 18:55:24 +08:00
|
|
|
return tq->stopped;
|
2009-10-13 15:15:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
|
|
|
|
{
|
|
|
|
tq->stopped = false;
|
2010-11-19 18:55:24 +08:00
|
|
|
netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
|
2009-10-13 15:15:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
|
|
|
|
{
|
|
|
|
tq->stopped = false;
|
2010-11-19 18:55:24 +08:00
|
|
|
netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
|
2009-10-13 15:15:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
|
|
|
|
{
|
|
|
|
tq->stopped = true;
|
|
|
|
tq->num_stop++;
|
2010-11-19 18:55:24 +08:00
|
|
|
netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
|
2009-10-13 15:15:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check the link state. This may start or stop the tx queue.
|
|
|
|
*/
|
|
|
|
static void
|
2010-07-16 05:51:14 +08:00
|
|
|
vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
|
2009-10-13 15:15:51 +08:00
|
|
|
{
|
|
|
|
u32 ret;
|
2010-11-19 18:55:24 +08:00
|
|
|
int i;
|
2011-01-14 22:59:57 +08:00
|
|
|
unsigned long flags;
|
2009-10-13 15:15:51 +08:00
|
|
|
|
2011-01-14 22:59:57 +08:00
|
|
|
spin_lock_irqsave(&adapter->cmd_lock, flags);
|
2009-10-13 15:15:51 +08:00
|
|
|
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
|
|
|
|
ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
|
2011-01-14 22:59:57 +08:00
|
|
|
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
|
|
|
|
|
2009-10-13 15:15:51 +08:00
|
|
|
adapter->link_speed = ret >> 16;
|
|
|
|
if (ret & 1) { /* Link is up. */
|
2013-01-15 15:28:30 +08:00
|
|
|
netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
|
|
|
|
adapter->link_speed);
|
2013-01-30 05:15:45 +08:00
|
|
|
netif_carrier_on(adapter->netdev);
|
2009-10-13 15:15:51 +08:00
|
|
|
|
2010-11-19 18:55:24 +08:00
|
|
|
if (affectTxQueue) {
|
|
|
|
for (i = 0; i < adapter->num_tx_queues; i++)
|
|
|
|
vmxnet3_tq_start(&adapter->tx_queue[i],
|
|
|
|
adapter);
|
|
|
|
}
|
2009-10-13 15:15:51 +08:00
|
|
|
} else {
|
2013-01-15 15:28:30 +08:00
|
|
|
netdev_info(adapter->netdev, "NIC Link is Down\n");
|
2013-01-30 05:15:45 +08:00
|
|
|
netif_carrier_off(adapter->netdev);
|
2009-10-13 15:15:51 +08:00
|
|
|
|
2010-11-19 18:55:24 +08:00
|
|
|
if (affectTxQueue) {
|
|
|
|
for (i = 0; i < adapter->num_tx_queues; i++)
|
|
|
|
vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
|
|
|
|
}
|
2009-10-13 15:15:51 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
vmxnet3_process_events(struct vmxnet3_adapter *adapter)
|
|
|
|
{
|
2010-11-19 18:55:24 +08:00
|
|
|
int i;
|
vmxnet3: Consistently disable irqs when taking adapter->cmd_lock
Using the vmxnet3 driver produces a lockdep warning because
vmxnet3_set_mc(), which is called with mc->mca_lock held, takes
adapter->cmd_lock. However, there are a couple of places where
adapter->cmd_lock is taken with softirqs enabled, lockdep warns that a
softirq that tries to take mc->mca_lock could happen while
adapter->cmd_lock is held, leading to an AB-BA deadlock.
I'm not sure if this is a real potential deadlock or not, but the
simplest and best fix seems to be simply to make sure we take cmd_lock
with spin_lock_irqsave() everywhere -- the places with plain spin_lock
just look like oversights.
The full enormous lockdep warning is:
=========================================================
[ INFO: possible irq lock inversion dependency detected ]
2.6.39-rc6+ #1
---------------------------------------------------------
ifconfig/567 just changed the state of lock:
(&(&mc->mca_lock)->rlock){+.-...}, at: [<ffffffff81531e9f>] mld_ifc_timer_expire+0xff/0x280
but this lock took another, SOFTIRQ-unsafe lock in the past:
(&(&adapter->cmd_lock)->rlock){+.+...}
and interrupts could create inverse lock ordering between them.
other info that might help us debug this:
4 locks held by ifconfig/567:
#0: (rtnl_mutex){+.+.+.}, at: [<ffffffff8147d547>] rtnl_lock+0x17/0x20
#1: ((inetaddr_chain).rwsem){.+.+.+}, at: [<ffffffff810896cf>] __blocking_notifier_call_chain+0x5f/0xb0
#2: (&idev->mc_ifc_timer){+.-...}, at: [<ffffffff8106f21b>] run_timer_softirq+0xeb/0x3f0
#3: (&ndev->lock){++.-..}, at: [<ffffffff81531dd2>] mld_ifc_timer_expire+0x32/0x280
the shortest dependencies between 2nd lock and 1st lock:
-> (&(&adapter->cmd_lock)->rlock){+.+...} ops: 11 {
HARDIRQ-ON-W at:
[<ffffffff8109ad86>] __lock_acquire+0x7f6/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff81571156>] _raw_spin_lock+0x36/0x70
[<ffffffffa000d212>] vmxnet3_alloc_intr_resources+0x22/0x230 [vmxnet3]
[<ffffffffa0014031>] vmxnet3_probe_device+0x5f6/0x15c5 [vmxnet3]
[<ffffffff812df67f>] local_pci_probe+0x5f/0xd0
[<ffffffff812dfde9>] pci_device_probe+0x119/0x120
[<ffffffff81373df6>] driver_probe_device+0x96/0x1c0
[<ffffffff81373fcb>] __driver_attach+0xab/0xb0
[<ffffffff81372a1e>] bus_for_each_dev+0x5e/0x90
[<ffffffff81373a2e>] driver_attach+0x1e/0x20
[<ffffffff813735b8>] bus_add_driver+0xc8/0x290
[<ffffffff813745b6>] driver_register+0x76/0x140
[<ffffffff812e0046>] __pci_register_driver+0x66/0xe0
[<ffffffffa001b03a>] serio_raw_poll+0x3a/0x60 [serio_raw]
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff810aa76b>] sys_init_module+0xfb/0x250
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
SOFTIRQ-ON-W at:
[<ffffffff8109adb7>] __lock_acquire+0x827/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff81571156>] _raw_spin_lock+0x36/0x70
[<ffffffffa000d212>] vmxnet3_alloc_intr_resources+0x22/0x230 [vmxnet3]
[<ffffffffa0014031>] vmxnet3_probe_device+0x5f6/0x15c5 [vmxnet3]
[<ffffffff812df67f>] local_pci_probe+0x5f/0xd0
[<ffffffff812dfde9>] pci_device_probe+0x119/0x120
[<ffffffff81373df6>] driver_probe_device+0x96/0x1c0
[<ffffffff81373fcb>] __driver_attach+0xab/0xb0
[<ffffffff81372a1e>] bus_for_each_dev+0x5e/0x90
[<ffffffff81373a2e>] driver_attach+0x1e/0x20
[<ffffffff813735b8>] bus_add_driver+0xc8/0x290
[<ffffffff813745b6>] driver_register+0x76/0x140
[<ffffffff812e0046>] __pci_register_driver+0x66/0xe0
[<ffffffffa001b03a>] serio_raw_poll+0x3a/0x60 [serio_raw]
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff810aa76b>] sys_init_module+0xfb/0x250
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
INITIAL USE at:
[<ffffffff8109a9e9>] __lock_acquire+0x459/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff81571156>] _raw_spin_lock+0x36/0x70
[<ffffffffa000d212>] vmxnet3_alloc_intr_resources+0x22/0x230 [vmxnet3]
[<ffffffffa0014031>] vmxnet3_probe_device+0x5f6/0x15c5 [vmxnet3]
[<ffffffff812df67f>] local_pci_probe+0x5f/0xd0
[<ffffffff812dfde9>] pci_device_probe+0x119/0x120
[<ffffffff81373df6>] driver_probe_device+0x96/0x1c0
[<ffffffff81373fcb>] __driver_attach+0xab/0xb0
[<ffffffff81372a1e>] bus_for_each_dev+0x5e/0x90
[<ffffffff81373a2e>] driver_attach+0x1e/0x20
[<ffffffff813735b8>] bus_add_driver+0xc8/0x290
[<ffffffff813745b6>] driver_register+0x76/0x140
[<ffffffff812e0046>] __pci_register_driver+0x66/0xe0
[<ffffffffa001b03a>] serio_raw_poll+0x3a/0x60 [serio_raw]
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff810aa76b>] sys_init_module+0xfb/0x250
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
}
... key at: [<ffffffffa0017590>] __key.42516+0x0/0xffffffffffffda70 [vmxnet3]
... acquired at:
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff81571bb5>] _raw_spin_lock_irqsave+0x55/0xa0
[<ffffffffa000de27>] vmxnet3_set_mc+0x97/0x1a0 [vmxnet3]
[<ffffffff8146ffa0>] __dev_set_rx_mode+0x40/0xb0
[<ffffffff81470040>] dev_set_rx_mode+0x30/0x50
[<ffffffff81470127>] __dev_open+0xc7/0x100
[<ffffffff814703c1>] __dev_change_flags+0xa1/0x180
[<ffffffff81470568>] dev_change_flags+0x28/0x70
[<ffffffff814da960>] devinet_ioctl+0x730/0x800
[<ffffffff814db508>] inet_ioctl+0x88/0xa0
[<ffffffff814541f0>] sock_do_ioctl+0x30/0x70
[<ffffffff814542a9>] sock_ioctl+0x79/0x2f0
[<ffffffff81188798>] do_vfs_ioctl+0x98/0x570
[<ffffffff81188d01>] sys_ioctl+0x91/0xa0
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
-> (_xmit_ETHER){+.....} ops: 6 {
HARDIRQ-ON-W at:
[<ffffffff8109ad86>] __lock_acquire+0x7f6/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81475618>] __dev_mc_add+0x38/0x90
[<ffffffff814756a0>] dev_mc_add+0x10/0x20
[<ffffffff81532c9e>] igmp6_group_added+0x10e/0x1b0
[<ffffffff81533f2d>] ipv6_dev_mc_inc+0x2cd/0x430
[<ffffffff81515e17>] ipv6_add_dev+0x357/0x450
[<ffffffff81519f27>] addrconf_notify+0x2f7/0xb10
[<ffffffff81575c1c>] notifier_call_chain+0x8c/0xc0
[<ffffffff81089586>] raw_notifier_call_chain+0x16/0x20
[<ffffffff814689b7>] call_netdevice_notifiers+0x37/0x70
[<ffffffff8146a944>] register_netdevice+0x244/0x2d0
[<ffffffff8146aa0f>] register_netdev+0x3f/0x60
[<ffffffffa001419b>] vmxnet3_probe_device+0x760/0x15c5 [vmxnet3]
[<ffffffff812df67f>] local_pci_probe+0x5f/0xd0
[<ffffffff812dfde9>] pci_device_probe+0x119/0x120
[<ffffffff81373df6>] driver_probe_device+0x96/0x1c0
[<ffffffff81373fcb>] __driver_attach+0xab/0xb0
[<ffffffff81372a1e>] bus_for_each_dev+0x5e/0x90
[<ffffffff81373a2e>] driver_attach+0x1e/0x20
[<ffffffff813735b8>] bus_add_driver+0xc8/0x290
[<ffffffff813745b6>] driver_register+0x76/0x140
[<ffffffff812e0046>] __pci_register_driver+0x66/0xe0
[<ffffffffa001b03a>] serio_raw_poll+0x3a/0x60 [serio_raw]
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff810aa76b>] sys_init_module+0xfb/0x250
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
INITIAL USE at:
[<ffffffff8109a9e9>] __lock_acquire+0x459/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81475618>] __dev_mc_add+0x38/0x90
[<ffffffff814756a0>] dev_mc_add+0x10/0x20
[<ffffffff81532c9e>] igmp6_group_added+0x10e/0x1b0
[<ffffffff81533f2d>] ipv6_dev_mc_inc+0x2cd/0x430
[<ffffffff81515e17>] ipv6_add_dev+0x357/0x450
[<ffffffff81519f27>] addrconf_notify+0x2f7/0xb10
[<ffffffff81575c1c>] notifier_call_chain+0x8c/0xc0
[<ffffffff81089586>] raw_notifier_call_chain+0x16/0x20
[<ffffffff814689b7>] call_netdevice_notifiers+0x37/0x70
[<ffffffff8146a944>] register_netdevice+0x244/0x2d0
[<ffffffff8146aa0f>] register_netdev+0x3f/0x60
[<ffffffffa001419b>] vmxnet3_probe_device+0x760/0x15c5 [vmxnet3]
[<ffffffff812df67f>] local_pci_probe+0x5f/0xd0
[<ffffffff812dfde9>] pci_device_probe+0x119/0x120
[<ffffffff81373df6>] driver_probe_device+0x96/0x1c0
[<ffffffff81373fcb>] __driver_attach+0xab/0xb0
[<ffffffff81372a1e>] bus_for_each_dev+0x5e/0x90
[<ffffffff81373a2e>] driver_attach+0x1e/0x20
[<ffffffff813735b8>] bus_add_driver+0xc8/0x290
[<ffffffff813745b6>] driver_register+0x76/0x140
[<ffffffff812e0046>] __pci_register_driver+0x66/0xe0
[<ffffffffa001b03a>] serio_raw_poll+0x3a/0x60 [serio_raw]
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff810aa76b>] sys_init_module+0xfb/0x250
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
}
... key at: [<ffffffff827fd868>] netdev_addr_lock_key+0x8/0x1e0
... acquired at:
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81475618>] __dev_mc_add+0x38/0x90
[<ffffffff814756a0>] dev_mc_add+0x10/0x20
[<ffffffff81532c9e>] igmp6_group_added+0x10e/0x1b0
[<ffffffff81533f2d>] ipv6_dev_mc_inc+0x2cd/0x430
[<ffffffff81515e17>] ipv6_add_dev+0x357/0x450
[<ffffffff81519f27>] addrconf_notify+0x2f7/0xb10
[<ffffffff81575c1c>] notifier_call_chain+0x8c/0xc0
[<ffffffff81089586>] raw_notifier_call_chain+0x16/0x20
[<ffffffff814689b7>] call_netdevice_notifiers+0x37/0x70
[<ffffffff8146a944>] register_netdevice+0x244/0x2d0
[<ffffffff8146aa0f>] register_netdev+0x3f/0x60
[<ffffffffa001419b>] vmxnet3_probe_device+0x760/0x15c5 [vmxnet3]
[<ffffffff812df67f>] local_pci_probe+0x5f/0xd0
[<ffffffff812dfde9>] pci_device_probe+0x119/0x120
[<ffffffff81373df6>] driver_probe_device+0x96/0x1c0
[<ffffffff81373fcb>] __driver_attach+0xab/0xb0
[<ffffffff81372a1e>] bus_for_each_dev+0x5e/0x90
[<ffffffff81373a2e>] driver_attach+0x1e/0x20
[<ffffffff813735b8>] bus_add_driver+0xc8/0x290
[<ffffffff813745b6>] driver_register+0x76/0x140
[<ffffffff812e0046>] __pci_register_driver+0x66/0xe0
[<ffffffffa001b03a>] serio_raw_poll+0x3a/0x60 [serio_raw]
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff810aa76b>] sys_init_module+0xfb/0x250
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
-> (&(&mc->mca_lock)->rlock){+.-...} ops: 6 {
HARDIRQ-ON-W at:
[<ffffffff8109ad86>] __lock_acquire+0x7f6/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81532bd5>] igmp6_group_added+0x45/0x1b0
[<ffffffff81533f2d>] ipv6_dev_mc_inc+0x2cd/0x430
[<ffffffff81515e17>] ipv6_add_dev+0x357/0x450
[<ffffffff81ce0d16>] addrconf_init+0x4e/0x183
[<ffffffff81ce0ba1>] inet6_init+0x191/0x2a6
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff81ca4d3f>] kernel_init+0xe3/0x168
[<ffffffff8157b2e4>] kernel_thread_helper+0x4/0x10
IN-SOFTIRQ-W at:
[<ffffffff8109ad5e>] __lock_acquire+0x7ce/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81531e9f>] mld_ifc_timer_expire+0xff/0x280
[<ffffffff8106f2a9>] run_timer_softirq+0x179/0x3f0
[<ffffffff810666d0>] __do_softirq+0xc0/0x210
[<ffffffff8157b3dc>] call_softirq+0x1c/0x30
[<ffffffff8100d42d>] do_softirq+0xad/0xe0
[<ffffffff81066afe>] irq_exit+0x9e/0xb0
[<ffffffff8157bd40>] smp_apic_timer_interrupt+0x70/0x9b
[<ffffffff8157ab93>] apic_timer_interrupt+0x13/0x20
[<ffffffff8149d857>] rt_do_flush+0x87/0x2a0
[<ffffffff814a16b6>] rt_cache_flush+0x46/0x60
[<ffffffff814e36e0>] fib_disable_ip+0x40/0x60
[<ffffffff814e5447>] fib_inetaddr_event+0xd7/0xe0
[<ffffffff81575c1c>] notifier_call_chain+0x8c/0xc0
[<ffffffff810896e8>] __blocking_notifier_call_chain+0x78/0xb0
[<ffffffff81089736>] blocking_notifier_call_chain+0x16/0x20
[<ffffffff814d8021>] __inet_del_ifa+0xf1/0x2e0
[<ffffffff814d8223>] inet_del_ifa+0x13/0x20
[<ffffffff814da731>] devinet_ioctl+0x501/0x800
[<ffffffff814db508>] inet_ioctl+0x88/0xa0
[<ffffffff814541f0>] sock_do_ioctl+0x30/0x70
[<ffffffff814542a9>] sock_ioctl+0x79/0x2f0
[<ffffffff81188798>] do_vfs_ioctl+0x98/0x570
[<ffffffff81188d01>] sys_ioctl+0x91/0xa0
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
INITIAL USE at:
[<ffffffff8109a9e9>] __lock_acquire+0x459/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81532bd5>] igmp6_group_added+0x45/0x1b0
[<ffffffff81533f2d>] ipv6_dev_mc_inc+0x2cd/0x430
[<ffffffff81515e17>] ipv6_add_dev+0x357/0x450
[<ffffffff81ce0d16>] addrconf_init+0x4e/0x183
[<ffffffff81ce0ba1>] inet6_init+0x191/0x2a6
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff81ca4d3f>] kernel_init+0xe3/0x168
[<ffffffff8157b2e4>] kernel_thread_helper+0x4/0x10
}
... key at: [<ffffffff82801be2>] __key.40877+0x0/0x8
... acquired at:
[<ffffffff810997bc>] check_usage_forwards+0x9c/0x110
[<ffffffff8109a32c>] mark_lock+0x19c/0x400
[<ffffffff8109ad5e>] __lock_acquire+0x7ce/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81531e9f>] mld_ifc_timer_expire+0xff/0x280
[<ffffffff8106f2a9>] run_timer_softirq+0x179/0x3f0
[<ffffffff810666d0>] __do_softirq+0xc0/0x210
[<ffffffff8157b3dc>] call_softirq+0x1c/0x30
[<ffffffff8100d42d>] do_softirq+0xad/0xe0
[<ffffffff81066afe>] irq_exit+0x9e/0xb0
[<ffffffff8157bd40>] smp_apic_timer_interrupt+0x70/0x9b
[<ffffffff8157ab93>] apic_timer_interrupt+0x13/0x20
[<ffffffff8149d857>] rt_do_flush+0x87/0x2a0
[<ffffffff814a16b6>] rt_cache_flush+0x46/0x60
[<ffffffff814e36e0>] fib_disable_ip+0x40/0x60
[<ffffffff814e5447>] fib_inetaddr_event+0xd7/0xe0
[<ffffffff81575c1c>] notifier_call_chain+0x8c/0xc0
[<ffffffff810896e8>] __blocking_notifier_call_chain+0x78/0xb0
[<ffffffff81089736>] blocking_notifier_call_chain+0x16/0x20
[<ffffffff814d8021>] __inet_del_ifa+0xf1/0x2e0
[<ffffffff814d8223>] inet_del_ifa+0x13/0x20
[<ffffffff814da731>] devinet_ioctl+0x501/0x800
[<ffffffff814db508>] inet_ioctl+0x88/0xa0
[<ffffffff814541f0>] sock_do_ioctl+0x30/0x70
[<ffffffff814542a9>] sock_ioctl+0x79/0x2f0
[<ffffffff81188798>] do_vfs_ioctl+0x98/0x570
[<ffffffff81188d01>] sys_ioctl+0x91/0xa0
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
stack backtrace:
Pid: 567, comm: ifconfig Not tainted 2.6.39-rc6+ #1
Call Trace:
<IRQ> [<ffffffff810996f6>] print_irq_inversion_bug+0x146/0x170
[<ffffffff81099720>] ? print_irq_inversion_bug+0x170/0x170
[<ffffffff810997bc>] check_usage_forwards+0x9c/0x110
[<ffffffff8109a32c>] mark_lock+0x19c/0x400
[<ffffffff8109ad5e>] __lock_acquire+0x7ce/0x1e10
[<ffffffff8109a383>] ? mark_lock+0x1f3/0x400
[<ffffffff8109b497>] ? __lock_acquire+0xf07/0x1e10
[<ffffffff81012255>] ? native_sched_clock+0x15/0x70
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff81531e9f>] ? mld_ifc_timer_expire+0xff/0x280
[<ffffffff8109759d>] ? lock_release_holdtime+0x3d/0x1a0
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81531e9f>] ? mld_ifc_timer_expire+0xff/0x280
[<ffffffff8157170b>] ? _raw_spin_unlock+0x2b/0x40
[<ffffffff81531e9f>] mld_ifc_timer_expire+0xff/0x280
[<ffffffff8106f2a9>] run_timer_softirq+0x179/0x3f0
[<ffffffff8106f21b>] ? run_timer_softirq+0xeb/0x3f0
[<ffffffff810122b9>] ? sched_clock+0x9/0x10
[<ffffffff81531da0>] ? mld_gq_timer_expire+0x30/0x30
[<ffffffff810666d0>] __do_softirq+0xc0/0x210
[<ffffffff8109455f>] ? tick_program_event+0x1f/0x30
[<ffffffff8157b3dc>] call_softirq+0x1c/0x30
[<ffffffff8100d42d>] do_softirq+0xad/0xe0
[<ffffffff81066afe>] irq_exit+0x9e/0xb0
[<ffffffff8157bd40>] smp_apic_timer_interrupt+0x70/0x9b
[<ffffffff8157ab93>] apic_timer_interrupt+0x13/0x20
<EOI> [<ffffffff81571f14>] ? retint_restore_args+0x13/0x13
[<ffffffff810974a7>] ? lock_is_held+0x17/0xd0
[<ffffffff8149d857>] rt_do_flush+0x87/0x2a0
[<ffffffff814a16b6>] rt_cache_flush+0x46/0x60
[<ffffffff814e36e0>] fib_disable_ip+0x40/0x60
[<ffffffff814e5447>] fib_inetaddr_event+0xd7/0xe0
[<ffffffff81575c1c>] notifier_call_chain+0x8c/0xc0
[<ffffffff810896e8>] __blocking_notifier_call_chain+0x78/0xb0
[<ffffffff81089736>] blocking_notifier_call_chain+0x16/0x20
[<ffffffff814d8021>] __inet_del_ifa+0xf1/0x2e0
[<ffffffff814d8223>] inet_del_ifa+0x13/0x20
[<ffffffff814da731>] devinet_ioctl+0x501/0x800
[<ffffffff8108a3af>] ? local_clock+0x6f/0x80
[<ffffffff81575898>] ? do_page_fault+0x268/0x560
[<ffffffff814db508>] inet_ioctl+0x88/0xa0
[<ffffffff814541f0>] sock_do_ioctl+0x30/0x70
[<ffffffff814542a9>] sock_ioctl+0x79/0x2f0
[<ffffffff810dfe87>] ? __call_rcu+0xa7/0x190
[<ffffffff81188798>] do_vfs_ioctl+0x98/0x570
[<ffffffff8117737e>] ? fget_light+0x33e/0x430
[<ffffffff81571ef9>] ? retint_swapgs+0x13/0x1b
[<ffffffff81188d01>] sys_ioctl+0x91/0xa0
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
Signed-off-by: Roland Dreier <roland@purestorage.com>
Signed-off-by: Shreyas N Bhatewara <sbhatewara@vmware.com>
Signed-off-by: Scott J. Goldman <scottjg@vmware.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2011-05-06 16:32:53 +08:00
|
|
|
unsigned long flags;
|
2009-11-16 21:41:33 +08:00
|
|
|
u32 events = le32_to_cpu(adapter->shared->ecr);
|
2009-10-13 15:15:51 +08:00
|
|
|
if (!events)
|
|
|
|
return;
|
|
|
|
|
|
|
|
vmxnet3_ack_events(adapter, events);
|
|
|
|
|
|
|
|
/* Check if link state has changed */
|
|
|
|
if (events & VMXNET3_ECR_LINK)
|
2010-07-16 05:51:14 +08:00
|
|
|
vmxnet3_check_link(adapter, true);
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
/* Check if there is an error on xmit/recv queues */
|
|
|
|
if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
|
vmxnet3: Consistently disable irqs when taking adapter->cmd_lock
Using the vmxnet3 driver produces a lockdep warning because
vmxnet3_set_mc(), which is called with mc->mca_lock held, takes
adapter->cmd_lock. However, there are a couple of places where
adapter->cmd_lock is taken with softirqs enabled, lockdep warns that a
softirq that tries to take mc->mca_lock could happen while
adapter->cmd_lock is held, leading to an AB-BA deadlock.
I'm not sure if this is a real potential deadlock or not, but the
simplest and best fix seems to be simply to make sure we take cmd_lock
with spin_lock_irqsave() everywhere -- the places with plain spin_lock
just look like oversights.
The full enormous lockdep warning is:
=========================================================
[ INFO: possible irq lock inversion dependency detected ]
2.6.39-rc6+ #1
---------------------------------------------------------
ifconfig/567 just changed the state of lock:
(&(&mc->mca_lock)->rlock){+.-...}, at: [<ffffffff81531e9f>] mld_ifc_timer_expire+0xff/0x280
but this lock took another, SOFTIRQ-unsafe lock in the past:
(&(&adapter->cmd_lock)->rlock){+.+...}
and interrupts could create inverse lock ordering between them.
other info that might help us debug this:
4 locks held by ifconfig/567:
#0: (rtnl_mutex){+.+.+.}, at: [<ffffffff8147d547>] rtnl_lock+0x17/0x20
#1: ((inetaddr_chain).rwsem){.+.+.+}, at: [<ffffffff810896cf>] __blocking_notifier_call_chain+0x5f/0xb0
#2: (&idev->mc_ifc_timer){+.-...}, at: [<ffffffff8106f21b>] run_timer_softirq+0xeb/0x3f0
#3: (&ndev->lock){++.-..}, at: [<ffffffff81531dd2>] mld_ifc_timer_expire+0x32/0x280
the shortest dependencies between 2nd lock and 1st lock:
-> (&(&adapter->cmd_lock)->rlock){+.+...} ops: 11 {
HARDIRQ-ON-W at:
[<ffffffff8109ad86>] __lock_acquire+0x7f6/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff81571156>] _raw_spin_lock+0x36/0x70
[<ffffffffa000d212>] vmxnet3_alloc_intr_resources+0x22/0x230 [vmxnet3]
[<ffffffffa0014031>] vmxnet3_probe_device+0x5f6/0x15c5 [vmxnet3]
[<ffffffff812df67f>] local_pci_probe+0x5f/0xd0
[<ffffffff812dfde9>] pci_device_probe+0x119/0x120
[<ffffffff81373df6>] driver_probe_device+0x96/0x1c0
[<ffffffff81373fcb>] __driver_attach+0xab/0xb0
[<ffffffff81372a1e>] bus_for_each_dev+0x5e/0x90
[<ffffffff81373a2e>] driver_attach+0x1e/0x20
[<ffffffff813735b8>] bus_add_driver+0xc8/0x290
[<ffffffff813745b6>] driver_register+0x76/0x140
[<ffffffff812e0046>] __pci_register_driver+0x66/0xe0
[<ffffffffa001b03a>] serio_raw_poll+0x3a/0x60 [serio_raw]
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff810aa76b>] sys_init_module+0xfb/0x250
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
SOFTIRQ-ON-W at:
[<ffffffff8109adb7>] __lock_acquire+0x827/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff81571156>] _raw_spin_lock+0x36/0x70
[<ffffffffa000d212>] vmxnet3_alloc_intr_resources+0x22/0x230 [vmxnet3]
[<ffffffffa0014031>] vmxnet3_probe_device+0x5f6/0x15c5 [vmxnet3]
[<ffffffff812df67f>] local_pci_probe+0x5f/0xd0
[<ffffffff812dfde9>] pci_device_probe+0x119/0x120
[<ffffffff81373df6>] driver_probe_device+0x96/0x1c0
[<ffffffff81373fcb>] __driver_attach+0xab/0xb0
[<ffffffff81372a1e>] bus_for_each_dev+0x5e/0x90
[<ffffffff81373a2e>] driver_attach+0x1e/0x20
[<ffffffff813735b8>] bus_add_driver+0xc8/0x290
[<ffffffff813745b6>] driver_register+0x76/0x140
[<ffffffff812e0046>] __pci_register_driver+0x66/0xe0
[<ffffffffa001b03a>] serio_raw_poll+0x3a/0x60 [serio_raw]
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff810aa76b>] sys_init_module+0xfb/0x250
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
INITIAL USE at:
[<ffffffff8109a9e9>] __lock_acquire+0x459/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff81571156>] _raw_spin_lock+0x36/0x70
[<ffffffffa000d212>] vmxnet3_alloc_intr_resources+0x22/0x230 [vmxnet3]
[<ffffffffa0014031>] vmxnet3_probe_device+0x5f6/0x15c5 [vmxnet3]
[<ffffffff812df67f>] local_pci_probe+0x5f/0xd0
[<ffffffff812dfde9>] pci_device_probe+0x119/0x120
[<ffffffff81373df6>] driver_probe_device+0x96/0x1c0
[<ffffffff81373fcb>] __driver_attach+0xab/0xb0
[<ffffffff81372a1e>] bus_for_each_dev+0x5e/0x90
[<ffffffff81373a2e>] driver_attach+0x1e/0x20
[<ffffffff813735b8>] bus_add_driver+0xc8/0x290
[<ffffffff813745b6>] driver_register+0x76/0x140
[<ffffffff812e0046>] __pci_register_driver+0x66/0xe0
[<ffffffffa001b03a>] serio_raw_poll+0x3a/0x60 [serio_raw]
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff810aa76b>] sys_init_module+0xfb/0x250
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
}
... key at: [<ffffffffa0017590>] __key.42516+0x0/0xffffffffffffda70 [vmxnet3]
... acquired at:
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff81571bb5>] _raw_spin_lock_irqsave+0x55/0xa0
[<ffffffffa000de27>] vmxnet3_set_mc+0x97/0x1a0 [vmxnet3]
[<ffffffff8146ffa0>] __dev_set_rx_mode+0x40/0xb0
[<ffffffff81470040>] dev_set_rx_mode+0x30/0x50
[<ffffffff81470127>] __dev_open+0xc7/0x100
[<ffffffff814703c1>] __dev_change_flags+0xa1/0x180
[<ffffffff81470568>] dev_change_flags+0x28/0x70
[<ffffffff814da960>] devinet_ioctl+0x730/0x800
[<ffffffff814db508>] inet_ioctl+0x88/0xa0
[<ffffffff814541f0>] sock_do_ioctl+0x30/0x70
[<ffffffff814542a9>] sock_ioctl+0x79/0x2f0
[<ffffffff81188798>] do_vfs_ioctl+0x98/0x570
[<ffffffff81188d01>] sys_ioctl+0x91/0xa0
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
-> (_xmit_ETHER){+.....} ops: 6 {
HARDIRQ-ON-W at:
[<ffffffff8109ad86>] __lock_acquire+0x7f6/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81475618>] __dev_mc_add+0x38/0x90
[<ffffffff814756a0>] dev_mc_add+0x10/0x20
[<ffffffff81532c9e>] igmp6_group_added+0x10e/0x1b0
[<ffffffff81533f2d>] ipv6_dev_mc_inc+0x2cd/0x430
[<ffffffff81515e17>] ipv6_add_dev+0x357/0x450
[<ffffffff81519f27>] addrconf_notify+0x2f7/0xb10
[<ffffffff81575c1c>] notifier_call_chain+0x8c/0xc0
[<ffffffff81089586>] raw_notifier_call_chain+0x16/0x20
[<ffffffff814689b7>] call_netdevice_notifiers+0x37/0x70
[<ffffffff8146a944>] register_netdevice+0x244/0x2d0
[<ffffffff8146aa0f>] register_netdev+0x3f/0x60
[<ffffffffa001419b>] vmxnet3_probe_device+0x760/0x15c5 [vmxnet3]
[<ffffffff812df67f>] local_pci_probe+0x5f/0xd0
[<ffffffff812dfde9>] pci_device_probe+0x119/0x120
[<ffffffff81373df6>] driver_probe_device+0x96/0x1c0
[<ffffffff81373fcb>] __driver_attach+0xab/0xb0
[<ffffffff81372a1e>] bus_for_each_dev+0x5e/0x90
[<ffffffff81373a2e>] driver_attach+0x1e/0x20
[<ffffffff813735b8>] bus_add_driver+0xc8/0x290
[<ffffffff813745b6>] driver_register+0x76/0x140
[<ffffffff812e0046>] __pci_register_driver+0x66/0xe0
[<ffffffffa001b03a>] serio_raw_poll+0x3a/0x60 [serio_raw]
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff810aa76b>] sys_init_module+0xfb/0x250
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
INITIAL USE at:
[<ffffffff8109a9e9>] __lock_acquire+0x459/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81475618>] __dev_mc_add+0x38/0x90
[<ffffffff814756a0>] dev_mc_add+0x10/0x20
[<ffffffff81532c9e>] igmp6_group_added+0x10e/0x1b0
[<ffffffff81533f2d>] ipv6_dev_mc_inc+0x2cd/0x430
[<ffffffff81515e17>] ipv6_add_dev+0x357/0x450
[<ffffffff81519f27>] addrconf_notify+0x2f7/0xb10
[<ffffffff81575c1c>] notifier_call_chain+0x8c/0xc0
[<ffffffff81089586>] raw_notifier_call_chain+0x16/0x20
[<ffffffff814689b7>] call_netdevice_notifiers+0x37/0x70
[<ffffffff8146a944>] register_netdevice+0x244/0x2d0
[<ffffffff8146aa0f>] register_netdev+0x3f/0x60
[<ffffffffa001419b>] vmxnet3_probe_device+0x760/0x15c5 [vmxnet3]
[<ffffffff812df67f>] local_pci_probe+0x5f/0xd0
[<ffffffff812dfde9>] pci_device_probe+0x119/0x120
[<ffffffff81373df6>] driver_probe_device+0x96/0x1c0
[<ffffffff81373fcb>] __driver_attach+0xab/0xb0
[<ffffffff81372a1e>] bus_for_each_dev+0x5e/0x90
[<ffffffff81373a2e>] driver_attach+0x1e/0x20
[<ffffffff813735b8>] bus_add_driver+0xc8/0x290
[<ffffffff813745b6>] driver_register+0x76/0x140
[<ffffffff812e0046>] __pci_register_driver+0x66/0xe0
[<ffffffffa001b03a>] serio_raw_poll+0x3a/0x60 [serio_raw]
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff810aa76b>] sys_init_module+0xfb/0x250
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
}
... key at: [<ffffffff827fd868>] netdev_addr_lock_key+0x8/0x1e0
... acquired at:
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81475618>] __dev_mc_add+0x38/0x90
[<ffffffff814756a0>] dev_mc_add+0x10/0x20
[<ffffffff81532c9e>] igmp6_group_added+0x10e/0x1b0
[<ffffffff81533f2d>] ipv6_dev_mc_inc+0x2cd/0x430
[<ffffffff81515e17>] ipv6_add_dev+0x357/0x450
[<ffffffff81519f27>] addrconf_notify+0x2f7/0xb10
[<ffffffff81575c1c>] notifier_call_chain+0x8c/0xc0
[<ffffffff81089586>] raw_notifier_call_chain+0x16/0x20
[<ffffffff814689b7>] call_netdevice_notifiers+0x37/0x70
[<ffffffff8146a944>] register_netdevice+0x244/0x2d0
[<ffffffff8146aa0f>] register_netdev+0x3f/0x60
[<ffffffffa001419b>] vmxnet3_probe_device+0x760/0x15c5 [vmxnet3]
[<ffffffff812df67f>] local_pci_probe+0x5f/0xd0
[<ffffffff812dfde9>] pci_device_probe+0x119/0x120
[<ffffffff81373df6>] driver_probe_device+0x96/0x1c0
[<ffffffff81373fcb>] __driver_attach+0xab/0xb0
[<ffffffff81372a1e>] bus_for_each_dev+0x5e/0x90
[<ffffffff81373a2e>] driver_attach+0x1e/0x20
[<ffffffff813735b8>] bus_add_driver+0xc8/0x290
[<ffffffff813745b6>] driver_register+0x76/0x140
[<ffffffff812e0046>] __pci_register_driver+0x66/0xe0
[<ffffffffa001b03a>] serio_raw_poll+0x3a/0x60 [serio_raw]
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff810aa76b>] sys_init_module+0xfb/0x250
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
-> (&(&mc->mca_lock)->rlock){+.-...} ops: 6 {
HARDIRQ-ON-W at:
[<ffffffff8109ad86>] __lock_acquire+0x7f6/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81532bd5>] igmp6_group_added+0x45/0x1b0
[<ffffffff81533f2d>] ipv6_dev_mc_inc+0x2cd/0x430
[<ffffffff81515e17>] ipv6_add_dev+0x357/0x450
[<ffffffff81ce0d16>] addrconf_init+0x4e/0x183
[<ffffffff81ce0ba1>] inet6_init+0x191/0x2a6
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff81ca4d3f>] kernel_init+0xe3/0x168
[<ffffffff8157b2e4>] kernel_thread_helper+0x4/0x10
IN-SOFTIRQ-W at:
[<ffffffff8109ad5e>] __lock_acquire+0x7ce/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81531e9f>] mld_ifc_timer_expire+0xff/0x280
[<ffffffff8106f2a9>] run_timer_softirq+0x179/0x3f0
[<ffffffff810666d0>] __do_softirq+0xc0/0x210
[<ffffffff8157b3dc>] call_softirq+0x1c/0x30
[<ffffffff8100d42d>] do_softirq+0xad/0xe0
[<ffffffff81066afe>] irq_exit+0x9e/0xb0
[<ffffffff8157bd40>] smp_apic_timer_interrupt+0x70/0x9b
[<ffffffff8157ab93>] apic_timer_interrupt+0x13/0x20
[<ffffffff8149d857>] rt_do_flush+0x87/0x2a0
[<ffffffff814a16b6>] rt_cache_flush+0x46/0x60
[<ffffffff814e36e0>] fib_disable_ip+0x40/0x60
[<ffffffff814e5447>] fib_inetaddr_event+0xd7/0xe0
[<ffffffff81575c1c>] notifier_call_chain+0x8c/0xc0
[<ffffffff810896e8>] __blocking_notifier_call_chain+0x78/0xb0
[<ffffffff81089736>] blocking_notifier_call_chain+0x16/0x20
[<ffffffff814d8021>] __inet_del_ifa+0xf1/0x2e0
[<ffffffff814d8223>] inet_del_ifa+0x13/0x20
[<ffffffff814da731>] devinet_ioctl+0x501/0x800
[<ffffffff814db508>] inet_ioctl+0x88/0xa0
[<ffffffff814541f0>] sock_do_ioctl+0x30/0x70
[<ffffffff814542a9>] sock_ioctl+0x79/0x2f0
[<ffffffff81188798>] do_vfs_ioctl+0x98/0x570
[<ffffffff81188d01>] sys_ioctl+0x91/0xa0
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
INITIAL USE at:
[<ffffffff8109a9e9>] __lock_acquire+0x459/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81532bd5>] igmp6_group_added+0x45/0x1b0
[<ffffffff81533f2d>] ipv6_dev_mc_inc+0x2cd/0x430
[<ffffffff81515e17>] ipv6_add_dev+0x357/0x450
[<ffffffff81ce0d16>] addrconf_init+0x4e/0x183
[<ffffffff81ce0ba1>] inet6_init+0x191/0x2a6
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff81ca4d3f>] kernel_init+0xe3/0x168
[<ffffffff8157b2e4>] kernel_thread_helper+0x4/0x10
}
... key at: [<ffffffff82801be2>] __key.40877+0x0/0x8
... acquired at:
[<ffffffff810997bc>] check_usage_forwards+0x9c/0x110
[<ffffffff8109a32c>] mark_lock+0x19c/0x400
[<ffffffff8109ad5e>] __lock_acquire+0x7ce/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81531e9f>] mld_ifc_timer_expire+0xff/0x280
[<ffffffff8106f2a9>] run_timer_softirq+0x179/0x3f0
[<ffffffff810666d0>] __do_softirq+0xc0/0x210
[<ffffffff8157b3dc>] call_softirq+0x1c/0x30
[<ffffffff8100d42d>] do_softirq+0xad/0xe0
[<ffffffff81066afe>] irq_exit+0x9e/0xb0
[<ffffffff8157bd40>] smp_apic_timer_interrupt+0x70/0x9b
[<ffffffff8157ab93>] apic_timer_interrupt+0x13/0x20
[<ffffffff8149d857>] rt_do_flush+0x87/0x2a0
[<ffffffff814a16b6>] rt_cache_flush+0x46/0x60
[<ffffffff814e36e0>] fib_disable_ip+0x40/0x60
[<ffffffff814e5447>] fib_inetaddr_event+0xd7/0xe0
[<ffffffff81575c1c>] notifier_call_chain+0x8c/0xc0
[<ffffffff810896e8>] __blocking_notifier_call_chain+0x78/0xb0
[<ffffffff81089736>] blocking_notifier_call_chain+0x16/0x20
[<ffffffff814d8021>] __inet_del_ifa+0xf1/0x2e0
[<ffffffff814d8223>] inet_del_ifa+0x13/0x20
[<ffffffff814da731>] devinet_ioctl+0x501/0x800
[<ffffffff814db508>] inet_ioctl+0x88/0xa0
[<ffffffff814541f0>] sock_do_ioctl+0x30/0x70
[<ffffffff814542a9>] sock_ioctl+0x79/0x2f0
[<ffffffff81188798>] do_vfs_ioctl+0x98/0x570
[<ffffffff81188d01>] sys_ioctl+0x91/0xa0
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
stack backtrace:
Pid: 567, comm: ifconfig Not tainted 2.6.39-rc6+ #1
Call Trace:
<IRQ> [<ffffffff810996f6>] print_irq_inversion_bug+0x146/0x170
[<ffffffff81099720>] ? print_irq_inversion_bug+0x170/0x170
[<ffffffff810997bc>] check_usage_forwards+0x9c/0x110
[<ffffffff8109a32c>] mark_lock+0x19c/0x400
[<ffffffff8109ad5e>] __lock_acquire+0x7ce/0x1e10
[<ffffffff8109a383>] ? mark_lock+0x1f3/0x400
[<ffffffff8109b497>] ? __lock_acquire+0xf07/0x1e10
[<ffffffff81012255>] ? native_sched_clock+0x15/0x70
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff81531e9f>] ? mld_ifc_timer_expire+0xff/0x280
[<ffffffff8109759d>] ? lock_release_holdtime+0x3d/0x1a0
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81531e9f>] ? mld_ifc_timer_expire+0xff/0x280
[<ffffffff8157170b>] ? _raw_spin_unlock+0x2b/0x40
[<ffffffff81531e9f>] mld_ifc_timer_expire+0xff/0x280
[<ffffffff8106f2a9>] run_timer_softirq+0x179/0x3f0
[<ffffffff8106f21b>] ? run_timer_softirq+0xeb/0x3f0
[<ffffffff810122b9>] ? sched_clock+0x9/0x10
[<ffffffff81531da0>] ? mld_gq_timer_expire+0x30/0x30
[<ffffffff810666d0>] __do_softirq+0xc0/0x210
[<ffffffff8109455f>] ? tick_program_event+0x1f/0x30
[<ffffffff8157b3dc>] call_softirq+0x1c/0x30
[<ffffffff8100d42d>] do_softirq+0xad/0xe0
[<ffffffff81066afe>] irq_exit+0x9e/0xb0
[<ffffffff8157bd40>] smp_apic_timer_interrupt+0x70/0x9b
[<ffffffff8157ab93>] apic_timer_interrupt+0x13/0x20
<EOI> [<ffffffff81571f14>] ? retint_restore_args+0x13/0x13
[<ffffffff810974a7>] ? lock_is_held+0x17/0xd0
[<ffffffff8149d857>] rt_do_flush+0x87/0x2a0
[<ffffffff814a16b6>] rt_cache_flush+0x46/0x60
[<ffffffff814e36e0>] fib_disable_ip+0x40/0x60
[<ffffffff814e5447>] fib_inetaddr_event+0xd7/0xe0
[<ffffffff81575c1c>] notifier_call_chain+0x8c/0xc0
[<ffffffff810896e8>] __blocking_notifier_call_chain+0x78/0xb0
[<ffffffff81089736>] blocking_notifier_call_chain+0x16/0x20
[<ffffffff814d8021>] __inet_del_ifa+0xf1/0x2e0
[<ffffffff814d8223>] inet_del_ifa+0x13/0x20
[<ffffffff814da731>] devinet_ioctl+0x501/0x800
[<ffffffff8108a3af>] ? local_clock+0x6f/0x80
[<ffffffff81575898>] ? do_page_fault+0x268/0x560
[<ffffffff814db508>] inet_ioctl+0x88/0xa0
[<ffffffff814541f0>] sock_do_ioctl+0x30/0x70
[<ffffffff814542a9>] sock_ioctl+0x79/0x2f0
[<ffffffff810dfe87>] ? __call_rcu+0xa7/0x190
[<ffffffff81188798>] do_vfs_ioctl+0x98/0x570
[<ffffffff8117737e>] ? fget_light+0x33e/0x430
[<ffffffff81571ef9>] ? retint_swapgs+0x13/0x1b
[<ffffffff81188d01>] sys_ioctl+0x91/0xa0
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
Signed-off-by: Roland Dreier <roland@purestorage.com>
Signed-off-by: Shreyas N Bhatewara <sbhatewara@vmware.com>
Signed-off-by: Scott J. Goldman <scottjg@vmware.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2011-05-06 16:32:53 +08:00
|
|
|
spin_lock_irqsave(&adapter->cmd_lock, flags);
|
2009-10-13 15:15:51 +08:00
|
|
|
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
|
|
|
|
VMXNET3_CMD_GET_QUEUE_STATUS);
|
vmxnet3: Consistently disable irqs when taking adapter->cmd_lock
Using the vmxnet3 driver produces a lockdep warning because
vmxnet3_set_mc(), which is called with mc->mca_lock held, takes
adapter->cmd_lock. However, there are a couple of places where
adapter->cmd_lock is taken with softirqs enabled, lockdep warns that a
softirq that tries to take mc->mca_lock could happen while
adapter->cmd_lock is held, leading to an AB-BA deadlock.
I'm not sure if this is a real potential deadlock or not, but the
simplest and best fix seems to be simply to make sure we take cmd_lock
with spin_lock_irqsave() everywhere -- the places with plain spin_lock
just look like oversights.
The full enormous lockdep warning is:
=========================================================
[ INFO: possible irq lock inversion dependency detected ]
2.6.39-rc6+ #1
---------------------------------------------------------
ifconfig/567 just changed the state of lock:
(&(&mc->mca_lock)->rlock){+.-...}, at: [<ffffffff81531e9f>] mld_ifc_timer_expire+0xff/0x280
but this lock took another, SOFTIRQ-unsafe lock in the past:
(&(&adapter->cmd_lock)->rlock){+.+...}
and interrupts could create inverse lock ordering between them.
other info that might help us debug this:
4 locks held by ifconfig/567:
#0: (rtnl_mutex){+.+.+.}, at: [<ffffffff8147d547>] rtnl_lock+0x17/0x20
#1: ((inetaddr_chain).rwsem){.+.+.+}, at: [<ffffffff810896cf>] __blocking_notifier_call_chain+0x5f/0xb0
#2: (&idev->mc_ifc_timer){+.-...}, at: [<ffffffff8106f21b>] run_timer_softirq+0xeb/0x3f0
#3: (&ndev->lock){++.-..}, at: [<ffffffff81531dd2>] mld_ifc_timer_expire+0x32/0x280
the shortest dependencies between 2nd lock and 1st lock:
-> (&(&adapter->cmd_lock)->rlock){+.+...} ops: 11 {
HARDIRQ-ON-W at:
[<ffffffff8109ad86>] __lock_acquire+0x7f6/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff81571156>] _raw_spin_lock+0x36/0x70
[<ffffffffa000d212>] vmxnet3_alloc_intr_resources+0x22/0x230 [vmxnet3]
[<ffffffffa0014031>] vmxnet3_probe_device+0x5f6/0x15c5 [vmxnet3]
[<ffffffff812df67f>] local_pci_probe+0x5f/0xd0
[<ffffffff812dfde9>] pci_device_probe+0x119/0x120
[<ffffffff81373df6>] driver_probe_device+0x96/0x1c0
[<ffffffff81373fcb>] __driver_attach+0xab/0xb0
[<ffffffff81372a1e>] bus_for_each_dev+0x5e/0x90
[<ffffffff81373a2e>] driver_attach+0x1e/0x20
[<ffffffff813735b8>] bus_add_driver+0xc8/0x290
[<ffffffff813745b6>] driver_register+0x76/0x140
[<ffffffff812e0046>] __pci_register_driver+0x66/0xe0
[<ffffffffa001b03a>] serio_raw_poll+0x3a/0x60 [serio_raw]
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff810aa76b>] sys_init_module+0xfb/0x250
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
SOFTIRQ-ON-W at:
[<ffffffff8109adb7>] __lock_acquire+0x827/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff81571156>] _raw_spin_lock+0x36/0x70
[<ffffffffa000d212>] vmxnet3_alloc_intr_resources+0x22/0x230 [vmxnet3]
[<ffffffffa0014031>] vmxnet3_probe_device+0x5f6/0x15c5 [vmxnet3]
[<ffffffff812df67f>] local_pci_probe+0x5f/0xd0
[<ffffffff812dfde9>] pci_device_probe+0x119/0x120
[<ffffffff81373df6>] driver_probe_device+0x96/0x1c0
[<ffffffff81373fcb>] __driver_attach+0xab/0xb0
[<ffffffff81372a1e>] bus_for_each_dev+0x5e/0x90
[<ffffffff81373a2e>] driver_attach+0x1e/0x20
[<ffffffff813735b8>] bus_add_driver+0xc8/0x290
[<ffffffff813745b6>] driver_register+0x76/0x140
[<ffffffff812e0046>] __pci_register_driver+0x66/0xe0
[<ffffffffa001b03a>] serio_raw_poll+0x3a/0x60 [serio_raw]
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff810aa76b>] sys_init_module+0xfb/0x250
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
INITIAL USE at:
[<ffffffff8109a9e9>] __lock_acquire+0x459/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff81571156>] _raw_spin_lock+0x36/0x70
[<ffffffffa000d212>] vmxnet3_alloc_intr_resources+0x22/0x230 [vmxnet3]
[<ffffffffa0014031>] vmxnet3_probe_device+0x5f6/0x15c5 [vmxnet3]
[<ffffffff812df67f>] local_pci_probe+0x5f/0xd0
[<ffffffff812dfde9>] pci_device_probe+0x119/0x120
[<ffffffff81373df6>] driver_probe_device+0x96/0x1c0
[<ffffffff81373fcb>] __driver_attach+0xab/0xb0
[<ffffffff81372a1e>] bus_for_each_dev+0x5e/0x90
[<ffffffff81373a2e>] driver_attach+0x1e/0x20
[<ffffffff813735b8>] bus_add_driver+0xc8/0x290
[<ffffffff813745b6>] driver_register+0x76/0x140
[<ffffffff812e0046>] __pci_register_driver+0x66/0xe0
[<ffffffffa001b03a>] serio_raw_poll+0x3a/0x60 [serio_raw]
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff810aa76b>] sys_init_module+0xfb/0x250
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
}
... key at: [<ffffffffa0017590>] __key.42516+0x0/0xffffffffffffda70 [vmxnet3]
... acquired at:
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff81571bb5>] _raw_spin_lock_irqsave+0x55/0xa0
[<ffffffffa000de27>] vmxnet3_set_mc+0x97/0x1a0 [vmxnet3]
[<ffffffff8146ffa0>] __dev_set_rx_mode+0x40/0xb0
[<ffffffff81470040>] dev_set_rx_mode+0x30/0x50
[<ffffffff81470127>] __dev_open+0xc7/0x100
[<ffffffff814703c1>] __dev_change_flags+0xa1/0x180
[<ffffffff81470568>] dev_change_flags+0x28/0x70
[<ffffffff814da960>] devinet_ioctl+0x730/0x800
[<ffffffff814db508>] inet_ioctl+0x88/0xa0
[<ffffffff814541f0>] sock_do_ioctl+0x30/0x70
[<ffffffff814542a9>] sock_ioctl+0x79/0x2f0
[<ffffffff81188798>] do_vfs_ioctl+0x98/0x570
[<ffffffff81188d01>] sys_ioctl+0x91/0xa0
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
-> (_xmit_ETHER){+.....} ops: 6 {
HARDIRQ-ON-W at:
[<ffffffff8109ad86>] __lock_acquire+0x7f6/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81475618>] __dev_mc_add+0x38/0x90
[<ffffffff814756a0>] dev_mc_add+0x10/0x20
[<ffffffff81532c9e>] igmp6_group_added+0x10e/0x1b0
[<ffffffff81533f2d>] ipv6_dev_mc_inc+0x2cd/0x430
[<ffffffff81515e17>] ipv6_add_dev+0x357/0x450
[<ffffffff81519f27>] addrconf_notify+0x2f7/0xb10
[<ffffffff81575c1c>] notifier_call_chain+0x8c/0xc0
[<ffffffff81089586>] raw_notifier_call_chain+0x16/0x20
[<ffffffff814689b7>] call_netdevice_notifiers+0x37/0x70
[<ffffffff8146a944>] register_netdevice+0x244/0x2d0
[<ffffffff8146aa0f>] register_netdev+0x3f/0x60
[<ffffffffa001419b>] vmxnet3_probe_device+0x760/0x15c5 [vmxnet3]
[<ffffffff812df67f>] local_pci_probe+0x5f/0xd0
[<ffffffff812dfde9>] pci_device_probe+0x119/0x120
[<ffffffff81373df6>] driver_probe_device+0x96/0x1c0
[<ffffffff81373fcb>] __driver_attach+0xab/0xb0
[<ffffffff81372a1e>] bus_for_each_dev+0x5e/0x90
[<ffffffff81373a2e>] driver_attach+0x1e/0x20
[<ffffffff813735b8>] bus_add_driver+0xc8/0x290
[<ffffffff813745b6>] driver_register+0x76/0x140
[<ffffffff812e0046>] __pci_register_driver+0x66/0xe0
[<ffffffffa001b03a>] serio_raw_poll+0x3a/0x60 [serio_raw]
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff810aa76b>] sys_init_module+0xfb/0x250
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
INITIAL USE at:
[<ffffffff8109a9e9>] __lock_acquire+0x459/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81475618>] __dev_mc_add+0x38/0x90
[<ffffffff814756a0>] dev_mc_add+0x10/0x20
[<ffffffff81532c9e>] igmp6_group_added+0x10e/0x1b0
[<ffffffff81533f2d>] ipv6_dev_mc_inc+0x2cd/0x430
[<ffffffff81515e17>] ipv6_add_dev+0x357/0x450
[<ffffffff81519f27>] addrconf_notify+0x2f7/0xb10
[<ffffffff81575c1c>] notifier_call_chain+0x8c/0xc0
[<ffffffff81089586>] raw_notifier_call_chain+0x16/0x20
[<ffffffff814689b7>] call_netdevice_notifiers+0x37/0x70
[<ffffffff8146a944>] register_netdevice+0x244/0x2d0
[<ffffffff8146aa0f>] register_netdev+0x3f/0x60
[<ffffffffa001419b>] vmxnet3_probe_device+0x760/0x15c5 [vmxnet3]
[<ffffffff812df67f>] local_pci_probe+0x5f/0xd0
[<ffffffff812dfde9>] pci_device_probe+0x119/0x120
[<ffffffff81373df6>] driver_probe_device+0x96/0x1c0
[<ffffffff81373fcb>] __driver_attach+0xab/0xb0
[<ffffffff81372a1e>] bus_for_each_dev+0x5e/0x90
[<ffffffff81373a2e>] driver_attach+0x1e/0x20
[<ffffffff813735b8>] bus_add_driver+0xc8/0x290
[<ffffffff813745b6>] driver_register+0x76/0x140
[<ffffffff812e0046>] __pci_register_driver+0x66/0xe0
[<ffffffffa001b03a>] serio_raw_poll+0x3a/0x60 [serio_raw]
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff810aa76b>] sys_init_module+0xfb/0x250
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
}
... key at: [<ffffffff827fd868>] netdev_addr_lock_key+0x8/0x1e0
... acquired at:
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81475618>] __dev_mc_add+0x38/0x90
[<ffffffff814756a0>] dev_mc_add+0x10/0x20
[<ffffffff81532c9e>] igmp6_group_added+0x10e/0x1b0
[<ffffffff81533f2d>] ipv6_dev_mc_inc+0x2cd/0x430
[<ffffffff81515e17>] ipv6_add_dev+0x357/0x450
[<ffffffff81519f27>] addrconf_notify+0x2f7/0xb10
[<ffffffff81575c1c>] notifier_call_chain+0x8c/0xc0
[<ffffffff81089586>] raw_notifier_call_chain+0x16/0x20
[<ffffffff814689b7>] call_netdevice_notifiers+0x37/0x70
[<ffffffff8146a944>] register_netdevice+0x244/0x2d0
[<ffffffff8146aa0f>] register_netdev+0x3f/0x60
[<ffffffffa001419b>] vmxnet3_probe_device+0x760/0x15c5 [vmxnet3]
[<ffffffff812df67f>] local_pci_probe+0x5f/0xd0
[<ffffffff812dfde9>] pci_device_probe+0x119/0x120
[<ffffffff81373df6>] driver_probe_device+0x96/0x1c0
[<ffffffff81373fcb>] __driver_attach+0xab/0xb0
[<ffffffff81372a1e>] bus_for_each_dev+0x5e/0x90
[<ffffffff81373a2e>] driver_attach+0x1e/0x20
[<ffffffff813735b8>] bus_add_driver+0xc8/0x290
[<ffffffff813745b6>] driver_register+0x76/0x140
[<ffffffff812e0046>] __pci_register_driver+0x66/0xe0
[<ffffffffa001b03a>] serio_raw_poll+0x3a/0x60 [serio_raw]
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff810aa76b>] sys_init_module+0xfb/0x250
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
-> (&(&mc->mca_lock)->rlock){+.-...} ops: 6 {
HARDIRQ-ON-W at:
[<ffffffff8109ad86>] __lock_acquire+0x7f6/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81532bd5>] igmp6_group_added+0x45/0x1b0
[<ffffffff81533f2d>] ipv6_dev_mc_inc+0x2cd/0x430
[<ffffffff81515e17>] ipv6_add_dev+0x357/0x450
[<ffffffff81ce0d16>] addrconf_init+0x4e/0x183
[<ffffffff81ce0ba1>] inet6_init+0x191/0x2a6
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff81ca4d3f>] kernel_init+0xe3/0x168
[<ffffffff8157b2e4>] kernel_thread_helper+0x4/0x10
IN-SOFTIRQ-W at:
[<ffffffff8109ad5e>] __lock_acquire+0x7ce/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81531e9f>] mld_ifc_timer_expire+0xff/0x280
[<ffffffff8106f2a9>] run_timer_softirq+0x179/0x3f0
[<ffffffff810666d0>] __do_softirq+0xc0/0x210
[<ffffffff8157b3dc>] call_softirq+0x1c/0x30
[<ffffffff8100d42d>] do_softirq+0xad/0xe0
[<ffffffff81066afe>] irq_exit+0x9e/0xb0
[<ffffffff8157bd40>] smp_apic_timer_interrupt+0x70/0x9b
[<ffffffff8157ab93>] apic_timer_interrupt+0x13/0x20
[<ffffffff8149d857>] rt_do_flush+0x87/0x2a0
[<ffffffff814a16b6>] rt_cache_flush+0x46/0x60
[<ffffffff814e36e0>] fib_disable_ip+0x40/0x60
[<ffffffff814e5447>] fib_inetaddr_event+0xd7/0xe0
[<ffffffff81575c1c>] notifier_call_chain+0x8c/0xc0
[<ffffffff810896e8>] __blocking_notifier_call_chain+0x78/0xb0
[<ffffffff81089736>] blocking_notifier_call_chain+0x16/0x20
[<ffffffff814d8021>] __inet_del_ifa+0xf1/0x2e0
[<ffffffff814d8223>] inet_del_ifa+0x13/0x20
[<ffffffff814da731>] devinet_ioctl+0x501/0x800
[<ffffffff814db508>] inet_ioctl+0x88/0xa0
[<ffffffff814541f0>] sock_do_ioctl+0x30/0x70
[<ffffffff814542a9>] sock_ioctl+0x79/0x2f0
[<ffffffff81188798>] do_vfs_ioctl+0x98/0x570
[<ffffffff81188d01>] sys_ioctl+0x91/0xa0
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
INITIAL USE at:
[<ffffffff8109a9e9>] __lock_acquire+0x459/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81532bd5>] igmp6_group_added+0x45/0x1b0
[<ffffffff81533f2d>] ipv6_dev_mc_inc+0x2cd/0x430
[<ffffffff81515e17>] ipv6_add_dev+0x357/0x450
[<ffffffff81ce0d16>] addrconf_init+0x4e/0x183
[<ffffffff81ce0ba1>] inet6_init+0x191/0x2a6
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff81ca4d3f>] kernel_init+0xe3/0x168
[<ffffffff8157b2e4>] kernel_thread_helper+0x4/0x10
}
... key at: [<ffffffff82801be2>] __key.40877+0x0/0x8
... acquired at:
[<ffffffff810997bc>] check_usage_forwards+0x9c/0x110
[<ffffffff8109a32c>] mark_lock+0x19c/0x400
[<ffffffff8109ad5e>] __lock_acquire+0x7ce/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81531e9f>] mld_ifc_timer_expire+0xff/0x280
[<ffffffff8106f2a9>] run_timer_softirq+0x179/0x3f0
[<ffffffff810666d0>] __do_softirq+0xc0/0x210
[<ffffffff8157b3dc>] call_softirq+0x1c/0x30
[<ffffffff8100d42d>] do_softirq+0xad/0xe0
[<ffffffff81066afe>] irq_exit+0x9e/0xb0
[<ffffffff8157bd40>] smp_apic_timer_interrupt+0x70/0x9b
[<ffffffff8157ab93>] apic_timer_interrupt+0x13/0x20
[<ffffffff8149d857>] rt_do_flush+0x87/0x2a0
[<ffffffff814a16b6>] rt_cache_flush+0x46/0x60
[<ffffffff814e36e0>] fib_disable_ip+0x40/0x60
[<ffffffff814e5447>] fib_inetaddr_event+0xd7/0xe0
[<ffffffff81575c1c>] notifier_call_chain+0x8c/0xc0
[<ffffffff810896e8>] __blocking_notifier_call_chain+0x78/0xb0
[<ffffffff81089736>] blocking_notifier_call_chain+0x16/0x20
[<ffffffff814d8021>] __inet_del_ifa+0xf1/0x2e0
[<ffffffff814d8223>] inet_del_ifa+0x13/0x20
[<ffffffff814da731>] devinet_ioctl+0x501/0x800
[<ffffffff814db508>] inet_ioctl+0x88/0xa0
[<ffffffff814541f0>] sock_do_ioctl+0x30/0x70
[<ffffffff814542a9>] sock_ioctl+0x79/0x2f0
[<ffffffff81188798>] do_vfs_ioctl+0x98/0x570
[<ffffffff81188d01>] sys_ioctl+0x91/0xa0
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
stack backtrace:
Pid: 567, comm: ifconfig Not tainted 2.6.39-rc6+ #1
Call Trace:
<IRQ> [<ffffffff810996f6>] print_irq_inversion_bug+0x146/0x170
[<ffffffff81099720>] ? print_irq_inversion_bug+0x170/0x170
[<ffffffff810997bc>] check_usage_forwards+0x9c/0x110
[<ffffffff8109a32c>] mark_lock+0x19c/0x400
[<ffffffff8109ad5e>] __lock_acquire+0x7ce/0x1e10
[<ffffffff8109a383>] ? mark_lock+0x1f3/0x400
[<ffffffff8109b497>] ? __lock_acquire+0xf07/0x1e10
[<ffffffff81012255>] ? native_sched_clock+0x15/0x70
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff81531e9f>] ? mld_ifc_timer_expire+0xff/0x280
[<ffffffff8109759d>] ? lock_release_holdtime+0x3d/0x1a0
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81531e9f>] ? mld_ifc_timer_expire+0xff/0x280
[<ffffffff8157170b>] ? _raw_spin_unlock+0x2b/0x40
[<ffffffff81531e9f>] mld_ifc_timer_expire+0xff/0x280
[<ffffffff8106f2a9>] run_timer_softirq+0x179/0x3f0
[<ffffffff8106f21b>] ? run_timer_softirq+0xeb/0x3f0
[<ffffffff810122b9>] ? sched_clock+0x9/0x10
[<ffffffff81531da0>] ? mld_gq_timer_expire+0x30/0x30
[<ffffffff810666d0>] __do_softirq+0xc0/0x210
[<ffffffff8109455f>] ? tick_program_event+0x1f/0x30
[<ffffffff8157b3dc>] call_softirq+0x1c/0x30
[<ffffffff8100d42d>] do_softirq+0xad/0xe0
[<ffffffff81066afe>] irq_exit+0x9e/0xb0
[<ffffffff8157bd40>] smp_apic_timer_interrupt+0x70/0x9b
[<ffffffff8157ab93>] apic_timer_interrupt+0x13/0x20
<EOI> [<ffffffff81571f14>] ? retint_restore_args+0x13/0x13
[<ffffffff810974a7>] ? lock_is_held+0x17/0xd0
[<ffffffff8149d857>] rt_do_flush+0x87/0x2a0
[<ffffffff814a16b6>] rt_cache_flush+0x46/0x60
[<ffffffff814e36e0>] fib_disable_ip+0x40/0x60
[<ffffffff814e5447>] fib_inetaddr_event+0xd7/0xe0
[<ffffffff81575c1c>] notifier_call_chain+0x8c/0xc0
[<ffffffff810896e8>] __blocking_notifier_call_chain+0x78/0xb0
[<ffffffff81089736>] blocking_notifier_call_chain+0x16/0x20
[<ffffffff814d8021>] __inet_del_ifa+0xf1/0x2e0
[<ffffffff814d8223>] inet_del_ifa+0x13/0x20
[<ffffffff814da731>] devinet_ioctl+0x501/0x800
[<ffffffff8108a3af>] ? local_clock+0x6f/0x80
[<ffffffff81575898>] ? do_page_fault+0x268/0x560
[<ffffffff814db508>] inet_ioctl+0x88/0xa0
[<ffffffff814541f0>] sock_do_ioctl+0x30/0x70
[<ffffffff814542a9>] sock_ioctl+0x79/0x2f0
[<ffffffff810dfe87>] ? __call_rcu+0xa7/0x190
[<ffffffff81188798>] do_vfs_ioctl+0x98/0x570
[<ffffffff8117737e>] ? fget_light+0x33e/0x430
[<ffffffff81571ef9>] ? retint_swapgs+0x13/0x1b
[<ffffffff81188d01>] sys_ioctl+0x91/0xa0
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
Signed-off-by: Roland Dreier <roland@purestorage.com>
Signed-off-by: Shreyas N Bhatewara <sbhatewara@vmware.com>
Signed-off-by: Scott J. Goldman <scottjg@vmware.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2011-05-06 16:32:53 +08:00
|
|
|
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
|
2009-10-13 15:15:51 +08:00
|
|
|
|
2010-11-19 18:55:24 +08:00
|
|
|
for (i = 0; i < adapter->num_tx_queues; i++)
|
|
|
|
if (adapter->tqd_start[i].status.stopped)
|
|
|
|
dev_err(&adapter->netdev->dev,
|
|
|
|
"%s: tq[%d] error 0x%x\n",
|
|
|
|
adapter->netdev->name, i, le32_to_cpu(
|
|
|
|
adapter->tqd_start[i].status.error));
|
|
|
|
for (i = 0; i < adapter->num_rx_queues; i++)
|
|
|
|
if (adapter->rqd_start[i].status.stopped)
|
|
|
|
dev_err(&adapter->netdev->dev,
|
|
|
|
"%s: rq[%d] error 0x%x\n",
|
|
|
|
adapter->netdev->name, i,
|
|
|
|
adapter->rqd_start[i].status.error);
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
schedule_work(&adapter->work);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-11-16 21:41:33 +08:00
|
|
|
#ifdef __BIG_ENDIAN_BITFIELD
|
|
|
|
/*
|
|
|
|
* The device expects the bitfields in shared structures to be written in
|
|
|
|
* little endian. When CPU is big endian, the following routines are used to
|
|
|
|
* correctly read and write into ABI.
|
|
|
|
* The general technique used here is : double word bitfields are defined in
|
|
|
|
* opposite order for big endian architecture. Then before reading them in
|
|
|
|
* driver the complete double word is translated using le32_to_cpu. Similarly
|
|
|
|
* After the driver writes into bitfields, cpu_to_le32 is used to translate the
|
|
|
|
* double words into required format.
|
|
|
|
* In order to avoid touching bits in shared structure more than once, temporary
|
|
|
|
* descriptors are used. These are passed as srcDesc to following functions.
|
|
|
|
*/
|
|
|
|
static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
|
|
|
|
struct Vmxnet3_RxDesc *dstDesc)
|
|
|
|
{
|
|
|
|
u32 *src = (u32 *)srcDesc + 2;
|
|
|
|
u32 *dst = (u32 *)dstDesc + 2;
|
|
|
|
dstDesc->addr = le64_to_cpu(srcDesc->addr);
|
|
|
|
*dst = le32_to_cpu(*src);
|
|
|
|
dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
|
|
|
|
struct Vmxnet3_TxDesc *dstDesc)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
u32 *src = (u32 *)(srcDesc + 1);
|
|
|
|
u32 *dst = (u32 *)(dstDesc + 1);
|
|
|
|
|
|
|
|
/* Working backwards so that the gen bit is set at the end. */
|
|
|
|
for (i = 2; i > 0; i--) {
|
|
|
|
src--;
|
|
|
|
dst--;
|
|
|
|
*dst = cpu_to_le32(*src);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
|
|
|
|
struct Vmxnet3_RxCompDesc *dstDesc)
|
|
|
|
{
|
|
|
|
int i = 0;
|
|
|
|
u32 *src = (u32 *)srcDesc;
|
|
|
|
u32 *dst = (u32 *)dstDesc;
|
|
|
|
for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
|
|
|
|
*dst = le32_to_cpu(*src);
|
|
|
|
src++;
|
|
|
|
dst++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/* Used to read bitfield values from double words. */
|
|
|
|
static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
|
|
|
|
{
|
|
|
|
u32 temp = le32_to_cpu(*bitfield);
|
|
|
|
u32 mask = ((1 << size) - 1) << pos;
|
|
|
|
temp &= mask;
|
|
|
|
temp >>= pos;
|
|
|
|
return temp;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#endif /* __BIG_ENDIAN_BITFIELD */
|
|
|
|
|
|
|
|
#ifdef __BIG_ENDIAN_BITFIELD
|
|
|
|
|
|
|
|
# define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
|
|
|
|
txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
|
|
|
|
VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
|
|
|
|
# define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
|
|
|
|
txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
|
|
|
|
VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
|
|
|
|
# define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
|
|
|
|
VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
|
|
|
|
VMXNET3_TCD_GEN_SIZE)
|
|
|
|
# define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
|
|
|
|
VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
|
|
|
|
# define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
|
|
|
|
(dstrcd) = (tmp); \
|
|
|
|
vmxnet3_RxCompToCPU((rcd), (tmp)); \
|
|
|
|
} while (0)
|
|
|
|
# define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
|
|
|
|
(dstrxd) = (tmp); \
|
|
|
|
vmxnet3_RxDescToCPU((rxd), (tmp)); \
|
|
|
|
} while (0)
|
|
|
|
|
|
|
|
#else
|
|
|
|
|
|
|
|
# define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
|
|
|
|
# define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
|
|
|
|
# define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
|
|
|
|
# define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
|
|
|
|
# define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
|
|
|
|
# define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
|
|
|
|
|
|
|
|
#endif /* __BIG_ENDIAN_BITFIELD */
|
|
|
|
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
static void
|
|
|
|
vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
|
|
|
|
struct pci_dev *pdev)
|
|
|
|
{
|
|
|
|
if (tbi->map_type == VMXNET3_MAP_SINGLE)
|
2013-08-24 00:33:49 +08:00
|
|
|
dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len,
|
2009-10-13 15:15:51 +08:00
|
|
|
PCI_DMA_TODEVICE);
|
|
|
|
else if (tbi->map_type == VMXNET3_MAP_PAGE)
|
2013-08-24 00:33:49 +08:00
|
|
|
dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len,
|
2009-10-13 15:15:51 +08:00
|
|
|
PCI_DMA_TODEVICE);
|
|
|
|
else
|
|
|
|
BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);
|
|
|
|
|
|
|
|
tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
|
|
|
|
struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
|
|
|
|
{
|
|
|
|
struct sk_buff *skb;
|
|
|
|
int entries = 0;
|
|
|
|
|
|
|
|
/* no out of order completion */
|
|
|
|
BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
|
2009-11-16 21:41:33 +08:00
|
|
|
BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
skb = tq->buf_info[eop_idx].skb;
|
|
|
|
BUG_ON(skb == NULL);
|
|
|
|
tq->buf_info[eop_idx].skb = NULL;
|
|
|
|
|
|
|
|
VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);
|
|
|
|
|
|
|
|
while (tq->tx_ring.next2comp != eop_idx) {
|
|
|
|
vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
|
|
|
|
pdev);
|
|
|
|
|
|
|
|
/* update next2comp w/o tx_lock. Since we are marking more,
|
|
|
|
* instead of less, tx ring entries avail, the worst case is
|
|
|
|
* that the tx routine incorrectly re-queues a pkt due to
|
|
|
|
* insufficient tx ring entries.
|
|
|
|
*/
|
|
|
|
vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
|
|
|
|
entries++;
|
|
|
|
}
|
|
|
|
|
|
|
|
dev_kfree_skb_any(skb);
|
|
|
|
return entries;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
|
|
|
|
struct vmxnet3_adapter *adapter)
|
|
|
|
{
|
|
|
|
int completed = 0;
|
|
|
|
union Vmxnet3_GenericDesc *gdesc;
|
|
|
|
|
|
|
|
gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
|
2009-11-16 21:41:33 +08:00
|
|
|
while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
|
2018-05-14 20:14:49 +08:00
|
|
|
/* Prevent any &gdesc->tcd field from being (speculatively)
|
|
|
|
* read before (&gdesc->tcd)->gen is read.
|
|
|
|
*/
|
|
|
|
dma_rmb();
|
|
|
|
|
2009-11-16 21:41:33 +08:00
|
|
|
completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
|
|
|
|
&gdesc->tcd), tq, adapter->pdev,
|
|
|
|
adapter);
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
|
|
|
|
gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (completed) {
|
|
|
|
spin_lock(&tq->tx_lock);
|
|
|
|
if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
|
|
|
|
vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
|
|
|
|
VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
|
|
|
|
netif_carrier_ok(adapter->netdev))) {
|
|
|
|
vmxnet3_tq_wake(tq, adapter);
|
|
|
|
}
|
|
|
|
spin_unlock(&tq->tx_lock);
|
|
|
|
}
|
|
|
|
return completed;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
|
|
|
|
struct vmxnet3_adapter *adapter)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
|
|
|
|
struct vmxnet3_tx_buf_info *tbi;
|
|
|
|
|
|
|
|
tbi = tq->buf_info + tq->tx_ring.next2comp;
|
|
|
|
|
|
|
|
vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
|
|
|
|
if (tbi->skb) {
|
|
|
|
dev_kfree_skb_any(tbi->skb);
|
|
|
|
tbi->skb = NULL;
|
|
|
|
}
|
|
|
|
vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* sanity check, verify all buffers are indeed unmapped and freed */
|
|
|
|
for (i = 0; i < tq->tx_ring.size; i++) {
|
|
|
|
BUG_ON(tq->buf_info[i].skb != NULL ||
|
|
|
|
tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
|
|
|
|
}
|
|
|
|
|
|
|
|
tq->tx_ring.gen = VMXNET3_INIT_GEN;
|
|
|
|
tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
|
|
|
|
|
|
|
|
tq->comp_ring.gen = VMXNET3_INIT_GEN;
|
|
|
|
tq->comp_ring.next2proc = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-11-19 18:55:24 +08:00
|
|
|
static void
|
2009-10-13 15:15:51 +08:00
|
|
|
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
|
|
|
|
struct vmxnet3_adapter *adapter)
|
|
|
|
{
|
|
|
|
if (tq->tx_ring.base) {
|
2013-08-24 00:33:49 +08:00
|
|
|
dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size *
|
|
|
|
sizeof(struct Vmxnet3_TxDesc),
|
|
|
|
tq->tx_ring.base, tq->tx_ring.basePA);
|
2009-10-13 15:15:51 +08:00
|
|
|
tq->tx_ring.base = NULL;
|
|
|
|
}
|
|
|
|
if (tq->data_ring.base) {
|
2016-06-17 01:51:55 +08:00
|
|
|
dma_free_coherent(&adapter->pdev->dev,
|
|
|
|
tq->data_ring.size * tq->txdata_desc_size,
|
2013-08-24 00:33:49 +08:00
|
|
|
tq->data_ring.base, tq->data_ring.basePA);
|
2009-10-13 15:15:51 +08:00
|
|
|
tq->data_ring.base = NULL;
|
|
|
|
}
|
|
|
|
if (tq->comp_ring.base) {
|
2013-08-24 00:33:49 +08:00
|
|
|
dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
|
|
|
|
sizeof(struct Vmxnet3_TxCompDesc),
|
|
|
|
tq->comp_ring.base, tq->comp_ring.basePA);
|
2009-10-13 15:15:51 +08:00
|
|
|
tq->comp_ring.base = NULL;
|
|
|
|
}
|
2013-08-24 00:33:49 +08:00
|
|
|
if (tq->buf_info) {
|
|
|
|
dma_free_coherent(&adapter->pdev->dev,
|
|
|
|
tq->tx_ring.size * sizeof(tq->buf_info[0]),
|
|
|
|
tq->buf_info, tq->buf_info_pa);
|
|
|
|
tq->buf_info = NULL;
|
|
|
|
}
|
2009-10-13 15:15:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-11-19 18:55:24 +08:00
|
|
|
/* Destroy all tx queues */
|
|
|
|
void
|
|
|
|
vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < adapter->num_tx_queues; i++)
|
|
|
|
vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2009-10-13 15:15:51 +08:00
|
|
|
static void
|
|
|
|
vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
|
|
|
|
struct vmxnet3_adapter *adapter)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
/* reset the tx ring contents to 0 and reset the tx ring states */
|
|
|
|
memset(tq->tx_ring.base, 0, tq->tx_ring.size *
|
|
|
|
sizeof(struct Vmxnet3_TxDesc));
|
|
|
|
tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
|
|
|
|
tq->tx_ring.gen = VMXNET3_INIT_GEN;
|
|
|
|
|
2016-06-17 01:51:55 +08:00
|
|
|
memset(tq->data_ring.base, 0,
|
|
|
|
tq->data_ring.size * tq->txdata_desc_size);
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
/* reset the tx comp ring contents to 0 and reset comp ring states */
|
|
|
|
memset(tq->comp_ring.base, 0, tq->comp_ring.size *
|
|
|
|
sizeof(struct Vmxnet3_TxCompDesc));
|
|
|
|
tq->comp_ring.next2proc = 0;
|
|
|
|
tq->comp_ring.gen = VMXNET3_INIT_GEN;
|
|
|
|
|
|
|
|
/* reset the bookkeeping data */
|
|
|
|
memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
|
|
|
|
for (i = 0; i < tq->tx_ring.size; i++)
|
|
|
|
tq->buf_info[i].map_type = VMXNET3_MAP_NONE;
|
|
|
|
|
|
|
|
/* stats are not reset */
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
|
|
|
|
struct vmxnet3_adapter *adapter)
|
|
|
|
{
|
2013-08-24 00:33:49 +08:00
|
|
|
size_t sz;
|
|
|
|
|
2009-10-13 15:15:51 +08:00
|
|
|
BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
|
|
|
|
tq->comp_ring.base || tq->buf_info);
|
|
|
|
|
2013-08-24 00:33:49 +08:00
|
|
|
tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
|
|
|
|
tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc),
|
|
|
|
&tq->tx_ring.basePA, GFP_KERNEL);
|
2009-10-13 15:15:51 +08:00
|
|
|
if (!tq->tx_ring.base) {
|
2013-01-15 15:28:30 +08:00
|
|
|
netdev_err(adapter->netdev, "failed to allocate tx ring\n");
|
2009-10-13 15:15:51 +08:00
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2013-08-24 00:33:49 +08:00
|
|
|
tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
|
2016-06-17 01:51:55 +08:00
|
|
|
tq->data_ring.size * tq->txdata_desc_size,
|
2013-08-24 00:33:49 +08:00
|
|
|
&tq->data_ring.basePA, GFP_KERNEL);
|
2009-10-13 15:15:51 +08:00
|
|
|
if (!tq->data_ring.base) {
|
2016-06-17 01:51:55 +08:00
|
|
|
netdev_err(adapter->netdev, "failed to allocate tx data ring\n");
|
2009-10-13 15:15:51 +08:00
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2013-08-24 00:33:49 +08:00
|
|
|
tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
|
|
|
|
tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
|
|
|
|
&tq->comp_ring.basePA, GFP_KERNEL);
|
2009-10-13 15:15:51 +08:00
|
|
|
if (!tq->comp_ring.base) {
|
2013-01-15 15:28:30 +08:00
|
|
|
netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
|
2009-10-13 15:15:51 +08:00
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2013-08-24 00:33:49 +08:00
|
|
|
sz = tq->tx_ring.size * sizeof(tq->buf_info[0]);
|
cross-tree: phase out dma_zalloc_coherent()
We already need to zero out memory for dma_alloc_coherent(), as such
using dma_zalloc_coherent() is superflous. Phase it out.
This change was generated with the following Coccinelle SmPL patch:
@ replace_dma_zalloc_coherent @
expression dev, size, data, handle, flags;
@@
-dma_zalloc_coherent(dev, size, handle, flags)
+dma_alloc_coherent(dev, size, handle, flags)
Suggested-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
[hch: re-ran the script on the latest tree]
Signed-off-by: Christoph Hellwig <hch@lst.de>
2019-01-04 16:23:09 +08:00
|
|
|
tq->buf_info = dma_alloc_coherent(&adapter->pdev->dev, sz,
|
|
|
|
&tq->buf_info_pa, GFP_KERNEL);
|
2012-01-29 20:56:23 +08:00
|
|
|
if (!tq->buf_info)
|
2009-10-13 15:15:51 +08:00
|
|
|
goto err;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err:
|
|
|
|
vmxnet3_tq_destroy(tq, adapter);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
2010-11-19 18:55:24 +08:00
|
|
|
static void
|
|
|
|
vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < adapter->num_tx_queues; i++)
|
|
|
|
vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
|
|
|
|
}
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* starting from ring->next2fill, allocate rx buffers for the given ring
|
|
|
|
* of the rx queue and update the rx desc. stop after @num_to_alloc buffers
|
|
|
|
* are allocated or allocation fails
|
|
|
|
*/
|
|
|
|
|
|
|
|
static int
|
|
|
|
vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
|
|
|
|
int num_to_alloc, struct vmxnet3_adapter *adapter)
|
|
|
|
{
|
|
|
|
int num_allocated = 0;
|
|
|
|
struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
|
|
|
|
struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
|
|
|
|
u32 val;
|
|
|
|
|
2011-07-05 22:34:05 +08:00
|
|
|
while (num_allocated <= num_to_alloc) {
|
2009-10-13 15:15:51 +08:00
|
|
|
struct vmxnet3_rx_buf_info *rbi;
|
|
|
|
union Vmxnet3_GenericDesc *gd;
|
|
|
|
|
|
|
|
rbi = rbi_base + ring->next2fill;
|
|
|
|
gd = ring->base + ring->next2fill;
|
|
|
|
|
|
|
|
if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
|
|
|
|
if (rbi->skb == NULL) {
|
2013-01-15 15:28:26 +08:00
|
|
|
rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev,
|
|
|
|
rbi->len,
|
|
|
|
GFP_KERNEL);
|
2009-10-13 15:15:51 +08:00
|
|
|
if (unlikely(rbi->skb == NULL)) {
|
|
|
|
rq->stats.rx_buf_alloc_failure++;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2013-08-24 00:33:49 +08:00
|
|
|
rbi->dma_addr = dma_map_single(
|
|
|
|
&adapter->pdev->dev,
|
2009-10-13 15:15:51 +08:00
|
|
|
rbi->skb->data, rbi->len,
|
|
|
|
PCI_DMA_FROMDEVICE);
|
2015-11-28 06:29:30 +08:00
|
|
|
if (dma_mapping_error(&adapter->pdev->dev,
|
|
|
|
rbi->dma_addr)) {
|
|
|
|
dev_kfree_skb_any(rbi->skb);
|
|
|
|
rq->stats.rx_buf_alloc_failure++;
|
|
|
|
break;
|
|
|
|
}
|
2009-10-13 15:15:51 +08:00
|
|
|
} else {
|
|
|
|
/* rx buffer skipped by the device */
|
|
|
|
}
|
|
|
|
val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
|
|
|
|
} else {
|
|
|
|
BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
|
|
|
|
rbi->len != PAGE_SIZE);
|
|
|
|
|
|
|
|
if (rbi->page == NULL) {
|
|
|
|
rbi->page = alloc_page(GFP_ATOMIC);
|
|
|
|
if (unlikely(rbi->page == NULL)) {
|
|
|
|
rq->stats.rx_buf_alloc_failure++;
|
|
|
|
break;
|
|
|
|
}
|
2013-08-24 00:33:49 +08:00
|
|
|
rbi->dma_addr = dma_map_page(
|
|
|
|
&adapter->pdev->dev,
|
2009-10-13 15:15:51 +08:00
|
|
|
rbi->page, 0, PAGE_SIZE,
|
|
|
|
PCI_DMA_FROMDEVICE);
|
2015-11-28 06:29:30 +08:00
|
|
|
if (dma_mapping_error(&adapter->pdev->dev,
|
|
|
|
rbi->dma_addr)) {
|
|
|
|
put_page(rbi->page);
|
|
|
|
rq->stats.rx_buf_alloc_failure++;
|
|
|
|
break;
|
|
|
|
}
|
2009-10-13 15:15:51 +08:00
|
|
|
} else {
|
|
|
|
/* rx buffers skipped by the device */
|
|
|
|
}
|
|
|
|
val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
|
|
|
|
}
|
|
|
|
|
2009-11-16 21:41:33 +08:00
|
|
|
gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
|
2011-07-05 22:34:05 +08:00
|
|
|
gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
|
2009-11-16 21:41:33 +08:00
|
|
|
| val | rbi->len);
|
2009-10-13 15:15:51 +08:00
|
|
|
|
2011-07-05 22:34:05 +08:00
|
|
|
/* Fill the last buffer but dont mark it ready, or else the
|
|
|
|
* device will think that the queue is full */
|
|
|
|
if (num_allocated == num_to_alloc)
|
|
|
|
break;
|
|
|
|
|
|
|
|
gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
|
2009-10-13 15:15:51 +08:00
|
|
|
num_allocated++;
|
|
|
|
vmxnet3_cmd_ring_adv_next2fill(ring);
|
|
|
|
}
|
|
|
|
|
2013-01-15 15:28:29 +08:00
|
|
|
netdev_dbg(adapter->netdev,
|
2013-01-15 15:28:27 +08:00
|
|
|
"alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n",
|
|
|
|
num_allocated, ring->next2fill, ring->next2comp);
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
/* so that the device can distinguish a full ring and an empty ring */
|
|
|
|
BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);
|
|
|
|
|
|
|
|
return num_allocated;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
|
|
|
|
struct vmxnet3_rx_buf_info *rbi)
|
|
|
|
{
|
|
|
|
struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
|
|
|
|
skb_shinfo(skb)->nr_frags;
|
|
|
|
|
|
|
|
BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
|
|
|
|
|
2011-09-22 05:53:28 +08:00
|
|
|
__skb_frag_set_page(frag, rbi->page);
|
2009-10-13 15:15:51 +08:00
|
|
|
frag->page_offset = 0;
|
2011-10-19 05:00:24 +08:00
|
|
|
skb_frag_size_set(frag, rcd->len);
|
|
|
|
skb->data_len += rcd->len;
|
2011-10-13 19:38:17 +08:00
|
|
|
skb->truesize += PAGE_SIZE;
|
2009-10-13 15:15:51 +08:00
|
|
|
skb_shinfo(skb)->nr_frags++;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-11-28 06:29:30 +08:00
|
|
|
static int
|
2009-10-13 15:15:51 +08:00
|
|
|
vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
|
|
|
|
struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
|
|
|
|
struct vmxnet3_adapter *adapter)
|
|
|
|
{
|
|
|
|
u32 dw2, len;
|
|
|
|
unsigned long buf_offset;
|
|
|
|
int i;
|
|
|
|
union Vmxnet3_GenericDesc *gdesc;
|
|
|
|
struct vmxnet3_tx_buf_info *tbi = NULL;
|
|
|
|
|
|
|
|
BUG_ON(ctx->copy_size > skb_headlen(skb));
|
|
|
|
|
|
|
|
/* use the previous gen bit for the SOP desc */
|
|
|
|
dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
|
|
|
|
|
|
|
|
ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
|
|
|
|
gdesc = ctx->sop_txd; /* both loops below can be skipped */
|
|
|
|
|
|
|
|
/* no need to map the buffer if headers are copied */
|
|
|
|
if (ctx->copy_size) {
|
2009-11-16 21:41:33 +08:00
|
|
|
ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
|
2009-10-13 15:15:51 +08:00
|
|
|
tq->tx_ring.next2fill *
|
2016-06-17 01:51:55 +08:00
|
|
|
tq->txdata_desc_size);
|
2009-11-16 21:41:33 +08:00
|
|
|
ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
|
2009-10-13 15:15:51 +08:00
|
|
|
ctx->sop_txd->dword[3] = 0;
|
|
|
|
|
|
|
|
tbi = tq->buf_info + tq->tx_ring.next2fill;
|
|
|
|
tbi->map_type = VMXNET3_MAP_NONE;
|
|
|
|
|
2013-01-15 15:28:29 +08:00
|
|
|
netdev_dbg(adapter->netdev,
|
2009-10-17 08:54:34 +08:00
|
|
|
"txd[%u]: 0x%Lx 0x%x 0x%x\n",
|
2009-11-16 21:41:33 +08:00
|
|
|
tq->tx_ring.next2fill,
|
|
|
|
le64_to_cpu(ctx->sop_txd->txd.addr),
|
2009-10-13 15:15:51 +08:00
|
|
|
ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
|
|
|
|
vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
|
|
|
|
|
|
|
|
/* use the right gen for non-SOP desc */
|
|
|
|
dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* linear part can use multiple tx desc if it's big */
|
|
|
|
len = skb_headlen(skb) - ctx->copy_size;
|
|
|
|
buf_offset = ctx->copy_size;
|
|
|
|
while (len) {
|
|
|
|
u32 buf_size;
|
|
|
|
|
2010-07-24 22:43:29 +08:00
|
|
|
if (len < VMXNET3_MAX_TX_BUF_SIZE) {
|
|
|
|
buf_size = len;
|
|
|
|
dw2 |= len;
|
|
|
|
} else {
|
|
|
|
buf_size = VMXNET3_MAX_TX_BUF_SIZE;
|
|
|
|
/* spec says that for TxDesc.len, 0 == 2^14 */
|
|
|
|
}
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
tbi = tq->buf_info + tq->tx_ring.next2fill;
|
|
|
|
tbi->map_type = VMXNET3_MAP_SINGLE;
|
2013-08-24 00:33:49 +08:00
|
|
|
tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
|
2009-10-13 15:15:51 +08:00
|
|
|
skb->data + buf_offset, buf_size,
|
|
|
|
PCI_DMA_TODEVICE);
|
2015-11-28 06:29:30 +08:00
|
|
|
if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
|
|
|
|
return -EFAULT;
|
2009-10-13 15:15:51 +08:00
|
|
|
|
2010-07-24 22:43:29 +08:00
|
|
|
tbi->len = buf_size;
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
|
|
|
|
BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
|
|
|
|
|
2009-11-16 21:41:33 +08:00
|
|
|
gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
|
2010-07-24 22:43:29 +08:00
|
|
|
gdesc->dword[2] = cpu_to_le32(dw2);
|
2009-10-13 15:15:51 +08:00
|
|
|
gdesc->dword[3] = 0;
|
|
|
|
|
2013-01-15 15:28:29 +08:00
|
|
|
netdev_dbg(adapter->netdev,
|
2009-10-17 08:54:34 +08:00
|
|
|
"txd[%u]: 0x%Lx 0x%x 0x%x\n",
|
2009-11-16 21:41:33 +08:00
|
|
|
tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
|
|
|
|
le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
|
2009-10-13 15:15:51 +08:00
|
|
|
vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
|
|
|
|
dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
|
|
|
|
|
|
|
|
len -= buf_size;
|
|
|
|
buf_offset += buf_size;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
|
2011-10-19 05:00:24 +08:00
|
|
|
const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
|
2012-10-29 15:30:49 +08:00
|
|
|
u32 buf_size;
|
2009-10-13 15:15:51 +08:00
|
|
|
|
2012-10-29 15:30:49 +08:00
|
|
|
buf_offset = 0;
|
|
|
|
len = skb_frag_size(frag);
|
|
|
|
while (len) {
|
|
|
|
tbi = tq->buf_info + tq->tx_ring.next2fill;
|
|
|
|
if (len < VMXNET3_MAX_TX_BUF_SIZE) {
|
|
|
|
buf_size = len;
|
|
|
|
dw2 |= len;
|
|
|
|
} else {
|
|
|
|
buf_size = VMXNET3_MAX_TX_BUF_SIZE;
|
|
|
|
/* spec says that for TxDesc.len, 0 == 2^14 */
|
|
|
|
}
|
|
|
|
tbi->map_type = VMXNET3_MAP_PAGE;
|
|
|
|
tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
|
|
|
|
buf_offset, buf_size,
|
|
|
|
DMA_TO_DEVICE);
|
2015-11-28 06:29:30 +08:00
|
|
|
if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
|
|
|
|
return -EFAULT;
|
2009-10-13 15:15:51 +08:00
|
|
|
|
2012-10-29 15:30:49 +08:00
|
|
|
tbi->len = buf_size;
|
2009-10-13 15:15:51 +08:00
|
|
|
|
2012-10-29 15:30:49 +08:00
|
|
|
gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
|
|
|
|
BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
|
2009-10-13 15:15:51 +08:00
|
|
|
|
2012-10-29 15:30:49 +08:00
|
|
|
gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
|
|
|
|
gdesc->dword[2] = cpu_to_le32(dw2);
|
|
|
|
gdesc->dword[3] = 0;
|
2009-10-13 15:15:51 +08:00
|
|
|
|
2013-01-15 15:28:29 +08:00
|
|
|
netdev_dbg(adapter->netdev,
|
2014-08-06 12:42:41 +08:00
|
|
|
"txd[%u]: 0x%llx %u %u\n",
|
2012-10-29 15:30:49 +08:00
|
|
|
tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
|
|
|
|
le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
|
|
|
|
vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
|
|
|
|
dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
|
|
|
|
|
|
|
|
len -= buf_size;
|
|
|
|
buf_offset += buf_size;
|
|
|
|
}
|
2009-10-13 15:15:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
ctx->eop_txd = gdesc;
|
|
|
|
|
|
|
|
/* set the last buf_info for the pkt */
|
|
|
|
tbi->skb = skb;
|
|
|
|
tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
|
2015-11-28 06:29:30 +08:00
|
|
|
|
|
|
|
return 0;
|
2009-10-13 15:15:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-11-19 18:55:24 +08:00
|
|
|
/* Init all tx queues */
|
|
|
|
static void
|
|
|
|
vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < adapter->num_tx_queues; i++)
|
|
|
|
vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2009-10-13 15:15:51 +08:00
|
|
|
/*
|
2016-03-05 02:40:48 +08:00
|
|
|
* parse relevant protocol headers:
|
2009-10-13 15:15:51 +08:00
|
|
|
* For a tso pkt, relevant headers are L2/3/4 including options
|
|
|
|
* For a pkt requesting csum offloading, they are L2/3 and may include L4
|
|
|
|
* if it's a TCP/UDP pkt
|
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* -1: error happens during parsing
|
|
|
|
* 0: protocol headers parsed, but too big to be copied
|
|
|
|
* 1: protocol headers parsed and copied
|
|
|
|
*
|
|
|
|
* Other effects:
|
|
|
|
* 1. related *ctx fields are updated.
|
|
|
|
* 2. ctx->copy_size is # of bytes copied
|
2016-03-05 02:40:48 +08:00
|
|
|
* 3. the portion to be copied is guaranteed to be in the linear part
|
2009-10-13 15:15:51 +08:00
|
|
|
*
|
|
|
|
*/
|
|
|
|
static int
|
2016-03-05 02:40:48 +08:00
|
|
|
vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
|
|
|
|
struct vmxnet3_tx_ctx *ctx,
|
|
|
|
struct vmxnet3_adapter *adapter)
|
2009-10-13 15:15:51 +08:00
|
|
|
{
|
2015-03-01 12:33:09 +08:00
|
|
|
u8 protocol = 0;
|
2009-10-13 15:15:51 +08:00
|
|
|
|
2010-12-14 23:24:08 +08:00
|
|
|
if (ctx->mss) { /* TSO */
|
2009-10-13 15:15:51 +08:00
|
|
|
ctx->eth_ip_hdr_size = skb_transport_offset(skb);
|
2012-01-25 03:47:21 +08:00
|
|
|
ctx->l4_hdr_size = tcp_hdrlen(skb);
|
2009-10-13 15:15:51 +08:00
|
|
|
ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
|
|
|
|
} else {
|
|
|
|
if (skb->ip_summed == CHECKSUM_PARTIAL) {
|
2010-12-14 23:24:08 +08:00
|
|
|
ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
if (ctx->ipv4) {
|
2012-01-25 03:47:21 +08:00
|
|
|
const struct iphdr *iph = ip_hdr(skb);
|
|
|
|
|
2015-03-01 12:33:09 +08:00
|
|
|
protocol = iph->protocol;
|
|
|
|
} else if (ctx->ipv6) {
|
|
|
|
const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
|
|
|
|
|
|
|
|
protocol = ipv6h->nexthdr;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (protocol) {
|
|
|
|
case IPPROTO_TCP:
|
|
|
|
ctx->l4_hdr_size = tcp_hdrlen(skb);
|
|
|
|
break;
|
|
|
|
case IPPROTO_UDP:
|
|
|
|
ctx->l4_hdr_size = sizeof(struct udphdr);
|
|
|
|
break;
|
|
|
|
default:
|
2009-10-13 15:15:51 +08:00
|
|
|
ctx->l4_hdr_size = 0;
|
2015-03-01 12:33:09 +08:00
|
|
|
break;
|
2009-10-13 15:15:51 +08:00
|
|
|
}
|
2015-03-01 12:33:09 +08:00
|
|
|
|
vmxnet3: cap copy length at size of skb to prevent dropped frames on tx
I was recently shown that vmxnet3 devices on transmit, will drop very small udp
frames consistently. This is due to a regression introduced by commit
39d4a96fd7d2926e46151adbd18b810aeeea8ec0. This commit attempts to introduce an
optimization to the tx path, indicating that the underlying hardware behaves
optimally when at least 54 bytes of header data are available for direct access.
This causes problems however, if the entire frame is less than 54 bytes long.
The subsequent pskb_may_pull in vmxnet3_parse_and_copy_hdr fails, causing an
error return code, which leads to vmxnet3_tq_xmit dropping the frame.
Fix it by placing a cap on the copy length. For frames longer than 54 bytes, we
do the pull as we normally would. If the frame is shorter than that, copy the
whole frame, but no more. This ensures that we still get the optimization for
qualifying frames, but don't do any damange for frames that are too short.
Also, since I'm unable to do this, it wuold be great if vmware could follow up
this patch with some additional code commentary as to why 54 bytes is an optimal
pull length for a virtual NIC driver. The comment that introduced this was
vague on that. Thanks!
Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
Reported-by: Max Matveev <mmatveev@redhat.com>
CC: Max Matveev <mmatveev@redhat.com>
CC: "David S. Miller" <davem@davemloft.net>
CC: Shreyas Bhatewara <sbhatewara@vmware.com>
CC: "VMware, Inc." <pv-drivers@vmware.com>
Signed-off-by: Shreyas N Bhatewara <sbhatewara@vmware.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2012-02-16 09:48:56 +08:00
|
|
|
ctx->copy_size = min(ctx->eth_ip_hdr_size +
|
|
|
|
ctx->l4_hdr_size, skb->len);
|
2009-10-13 15:15:51 +08:00
|
|
|
} else {
|
|
|
|
ctx->eth_ip_hdr_size = 0;
|
|
|
|
ctx->l4_hdr_size = 0;
|
|
|
|
/* copy as much as allowed */
|
2016-06-17 01:51:55 +08:00
|
|
|
ctx->copy_size = min_t(unsigned int,
|
|
|
|
tq->txdata_desc_size,
|
|
|
|
skb_headlen(skb));
|
2009-10-13 15:15:51 +08:00
|
|
|
}
|
|
|
|
|
2015-06-20 04:37:03 +08:00
|
|
|
if (skb->len <= VMXNET3_HDR_COPY_SIZE)
|
|
|
|
ctx->copy_size = skb->len;
|
|
|
|
|
2009-10-13 15:15:51 +08:00
|
|
|
/* make sure headers are accessible directly */
|
|
|
|
if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2016-06-17 01:51:55 +08:00
|
|
|
if (unlikely(ctx->copy_size > tq->txdata_desc_size)) {
|
2009-10-13 15:15:51 +08:00
|
|
|
tq->stats.oversized_hdr++;
|
|
|
|
ctx->copy_size = 0;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-03-05 02:40:48 +08:00
|
|
|
return 1;
|
|
|
|
err:
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* copy relevant protocol headers to the transmit ring:
|
|
|
|
* For a tso pkt, relevant headers are L2/3/4 including options
|
|
|
|
* For a pkt requesting csum offloading, they are L2/3 and may include L4
|
|
|
|
* if it's a TCP/UDP pkt
|
|
|
|
*
|
|
|
|
*
|
|
|
|
* Note that this requires that vmxnet3_parse_hdr be called first to set the
|
|
|
|
* appropriate bits in ctx first
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
|
|
|
|
struct vmxnet3_tx_ctx *ctx,
|
|
|
|
struct vmxnet3_adapter *adapter)
|
|
|
|
{
|
|
|
|
struct Vmxnet3_TxDataDesc *tdd;
|
|
|
|
|
2016-08-20 01:33:42 +08:00
|
|
|
tdd = (struct Vmxnet3_TxDataDesc *)((u8 *)tq->data_ring.base +
|
|
|
|
tq->tx_ring.next2fill *
|
|
|
|
tq->txdata_desc_size);
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
memcpy(tdd->data, skb->data, ctx->copy_size);
|
2013-01-15 15:28:29 +08:00
|
|
|
netdev_dbg(adapter->netdev,
|
2009-10-17 08:54:34 +08:00
|
|
|
"copy %u bytes to dataRing[%u]\n",
|
2009-10-13 15:15:51 +08:00
|
|
|
ctx->copy_size, tq->tx_ring.next2fill);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
vmxnet3_prepare_tso(struct sk_buff *skb,
|
|
|
|
struct vmxnet3_tx_ctx *ctx)
|
|
|
|
{
|
2012-01-25 03:47:21 +08:00
|
|
|
struct tcphdr *tcph = tcp_hdr(skb);
|
|
|
|
|
2009-10-13 15:15:51 +08:00
|
|
|
if (ctx->ipv4) {
|
2012-01-25 03:47:21 +08:00
|
|
|
struct iphdr *iph = ip_hdr(skb);
|
|
|
|
|
2009-10-13 15:15:51 +08:00
|
|
|
iph->check = 0;
|
|
|
|
tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
|
|
|
|
IPPROTO_TCP, 0);
|
2015-03-01 12:33:09 +08:00
|
|
|
} else if (ctx->ipv6) {
|
2012-01-25 03:47:21 +08:00
|
|
|
struct ipv6hdr *iph = ipv6_hdr(skb);
|
|
|
|
|
2009-10-13 15:15:51 +08:00
|
|
|
tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
|
|
|
|
IPPROTO_TCP, 0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-10-29 15:30:49 +08:00
|
|
|
static int txd_estimate(const struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
|
|
|
|
const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
|
|
|
|
|
|
|
|
count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
|
|
|
|
}
|
|
|
|
return count;
|
|
|
|
}
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Transmits a pkt thru a given tq
|
|
|
|
* Returns:
|
|
|
|
* NETDEV_TX_OK: descriptors are setup successfully
|
2011-03-31 09:57:33 +08:00
|
|
|
* NETDEV_TX_OK: error occurred, the pkt is dropped
|
2009-10-13 15:15:51 +08:00
|
|
|
* NETDEV_TX_BUSY: tx ring is full, queue is stopped
|
|
|
|
*
|
|
|
|
* Side-effects:
|
|
|
|
* 1. tx ring may be changed
|
|
|
|
* 2. tq stats may be updated accordingly
|
|
|
|
* 3. shared->txNumDeferred may be updated
|
|
|
|
*/
|
|
|
|
|
|
|
|
static int
|
|
|
|
vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
|
|
|
|
struct vmxnet3_adapter *adapter, struct net_device *netdev)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
u32 count;
|
2018-03-17 05:47:54 +08:00
|
|
|
int num_pkts;
|
|
|
|
int tx_num_deferred;
|
2009-10-13 15:15:51 +08:00
|
|
|
unsigned long flags;
|
|
|
|
struct vmxnet3_tx_ctx ctx;
|
|
|
|
union Vmxnet3_GenericDesc *gdesc;
|
2009-11-16 21:41:33 +08:00
|
|
|
#ifdef __BIG_ENDIAN_BITFIELD
|
|
|
|
/* Use temporary descriptor to avoid touching bits multiple times */
|
|
|
|
union Vmxnet3_GenericDesc tempTxDesc;
|
|
|
|
#endif
|
2009-10-13 15:15:51 +08:00
|
|
|
|
2012-10-29 15:30:49 +08:00
|
|
|
count = txd_estimate(skb);
|
2009-10-13 15:15:51 +08:00
|
|
|
|
2011-06-23 21:04:39 +08:00
|
|
|
ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
|
2015-03-01 12:33:09 +08:00
|
|
|
ctx.ipv6 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IPV6));
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
ctx.mss = skb_shinfo(skb)->gso_size;
|
|
|
|
if (ctx.mss) {
|
|
|
|
if (skb_header_cloned(skb)) {
|
|
|
|
if (unlikely(pskb_expand_head(skb, 0, 0,
|
|
|
|
GFP_ATOMIC) != 0)) {
|
|
|
|
tq->stats.drop_tso++;
|
|
|
|
goto drop_pkt;
|
|
|
|
}
|
|
|
|
tq->stats.copy_skb_header++;
|
|
|
|
}
|
|
|
|
vmxnet3_prepare_tso(skb, &ctx);
|
|
|
|
} else {
|
|
|
|
if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {
|
|
|
|
|
|
|
|
/* non-tso pkts must not use more than
|
|
|
|
* VMXNET3_MAX_TXD_PER_PKT entries
|
|
|
|
*/
|
|
|
|
if (skb_linearize(skb) != 0) {
|
|
|
|
tq->stats.drop_too_many_frags++;
|
|
|
|
goto drop_pkt;
|
|
|
|
}
|
|
|
|
tq->stats.linearized++;
|
|
|
|
|
|
|
|
/* recalculate the # of descriptors to use */
|
|
|
|
count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-03-05 02:40:48 +08:00
|
|
|
ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter);
|
2009-10-13 15:15:51 +08:00
|
|
|
if (ret >= 0) {
|
|
|
|
BUG_ON(ret <= 0 && ctx.copy_size != 0);
|
|
|
|
/* hdrs parsed, check against other limits */
|
|
|
|
if (ctx.mss) {
|
|
|
|
if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
|
|
|
|
VMXNET3_MAX_TX_BUF_SIZE)) {
|
2016-03-14 22:53:57 +08:00
|
|
|
tq->stats.drop_oversized_hdr++;
|
|
|
|
goto drop_pkt;
|
2009-10-13 15:15:51 +08:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (skb->ip_summed == CHECKSUM_PARTIAL) {
|
|
|
|
if (unlikely(ctx.eth_ip_hdr_size +
|
|
|
|
skb->csum_offset >
|
|
|
|
VMXNET3_MAX_CSUM_OFFSET)) {
|
2016-03-14 22:53:57 +08:00
|
|
|
tq->stats.drop_oversized_hdr++;
|
|
|
|
goto drop_pkt;
|
2009-10-13 15:15:51 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
tq->stats.drop_hdr_inspect_err++;
|
2016-03-05 02:40:48 +08:00
|
|
|
goto drop_pkt;
|
2009-10-13 15:15:51 +08:00
|
|
|
}
|
|
|
|
|
2016-03-05 02:40:48 +08:00
|
|
|
spin_lock_irqsave(&tq->tx_lock, flags);
|
|
|
|
|
|
|
|
if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
|
|
|
|
tq->stats.tx_ring_full++;
|
|
|
|
netdev_dbg(adapter->netdev,
|
|
|
|
"tx queue stopped on %s, next2comp %u"
|
|
|
|
" next2fill %u\n", adapter->netdev->name,
|
|
|
|
tq->tx_ring.next2comp, tq->tx_ring.next2fill);
|
|
|
|
|
|
|
|
vmxnet3_tq_stop(tq, adapter);
|
|
|
|
spin_unlock_irqrestore(&tq->tx_lock, flags);
|
|
|
|
return NETDEV_TX_BUSY;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
vmxnet3_copy_hdr(skb, tq, &ctx, adapter);
|
|
|
|
|
2009-10-13 15:15:51 +08:00
|
|
|
/* fill tx descs related to addr & len */
|
2015-11-28 06:29:30 +08:00
|
|
|
if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
|
|
|
|
goto unlock_drop_pkt;
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
/* setup the EOP desc */
|
2009-11-16 21:41:33 +08:00
|
|
|
ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
/* setup the SOP desc */
|
2009-11-16 21:41:33 +08:00
|
|
|
#ifdef __BIG_ENDIAN_BITFIELD
|
|
|
|
gdesc = &tempTxDesc;
|
|
|
|
gdesc->dword[2] = ctx.sop_txd->dword[2];
|
|
|
|
gdesc->dword[3] = ctx.sop_txd->dword[3];
|
|
|
|
#else
|
2009-10-13 15:15:51 +08:00
|
|
|
gdesc = ctx.sop_txd;
|
2009-11-16 21:41:33 +08:00
|
|
|
#endif
|
2018-03-17 05:47:54 +08:00
|
|
|
tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);
|
2009-10-13 15:15:51 +08:00
|
|
|
if (ctx.mss) {
|
|
|
|
gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
|
|
|
|
gdesc->txd.om = VMXNET3_OM_TSO;
|
|
|
|
gdesc->txd.msscof = ctx.mss;
|
2018-03-17 05:47:54 +08:00
|
|
|
num_pkts = (skb->len - gdesc->txd.hlen + ctx.mss - 1) / ctx.mss;
|
2009-10-13 15:15:51 +08:00
|
|
|
} else {
|
|
|
|
if (skb->ip_summed == CHECKSUM_PARTIAL) {
|
|
|
|
gdesc->txd.hlen = ctx.eth_ip_hdr_size;
|
|
|
|
gdesc->txd.om = VMXNET3_OM_CSUM;
|
|
|
|
gdesc->txd.msscof = ctx.eth_ip_hdr_size +
|
|
|
|
skb->csum_offset;
|
|
|
|
} else {
|
|
|
|
gdesc->txd.om = 0;
|
|
|
|
gdesc->txd.msscof = 0;
|
|
|
|
}
|
2018-03-17 05:47:54 +08:00
|
|
|
num_pkts = 1;
|
2009-10-13 15:15:51 +08:00
|
|
|
}
|
2018-03-17 05:47:54 +08:00
|
|
|
le32_add_cpu(&tq->shared->txNumDeferred, num_pkts);
|
|
|
|
tx_num_deferred += num_pkts;
|
2009-10-13 15:15:51 +08:00
|
|
|
|
2015-01-14 00:13:44 +08:00
|
|
|
if (skb_vlan_tag_present(skb)) {
|
2009-10-13 15:15:51 +08:00
|
|
|
gdesc->txd.ti = 1;
|
2015-01-14 00:13:44 +08:00
|
|
|
gdesc->txd.tci = skb_vlan_tag_get(skb);
|
2009-10-13 15:15:51 +08:00
|
|
|
}
|
|
|
|
|
2018-05-14 20:14:49 +08:00
|
|
|
/* Ensure that the write to (&gdesc->txd)->gen will be observed after
|
|
|
|
* all other writes to &gdesc->txd.
|
|
|
|
*/
|
|
|
|
dma_wmb();
|
|
|
|
|
2009-11-16 21:41:33 +08:00
|
|
|
/* finally flips the GEN bit of the SOP desc. */
|
|
|
|
gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
|
|
|
|
VMXNET3_TXD_GEN);
|
|
|
|
#ifdef __BIG_ENDIAN_BITFIELD
|
|
|
|
/* Finished updating in bitfields of Tx Desc, so write them in original
|
|
|
|
* place.
|
|
|
|
*/
|
|
|
|
vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
|
|
|
|
(struct Vmxnet3_TxDesc *)ctx.sop_txd);
|
|
|
|
gdesc = ctx.sop_txd;
|
|
|
|
#endif
|
2013-01-15 15:28:29 +08:00
|
|
|
netdev_dbg(adapter->netdev,
|
2009-10-17 08:54:34 +08:00
|
|
|
"txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
|
2012-06-04 20:44:18 +08:00
|
|
|
(u32)(ctx.sop_txd -
|
2009-11-16 21:41:33 +08:00
|
|
|
tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
|
|
|
|
le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
spin_unlock_irqrestore(&tq->tx_lock, flags);
|
|
|
|
|
2018-03-17 05:47:54 +08:00
|
|
|
if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
|
2009-10-13 15:15:51 +08:00
|
|
|
tq->shared->txNumDeferred = 0;
|
2010-11-19 18:55:24 +08:00
|
|
|
VMXNET3_WRITE_BAR0_REG(adapter,
|
|
|
|
VMXNET3_REG_TXPROD + tq->qid * 8,
|
2009-10-13 15:15:51 +08:00
|
|
|
tq->tx_ring.next2fill);
|
|
|
|
}
|
|
|
|
|
|
|
|
return NETDEV_TX_OK;
|
|
|
|
|
2010-12-20 11:03:15 +08:00
|
|
|
unlock_drop_pkt:
|
|
|
|
spin_unlock_irqrestore(&tq->tx_lock, flags);
|
2009-10-13 15:15:51 +08:00
|
|
|
drop_pkt:
|
|
|
|
tq->stats.drop_total++;
|
2014-03-16 09:31:16 +08:00
|
|
|
dev_kfree_skb_any(skb);
|
2009-10-13 15:15:51 +08:00
|
|
|
return NETDEV_TX_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static netdev_tx_t
|
|
|
|
vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
|
|
|
|
{
|
|
|
|
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
|
|
|
|
|
2012-11-13 21:53:28 +08:00
|
|
|
BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
|
|
|
|
return vmxnet3_tq_xmit(skb,
|
|
|
|
&adapter->tx_queue[skb->queue_mapping],
|
|
|
|
adapter, netdev);
|
2009-10-13 15:15:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
|
|
|
|
struct sk_buff *skb,
|
|
|
|
union Vmxnet3_GenericDesc *gdesc)
|
|
|
|
{
|
2011-04-18 21:31:21 +08:00
|
|
|
if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
|
2016-04-21 09:12:29 +08:00
|
|
|
if (gdesc->rcd.v4 &&
|
|
|
|
(le32_to_cpu(gdesc->dword[3]) &
|
|
|
|
VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
|
|
|
|
skb->ip_summed = CHECKSUM_UNNECESSARY;
|
|
|
|
BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
|
|
|
|
BUG_ON(gdesc->rcd.frg);
|
|
|
|
} else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
|
|
|
|
(1 << VMXNET3_RCD_TUC_SHIFT))) {
|
2009-10-13 15:15:51 +08:00
|
|
|
skb->ip_summed = CHECKSUM_UNNECESSARY;
|
|
|
|
BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
|
|
|
|
BUG_ON(gdesc->rcd.frg);
|
|
|
|
} else {
|
|
|
|
if (gdesc->rcd.csum) {
|
|
|
|
skb->csum = htons(gdesc->rcd.csum);
|
|
|
|
skb->ip_summed = CHECKSUM_PARTIAL;
|
|
|
|
} else {
|
2010-09-03 04:07:41 +08:00
|
|
|
skb_checksum_none_assert(skb);
|
2009-10-13 15:15:51 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
2010-09-03 04:07:41 +08:00
|
|
|
skb_checksum_none_assert(skb);
|
2009-10-13 15:15:51 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
|
|
|
|
struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
|
|
|
|
{
|
|
|
|
rq->stats.drop_err++;
|
|
|
|
if (!rcd->fcs)
|
|
|
|
rq->stats.drop_fcs++;
|
|
|
|
|
|
|
|
rq->stats.drop_total++;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We do not unmap and chain the rx buffer to the skb.
|
|
|
|
* We basically pretend this buffer is not used and will be recycled
|
|
|
|
* by vmxnet3_rq_alloc_rx_buf()
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* ctx->skb may be NULL if this is the first and the only one
|
|
|
|
* desc for the pkt
|
|
|
|
*/
|
|
|
|
if (ctx->skb)
|
|
|
|
dev_kfree_skb_irq(ctx->skb);
|
|
|
|
|
|
|
|
ctx->skb = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-06-20 04:38:29 +08:00
|
|
|
static u32
|
|
|
|
vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
|
|
|
|
union Vmxnet3_GenericDesc *gdesc)
|
|
|
|
{
|
|
|
|
u32 hlen, maplen;
|
|
|
|
union {
|
|
|
|
void *ptr;
|
|
|
|
struct ethhdr *eth;
|
2018-04-19 03:48:04 +08:00
|
|
|
struct vlan_ethhdr *veth;
|
2015-06-20 04:38:29 +08:00
|
|
|
struct iphdr *ipv4;
|
|
|
|
struct ipv6hdr *ipv6;
|
|
|
|
struct tcphdr *tcp;
|
|
|
|
} hdr;
|
|
|
|
BUG_ON(gdesc->rcd.tcp == 0);
|
|
|
|
|
|
|
|
maplen = skb_headlen(skb);
|
|
|
|
if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen))
|
|
|
|
return 0;
|
|
|
|
|
2018-04-19 03:48:04 +08:00
|
|
|
if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
|
|
|
|
skb->protocol == cpu_to_be16(ETH_P_8021AD))
|
|
|
|
hlen = sizeof(struct vlan_ethhdr);
|
|
|
|
else
|
|
|
|
hlen = sizeof(struct ethhdr);
|
|
|
|
|
2015-06-20 04:38:29 +08:00
|
|
|
hdr.eth = eth_hdr(skb);
|
|
|
|
if (gdesc->rcd.v4) {
|
2018-04-19 03:48:04 +08:00
|
|
|
BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP) &&
|
|
|
|
hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IP));
|
|
|
|
hdr.ptr += hlen;
|
2015-06-20 04:38:29 +08:00
|
|
|
BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP);
|
|
|
|
hlen = hdr.ipv4->ihl << 2;
|
|
|
|
hdr.ptr += hdr.ipv4->ihl << 2;
|
|
|
|
} else if (gdesc->rcd.v6) {
|
2018-04-19 03:48:04 +08:00
|
|
|
BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6) &&
|
|
|
|
hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IPV6));
|
|
|
|
hdr.ptr += hlen;
|
2015-06-20 04:38:29 +08:00
|
|
|
/* Use an estimated value, since we also need to handle
|
|
|
|
* TSO case.
|
|
|
|
*/
|
|
|
|
if (hdr.ipv6->nexthdr != IPPROTO_TCP)
|
|
|
|
return sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
|
|
|
|
hlen = sizeof(struct ipv6hdr);
|
|
|
|
hdr.ptr += sizeof(struct ipv6hdr);
|
|
|
|
} else {
|
|
|
|
/* Non-IP pkt, dont estimate header length */
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (hlen + sizeof(struct tcphdr) > maplen)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return (hlen + (hdr.tcp->doff << 2));
|
|
|
|
}
|
|
|
|
|
2009-10-13 15:15:51 +08:00
|
|
|
static int
|
|
|
|
vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
|
|
|
|
struct vmxnet3_adapter *adapter, int quota)
|
|
|
|
{
|
2010-12-21 18:16:10 +08:00
|
|
|
static const u32 rxprod_reg[2] = {
|
|
|
|
VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
|
|
|
|
};
|
2015-07-08 02:02:18 +08:00
|
|
|
u32 num_pkts = 0;
|
2011-07-05 22:34:05 +08:00
|
|
|
bool skip_page_frags = false;
|
2009-10-13 15:15:51 +08:00
|
|
|
struct Vmxnet3_RxCompDesc *rcd;
|
|
|
|
struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
|
2015-06-20 04:38:29 +08:00
|
|
|
u16 segCnt = 0, mss = 0;
|
2009-11-16 21:41:33 +08:00
|
|
|
#ifdef __BIG_ENDIAN_BITFIELD
|
|
|
|
struct Vmxnet3_RxDesc rxCmdDesc;
|
|
|
|
struct Vmxnet3_RxCompDesc rxComp;
|
|
|
|
#endif
|
|
|
|
vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
|
|
|
|
&rxComp);
|
2009-10-13 15:15:51 +08:00
|
|
|
while (rcd->gen == rq->comp_ring.gen) {
|
|
|
|
struct vmxnet3_rx_buf_info *rbi;
|
2011-07-05 22:34:05 +08:00
|
|
|
struct sk_buff *skb, *new_skb = NULL;
|
|
|
|
struct page *new_page = NULL;
|
2015-11-28 06:29:30 +08:00
|
|
|
dma_addr_t new_dma_addr;
|
2009-10-13 15:15:51 +08:00
|
|
|
int num_to_alloc;
|
|
|
|
struct Vmxnet3_RxDesc *rxd;
|
|
|
|
u32 idx, ring_idx;
|
2011-07-05 22:34:05 +08:00
|
|
|
struct vmxnet3_cmd_ring *ring = NULL;
|
2015-07-08 02:02:18 +08:00
|
|
|
if (num_pkts >= quota) {
|
2009-10-13 15:15:51 +08:00
|
|
|
/* we may stop even before we see the EOP desc of
|
|
|
|
* the current pkt
|
|
|
|
*/
|
|
|
|
break;
|
|
|
|
}
|
2018-05-14 20:14:49 +08:00
|
|
|
|
|
|
|
/* Prevent any rcd field from being (speculatively) read before
|
|
|
|
* rcd->gen is read.
|
|
|
|
*/
|
|
|
|
dma_rmb();
|
|
|
|
|
2016-06-17 01:51:56 +08:00
|
|
|
BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
|
|
|
|
rcd->rqID != rq->dataRingQid);
|
2009-10-13 15:15:51 +08:00
|
|
|
idx = rcd->rxdIdx;
|
2016-06-17 01:51:56 +08:00
|
|
|
ring_idx = VMXNET3_GET_RING_IDX(adapter, rcd->rqID);
|
2011-07-05 22:34:05 +08:00
|
|
|
ring = rq->rx_ring + ring_idx;
|
2009-11-16 21:41:33 +08:00
|
|
|
vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
|
|
|
|
&rxCmdDesc);
|
2009-10-13 15:15:51 +08:00
|
|
|
rbi = rq->buf_info[ring_idx] + idx;
|
|
|
|
|
2009-11-16 21:41:33 +08:00
|
|
|
BUG_ON(rxd->addr != rbi->dma_addr ||
|
|
|
|
rxd->len != rbi->len);
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
if (unlikely(rcd->eop && rcd->err)) {
|
|
|
|
vmxnet3_rx_error(rq, rcd, ctx, adapter);
|
|
|
|
goto rcd_done;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (rcd->sop) { /* first buf of the pkt */
|
2016-06-17 01:51:56 +08:00
|
|
|
bool rxDataRingUsed;
|
|
|
|
u16 len;
|
|
|
|
|
2009-10-13 15:15:51 +08:00
|
|
|
BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
|
2016-06-17 01:51:56 +08:00
|
|
|
(rcd->rqID != rq->qid &&
|
|
|
|
rcd->rqID != rq->dataRingQid));
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
|
|
|
|
BUG_ON(ctx->skb != NULL || rbi->skb == NULL);
|
|
|
|
|
|
|
|
if (unlikely(rcd->len == 0)) {
|
|
|
|
/* Pretend the rx buffer is skipped. */
|
|
|
|
BUG_ON(!(rcd->sop && rcd->eop));
|
2013-01-15 15:28:29 +08:00
|
|
|
netdev_dbg(adapter->netdev,
|
2009-10-17 08:54:34 +08:00
|
|
|
"rxRing[%u][%u] 0 length\n",
|
2009-10-13 15:15:51 +08:00
|
|
|
ring_idx, idx);
|
|
|
|
goto rcd_done;
|
|
|
|
}
|
|
|
|
|
2011-07-05 22:34:05 +08:00
|
|
|
skip_page_frags = false;
|
2009-10-13 15:15:51 +08:00
|
|
|
ctx->skb = rbi->skb;
|
2016-06-17 01:51:56 +08:00
|
|
|
|
|
|
|
rxDataRingUsed =
|
|
|
|
VMXNET3_RX_DATA_RING(adapter, rcd->rqID);
|
|
|
|
len = rxDataRingUsed ? rcd->len : rbi->len;
|
2013-01-15 15:28:26 +08:00
|
|
|
new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
|
2016-06-17 01:51:56 +08:00
|
|
|
len);
|
2011-07-05 22:34:05 +08:00
|
|
|
if (new_skb == NULL) {
|
|
|
|
/* Skb allocation failed, do not handover this
|
|
|
|
* skb to stack. Reuse it. Drop the existing pkt
|
|
|
|
*/
|
|
|
|
rq->stats.rx_buf_alloc_failure++;
|
|
|
|
ctx->skb = NULL;
|
|
|
|
rq->stats.drop_total++;
|
|
|
|
skip_page_frags = true;
|
|
|
|
goto rcd_done;
|
|
|
|
}
|
2009-10-13 15:15:51 +08:00
|
|
|
|
2016-06-17 01:51:56 +08:00
|
|
|
if (rxDataRingUsed) {
|
|
|
|
size_t sz;
|
|
|
|
|
|
|
|
BUG_ON(rcd->len > rq->data_ring.desc_size);
|
|
|
|
|
|
|
|
ctx->skb = new_skb;
|
|
|
|
sz = rcd->rxdIdx * rq->data_ring.desc_size;
|
|
|
|
memcpy(new_skb->data,
|
|
|
|
&rq->data_ring.base[sz], rcd->len);
|
|
|
|
} else {
|
|
|
|
ctx->skb = rbi->skb;
|
|
|
|
|
|
|
|
new_dma_addr =
|
|
|
|
dma_map_single(&adapter->pdev->dev,
|
|
|
|
new_skb->data, rbi->len,
|
|
|
|
PCI_DMA_FROMDEVICE);
|
|
|
|
if (dma_mapping_error(&adapter->pdev->dev,
|
|
|
|
new_dma_addr)) {
|
|
|
|
dev_kfree_skb(new_skb);
|
|
|
|
/* Skb allocation failed, do not
|
|
|
|
* handover this skb to stack. Reuse
|
|
|
|
* it. Drop the existing pkt.
|
|
|
|
*/
|
|
|
|
rq->stats.rx_buf_alloc_failure++;
|
|
|
|
ctx->skb = NULL;
|
|
|
|
rq->stats.drop_total++;
|
|
|
|
skip_page_frags = true;
|
|
|
|
goto rcd_done;
|
|
|
|
}
|
|
|
|
|
|
|
|
dma_unmap_single(&adapter->pdev->dev,
|
|
|
|
rbi->dma_addr,
|
|
|
|
rbi->len,
|
|
|
|
PCI_DMA_FROMDEVICE);
|
|
|
|
|
|
|
|
/* Immediate refill */
|
|
|
|
rbi->skb = new_skb;
|
|
|
|
rbi->dma_addr = new_dma_addr;
|
|
|
|
rxd->addr = cpu_to_le64(rbi->dma_addr);
|
|
|
|
rxd->len = rbi->len;
|
|
|
|
}
|
2009-10-13 15:15:51 +08:00
|
|
|
|
2013-01-15 15:28:35 +08:00
|
|
|
#ifdef VMXNET3_RSS
|
|
|
|
if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
|
|
|
|
(adapter->netdev->features & NETIF_F_RXHASH))
|
2013-12-20 20:16:57 +08:00
|
|
|
skb_set_hash(ctx->skb,
|
|
|
|
le32_to_cpu(rcd->rssHash),
|
2013-12-18 15:32:08 +08:00
|
|
|
PKT_HASH_TYPE_L3);
|
2013-01-15 15:28:35 +08:00
|
|
|
#endif
|
2009-10-13 15:15:51 +08:00
|
|
|
skb_put(ctx->skb, rcd->len);
|
2011-07-05 22:34:05 +08:00
|
|
|
|
2016-06-17 01:51:53 +08:00
|
|
|
if (VMXNET3_VERSION_GE_2(adapter) &&
|
2015-06-20 04:38:29 +08:00
|
|
|
rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
|
|
|
|
struct Vmxnet3_RxCompDescExt *rcdlro;
|
|
|
|
rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
|
|
|
|
|
|
|
|
segCnt = rcdlro->segCnt;
|
2016-06-08 22:40:53 +08:00
|
|
|
WARN_ON_ONCE(segCnt == 0);
|
2015-06-20 04:38:29 +08:00
|
|
|
mss = rcdlro->mss;
|
|
|
|
if (unlikely(segCnt <= 1))
|
|
|
|
segCnt = 0;
|
|
|
|
} else {
|
|
|
|
segCnt = 0;
|
|
|
|
}
|
2009-10-13 15:15:51 +08:00
|
|
|
} else {
|
2011-07-05 22:34:05 +08:00
|
|
|
BUG_ON(ctx->skb == NULL && !skip_page_frags);
|
|
|
|
|
2009-10-13 15:15:51 +08:00
|
|
|
/* non SOP buffer must be type 1 in most cases */
|
2011-07-05 22:34:05 +08:00
|
|
|
BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
|
|
|
|
BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
|
2009-10-13 15:15:51 +08:00
|
|
|
|
2011-07-05 22:34:05 +08:00
|
|
|
/* If an sop buffer was dropped, skip all
|
|
|
|
* following non-sop fragments. They will be reused.
|
|
|
|
*/
|
|
|
|
if (skip_page_frags)
|
|
|
|
goto rcd_done;
|
2009-10-13 15:15:51 +08:00
|
|
|
|
2015-06-20 04:37:03 +08:00
|
|
|
if (rcd->len) {
|
|
|
|
new_page = alloc_page(GFP_ATOMIC);
|
2011-07-05 22:34:05 +08:00
|
|
|
/* Replacement page frag could not be allocated.
|
|
|
|
* Reuse this page. Drop the pkt and free the
|
|
|
|
* skb which contained this page as a frag. Skip
|
|
|
|
* processing all the following non-sop frags.
|
2009-10-13 15:15:51 +08:00
|
|
|
*/
|
2015-06-20 04:37:03 +08:00
|
|
|
if (unlikely(!new_page)) {
|
|
|
|
rq->stats.rx_buf_alloc_failure++;
|
|
|
|
dev_kfree_skb(ctx->skb);
|
|
|
|
ctx->skb = NULL;
|
|
|
|
skip_page_frags = true;
|
|
|
|
goto rcd_done;
|
|
|
|
}
|
2016-01-07 02:44:27 +08:00
|
|
|
new_dma_addr = dma_map_page(&adapter->pdev->dev,
|
|
|
|
new_page,
|
|
|
|
0, PAGE_SIZE,
|
|
|
|
PCI_DMA_FROMDEVICE);
|
2015-11-28 06:29:30 +08:00
|
|
|
if (dma_mapping_error(&adapter->pdev->dev,
|
|
|
|
new_dma_addr)) {
|
|
|
|
put_page(new_page);
|
|
|
|
rq->stats.rx_buf_alloc_failure++;
|
|
|
|
dev_kfree_skb(ctx->skb);
|
|
|
|
ctx->skb = NULL;
|
|
|
|
skip_page_frags = true;
|
|
|
|
goto rcd_done;
|
|
|
|
}
|
2011-07-05 22:34:05 +08:00
|
|
|
|
2013-08-24 00:33:49 +08:00
|
|
|
dma_unmap_page(&adapter->pdev->dev,
|
2011-07-05 22:34:05 +08:00
|
|
|
rbi->dma_addr, rbi->len,
|
|
|
|
PCI_DMA_FROMDEVICE);
|
|
|
|
|
|
|
|
vmxnet3_append_frag(ctx->skb, rcd, rbi);
|
|
|
|
|
2015-06-20 04:37:03 +08:00
|
|
|
/* Immediate refill */
|
|
|
|
rbi->page = new_page;
|
2015-11-28 06:29:30 +08:00
|
|
|
rbi->dma_addr = new_dma_addr;
|
2015-06-20 04:37:03 +08:00
|
|
|
rxd->addr = cpu_to_le64(rbi->dma_addr);
|
|
|
|
rxd->len = rbi->len;
|
|
|
|
}
|
2009-10-13 15:15:51 +08:00
|
|
|
}
|
|
|
|
|
2011-07-05 22:34:05 +08:00
|
|
|
|
2009-10-13 15:15:51 +08:00
|
|
|
skb = ctx->skb;
|
|
|
|
if (rcd->eop) {
|
2015-06-20 04:38:29 +08:00
|
|
|
u32 mtu = adapter->netdev->mtu;
|
2009-10-13 15:15:51 +08:00
|
|
|
skb->len += skb->data_len;
|
|
|
|
|
|
|
|
vmxnet3_rx_csum(adapter, skb,
|
|
|
|
(union Vmxnet3_GenericDesc *)rcd);
|
|
|
|
skb->protocol = eth_type_trans(skb, adapter->netdev);
|
2018-03-17 05:49:19 +08:00
|
|
|
if (!rcd->tcp ||
|
|
|
|
!(adapter->netdev->features & NETIF_F_LRO))
|
2015-06-20 04:38:29 +08:00
|
|
|
goto not_lro;
|
|
|
|
|
|
|
|
if (segCnt != 0 && mss != 0) {
|
|
|
|
skb_shinfo(skb)->gso_type = rcd->v4 ?
|
|
|
|
SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
|
|
|
|
skb_shinfo(skb)->gso_size = mss;
|
|
|
|
skb_shinfo(skb)->gso_segs = segCnt;
|
|
|
|
} else if (segCnt != 0 || skb->len > mtu) {
|
|
|
|
u32 hlen;
|
|
|
|
|
|
|
|
hlen = vmxnet3_get_hdr_len(adapter, skb,
|
|
|
|
(union Vmxnet3_GenericDesc *)rcd);
|
|
|
|
if (hlen == 0)
|
|
|
|
goto not_lro;
|
|
|
|
|
|
|
|
skb_shinfo(skb)->gso_type =
|
|
|
|
rcd->v4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
|
|
|
|
if (segCnt != 0) {
|
|
|
|
skb_shinfo(skb)->gso_segs = segCnt;
|
|
|
|
skb_shinfo(skb)->gso_size =
|
|
|
|
DIV_ROUND_UP(skb->len -
|
|
|
|
hlen, segCnt);
|
|
|
|
} else {
|
|
|
|
skb_shinfo(skb)->gso_size = mtu - hlen;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
not_lro:
|
2011-06-23 21:04:39 +08:00
|
|
|
if (unlikely(rcd->ts))
|
2013-04-19 10:04:30 +08:00
|
|
|
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci);
|
2011-06-23 21:04:39 +08:00
|
|
|
|
2011-06-24 22:24:35 +08:00
|
|
|
if (adapter->netdev->features & NETIF_F_LRO)
|
|
|
|
netif_receive_skb(skb);
|
|
|
|
else
|
|
|
|
napi_gro_receive(&rq->napi, skb);
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
ctx->skb = NULL;
|
2015-07-08 02:02:18 +08:00
|
|
|
num_pkts++;
|
2009-10-13 15:15:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
rcd_done:
|
2011-07-05 22:34:05 +08:00
|
|
|
/* device may have skipped some rx descs */
|
|
|
|
ring->next2comp = idx;
|
|
|
|
num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
|
|
|
|
ring = rq->rx_ring + ring_idx;
|
2018-05-14 20:14:49 +08:00
|
|
|
|
|
|
|
/* Ensure that the writes to rxd->gen bits will be observed
|
|
|
|
* after all other writes to rxd objects.
|
|
|
|
*/
|
|
|
|
dma_wmb();
|
|
|
|
|
2011-07-05 22:34:05 +08:00
|
|
|
while (num_to_alloc) {
|
|
|
|
vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
|
|
|
|
&rxCmdDesc);
|
|
|
|
BUG_ON(!rxd->addr);
|
|
|
|
|
|
|
|
/* Recv desc is ready to be used by the device */
|
|
|
|
rxd->gen = ring->gen;
|
|
|
|
vmxnet3_cmd_ring_adv_next2fill(ring);
|
|
|
|
num_to_alloc--;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* if needed, update the register */
|
|
|
|
if (unlikely(rq->shared->updateRxProd)) {
|
|
|
|
VMXNET3_WRITE_BAR0_REG(adapter,
|
2012-11-13 21:53:28 +08:00
|
|
|
rxprod_reg[ring_idx] + rq->qid * 8,
|
|
|
|
ring->next2fill);
|
2009-10-13 15:15:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
|
2009-11-16 21:41:33 +08:00
|
|
|
vmxnet3_getRxComp(rcd,
|
2012-11-13 21:53:28 +08:00
|
|
|
&rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
|
2009-10-13 15:15:51 +08:00
|
|
|
}
|
|
|
|
|
2015-07-08 02:02:18 +08:00
|
|
|
return num_pkts;
|
2009-10-13 15:15:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
|
|
|
|
struct vmxnet3_adapter *adapter)
|
|
|
|
{
|
|
|
|
u32 i, ring_idx;
|
|
|
|
struct Vmxnet3_RxDesc *rxd;
|
|
|
|
|
|
|
|
for (ring_idx = 0; ring_idx < 2; ring_idx++) {
|
|
|
|
for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
|
2009-11-16 21:41:33 +08:00
|
|
|
#ifdef __BIG_ENDIAN_BITFIELD
|
|
|
|
struct Vmxnet3_RxDesc rxDesc;
|
|
|
|
#endif
|
|
|
|
vmxnet3_getRxDesc(rxd,
|
|
|
|
&rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
|
|
|
|
rq->buf_info[ring_idx][i].skb) {
|
2013-08-24 00:33:49 +08:00
|
|
|
dma_unmap_single(&adapter->pdev->dev, rxd->addr,
|
2009-10-13 15:15:51 +08:00
|
|
|
rxd->len, PCI_DMA_FROMDEVICE);
|
|
|
|
dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
|
|
|
|
rq->buf_info[ring_idx][i].skb = NULL;
|
|
|
|
} else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
|
|
|
|
rq->buf_info[ring_idx][i].page) {
|
2013-08-24 00:33:49 +08:00
|
|
|
dma_unmap_page(&adapter->pdev->dev, rxd->addr,
|
2009-10-13 15:15:51 +08:00
|
|
|
rxd->len, PCI_DMA_FROMDEVICE);
|
|
|
|
put_page(rq->buf_info[ring_idx][i].page);
|
|
|
|
rq->buf_info[ring_idx][i].page = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
|
|
|
|
rq->rx_ring[ring_idx].next2fill =
|
|
|
|
rq->rx_ring[ring_idx].next2comp = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
rq->comp_ring.gen = VMXNET3_INIT_GEN;
|
|
|
|
rq->comp_ring.next2proc = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-11-19 18:55:24 +08:00
|
|
|
static void
|
|
|
|
vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < adapter->num_rx_queues; i++)
|
|
|
|
vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-02-22 16:26:29 +08:00
|
|
|
static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
|
|
|
|
struct vmxnet3_adapter *adapter)
|
2009-10-13 15:15:51 +08:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
int j;
|
|
|
|
|
|
|
|
/* all rx buffers must have already been freed */
|
|
|
|
for (i = 0; i < 2; i++) {
|
|
|
|
if (rq->buf_info[i]) {
|
|
|
|
for (j = 0; j < rq->rx_ring[i].size; j++)
|
|
|
|
BUG_ON(rq->buf_info[i][j].page != NULL);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < 2; i++) {
|
|
|
|
if (rq->rx_ring[i].base) {
|
2013-08-24 00:33:49 +08:00
|
|
|
dma_free_coherent(&adapter->pdev->dev,
|
|
|
|
rq->rx_ring[i].size
|
|
|
|
* sizeof(struct Vmxnet3_RxDesc),
|
|
|
|
rq->rx_ring[i].base,
|
|
|
|
rq->rx_ring[i].basePA);
|
2009-10-13 15:15:51 +08:00
|
|
|
rq->rx_ring[i].base = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-06-17 01:51:56 +08:00
|
|
|
if (rq->data_ring.base) {
|
|
|
|
dma_free_coherent(&adapter->pdev->dev,
|
|
|
|
rq->rx_ring[0].size * rq->data_ring.desc_size,
|
|
|
|
rq->data_ring.base, rq->data_ring.basePA);
|
|
|
|
rq->data_ring.base = NULL;
|
|
|
|
}
|
|
|
|
|
2009-10-13 15:15:51 +08:00
|
|
|
if (rq->comp_ring.base) {
|
2013-08-24 00:33:49 +08:00
|
|
|
dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
|
|
|
|
* sizeof(struct Vmxnet3_RxCompDesc),
|
|
|
|
rq->comp_ring.base, rq->comp_ring.basePA);
|
2009-10-13 15:15:51 +08:00
|
|
|
rq->comp_ring.base = NULL;
|
|
|
|
}
|
2013-08-24 00:33:49 +08:00
|
|
|
|
|
|
|
if (rq->buf_info[0]) {
|
|
|
|
size_t sz = sizeof(struct vmxnet3_rx_buf_info) *
|
|
|
|
(rq->rx_ring[0].size + rq->rx_ring[1].size);
|
|
|
|
dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
|
|
|
|
rq->buf_info_pa);
|
vmxnet3: repair memory leak
with the introduction of commit
b0eb57cb97e7837ebb746404c2c58c6f536f23fa, it appears that rq->buf_info
is improperly handled. While it is heap allocated when an rx queue is
setup, and freed when torn down, an old line of code in
vmxnet3_rq_destroy was not properly removed, leading to rq->buf_info[0]
being set to NULL prior to its being freed, causing a memory leak, which
eventually exhausts the system on repeated create/destroy operations
(for example, when the mtu of a vmxnet3 interface is changed
frequently.
Fix is pretty straight forward, just move the NULL set to after the
free.
Tested by myself with successful results
Applies to net, and should likely be queued for stable, please
Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
Reported-By: boyang@redhat.com
CC: boyang@redhat.com
CC: Shrikrishna Khare <skhare@vmware.com>
CC: "VMware, Inc." <pv-drivers@vmware.com>
CC: David S. Miller <davem@davemloft.net>
Acked-by: Shrikrishna Khare <skhare@vmware.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-01-23 05:06:37 +08:00
|
|
|
rq->buf_info[0] = rq->buf_info[1] = NULL;
|
2013-08-24 00:33:49 +08:00
|
|
|
}
|
2009-10-13 15:15:51 +08:00
|
|
|
}
|
|
|
|
|
2016-08-24 23:07:26 +08:00
|
|
|
static void
|
2016-06-17 01:51:56 +08:00
|
|
|
vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < adapter->num_rx_queues; i++) {
|
|
|
|
struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
|
|
|
|
|
|
|
|
if (rq->data_ring.base) {
|
|
|
|
dma_free_coherent(&adapter->pdev->dev,
|
|
|
|
(rq->rx_ring[0].size *
|
|
|
|
rq->data_ring.desc_size),
|
|
|
|
rq->data_ring.base,
|
|
|
|
rq->data_ring.basePA);
|
|
|
|
rq->data_ring.base = NULL;
|
|
|
|
rq->data_ring.desc_size = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
static int
|
|
|
|
vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
|
|
|
|
struct vmxnet3_adapter *adapter)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
/* initialize buf_info */
|
|
|
|
for (i = 0; i < rq->rx_ring[0].size; i++) {
|
|
|
|
|
|
|
|
/* 1st buf for a pkt is skbuff */
|
|
|
|
if (i % adapter->rx_buf_per_pkt == 0) {
|
|
|
|
rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
|
|
|
|
rq->buf_info[0][i].len = adapter->skb_buf_size;
|
|
|
|
} else { /* subsequent bufs for a pkt is frag */
|
|
|
|
rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
|
|
|
|
rq->buf_info[0][i].len = PAGE_SIZE;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for (i = 0; i < rq->rx_ring[1].size; i++) {
|
|
|
|
rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
|
|
|
|
rq->buf_info[1][i].len = PAGE_SIZE;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* reset internal state and allocate buffers for both rings */
|
|
|
|
for (i = 0; i < 2; i++) {
|
|
|
|
rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
|
|
|
|
|
|
|
|
memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
|
|
|
|
sizeof(struct Vmxnet3_RxDesc));
|
|
|
|
rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
|
|
|
|
}
|
|
|
|
if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
|
|
|
|
adapter) == 0) {
|
|
|
|
/* at least has 1 rx buffer for the 1st ring */
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
|
|
|
|
|
|
|
|
/* reset the comp ring */
|
|
|
|
rq->comp_ring.next2proc = 0;
|
|
|
|
memset(rq->comp_ring.base, 0, rq->comp_ring.size *
|
|
|
|
sizeof(struct Vmxnet3_RxCompDesc));
|
|
|
|
rq->comp_ring.gen = VMXNET3_INIT_GEN;
|
|
|
|
|
|
|
|
/* reset rxctx */
|
|
|
|
rq->rx_ctx.skb = NULL;
|
|
|
|
|
|
|
|
/* stats are not reset */
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-11-19 18:55:24 +08:00
|
|
|
static int
|
|
|
|
vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
|
|
|
|
{
|
|
|
|
int i, err = 0;
|
|
|
|
|
|
|
|
for (i = 0; i < adapter->num_rx_queues; i++) {
|
|
|
|
err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
|
|
|
|
if (unlikely(err)) {
|
|
|
|
dev_err(&adapter->netdev->dev, "%s: failed to "
|
|
|
|
"initialize rx queue%i\n",
|
|
|
|
adapter->netdev->name, i);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2009-10-13 15:15:51 +08:00
|
|
|
/*
 * Allocate the DMA-coherent memory backing one rx queue: the two rx
 * descriptor rings, an optional rx data ring (only when the adapter
 * enabled it and a descriptor size was configured), the rx completion
 * ring, and the buf_info bookkeeping array shared by both rings.
 *
 * Returns 0 on success; on any failure (except the data ring, which is
 * merely disabled) all partial allocations are released via
 * vmxnet3_rq_destroy() and -ENOMEM is returned.
 */
static int
vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
{
	int i;
	size_t sz;
	struct vmxnet3_rx_buf_info *bi;

	/* Allocate both rx descriptor rings (ring 0 and ring 1). */
	for (i = 0; i < 2; i++) {

		sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
		rq->rx_ring[i].base = dma_alloc_coherent(
						&adapter->pdev->dev, sz,
						&rq->rx_ring[i].basePA,
						GFP_KERNEL);
		if (!rq->rx_ring[i].base) {
			netdev_err(adapter->netdev,
				   "failed to allocate rx ring %d\n", i);
			goto err;
		}
	}

	/* Optional rx data ring; sized from ring 0 since the two are
	 * used in lockstep.  Failure here is non-fatal: the feature is
	 * simply turned off for the whole adapter.
	 */
	if ((adapter->rxdataring_enabled) && (rq->data_ring.desc_size != 0)) {
		sz = rq->rx_ring[0].size * rq->data_ring.desc_size;
		rq->data_ring.base =
			dma_alloc_coherent(&adapter->pdev->dev, sz,
					   &rq->data_ring.basePA,
					   GFP_KERNEL);
		if (!rq->data_ring.base) {
			netdev_err(adapter->netdev,
				   "rx data ring will be disabled\n");
			adapter->rxdataring_enabled = false;
		}
	} else {
		rq->data_ring.base = NULL;
		rq->data_ring.desc_size = 0;
	}

	/* Completion ring shared by both rx rings. */
	sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
	rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
						&rq->comp_ring.basePA,
						GFP_KERNEL);
	if (!rq->comp_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
		goto err;
	}

	/* One buf_info entry per descriptor of both rings, in a single
	 * coherent allocation; buf_info[1] points into the same array.
	 */
	sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
						   rq->rx_ring[1].size);
	bi = dma_alloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa,
				GFP_KERNEL);
	if (!bi)
		goto err;

	rq->buf_info[0] = bi;
	rq->buf_info[1] = bi + rq->rx_ring[0].size;

	return 0;

err:
	/* vmxnet3_rq_destroy() tolerates partially-allocated queues. */
	vmxnet3_rq_destroy(rq, adapter);
	return -ENOMEM;
}
|
|
|
|
|
|
|
|
|
2010-11-19 18:55:24 +08:00
|
|
|
static int
|
|
|
|
vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
|
|
|
|
{
|
|
|
|
int i, err = 0;
|
|
|
|
|
2016-06-17 01:51:56 +08:00
|
|
|
adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
|
|
|
|
|
2010-11-19 18:55:24 +08:00
|
|
|
for (i = 0; i < adapter->num_rx_queues; i++) {
|
|
|
|
err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
|
|
|
|
if (unlikely(err)) {
|
|
|
|
dev_err(&adapter->netdev->dev,
|
|
|
|
"%s: failed to create rx queue%i\n",
|
|
|
|
adapter->netdev->name, i);
|
|
|
|
goto err_out;
|
|
|
|
}
|
|
|
|
}
|
2016-06-17 01:51:56 +08:00
|
|
|
|
|
|
|
if (!adapter->rxdataring_enabled)
|
|
|
|
vmxnet3_rq_destroy_all_rxdataring(adapter);
|
|
|
|
|
2010-11-19 18:55:24 +08:00
|
|
|
return err;
|
|
|
|
err_out:
|
|
|
|
vmxnet3_rq_destroy_all(adapter);
|
|
|
|
return err;
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Multiple queue aware polling function for tx and rx */
|
|
|
|
|
2009-10-13 15:15:51 +08:00
|
|
|
static int
|
|
|
|
vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
|
|
|
|
{
|
2010-11-19 18:55:24 +08:00
|
|
|
int rcd_done = 0, i;
|
2009-10-13 15:15:51 +08:00
|
|
|
if (unlikely(adapter->shared->ecr))
|
|
|
|
vmxnet3_process_events(adapter);
|
2010-11-19 18:55:24 +08:00
|
|
|
for (i = 0; i < adapter->num_tx_queues; i++)
|
|
|
|
vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);
|
2009-10-13 15:15:51 +08:00
|
|
|
|
2010-11-19 18:55:24 +08:00
|
|
|
for (i = 0; i < adapter->num_rx_queues; i++)
|
|
|
|
rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
|
|
|
|
adapter, budget);
|
|
|
|
return rcd_done;
|
2009-10-13 15:15:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
vmxnet3_poll(struct napi_struct *napi, int budget)
|
|
|
|
{
|
2010-11-19 18:55:24 +08:00
|
|
|
struct vmxnet3_rx_queue *rx_queue = container_of(napi,
|
|
|
|
struct vmxnet3_rx_queue, napi);
|
|
|
|
int rxd_done;
|
|
|
|
|
|
|
|
rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
|
|
|
|
|
|
|
|
if (rxd_done < budget) {
|
2017-01-31 00:22:01 +08:00
|
|
|
napi_complete_done(napi, rxd_done);
|
2010-11-19 18:55:24 +08:00
|
|
|
vmxnet3_enable_all_intrs(rx_queue->adapter);
|
|
|
|
}
|
|
|
|
return rxd_done;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* NAPI polling function for MSI-X mode with multiple Rx queues
|
|
|
|
* Returns the # of the NAPI credit consumed (# of rx descriptors processed)
|
|
|
|
*/
|
|
|
|
|
|
|
|
static int
|
|
|
|
vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
|
|
|
|
{
|
|
|
|
struct vmxnet3_rx_queue *rq = container_of(napi,
|
|
|
|
struct vmxnet3_rx_queue, napi);
|
|
|
|
struct vmxnet3_adapter *adapter = rq->adapter;
|
2009-10-13 15:15:51 +08:00
|
|
|
int rxd_done;
|
|
|
|
|
2010-11-19 18:55:24 +08:00
|
|
|
/* When sharing interrupt with corresponding tx queue, process
|
|
|
|
* tx completions in that queue as well
|
|
|
|
*/
|
|
|
|
if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
|
|
|
|
struct vmxnet3_tx_queue *tq =
|
|
|
|
&adapter->tx_queue[rq - adapter->rx_queue];
|
|
|
|
vmxnet3_tq_tx_complete(tq, adapter);
|
|
|
|
}
|
|
|
|
|
|
|
|
rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
if (rxd_done < budget) {
|
2017-01-31 00:22:01 +08:00
|
|
|
napi_complete_done(napi, rxd_done);
|
2010-11-19 18:55:24 +08:00
|
|
|
vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
|
2009-10-13 15:15:51 +08:00
|
|
|
}
|
|
|
|
return rxd_done;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-11-19 18:55:24 +08:00
|
|
|
#ifdef CONFIG_PCI_MSI
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Handle completion interrupts on tx queues
|
|
|
|
* Returns whether or not the intr is handled
|
|
|
|
*/
|
|
|
|
|
|
|
|
static irqreturn_t
|
|
|
|
vmxnet3_msix_tx(int irq, void *data)
|
|
|
|
{
|
|
|
|
struct vmxnet3_tx_queue *tq = data;
|
|
|
|
struct vmxnet3_adapter *adapter = tq->adapter;
|
|
|
|
|
|
|
|
if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
|
|
|
|
vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
|
|
|
|
|
|
|
|
/* Handle the case where only one irq is allocate for all tx queues */
|
|
|
|
if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
|
|
|
|
int i;
|
|
|
|
for (i = 0; i < adapter->num_tx_queues; i++) {
|
|
|
|
struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
|
|
|
|
vmxnet3_tq_tx_complete(txq, adapter);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vmxnet3_tq_tx_complete(tq, adapter);
|
|
|
|
}
|
|
|
|
vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
|
|
|
|
|
|
|
|
return IRQ_HANDLED;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Handle completion interrupts on rx queues. Returns whether or not the
|
|
|
|
* intr is handled
|
|
|
|
*/
|
|
|
|
|
|
|
|
static irqreturn_t
|
|
|
|
vmxnet3_msix_rx(int irq, void *data)
|
|
|
|
{
|
|
|
|
struct vmxnet3_rx_queue *rq = data;
|
|
|
|
struct vmxnet3_adapter *adapter = rq->adapter;
|
|
|
|
|
|
|
|
/* disable intr if needed */
|
|
|
|
if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
|
|
|
|
vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
|
|
|
|
napi_schedule(&rq->napi);
|
|
|
|
|
|
|
|
return IRQ_HANDLED;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
*----------------------------------------------------------------------------
|
|
|
|
*
|
|
|
|
* vmxnet3_msix_event --
|
|
|
|
*
|
|
|
|
* vmxnet3 msix event intr handler
|
|
|
|
*
|
|
|
|
* Result:
|
|
|
|
* whether or not the intr is handled
|
|
|
|
*
|
|
|
|
*----------------------------------------------------------------------------
|
|
|
|
*/
|
|
|
|
|
|
|
|
static irqreturn_t
|
|
|
|
vmxnet3_msix_event(int irq, void *data)
|
|
|
|
{
|
|
|
|
struct net_device *dev = data;
|
|
|
|
struct vmxnet3_adapter *adapter = netdev_priv(dev);
|
|
|
|
|
|
|
|
/* disable intr if needed */
|
|
|
|
if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
|
|
|
|
vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);
|
|
|
|
|
|
|
|
if (adapter->shared->ecr)
|
|
|
|
vmxnet3_process_events(adapter);
|
|
|
|
|
|
|
|
vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
|
|
|
|
|
|
|
|
return IRQ_HANDLED;
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif /* CONFIG_PCI_MSI */
|
|
|
|
|
|
|
|
|
2009-10-13 15:15:51 +08:00
|
|
|
/* Interrupt handler for vmxnet3 */
|
|
|
|
static irqreturn_t
|
|
|
|
vmxnet3_intr(int irq, void *dev_id)
|
|
|
|
{
|
|
|
|
struct net_device *dev = dev_id;
|
|
|
|
struct vmxnet3_adapter *adapter = netdev_priv(dev);
|
|
|
|
|
2010-11-19 18:55:24 +08:00
|
|
|
if (adapter->intr.type == VMXNET3_IT_INTX) {
|
2009-10-13 15:15:51 +08:00
|
|
|
u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
|
|
|
|
if (unlikely(icr == 0))
|
|
|
|
/* not ours */
|
|
|
|
return IRQ_NONE;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/* disable intr if needed */
|
|
|
|
if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
|
2010-11-19 18:55:24 +08:00
|
|
|
vmxnet3_disable_all_intrs(adapter);
|
2009-10-13 15:15:51 +08:00
|
|
|
|
2010-11-19 18:55:24 +08:00
|
|
|
napi_schedule(&adapter->rx_queue[0].napi);
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
return IRQ_HANDLED;
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_NET_POLL_CONTROLLER
|
|
|
|
|
|
|
|
/* netpoll callback. */
|
|
|
|
/*
 * netpoll callback: poll the device without relying on interrupts by
 * invoking the appropriate interrupt handler directly for the current
 * interrupt mode.
 */
static void
vmxnet3_netpoll(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	switch (adapter->intr.type) {
#ifdef CONFIG_PCI_MSI
	case VMXNET3_IT_MSIX: {
		int i;
		/* One rx handler call per queue; irq number is unused. */
		for (i = 0; i < adapter->num_rx_queues; i++)
			vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
		break;
	}
#endif
	case VMXNET3_IT_MSI:
	default:
		/* MSI and INTx share the same single-vector handler. */
		vmxnet3_intr(0, adapter->netdev);
		break;
	}

}
|
2010-11-19 18:55:24 +08:00
|
|
|
#endif /* CONFIG_NET_POLL_CONTROLLER */
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
/*
 * Request the interrupt lines for the adapter according to the chosen
 * interrupt type (MSI-X, MSI, or INTx) and the tx/rx vector-sharing
 * policy, then initialize the per-queue interrupt indices and
 * moderation levels.  Returns 0 on success or the request_irq() error.
 */
static int
vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
{
	struct vmxnet3_intr *intr = &adapter->intr;
	int err = 0, i;
	int vector = 0;

#ifdef CONFIG_PCI_MSI
	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
				sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
					adapter->netdev->name, vector);
				err = request_irq(
					      intr->msix_entries[vector].vector,
					      vmxnet3_msix_tx, 0,
					      adapter->tx_queue[i].name,
					      &adapter->tx_queue[i]);
			} else {
				/* BUDDYSHARE: tx shares the rx vector, so no
				 * irq is requested here; only the name is set.
				 * err keeps its prior value (0) in this branch.
				 */
				sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
					adapter->netdev->name, vector);
			}
			if (err) {
				dev_err(&adapter->netdev->dev,
					"Failed to request irq for MSIX, %s, "
					"error %d\n",
					adapter->tx_queue[i].name, err);
				return err;
			}

			/* Handle the case where only 1 MSIx was allocated for
			 * all tx queues */
			if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
				/* Point every remaining tx queue at this one
				 * vector and stop requesting further tx irqs.
				 */
				for (; i < adapter->num_tx_queues; i++)
					adapter->tx_queue[i].comp_ring.intr_idx
								= vector;
				vector++;
				break;
			} else {
				adapter->tx_queue[i].comp_ring.intr_idx
								= vector++;
			}
		}
		/* With BUDDYSHARE the rx vectors start back at 0 so each rx
		 * queue pairs with the tx queue of the same index.
		 */
		if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
			vector = 0;

		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
				sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
					adapter->netdev->name, vector);
			else
				sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
					adapter->netdev->name, vector);
			err = request_irq(intr->msix_entries[vector].vector,
					  vmxnet3_msix_rx, 0,
					  adapter->rx_queue[i].name,
					  &(adapter->rx_queue[i]));
			if (err) {
				netdev_err(adapter->netdev,
					   "Failed to request irq for MSIX, "
					   "%s, error %d\n",
					   adapter->rx_queue[i].name, err);
				return err;
			}

			adapter->rx_queue[i].comp_ring.intr_idx = vector++;
		}

		/* The last vector is dedicated to device events. */
		sprintf(intr->event_msi_vector_name, "%s-event-%d",
			adapter->netdev->name, vector);
		err = request_irq(intr->msix_entries[vector].vector,
				  vmxnet3_msix_event, 0,
				  intr->event_msi_vector_name, adapter->netdev);
		intr->event_intr_idx = vector;

	} else if (intr->type == VMXNET3_IT_MSI) {
		/* A single MSI line can only drive one rx queue. */
		adapter->num_rx_queues = 1;
		err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
				  adapter->netdev->name, adapter->netdev);
	} else {
#endif
		/* INTx fallback: shared line, single rx queue. */
		adapter->num_rx_queues = 1;
		err = request_irq(adapter->pdev->irq, vmxnet3_intr,
				  IRQF_SHARED, adapter->netdev->name,
				  adapter->netdev);
#ifdef CONFIG_PCI_MSI
	}
#endif
	/* For MSI/INTx vector is still 0, giving num_intrs == 1. */
	intr->num_intrs = vector + 1;
	if (err) {
		netdev_err(adapter->netdev,
			   "Failed to request irq (intr type:%d), error %d\n",
			   intr->type, err);
	} else {
		/* Number of rx queues will not change after this */
		for (i = 0; i < adapter->num_rx_queues; i++) {
			struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
			rq->qid = i;
			rq->qid2 = i + adapter->num_rx_queues;
			rq->dataRingQid = i + 2 * adapter->num_rx_queues;
		}

		/* init our intr settings */
		for (i = 0; i < intr->num_intrs; i++)
			intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
		if (adapter->intr.type != VMXNET3_IT_MSIX) {
			/* Single shared vector: everything uses index 0. */
			adapter->intr.event_intr_idx = 0;
			for (i = 0; i < adapter->num_tx_queues; i++)
				adapter->tx_queue[i].comp_ring.intr_idx = 0;
			adapter->rx_queue[0].comp_ring.intr_idx = 0;
		}

		netdev_info(adapter->netdev,
			    "intr type %u, mode %u, %u vectors allocated\n",
			    intr->type, intr->mask_mode, intr->num_intrs);
	}

	return err;
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * Release every interrupt line acquired by vmxnet3_request_irqs(),
 * mirroring its vector layout: tx vectors (unless buddy-shared with
 * rx), then rx vectors, then the trailing event vector.
 */
static void
vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
{
	struct vmxnet3_intr *intr = &adapter->intr;
	BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);

	switch (intr->type) {
#ifdef CONFIG_PCI_MSI
	case VMXNET3_IT_MSIX:
	{
		int i, vector = 0;

		/* With BUDDYSHARE the tx queues never owned vectors of
		 * their own, so there is nothing to free for them.
		 */
		if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
			for (i = 0; i < adapter->num_tx_queues; i++) {
				free_irq(intr->msix_entries[vector++].vector,
					 &(adapter->tx_queue[i]));
				/* TXSHARE: only one vector served all tx
				 * queues, so stop after the first.
				 */
				if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
					break;
			}
		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			free_irq(intr->msix_entries[vector++].vector,
				 &(adapter->rx_queue[i]));
		}

		/* The remaining vector is the event vector, registered
		 * with the netdev as its cookie.
		 */
		free_irq(intr->msix_entries[vector].vector,
			 adapter->netdev);
		/* Sanity check on the vector accounting above. */
		BUG_ON(vector >= intr->num_intrs);
		break;
	}
#endif
	case VMXNET3_IT_MSI:
		free_irq(adapter->pdev->irq, adapter->netdev);
		break;
	case VMXNET3_IT_INTX:
		free_irq(adapter->pdev->irq, adapter->netdev);
		break;
	default:
		BUG();
	}
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
|
|
|
|
{
|
2011-06-23 21:04:39 +08:00
|
|
|
u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
|
|
|
|
u16 vid;
|
2009-10-13 15:15:51 +08:00
|
|
|
|
2011-06-23 21:04:39 +08:00
|
|
|
/* allow untagged pkts */
|
|
|
|
VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
|
|
|
|
|
|
|
|
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
|
|
|
|
VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
|
2009-10-13 15:15:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-12-09 08:52:37 +08:00
|
|
|
static int
|
2013-04-19 10:04:28 +08:00
|
|
|
vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
|
2009-10-13 15:15:51 +08:00
|
|
|
{
|
|
|
|
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
|
|
|
|
|
2011-08-08 07:15:47 +08:00
|
|
|
if (!(netdev->flags & IFF_PROMISC)) {
|
|
|
|
u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
|
|
|
|
spin_lock_irqsave(&adapter->cmd_lock, flags);
|
|
|
|
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
|
|
|
|
VMXNET3_CMD_UPDATE_VLAN_FILTERS);
|
|
|
|
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
|
|
|
|
}
|
2011-06-23 21:04:39 +08:00
|
|
|
|
|
|
|
set_bit(vid, adapter->active_vlans);
|
2011-12-09 08:52:37 +08:00
|
|
|
|
|
|
|
return 0;
|
2009-10-13 15:15:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-12-09 08:52:37 +08:00
|
|
|
static int
|
2013-04-19 10:04:28 +08:00
|
|
|
vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
|
2009-10-13 15:15:51 +08:00
|
|
|
{
|
|
|
|
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
|
|
|
|
|
2011-08-08 07:15:47 +08:00
|
|
|
if (!(netdev->flags & IFF_PROMISC)) {
|
|
|
|
u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
|
|
|
|
spin_lock_irqsave(&adapter->cmd_lock, flags);
|
|
|
|
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
|
|
|
|
VMXNET3_CMD_UPDATE_VLAN_FILTERS);
|
|
|
|
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
|
|
|
|
}
|
2011-06-23 21:04:39 +08:00
|
|
|
|
|
|
|
clear_bit(vid, adapter->active_vlans);
|
2011-12-09 08:52:37 +08:00
|
|
|
|
|
|
|
return 0;
|
2009-10-13 15:15:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static u8 *
|
|
|
|
vmxnet3_copy_mc(struct net_device *netdev)
|
|
|
|
{
|
|
|
|
u8 *buf = NULL;
|
2010-02-08 12:30:35 +08:00
|
|
|
u32 sz = netdev_mc_count(netdev) * ETH_ALEN;
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
/* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
|
|
|
|
if (sz <= 0xffff) {
|
|
|
|
/* We may be called with BH disabled */
|
|
|
|
buf = kmalloc(sz, GFP_ATOMIC);
|
|
|
|
if (buf) {
|
2010-04-02 05:22:57 +08:00
|
|
|
struct netdev_hw_addr *ha;
|
2010-02-24 07:17:07 +08:00
|
|
|
int i = 0;
|
2009-10-13 15:15:51 +08:00
|
|
|
|
2010-04-02 05:22:57 +08:00
|
|
|
netdev_for_each_mc_addr(ha, netdev)
|
|
|
|
memcpy(buf + i++ * ETH_ALEN, ha->addr,
|
2009-10-13 15:15:51 +08:00
|
|
|
ETH_ALEN);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return buf;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * ndo_set_rx_mode: translate the netdev's flags and multicast list into
 * the device's rx mode bits and multicast filter table, then kick the
 * device to pick up the new configuration.
 */
static void
vmxnet3_set_mc(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;
	struct Vmxnet3_RxFilterConf *rxConf =
					&adapter->shared->devRead.rxFilterConf;
	u8 *new_table = NULL;
	dma_addr_t new_table_pa = 0;
	bool new_table_pa_valid = false;
	u32 new_mode = VMXNET3_RXM_UCAST;

	if (netdev->flags & IFF_PROMISC) {
		/* Promiscuous mode: clear the VLAN filter entirely so all
		 * tagged traffic passes too.
		 */
		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
		memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));

		new_mode |= VMXNET3_RXM_PROMISC;
	} else {
		vmxnet3_restore_vlan(adapter);
	}

	if (netdev->flags & IFF_BROADCAST)
		new_mode |= VMXNET3_RXM_BCAST;

	if (netdev->flags & IFF_ALLMULTI)
		new_mode |= VMXNET3_RXM_ALL_MULTI;
	else
		if (!netdev_mc_empty(netdev)) {
			new_table = vmxnet3_copy_mc(netdev);
			if (new_table) {
				size_t sz = netdev_mc_count(netdev) * ETH_ALEN;

				rxConf->mfTableLen = cpu_to_le16(sz);
				new_table_pa = dma_map_single(
							&adapter->pdev->dev,
							new_table,
							sz,
							PCI_DMA_TODEVICE);
				if (!dma_mapping_error(&adapter->pdev->dev,
						       new_table_pa)) {
					new_mode |= VMXNET3_RXM_MCAST;
					new_table_pa_valid = true;
					rxConf->mfTablePA = cpu_to_le64(
								new_table_pa);
				}
			}
			/* Copy or DMA-map failure: fall back to receiving
			 * all multicast rather than dropping traffic.
			 */
			if (!new_table_pa_valid) {
				netdev_info(netdev,
					    "failed to copy mcast list, setting ALL_MULTI\n");
				new_mode |= VMXNET3_RXM_ALL_MULTI;
			}
		}

	if (!(new_mode & VMXNET3_RXM_MCAST)) {
		rxConf->mfTableLen = 0;
		rxConf->mfTablePA = 0;
	}

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	if (new_mode != rxConf->rxMode) {
		rxConf->rxMode = cpu_to_le32(new_mode);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_RX_MODE);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
	}

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_MAC_FILTERS);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	if (new_table_pa_valid)
		/* NOTE(review): mfTableLen holds a cpu_to_le16() value here;
		 * on big-endian this differs from the mapped size — confirm
		 * whether the unmap length is intended to be byte-swapped.
		 */
		dma_unmap_single(&adapter->pdev->dev, new_table_pa,
				 rxConf->mfTableLen, PCI_DMA_TODEVICE);
	kfree(new_table);
}
|
|
|
|
|
2010-11-19 18:55:24 +08:00
|
|
|
void
|
|
|
|
vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < adapter->num_rx_queues; i++)
|
|
|
|
vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
|
|
|
|
}
|
|
|
|
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Set up driver_shared based on settings in adapter.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
 * Populate the driver-shared memory region read by the device: driver
 * identity, feature flags, per-queue ring addresses/sizes, RSS
 * configuration, interrupt settings, and the initial rx filter state.
 * Called with all rings already allocated; vmxnet3_activate_dev()
 * hands the region's address to the device afterwards.
 */
static void
vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
{
	struct Vmxnet3_DriverShared *shared = adapter->shared;
	struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
	struct Vmxnet3_TxQueueConf *tqc;
	struct Vmxnet3_RxQueueConf *rqc;
	int i;

	memset(shared, 0, sizeof(*shared));

	/* driver settings */
	shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
	devRead->misc.driverInfo.version = cpu_to_le32(
						VMXNET3_DRIVER_VERSION_NUM);
	devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
				VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
	devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
	/* The gos bitfield struct is byte-swapped in place as one u32. */
	*((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
				*((u32 *)&devRead->misc.driverInfo.gos));
	devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
	devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);

	devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa);
	devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));

	/* set up feature flags */
	if (adapter->netdev->features & NETIF_F_RXCSUM)
		devRead->misc.uptFeatures |= UPT1_F_RXCSUM;

	if (adapter->netdev->features & NETIF_F_LRO) {
		devRead->misc.uptFeatures |= UPT1_F_LRO;
		devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
	}
	if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
		devRead->misc.uptFeatures |= UPT1_F_RXVLAN;

	devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
	devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
	devRead->misc.queueDescLen = cpu_to_le32(
		adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
		adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));

	/* tx queue settings */
	devRead->misc.numTxQueues =  adapter->num_tx_queues;
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct vmxnet3_tx_queue	*tq = &adapter->tx_queue[i];
		BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
		tqc = &adapter->tqd_start[i].conf;
		tqc->txRingBasePA   = cpu_to_le64(tq->tx_ring.basePA);
		tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
		tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
		tqc->ddPA           = cpu_to_le64(tq->buf_info_pa);
		tqc->txRingSize     = cpu_to_le32(tq->tx_ring.size);
		tqc->dataRingSize   = cpu_to_le32(tq->data_ring.size);
		tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size);
		tqc->compRingSize   = cpu_to_le32(tq->comp_ring.size);
		tqc->ddLen          = cpu_to_le32(
					sizeof(struct vmxnet3_tx_buf_info) *
					tqc->txRingSize);
		tqc->intrIdx        = tq->comp_ring.intr_idx;
	}

	/* rx queue settings */
	devRead->misc.numRxQueues = adapter->num_rx_queues;
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct vmxnet3_rx_queue	*rq = &adapter->rx_queue[i];
		rqc = &adapter->rqd_start[i].conf;
		rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
		rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
		rqc->compRingBasePA  = cpu_to_le64(rq->comp_ring.basePA);
		rqc->ddPA            = cpu_to_le64(rq->buf_info_pa);
		rqc->rxRingSize[0]   = cpu_to_le32(rq->rx_ring[0].size);
		rqc->rxRingSize[1]   = cpu_to_le32(rq->rx_ring[1].size);
		rqc->compRingSize    = cpu_to_le32(rq->comp_ring.size);
		rqc->ddLen           = cpu_to_le32(
					sizeof(struct vmxnet3_rx_buf_info) *
					(rqc->rxRingSize[0] +
					 rqc->rxRingSize[1]));
		rqc->intrIdx         = rq->comp_ring.intr_idx;
		/* The rx data ring fields exist only on v3+ devices. */
		if (VMXNET3_VERSION_GE_3(adapter)) {
			rqc->rxDataRingBasePA =
				cpu_to_le64(rq->data_ring.basePA);
			rqc->rxDataRingDescSize =
				cpu_to_le16(rq->data_ring.desc_size);
		}
	}

#ifdef VMXNET3_RSS
	memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));

	if (adapter->rss) {
		struct UPT1_RSSConf *rssConf = adapter->rss_conf;

		devRead->misc.uptFeatures |= UPT1_F_RSS;
		devRead->misc.numRxQueues = adapter->num_rx_queues;
		rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
				    UPT1_RSS_HASH_TYPE_IPV4 |
				    UPT1_RSS_HASH_TYPE_TCP_IPV6 |
				    UPT1_RSS_HASH_TYPE_IPV6;
		rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
		rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
		rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
		netdev_rss_key_fill(rssConf->hashKey, sizeof(rssConf->hashKey));

		/* Default round-robin spread of flows over rx queues. */
		for (i = 0; i < rssConf->indTableSize; i++)
			rssConf->indTable[i] = ethtool_rxfh_indir_default(
				i, adapter->num_rx_queues);

		devRead->rssConfDesc.confVer = 1;
		devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf));
		devRead->rssConfDesc.confPA =
			cpu_to_le64(adapter->rss_conf_pa);
	}

#endif /* VMXNET3_RSS */

	/* intr settings */
	devRead->intrConf.autoMask = adapter->intr.mask_mode ==
				     VMXNET3_IMM_AUTO;
	devRead->intrConf.numIntrs = adapter->intr.num_intrs;
	for (i = 0; i < adapter->intr.num_intrs; i++)
		devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];

	devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
	devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);

	/* rx filter settings */
	devRead->rxFilterConf.rxMode = 0;
	vmxnet3_restore_vlan(adapter);
	vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);

	/* the rest are already zeroed */
}
|
|
|
|
|
2016-06-17 01:51:57 +08:00
|
|
|
static void
|
|
|
|
vmxnet3_init_coalesce(struct vmxnet3_adapter *adapter)
|
|
|
|
{
|
|
|
|
struct Vmxnet3_DriverShared *shared = adapter->shared;
|
|
|
|
union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
if (!VMXNET3_VERSION_GE_3(adapter))
|
|
|
|
return;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&adapter->cmd_lock, flags);
|
|
|
|
cmdInfo->varConf.confVer = 1;
|
|
|
|
cmdInfo->varConf.confLen =
|
|
|
|
cpu_to_le32(sizeof(*adapter->coal_conf));
|
|
|
|
cmdInfo->varConf.confPA = cpu_to_le64(adapter->coal_conf_pa);
|
|
|
|
|
|
|
|
if (adapter->default_coal_mode) {
|
|
|
|
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
|
|
|
|
VMXNET3_CMD_GET_COALESCE);
|
|
|
|
} else {
|
|
|
|
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
|
|
|
|
VMXNET3_CMD_SET_COALESCE);
|
|
|
|
}
|
|
|
|
|
|
|
|
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
|
|
|
|
}
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
int
|
|
|
|
vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
|
|
|
|
{
|
2010-11-19 18:55:24 +08:00
|
|
|
int err, i;
|
2009-10-13 15:15:51 +08:00
|
|
|
u32 ret;
|
2011-01-14 22:59:57 +08:00
|
|
|
unsigned long flags;
|
2009-10-13 15:15:51 +08:00
|
|
|
|
2013-01-15 15:28:29 +08:00
|
|
|
netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
|
2010-11-19 18:55:24 +08:00
|
|
|
" ring sizes %u %u %u\n", adapter->netdev->name,
|
|
|
|
adapter->skb_buf_size, adapter->rx_buf_per_pkt,
|
|
|
|
adapter->tx_queue[0].tx_ring.size,
|
|
|
|
adapter->rx_queue[0].rx_ring[0].size,
|
|
|
|
adapter->rx_queue[0].rx_ring[1].size);
|
|
|
|
|
|
|
|
vmxnet3_tq_init_all(adapter);
|
|
|
|
err = vmxnet3_rq_init_all(adapter);
|
2009-10-13 15:15:51 +08:00
|
|
|
if (err) {
|
2013-01-15 15:28:30 +08:00
|
|
|
netdev_err(adapter->netdev,
|
|
|
|
"Failed to init rx queue error %d\n", err);
|
2009-10-13 15:15:51 +08:00
|
|
|
goto rq_err;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = vmxnet3_request_irqs(adapter);
|
|
|
|
if (err) {
|
2013-01-15 15:28:30 +08:00
|
|
|
netdev_err(adapter->netdev,
|
|
|
|
"Failed to setup irq for error %d\n", err);
|
2009-10-13 15:15:51 +08:00
|
|
|
goto irq_err;
|
|
|
|
}
|
|
|
|
|
|
|
|
vmxnet3_setup_driver_shared(adapter);
|
|
|
|
|
2009-11-16 21:41:33 +08:00
|
|
|
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
|
|
|
|
adapter->shared_pa));
|
|
|
|
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
|
|
|
|
adapter->shared_pa));
|
2011-01-14 22:59:57 +08:00
|
|
|
spin_lock_irqsave(&adapter->cmd_lock, flags);
|
2009-10-13 15:15:51 +08:00
|
|
|
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
|
|
|
|
VMXNET3_CMD_ACTIVATE_DEV);
|
|
|
|
ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
|
2011-01-14 22:59:57 +08:00
|
|
|
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
if (ret != 0) {
|
2013-01-15 15:28:30 +08:00
|
|
|
netdev_err(adapter->netdev,
|
|
|
|
"Failed to activate dev: error %u\n", ret);
|
2009-10-13 15:15:51 +08:00
|
|
|
err = -EINVAL;
|
|
|
|
goto activate_err;
|
|
|
|
}
|
2010-11-19 18:55:24 +08:00
|
|
|
|
2016-06-17 01:51:57 +08:00
|
|
|
vmxnet3_init_coalesce(adapter);
|
|
|
|
|
2010-11-19 18:55:24 +08:00
|
|
|
for (i = 0; i < adapter->num_rx_queues; i++) {
|
|
|
|
VMXNET3_WRITE_BAR0_REG(adapter,
|
|
|
|
VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
|
|
|
|
adapter->rx_queue[i].rx_ring[0].next2fill);
|
|
|
|
VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
|
|
|
|
(i * VMXNET3_REG_ALIGN)),
|
|
|
|
adapter->rx_queue[i].rx_ring[1].next2fill);
|
|
|
|
}
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
/* Apply the rx filter settins last. */
|
|
|
|
vmxnet3_set_mc(adapter->netdev);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check link state when first activating device. It will start the
|
|
|
|
* tx queue if the link is up.
|
|
|
|
*/
|
2010-07-16 05:51:14 +08:00
|
|
|
vmxnet3_check_link(adapter, true);
|
2010-11-19 18:55:24 +08:00
|
|
|
for (i = 0; i < adapter->num_rx_queues; i++)
|
|
|
|
napi_enable(&adapter->rx_queue[i].napi);
|
2009-10-13 15:15:51 +08:00
|
|
|
vmxnet3_enable_all_intrs(adapter);
|
|
|
|
clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
activate_err:
|
|
|
|
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
|
|
|
|
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
|
|
|
|
vmxnet3_free_irqs(adapter);
|
|
|
|
irq_err:
|
|
|
|
rq_err:
|
|
|
|
/* free up buffers we allocated */
|
2010-11-19 18:55:24 +08:00
|
|
|
vmxnet3_rq_cleanup_all(adapter);
|
2009-10-13 15:15:51 +08:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
|
|
|
|
{
|
2011-01-14 22:59:57 +08:00
|
|
|
unsigned long flags;
|
|
|
|
spin_lock_irqsave(&adapter->cmd_lock, flags);
|
2009-10-13 15:15:51 +08:00
|
|
|
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
|
2011-01-14 22:59:57 +08:00
|
|
|
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
|
2009-10-13 15:15:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
int
|
|
|
|
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
|
|
|
|
{
|
2010-11-19 18:55:24 +08:00
|
|
|
int i;
|
2011-01-14 22:59:57 +08:00
|
|
|
unsigned long flags;
|
2009-10-13 15:15:51 +08:00
|
|
|
if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
2011-01-14 22:59:57 +08:00
|
|
|
spin_lock_irqsave(&adapter->cmd_lock, flags);
|
2009-10-13 15:15:51 +08:00
|
|
|
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
|
|
|
|
VMXNET3_CMD_QUIESCE_DEV);
|
2011-01-14 22:59:57 +08:00
|
|
|
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
|
2009-10-13 15:15:51 +08:00
|
|
|
vmxnet3_disable_all_intrs(adapter);
|
|
|
|
|
2010-11-19 18:55:24 +08:00
|
|
|
for (i = 0; i < adapter->num_rx_queues; i++)
|
|
|
|
napi_disable(&adapter->rx_queue[i].napi);
|
2009-10-13 15:15:51 +08:00
|
|
|
netif_tx_disable(adapter->netdev);
|
|
|
|
adapter->link_speed = 0;
|
|
|
|
netif_carrier_off(adapter->netdev);
|
|
|
|
|
2010-11-19 18:55:24 +08:00
|
|
|
vmxnet3_tq_cleanup_all(adapter);
|
|
|
|
vmxnet3_rq_cleanup_all(adapter);
|
2009-10-13 15:15:51 +08:00
|
|
|
vmxnet3_free_irqs(adapter);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
|
|
|
|
{
|
|
|
|
u32 tmp;
|
|
|
|
|
|
|
|
tmp = *(u32 *)mac;
|
|
|
|
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);
|
|
|
|
|
|
|
|
tmp = (mac[5] << 8) | mac[4];
|
|
|
|
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
|
|
|
|
{
|
|
|
|
struct sockaddr *addr = p;
|
|
|
|
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
|
|
|
|
|
|
|
|
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
|
|
|
|
vmxnet3_write_mac_addr(adapter, addr->sa_data);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/* ==================== initialization and cleanup routines ============ */
|
|
|
|
|
|
|
|
static int
|
2018-05-14 20:14:34 +08:00
|
|
|
vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter)
|
2009-10-13 15:15:51 +08:00
|
|
|
{
|
|
|
|
int err;
|
|
|
|
unsigned long mmio_start, mmio_len;
|
|
|
|
struct pci_dev *pdev = adapter->pdev;
|
|
|
|
|
|
|
|
err = pci_enable_device(pdev);
|
|
|
|
if (err) {
|
2013-01-15 15:28:30 +08:00
|
|
|
dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err);
|
2009-10-13 15:15:51 +08:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = pci_request_selected_regions(pdev, (1 << 2) - 1,
|
|
|
|
vmxnet3_driver_name);
|
|
|
|
if (err) {
|
2013-01-15 15:28:30 +08:00
|
|
|
dev_err(&pdev->dev,
|
|
|
|
"Failed to request region for adapter: error %d\n", err);
|
2018-05-14 20:14:34 +08:00
|
|
|
goto err_enable_device;
|
2009-10-13 15:15:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
pci_set_master(pdev);
|
|
|
|
|
|
|
|
mmio_start = pci_resource_start(pdev, 0);
|
|
|
|
mmio_len = pci_resource_len(pdev, 0);
|
|
|
|
adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
|
|
|
|
if (!adapter->hw_addr0) {
|
2013-01-15 15:28:30 +08:00
|
|
|
dev_err(&pdev->dev, "Failed to map bar0\n");
|
2009-10-13 15:15:51 +08:00
|
|
|
err = -EIO;
|
|
|
|
goto err_ioremap;
|
|
|
|
}
|
|
|
|
|
|
|
|
mmio_start = pci_resource_start(pdev, 1);
|
|
|
|
mmio_len = pci_resource_len(pdev, 1);
|
|
|
|
adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
|
|
|
|
if (!adapter->hw_addr1) {
|
2013-01-15 15:28:30 +08:00
|
|
|
dev_err(&pdev->dev, "Failed to map bar1\n");
|
2009-10-13 15:15:51 +08:00
|
|
|
err = -EIO;
|
|
|
|
goto err_bar1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err_bar1:
|
|
|
|
iounmap(adapter->hw_addr0);
|
|
|
|
err_ioremap:
|
|
|
|
pci_release_selected_regions(pdev, (1 << 2) - 1);
|
2018-05-14 20:14:34 +08:00
|
|
|
err_enable_device:
|
2009-10-13 15:15:51 +08:00
|
|
|
pci_disable_device(pdev);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
|
|
|
|
{
|
|
|
|
BUG_ON(!adapter->pdev);
|
|
|
|
|
|
|
|
iounmap(adapter->hw_addr0);
|
|
|
|
iounmap(adapter->hw_addr1);
|
|
|
|
pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
|
|
|
|
pci_disable_device(adapter->pdev);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
|
|
|
|
{
|
2010-11-19 18:55:24 +08:00
|
|
|
size_t sz, i, ring0_size, ring1_size, comp_size;
|
2009-10-13 15:15:51 +08:00
|
|
|
if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
|
|
|
|
VMXNET3_MAX_ETH_HDR_SIZE) {
|
|
|
|
adapter->skb_buf_size = adapter->netdev->mtu +
|
|
|
|
VMXNET3_MAX_ETH_HDR_SIZE;
|
|
|
|
if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
|
|
|
|
adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
|
|
|
|
|
|
|
|
adapter->rx_buf_per_pkt = 1;
|
|
|
|
} else {
|
|
|
|
adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
|
|
|
|
sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
|
|
|
|
VMXNET3_MAX_ETH_HDR_SIZE;
|
|
|
|
adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* for simplicity, force the ring0 size to be a multiple of
|
|
|
|
* rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
|
|
|
|
*/
|
|
|
|
sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
|
2010-11-19 18:55:24 +08:00
|
|
|
ring0_size = adapter->rx_queue[0].rx_ring[0].size;
|
|
|
|
ring0_size = (ring0_size + sz - 1) / sz * sz;
|
2011-01-14 22:59:25 +08:00
|
|
|
ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
|
2010-11-19 18:55:24 +08:00
|
|
|
sz * sz);
|
|
|
|
ring1_size = adapter->rx_queue[0].rx_ring[1].size;
|
2015-01-07 01:20:15 +08:00
|
|
|
ring1_size = (ring1_size + sz - 1) / sz * sz;
|
|
|
|
ring1_size = min_t(u32, ring1_size, VMXNET3_RX_RING2_MAX_SIZE /
|
|
|
|
sz * sz);
|
2010-11-19 18:55:24 +08:00
|
|
|
comp_size = ring0_size + ring1_size;
|
|
|
|
|
|
|
|
for (i = 0; i < adapter->num_rx_queues; i++) {
|
2018-02-02 01:29:21 +08:00
|
|
|
struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
|
|
|
|
|
2010-11-19 18:55:24 +08:00
|
|
|
rq->rx_ring[0].size = ring0_size;
|
|
|
|
rq->rx_ring[1].size = ring1_size;
|
|
|
|
rq->comp_ring.size = comp_size;
|
|
|
|
}
|
2009-10-13 15:15:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
int
|
|
|
|
vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
|
2016-06-17 01:51:55 +08:00
|
|
|
u32 rx_ring_size, u32 rx_ring2_size,
|
2016-06-17 01:51:56 +08:00
|
|
|
u16 txdata_desc_size, u16 rxdata_desc_size)
|
2009-10-13 15:15:51 +08:00
|
|
|
{
|
2010-11-19 18:55:24 +08:00
|
|
|
int err = 0, i;
|
|
|
|
|
|
|
|
for (i = 0; i < adapter->num_tx_queues; i++) {
|
|
|
|
struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
|
|
|
|
tq->tx_ring.size = tx_ring_size;
|
|
|
|
tq->data_ring.size = tx_ring_size;
|
|
|
|
tq->comp_ring.size = tx_ring_size;
|
2016-06-17 01:51:55 +08:00
|
|
|
tq->txdata_desc_size = txdata_desc_size;
|
2010-11-19 18:55:24 +08:00
|
|
|
tq->shared = &adapter->tqd_start[i].ctrl;
|
|
|
|
tq->stopped = true;
|
|
|
|
tq->adapter = adapter;
|
|
|
|
tq->qid = i;
|
|
|
|
err = vmxnet3_tq_create(tq, adapter);
|
|
|
|
/*
|
|
|
|
* Too late to change num_tx_queues. We cannot do away with
|
|
|
|
* lesser number of queues than what we asked for
|
|
|
|
*/
|
|
|
|
if (err)
|
|
|
|
goto queue_err;
|
|
|
|
}
|
2009-10-13 15:15:51 +08:00
|
|
|
|
2010-11-19 18:55:24 +08:00
|
|
|
adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
|
|
|
|
adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
|
2009-10-13 15:15:51 +08:00
|
|
|
vmxnet3_adjust_rx_ring_size(adapter);
|
2016-06-17 01:51:56 +08:00
|
|
|
|
|
|
|
adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
|
2010-11-19 18:55:24 +08:00
|
|
|
for (i = 0; i < adapter->num_rx_queues; i++) {
|
|
|
|
struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
|
|
|
|
/* qid and qid2 for rx queues will be assigned later when num
|
|
|
|
* of rx queues is finalized after allocating intrs */
|
|
|
|
rq->shared = &adapter->rqd_start[i].ctrl;
|
|
|
|
rq->adapter = adapter;
|
2016-06-17 01:51:56 +08:00
|
|
|
rq->data_ring.desc_size = rxdata_desc_size;
|
2010-11-19 18:55:24 +08:00
|
|
|
err = vmxnet3_rq_create(rq, adapter);
|
|
|
|
if (err) {
|
|
|
|
if (i == 0) {
|
2013-01-15 15:28:30 +08:00
|
|
|
netdev_err(adapter->netdev,
|
|
|
|
"Could not allocate any rx queues. "
|
|
|
|
"Aborting.\n");
|
2010-11-19 18:55:24 +08:00
|
|
|
goto queue_err;
|
|
|
|
} else {
|
2013-01-15 15:28:30 +08:00
|
|
|
netdev_info(adapter->netdev,
|
|
|
|
"Number of rx queues changed "
|
|
|
|
"to : %d.\n", i);
|
2010-11-19 18:55:24 +08:00
|
|
|
adapter->num_rx_queues = i;
|
|
|
|
err = 0;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2016-06-17 01:51:56 +08:00
|
|
|
|
|
|
|
if (!adapter->rxdataring_enabled)
|
|
|
|
vmxnet3_rq_destroy_all_rxdataring(adapter);
|
|
|
|
|
2010-11-19 18:55:24 +08:00
|
|
|
return err;
|
|
|
|
queue_err:
|
|
|
|
vmxnet3_tq_destroy_all(adapter);
|
2009-10-13 15:15:51 +08:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
vmxnet3_open(struct net_device *netdev)
|
|
|
|
{
|
|
|
|
struct vmxnet3_adapter *adapter;
|
2010-11-19 18:55:24 +08:00
|
|
|
int err, i;
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
adapter = netdev_priv(netdev);
|
|
|
|
|
2010-11-19 18:55:24 +08:00
|
|
|
for (i = 0; i < adapter->num_tx_queues; i++)
|
|
|
|
spin_lock_init(&adapter->tx_queue[i].tx_lock);
|
2009-10-13 15:15:51 +08:00
|
|
|
|
2016-06-17 01:51:55 +08:00
|
|
|
if (VMXNET3_VERSION_GE_3(adapter)) {
|
|
|
|
unsigned long flags;
|
|
|
|
u16 txdata_desc_size;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&adapter->cmd_lock, flags);
|
|
|
|
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
|
|
|
|
VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
|
|
|
|
txdata_desc_size = VMXNET3_READ_BAR1_REG(adapter,
|
|
|
|
VMXNET3_REG_CMD);
|
|
|
|
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
|
|
|
|
|
|
|
|
if ((txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE) ||
|
|
|
|
(txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE) ||
|
|
|
|
(txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK)) {
|
|
|
|
adapter->txdata_desc_size =
|
|
|
|
sizeof(struct Vmxnet3_TxDataDesc);
|
|
|
|
} else {
|
|
|
|
adapter->txdata_desc_size = txdata_desc_size;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
adapter->txdata_desc_size = sizeof(struct Vmxnet3_TxDataDesc);
|
|
|
|
}
|
|
|
|
|
|
|
|
err = vmxnet3_create_queues(adapter,
|
|
|
|
adapter->tx_ring_size,
|
2014-06-13 22:03:21 +08:00
|
|
|
adapter->rx_ring_size,
|
2016-06-17 01:51:55 +08:00
|
|
|
adapter->rx_ring2_size,
|
2016-06-17 01:51:56 +08:00
|
|
|
adapter->txdata_desc_size,
|
|
|
|
adapter->rxdata_desc_size);
|
2009-10-13 15:15:51 +08:00
|
|
|
if (err)
|
|
|
|
goto queue_err;
|
|
|
|
|
|
|
|
err = vmxnet3_activate_dev(adapter);
|
|
|
|
if (err)
|
|
|
|
goto activate_err;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
activate_err:
|
2010-11-19 18:55:24 +08:00
|
|
|
vmxnet3_rq_destroy_all(adapter);
|
|
|
|
vmxnet3_tq_destroy_all(adapter);
|
2009-10-13 15:15:51 +08:00
|
|
|
queue_err:
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
vmxnet3_close(struct net_device *netdev)
|
|
|
|
{
|
|
|
|
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Reset_work may be in the middle of resetting the device, wait for its
|
|
|
|
* completion.
|
|
|
|
*/
|
|
|
|
while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
|
2018-05-17 11:46:41 +08:00
|
|
|
usleep_range(1000, 2000);
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
vmxnet3_quiesce_dev(adapter);
|
|
|
|
|
2010-11-19 18:55:24 +08:00
|
|
|
vmxnet3_rq_destroy_all(adapter);
|
|
|
|
vmxnet3_tq_destroy_all(adapter);
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
|
|
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
vmxnet3_force_close(struct vmxnet3_adapter *adapter)
|
|
|
|
{
|
2010-11-19 18:55:24 +08:00
|
|
|
int i;
|
|
|
|
|
2009-10-13 15:15:51 +08:00
|
|
|
/*
|
|
|
|
* we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
|
|
|
|
* vmxnet3_close() will deadlock.
|
|
|
|
*/
|
|
|
|
BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
|
|
|
|
|
|
|
|
/* we need to enable NAPI, otherwise dev_close will deadlock */
|
2010-11-19 18:55:24 +08:00
|
|
|
for (i = 0; i < adapter->num_rx_queues; i++)
|
|
|
|
napi_enable(&adapter->rx_queue[i].napi);
|
2017-05-13 00:00:01 +08:00
|
|
|
/*
|
|
|
|
* Need to clear the quiesce bit to ensure that vmxnet3_close
|
|
|
|
* can quiesce the device properly
|
|
|
|
*/
|
|
|
|
clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
|
2009-10-13 15:15:51 +08:00
|
|
|
dev_close(adapter->netdev);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
|
|
|
|
{
|
|
|
|
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
|
|
|
|
int err = 0;
|
|
|
|
|
|
|
|
netdev->mtu = new_mtu;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Reset_work may be in the middle of resetting the device, wait for its
|
|
|
|
* completion.
|
|
|
|
*/
|
|
|
|
while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
|
2018-05-17 11:46:41 +08:00
|
|
|
usleep_range(1000, 2000);
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
if (netif_running(netdev)) {
|
|
|
|
vmxnet3_quiesce_dev(adapter);
|
|
|
|
vmxnet3_reset_dev(adapter);
|
|
|
|
|
|
|
|
/* we need to re-create the rx queue based on the new mtu */
|
2010-11-19 18:55:24 +08:00
|
|
|
vmxnet3_rq_destroy_all(adapter);
|
2009-10-13 15:15:51 +08:00
|
|
|
vmxnet3_adjust_rx_ring_size(adapter);
|
2010-11-19 18:55:24 +08:00
|
|
|
err = vmxnet3_rq_create_all(adapter);
|
2009-10-13 15:15:51 +08:00
|
|
|
if (err) {
|
2013-01-15 15:28:30 +08:00
|
|
|
netdev_err(netdev,
|
|
|
|
"failed to re-create rx queues, "
|
|
|
|
" error %d. Closing it.\n", err);
|
2009-10-13 15:15:51 +08:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = vmxnet3_activate_dev(adapter);
|
|
|
|
if (err) {
|
2013-01-15 15:28:30 +08:00
|
|
|
netdev_err(netdev,
|
|
|
|
"failed to re-activate, error %d. "
|
|
|
|
"Closing it\n", err);
|
2009-10-13 15:15:51 +08:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
out:
|
|
|
|
clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
|
|
|
|
if (err)
|
|
|
|
vmxnet3_force_close(adapter);
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
|
|
|
|
{
|
|
|
|
struct net_device *netdev = adapter->netdev;
|
|
|
|
|
2011-04-18 21:31:21 +08:00
|
|
|
netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
|
2013-04-19 10:04:27 +08:00
|
|
|
NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
|
|
|
|
NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
|
2011-06-23 21:04:39 +08:00
|
|
|
NETIF_F_LRO;
|
2011-04-18 21:31:21 +08:00
|
|
|
if (dma64)
|
2011-07-21 01:21:51 +08:00
|
|
|
netdev->hw_features |= NETIF_F_HIGHDMA;
|
2011-06-23 21:04:39 +08:00
|
|
|
netdev->vlan_features = netdev->hw_features &
|
2013-04-19 10:04:27 +08:00
|
|
|
~(NETIF_F_HW_VLAN_CTAG_TX |
|
|
|
|
NETIF_F_HW_VLAN_CTAG_RX);
|
|
|
|
netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
|
2009-10-13 15:15:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
|
|
|
|
{
|
|
|
|
u32 tmp;
|
|
|
|
|
|
|
|
tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
|
|
|
|
*(u32 *)mac = tmp;
|
|
|
|
|
|
|
|
tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
|
|
|
|
mac[4] = tmp & 0xff;
|
|
|
|
mac[5] = (tmp >> 8) & 0xff;
|
|
|
|
}
|
|
|
|
|
2010-11-19 18:55:24 +08:00
|
|
|
#ifdef CONFIG_PCI_MSI
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Enable MSIx vectors.
|
|
|
|
* Returns :
|
2011-03-31 09:57:33 +08:00
|
|
|
* VMXNET3_LINUX_MIN_MSIX_VECT when only minimum number of vectors required
|
2014-02-18 18:12:02 +08:00
|
|
|
* were enabled.
|
|
|
|
* number of vectors which were enabled otherwise (this number is greater
|
2010-11-19 18:55:24 +08:00
|
|
|
* than VMXNET3_LINUX_MIN_MSIX_VECT)
|
|
|
|
*/
|
|
|
|
|
|
|
|
static int
|
2014-02-18 18:12:02 +08:00
|
|
|
vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec)
|
2010-11-19 18:55:24 +08:00
|
|
|
{
|
2014-02-18 18:12:03 +08:00
|
|
|
int ret = pci_enable_msix_range(adapter->pdev,
|
|
|
|
adapter->intr.msix_entries, nvec, nvec);
|
2010-11-19 18:55:24 +08:00
|
|
|
|
2014-02-18 18:12:03 +08:00
|
|
|
if (ret == -ENOSPC && nvec > VMXNET3_LINUX_MIN_MSIX_VECT) {
|
|
|
|
dev_err(&adapter->netdev->dev,
|
|
|
|
"Failed to enable %d MSI-X, trying %d\n",
|
|
|
|
nvec, VMXNET3_LINUX_MIN_MSIX_VECT);
|
|
|
|
|
|
|
|
ret = pci_enable_msix_range(adapter->pdev,
|
|
|
|
adapter->intr.msix_entries,
|
|
|
|
VMXNET3_LINUX_MIN_MSIX_VECT,
|
|
|
|
VMXNET3_LINUX_MIN_MSIX_VECT);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ret < 0) {
|
|
|
|
dev_err(&adapter->netdev->dev,
|
|
|
|
"Failed to enable MSI-X, error: %d\n", ret);
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
2010-11-19 18:55:24 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
#endif /* CONFIG_PCI_MSI */
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
static void
|
|
|
|
vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
|
|
|
|
{
|
|
|
|
u32 cfg;
|
vmxnet3: Consistently disable irqs when taking adapter->cmd_lock
Using the vmxnet3 driver produces a lockdep warning because
vmxnet3_set_mc(), which is called with mc->mca_lock held, takes
adapter->cmd_lock. However, there are a couple of places where
adapter->cmd_lock is taken with softirqs enabled, lockdep warns that a
softirq that tries to take mc->mca_lock could happen while
adapter->cmd_lock is held, leading to an AB-BA deadlock.
I'm not sure if this is a real potential deadlock or not, but the
simplest and best fix seems to be simply to make sure we take cmd_lock
with spin_lock_irqsave() everywhere -- the places with plain spin_lock
just look like oversights.
The full enormous lockdep warning is:
=========================================================
[ INFO: possible irq lock inversion dependency detected ]
2.6.39-rc6+ #1
---------------------------------------------------------
ifconfig/567 just changed the state of lock:
(&(&mc->mca_lock)->rlock){+.-...}, at: [<ffffffff81531e9f>] mld_ifc_timer_expire+0xff/0x280
but this lock took another, SOFTIRQ-unsafe lock in the past:
(&(&adapter->cmd_lock)->rlock){+.+...}
and interrupts could create inverse lock ordering between them.
other info that might help us debug this:
4 locks held by ifconfig/567:
#0: (rtnl_mutex){+.+.+.}, at: [<ffffffff8147d547>] rtnl_lock+0x17/0x20
#1: ((inetaddr_chain).rwsem){.+.+.+}, at: [<ffffffff810896cf>] __blocking_notifier_call_chain+0x5f/0xb0
#2: (&idev->mc_ifc_timer){+.-...}, at: [<ffffffff8106f21b>] run_timer_softirq+0xeb/0x3f0
#3: (&ndev->lock){++.-..}, at: [<ffffffff81531dd2>] mld_ifc_timer_expire+0x32/0x280
the shortest dependencies between 2nd lock and 1st lock:
-> (&(&adapter->cmd_lock)->rlock){+.+...} ops: 11 {
HARDIRQ-ON-W at:
[<ffffffff8109ad86>] __lock_acquire+0x7f6/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff81571156>] _raw_spin_lock+0x36/0x70
[<ffffffffa000d212>] vmxnet3_alloc_intr_resources+0x22/0x230 [vmxnet3]
[<ffffffffa0014031>] vmxnet3_probe_device+0x5f6/0x15c5 [vmxnet3]
[<ffffffff812df67f>] local_pci_probe+0x5f/0xd0
[<ffffffff812dfde9>] pci_device_probe+0x119/0x120
[<ffffffff81373df6>] driver_probe_device+0x96/0x1c0
[<ffffffff81373fcb>] __driver_attach+0xab/0xb0
[<ffffffff81372a1e>] bus_for_each_dev+0x5e/0x90
[<ffffffff81373a2e>] driver_attach+0x1e/0x20
[<ffffffff813735b8>] bus_add_driver+0xc8/0x290
[<ffffffff813745b6>] driver_register+0x76/0x140
[<ffffffff812e0046>] __pci_register_driver+0x66/0xe0
[<ffffffffa001b03a>] serio_raw_poll+0x3a/0x60 [serio_raw]
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff810aa76b>] sys_init_module+0xfb/0x250
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
SOFTIRQ-ON-W at:
[<ffffffff8109adb7>] __lock_acquire+0x827/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff81571156>] _raw_spin_lock+0x36/0x70
[<ffffffffa000d212>] vmxnet3_alloc_intr_resources+0x22/0x230 [vmxnet3]
[<ffffffffa0014031>] vmxnet3_probe_device+0x5f6/0x15c5 [vmxnet3]
[<ffffffff812df67f>] local_pci_probe+0x5f/0xd0
[<ffffffff812dfde9>] pci_device_probe+0x119/0x120
[<ffffffff81373df6>] driver_probe_device+0x96/0x1c0
[<ffffffff81373fcb>] __driver_attach+0xab/0xb0
[<ffffffff81372a1e>] bus_for_each_dev+0x5e/0x90
[<ffffffff81373a2e>] driver_attach+0x1e/0x20
[<ffffffff813735b8>] bus_add_driver+0xc8/0x290
[<ffffffff813745b6>] driver_register+0x76/0x140
[<ffffffff812e0046>] __pci_register_driver+0x66/0xe0
[<ffffffffa001b03a>] serio_raw_poll+0x3a/0x60 [serio_raw]
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff810aa76b>] sys_init_module+0xfb/0x250
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
INITIAL USE at:
[<ffffffff8109a9e9>] __lock_acquire+0x459/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff81571156>] _raw_spin_lock+0x36/0x70
[<ffffffffa000d212>] vmxnet3_alloc_intr_resources+0x22/0x230 [vmxnet3]
[<ffffffffa0014031>] vmxnet3_probe_device+0x5f6/0x15c5 [vmxnet3]
[<ffffffff812df67f>] local_pci_probe+0x5f/0xd0
[<ffffffff812dfde9>] pci_device_probe+0x119/0x120
[<ffffffff81373df6>] driver_probe_device+0x96/0x1c0
[<ffffffff81373fcb>] __driver_attach+0xab/0xb0
[<ffffffff81372a1e>] bus_for_each_dev+0x5e/0x90
[<ffffffff81373a2e>] driver_attach+0x1e/0x20
[<ffffffff813735b8>] bus_add_driver+0xc8/0x290
[<ffffffff813745b6>] driver_register+0x76/0x140
[<ffffffff812e0046>] __pci_register_driver+0x66/0xe0
[<ffffffffa001b03a>] serio_raw_poll+0x3a/0x60 [serio_raw]
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff810aa76b>] sys_init_module+0xfb/0x250
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
}
... key at: [<ffffffffa0017590>] __key.42516+0x0/0xffffffffffffda70 [vmxnet3]
... acquired at:
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff81571bb5>] _raw_spin_lock_irqsave+0x55/0xa0
[<ffffffffa000de27>] vmxnet3_set_mc+0x97/0x1a0 [vmxnet3]
[<ffffffff8146ffa0>] __dev_set_rx_mode+0x40/0xb0
[<ffffffff81470040>] dev_set_rx_mode+0x30/0x50
[<ffffffff81470127>] __dev_open+0xc7/0x100
[<ffffffff814703c1>] __dev_change_flags+0xa1/0x180
[<ffffffff81470568>] dev_change_flags+0x28/0x70
[<ffffffff814da960>] devinet_ioctl+0x730/0x800
[<ffffffff814db508>] inet_ioctl+0x88/0xa0
[<ffffffff814541f0>] sock_do_ioctl+0x30/0x70
[<ffffffff814542a9>] sock_ioctl+0x79/0x2f0
[<ffffffff81188798>] do_vfs_ioctl+0x98/0x570
[<ffffffff81188d01>] sys_ioctl+0x91/0xa0
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
-> (_xmit_ETHER){+.....} ops: 6 {
HARDIRQ-ON-W at:
[<ffffffff8109ad86>] __lock_acquire+0x7f6/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81475618>] __dev_mc_add+0x38/0x90
[<ffffffff814756a0>] dev_mc_add+0x10/0x20
[<ffffffff81532c9e>] igmp6_group_added+0x10e/0x1b0
[<ffffffff81533f2d>] ipv6_dev_mc_inc+0x2cd/0x430
[<ffffffff81515e17>] ipv6_add_dev+0x357/0x450
[<ffffffff81519f27>] addrconf_notify+0x2f7/0xb10
[<ffffffff81575c1c>] notifier_call_chain+0x8c/0xc0
[<ffffffff81089586>] raw_notifier_call_chain+0x16/0x20
[<ffffffff814689b7>] call_netdevice_notifiers+0x37/0x70
[<ffffffff8146a944>] register_netdevice+0x244/0x2d0
[<ffffffff8146aa0f>] register_netdev+0x3f/0x60
[<ffffffffa001419b>] vmxnet3_probe_device+0x760/0x15c5 [vmxnet3]
[<ffffffff812df67f>] local_pci_probe+0x5f/0xd0
[<ffffffff812dfde9>] pci_device_probe+0x119/0x120
[<ffffffff81373df6>] driver_probe_device+0x96/0x1c0
[<ffffffff81373fcb>] __driver_attach+0xab/0xb0
[<ffffffff81372a1e>] bus_for_each_dev+0x5e/0x90
[<ffffffff81373a2e>] driver_attach+0x1e/0x20
[<ffffffff813735b8>] bus_add_driver+0xc8/0x290
[<ffffffff813745b6>] driver_register+0x76/0x140
[<ffffffff812e0046>] __pci_register_driver+0x66/0xe0
[<ffffffffa001b03a>] serio_raw_poll+0x3a/0x60 [serio_raw]
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff810aa76b>] sys_init_module+0xfb/0x250
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
INITIAL USE at:
[<ffffffff8109a9e9>] __lock_acquire+0x459/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81475618>] __dev_mc_add+0x38/0x90
[<ffffffff814756a0>] dev_mc_add+0x10/0x20
[<ffffffff81532c9e>] igmp6_group_added+0x10e/0x1b0
[<ffffffff81533f2d>] ipv6_dev_mc_inc+0x2cd/0x430
[<ffffffff81515e17>] ipv6_add_dev+0x357/0x450
[<ffffffff81519f27>] addrconf_notify+0x2f7/0xb10
[<ffffffff81575c1c>] notifier_call_chain+0x8c/0xc0
[<ffffffff81089586>] raw_notifier_call_chain+0x16/0x20
[<ffffffff814689b7>] call_netdevice_notifiers+0x37/0x70
[<ffffffff8146a944>] register_netdevice+0x244/0x2d0
[<ffffffff8146aa0f>] register_netdev+0x3f/0x60
[<ffffffffa001419b>] vmxnet3_probe_device+0x760/0x15c5 [vmxnet3]
[<ffffffff812df67f>] local_pci_probe+0x5f/0xd0
[<ffffffff812dfde9>] pci_device_probe+0x119/0x120
[<ffffffff81373df6>] driver_probe_device+0x96/0x1c0
[<ffffffff81373fcb>] __driver_attach+0xab/0xb0
[<ffffffff81372a1e>] bus_for_each_dev+0x5e/0x90
[<ffffffff81373a2e>] driver_attach+0x1e/0x20
[<ffffffff813735b8>] bus_add_driver+0xc8/0x290
[<ffffffff813745b6>] driver_register+0x76/0x140
[<ffffffff812e0046>] __pci_register_driver+0x66/0xe0
[<ffffffffa001b03a>] serio_raw_poll+0x3a/0x60 [serio_raw]
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff810aa76b>] sys_init_module+0xfb/0x250
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
}
... key at: [<ffffffff827fd868>] netdev_addr_lock_key+0x8/0x1e0
... acquired at:
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81475618>] __dev_mc_add+0x38/0x90
[<ffffffff814756a0>] dev_mc_add+0x10/0x20
[<ffffffff81532c9e>] igmp6_group_added+0x10e/0x1b0
[<ffffffff81533f2d>] ipv6_dev_mc_inc+0x2cd/0x430
[<ffffffff81515e17>] ipv6_add_dev+0x357/0x450
[<ffffffff81519f27>] addrconf_notify+0x2f7/0xb10
[<ffffffff81575c1c>] notifier_call_chain+0x8c/0xc0
[<ffffffff81089586>] raw_notifier_call_chain+0x16/0x20
[<ffffffff814689b7>] call_netdevice_notifiers+0x37/0x70
[<ffffffff8146a944>] register_netdevice+0x244/0x2d0
[<ffffffff8146aa0f>] register_netdev+0x3f/0x60
[<ffffffffa001419b>] vmxnet3_probe_device+0x760/0x15c5 [vmxnet3]
[<ffffffff812df67f>] local_pci_probe+0x5f/0xd0
[<ffffffff812dfde9>] pci_device_probe+0x119/0x120
[<ffffffff81373df6>] driver_probe_device+0x96/0x1c0
[<ffffffff81373fcb>] __driver_attach+0xab/0xb0
[<ffffffff81372a1e>] bus_for_each_dev+0x5e/0x90
[<ffffffff81373a2e>] driver_attach+0x1e/0x20
[<ffffffff813735b8>] bus_add_driver+0xc8/0x290
[<ffffffff813745b6>] driver_register+0x76/0x140
[<ffffffff812e0046>] __pci_register_driver+0x66/0xe0
[<ffffffffa001b03a>] serio_raw_poll+0x3a/0x60 [serio_raw]
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff810aa76b>] sys_init_module+0xfb/0x250
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
-> (&(&mc->mca_lock)->rlock){+.-...} ops: 6 {
HARDIRQ-ON-W at:
[<ffffffff8109ad86>] __lock_acquire+0x7f6/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81532bd5>] igmp6_group_added+0x45/0x1b0
[<ffffffff81533f2d>] ipv6_dev_mc_inc+0x2cd/0x430
[<ffffffff81515e17>] ipv6_add_dev+0x357/0x450
[<ffffffff81ce0d16>] addrconf_init+0x4e/0x183
[<ffffffff81ce0ba1>] inet6_init+0x191/0x2a6
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff81ca4d3f>] kernel_init+0xe3/0x168
[<ffffffff8157b2e4>] kernel_thread_helper+0x4/0x10
IN-SOFTIRQ-W at:
[<ffffffff8109ad5e>] __lock_acquire+0x7ce/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81531e9f>] mld_ifc_timer_expire+0xff/0x280
[<ffffffff8106f2a9>] run_timer_softirq+0x179/0x3f0
[<ffffffff810666d0>] __do_softirq+0xc0/0x210
[<ffffffff8157b3dc>] call_softirq+0x1c/0x30
[<ffffffff8100d42d>] do_softirq+0xad/0xe0
[<ffffffff81066afe>] irq_exit+0x9e/0xb0
[<ffffffff8157bd40>] smp_apic_timer_interrupt+0x70/0x9b
[<ffffffff8157ab93>] apic_timer_interrupt+0x13/0x20
[<ffffffff8149d857>] rt_do_flush+0x87/0x2a0
[<ffffffff814a16b6>] rt_cache_flush+0x46/0x60
[<ffffffff814e36e0>] fib_disable_ip+0x40/0x60
[<ffffffff814e5447>] fib_inetaddr_event+0xd7/0xe0
[<ffffffff81575c1c>] notifier_call_chain+0x8c/0xc0
[<ffffffff810896e8>] __blocking_notifier_call_chain+0x78/0xb0
[<ffffffff81089736>] blocking_notifier_call_chain+0x16/0x20
[<ffffffff814d8021>] __inet_del_ifa+0xf1/0x2e0
[<ffffffff814d8223>] inet_del_ifa+0x13/0x20
[<ffffffff814da731>] devinet_ioctl+0x501/0x800
[<ffffffff814db508>] inet_ioctl+0x88/0xa0
[<ffffffff814541f0>] sock_do_ioctl+0x30/0x70
[<ffffffff814542a9>] sock_ioctl+0x79/0x2f0
[<ffffffff81188798>] do_vfs_ioctl+0x98/0x570
[<ffffffff81188d01>] sys_ioctl+0x91/0xa0
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
INITIAL USE at:
[<ffffffff8109a9e9>] __lock_acquire+0x459/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81532bd5>] igmp6_group_added+0x45/0x1b0
[<ffffffff81533f2d>] ipv6_dev_mc_inc+0x2cd/0x430
[<ffffffff81515e17>] ipv6_add_dev+0x357/0x450
[<ffffffff81ce0d16>] addrconf_init+0x4e/0x183
[<ffffffff81ce0ba1>] inet6_init+0x191/0x2a6
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff81ca4d3f>] kernel_init+0xe3/0x168
[<ffffffff8157b2e4>] kernel_thread_helper+0x4/0x10
}
... key at: [<ffffffff82801be2>] __key.40877+0x0/0x8
... acquired at:
[<ffffffff810997bc>] check_usage_forwards+0x9c/0x110
[<ffffffff8109a32c>] mark_lock+0x19c/0x400
[<ffffffff8109ad5e>] __lock_acquire+0x7ce/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81531e9f>] mld_ifc_timer_expire+0xff/0x280
[<ffffffff8106f2a9>] run_timer_softirq+0x179/0x3f0
[<ffffffff810666d0>] __do_softirq+0xc0/0x210
[<ffffffff8157b3dc>] call_softirq+0x1c/0x30
[<ffffffff8100d42d>] do_softirq+0xad/0xe0
[<ffffffff81066afe>] irq_exit+0x9e/0xb0
[<ffffffff8157bd40>] smp_apic_timer_interrupt+0x70/0x9b
[<ffffffff8157ab93>] apic_timer_interrupt+0x13/0x20
[<ffffffff8149d857>] rt_do_flush+0x87/0x2a0
[<ffffffff814a16b6>] rt_cache_flush+0x46/0x60
[<ffffffff814e36e0>] fib_disable_ip+0x40/0x60
[<ffffffff814e5447>] fib_inetaddr_event+0xd7/0xe0
[<ffffffff81575c1c>] notifier_call_chain+0x8c/0xc0
[<ffffffff810896e8>] __blocking_notifier_call_chain+0x78/0xb0
[<ffffffff81089736>] blocking_notifier_call_chain+0x16/0x20
[<ffffffff814d8021>] __inet_del_ifa+0xf1/0x2e0
[<ffffffff814d8223>] inet_del_ifa+0x13/0x20
[<ffffffff814da731>] devinet_ioctl+0x501/0x800
[<ffffffff814db508>] inet_ioctl+0x88/0xa0
[<ffffffff814541f0>] sock_do_ioctl+0x30/0x70
[<ffffffff814542a9>] sock_ioctl+0x79/0x2f0
[<ffffffff81188798>] do_vfs_ioctl+0x98/0x570
[<ffffffff81188d01>] sys_ioctl+0x91/0xa0
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
stack backtrace:
Pid: 567, comm: ifconfig Not tainted 2.6.39-rc6+ #1
Call Trace:
<IRQ> [<ffffffff810996f6>] print_irq_inversion_bug+0x146/0x170
[<ffffffff81099720>] ? print_irq_inversion_bug+0x170/0x170
[<ffffffff810997bc>] check_usage_forwards+0x9c/0x110
[<ffffffff8109a32c>] mark_lock+0x19c/0x400
[<ffffffff8109ad5e>] __lock_acquire+0x7ce/0x1e10
[<ffffffff8109a383>] ? mark_lock+0x1f3/0x400
[<ffffffff8109b497>] ? __lock_acquire+0xf07/0x1e10
[<ffffffff81012255>] ? native_sched_clock+0x15/0x70
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff81531e9f>] ? mld_ifc_timer_expire+0xff/0x280
[<ffffffff8109759d>] ? lock_release_holdtime+0x3d/0x1a0
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81531e9f>] ? mld_ifc_timer_expire+0xff/0x280
[<ffffffff8157170b>] ? _raw_spin_unlock+0x2b/0x40
[<ffffffff81531e9f>] mld_ifc_timer_expire+0xff/0x280
[<ffffffff8106f2a9>] run_timer_softirq+0x179/0x3f0
[<ffffffff8106f21b>] ? run_timer_softirq+0xeb/0x3f0
[<ffffffff810122b9>] ? sched_clock+0x9/0x10
[<ffffffff81531da0>] ? mld_gq_timer_expire+0x30/0x30
[<ffffffff810666d0>] __do_softirq+0xc0/0x210
[<ffffffff8109455f>] ? tick_program_event+0x1f/0x30
[<ffffffff8157b3dc>] call_softirq+0x1c/0x30
[<ffffffff8100d42d>] do_softirq+0xad/0xe0
[<ffffffff81066afe>] irq_exit+0x9e/0xb0
[<ffffffff8157bd40>] smp_apic_timer_interrupt+0x70/0x9b
[<ffffffff8157ab93>] apic_timer_interrupt+0x13/0x20
<EOI> [<ffffffff81571f14>] ? retint_restore_args+0x13/0x13
[<ffffffff810974a7>] ? lock_is_held+0x17/0xd0
[<ffffffff8149d857>] rt_do_flush+0x87/0x2a0
[<ffffffff814a16b6>] rt_cache_flush+0x46/0x60
[<ffffffff814e36e0>] fib_disable_ip+0x40/0x60
[<ffffffff814e5447>] fib_inetaddr_event+0xd7/0xe0
[<ffffffff81575c1c>] notifier_call_chain+0x8c/0xc0
[<ffffffff810896e8>] __blocking_notifier_call_chain+0x78/0xb0
[<ffffffff81089736>] blocking_notifier_call_chain+0x16/0x20
[<ffffffff814d8021>] __inet_del_ifa+0xf1/0x2e0
[<ffffffff814d8223>] inet_del_ifa+0x13/0x20
[<ffffffff814da731>] devinet_ioctl+0x501/0x800
[<ffffffff8108a3af>] ? local_clock+0x6f/0x80
[<ffffffff81575898>] ? do_page_fault+0x268/0x560
[<ffffffff814db508>] inet_ioctl+0x88/0xa0
[<ffffffff814541f0>] sock_do_ioctl+0x30/0x70
[<ffffffff814542a9>] sock_ioctl+0x79/0x2f0
[<ffffffff810dfe87>] ? __call_rcu+0xa7/0x190
[<ffffffff81188798>] do_vfs_ioctl+0x98/0x570
[<ffffffff8117737e>] ? fget_light+0x33e/0x430
[<ffffffff81571ef9>] ? retint_swapgs+0x13/0x1b
[<ffffffff81188d01>] sys_ioctl+0x91/0xa0
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
Signed-off-by: Roland Dreier <roland@purestorage.com>
Signed-off-by: Shreyas N Bhatewara <sbhatewara@vmware.com>
Signed-off-by: Scott J. Goldman <scottjg@vmware.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2011-05-06 16:32:53 +08:00
|
|
|
unsigned long flags;
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
/* intr settings */
|
vmxnet3: Consistently disable irqs when taking adapter->cmd_lock
Using the vmxnet3 driver produces a lockdep warning because
vmxnet3_set_mc(), which is called with mc->mca_lock held, takes
adapter->cmd_lock. However, there are a couple of places where
adapter->cmd_lock is taken with softirqs enabled, so lockdep warns that a
softirq that tries to take mc->mca_lock could happen while
adapter->cmd_lock is held, leading to an AB-BA deadlock.
I'm not sure if this is a real potential deadlock or not, but the
simplest and best fix seems to be simply to make sure we take cmd_lock
with spin_lock_irqsave() everywhere -- the places with plain spin_lock
just look like oversights.
The full enormous lockdep warning is:
=========================================================
[ INFO: possible irq lock inversion dependency detected ]
2.6.39-rc6+ #1
---------------------------------------------------------
ifconfig/567 just changed the state of lock:
(&(&mc->mca_lock)->rlock){+.-...}, at: [<ffffffff81531e9f>] mld_ifc_timer_expire+0xff/0x280
but this lock took another, SOFTIRQ-unsafe lock in the past:
(&(&adapter->cmd_lock)->rlock){+.+...}
and interrupts could create inverse lock ordering between them.
other info that might help us debug this:
4 locks held by ifconfig/567:
#0: (rtnl_mutex){+.+.+.}, at: [<ffffffff8147d547>] rtnl_lock+0x17/0x20
#1: ((inetaddr_chain).rwsem){.+.+.+}, at: [<ffffffff810896cf>] __blocking_notifier_call_chain+0x5f/0xb0
#2: (&idev->mc_ifc_timer){+.-...}, at: [<ffffffff8106f21b>] run_timer_softirq+0xeb/0x3f0
#3: (&ndev->lock){++.-..}, at: [<ffffffff81531dd2>] mld_ifc_timer_expire+0x32/0x280
the shortest dependencies between 2nd lock and 1st lock:
-> (&(&adapter->cmd_lock)->rlock){+.+...} ops: 11 {
HARDIRQ-ON-W at:
[<ffffffff8109ad86>] __lock_acquire+0x7f6/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff81571156>] _raw_spin_lock+0x36/0x70
[<ffffffffa000d212>] vmxnet3_alloc_intr_resources+0x22/0x230 [vmxnet3]
[<ffffffffa0014031>] vmxnet3_probe_device+0x5f6/0x15c5 [vmxnet3]
[<ffffffff812df67f>] local_pci_probe+0x5f/0xd0
[<ffffffff812dfde9>] pci_device_probe+0x119/0x120
[<ffffffff81373df6>] driver_probe_device+0x96/0x1c0
[<ffffffff81373fcb>] __driver_attach+0xab/0xb0
[<ffffffff81372a1e>] bus_for_each_dev+0x5e/0x90
[<ffffffff81373a2e>] driver_attach+0x1e/0x20
[<ffffffff813735b8>] bus_add_driver+0xc8/0x290
[<ffffffff813745b6>] driver_register+0x76/0x140
[<ffffffff812e0046>] __pci_register_driver+0x66/0xe0
[<ffffffffa001b03a>] serio_raw_poll+0x3a/0x60 [serio_raw]
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff810aa76b>] sys_init_module+0xfb/0x250
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
SOFTIRQ-ON-W at:
[<ffffffff8109adb7>] __lock_acquire+0x827/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff81571156>] _raw_spin_lock+0x36/0x70
[<ffffffffa000d212>] vmxnet3_alloc_intr_resources+0x22/0x230 [vmxnet3]
[<ffffffffa0014031>] vmxnet3_probe_device+0x5f6/0x15c5 [vmxnet3]
[<ffffffff812df67f>] local_pci_probe+0x5f/0xd0
[<ffffffff812dfde9>] pci_device_probe+0x119/0x120
[<ffffffff81373df6>] driver_probe_device+0x96/0x1c0
[<ffffffff81373fcb>] __driver_attach+0xab/0xb0
[<ffffffff81372a1e>] bus_for_each_dev+0x5e/0x90
[<ffffffff81373a2e>] driver_attach+0x1e/0x20
[<ffffffff813735b8>] bus_add_driver+0xc8/0x290
[<ffffffff813745b6>] driver_register+0x76/0x140
[<ffffffff812e0046>] __pci_register_driver+0x66/0xe0
[<ffffffffa001b03a>] serio_raw_poll+0x3a/0x60 [serio_raw]
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff810aa76b>] sys_init_module+0xfb/0x250
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
INITIAL USE at:
[<ffffffff8109a9e9>] __lock_acquire+0x459/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff81571156>] _raw_spin_lock+0x36/0x70
[<ffffffffa000d212>] vmxnet3_alloc_intr_resources+0x22/0x230 [vmxnet3]
[<ffffffffa0014031>] vmxnet3_probe_device+0x5f6/0x15c5 [vmxnet3]
[<ffffffff812df67f>] local_pci_probe+0x5f/0xd0
[<ffffffff812dfde9>] pci_device_probe+0x119/0x120
[<ffffffff81373df6>] driver_probe_device+0x96/0x1c0
[<ffffffff81373fcb>] __driver_attach+0xab/0xb0
[<ffffffff81372a1e>] bus_for_each_dev+0x5e/0x90
[<ffffffff81373a2e>] driver_attach+0x1e/0x20
[<ffffffff813735b8>] bus_add_driver+0xc8/0x290
[<ffffffff813745b6>] driver_register+0x76/0x140
[<ffffffff812e0046>] __pci_register_driver+0x66/0xe0
[<ffffffffa001b03a>] serio_raw_poll+0x3a/0x60 [serio_raw]
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff810aa76b>] sys_init_module+0xfb/0x250
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
}
... key at: [<ffffffffa0017590>] __key.42516+0x0/0xffffffffffffda70 [vmxnet3]
... acquired at:
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff81571bb5>] _raw_spin_lock_irqsave+0x55/0xa0
[<ffffffffa000de27>] vmxnet3_set_mc+0x97/0x1a0 [vmxnet3]
[<ffffffff8146ffa0>] __dev_set_rx_mode+0x40/0xb0
[<ffffffff81470040>] dev_set_rx_mode+0x30/0x50
[<ffffffff81470127>] __dev_open+0xc7/0x100
[<ffffffff814703c1>] __dev_change_flags+0xa1/0x180
[<ffffffff81470568>] dev_change_flags+0x28/0x70
[<ffffffff814da960>] devinet_ioctl+0x730/0x800
[<ffffffff814db508>] inet_ioctl+0x88/0xa0
[<ffffffff814541f0>] sock_do_ioctl+0x30/0x70
[<ffffffff814542a9>] sock_ioctl+0x79/0x2f0
[<ffffffff81188798>] do_vfs_ioctl+0x98/0x570
[<ffffffff81188d01>] sys_ioctl+0x91/0xa0
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
-> (_xmit_ETHER){+.....} ops: 6 {
HARDIRQ-ON-W at:
[<ffffffff8109ad86>] __lock_acquire+0x7f6/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81475618>] __dev_mc_add+0x38/0x90
[<ffffffff814756a0>] dev_mc_add+0x10/0x20
[<ffffffff81532c9e>] igmp6_group_added+0x10e/0x1b0
[<ffffffff81533f2d>] ipv6_dev_mc_inc+0x2cd/0x430
[<ffffffff81515e17>] ipv6_add_dev+0x357/0x450
[<ffffffff81519f27>] addrconf_notify+0x2f7/0xb10
[<ffffffff81575c1c>] notifier_call_chain+0x8c/0xc0
[<ffffffff81089586>] raw_notifier_call_chain+0x16/0x20
[<ffffffff814689b7>] call_netdevice_notifiers+0x37/0x70
[<ffffffff8146a944>] register_netdevice+0x244/0x2d0
[<ffffffff8146aa0f>] register_netdev+0x3f/0x60
[<ffffffffa001419b>] vmxnet3_probe_device+0x760/0x15c5 [vmxnet3]
[<ffffffff812df67f>] local_pci_probe+0x5f/0xd0
[<ffffffff812dfde9>] pci_device_probe+0x119/0x120
[<ffffffff81373df6>] driver_probe_device+0x96/0x1c0
[<ffffffff81373fcb>] __driver_attach+0xab/0xb0
[<ffffffff81372a1e>] bus_for_each_dev+0x5e/0x90
[<ffffffff81373a2e>] driver_attach+0x1e/0x20
[<ffffffff813735b8>] bus_add_driver+0xc8/0x290
[<ffffffff813745b6>] driver_register+0x76/0x140
[<ffffffff812e0046>] __pci_register_driver+0x66/0xe0
[<ffffffffa001b03a>] serio_raw_poll+0x3a/0x60 [serio_raw]
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff810aa76b>] sys_init_module+0xfb/0x250
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
INITIAL USE at:
[<ffffffff8109a9e9>] __lock_acquire+0x459/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81475618>] __dev_mc_add+0x38/0x90
[<ffffffff814756a0>] dev_mc_add+0x10/0x20
[<ffffffff81532c9e>] igmp6_group_added+0x10e/0x1b0
[<ffffffff81533f2d>] ipv6_dev_mc_inc+0x2cd/0x430
[<ffffffff81515e17>] ipv6_add_dev+0x357/0x450
[<ffffffff81519f27>] addrconf_notify+0x2f7/0xb10
[<ffffffff81575c1c>] notifier_call_chain+0x8c/0xc0
[<ffffffff81089586>] raw_notifier_call_chain+0x16/0x20
[<ffffffff814689b7>] call_netdevice_notifiers+0x37/0x70
[<ffffffff8146a944>] register_netdevice+0x244/0x2d0
[<ffffffff8146aa0f>] register_netdev+0x3f/0x60
[<ffffffffa001419b>] vmxnet3_probe_device+0x760/0x15c5 [vmxnet3]
[<ffffffff812df67f>] local_pci_probe+0x5f/0xd0
[<ffffffff812dfde9>] pci_device_probe+0x119/0x120
[<ffffffff81373df6>] driver_probe_device+0x96/0x1c0
[<ffffffff81373fcb>] __driver_attach+0xab/0xb0
[<ffffffff81372a1e>] bus_for_each_dev+0x5e/0x90
[<ffffffff81373a2e>] driver_attach+0x1e/0x20
[<ffffffff813735b8>] bus_add_driver+0xc8/0x290
[<ffffffff813745b6>] driver_register+0x76/0x140
[<ffffffff812e0046>] __pci_register_driver+0x66/0xe0
[<ffffffffa001b03a>] serio_raw_poll+0x3a/0x60 [serio_raw]
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff810aa76b>] sys_init_module+0xfb/0x250
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
}
... key at: [<ffffffff827fd868>] netdev_addr_lock_key+0x8/0x1e0
... acquired at:
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81475618>] __dev_mc_add+0x38/0x90
[<ffffffff814756a0>] dev_mc_add+0x10/0x20
[<ffffffff81532c9e>] igmp6_group_added+0x10e/0x1b0
[<ffffffff81533f2d>] ipv6_dev_mc_inc+0x2cd/0x430
[<ffffffff81515e17>] ipv6_add_dev+0x357/0x450
[<ffffffff81519f27>] addrconf_notify+0x2f7/0xb10
[<ffffffff81575c1c>] notifier_call_chain+0x8c/0xc0
[<ffffffff81089586>] raw_notifier_call_chain+0x16/0x20
[<ffffffff814689b7>] call_netdevice_notifiers+0x37/0x70
[<ffffffff8146a944>] register_netdevice+0x244/0x2d0
[<ffffffff8146aa0f>] register_netdev+0x3f/0x60
[<ffffffffa001419b>] vmxnet3_probe_device+0x760/0x15c5 [vmxnet3]
[<ffffffff812df67f>] local_pci_probe+0x5f/0xd0
[<ffffffff812dfde9>] pci_device_probe+0x119/0x120
[<ffffffff81373df6>] driver_probe_device+0x96/0x1c0
[<ffffffff81373fcb>] __driver_attach+0xab/0xb0
[<ffffffff81372a1e>] bus_for_each_dev+0x5e/0x90
[<ffffffff81373a2e>] driver_attach+0x1e/0x20
[<ffffffff813735b8>] bus_add_driver+0xc8/0x290
[<ffffffff813745b6>] driver_register+0x76/0x140
[<ffffffff812e0046>] __pci_register_driver+0x66/0xe0
[<ffffffffa001b03a>] serio_raw_poll+0x3a/0x60 [serio_raw]
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff810aa76b>] sys_init_module+0xfb/0x250
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
-> (&(&mc->mca_lock)->rlock){+.-...} ops: 6 {
HARDIRQ-ON-W at:
[<ffffffff8109ad86>] __lock_acquire+0x7f6/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81532bd5>] igmp6_group_added+0x45/0x1b0
[<ffffffff81533f2d>] ipv6_dev_mc_inc+0x2cd/0x430
[<ffffffff81515e17>] ipv6_add_dev+0x357/0x450
[<ffffffff81ce0d16>] addrconf_init+0x4e/0x183
[<ffffffff81ce0ba1>] inet6_init+0x191/0x2a6
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff81ca4d3f>] kernel_init+0xe3/0x168
[<ffffffff8157b2e4>] kernel_thread_helper+0x4/0x10
IN-SOFTIRQ-W at:
[<ffffffff8109ad5e>] __lock_acquire+0x7ce/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81531e9f>] mld_ifc_timer_expire+0xff/0x280
[<ffffffff8106f2a9>] run_timer_softirq+0x179/0x3f0
[<ffffffff810666d0>] __do_softirq+0xc0/0x210
[<ffffffff8157b3dc>] call_softirq+0x1c/0x30
[<ffffffff8100d42d>] do_softirq+0xad/0xe0
[<ffffffff81066afe>] irq_exit+0x9e/0xb0
[<ffffffff8157bd40>] smp_apic_timer_interrupt+0x70/0x9b
[<ffffffff8157ab93>] apic_timer_interrupt+0x13/0x20
[<ffffffff8149d857>] rt_do_flush+0x87/0x2a0
[<ffffffff814a16b6>] rt_cache_flush+0x46/0x60
[<ffffffff814e36e0>] fib_disable_ip+0x40/0x60
[<ffffffff814e5447>] fib_inetaddr_event+0xd7/0xe0
[<ffffffff81575c1c>] notifier_call_chain+0x8c/0xc0
[<ffffffff810896e8>] __blocking_notifier_call_chain+0x78/0xb0
[<ffffffff81089736>] blocking_notifier_call_chain+0x16/0x20
[<ffffffff814d8021>] __inet_del_ifa+0xf1/0x2e0
[<ffffffff814d8223>] inet_del_ifa+0x13/0x20
[<ffffffff814da731>] devinet_ioctl+0x501/0x800
[<ffffffff814db508>] inet_ioctl+0x88/0xa0
[<ffffffff814541f0>] sock_do_ioctl+0x30/0x70
[<ffffffff814542a9>] sock_ioctl+0x79/0x2f0
[<ffffffff81188798>] do_vfs_ioctl+0x98/0x570
[<ffffffff81188d01>] sys_ioctl+0x91/0xa0
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
INITIAL USE at:
[<ffffffff8109a9e9>] __lock_acquire+0x459/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81532bd5>] igmp6_group_added+0x45/0x1b0
[<ffffffff81533f2d>] ipv6_dev_mc_inc+0x2cd/0x430
[<ffffffff81515e17>] ipv6_add_dev+0x357/0x450
[<ffffffff81ce0d16>] addrconf_init+0x4e/0x183
[<ffffffff81ce0ba1>] inet6_init+0x191/0x2a6
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff81ca4d3f>] kernel_init+0xe3/0x168
[<ffffffff8157b2e4>] kernel_thread_helper+0x4/0x10
}
... key at: [<ffffffff82801be2>] __key.40877+0x0/0x8
... acquired at:
[<ffffffff810997bc>] check_usage_forwards+0x9c/0x110
[<ffffffff8109a32c>] mark_lock+0x19c/0x400
[<ffffffff8109ad5e>] __lock_acquire+0x7ce/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81531e9f>] mld_ifc_timer_expire+0xff/0x280
[<ffffffff8106f2a9>] run_timer_softirq+0x179/0x3f0
[<ffffffff810666d0>] __do_softirq+0xc0/0x210
[<ffffffff8157b3dc>] call_softirq+0x1c/0x30
[<ffffffff8100d42d>] do_softirq+0xad/0xe0
[<ffffffff81066afe>] irq_exit+0x9e/0xb0
[<ffffffff8157bd40>] smp_apic_timer_interrupt+0x70/0x9b
[<ffffffff8157ab93>] apic_timer_interrupt+0x13/0x20
[<ffffffff8149d857>] rt_do_flush+0x87/0x2a0
[<ffffffff814a16b6>] rt_cache_flush+0x46/0x60
[<ffffffff814e36e0>] fib_disable_ip+0x40/0x60
[<ffffffff814e5447>] fib_inetaddr_event+0xd7/0xe0
[<ffffffff81575c1c>] notifier_call_chain+0x8c/0xc0
[<ffffffff810896e8>] __blocking_notifier_call_chain+0x78/0xb0
[<ffffffff81089736>] blocking_notifier_call_chain+0x16/0x20
[<ffffffff814d8021>] __inet_del_ifa+0xf1/0x2e0
[<ffffffff814d8223>] inet_del_ifa+0x13/0x20
[<ffffffff814da731>] devinet_ioctl+0x501/0x800
[<ffffffff814db508>] inet_ioctl+0x88/0xa0
[<ffffffff814541f0>] sock_do_ioctl+0x30/0x70
[<ffffffff814542a9>] sock_ioctl+0x79/0x2f0
[<ffffffff81188798>] do_vfs_ioctl+0x98/0x570
[<ffffffff81188d01>] sys_ioctl+0x91/0xa0
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
stack backtrace:
Pid: 567, comm: ifconfig Not tainted 2.6.39-rc6+ #1
Call Trace:
<IRQ> [<ffffffff810996f6>] print_irq_inversion_bug+0x146/0x170
[<ffffffff81099720>] ? print_irq_inversion_bug+0x170/0x170
[<ffffffff810997bc>] check_usage_forwards+0x9c/0x110
[<ffffffff8109a32c>] mark_lock+0x19c/0x400
[<ffffffff8109ad5e>] __lock_acquire+0x7ce/0x1e10
[<ffffffff8109a383>] ? mark_lock+0x1f3/0x400
[<ffffffff8109b497>] ? __lock_acquire+0xf07/0x1e10
[<ffffffff81012255>] ? native_sched_clock+0x15/0x70
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff81531e9f>] ? mld_ifc_timer_expire+0xff/0x280
[<ffffffff8109759d>] ? lock_release_holdtime+0x3d/0x1a0
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81531e9f>] ? mld_ifc_timer_expire+0xff/0x280
[<ffffffff8157170b>] ? _raw_spin_unlock+0x2b/0x40
[<ffffffff81531e9f>] mld_ifc_timer_expire+0xff/0x280
[<ffffffff8106f2a9>] run_timer_softirq+0x179/0x3f0
[<ffffffff8106f21b>] ? run_timer_softirq+0xeb/0x3f0
[<ffffffff810122b9>] ? sched_clock+0x9/0x10
[<ffffffff81531da0>] ? mld_gq_timer_expire+0x30/0x30
[<ffffffff810666d0>] __do_softirq+0xc0/0x210
[<ffffffff8109455f>] ? tick_program_event+0x1f/0x30
[<ffffffff8157b3dc>] call_softirq+0x1c/0x30
[<ffffffff8100d42d>] do_softirq+0xad/0xe0
[<ffffffff81066afe>] irq_exit+0x9e/0xb0
[<ffffffff8157bd40>] smp_apic_timer_interrupt+0x70/0x9b
[<ffffffff8157ab93>] apic_timer_interrupt+0x13/0x20
<EOI> [<ffffffff81571f14>] ? retint_restore_args+0x13/0x13
[<ffffffff810974a7>] ? lock_is_held+0x17/0xd0
[<ffffffff8149d857>] rt_do_flush+0x87/0x2a0
[<ffffffff814a16b6>] rt_cache_flush+0x46/0x60
[<ffffffff814e36e0>] fib_disable_ip+0x40/0x60
[<ffffffff814e5447>] fib_inetaddr_event+0xd7/0xe0
[<ffffffff81575c1c>] notifier_call_chain+0x8c/0xc0
[<ffffffff810896e8>] __blocking_notifier_call_chain+0x78/0xb0
[<ffffffff81089736>] blocking_notifier_call_chain+0x16/0x20
[<ffffffff814d8021>] __inet_del_ifa+0xf1/0x2e0
[<ffffffff814d8223>] inet_del_ifa+0x13/0x20
[<ffffffff814da731>] devinet_ioctl+0x501/0x800
[<ffffffff8108a3af>] ? local_clock+0x6f/0x80
[<ffffffff81575898>] ? do_page_fault+0x268/0x560
[<ffffffff814db508>] inet_ioctl+0x88/0xa0
[<ffffffff814541f0>] sock_do_ioctl+0x30/0x70
[<ffffffff814542a9>] sock_ioctl+0x79/0x2f0
[<ffffffff810dfe87>] ? __call_rcu+0xa7/0x190
[<ffffffff81188798>] do_vfs_ioctl+0x98/0x570
[<ffffffff8117737e>] ? fget_light+0x33e/0x430
[<ffffffff81571ef9>] ? retint_swapgs+0x13/0x1b
[<ffffffff81188d01>] sys_ioctl+0x91/0xa0
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
Signed-off-by: Roland Dreier <roland@purestorage.com>
Signed-off-by: Shreyas N Bhatewara <sbhatewara@vmware.com>
Signed-off-by: Scott J. Goldman <scottjg@vmware.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2011-05-06 16:32:53 +08:00
|
|
|
spin_lock_irqsave(&adapter->cmd_lock, flags);
|
2009-10-13 15:15:51 +08:00
|
|
|
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
|
|
|
|
VMXNET3_CMD_GET_CONF_INTR);
|
|
|
|
cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
|
vmxnet3: Consistently disable irqs when taking adapter->cmd_lock
Using the vmxnet3 driver produces a lockdep warning because
vmxnet3_set_mc(), which is called with mc->mca_lock held, takes
adapter->cmd_lock. However, there are a couple of places where
adapter->cmd_lock is taken with softirqs enabled, so lockdep warns that a
softirq that tries to take mc->mca_lock could happen while
adapter->cmd_lock is held, leading to an AB-BA deadlock.
I'm not sure if this is a real potential deadlock or not, but the
simplest and best fix seems to be simply to make sure we take cmd_lock
with spin_lock_irqsave() everywhere -- the places with plain spin_lock
just look like oversights.
The full enormous lockdep warning is:
=========================================================
[ INFO: possible irq lock inversion dependency detected ]
2.6.39-rc6+ #1
---------------------------------------------------------
ifconfig/567 just changed the state of lock:
(&(&mc->mca_lock)->rlock){+.-...}, at: [<ffffffff81531e9f>] mld_ifc_timer_expire+0xff/0x280
but this lock took another, SOFTIRQ-unsafe lock in the past:
(&(&adapter->cmd_lock)->rlock){+.+...}
and interrupts could create inverse lock ordering between them.
other info that might help us debug this:
4 locks held by ifconfig/567:
#0: (rtnl_mutex){+.+.+.}, at: [<ffffffff8147d547>] rtnl_lock+0x17/0x20
#1: ((inetaddr_chain).rwsem){.+.+.+}, at: [<ffffffff810896cf>] __blocking_notifier_call_chain+0x5f/0xb0
#2: (&idev->mc_ifc_timer){+.-...}, at: [<ffffffff8106f21b>] run_timer_softirq+0xeb/0x3f0
#3: (&ndev->lock){++.-..}, at: [<ffffffff81531dd2>] mld_ifc_timer_expire+0x32/0x280
the shortest dependencies between 2nd lock and 1st lock:
-> (&(&adapter->cmd_lock)->rlock){+.+...} ops: 11 {
HARDIRQ-ON-W at:
[<ffffffff8109ad86>] __lock_acquire+0x7f6/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff81571156>] _raw_spin_lock+0x36/0x70
[<ffffffffa000d212>] vmxnet3_alloc_intr_resources+0x22/0x230 [vmxnet3]
[<ffffffffa0014031>] vmxnet3_probe_device+0x5f6/0x15c5 [vmxnet3]
[<ffffffff812df67f>] local_pci_probe+0x5f/0xd0
[<ffffffff812dfde9>] pci_device_probe+0x119/0x120
[<ffffffff81373df6>] driver_probe_device+0x96/0x1c0
[<ffffffff81373fcb>] __driver_attach+0xab/0xb0
[<ffffffff81372a1e>] bus_for_each_dev+0x5e/0x90
[<ffffffff81373a2e>] driver_attach+0x1e/0x20
[<ffffffff813735b8>] bus_add_driver+0xc8/0x290
[<ffffffff813745b6>] driver_register+0x76/0x140
[<ffffffff812e0046>] __pci_register_driver+0x66/0xe0
[<ffffffffa001b03a>] serio_raw_poll+0x3a/0x60 [serio_raw]
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff810aa76b>] sys_init_module+0xfb/0x250
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
SOFTIRQ-ON-W at:
[<ffffffff8109adb7>] __lock_acquire+0x827/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff81571156>] _raw_spin_lock+0x36/0x70
[<ffffffffa000d212>] vmxnet3_alloc_intr_resources+0x22/0x230 [vmxnet3]
[<ffffffffa0014031>] vmxnet3_probe_device+0x5f6/0x15c5 [vmxnet3]
[<ffffffff812df67f>] local_pci_probe+0x5f/0xd0
[<ffffffff812dfde9>] pci_device_probe+0x119/0x120
[<ffffffff81373df6>] driver_probe_device+0x96/0x1c0
[<ffffffff81373fcb>] __driver_attach+0xab/0xb0
[<ffffffff81372a1e>] bus_for_each_dev+0x5e/0x90
[<ffffffff81373a2e>] driver_attach+0x1e/0x20
[<ffffffff813735b8>] bus_add_driver+0xc8/0x290
[<ffffffff813745b6>] driver_register+0x76/0x140
[<ffffffff812e0046>] __pci_register_driver+0x66/0xe0
[<ffffffffa001b03a>] serio_raw_poll+0x3a/0x60 [serio_raw]
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff810aa76b>] sys_init_module+0xfb/0x250
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
INITIAL USE at:
[<ffffffff8109a9e9>] __lock_acquire+0x459/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff81571156>] _raw_spin_lock+0x36/0x70
[<ffffffffa000d212>] vmxnet3_alloc_intr_resources+0x22/0x230 [vmxnet3]
[<ffffffffa0014031>] vmxnet3_probe_device+0x5f6/0x15c5 [vmxnet3]
[<ffffffff812df67f>] local_pci_probe+0x5f/0xd0
[<ffffffff812dfde9>] pci_device_probe+0x119/0x120
[<ffffffff81373df6>] driver_probe_device+0x96/0x1c0
[<ffffffff81373fcb>] __driver_attach+0xab/0xb0
[<ffffffff81372a1e>] bus_for_each_dev+0x5e/0x90
[<ffffffff81373a2e>] driver_attach+0x1e/0x20
[<ffffffff813735b8>] bus_add_driver+0xc8/0x290
[<ffffffff813745b6>] driver_register+0x76/0x140
[<ffffffff812e0046>] __pci_register_driver+0x66/0xe0
[<ffffffffa001b03a>] serio_raw_poll+0x3a/0x60 [serio_raw]
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff810aa76b>] sys_init_module+0xfb/0x250
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
}
... key at: [<ffffffffa0017590>] __key.42516+0x0/0xffffffffffffda70 [vmxnet3]
... acquired at:
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff81571bb5>] _raw_spin_lock_irqsave+0x55/0xa0
[<ffffffffa000de27>] vmxnet3_set_mc+0x97/0x1a0 [vmxnet3]
[<ffffffff8146ffa0>] __dev_set_rx_mode+0x40/0xb0
[<ffffffff81470040>] dev_set_rx_mode+0x30/0x50
[<ffffffff81470127>] __dev_open+0xc7/0x100
[<ffffffff814703c1>] __dev_change_flags+0xa1/0x180
[<ffffffff81470568>] dev_change_flags+0x28/0x70
[<ffffffff814da960>] devinet_ioctl+0x730/0x800
[<ffffffff814db508>] inet_ioctl+0x88/0xa0
[<ffffffff814541f0>] sock_do_ioctl+0x30/0x70
[<ffffffff814542a9>] sock_ioctl+0x79/0x2f0
[<ffffffff81188798>] do_vfs_ioctl+0x98/0x570
[<ffffffff81188d01>] sys_ioctl+0x91/0xa0
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
-> (_xmit_ETHER){+.....} ops: 6 {
HARDIRQ-ON-W at:
[<ffffffff8109ad86>] __lock_acquire+0x7f6/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81475618>] __dev_mc_add+0x38/0x90
[<ffffffff814756a0>] dev_mc_add+0x10/0x20
[<ffffffff81532c9e>] igmp6_group_added+0x10e/0x1b0
[<ffffffff81533f2d>] ipv6_dev_mc_inc+0x2cd/0x430
[<ffffffff81515e17>] ipv6_add_dev+0x357/0x450
[<ffffffff81519f27>] addrconf_notify+0x2f7/0xb10
[<ffffffff81575c1c>] notifier_call_chain+0x8c/0xc0
[<ffffffff81089586>] raw_notifier_call_chain+0x16/0x20
[<ffffffff814689b7>] call_netdevice_notifiers+0x37/0x70
[<ffffffff8146a944>] register_netdevice+0x244/0x2d0
[<ffffffff8146aa0f>] register_netdev+0x3f/0x60
[<ffffffffa001419b>] vmxnet3_probe_device+0x760/0x15c5 [vmxnet3]
[<ffffffff812df67f>] local_pci_probe+0x5f/0xd0
[<ffffffff812dfde9>] pci_device_probe+0x119/0x120
[<ffffffff81373df6>] driver_probe_device+0x96/0x1c0
[<ffffffff81373fcb>] __driver_attach+0xab/0xb0
[<ffffffff81372a1e>] bus_for_each_dev+0x5e/0x90
[<ffffffff81373a2e>] driver_attach+0x1e/0x20
[<ffffffff813735b8>] bus_add_driver+0xc8/0x290
[<ffffffff813745b6>] driver_register+0x76/0x140
[<ffffffff812e0046>] __pci_register_driver+0x66/0xe0
[<ffffffffa001b03a>] serio_raw_poll+0x3a/0x60 [serio_raw]
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff810aa76b>] sys_init_module+0xfb/0x250
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
INITIAL USE at:
[<ffffffff8109a9e9>] __lock_acquire+0x459/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81475618>] __dev_mc_add+0x38/0x90
[<ffffffff814756a0>] dev_mc_add+0x10/0x20
[<ffffffff81532c9e>] igmp6_group_added+0x10e/0x1b0
[<ffffffff81533f2d>] ipv6_dev_mc_inc+0x2cd/0x430
[<ffffffff81515e17>] ipv6_add_dev+0x357/0x450
[<ffffffff81519f27>] addrconf_notify+0x2f7/0xb10
[<ffffffff81575c1c>] notifier_call_chain+0x8c/0xc0
[<ffffffff81089586>] raw_notifier_call_chain+0x16/0x20
[<ffffffff814689b7>] call_netdevice_notifiers+0x37/0x70
[<ffffffff8146a944>] register_netdevice+0x244/0x2d0
[<ffffffff8146aa0f>] register_netdev+0x3f/0x60
[<ffffffffa001419b>] vmxnet3_probe_device+0x760/0x15c5 [vmxnet3]
[<ffffffff812df67f>] local_pci_probe+0x5f/0xd0
[<ffffffff812dfde9>] pci_device_probe+0x119/0x120
[<ffffffff81373df6>] driver_probe_device+0x96/0x1c0
[<ffffffff81373fcb>] __driver_attach+0xab/0xb0
[<ffffffff81372a1e>] bus_for_each_dev+0x5e/0x90
[<ffffffff81373a2e>] driver_attach+0x1e/0x20
[<ffffffff813735b8>] bus_add_driver+0xc8/0x290
[<ffffffff813745b6>] driver_register+0x76/0x140
[<ffffffff812e0046>] __pci_register_driver+0x66/0xe0
[<ffffffffa001b03a>] serio_raw_poll+0x3a/0x60 [serio_raw]
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff810aa76b>] sys_init_module+0xfb/0x250
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
}
... key at: [<ffffffff827fd868>] netdev_addr_lock_key+0x8/0x1e0
... acquired at:
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81475618>] __dev_mc_add+0x38/0x90
[<ffffffff814756a0>] dev_mc_add+0x10/0x20
[<ffffffff81532c9e>] igmp6_group_added+0x10e/0x1b0
[<ffffffff81533f2d>] ipv6_dev_mc_inc+0x2cd/0x430
[<ffffffff81515e17>] ipv6_add_dev+0x357/0x450
[<ffffffff81519f27>] addrconf_notify+0x2f7/0xb10
[<ffffffff81575c1c>] notifier_call_chain+0x8c/0xc0
[<ffffffff81089586>] raw_notifier_call_chain+0x16/0x20
[<ffffffff814689b7>] call_netdevice_notifiers+0x37/0x70
[<ffffffff8146a944>] register_netdevice+0x244/0x2d0
[<ffffffff8146aa0f>] register_netdev+0x3f/0x60
[<ffffffffa001419b>] vmxnet3_probe_device+0x760/0x15c5 [vmxnet3]
[<ffffffff812df67f>] local_pci_probe+0x5f/0xd0
[<ffffffff812dfde9>] pci_device_probe+0x119/0x120
[<ffffffff81373df6>] driver_probe_device+0x96/0x1c0
[<ffffffff81373fcb>] __driver_attach+0xab/0xb0
[<ffffffff81372a1e>] bus_for_each_dev+0x5e/0x90
[<ffffffff81373a2e>] driver_attach+0x1e/0x20
[<ffffffff813735b8>] bus_add_driver+0xc8/0x290
[<ffffffff813745b6>] driver_register+0x76/0x140
[<ffffffff812e0046>] __pci_register_driver+0x66/0xe0
[<ffffffffa001b03a>] serio_raw_poll+0x3a/0x60 [serio_raw]
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff810aa76b>] sys_init_module+0xfb/0x250
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
-> (&(&mc->mca_lock)->rlock){+.-...} ops: 6 {
HARDIRQ-ON-W at:
[<ffffffff8109ad86>] __lock_acquire+0x7f6/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81532bd5>] igmp6_group_added+0x45/0x1b0
[<ffffffff81533f2d>] ipv6_dev_mc_inc+0x2cd/0x430
[<ffffffff81515e17>] ipv6_add_dev+0x357/0x450
[<ffffffff81ce0d16>] addrconf_init+0x4e/0x183
[<ffffffff81ce0ba1>] inet6_init+0x191/0x2a6
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff81ca4d3f>] kernel_init+0xe3/0x168
[<ffffffff8157b2e4>] kernel_thread_helper+0x4/0x10
IN-SOFTIRQ-W at:
[<ffffffff8109ad5e>] __lock_acquire+0x7ce/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81531e9f>] mld_ifc_timer_expire+0xff/0x280
[<ffffffff8106f2a9>] run_timer_softirq+0x179/0x3f0
[<ffffffff810666d0>] __do_softirq+0xc0/0x210
[<ffffffff8157b3dc>] call_softirq+0x1c/0x30
[<ffffffff8100d42d>] do_softirq+0xad/0xe0
[<ffffffff81066afe>] irq_exit+0x9e/0xb0
[<ffffffff8157bd40>] smp_apic_timer_interrupt+0x70/0x9b
[<ffffffff8157ab93>] apic_timer_interrupt+0x13/0x20
[<ffffffff8149d857>] rt_do_flush+0x87/0x2a0
[<ffffffff814a16b6>] rt_cache_flush+0x46/0x60
[<ffffffff814e36e0>] fib_disable_ip+0x40/0x60
[<ffffffff814e5447>] fib_inetaddr_event+0xd7/0xe0
[<ffffffff81575c1c>] notifier_call_chain+0x8c/0xc0
[<ffffffff810896e8>] __blocking_notifier_call_chain+0x78/0xb0
[<ffffffff81089736>] blocking_notifier_call_chain+0x16/0x20
[<ffffffff814d8021>] __inet_del_ifa+0xf1/0x2e0
[<ffffffff814d8223>] inet_del_ifa+0x13/0x20
[<ffffffff814da731>] devinet_ioctl+0x501/0x800
[<ffffffff814db508>] inet_ioctl+0x88/0xa0
[<ffffffff814541f0>] sock_do_ioctl+0x30/0x70
[<ffffffff814542a9>] sock_ioctl+0x79/0x2f0
[<ffffffff81188798>] do_vfs_ioctl+0x98/0x570
[<ffffffff81188d01>] sys_ioctl+0x91/0xa0
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
INITIAL USE at:
[<ffffffff8109a9e9>] __lock_acquire+0x459/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81532bd5>] igmp6_group_added+0x45/0x1b0
[<ffffffff81533f2d>] ipv6_dev_mc_inc+0x2cd/0x430
[<ffffffff81515e17>] ipv6_add_dev+0x357/0x450
[<ffffffff81ce0d16>] addrconf_init+0x4e/0x183
[<ffffffff81ce0ba1>] inet6_init+0x191/0x2a6
[<ffffffff81002165>] do_one_initcall+0x45/0x190
[<ffffffff81ca4d3f>] kernel_init+0xe3/0x168
[<ffffffff8157b2e4>] kernel_thread_helper+0x4/0x10
}
... key at: [<ffffffff82801be2>] __key.40877+0x0/0x8
... acquired at:
[<ffffffff810997bc>] check_usage_forwards+0x9c/0x110
[<ffffffff8109a32c>] mark_lock+0x19c/0x400
[<ffffffff8109ad5e>] __lock_acquire+0x7ce/0x1e10
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81531e9f>] mld_ifc_timer_expire+0xff/0x280
[<ffffffff8106f2a9>] run_timer_softirq+0x179/0x3f0
[<ffffffff810666d0>] __do_softirq+0xc0/0x210
[<ffffffff8157b3dc>] call_softirq+0x1c/0x30
[<ffffffff8100d42d>] do_softirq+0xad/0xe0
[<ffffffff81066afe>] irq_exit+0x9e/0xb0
[<ffffffff8157bd40>] smp_apic_timer_interrupt+0x70/0x9b
[<ffffffff8157ab93>] apic_timer_interrupt+0x13/0x20
[<ffffffff8149d857>] rt_do_flush+0x87/0x2a0
[<ffffffff814a16b6>] rt_cache_flush+0x46/0x60
[<ffffffff814e36e0>] fib_disable_ip+0x40/0x60
[<ffffffff814e5447>] fib_inetaddr_event+0xd7/0xe0
[<ffffffff81575c1c>] notifier_call_chain+0x8c/0xc0
[<ffffffff810896e8>] __blocking_notifier_call_chain+0x78/0xb0
[<ffffffff81089736>] blocking_notifier_call_chain+0x16/0x20
[<ffffffff814d8021>] __inet_del_ifa+0xf1/0x2e0
[<ffffffff814d8223>] inet_del_ifa+0x13/0x20
[<ffffffff814da731>] devinet_ioctl+0x501/0x800
[<ffffffff814db508>] inet_ioctl+0x88/0xa0
[<ffffffff814541f0>] sock_do_ioctl+0x30/0x70
[<ffffffff814542a9>] sock_ioctl+0x79/0x2f0
[<ffffffff81188798>] do_vfs_ioctl+0x98/0x570
[<ffffffff81188d01>] sys_ioctl+0x91/0xa0
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
stack backtrace:
Pid: 567, comm: ifconfig Not tainted 2.6.39-rc6+ #1
Call Trace:
<IRQ> [<ffffffff810996f6>] print_irq_inversion_bug+0x146/0x170
[<ffffffff81099720>] ? print_irq_inversion_bug+0x170/0x170
[<ffffffff810997bc>] check_usage_forwards+0x9c/0x110
[<ffffffff8109a32c>] mark_lock+0x19c/0x400
[<ffffffff8109ad5e>] __lock_acquire+0x7ce/0x1e10
[<ffffffff8109a383>] ? mark_lock+0x1f3/0x400
[<ffffffff8109b497>] ? __lock_acquire+0xf07/0x1e10
[<ffffffff81012255>] ? native_sched_clock+0x15/0x70
[<ffffffff8109ca4d>] lock_acquire+0x9d/0x130
[<ffffffff81531e9f>] ? mld_ifc_timer_expire+0xff/0x280
[<ffffffff8109759d>] ? lock_release_holdtime+0x3d/0x1a0
[<ffffffff8157124b>] _raw_spin_lock_bh+0x3b/0x70
[<ffffffff81531e9f>] ? mld_ifc_timer_expire+0xff/0x280
[<ffffffff8157170b>] ? _raw_spin_unlock+0x2b/0x40
[<ffffffff81531e9f>] mld_ifc_timer_expire+0xff/0x280
[<ffffffff8106f2a9>] run_timer_softirq+0x179/0x3f0
[<ffffffff8106f21b>] ? run_timer_softirq+0xeb/0x3f0
[<ffffffff810122b9>] ? sched_clock+0x9/0x10
[<ffffffff81531da0>] ? mld_gq_timer_expire+0x30/0x30
[<ffffffff810666d0>] __do_softirq+0xc0/0x210
[<ffffffff8109455f>] ? tick_program_event+0x1f/0x30
[<ffffffff8157b3dc>] call_softirq+0x1c/0x30
[<ffffffff8100d42d>] do_softirq+0xad/0xe0
[<ffffffff81066afe>] irq_exit+0x9e/0xb0
[<ffffffff8157bd40>] smp_apic_timer_interrupt+0x70/0x9b
[<ffffffff8157ab93>] apic_timer_interrupt+0x13/0x20
<EOI> [<ffffffff81571f14>] ? retint_restore_args+0x13/0x13
[<ffffffff810974a7>] ? lock_is_held+0x17/0xd0
[<ffffffff8149d857>] rt_do_flush+0x87/0x2a0
[<ffffffff814a16b6>] rt_cache_flush+0x46/0x60
[<ffffffff814e36e0>] fib_disable_ip+0x40/0x60
[<ffffffff814e5447>] fib_inetaddr_event+0xd7/0xe0
[<ffffffff81575c1c>] notifier_call_chain+0x8c/0xc0
[<ffffffff810896e8>] __blocking_notifier_call_chain+0x78/0xb0
[<ffffffff81089736>] blocking_notifier_call_chain+0x16/0x20
[<ffffffff814d8021>] __inet_del_ifa+0xf1/0x2e0
[<ffffffff814d8223>] inet_del_ifa+0x13/0x20
[<ffffffff814da731>] devinet_ioctl+0x501/0x800
[<ffffffff8108a3af>] ? local_clock+0x6f/0x80
[<ffffffff81575898>] ? do_page_fault+0x268/0x560
[<ffffffff814db508>] inet_ioctl+0x88/0xa0
[<ffffffff814541f0>] sock_do_ioctl+0x30/0x70
[<ffffffff814542a9>] sock_ioctl+0x79/0x2f0
[<ffffffff810dfe87>] ? __call_rcu+0xa7/0x190
[<ffffffff81188798>] do_vfs_ioctl+0x98/0x570
[<ffffffff8117737e>] ? fget_light+0x33e/0x430
[<ffffffff81571ef9>] ? retint_swapgs+0x13/0x1b
[<ffffffff81188d01>] sys_ioctl+0x91/0xa0
[<ffffffff8157a142>] system_call_fastpath+0x16/0x1b
Signed-off-by: Roland Dreier <roland@purestorage.com>
Signed-off-by: Shreyas N Bhatewara <sbhatewara@vmware.com>
Signed-off-by: Scott J. Goldman <scottjg@vmware.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2011-05-06 16:32:53 +08:00
|
|
|
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
|
2009-10-13 15:15:51 +08:00
|
|
|
adapter->intr.type = cfg & 0x3;
|
|
|
|
adapter->intr.mask_mode = (cfg >> 2) & 0x3;
|
|
|
|
|
|
|
|
if (adapter->intr.type == VMXNET3_IT_AUTO) {
|
2010-07-15 23:21:27 +08:00
|
|
|
adapter->intr.type = VMXNET3_IT_MSIX;
|
|
|
|
}
|
2009-10-13 15:15:51 +08:00
|
|
|
|
2009-10-15 11:38:58 +08:00
|
|
|
#ifdef CONFIG_PCI_MSI
|
2010-07-15 23:21:27 +08:00
|
|
|
if (adapter->intr.type == VMXNET3_IT_MSIX) {
|
2014-02-18 18:12:02 +08:00
|
|
|
int i, nvec;
|
|
|
|
|
|
|
|
nvec = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
|
|
|
|
1 : adapter->num_tx_queues;
|
|
|
|
nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ?
|
|
|
|
0 : adapter->num_rx_queues;
|
|
|
|
nvec += 1; /* for link event */
|
|
|
|
nvec = nvec > VMXNET3_LINUX_MIN_MSIX_VECT ?
|
|
|
|
nvec : VMXNET3_LINUX_MIN_MSIX_VECT;
|
|
|
|
|
|
|
|
for (i = 0; i < nvec; i++)
|
|
|
|
adapter->intr.msix_entries[i].entry = i;
|
|
|
|
|
|
|
|
nvec = vmxnet3_acquire_msix_vectors(adapter, nvec);
|
|
|
|
if (nvec < 0)
|
|
|
|
goto msix_err;
|
|
|
|
|
2010-11-19 18:55:24 +08:00
|
|
|
/* If we cannot allocate one MSIx vector per queue
|
|
|
|
* then limit the number of rx queues to 1
|
|
|
|
*/
|
2014-02-18 18:12:02 +08:00
|
|
|
if (nvec == VMXNET3_LINUX_MIN_MSIX_VECT) {
|
2010-11-19 18:55:24 +08:00
|
|
|
if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
|
2011-01-14 23:00:03 +08:00
|
|
|
|| adapter->num_rx_queues != 1) {
|
2010-11-19 18:55:24 +08:00
|
|
|
adapter->share_intr = VMXNET3_INTR_TXSHARE;
|
2013-01-15 15:28:30 +08:00
|
|
|
netdev_err(adapter->netdev,
|
|
|
|
"Number of rx queues : 1\n");
|
2010-11-19 18:55:24 +08:00
|
|
|
adapter->num_rx_queues = 1;
|
|
|
|
}
|
2009-10-13 15:15:51 +08:00
|
|
|
}
|
2010-11-19 18:55:24 +08:00
|
|
|
|
2014-02-18 18:12:02 +08:00
|
|
|
adapter->intr.num_intrs = nvec;
|
|
|
|
return;
|
|
|
|
|
|
|
|
msix_err:
|
2010-11-19 18:55:24 +08:00
|
|
|
/* If we cannot allocate MSIx vectors use only one rx queue */
|
2013-01-15 15:28:28 +08:00
|
|
|
dev_info(&adapter->pdev->dev,
|
|
|
|
"Failed to enable MSI-X, error %d. "
|
2014-02-18 18:12:02 +08:00
|
|
|
"Limiting #rx queues to 1, try MSI.\n", nvec);
|
2010-11-19 18:55:24 +08:00
|
|
|
|
2010-07-15 23:21:27 +08:00
|
|
|
adapter->intr.type = VMXNET3_IT_MSI;
|
|
|
|
}
|
2009-10-13 15:15:51 +08:00
|
|
|
|
2010-07-15 23:21:27 +08:00
|
|
|
if (adapter->intr.type == VMXNET3_IT_MSI) {
|
2014-02-18 18:12:02 +08:00
|
|
|
if (!pci_enable_msi(adapter->pdev)) {
|
2010-11-19 18:55:24 +08:00
|
|
|
adapter->num_rx_queues = 1;
|
2009-10-13 15:15:51 +08:00
|
|
|
adapter->intr.num_intrs = 1;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
2010-07-15 23:21:27 +08:00
|
|
|
#endif /* CONFIG_PCI_MSI */
|
2009-10-13 15:15:51 +08:00
|
|
|
|
2010-11-19 18:55:24 +08:00
|
|
|
adapter->num_rx_queues = 1;
|
2013-01-15 15:28:30 +08:00
|
|
|
dev_info(&adapter->netdev->dev,
|
|
|
|
"Using INTx interrupt, #Rx queues: 1.\n");
|
2009-10-13 15:15:51 +08:00
|
|
|
adapter->intr.type = VMXNET3_IT_INTX;
|
|
|
|
|
|
|
|
/* INT-X related setting */
|
|
|
|
adapter->intr.num_intrs = 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
|
|
|
|
{
|
|
|
|
if (adapter->intr.type == VMXNET3_IT_MSIX)
|
|
|
|
pci_disable_msix(adapter->pdev);
|
|
|
|
else if (adapter->intr.type == VMXNET3_IT_MSI)
|
|
|
|
pci_disable_msi(adapter->pdev);
|
|
|
|
else
|
|
|
|
BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
vmxnet3_tx_timeout(struct net_device *netdev)
|
|
|
|
{
|
|
|
|
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
|
|
|
|
adapter->tx_timeout_count++;
|
|
|
|
|
2013-01-15 15:28:30 +08:00
|
|
|
netdev_err(adapter->netdev, "tx hang\n");
|
2009-10-13 15:15:51 +08:00
|
|
|
schedule_work(&adapter->work);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
vmxnet3_reset_work(struct work_struct *data)
|
|
|
|
{
|
|
|
|
struct vmxnet3_adapter *adapter;
|
|
|
|
|
|
|
|
adapter = container_of(data, struct vmxnet3_adapter, work);
|
|
|
|
|
|
|
|
/* if another thread is resetting the device, no need to proceed */
|
|
|
|
if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* if the device is closed, we must leave it alone */
|
2010-07-19 15:02:13 +08:00
|
|
|
rtnl_lock();
|
2009-10-13 15:15:51 +08:00
|
|
|
if (netif_running(adapter->netdev)) {
|
2013-01-15 15:28:30 +08:00
|
|
|
netdev_notice(adapter->netdev, "resetting\n");
|
2009-10-13 15:15:51 +08:00
|
|
|
vmxnet3_quiesce_dev(adapter);
|
|
|
|
vmxnet3_reset_dev(adapter);
|
|
|
|
vmxnet3_activate_dev(adapter);
|
|
|
|
} else {
|
2013-01-15 15:28:30 +08:00
|
|
|
netdev_info(adapter->netdev, "already closed\n");
|
2009-10-13 15:15:51 +08:00
|
|
|
}
|
2010-07-19 15:02:13 +08:00
|
|
|
rtnl_unlock();
|
2009-10-13 15:15:51 +08:00
|
|
|
|
2016-10-03 10:47:50 +08:00
|
|
|
netif_wake_queue(adapter->netdev);
|
2009-10-13 15:15:51 +08:00
|
|
|
clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-12-03 22:24:16 +08:00
|
|
|
static int
|
2009-10-13 15:15:51 +08:00
|
|
|
vmxnet3_probe_device(struct pci_dev *pdev,
|
|
|
|
const struct pci_device_id *id)
|
|
|
|
{
|
|
|
|
static const struct net_device_ops vmxnet3_netdev_ops = {
|
|
|
|
.ndo_open = vmxnet3_open,
|
|
|
|
.ndo_stop = vmxnet3_close,
|
|
|
|
.ndo_start_xmit = vmxnet3_xmit_frame,
|
|
|
|
.ndo_set_mac_address = vmxnet3_set_mac_addr,
|
|
|
|
.ndo_change_mtu = vmxnet3_change_mtu,
|
2011-04-18 21:31:21 +08:00
|
|
|
.ndo_set_features = vmxnet3_set_features,
|
2011-06-08 22:53:57 +08:00
|
|
|
.ndo_get_stats64 = vmxnet3_get_stats64,
|
2009-10-13 15:15:51 +08:00
|
|
|
.ndo_tx_timeout = vmxnet3_tx_timeout,
|
2011-08-16 14:29:01 +08:00
|
|
|
.ndo_set_rx_mode = vmxnet3_set_mc,
|
2009-10-13 15:15:51 +08:00
|
|
|
.ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
|
|
|
|
.ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
|
|
|
|
#ifdef CONFIG_NET_POLL_CONTROLLER
|
|
|
|
.ndo_poll_controller = vmxnet3_netpoll,
|
|
|
|
#endif
|
|
|
|
};
|
|
|
|
int err;
|
2018-05-14 20:14:34 +08:00
|
|
|
bool dma64;
|
2009-10-13 15:15:51 +08:00
|
|
|
u32 ver;
|
|
|
|
struct net_device *netdev;
|
|
|
|
struct vmxnet3_adapter *adapter;
|
|
|
|
u8 mac[ETH_ALEN];
|
2010-11-19 18:55:24 +08:00
|
|
|
int size;
|
|
|
|
int num_tx_queues;
|
|
|
|
int num_rx_queues;
|
|
|
|
|
2011-05-10 14:13:56 +08:00
|
|
|
if (!pci_msi_enabled())
|
|
|
|
enable_mq = 0;
|
|
|
|
|
2010-11-19 18:55:24 +08:00
|
|
|
#ifdef VMXNET3_RSS
|
|
|
|
if (enable_mq)
|
|
|
|
num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
|
|
|
|
(int)num_online_cpus());
|
|
|
|
else
|
|
|
|
#endif
|
|
|
|
num_rx_queues = 1;
|
2011-07-07 15:25:52 +08:00
|
|
|
num_rx_queues = rounddown_pow_of_two(num_rx_queues);
|
2010-11-19 18:55:24 +08:00
|
|
|
|
|
|
|
if (enable_mq)
|
|
|
|
num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
|
|
|
|
(int)num_online_cpus());
|
|
|
|
else
|
|
|
|
num_tx_queues = 1;
|
|
|
|
|
2011-07-07 15:25:52 +08:00
|
|
|
num_tx_queues = rounddown_pow_of_two(num_tx_queues);
|
2010-11-19 18:55:24 +08:00
|
|
|
netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
|
|
|
|
max(num_tx_queues, num_rx_queues));
|
2013-01-15 15:28:30 +08:00
|
|
|
dev_info(&pdev->dev,
|
|
|
|
"# of Tx queues : %d, # of Rx queues : %d\n",
|
|
|
|
num_tx_queues, num_rx_queues);
|
2009-10-13 15:15:51 +08:00
|
|
|
|
2012-01-29 21:47:52 +08:00
|
|
|
if (!netdev)
|
2009-10-13 15:15:51 +08:00
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
pci_set_drvdata(pdev, netdev);
|
|
|
|
adapter = netdev_priv(netdev);
|
|
|
|
adapter->netdev = netdev;
|
|
|
|
adapter->pdev = pdev;
|
|
|
|
|
2014-06-13 22:03:21 +08:00
|
|
|
adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
|
|
|
|
adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
|
2015-01-07 01:20:15 +08:00
|
|
|
adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
|
2014-06-13 22:03:21 +08:00
|
|
|
|
2018-05-14 20:14:34 +08:00
|
|
|
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
|
|
|
|
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
|
|
|
|
dev_err(&pdev->dev,
|
|
|
|
"pci_set_consistent_dma_mask failed\n");
|
|
|
|
err = -EIO;
|
|
|
|
goto err_set_mask;
|
|
|
|
}
|
|
|
|
dma64 = true;
|
|
|
|
} else {
|
|
|
|
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
|
|
|
|
dev_err(&pdev->dev,
|
|
|
|
"pci_set_dma_mask failed\n");
|
|
|
|
err = -EIO;
|
|
|
|
goto err_set_mask;
|
|
|
|
}
|
|
|
|
dma64 = false;
|
|
|
|
}
|
|
|
|
|
2011-01-14 22:59:57 +08:00
|
|
|
spin_lock_init(&adapter->cmd_lock);
|
2013-08-24 00:33:49 +08:00
|
|
|
adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
|
|
|
|
sizeof(struct vmxnet3_adapter),
|
|
|
|
PCI_DMA_TODEVICE);
|
2015-11-28 06:29:30 +08:00
|
|
|
if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
|
|
|
|
dev_err(&pdev->dev, "Failed to map dma\n");
|
|
|
|
err = -EFAULT;
|
2018-05-14 20:14:34 +08:00
|
|
|
goto err_set_mask;
|
2015-11-28 06:29:30 +08:00
|
|
|
}
|
2013-08-24 00:33:49 +08:00
|
|
|
adapter->shared = dma_alloc_coherent(
|
|
|
|
&adapter->pdev->dev,
|
|
|
|
sizeof(struct Vmxnet3_DriverShared),
|
|
|
|
&adapter->shared_pa, GFP_KERNEL);
|
2009-10-13 15:15:51 +08:00
|
|
|
if (!adapter->shared) {
|
2013-01-15 15:28:30 +08:00
|
|
|
dev_err(&pdev->dev, "Failed to allocate memory\n");
|
2009-10-13 15:15:51 +08:00
|
|
|
err = -ENOMEM;
|
|
|
|
goto err_alloc_shared;
|
|
|
|
}
|
|
|
|
|
2010-11-19 18:55:24 +08:00
|
|
|
adapter->num_rx_queues = num_rx_queues;
|
|
|
|
adapter->num_tx_queues = num_tx_queues;
|
2013-03-06 20:04:53 +08:00
|
|
|
adapter->rx_buf_per_pkt = 1;
|
2010-11-19 18:55:24 +08:00
|
|
|
|
|
|
|
size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
|
|
|
|
size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
|
2013-08-24 00:33:49 +08:00
|
|
|
adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size,
|
|
|
|
&adapter->queue_desc_pa,
|
|
|
|
GFP_KERNEL);
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
if (!adapter->tqd_start) {
|
2013-01-15 15:28:30 +08:00
|
|
|
dev_err(&pdev->dev, "Failed to allocate memory\n");
|
2009-10-13 15:15:51 +08:00
|
|
|
err = -ENOMEM;
|
|
|
|
goto err_alloc_queue_desc;
|
|
|
|
}
|
2010-11-19 18:55:24 +08:00
|
|
|
adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
|
2012-11-13 21:53:28 +08:00
|
|
|
adapter->num_tx_queues);
|
2009-10-13 15:15:51 +08:00
|
|
|
|
2013-08-24 00:33:49 +08:00
|
|
|
adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
|
|
|
|
sizeof(struct Vmxnet3_PMConf),
|
|
|
|
&adapter->pm_conf_pa,
|
|
|
|
GFP_KERNEL);
|
2009-10-13 15:15:51 +08:00
|
|
|
if (adapter->pm_conf == NULL) {
|
|
|
|
err = -ENOMEM;
|
|
|
|
goto err_alloc_pm;
|
|
|
|
}
|
|
|
|
|
2010-11-19 18:55:24 +08:00
|
|
|
#ifdef VMXNET3_RSS
|
|
|
|
|
2013-08-24 00:33:49 +08:00
|
|
|
adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev,
|
|
|
|
sizeof(struct UPT1_RSSConf),
|
|
|
|
&adapter->rss_conf_pa,
|
|
|
|
GFP_KERNEL);
|
2010-11-19 18:55:24 +08:00
|
|
|
if (adapter->rss_conf == NULL) {
|
|
|
|
err = -ENOMEM;
|
|
|
|
goto err_alloc_rss;
|
|
|
|
}
|
|
|
|
#endif /* VMXNET3_RSS */
|
|
|
|
|
2018-05-14 20:14:34 +08:00
|
|
|
err = vmxnet3_alloc_pci_resources(adapter);
|
2009-10-13 15:15:51 +08:00
|
|
|
if (err < 0)
|
|
|
|
goto err_alloc_pci;
|
|
|
|
|
|
|
|
ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
|
2016-06-17 01:51:59 +08:00
|
|
|
if (ver & (1 << VMXNET3_REV_3)) {
|
|
|
|
VMXNET3_WRITE_BAR1_REG(adapter,
|
|
|
|
VMXNET3_REG_VRRS,
|
|
|
|
1 << VMXNET3_REV_3);
|
|
|
|
adapter->version = VMXNET3_REV_3 + 1;
|
|
|
|
} else if (ver & (1 << VMXNET3_REV_2)) {
|
2016-06-17 01:51:53 +08:00
|
|
|
VMXNET3_WRITE_BAR1_REG(adapter,
|
|
|
|
VMXNET3_REG_VRRS,
|
|
|
|
1 << VMXNET3_REV_2);
|
|
|
|
adapter->version = VMXNET3_REV_2 + 1;
|
|
|
|
} else if (ver & (1 << VMXNET3_REV_1)) {
|
|
|
|
VMXNET3_WRITE_BAR1_REG(adapter,
|
|
|
|
VMXNET3_REG_VRRS,
|
|
|
|
1 << VMXNET3_REV_1);
|
|
|
|
adapter->version = VMXNET3_REV_1 + 1;
|
2009-10-13 15:15:51 +08:00
|
|
|
} else {
|
2013-01-15 15:28:30 +08:00
|
|
|
dev_err(&pdev->dev,
|
|
|
|
"Incompatible h/w version (0x%x) for adapter\n", ver);
|
2009-10-13 15:15:51 +08:00
|
|
|
err = -EBUSY;
|
|
|
|
goto err_ver;
|
|
|
|
}
|
2015-06-20 04:38:29 +08:00
|
|
|
dev_dbg(&pdev->dev, "Using device version %d\n", adapter->version);
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
|
|
|
|
if (ver & 1) {
|
|
|
|
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
|
|
|
|
} else {
|
2013-01-15 15:28:30 +08:00
|
|
|
dev_err(&pdev->dev,
|
|
|
|
"Incompatible upt version (0x%x) for adapter\n", ver);
|
2009-10-13 15:15:51 +08:00
|
|
|
err = -EBUSY;
|
|
|
|
goto err_ver;
|
|
|
|
}
|
|
|
|
|
2016-06-17 01:51:57 +08:00
|
|
|
if (VMXNET3_VERSION_GE_3(adapter)) {
|
|
|
|
adapter->coal_conf =
|
|
|
|
dma_alloc_coherent(&adapter->pdev->dev,
|
|
|
|
sizeof(struct Vmxnet3_CoalesceScheme)
|
|
|
|
,
|
|
|
|
&adapter->coal_conf_pa,
|
|
|
|
GFP_KERNEL);
|
|
|
|
if (!adapter->coal_conf) {
|
|
|
|
err = -ENOMEM;
|
|
|
|
goto err_ver;
|
|
|
|
}
|
|
|
|
memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
|
|
|
|
adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
|
|
|
|
adapter->default_coal_mode = true;
|
|
|
|
}
|
|
|
|
|
2011-07-21 00:01:11 +08:00
|
|
|
SET_NETDEV_DEV(netdev, &pdev->dev);
|
2009-10-13 15:15:51 +08:00
|
|
|
vmxnet3_declare_features(adapter, dma64);
|
|
|
|
|
2016-06-17 01:51:56 +08:00
|
|
|
adapter->rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
|
|
|
|
VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
|
|
|
|
|
2013-01-15 15:28:33 +08:00
|
|
|
if (adapter->num_tx_queues == adapter->num_rx_queues)
|
|
|
|
adapter->share_intr = VMXNET3_INTR_BUDDYSHARE;
|
|
|
|
else
|
2010-11-19 18:55:24 +08:00
|
|
|
adapter->share_intr = VMXNET3_INTR_DONTSHARE;
|
|
|
|
|
2009-10-13 15:15:51 +08:00
|
|
|
vmxnet3_alloc_intr_resources(adapter);
|
|
|
|
|
2010-11-19 18:55:24 +08:00
|
|
|
#ifdef VMXNET3_RSS
|
|
|
|
if (adapter->num_rx_queues > 1 &&
|
|
|
|
adapter->intr.type == VMXNET3_IT_MSIX) {
|
|
|
|
adapter->rss = true;
|
2013-01-15 15:28:35 +08:00
|
|
|
netdev->hw_features |= NETIF_F_RXHASH;
|
|
|
|
netdev->features |= NETIF_F_RXHASH;
|
2013-01-15 15:28:30 +08:00
|
|
|
dev_dbg(&pdev->dev, "RSS is enabled.\n");
|
2010-11-19 18:55:24 +08:00
|
|
|
} else {
|
|
|
|
adapter->rss = false;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2009-10-13 15:15:51 +08:00
|
|
|
vmxnet3_read_mac_addr(adapter, mac);
|
|
|
|
memcpy(netdev->dev_addr, mac, netdev->addr_len);
|
|
|
|
|
|
|
|
netdev->netdev_ops = &vmxnet3_netdev_ops;
|
|
|
|
vmxnet3_set_ethtool_ops(netdev);
|
2010-11-19 18:55:24 +08:00
|
|
|
netdev->watchdog_timeo = 5 * HZ;
|
2009-10-13 15:15:51 +08:00
|
|
|
|
2016-10-21 01:55:21 +08:00
|
|
|
/* MTU range: 60 - 9000 */
|
|
|
|
netdev->min_mtu = VMXNET3_MIN_MTU;
|
|
|
|
netdev->max_mtu = VMXNET3_MAX_MTU;
|
|
|
|
|
2009-10-13 15:15:51 +08:00
|
|
|
INIT_WORK(&adapter->work, vmxnet3_reset_work);
|
2012-08-15 00:13:36 +08:00
|
|
|
set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
|
2009-10-13 15:15:51 +08:00
|
|
|
|
2010-11-19 18:55:24 +08:00
|
|
|
if (adapter->intr.type == VMXNET3_IT_MSIX) {
|
|
|
|
int i;
|
|
|
|
for (i = 0; i < adapter->num_rx_queues; i++) {
|
|
|
|
netif_napi_add(adapter->netdev,
|
|
|
|
&adapter->rx_queue[i].napi,
|
|
|
|
vmxnet3_poll_rx_only, 64);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
|
|
|
|
vmxnet3_poll, 64);
|
|
|
|
}
|
|
|
|
|
|
|
|
netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
|
|
|
|
netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
|
|
|
|
|
2013-01-30 05:15:45 +08:00
|
|
|
netif_carrier_off(netdev);
|
2009-10-13 15:15:51 +08:00
|
|
|
err = register_netdev(netdev);
|
|
|
|
|
|
|
|
if (err) {
|
2013-01-15 15:28:30 +08:00
|
|
|
dev_err(&pdev->dev, "Failed to register adapter\n");
|
2009-10-13 15:15:51 +08:00
|
|
|
goto err_register;
|
|
|
|
}
|
|
|
|
|
2010-07-16 05:51:14 +08:00
|
|
|
vmxnet3_check_link(adapter, false);
|
2009-10-13 15:15:51 +08:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
err_register:
|
2016-06-17 01:51:57 +08:00
|
|
|
if (VMXNET3_VERSION_GE_3(adapter)) {
|
|
|
|
dma_free_coherent(&adapter->pdev->dev,
|
|
|
|
sizeof(struct Vmxnet3_CoalesceScheme),
|
|
|
|
adapter->coal_conf, adapter->coal_conf_pa);
|
|
|
|
}
|
2009-10-13 15:15:51 +08:00
|
|
|
vmxnet3_free_intr_resources(adapter);
|
|
|
|
err_ver:
|
|
|
|
vmxnet3_free_pci_resources(adapter);
|
|
|
|
err_alloc_pci:
|
2010-11-19 18:55:24 +08:00
|
|
|
#ifdef VMXNET3_RSS
|
2013-08-24 00:33:49 +08:00
|
|
|
dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
|
|
|
|
adapter->rss_conf, adapter->rss_conf_pa);
|
2010-11-19 18:55:24 +08:00
|
|
|
err_alloc_rss:
|
|
|
|
#endif
|
2013-08-24 00:33:49 +08:00
|
|
|
dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
|
|
|
|
adapter->pm_conf, adapter->pm_conf_pa);
|
2009-10-13 15:15:51 +08:00
|
|
|
err_alloc_pm:
|
2013-08-24 00:33:49 +08:00
|
|
|
dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
|
|
|
|
adapter->queue_desc_pa);
|
2009-10-13 15:15:51 +08:00
|
|
|
err_alloc_queue_desc:
|
2013-08-24 00:33:49 +08:00
|
|
|
dma_free_coherent(&adapter->pdev->dev,
|
|
|
|
sizeof(struct Vmxnet3_DriverShared),
|
|
|
|
adapter->shared, adapter->shared_pa);
|
2009-10-13 15:15:51 +08:00
|
|
|
err_alloc_shared:
|
2013-08-24 00:33:49 +08:00
|
|
|
dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
|
|
|
|
sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
|
2018-05-14 20:14:34 +08:00
|
|
|
err_set_mask:
|
2009-10-13 15:15:51 +08:00
|
|
|
free_netdev(netdev);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-12-03 22:24:16 +08:00
|
|
|
static void
|
2009-10-13 15:15:51 +08:00
|
|
|
vmxnet3_remove_device(struct pci_dev *pdev)
|
|
|
|
{
|
|
|
|
struct net_device *netdev = pci_get_drvdata(pdev);
|
|
|
|
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
|
2010-11-19 18:55:24 +08:00
|
|
|
int size = 0;
|
|
|
|
int num_rx_queues;
|
|
|
|
|
|
|
|
#ifdef VMXNET3_RSS
|
|
|
|
if (enable_mq)
|
|
|
|
num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
|
|
|
|
(int)num_online_cpus());
|
|
|
|
else
|
|
|
|
#endif
|
|
|
|
num_rx_queues = 1;
|
2011-07-07 15:25:52 +08:00
|
|
|
num_rx_queues = rounddown_pow_of_two(num_rx_queues);
|
2009-10-13 15:15:51 +08:00
|
|
|
|
2010-12-12 23:45:14 +08:00
|
|
|
cancel_work_sync(&adapter->work);
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
unregister_netdev(netdev);
|
|
|
|
|
|
|
|
vmxnet3_free_intr_resources(adapter);
|
|
|
|
vmxnet3_free_pci_resources(adapter);
|
2016-06-17 01:51:57 +08:00
|
|
|
if (VMXNET3_VERSION_GE_3(adapter)) {
|
|
|
|
dma_free_coherent(&adapter->pdev->dev,
|
|
|
|
sizeof(struct Vmxnet3_CoalesceScheme),
|
|
|
|
adapter->coal_conf, adapter->coal_conf_pa);
|
|
|
|
}
|
2010-11-19 18:55:24 +08:00
|
|
|
#ifdef VMXNET3_RSS
|
2013-08-24 00:33:49 +08:00
|
|
|
dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
|
|
|
|
adapter->rss_conf, adapter->rss_conf_pa);
|
2010-11-19 18:55:24 +08:00
|
|
|
#endif
|
2013-08-24 00:33:49 +08:00
|
|
|
dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
|
|
|
|
adapter->pm_conf, adapter->pm_conf_pa);
|
2010-11-19 18:55:24 +08:00
|
|
|
|
|
|
|
size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
|
|
|
|
size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
|
2013-08-24 00:33:49 +08:00
|
|
|
dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
|
|
|
|
adapter->queue_desc_pa);
|
|
|
|
dma_free_coherent(&adapter->pdev->dev,
|
|
|
|
sizeof(struct Vmxnet3_DriverShared),
|
|
|
|
adapter->shared, adapter->shared_pa);
|
|
|
|
dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
|
|
|
|
sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
|
2009-10-13 15:15:51 +08:00
|
|
|
free_netdev(netdev);
|
|
|
|
}
|
|
|
|
|
2015-06-20 04:36:02 +08:00
|
|
|
static void vmxnet3_shutdown_device(struct pci_dev *pdev)
|
|
|
|
{
|
|
|
|
struct net_device *netdev = pci_get_drvdata(pdev);
|
|
|
|
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
/* Reset_work may be in the middle of resetting the device, wait for its
|
|
|
|
* completion.
|
|
|
|
*/
|
|
|
|
while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
|
2018-05-17 11:46:41 +08:00
|
|
|
usleep_range(1000, 2000);
|
2015-06-20 04:36:02 +08:00
|
|
|
|
|
|
|
if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED,
|
|
|
|
&adapter->state)) {
|
|
|
|
clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
spin_lock_irqsave(&adapter->cmd_lock, flags);
|
|
|
|
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
|
|
|
|
VMXNET3_CMD_QUIESCE_DEV);
|
|
|
|
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
|
|
|
|
vmxnet3_disable_all_intrs(adapter);
|
|
|
|
|
|
|
|
clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
|
|
|
|
}
|
|
|
|
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
#ifdef CONFIG_PM
|
|
|
|
|
|
|
|
static int
|
|
|
|
vmxnet3_suspend(struct device *device)
|
|
|
|
{
|
|
|
|
struct pci_dev *pdev = to_pci_dev(device);
|
|
|
|
struct net_device *netdev = pci_get_drvdata(pdev);
|
|
|
|
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
|
|
|
|
struct Vmxnet3_PMConf *pmConf;
|
|
|
|
struct ethhdr *ehdr;
|
|
|
|
struct arphdr *ahdr;
|
|
|
|
u8 *arpreq;
|
|
|
|
struct in_device *in_dev;
|
|
|
|
struct in_ifaddr *ifa;
|
2011-01-14 22:59:57 +08:00
|
|
|
unsigned long flags;
|
2009-10-13 15:15:51 +08:00
|
|
|
int i = 0;
|
|
|
|
|
|
|
|
if (!netif_running(netdev))
|
|
|
|
return 0;
|
|
|
|
|
2011-01-14 22:59:52 +08:00
|
|
|
for (i = 0; i < adapter->num_rx_queues; i++)
|
|
|
|
napi_disable(&adapter->rx_queue[i].napi);
|
|
|
|
|
2009-10-13 15:15:51 +08:00
|
|
|
vmxnet3_disable_all_intrs(adapter);
|
|
|
|
vmxnet3_free_irqs(adapter);
|
|
|
|
vmxnet3_free_intr_resources(adapter);
|
|
|
|
|
|
|
|
netif_device_detach(netdev);
|
2010-11-19 18:55:24 +08:00
|
|
|
netif_tx_stop_all_queues(netdev);
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
/* Create wake-up filters. */
|
|
|
|
pmConf = adapter->pm_conf;
|
|
|
|
memset(pmConf, 0, sizeof(*pmConf));
|
|
|
|
|
|
|
|
if (adapter->wol & WAKE_UCAST) {
|
|
|
|
pmConf->filters[i].patternSize = ETH_ALEN;
|
|
|
|
pmConf->filters[i].maskSize = 1;
|
|
|
|
memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
|
|
|
|
pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
|
|
|
|
|
2010-10-22 02:05:32 +08:00
|
|
|
pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
|
2009-10-13 15:15:51 +08:00
|
|
|
i++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (adapter->wol & WAKE_ARP) {
|
|
|
|
in_dev = in_dev_get(netdev);
|
|
|
|
if (!in_dev)
|
|
|
|
goto skip_arp;
|
|
|
|
|
|
|
|
ifa = (struct in_ifaddr *)in_dev->ifa_list;
|
|
|
|
if (!ifa)
|
|
|
|
goto skip_arp;
|
|
|
|
|
|
|
|
pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
|
|
|
|
sizeof(struct arphdr) + /* ARP header */
|
|
|
|
2 * ETH_ALEN + /* 2 Ethernet addresses*/
|
|
|
|
2 * sizeof(u32); /*2 IPv4 addresses */
|
|
|
|
pmConf->filters[i].maskSize =
|
|
|
|
(pmConf->filters[i].patternSize - 1) / 8 + 1;
|
|
|
|
|
|
|
|
/* ETH_P_ARP in Ethernet header. */
|
|
|
|
ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
|
|
|
|
ehdr->h_proto = htons(ETH_P_ARP);
|
|
|
|
|
|
|
|
/* ARPOP_REQUEST in ARP header. */
|
|
|
|
ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
|
|
|
|
ahdr->ar_op = htons(ARPOP_REQUEST);
|
|
|
|
arpreq = (u8 *)(ahdr + 1);
|
|
|
|
|
|
|
|
/* The Unicast IPv4 address in 'tip' field. */
|
|
|
|
arpreq += 2 * ETH_ALEN + sizeof(u32);
|
|
|
|
*(u32 *)arpreq = ifa->ifa_address;
|
|
|
|
|
|
|
|
/* The mask for the relevant bits. */
|
|
|
|
pmConf->filters[i].mask[0] = 0x00;
|
|
|
|
pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
|
|
|
|
pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
|
|
|
|
pmConf->filters[i].mask[3] = 0x00;
|
|
|
|
pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
|
|
|
|
pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
|
|
|
|
in_dev_put(in_dev);
|
|
|
|
|
2010-10-22 02:05:32 +08:00
|
|
|
pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
|
2009-10-13 15:15:51 +08:00
|
|
|
i++;
|
|
|
|
}
|
|
|
|
|
|
|
|
skip_arp:
|
|
|
|
if (adapter->wol & WAKE_MAGIC)
|
2010-10-22 02:05:32 +08:00
|
|
|
pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
pmConf->numFilters = i;
|
|
|
|
|
2009-11-16 21:41:33 +08:00
|
|
|
adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
|
|
|
|
adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
|
|
|
|
*pmConf));
|
2013-08-24 00:33:49 +08:00
|
|
|
adapter->shared->devRead.pmConfDesc.confPA =
|
|
|
|
cpu_to_le64(adapter->pm_conf_pa);
|
2009-10-13 15:15:51 +08:00
|
|
|
|
2011-01-14 22:59:57 +08:00
|
|
|
spin_lock_irqsave(&adapter->cmd_lock, flags);
|
2009-10-13 15:15:51 +08:00
|
|
|
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
|
|
|
|
VMXNET3_CMD_UPDATE_PMCFG);
|
2011-01-14 22:59:57 +08:00
|
|
|
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
|
2009-10-13 15:15:51 +08:00
|
|
|
|
|
|
|
pci_save_state(pdev);
|
|
|
|
pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
|
|
|
|
adapter->wol);
|
|
|
|
pci_disable_device(pdev);
|
|
|
|
pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * vmxnet3_resume - power-management resume/restore callback.
 * @device: generic device embedded in the PCI device being woken.
 *
 * Brings the NIC back to D0 after a system suspend or hibernation.  The
 * emulated device does not preserve its state across the power
 * transition (see the in-body comment), so instead of restoring queue
 * state the device is quiesced, its queues cleaned up, and then fully
 * reset and re-activated from scratch.
 *
 * Returns 0 on success (or when the interface was not running and there
 * is nothing to do), or a negative errno if the PCI device cannot be
 * re-enabled or re-activation fails — in the latter case the interface
 * is force-closed before returning.
 */
static int
vmxnet3_resume(struct device *device)
{
	int err;
	unsigned long flags;
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	/* If the interface was down at suspend time there is no device
	 * state to rebuild.
	 */
	if (!netif_running(netdev))
		return 0;

	/* Return to full power and restore the PCI config space saved by
	 * the suspend path before touching the device.
	 */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device_mem(pdev);
	if (err != 0)
		return err;

	/* Wake-on-LAN armed at suspend time is no longer needed in D0. */
	pci_enable_wake(pdev, PCI_D0, 0);

	vmxnet3_alloc_intr_resources(adapter);

	/* During hibernate and suspend, device has to be reinitialized as the
	 * device state need not be preserved.
	 */

	/* Need not check adapter state as other reset tasks cannot run during
	 * device resume.
	 */
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_QUIESCE_DEV);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	/* Drop any stale queue contents before the full re-activation. */
	vmxnet3_tq_cleanup_all(adapter);
	vmxnet3_rq_cleanup_all(adapter);

	vmxnet3_reset_dev(adapter);
	err = vmxnet3_activate_dev(adapter);
	if (err != 0) {
		netdev_err(netdev,
			   "failed to re-activate on resume, error: %d", err);
		vmxnet3_force_close(adapter);
		return err;
	}
	netif_device_attach(netdev);

	return 0;
}
|
|
|
|
|
2009-12-15 10:00:08 +08:00
|
|
|
static const struct dev_pm_ops vmxnet3_pm_ops = {
|
2009-10-13 15:15:51 +08:00
|
|
|
.suspend = vmxnet3_suspend,
|
|
|
|
.resume = vmxnet3_resume,
|
2015-01-10 07:19:14 +08:00
|
|
|
.freeze = vmxnet3_suspend,
|
|
|
|
.restore = vmxnet3_resume,
|
2009-10-13 15:15:51 +08:00
|
|
|
};
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/*
 * PCI glue: binds this driver's entry points to the VMware vmxnet3
 * device ID table.  Power-management ops are only wired up when the
 * kernel is built with CONFIG_PM.
 */
static struct pci_driver vmxnet3_driver = {
	.name		= vmxnet3_driver_name,
	.id_table	= vmxnet3_pciid_table,
	.probe		= vmxnet3_probe_device,
	.remove		= vmxnet3_remove_device,
	.shutdown	= vmxnet3_shutdown_device,
#ifdef CONFIG_PM
	.driver.pm	= &vmxnet3_pm_ops,
#endif
};
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * Module entry point: print the driver banner, then register with the
 * PCI core.  Returns 0 on success or the negative errno from
 * pci_register_driver().
 */
static int __init
vmxnet3_init_module(void)
{
	pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC,
		VMXNET3_DRIVER_VERSION_REPORT);
	return pci_register_driver(&vmxnet3_driver);
}

module_init(vmxnet3_init_module);
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * Module exit point: unregister from the PCI core, which in turn
 * invokes vmxnet3_remove_device() for every bound device.
 */
static void
vmxnet3_exit_module(void)
{
	pci_unregister_driver(&vmxnet3_driver);
}

module_exit(vmxnet3_exit_module);

/* Module metadata exposed via modinfo. */
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);
|