/*
 *  linux/drivers/net/ethernet/ibm/ehea/ehea_main.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *	 Christoph Raisch <raisch@de.ibm.com>
 *	 Jan-Bernd Themann <themann@de.ibm.com>
 *	 Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_ether.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/memory.h>
#include <asm/kexec.h>
#include <linux/mutex.h>
#include <linux/prefetch.h>

#include <net/ip.h>

#include "ehea.h"
#include "ehea_qmr.h"
#include "ehea_phyp.h"


MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);


static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
static int use_mcs = 1;
static int prop_carrier_state;

module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
module_param(prop_carrier_state, int, 0);
module_param(use_mcs, int, 0);

MODULE_PARM_DESC(msg_level, "msg_level");
MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
		 "port to stack. 1:yes, 0:no. Default = 0 ");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
		 "[2^x - 1], x = [7..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3) ")");
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
		 "[2^x - 1], x = [7..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2) ")");
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
		 "[2^x - 1], x = [7..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1) ")");
MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue "
		 "[2^x - 1], x = [7..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")");
MODULE_PARM_DESC(use_mcs, " Multiple receive queues, 1: enable, 0: disable, "
		 "Default = 1");

static int port_name_cnt;
static LIST_HEAD(adapter_list);
static unsigned long ehea_driver_flags;
static DEFINE_MUTEX(dlpar_mem_lock);
static struct ehea_fw_handle_array ehea_fw_handles;
static struct ehea_bcmc_reg_array ehea_bcmc_regs;


static int ehea_probe_adapter(struct platform_device *dev);

static int ehea_remove(struct platform_device *dev);

static const struct of_device_id ehea_module_device_table[] = {
	{
		.name = "lhea",
		.compatible = "IBM,lhea",
	},
	{
		.type = "network",
		.compatible = "IBM,lhea-ethernet",
	},
	{},
};
MODULE_DEVICE_TABLE(of, ehea_module_device_table);

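/*
 * Note: both match tables list the "lhea" bus device; the table above
 * additionally lists the per-port "network" nodes, presumably so that
 * module autoloading also triggers for them.  The platform driver
 * itself binds only through ehea_device_table below.
 */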
static const struct of_device_id ehea_device_table[] = {
	{
		.name = "lhea",
		.compatible = "IBM,lhea",
	},
	{},
};

static struct platform_driver ehea_driver = {
	.driver = {
		.name = "ehea",
		.owner = THIS_MODULE,
		.of_match_table = ehea_device_table,
	},
	.probe = ehea_probe_adapter,
	.remove = ehea_remove,
};

void ehea_dump(void *adr, int len, char *msg)
{
	int x;
	unsigned char *deb = adr;
	for (x = 0; x < len; x += 16) {
		pr_info("%s adr=%p ofs=%04x %016llx %016llx\n",
			msg, deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
		deb += 16;
	}
}

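/* Queue the port reset worker unless resets are currently disabled for
 * this port via the __EHEA_DISABLE_PORT_RESET flag. */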
static void ehea_schedule_port_reset(struct ehea_port *port)
{
	if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
		schedule_work(&port->reset_task);
}

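/*
 * Rebuild the flat array of firmware handles (QPs, CQs, EQs, MRs) for
 * all adapters and their active ports.  Two passes: first count the
 * handles, then allocate and fill the array.  The flat layout is
 * apparently intended to let the kexec/crash path free firmware
 * resources without walking live driver structures (note the
 * <asm/kexec.h> include above).
 */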
static void ehea_update_firmware_handles(void)
{
	struct ehea_fw_handle_entry *arr = NULL;
	struct ehea_adapter *adapter;
	int num_adapters = 0;
	int num_ports = 0;
	int num_portres = 0;
	int i = 0;
	int num_fw_handles, k, l;

	/* Determine number of handles */
	mutex_lock(&ehea_fw_handles.lock);

	list_for_each_entry(adapter, &adapter_list, list) {
		num_adapters++;

		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			num_ports++;
			num_portres += port->num_def_qps;
		}
	}

	num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
			 num_ports * EHEA_NUM_PORT_FW_HANDLES +
			 num_portres * EHEA_NUM_PORTRES_FW_HANDLES;

	if (num_fw_handles) {
		arr = kcalloc(num_fw_handles, sizeof(*arr), GFP_KERNEL);
		if (!arr)
			goto out;  /* Keep the existing array */
	} else
		goto out_update;

	list_for_each_entry(adapter, &adapter_list, list) {
		if (num_adapters == 0)
			break;

		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP) ||
			    (num_ports == 0))
				continue;

			for (l = 0; l < port->num_def_qps; l++) {
				struct ehea_port_res *pr = &port->port_res[l];

				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->qp->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->eq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_mr.handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_mr.handle;
			}
			arr[i].adh = adapter->handle;
			arr[i++].fwh = port->qp_eq->fw_handle;
			num_ports--;
		}

		arr[i].adh = adapter->handle;
		arr[i++].fwh = adapter->neq->fw_handle;

		if (adapter->mr.handle) {
			arr[i].adh = adapter->handle;
			arr[i++].fwh = adapter->mr.handle;
		}
		num_adapters--;
	}

out_update:
	kfree(ehea_fw_handles.arr);
	ehea_fw_handles.arr = arr;
	ehea_fw_handles.num_entries = i;
out:
	mutex_unlock(&ehea_fw_handles.lock);
}

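/*
 * Rebuild the flat array of broadcast/multicast (BCMC) registrations
 * for all active ports, using the same count-then-fill pattern as
 * ehea_update_firmware_handles().  GFP_ATOMIC and the irqsave spinlock
 * keep this callable from atomic context.
 */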
static void ehea_update_bcmc_registrations(void)
{
	unsigned long flags;
	struct ehea_bcmc_reg_entry *arr = NULL;
	struct ehea_adapter *adapter;
	struct ehea_mc_list *mc_entry;
	int num_registrations = 0;
	int i = 0;
	int k;

	spin_lock_irqsave(&ehea_bcmc_regs.lock, flags);

	/* Determine number of registrations */
	list_for_each_entry(adapter, &adapter_list, list)
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			num_registrations += 2;	/* Broadcast registrations */

			list_for_each_entry(mc_entry, &port->mc_list->list, list)
				num_registrations += 2;
		}

	if (num_registrations) {
		arr = kcalloc(num_registrations, sizeof(*arr), GFP_ATOMIC);
		if (!arr)
			goto out;  /* Keep the existing array */
	} else
		goto out_update;

	list_for_each_entry(adapter, &adapter_list, list) {
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			if (num_registrations == 0)
				goto out_update;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_UNTAGGED;
			arr[i++].macaddr = port->mac_addr;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_VLANID_ALL;
			arr[i++].macaddr = port->mac_addr;
			num_registrations -= 2;

			list_for_each_entry(mc_entry,
					    &port->mc_list->list, list) {
				if (num_registrations == 0)
					goto out_update;

				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_UNTAGGED;
				if (mc_entry->macaddr == 0)
					arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL;
				arr[i++].macaddr = mc_entry->macaddr;

				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_VLANID_ALL;
				if (mc_entry->macaddr == 0)
					arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL;
				arr[i++].macaddr = mc_entry->macaddr;
				num_registrations -= 2;
			}
		}
	}

out_update:
	kfree(ehea_bcmc_regs.arr);
	ehea_bcmc_regs.arr = arr;
	ehea_bcmc_regs.num_entries = i;
out:
	spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
}

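/* ndo_get_stats64: sum the per-queue software packet/byte counters;
 * multicast and rx_errors come from the hardware counters cached by
 * ehea_update_stats() below. */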
static void ehea_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct ehea_port *port = netdev_priv(dev);
	u64 rx_packets = 0, tx_packets = 0, rx_bytes = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		rx_packets += port->port_res[i].rx_packets;
		rx_bytes   += port->port_res[i].rx_bytes;
	}

	for (i = 0; i < port->num_def_qps; i++) {
		tx_packets += port->port_res[i].tx_packets;
		tx_bytes   += port->port_res[i].tx_bytes;
	}

	stats->tx_packets = tx_packets;
	stats->rx_bytes = rx_bytes;
	stats->tx_bytes = tx_bytes;
	stats->rx_packets = rx_packets;

	stats->multicast = port->stats.multicast;
	stats->rx_errors = port->stats.rx_errors;
}

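/* Delayed worker: query the hardware port statistics (control block 2)
 * via ehea_h_query_ehea_port() and cache the counters the software
 * path cannot count itself; reschedules itself about once a second. */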
static void ehea_update_stats(struct work_struct *work)
{
	struct ehea_port *port =
		container_of(work, struct ehea_port, stats_work.work);
	struct net_device *dev = port->netdev;
	struct rtnl_link_stats64 *stats = &port->stats;
	struct hcp_ehea_port_cb2 *cb2;
	u64 hret;

	cb2 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb2) {
		netdev_err(dev, "No mem for cb2. Some interface statistics were not updated\n");
		goto resched;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
	if (hret != H_SUCCESS) {
		netdev_err(dev, "query_ehea_port failed\n");
		goto out_herr;
	}

	if (netif_msg_hw(port))
		ehea_dump(cb2, sizeof(*cb2), "net_device_stats");

	stats->multicast = cb2->rxmcp;
	stats->rx_errors = cb2->rxuerr;

out_herr:
	free_page((unsigned long)cb2);
resched:
	schedule_delayed_work(&port->stats_work,
			      round_jiffies_relative(msecs_to_jiffies(1000)));
}

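/* Repost up to nr_of_wqes receive buffers on RQ1.  Allocations that
 * fail are carried over in os_skbs and retried on the next refill; the
 * doorbell is rung once for the whole batch. */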
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int max_index_mask = pr->rq1_skba.len - 1;
	int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;
	int adder = 0;
	int i;

	pr->rq1_skba.os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		if (nr_of_wqes > 0)
			pr->rq1_skba.index = index;
		pr->rq1_skba.os_skbs = fill_wqes;
		return;
	}

	for (i = 0; i < fill_wqes; i++) {
		if (!skb_arr_rq1[index]) {
			skb_arr_rq1[index] = netdev_alloc_skb(dev,
							      EHEA_L_PKT_SIZE);
			if (!skb_arr_rq1[index]) {
				pr->rq1_skba.os_skbs = fill_wqes - i;
				break;
			}
		}
		index--;
		index &= max_index_mask;
		adder++;
	}

	if (adder == 0)
		return;

	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, adder);
}

static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int i;

	if (nr_rq1a > pr->rq1_skba.len) {
		netdev_err(dev, "NR_RQ1A bigger than skb array len\n");
		return;
	}

	for (i = 0; i < nr_rq1a; i++) {
		skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
		if (!skb_arr_rq1[i])
			break;
	}
	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, i - 1);
}

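/* Common refill path for RQ2/RQ3: allocate an skb per WQE, translate
 * its data address, and build a single-segment receive WQE.  iosync()
 * orders the WQE stores before the doorbell update. */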
static int ehea_refill_rq_def(struct ehea_port_res *pr,
			      struct ehea_q_skb_arr *q_skba, int rq_nr,
			      int num_wqes, int wqe_type, int packet_size)
{
	struct net_device *dev = pr->port->netdev;
	struct ehea_qp *qp = pr->qp;
	struct sk_buff **skb_arr = q_skba->arr;
	struct ehea_rwqe *rwqe;
	int i, index, max_index_mask, fill_wqes;
	int adder = 0;
	int ret = 0;

	fill_wqes = q_skba->os_skbs + num_wqes;
	q_skba->os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		q_skba->os_skbs = fill_wqes;
		return ret;
	}

	index = q_skba->index;
	max_index_mask = q_skba->len - 1;
	for (i = 0; i < fill_wqes; i++) {
		u64 tmp_addr;
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev, packet_size);
		if (!skb) {
			q_skba->os_skbs = fill_wqes - i;
			if (q_skba->os_skbs == q_skba->len - 2) {
				netdev_info(pr->port->netdev,
					    "rq%i ran dry - no mem for skb\n",
					    rq_nr);
				ret = -ENOMEM;
			}
			break;
		}

		skb_arr[index] = skb;
		tmp_addr = ehea_map_vaddr(skb->data);
		if (tmp_addr == -1) {
			dev_consume_skb_any(skb);
			q_skba->os_skbs = fill_wqes - i;
			ret = 0;
			break;
		}

		rwqe = ehea_get_next_rwqe(qp, rq_nr);
		rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
			    | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
		rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
		rwqe->sg_list[0].vaddr = tmp_addr;
		rwqe->sg_list[0].len = packet_size;
		rwqe->data_segments = 1;

		index++;
		index &= max_index_mask;
		adder++;
	}

	q_skba->index = index;
	if (adder == 0)
		goto out;

	/* Ring doorbell */
	iosync();
	if (rq_nr == 2)
		ehea_update_rq2a(pr->qp, adder);
	else
		ehea_update_rq3a(pr->qp, adder);
out:
	return ret;
}


static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
				  nr_of_wqes, EHEA_RWQE2_TYPE,
				  EHEA_RQ2_PKT_SIZE);
}


static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
				  nr_of_wqes, EHEA_RWQE3_TYPE,
				  EHEA_MAX_PACKET_SIZE);
}

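/* Decode the RQ number from the CQE type and validate its status; a
 * TCP checksum error on a frame with header_length == 0 is not treated
 * as an error. */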
static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
{
	*rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
	if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
		return 0;
	if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
	    (cqe->header_length == 0))
		return 0;
	return -EINVAL;
}

static inline void ehea_fill_skb(struct net_device *dev,
				 struct sk_buff *skb, struct ehea_cqe *cqe,
				 struct ehea_port_res *pr)
{
	int length = cqe->num_bytes_transfered - 4;	/*remove CRC */

	skb_put(skb, length);
	skb->protocol = eth_type_trans(skb, dev);

	/* The packet was not an IPV4 packet so a complemented checksum was
	   calculated. The value is found in the Internet Checksum field. */
	if (cqe->status & EHEA_CQE_BLIND_CKSUM) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = csum_unfold(~cqe->inet_checksum_value);
	} else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb_record_rx_queue(skb, pr - &pr->port->port_res[0]);
}

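/* Fetch the skb a CQE refers to and clear its ring slot, prefetching
 * the next slot's skb header and payload to hide cache misses on the
 * hot receive path. */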
static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
					       int arr_len,
					       struct ehea_cqe *cqe)
{
	int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
	struct sk_buff *skb;
	void *pref;
	int x;

	x = skb_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	if (pref) {
		prefetchw(pref);
		prefetchw(pref + EHEA_CACHE_LINE);

		pref = (skb_array[x]->data);
		prefetch(pref);
		prefetch(pref + EHEA_CACHE_LINE);
		prefetch(pref + EHEA_CACHE_LINE * 2);
		prefetch(pref + EHEA_CACHE_LINE * 3);
	}

	skb = skb_array[skb_index];
	skb_array[skb_index] = NULL;
	return skb;
}

static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
						  int arr_len, int wqe_index)
{
	struct sk_buff *skb;
	void *pref;
	int x;

	x = wqe_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	if (pref) {
		prefetchw(pref);
		prefetchw(pref + EHEA_CACHE_LINE);

		pref = (skb_array[x]->data);
		prefetchw(pref);
		prefetchw(pref + EHEA_CACHE_LINE);
	}

	skb = skb_array[wqe_index];
	skb_array[wqe_index] = NULL;
	return skb;
}

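/* Count the receive error, drop the affected RQ2/RQ3 buffer, and
 * request a port reset for fatal errors.  Returns 1 when a reset was
 * scheduled, 0 otherwise. */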
static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
				 struct ehea_cqe *cqe, int *processed_rq2,
				 int *processed_rq3)
{
	struct sk_buff *skb;

	if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
		pr->p_stats.err_tcp_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_IP)
		pr->p_stats.err_ip_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
		pr->p_stats.err_frame_crc++;

	if (rq == 2) {
		*processed_rq2 += 1;
		skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
		dev_kfree_skb(skb);
	} else if (rq == 3) {
		*processed_rq3 += 1;
		skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
		dev_kfree_skb(skb);
	}

	if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
		if (netif_msg_rx_err(pr->port)) {
			pr_err("Critical receive error for QP %d. Resetting port.\n",
			       pr->qp->init_attr.qp_nr);
			ehea_dump(cqe, sizeof(*cqe), "CQE");
		}
		ehea_schedule_port_reset(pr->port);
		return 1;
	}

	return 0;
}

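/*
 * NAPI receive path: drain up to 'budget' CQEs.  RQ1 ("low latency")
 * payloads are copied out from behind the CQE header (offset 64) into
 * an skb, while RQ2/RQ3 frames are handed up zero-copy from the
 * preposted buffers.  Consumed WQEs are refilled on all three RQs at
 * the end.
 */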
static int ehea_proc_rwqes(struct net_device *dev,
			   struct ehea_port_res *pr,
			   int budget)
{
	struct ehea_port *port = pr->port;
	struct ehea_qp *qp = pr->qp;
	struct ehea_cqe *cqe;
	struct sk_buff *skb;
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
	int skb_arr_rq1_len = pr->rq1_skba.len;
	int skb_arr_rq2_len = pr->rq2_skba.len;
	int skb_arr_rq3_len = pr->rq3_skba.len;
	int processed, processed_rq1, processed_rq2, processed_rq3;
	u64 processed_bytes = 0;
	int wqe_index, last_wqe_index, rq, port_reset;

	processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
	last_wqe_index = 0;

	cqe = ehea_poll_rq1(qp, &wqe_index);
	while ((processed < budget) && cqe) {
		ehea_inc_rq1(qp);
		processed_rq1++;
		processed++;
		if (netif_msg_rx_status(port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		last_wqe_index = wqe_index;
		rmb();
		if (!ehea_check_cqe(cqe, &rq)) {
			if (rq == 1) {
				/* LL RQ1 */
				skb = get_skb_by_index_ll(skb_arr_rq1,
							  skb_arr_rq1_len,
							  wqe_index);
				if (unlikely(!skb)) {
					netif_info(port, rx_err, dev,
						   "LL rq1: skb=NULL\n");

					skb = netdev_alloc_skb(dev,
							       EHEA_L_PKT_SIZE);
					if (!skb)
						break;
				}
				skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
						 cqe->num_bytes_transfered - 4);
				ehea_fill_skb(dev, skb, cqe, pr);
			} else if (rq == 2) {
				/* RQ2 */
				skb = get_skb_by_index(skb_arr_rq2,
						       skb_arr_rq2_len, cqe);
				if (unlikely(!skb)) {
					netif_err(port, rx_err, dev,
						  "rq2: skb=NULL\n");
					break;
				}
				ehea_fill_skb(dev, skb, cqe, pr);
				processed_rq2++;
			} else {
				/* RQ3 */
				skb = get_skb_by_index(skb_arr_rq3,
						       skb_arr_rq3_len, cqe);
				if (unlikely(!skb)) {
					netif_err(port, rx_err, dev,
						  "rq3: skb=NULL\n");
					break;
				}
				ehea_fill_skb(dev, skb, cqe, pr);
				processed_rq3++;
			}

			processed_bytes += skb->len;

			if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
						       cqe->vlan_tag);

			napi_gro_receive(&pr->napi, skb);
		} else {
			pr->p_stats.poll_receive_errors++;
			port_reset = ehea_treat_poll_error(pr, rq, cqe,
							   &processed_rq2,
							   &processed_rq3);
			if (port_reset)
				break;
		}
		cqe = ehea_poll_rq1(qp, &wqe_index);
	}

	pr->rx_packets += processed;
	pr->rx_bytes += processed_bytes;

	ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
	ehea_refill_rq2(pr, processed_rq2);
	ehea_refill_rq3(pr, processed_rq3);

	return processed;
}

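/*
 * Magic wr_id posted on each send queue by check_sqs() to verify that
 * the HW and SW queues are still in sync; the send-completion handler
 * recognizes it and sets sq_restart_flag.
 */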
#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull

static void reset_sq_restart_flag(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		pr->sq_restart_flag = 0;
	}
	wake_up(&port->restart_wq);
}

static void check_sqs(struct ehea_port *port)
{
	struct ehea_swqe *swqe;
	int swqe_index;
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		int ret;
		swqe = ehea_get_swqe(pr->qp, &swqe_index);
		memset(swqe, 0, SWQE_HEADER_SIZE);
		atomic_dec(&pr->swqe_avail);

		swqe->tx_control |= EHEA_SWQE_PURGE;
		swqe->wr_id = SWQE_RESTART_CHECK;
		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
		swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
		swqe->immediate_data_length = 80;

		ehea_post_swqe(pr->qp, swqe);

		ret = wait_event_timeout(port->restart_wq,
					 pr->sq_restart_flag == 0,
					 msecs_to_jiffies(100));

		if (!ret) {
			pr_err("HW/SW queues out of sync\n");
			ehea_schedule_port_reset(pr->port);
			return;
		}
	}
}


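/*
 * Reap send completions: free transmitted skbs, return WQE credits to
 * swqe_avail, and wake the TX queue once enough credits are back.
 * Fatal completion errors trigger a port reset.
 */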
static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
{
	struct sk_buff *skb;
	struct ehea_cq *send_cq = pr->send_cq;
	struct ehea_cqe *cqe;
	int quota = my_quota;
	int cqe_counter = 0;
	int swqe_av = 0;
	int index;
	struct netdev_queue *txq = netdev_get_tx_queue(pr->port->netdev,
						pr - &pr->port->port_res[0]);

	cqe = ehea_poll_cq(send_cq);
	while (cqe && (quota > 0)) {
		ehea_inc_cq(send_cq);

		cqe_counter++;
		rmb();

		if (cqe->wr_id == SWQE_RESTART_CHECK) {
			pr->sq_restart_flag = 1;
			swqe_av++;
			break;
		}

		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
			pr_err("Bad send completion status=0x%04X\n",
			       cqe->status);

			if (netif_msg_tx_err(pr->port))
				ehea_dump(cqe, sizeof(*cqe), "Send CQE");

			if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
				pr_err("Resetting port\n");
				ehea_schedule_port_reset(pr->port);
				break;
			}
		}

		if (netif_msg_tx_done(pr->port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
			   == EHEA_SWQE2_TYPE)) {

			index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
			skb = pr->sq_skba.arr[index];
			dev_consume_skb_any(skb);
			pr->sq_skba.arr[index] = NULL;
		}

		swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
		quota--;

		cqe = ehea_poll_cq(send_cq);
	}

	ehea_update_feca(send_cq, cqe_counter);
	atomic_add(swqe_av, &pr->swqe_avail);

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	wake_up(&pr->port->swqe_avail_wq);

	return cqe;
}

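/*
 * NAPI poll: reap send completions first (up to EHEA_POLL_MAX_CQES),
 * then receive up to the NAPI budget.  When the budget was not
 * exhausted, NAPI is completed and event/interrupt generation on both
 * the receive and send CQs is re-armed.
 */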
[NET]: Make NAPI polling independent of struct net_device objects.
Several devices have multiple independant RX queues per net
device, and some have a single interrupt doorbell for several
queues.
In either case, it's easier to support layouts like that if the
structure representing the poll is independant from the net
device itself.
The signature of the ->poll() call back goes from:
int foo_poll(struct net_device *dev, int *budget)
to
int foo_poll(struct napi_struct *napi, int budget)
The caller is returned the number of RX packets processed (or
the number of "NAPI credits" consumed if you want to get
abstract). The callee no longer messes around bumping
dev->quota, *budget, etc. because that is all handled in the
caller upon return.
The napi_struct is to be embedded in the device driver private data
structures.
Furthermore, it is the driver's responsibility to disable all NAPI
instances in it's ->stop() device close handler. Since the
napi_struct is privatized into the driver's private data structures,
only the driver knows how to get at all of the napi_struct instances
it may have per-device.
With lots of help and suggestions from Rusty Russell, Roland Dreier,
Michael Chan, Jeff Garzik, and Jamal Hadi Salim.
Bug fixes from Thomas Graf, Roland Dreier, Peter Zijlstra,
Joseph Fannin, Scott Wood, Hans J. Koch, and Michael Chan.
[ Ported to current tree and all drivers converted. Integrated
Stephen's follow-on kerneldoc additions, and restored poll_list
handling to the old style to fix mutual exclusion issues. -DaveM ]
Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2007-10-04 07:41:36 +08:00
|
|
|
#define EHEA_POLL_MAX_CQES 65535
|
2007-03-01 01:34:10 +08:00
|
|
|
|
[NET]: Make NAPI polling independent of struct net_device objects.
Several devices have multiple independant RX queues per net
device, and some have a single interrupt doorbell for several
queues.
In either case, it's easier to support layouts like that if the
structure representing the poll is independant from the net
device itself.
The signature of the ->poll() call back goes from:
int foo_poll(struct net_device *dev, int *budget)
to
int foo_poll(struct napi_struct *napi, int budget)
The caller is returned the number of RX packets processed (or
the number of "NAPI credits" consumed if you want to get
abstract). The callee no longer messes around bumping
dev->quota, *budget, etc. because that is all handled in the
caller upon return.
The napi_struct is to be embedded in the device driver private data
structures.
Furthermore, it is the driver's responsibility to disable all NAPI
instances in it's ->stop() device close handler. Since the
napi_struct is privatized into the driver's private data structures,
only the driver knows how to get at all of the napi_struct instances
it may have per-device.
With lots of help and suggestions from Rusty Russell, Roland Dreier,
Michael Chan, Jeff Garzik, and Jamal Hadi Salim.
Bug fixes from Thomas Graf, Roland Dreier, Peter Zijlstra,
Joseph Fannin, Scott Wood, Hans J. Koch, and Michael Chan.
[ Ported to current tree and all drivers converted. Integrated
Stephen's follow-on kerneldoc additions, and restored poll_list
handling to the old style to fix mutual exclusion issues. -DaveM ]
Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2007-10-04 07:41:36 +08:00
|
|
|
static int ehea_poll(struct napi_struct *napi, int budget)
|
2006-09-13 23:44:31 +08:00
|
|
|
{
|
2008-02-01 10:20:49 +08:00
|
|
|
struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
|
|
|
|
napi);
|
[NET]: Make NAPI polling independent of struct net_device objects.
Several devices have multiple independant RX queues per net
device, and some have a single interrupt doorbell for several
queues.
In either case, it's easier to support layouts like that if the
structure representing the poll is independant from the net
device itself.
The signature of the ->poll() call back goes from:
int foo_poll(struct net_device *dev, int *budget)
to
int foo_poll(struct napi_struct *napi, int budget)
The caller is returned the number of RX packets processed (or
the number of "NAPI credits" consumed if you want to get
abstract). The callee no longer messes around bumping
dev->quota, *budget, etc. because that is all handled in the
caller upon return.
The napi_struct is to be embedded in the device driver private data
structures.
Furthermore, it is the driver's responsibility to disable all NAPI
instances in it's ->stop() device close handler. Since the
napi_struct is privatized into the driver's private data structures,
only the driver knows how to get at all of the napi_struct instances
it may have per-device.
With lots of help and suggestions from Rusty Russell, Roland Dreier,
Michael Chan, Jeff Garzik, and Jamal Hadi Salim.
Bug fixes from Thomas Graf, Roland Dreier, Peter Zijlstra,
Joseph Fannin, Scott Wood, Hans J. Koch, and Michael Chan.
[ Ported to current tree and all drivers converted. Integrated
Stephen's follow-on kerneldoc additions, and restored poll_list
handling to the old style to fix mutual exclusion issues. -DaveM ]
Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2007-10-04 07:41:36 +08:00
|
|
|
struct net_device *dev = pr->port->netdev;
|
2007-03-01 01:34:10 +08:00
|
|
|
struct ehea_cqe *cqe;
|
|
|
|
struct ehea_cqe *cqe_skb = NULL;
|
2011-10-14 13:31:00 +08:00
|
|
|
int wqe_index;
|
[NET]: Make NAPI polling independent of struct net_device objects.
Several devices have multiple independant RX queues per net
device, and some have a single interrupt doorbell for several
queues.
In either case, it's easier to support layouts like that if the
structure representing the poll is independant from the net
device itself.
The signature of the ->poll() call back goes from:
int foo_poll(struct net_device *dev, int *budget)
to
int foo_poll(struct napi_struct *napi, int budget)
The caller is returned the number of RX packets processed (or
the number of "NAPI credits" consumed if you want to get
abstract). The callee no longer messes around bumping
dev->quota, *budget, etc. because that is all handled in the
caller upon return.
The napi_struct is to be embedded in the device driver private data
structures.
Furthermore, it is the driver's responsibility to disable all NAPI
instances in it's ->stop() device close handler. Since the
napi_struct is privatized into the driver's private data structures,
only the driver knows how to get at all of the napi_struct instances
it may have per-device.
With lots of help and suggestions from Rusty Russell, Roland Dreier,
Michael Chan, Jeff Garzik, and Jamal Hadi Salim.
Bug fixes from Thomas Graf, Roland Dreier, Peter Zijlstra,
Joseph Fannin, Scott Wood, Hans J. Koch, and Michael Chan.
[ Ported to current tree and all drivers converted. Integrated
Stephen's follow-on kerneldoc additions, and restored poll_list
handling to the old style to fix mutual exclusion issues. -DaveM ]
Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2007-10-04 07:41:36 +08:00
|
|
|
int rx = 0;
|
2007-03-01 01:34:10 +08:00
|
|
|
|
2007-10-04 07:41:36 +08:00
|
|
|
cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
|
2011-10-14 13:31:00 +08:00
|
|
|
rx += ehea_proc_rwqes(dev, pr, budget - rx);
|
2007-10-04 07:41:36 +08:00
|
|
|
|
2011-10-14 13:31:00 +08:00
|
|
|
while (rx != budget) {
|
2009-01-20 08:43:59 +08:00
|
|
|
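/* Out of work: complete NAPI, re-arm both CQs' event notification, then
 * re-poll after the rmb().  An event that raced with the re-arm would
 * otherwise be lost, so reschedule if new work shows up. */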
napi_complete(napi);
|
2007-03-01 01:34:10 +08:00
|
|
|
ehea_reset_cq_ep(pr->recv_cq);
|
|
|
|
ehea_reset_cq_ep(pr->send_cq);
|
|
|
|
ehea_reset_cq_n1(pr->recv_cq);
|
|
|
|
ehea_reset_cq_n1(pr->send_cq);
|
2010-06-15 13:35:16 +08:00
|
|
|
rmb();
|
2007-03-01 01:34:10 +08:00
|
|
|
cqe = ehea_poll_rq1(pr->qp, &wqe_index);
|
|
|
|
cqe_skb = ehea_poll_cq(pr->send_cq);
|
|
|
|
|
2007-03-23 00:50:24 +08:00
|
|
|
if (!cqe && !cqe_skb)
|
2007-10-04 07:41:36 +08:00
|
|
|
return rx;
|
2007-03-01 01:34:10 +08:00
|
|
|
|
2009-01-20 08:43:59 +08:00
|
|
|
if (!napi_reschedule(napi))
|
2007-10-04 07:41:36 +08:00
|
|
|
return rx;
|
2007-03-01 01:34:10 +08:00
|
|
|
|
2007-10-04 07:41:36 +08:00
|
|
|
cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
|
|
|
|
rx += ehea_proc_rwqes(dev, pr, budget - rx);
|
|
|
|
}
|
2007-03-23 00:50:24 +08:00
|
|
|
|
2007-10-04 07:41:36 +08:00
|
|
|
return rx;
|
2006-09-13 23:44:31 +08:00
|
|
|
}
|
|
|
|
|
2007-07-23 22:05:03 +08:00
|
|
|
#ifdef CONFIG_NET_POLL_CONTROLLER
|
|
|
|
static void ehea_netpoll(struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct ehea_port *port = netdev_priv(dev);
|
2007-10-04 07:41:36 +08:00
|
|
|
int i;
|
2007-07-23 22:05:03 +08:00
|
|
|
|
2007-10-04 07:41:36 +08:00
|
|
|
for (i = 0; i < port->num_def_qps; i++)
|
2009-01-20 08:43:59 +08:00
|
|
|
napi_schedule(&port->port_res[i].napi);
|
2007-07-23 22:05:03 +08:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
IRQ: Maintain regs pointer globally rather than passing to IRQ handlers
Maintain a per-CPU global "struct pt_regs *" variable which can be used instead
of passing regs around manually through all ~1800 interrupt handlers in the
Linux kernel.
The regs pointer is used in few places, but it potentially costs both stack
space and code to pass it around. On the FRV arch, removing the regs parameter
from all the genirq functions results in a 20% speed-up of the IRQ exit path
(i.e. from leaving timer_interrupt() to leaving do_IRQ()).
Where appropriate, an arch may override the generic storage facility and do
something different with the variable. On FRV, for instance, the address is
maintained in GR28 at all times inside the kernel as part of general exception
handling.
Having looked over the code, it appears that the parameter may be handed down
through up to twenty or so layers of functions. Consider a USB character
device attached to a USB hub, attached to a USB controller that posts its
interrupts through a cascaded auxiliary interrupt controller. A character
device driver may want to pass regs to the sysrq handler through the input
layer which adds another few layers of parameter passing.
I've built this code with allyesconfig for x86_64 and i386. I've run-tested the
main part of the code on FRV and i386, though I can't test most of the drivers.
I've also done partial conversion for powerpc and MIPS - these at least compile
with minimal configurations.
This will affect all archs. Mostly the changes should be relatively easy.
Take do_IRQ(), store the regs pointer at the beginning, saving the old one:
struct pt_regs *old_regs = set_irq_regs(regs);
And put the old one back at the end:
set_irq_regs(old_regs);
Don't pass regs through to generic_handle_irq() or __do_IRQ().
In timer_interrupt(), this sort of change will be necessary:
- update_process_times(user_mode(regs));
- profile_tick(CPU_PROFILING, regs);
+ update_process_times(user_mode(get_irq_regs()));
+ profile_tick(CPU_PROFILING);
I'd like to move update_process_times()'s use of get_irq_regs() into itself,
except that i386, alone of the archs, uses something other than user_mode().
Some notes on the interrupt handling in the drivers:
(*) input_regs() is now gone entirely. The regs pointer is no longer stored in
the input_dev struct.
(*) finish_unlinks() in drivers/usb/host/ohci-q.c needs checking. It does
something different depending on whether it's been supplied with a regs
pointer or not.
(*) Various IRQ handler function pointers have been moved to type
irq_handler_t.
Signed-Off-By: David Howells <dhowells@redhat.com>
(cherry picked from 1b16e7ac850969f38b375e511e3fa2f474a33867 commit)
2006-10-05 21:55:46 +08:00
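To make the conversion recipe above concrete, a minimal hypothetical sketch;
my_do_IRQ() is illustrative only, while set_irq_regs(), get_irq_regs() and
generic_handle_irq() are the real interfaces:

#include <linux/irq.h>
#include <asm/irq_regs.h>

void my_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);	/* stash in the per-CPU variable */

	generic_handle_irq(irq);	/* handlers that need regs call get_irq_regs() */

	set_irq_regs(old_regs);		/* restore so nested interrupts unwind correctly */
}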
|
|
|
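/* Per-queue completion interrupt: all receive/send processing is
 * deferred to the NAPI poll routine. */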
static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
|
2006-09-13 23:44:31 +08:00
|
|
|
{
|
|
|
|
struct ehea_port_res *pr = param;
|
2007-03-01 01:34:10 +08:00
|
|
|
|
2009-01-20 08:43:59 +08:00
|
|
|
napi_schedule(&pr->napi);
|
2007-03-01 01:34:10 +08:00
|
|
|
|
2006-09-13 23:44:31 +08:00
|
|
|
return IRQ_HANDLED;
|
|
|
|
}
|
|
|
|
|
2006-10-05 21:55:46 +08:00
|
|
|
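/* QP affiliated-error event interrupt: log every entry on the error
 * event queue and schedule a port reset when the error data (or a
 * CQ/EQ error) requires it. */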
static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
|
2006-09-13 23:44:31 +08:00
|
|
|
{
|
|
|
|
struct ehea_port *port = param;
|
|
|
|
struct ehea_eqe *eqe;
|
2007-02-09 16:10:51 +08:00
|
|
|
struct ehea_qp *qp;
|
2006-09-13 23:44:31 +08:00
|
|
|
u32 qp_token;
|
2010-04-21 07:10:55 +08:00
|
|
|
u64 resource_type, aer, aerr;
|
|
|
|
int reset_port = 0;
|
2006-09-13 23:44:31 +08:00
|
|
|
|
|
|
|
eqe = ehea_poll_eq(port->qp_eq);
|
2007-01-22 19:54:50 +08:00
|
|
|
|
2006-09-13 23:44:31 +08:00
|
|
|
while (eqe) {
|
|
|
|
qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
|
2010-12-14 02:05:14 +08:00
|
|
|
pr_err("QP aff_err: entry=0x%llx, token=0x%x\n",
|
|
|
|
eqe->entry, qp_token);
|
2007-02-09 16:10:51 +08:00
|
|
|
|
|
|
|
qp = port->port_res[qp_token].qp;
|
2010-04-21 07:10:55 +08:00
|
|
|
|
|
|
|
resource_type = ehea_error_data(port->adapter, qp->fw_handle,
|
|
|
|
&aer, &aerr);
|
|
|
|
|
|
|
|
if (resource_type == EHEA_AER_RESTYPE_QP) {
|
|
|
|
if ((aer & EHEA_AER_RESET_MASK) ||
|
|
|
|
(aerr & EHEA_AERR_RESET_MASK))
|
|
|
|
reset_port = 1;
|
|
|
|
} else
|
|
|
|
reset_port = 1; /* Reset in case of CQ or EQ error */
|
|
|
|
|
2007-01-22 19:54:50 +08:00
|
|
|
eqe = ehea_poll_eq(port->qp_eq);
|
2006-09-13 23:44:31 +08:00
|
|
|
}
|
|
|
|
|
2010-04-21 07:10:55 +08:00
|
|
|
if (reset_port) {
|
2010-12-14 02:05:14 +08:00
|
|
|
pr_err("Resetting port\n");
|
2010-04-21 07:10:55 +08:00
|
|
|
ehea_schedule_port_reset(port);
|
|
|
|
}
|
2007-02-09 16:10:51 +08:00
|
|
|
|
2006-09-13 23:44:31 +08:00
|
|
|
return IRQ_HANDLED;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
|
|
|
|
int logical_port)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
2007-03-01 01:34:02 +08:00
|
|
|
for (i = 0; i < EHEA_MAX_PORTS; i++)
|
2007-01-22 19:55:20 +08:00
|
|
|
if (adapter->port[i])
|
2007-07-02 19:00:46 +08:00
|
|
|
if (adapter->port[i]->logical_port_id == logical_port)
|
2007-01-22 19:55:20 +08:00
|
|
|
return adapter->port[i];
|
2006-09-13 23:44:31 +08:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
int ehea_sense_port_attr(struct ehea_port *port)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
u64 hret;
|
|
|
|
struct hcp_ehea_port_cb0 *cb0;
|
|
|
|
|
2008-02-01 10:20:49 +08:00
|
|
|
/* may be called via ehea_neq_tasklet() */
|
2009-01-22 06:45:33 +08:00
|
|
|
cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
|
2008-02-01 10:20:49 +08:00
|
|
|
if (!cb0) {
|
2010-12-14 02:05:14 +08:00
|
|
|
pr_err("no mem for cb0\n");
|
2006-09-13 23:44:31 +08:00
|
|
|
ret = -ENOMEM;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
hret = ehea_h_query_ehea_port(port->adapter->handle,
|
|
|
|
port->logical_port_id, H_PORT_CB0,
|
|
|
|
EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
|
|
|
|
cb0);
|
|
|
|
if (hret != H_SUCCESS) {
|
|
|
|
ret = -EIO;
|
|
|
|
goto out_free;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* MAC address */
|
|
|
|
port->mac_addr = cb0->port_mac_addr << 16;
|
|
|
|
|
2008-02-01 10:20:49 +08:00
|
|
|
if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
|
2006-09-13 23:44:31 +08:00
|
|
|
ret = -EADDRNOTAVAIL;
|
|
|
|
goto out_free;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Port speed */
|
|
|
|
switch (cb0->port_speed) {
|
|
|
|
case H_SPEED_10M_H:
|
|
|
|
port->port_speed = EHEA_SPEED_10M;
|
|
|
|
port->full_duplex = 0;
|
|
|
|
break;
|
|
|
|
case H_SPEED_10M_F:
|
|
|
|
port->port_speed = EHEA_SPEED_10M;
|
|
|
|
port->full_duplex = 1;
|
|
|
|
break;
|
|
|
|
case H_SPEED_100M_H:
|
|
|
|
port->port_speed = EHEA_SPEED_100M;
|
|
|
|
port->full_duplex = 0;
|
|
|
|
break;
|
|
|
|
case H_SPEED_100M_F:
|
|
|
|
port->port_speed = EHEA_SPEED_100M;
|
|
|
|
port->full_duplex = 1;
|
|
|
|
break;
|
|
|
|
case H_SPEED_1G_F:
|
|
|
|
port->port_speed = EHEA_SPEED_1G;
|
|
|
|
port->full_duplex = 1;
|
|
|
|
break;
|
|
|
|
case H_SPEED_10G_F:
|
|
|
|
port->port_speed = EHEA_SPEED_10G;
|
|
|
|
port->full_duplex = 1;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
port->port_speed = 0;
|
|
|
|
port->full_duplex = 0;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2007-01-22 19:53:20 +08:00
|
|
|
port->autoneg = 1;
|
2007-03-01 01:34:10 +08:00
|
|
|
port->num_mcs = cb0->num_default_qps;
|
2007-01-22 19:53:20 +08:00
|
|
|
|
2006-09-13 23:44:31 +08:00
|
|
|
/* Number of default QPs */
|
2007-03-01 01:34:10 +08:00
|
|
|
if (use_mcs)
|
|
|
|
port->num_def_qps = cb0->num_default_qps;
|
|
|
|
else
|
|
|
|
port->num_def_qps = 1;
|
2006-09-13 23:44:31 +08:00
|
|
|
|
|
|
|
if (!port->num_def_qps) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto out_free;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
out_free:
|
|
|
|
if (ret || netif_msg_probe(port))
|
|
|
|
ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
|
2009-01-22 06:45:33 +08:00
|
|
|
free_page((unsigned long)cb0);
|
2006-09-13 23:44:31 +08:00
|
|
|
out:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
|
|
|
|
{
|
|
|
|
struct hcp_ehea_port_cb4 *cb4;
|
|
|
|
u64 hret;
|
|
|
|
int ret = 0;
|
|
|
|
|
2009-01-22 06:45:33 +08:00
|
|
|
cb4 = (void *)get_zeroed_page(GFP_KERNEL);
|
2006-09-13 23:44:31 +08:00
|
|
|
if (!cb4) {
|
2010-12-14 02:05:14 +08:00
|
|
|
pr_err("no mem for cb4\n");
|
2006-09-13 23:44:31 +08:00
|
|
|
ret = -ENOMEM;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
cb4->port_speed = port_speed;
|
|
|
|
|
|
|
|
netif_carrier_off(port->netdev);
|
|
|
|
|
|
|
|
hret = ehea_h_modify_ehea_port(port->adapter->handle,
|
|
|
|
port->logical_port_id,
|
|
|
|
H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
|
|
|
|
if (hret == H_SUCCESS) {
|
|
|
|
port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;
|
|
|
|
|
|
|
|
hret = ehea_h_query_ehea_port(port->adapter->handle,
|
|
|
|
port->logical_port_id,
|
|
|
|
H_PORT_CB4, H_PORT_CB4_SPEED,
|
|
|
|
cb4);
|
|
|
|
if (hret == H_SUCCESS) {
|
|
|
|
switch (cb4->port_speed) {
|
|
|
|
case H_SPEED_10M_H:
|
|
|
|
port->port_speed = EHEA_SPEED_10M;
|
|
|
|
port->full_duplex = 0;
|
|
|
|
break;
|
|
|
|
case H_SPEED_10M_F:
|
|
|
|
port->port_speed = EHEA_SPEED_10M;
|
|
|
|
port->full_duplex = 1;
|
|
|
|
break;
|
|
|
|
case H_SPEED_100M_H:
|
|
|
|
port->port_speed = EHEA_SPEED_100M;
|
|
|
|
port->full_duplex = 0;
|
|
|
|
break;
|
|
|
|
case H_SPEED_100M_F:
|
|
|
|
port->port_speed = EHEA_SPEED_100M;
|
|
|
|
port->full_duplex = 1;
|
|
|
|
break;
|
|
|
|
case H_SPEED_1G_F:
|
|
|
|
port->port_speed = EHEA_SPEED_1G;
|
|
|
|
port->full_duplex = 1;
|
|
|
|
break;
|
|
|
|
case H_SPEED_10G_F:
|
|
|
|
port->port_speed = EHEA_SPEED_10G;
|
|
|
|
port->full_duplex = 1;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
port->port_speed = 0;
|
|
|
|
port->full_duplex = 0;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
} else {
|
2010-12-14 02:05:14 +08:00
|
|
|
pr_err("Failed sensing port speed\n");
|
2006-09-13 23:44:31 +08:00
|
|
|
ret = -EIO;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (hret == H_AUTHORITY) {
|
2010-12-14 02:05:14 +08:00
|
|
|
pr_info("Hypervisor denied setting port speed\n");
|
2006-09-13 23:44:31 +08:00
|
|
|
ret = -EPERM;
|
|
|
|
} else {
|
|
|
|
ret = -EIO;
|
2010-12-14 02:05:14 +08:00
|
|
|
pr_err("Failed setting port speed\n");
|
2006-09-13 23:44:31 +08:00
|
|
|
}
|
|
|
|
}
|
2007-09-07 18:30:17 +08:00
|
|
|
if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
|
|
|
|
netif_carrier_on(port->netdev);
|
|
|
|
|
2009-01-22 06:45:33 +08:00
|
|
|
free_page((unsigned long)cb4);
|
2006-09-13 23:44:31 +08:00
|
|
|
out:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
u8 ec;
|
|
|
|
u8 portnum;
|
|
|
|
struct ehea_port *port;
|
2010-12-14 02:05:14 +08:00
|
|
|
struct net_device *dev;
|
2006-09-13 23:44:31 +08:00
|
|
|
|
|
|
|
ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
|
|
|
|
portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
|
|
|
|
port = ehea_get_port(adapter, portnum);
|
2016-05-18 04:28:54 +08:00
|
|
|
if (!port) {
|
|
|
|
netdev_err(NULL, "unknown portnum %x\n", portnum);
|
|
|
|
return;
|
|
|
|
}
|
2010-12-14 02:05:14 +08:00
|
|
|
dev = port->netdev;
|
2006-09-13 23:44:31 +08:00
|
|
|
|
|
|
|
switch (ec) {
|
|
|
|
case EHEA_EC_PORTSTATE_CHG: /* port state change */
|
|
|
|
|
|
|
|
if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
|
2010-12-14 02:05:14 +08:00
|
|
|
if (!netif_carrier_ok(dev)) {
|
2006-10-25 19:11:42 +08:00
|
|
|
ret = ehea_sense_port_attr(port);
|
2006-09-13 23:44:31 +08:00
|
|
|
if (ret) {
|
2010-12-14 02:05:14 +08:00
|
|
|
netdev_err(dev, "failed resensing port attributes\n");
|
2006-09-13 23:44:31 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2010-12-14 02:05:14 +08:00
|
|
|
netif_info(port, link, dev,
|
|
|
|
"Logical port up: %dMbps %s Duplex\n",
|
|
|
|
port->port_speed,
|
|
|
|
port->full_duplex == 1 ?
|
|
|
|
"Full" : "Half");
|
2006-09-13 23:44:31 +08:00
|
|
|
|
2010-12-14 02:05:14 +08:00
|
|
|
netif_carrier_on(dev);
|
|
|
|
netif_wake_queue(dev);
|
2006-09-13 23:44:31 +08:00
|
|
|
}
|
|
|
|
} else
|
2010-12-14 02:05:14 +08:00
|
|
|
if (netif_carrier_ok(dev)) {
|
|
|
|
netif_info(port, link, dev,
|
|
|
|
"Logical port down\n");
|
|
|
|
netif_carrier_off(dev);
|
2011-10-14 13:30:59 +08:00
|
|
|
netif_tx_disable(dev);
|
2006-09-13 23:44:31 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
|
2007-09-07 18:30:17 +08:00
|
|
|
port->phy_link = EHEA_PHY_LINK_UP;
|
2010-12-14 02:05:14 +08:00
|
|
|
netif_info(port, link, dev,
|
|
|
|
"Physical port up\n");
|
2007-09-07 18:30:17 +08:00
|
|
|
if (prop_carrier_state)
|
2010-12-14 02:05:14 +08:00
|
|
|
netif_carrier_on(dev);
|
2006-09-13 23:44:31 +08:00
|
|
|
} else {
|
2007-09-07 18:30:17 +08:00
|
|
|
port->phy_link = EHEA_PHY_LINK_DOWN;
|
2010-12-14 02:05:14 +08:00
|
|
|
netif_info(port, link, dev,
|
|
|
|
"Physical port down\n");
|
2007-09-07 18:30:17 +08:00
|
|
|
if (prop_carrier_state)
|
2010-12-14 02:05:14 +08:00
|
|
|
netif_carrier_off(dev);
|
2006-09-13 23:44:31 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
|
2010-12-14 02:05:14 +08:00
|
|
|
netdev_info(dev,
|
|
|
|
"External switch port is primary port\n");
|
2006-09-13 23:44:31 +08:00
|
|
|
else
|
2010-12-14 02:05:14 +08:00
|
|
|
netdev_info(dev,
|
|
|
|
"External switch port is backup port\n");
|
2006-09-13 23:44:31 +08:00
|
|
|
|
|
|
|
break;
|
|
|
|
case EHEA_EC_ADAPTER_MALFUNC:
|
2010-12-14 02:05:14 +08:00
|
|
|
netdev_err(dev, "Adapter malfunction\n");
|
2006-09-13 23:44:31 +08:00
|
|
|
break;
|
|
|
|
case EHEA_EC_PORT_MALFUNC:
|
2010-12-14 02:05:14 +08:00
|
|
|
netdev_info(dev, "Port malfunction\n");
|
|
|
|
netif_carrier_off(dev);
|
2011-10-14 13:30:59 +08:00
|
|
|
netif_tx_disable(dev);
|
2006-09-13 23:44:31 +08:00
|
|
|
break;
|
|
|
|
default:
|
2010-12-14 02:05:14 +08:00
|
|
|
netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe);
|
2006-09-13 23:44:31 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void ehea_neq_tasklet(unsigned long data)
|
|
|
|
{
|
2008-02-01 10:20:49 +08:00
|
|
|
struct ehea_adapter *adapter = (struct ehea_adapter *)data;
|
2006-09-13 23:44:31 +08:00
|
|
|
struct ehea_eqe *eqe;
|
|
|
|
u64 event_mask;
|
|
|
|
|
|
|
|
eqe = ehea_poll_eq(adapter->neq);
|
2010-12-14 02:05:14 +08:00
|
|
|
pr_debug("eqe=%p\n", eqe);
|
2006-09-13 23:44:31 +08:00
|
|
|
|
|
|
|
while (eqe) {
|
2010-12-14 02:05:14 +08:00
|
|
|
pr_debug("*eqe=%lx\n", (unsigned long) eqe->entry);
|
2006-09-13 23:44:31 +08:00
|
|
|
ehea_parse_eqe(adapter, eqe->entry);
|
|
|
|
eqe = ehea_poll_eq(adapter->neq);
|
2010-12-14 02:05:14 +08:00
|
|
|
pr_debug("next eqe=%p\n", eqe);
|
2006-09-13 23:44:31 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
|
|
|
|
| EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
|
|
|
|
| EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);
|
|
|
|
|
|
|
|
ehea_h_reset_events(adapter->handle,
|
|
|
|
adapter->neq->fw_handle, event_mask);
|
|
|
|
}
|
|
|
|
|
2006-10-05 21:55:46 +08:00
|
|
|
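/* Notification event queue interrupt: defer parsing of the events to
 * ehea_neq_tasklet(). */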
static irqreturn_t ehea_interrupt_neq(int irq, void *param)
|
2006-09-13 23:44:31 +08:00
|
|
|
{
|
|
|
|
struct ehea_adapter *adapter = param;
|
|
|
|
tasklet_hi_schedule(&adapter->neq_tasklet);
|
|
|
|
return IRQ_HANDLED;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static int ehea_fill_port_res(struct ehea_port_res *pr)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;
|
|
|
|
|
2011-01-11 15:45:57 +08:00
|
|
|
ehea_init_fill_rq1(pr, pr->rq1_skba.len);
|
2006-09-13 23:44:31 +08:00
|
|
|
|
2009-01-22 06:45:57 +08:00
|
|
|
ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);
|
2006-09-13 23:44:31 +08:00
|
|
|
|
|
|
|
ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int ehea_reg_interrupts(struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct ehea_port *port = netdev_priv(dev);
|
|
|
|
struct ehea_port_res *pr;
|
|
|
|
int i, ret;
|
|
|
|
|
|
|
|
|
|
|
|
snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
|
|
|
|
dev->name);
|
|
|
|
|
2007-09-26 17:45:51 +08:00
|
|
|
ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
|
2006-09-13 23:44:31 +08:00
|
|
|
ehea_qp_aff_irq_handler,
|
2013-09-12 11:46:11 +08:00
|
|
|
0, port->int_aff_name, port);
|
2006-09-13 23:44:31 +08:00
|
|
|
if (ret) {
|
2010-12-14 02:05:14 +08:00
|
|
|
netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n",
|
|
|
|
port->qp_eq->attr.ist1);
|
2006-09-13 23:44:31 +08:00
|
|
|
goto out_free_qpeq;
|
|
|
|
}
|
|
|
|
|
2010-12-14 02:05:14 +08:00
|
|
|
netif_info(port, ifup, dev,
|
|
|
|
"irq_handle 0x%X for function qp_aff_irq_handler registered\n",
|
|
|
|
port->qp_eq->attr.ist1);
|
2006-09-13 23:44:31 +08:00
|
|
|
|
2007-03-01 01:34:10 +08:00
|
|
|
|
2011-10-14 13:31:01 +08:00
|
|
|
for (i = 0; i < port->num_def_qps; i++) {
|
2006-09-13 23:44:31 +08:00
|
|
|
pr = &port->port_res[i];
|
|
|
|
snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
|
2007-03-01 01:34:10 +08:00
|
|
|
"%s-queue%d", dev->name, i);
|
2007-09-26 17:45:51 +08:00
|
|
|
ret = ibmebus_request_irq(pr->eq->attr.ist1,
|
2007-03-01 01:34:10 +08:00
|
|
|
ehea_recv_irq_handler,
|
2013-09-12 11:46:11 +08:00
|
|
|
0, pr->int_send_name, pr);
|
2006-09-13 23:44:31 +08:00
|
|
|
if (ret) {
|
2010-12-14 02:05:14 +08:00
|
|
|
netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
|
|
|
|
i, pr->eq->attr.ist1);
|
2006-09-13 23:44:31 +08:00
|
|
|
goto out_free_req;
|
|
|
|
}
|
2010-12-14 02:05:14 +08:00
|
|
|
netif_info(port, ifup, dev,
|
|
|
|
"irq_handle 0x%X for function ehea_queue_int %d registered\n",
|
|
|
|
pr->eq->attr.ist1, i);
|
2006-09-13 23:44:31 +08:00
|
|
|
}
|
|
|
|
out:
|
|
|
|
return ret;
|
|
|
|
|
2007-03-01 01:34:10 +08:00
|
|
|
|
2006-09-13 23:44:31 +08:00
|
|
|
out_free_req:
|
|
|
|
while (--i >= 0) {
|
2007-03-01 01:34:10 +08:00
|
|
|
u32 ist = port->port_res[i].eq->attr.ist1;
|
2007-09-26 17:45:51 +08:00
|
|
|
ibmebus_free_irq(ist, &port->port_res[i]);
|
2006-09-13 23:44:31 +08:00
|
|
|
}
|
2007-03-01 01:34:10 +08:00
|
|
|
|
2006-09-13 23:44:31 +08:00
|
|
|
out_free_qpeq:
|
2007-09-26 17:45:51 +08:00
|
|
|
ibmebus_free_irq(port->qp_eq->attr.ist1, port);
|
2006-09-13 23:44:31 +08:00
|
|
|
i = port->num_def_qps;
|
2007-03-01 01:34:10 +08:00
|
|
|
|
2006-09-13 23:44:31 +08:00
|
|
|
goto out;
|
2007-03-01 01:34:10 +08:00
|
|
|
|
2006-09-13 23:44:31 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void ehea_free_interrupts(struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct ehea_port *port = netdev_priv(dev);
|
|
|
|
struct ehea_port_res *pr;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
/* send */
|
2007-03-01 01:34:10 +08:00
|
|
|
|
2011-10-14 13:31:01 +08:00
|
|
|
for (i = 0; i < port->num_def_qps; i++) {
|
2006-09-13 23:44:31 +08:00
|
|
|
pr = &port->port_res[i];
|
2007-09-26 17:45:51 +08:00
|
|
|
ibmebus_free_irq(pr->eq->attr.ist1, pr);
|
2010-12-14 02:05:14 +08:00
|
|
|
netif_info(port, intr, dev,
|
|
|
|
"free send irq for res %d with handle 0x%X\n",
|
|
|
|
i, pr->eq->attr.ist1);
|
2006-09-13 23:44:31 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* associated events */
|
2007-09-26 17:45:51 +08:00
|
|
|
ibmebus_free_irq(port->qp_eq->attr.ist1, port);
|
2010-12-14 02:05:14 +08:00
|
|
|
netif_info(port, intr, dev,
|
|
|
|
"associated event interrupt for handle 0x%X freed\n",
|
|
|
|
port->qp_eq->attr.ist1);
|
2006-09-13 23:44:31 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int ehea_configure_port(struct ehea_port *port)
|
|
|
|
{
|
|
|
|
int ret, i;
|
|
|
|
u64 hret, mask;
|
|
|
|
struct hcp_ehea_port_cb0 *cb0;
|
|
|
|
|
|
|
|
ret = -ENOMEM;
|
2009-01-22 06:45:33 +08:00
|
|
|
cb0 = (void *)get_zeroed_page(GFP_KERNEL);
|
2006-09-13 23:44:31 +08:00
|
|
|
if (!cb0)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
|
|
|
|
| EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
|
|
|
|
| EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
|
|
|
|
| EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
|
|
|
|
| EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
|
|
|
|
PXLY_RC_VLAN_FILTER)
|
|
|
|
| EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);
|
|
|
|
|
2007-03-01 01:34:10 +08:00
|
|
|
for (i = 0; i < port->num_mcs; i++)
|
|
|
|
if (use_mcs)
|
|
|
|
cb0->default_qpn_arr[i] =
|
|
|
|
port->port_res[i].qp->init_attr.qp_nr;
|
|
|
|
else
|
|
|
|
cb0->default_qpn_arr[i] =
|
|
|
|
port->port_res[0].qp->init_attr.qp_nr;
|
2007-03-23 00:50:24 +08:00
|
|
|
|
2006-09-13 23:44:31 +08:00
|
|
|
if (netif_msg_ifup(port))
|
|
|
|
ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");
|
|
|
|
|
|
|
|
mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
|
|
|
|
| EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);
|
|
|
|
|
|
|
|
hret = ehea_h_modify_ehea_port(port->adapter->handle,
|
|
|
|
port->logical_port_id,
|
|
|
|
H_PORT_CB0, mask, cb0);
|
|
|
|
ret = -EIO;
|
|
|
|
if (hret != H_SUCCESS)
|
|
|
|
goto out_free;
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
out_free:
|
2009-01-22 06:45:33 +08:00
|
|
|
free_page((unsigned long)cb0);
|
2006-09-13 23:44:31 +08:00
|
|
|
out:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2012-01-13 16:06:32 +08:00
|
|
|
static int ehea_gen_smrs(struct ehea_port_res *pr)
|
2006-09-13 23:44:31 +08:00
|
|
|
{
|
2007-03-23 00:50:24 +08:00
|
|
|
int ret;
|
2006-09-13 23:44:31 +08:00
|
|
|
struct ehea_adapter *adapter = pr->port->adapter;
|
|
|
|
|
2007-03-23 00:50:24 +08:00
|
|
|
ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
|
|
|
|
if (ret)
|
2006-09-13 23:44:31 +08:00
|
|
|
goto out;
|
|
|
|
|
2007-03-23 00:50:24 +08:00
|
|
|
ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
|
|
|
|
if (ret)
|
|
|
|
goto out_free;
|
2006-09-13 23:44:31 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
2007-03-23 00:50:24 +08:00
|
|
|
out_free:
|
|
|
|
ehea_rem_mr(&pr->send_mr);
|
2006-09-13 23:44:31 +08:00
|
|
|
out:
|
2010-12-14 02:05:14 +08:00
|
|
|
pr_err("Generating SMRS failed\n");
|
2006-09-13 23:44:31 +08:00
|
|
|
return -EIO;
|
|
|
|
}
|
|
|
|
|
2012-01-13 16:06:32 +08:00
|
|
|
static int ehea_rem_smrs(struct ehea_port_res *pr)
|
2006-09-13 23:44:31 +08:00
|
|
|
{
|
2009-12-03 15:58:21 +08:00
|
|
|
if ((ehea_rem_mr(&pr->send_mr)) ||
|
|
|
|
(ehea_rem_mr(&pr->recv_mr)))
|
2007-03-23 00:50:24 +08:00
|
|
|
return -EIO;
|
|
|
|
else
|
|
|
|
return 0;
|
2006-09-13 23:44:31 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
|
|
|
|
{
|
2008-02-01 10:20:49 +08:00
|
|
|
int arr_size = sizeof(void *) * max_q_entries;
|
2006-09-13 23:44:31 +08:00
|
|
|
|
2010-11-22 08:15:06 +08:00
|
|
|
q_skba->arr = vzalloc(arr_size);
|
2006-09-13 23:44:31 +08:00
|
|
|
if (!q_skba->arr)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
q_skba->len = max_q_entries;
|
|
|
|
q_skba->index = 0;
|
|
|
|
q_skba->os_skbs = 0;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
|
|
|
|
struct port_res_cfg *pr_cfg, int queue_token)
|
|
|
|
{
|
|
|
|
struct ehea_adapter *adapter = port->adapter;
|
|
|
|
enum ehea_eq_type eq_type = EHEA_EQ;
|
|
|
|
struct ehea_qp_init_attr *init_attr = NULL;
|
|
|
|
int ret = -EIO;
|
2010-10-27 16:45:14 +08:00
|
|
|
u64 tx_bytes, rx_bytes, tx_packets, rx_packets;
|
|
|
|
|
|
|
|
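/* Preserve the accumulated traffic counters across the memset
 * re-initialisation below. */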
tx_bytes = pr->tx_bytes;
|
|
|
|
tx_packets = pr->tx_packets;
|
|
|
|
rx_bytes = pr->rx_bytes;
|
|
|
|
rx_packets = pr->rx_packets;
|
2006-09-13 23:44:31 +08:00
|
|
|
|
|
|
|
memset(pr, 0, sizeof(struct ehea_port_res));
|
|
|
|
|
2010-10-27 16:45:14 +08:00
|
|
|
pr->tx_bytes = tx_bytes;
|
|
|
|
pr->tx_packets = tx_packets;
|
|
|
|
pr->rx_bytes = rx_bytes;
|
|
|
|
pr->rx_packets = rx_packets;
|
|
|
|
|
2006-09-13 23:44:31 +08:00
|
|
|
pr->port = port;
|
|
|
|
|
2007-03-01 01:34:10 +08:00
|
|
|
pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
|
|
|
|
if (!pr->eq) {
|
2010-12-14 02:05:14 +08:00
|
|
|
pr_err("create_eq failed (eq)\n");
|
2006-09-13 23:44:31 +08:00
|
|
|
goto out_free;
|
|
|
|
}
|
|
|
|
|
|
|
|
pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
|
2007-03-01 01:34:10 +08:00
|
|
|
pr->eq->fw_handle,
|
2006-09-13 23:44:31 +08:00
|
|
|
port->logical_port_id);
|
|
|
|
if (!pr->recv_cq) {
|
2010-12-14 02:05:14 +08:00
|
|
|
pr_err("create_cq failed (cq_recv)\n");
|
2006-09-13 23:44:31 +08:00
|
|
|
goto out_free;
|
|
|
|
}
|
|
|
|
|
|
|
|
pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
|
2007-03-01 01:34:10 +08:00
|
|
|
pr->eq->fw_handle,
|
2006-09-13 23:44:31 +08:00
|
|
|
port->logical_port_id);
|
|
|
|
if (!pr->send_cq) {
|
2010-12-14 02:05:14 +08:00
|
|
|
pr_err("create_cq failed (cq_send)\n");
|
2006-09-13 23:44:31 +08:00
|
|
|
goto out_free;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (netif_msg_ifup(port))
|
2010-12-14 02:05:14 +08:00
|
|
|
pr_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d\n",
|
|
|
|
pr->send_cq->attr.act_nr_of_cqes,
|
|
|
|
pr->recv_cq->attr.act_nr_of_cqes);
|
2006-09-13 23:44:31 +08:00
|
|
|
|
|
|
|
init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
|
|
|
|
if (!init_attr) {
|
|
|
|
ret = -ENOMEM;
|
2010-12-14 02:05:14 +08:00
|
|
|
pr_err("no mem for ehea_qp_init_attr\n");
|
2006-09-13 23:44:31 +08:00
|
|
|
goto out_free;
|
|
|
|
}
|
|
|
|
|
|
|
|
init_attr->low_lat_rq1 = 1;
|
|
|
|
init_attr->signalingtype = 1; /* generate CQE if specified in WQE */
|
|
|
|
init_attr->rq_count = 3;
|
|
|
|
init_attr->qp_token = queue_token;
|
|
|
|
init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
|
|
|
|
init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
|
|
|
|
init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
|
|
|
|
init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
|
|
|
|
init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
|
|
|
|
init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
|
|
|
|
init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
|
|
|
|
init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
|
|
|
|
init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
|
|
|
|
init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
|
|
|
|
init_attr->port_nr = port->logical_port_id;
|
|
|
|
init_attr->send_cq_handle = pr->send_cq->fw_handle;
|
|
|
|
init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
|
|
|
|
init_attr->aff_eq_handle = port->qp_eq->fw_handle;
|
|
|
|
|
|
|
|
pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
|
|
|
|
if (!pr->qp) {
|
2010-12-14 02:05:14 +08:00
|
|
|
pr_err("create_qp failed\n");
|
2006-09-13 23:44:31 +08:00
|
|
|
ret = -EIO;
|
|
|
|
goto out_free;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (netif_msg_ifup(port))
|
2010-12-14 02:05:14 +08:00
|
|
|
pr_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d\n",
|
|
|
|
init_attr->qp_nr,
|
|
|
|
init_attr->act_nr_send_wqes,
|
|
|
|
init_attr->act_nr_rwqes_rq1,
|
|
|
|
init_attr->act_nr_rwqes_rq2,
|
|
|
|
init_attr->act_nr_rwqes_rq3);
|
2006-09-13 23:44:31 +08:00
|
|
|
|
2008-04-04 21:04:53 +08:00
|
|
|
pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;
|
|
|
|
|
|
|
|
ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
|
2006-09-13 23:44:31 +08:00
|
|
|
ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
|
|
|
|
ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
|
|
|
|
ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
|
|
|
|
if (ret)
|
|
|
|
goto out_free;
|
|
|
|
|
|
|
|
pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
|
|
|
|
if (ehea_gen_smrs(pr) != 0) {
|
|
|
|
ret = -EIO;
|
|
|
|
goto out_free;
|
|
|
|
}
|
2007-03-01 01:34:10 +08:00
|
|
|
|
2006-09-13 23:44:31 +08:00
|
|
|
atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);
|
|
|
|
|
|
|
|
kfree(init_attr);
|
2007-03-01 01:34:10 +08:00
|
|
|
|
2007-10-04 07:41:36 +08:00
|
|
|
netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);
|
2007-03-01 01:34:10 +08:00
|
|
|
|
2006-09-13 23:44:31 +08:00
|
|
|
ret = 0;
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
out_free:
|
|
|
|
kfree(init_attr);
|
|
|
|
vfree(pr->sq_skba.arr);
|
|
|
|
vfree(pr->rq1_skba.arr);
|
|
|
|
vfree(pr->rq2_skba.arr);
|
|
|
|
vfree(pr->rq3_skba.arr);
|
|
|
|
ehea_destroy_qp(pr->qp);
|
|
|
|
ehea_destroy_cq(pr->send_cq);
|
|
|
|
ehea_destroy_cq(pr->recv_cq);
|
2007-03-01 01:34:10 +08:00
|
|
|
ehea_destroy_eq(pr->eq);
|
2006-09-13 23:44:31 +08:00
|
|
|
out:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
|
|
|
|
{
|
|
|
|
int ret, i;
|
|
|
|
|
2009-08-05 02:48:39 +08:00
|
|
|
if (pr->qp)
|
|
|
|
netif_napi_del(&pr->napi);
|
|
|
|
|
2006-09-13 23:44:31 +08:00
|
|
|
ret = ehea_destroy_qp(pr->qp);
|
|
|
|
|
|
|
|
if (!ret) {
|
|
|
|
ehea_destroy_cq(pr->send_cq);
|
|
|
|
ehea_destroy_cq(pr->recv_cq);
|
2007-03-01 01:34:10 +08:00
|
|
|
ehea_destroy_eq(pr->eq);
|
2006-09-13 23:44:31 +08:00
|
|
|
|
|
|
|
for (i = 0; i < pr->rq1_skba.len; i++)
|
|
|
|
if (pr->rq1_skba.arr[i])
|
|
|
|
dev_kfree_skb(pr->rq1_skba.arr[i]);
|
|
|
|
|
|
|
|
for (i = 0; i < pr->rq2_skba.len; i++)
|
|
|
|
if (pr->rq2_skba.arr[i])
|
|
|
|
dev_kfree_skb(pr->rq2_skba.arr[i]);
|
|
|
|
|
|
|
|
for (i = 0; i < pr->rq3_skba.len; i++)
|
|
|
|
if (pr->rq3_skba.arr[i])
|
|
|
|
dev_kfree_skb(pr->rq3_skba.arr[i]);
|
|
|
|
|
|
|
|
for (i = 0; i < pr->sq_skba.len; i++)
|
|
|
|
if (pr->sq_skba.arr[i])
|
|
|
|
dev_kfree_skb(pr->sq_skba.arr[i]);
|
|
|
|
|
|
|
|
vfree(pr->rq1_skba.arr);
|
|
|
|
vfree(pr->rq2_skba.arr);
|
|
|
|
vfree(pr->rq3_skba.arr);
|
|
|
|
vfree(pr->sq_skba.arr);
|
|
|
|
ret = ehea_rem_smrs(pr);
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2011-10-14 13:31:06 +08:00
|
|
|
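/* Fill the immediate-data area of the send WQE.  For TSO frames only
 * the headers go inline; any remaining linear data is described by the
 * first scatter-gather entry. */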
static void write_swqe2_immediate(struct sk_buff *skb, struct ehea_swqe *swqe,
|
|
|
|
u32 lkey)
|
2006-09-13 23:44:31 +08:00
|
|
|
{
|
2010-04-15 06:59:40 +08:00
|
|
|
int skb_data_size = skb_headlen(skb);
|
2006-09-13 23:44:31 +08:00
|
|
|
u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
|
|
|
|
struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
|
2011-10-14 13:31:06 +08:00
|
|
|
unsigned int immediate_len = SWQE2_MAX_IMM;
|
|
|
|
|
|
|
|
swqe->descriptors = 0;
|
2006-09-13 23:44:31 +08:00
|
|
|
|
2011-10-14 13:31:06 +08:00
|
|
|
if (skb_is_gso(skb)) {
|
|
|
|
swqe->tx_control |= EHEA_SWQE_TSO;
|
|
|
|
swqe->mss = skb_shinfo(skb)->gso_size;
|
|
|
|
/*
|
|
|
|
* For TSO packets we only copy the headers into the
|
|
|
|
* immediate area.
|
|
|
|
*/
|
|
|
|
immediate_len = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
|
|
|
|
}
|
2006-09-13 23:44:31 +08:00
|
|
|
|
2011-10-14 13:31:06 +08:00
|
|
|
if (skb_is_gso(skb) || skb_data_size >= SWQE2_MAX_IMM) {
|
|
|
|
skb_copy_from_linear_data(skb, imm_data, immediate_len);
|
|
|
|
swqe->immediate_data_length = immediate_len;
|
2006-09-13 23:44:31 +08:00
|
|
|
|
2011-10-14 13:31:06 +08:00
|
|
|
if (skb_data_size > immediate_len) {
|
2006-09-13 23:44:31 +08:00
|
|
|
sg1entry->l_key = lkey;
|
2011-10-14 13:31:06 +08:00
|
|
|
sg1entry->len = skb_data_size - immediate_len;
|
2007-08-06 19:55:44 +08:00
|
|
|
sg1entry->vaddr =
|
2011-10-14 13:31:06 +08:00
|
|
|
ehea_map_vaddr(skb->data + immediate_len);
|
2006-09-13 23:44:31 +08:00
|
|
|
swqe->descriptors++;
|
|
|
|
}
|
|
|
|
} else {
|
2007-03-28 05:55:52 +08:00
|
|
|
skb_copy_from_linear_data(skb, imm_data, skb_data_size);
|
2006-09-13 23:44:31 +08:00
|
|
|
swqe->immediate_data_length = skb_data_size;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
|
|
|
|
struct ehea_swqe *swqe, u32 lkey)
|
|
|
|
{
|
|
|
|
struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
|
|
|
|
skb_frag_t *frag;
|
|
|
|
int nfrags, sg1entry_contains_frag_data, i;
|
|
|
|
|
|
|
|
nfrags = skb_shinfo(skb)->nr_frags;
|
|
|
|
sg1entry = &swqe->u.immdata_desc.sg_entry;
|
2008-02-01 10:20:49 +08:00
|
|
|
sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
|
2006-09-13 23:44:31 +08:00
|
|
|
sg1entry_contains_frag_data = 0;
|
|
|
|
|
2011-10-14 13:31:06 +08:00
|
|
|
write_swqe2_immediate(skb, swqe, lkey);
|
2006-09-13 23:44:31 +08:00
|
|
|
|
|
|
|
/* write descriptors */
|
|
|
|
if (nfrags > 0) {
|
|
|
|
if (swqe->descriptors == 0) {
|
|
|
|
/* sg1entry not yet used */
|
|
|
|
frag = &skb_shinfo(skb)->frags[0];
|
|
|
|
|
|
|
|
/* copy sg1entry data */
|
|
|
|
sg1entry->l_key = lkey;
|
2011-10-19 05:00:24 +08:00
|
|
|
sg1entry->len = skb_frag_size(frag);
|
2007-08-06 19:55:44 +08:00
|
|
|
sg1entry->vaddr =
|
2011-10-10 09:11:38 +08:00
|
|
|
ehea_map_vaddr(skb_frag_address(frag));
|
2006-09-13 23:44:31 +08:00
|
|
|
swqe->descriptors++;
|
|
|
|
sg1entry_contains_frag_data = 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = sg1entry_contains_frag_data; i < nfrags; i++) {
|
|
|
|
|
|
|
|
frag = &skb_shinfo(skb)->frags[i];
|
|
|
|
sgentry = &sg_list[i - sg1entry_contains_frag_data];
|
|
|
|
|
|
|
|
sgentry->l_key = lkey;
|
2011-10-25 22:16:10 +08:00
|
|
|
sgentry->len = skb_frag_size(frag);
|
2011-10-10 09:11:38 +08:00
|
|
|
sgentry->vaddr = ehea_map_vaddr(skb_frag_address(frag));
|
2006-09-13 23:44:31 +08:00
|
|
|
swqe->descriptors++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
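/* Register or deregister (depending on hcallid) the port's broadcast
 * address with the hypervisor, for untagged and for VLAN traffic. */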
static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
|
|
|
|
{
|
|
|
|
int ret = 0;
|
|
|
|
u64 hret;
|
|
|
|
u8 reg_type;
|
|
|
|
|
|
|
|
/* De/Register untagged packets */
|
|
|
|
reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
|
|
|
|
hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
|
|
|
|
port->logical_port_id,
|
|
|
|
reg_type, port->mac_addr, 0, hcallid);
|
|
|
|
if (hret != H_SUCCESS) {
|
2010-12-14 02:05:14 +08:00
|
|
|
pr_err("%sregistering bc address failed (tagged)\n",
|
|
|
|
hcallid == H_REG_BCMC ? "" : "de");
|
2006-09-13 23:44:31 +08:00
|
|
|
ret = -EIO;
|
|
|
|
goto out_herr;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* De/Register VLAN packets */
|
|
|
|
reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
|
|
|
|
hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
|
|
|
|
port->logical_port_id,
|
|
|
|
reg_type, port->mac_addr, 0, hcallid);
|
|
|
|
if (hret != H_SUCCESS) {
|
2010-12-14 02:05:14 +08:00
|
|
|
pr_err("%sregistering bc address failed (vlan)\n",
|
|
|
|
hcallid == H_REG_BCMC ? "" : "de");
|
2006-09-13 23:44:31 +08:00
|
|
|
ret = -EIO;
|
|
|
|
}
|
|
|
|
out_herr:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int ehea_set_mac_addr(struct net_device *dev, void *sa)
{
	struct ehea_port *port = netdev_priv(dev);
	struct sockaddr *mac_addr = sa;
	struct hcp_ehea_port_cb0 *cb0;
	int ret;
	u64 hret;

	if (!is_valid_ether_addr(mac_addr->sa_data)) {
		ret = -EADDRNOTAVAIL;
		goto out;
	}

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		pr_err("no mem for cb0\n");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);

	cb0->port_mac_addr = cb0->port_mac_addr >> 16;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id, H_PORT_CB0,
				       EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);

	/* Deregister old MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
		if (ret)
			goto out_upregs;
	}

	port->mac_addr = cb0->port_mac_addr << 16;

	/* Register new MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
		if (ret)
			goto out_upregs;
	}

	ret = 0;

out_upregs:
	ehea_update_bcmc_registrations();
out_free:
	free_page((unsigned long)cb0);
out:
	return ret;
}

static void ehea_promiscuous_error(u64 hret, int enable)
{
	if (hret == H_AUTHORITY)
		pr_info("Hypervisor denied %sabling promiscuous mode\n",
			enable == 1 ? "en" : "dis");
	else
		pr_err("failed %sabling promiscuous mode\n",
		       enable == 1 ? "en" : "dis");
}

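/*
 * Promiscuous mode is implemented through the port's default unicast
 * QP number (Pxs_DUCQPN in control block 7). Setting it to the first
 * port resource's QP handle makes the port deliver otherwise-unmatched
 * unicast frames to this driver; clearing it back to 0 restores normal
 * filtering. H_AUTHORITY from the hypervisor means the partition is
 * not allowed to snoop the port, which is reported as info rather than
 * as an error.
 */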
static void ehea_promiscuous(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	struct hcp_ehea_port_cb7 *cb7;
	u64 hret;

	if (enable == port->promisc)
		return;

	cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!cb7) {
		pr_err("no mem for cb7\n");
		goto out;
	}

	/* Modify Pxs_DUCQPN in CB7 */
	cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
	if (hret) {
		ehea_promiscuous_error(hret, enable);
		goto out;
	}

	port->promisc = enable;
out:
	free_page((unsigned long)cb7);
}

static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
				     u32 hcallid)
{
	u64 hret;
	u8 reg_type;

	reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_UNTAGGED;
	if (mc_mac_addr == 0)
		reg_type |= EHEA_BCMC_SCOPE_ALL;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
	if (hret)
		goto out;

	reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_VLANID_ALL;
	if (mc_mac_addr == 0)
		reg_type |= EHEA_BCMC_SCOPE_ALL;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
out:
	return hret;
}

static int ehea_drop_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_mc_list *mc_entry = port->mc_list;
	struct list_head *pos;
	struct list_head *temp;
	int ret = 0;
	u64 hret;

	list_for_each_safe(pos, temp, &(port->mc_list->list)) {
		mc_entry = list_entry(pos, struct ehea_mc_list, list);

		hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
						 H_DEREG_BCMC);
		if (hret) {
			pr_err("failed deregistering mcast MAC\n");
			ret = -EIO;
		}

		list_del(pos);
		kfree(mc_entry);
	}
	return ret;
}

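/*
 * A multicast MAC of 0 acts as a wildcard: ehea_multicast_reg_helper()
 * then adds EHEA_BCMC_SCOPE_ALL to the registration, which is how
 * IFF_ALLMULTI is mapped onto a single catch-all hypervisor filter
 * instead of one registration per group address. Enabling ALLMULTI
 * therefore first drops the per-address list to avoid redundant
 * registrations.
 */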
static void ehea_allmulti(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	u64 hret;

	if (!port->allmulti) {
		if (enable) {
			/* Enable ALLMULTI */
			ehea_drop_multicast_list(dev);
			hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
			if (!hret)
				port->allmulti = 1;
			else
				netdev_err(dev,
					   "failed enabling IFF_ALLMULTI\n");
		}
	} else {
		if (!enable) {
			/* Disable ALLMULTI */
			hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
			if (!hret)
				port->allmulti = 0;
			else
				netdev_err(dev,
					   "failed disabling IFF_ALLMULTI\n");
		}
	}
}

static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
{
	struct ehea_mc_list *ehea_mcl_entry;
	u64 hret;

	ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
	if (!ehea_mcl_entry)
		return;

	INIT_LIST_HEAD(&ehea_mcl_entry->list);

	memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);

	hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
					 H_REG_BCMC);
	if (!hret)
		list_add(&ehea_mcl_entry->list, &port->mc_list->list);
	else {
		pr_err("failed registering mcast MAC\n");
		kfree(ehea_mcl_entry);
	}
}

static void ehea_set_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int ret;

	ehea_promiscuous(dev, !!(dev->flags & IFF_PROMISC));

	if (dev->flags & IFF_ALLMULTI) {
		ehea_allmulti(dev, 1);
		goto out;
	}
	ehea_allmulti(dev, 0);

	if (!netdev_mc_empty(dev)) {
		ret = ehea_drop_multicast_list(dev);
		if (ret) {
			/* Dropping the current multicast list failed.
			 * Enabling ALL_MULTI is the best we can do.
			 */
			ehea_allmulti(dev, 1);
		}

		if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
			pr_info("Mcast registration limit reached (0x%llx). Use ALLMULTI!\n",
				port->adapter->max_mc_mac);
			goto out;
		}

		netdev_for_each_mc_addr(ha, dev)
			ehea_add_multicast_entry(port, ha->addr);

	}
out:
	ehea_update_bcmc_registrations();
}

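/*
 * TX checksum offload: the adapter has to be told where the IPv4 header
 * and the L4 checksum field sit in the frame. xmit_common() fills
 * ip_start/ip_end with the header bounds and points tcp_offset at the
 * checksum field (offsetof the "check" member works for both TCP and
 * UDP); non-IPv4 frames are sent with CRC generation only.
 */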
static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe)
{
	swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;

	if (vlan_get_protocol(skb) != htons(ETH_P_IP))
		return;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		swqe->tx_control |= EHEA_SWQE_IP_CHECKSUM;

	swqe->ip_start = skb_network_offset(skb);
	swqe->ip_end = swqe->ip_start + ip_hdrlen(skb) - 1;

	switch (ip_hdr(skb)->protocol) {
	case IPPROTO_UDP:
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;

		swqe->tcp_offset = swqe->ip_end + 1 +
				   offsetof(struct udphdr, check);
		break;

	case IPPROTO_TCP:
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;

		swqe->tcp_offset = swqe->ip_end + 1 +
				   offsetof(struct tcphdr, check);
		break;
	}
}

static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe, u32 lkey)
{
	swqe->tx_control |= EHEA_SWQE_DESCRIPTORS_PRESENT;

	xmit_common(skb, swqe);

	write_swqe2_data(skb, dev, swqe, lkey);
}

static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe)
{
	u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];

	xmit_common(skb, swqe);

	if (!skb->data_len)
		skb_copy_from_linear_data(skb, imm_data, skb->len);
	else
		skb_copy_bits(skb, 0, imm_data, skb->len);

	swqe->immediate_data_length = skb->len;
	dev_consume_skb_any(skb);
}

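/*
 * Transmit fast path. Frames short enough to fit in a type-3 SWQE
 * (<= SWQE3_MAX_IMM bytes) are copied into the work queue entry itself
 * and the skb is freed right away; to keep completion interrupts rare,
 * only every sig_comp_iv-th such SWQE requests a signalled completion.
 * Longer frames go out as type-2 SWQEs that reference the skb pages by
 * descriptor, so the skb is parked in sq_skba until its (always
 * signalled) completion arrives.
 */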
static netdev_tx_t ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_swqe *swqe;
	u32 lkey;
	int swqe_index;
	struct ehea_port_res *pr;
	struct netdev_queue *txq;

	pr = &port->port_res[skb_get_queue_mapping(skb)];
	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	swqe = ehea_get_swqe(pr->qp, &swqe_index);
	memset(swqe, 0, SWQE_HEADER_SIZE);
	atomic_dec(&pr->swqe_avail);

	if (skb_vlan_tag_present(skb)) {
		swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
		swqe->vlan_tag = skb_vlan_tag_get(skb);
	}

	pr->tx_packets++;
	pr->tx_bytes += skb->len;

	if (skb->len <= SWQE3_MAX_IMM) {
		u32 sig_iv = port->sig_comp_iv;
		u32 swqe_num = pr->swqe_id_counter;
		ehea_xmit3(skb, dev, swqe);
		swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
			| EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
		if (pr->swqe_ll_count >= (sig_iv - 1)) {
			swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
						      sig_iv);
			swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
			pr->swqe_ll_count = 0;
		} else
			pr->swqe_ll_count += 1;
	} else {
		swqe->wr_id =
			EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
		      | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
		      | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
		      | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
		pr->sq_skba.arr[pr->sq_skba.index] = skb;

		pr->sq_skba.index++;
		pr->sq_skba.index &= (pr->sq_skba.len - 1);

		lkey = pr->send_mr.lkey;
		ehea_xmit2(skb, dev, swqe, lkey);
		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
	}
	pr->swqe_id_counter += 1;

	netif_info(port, tx_queued, dev,
		   "post swqe on QP %d\n", pr->qp->init_attr.qp_nr);
	if (netif_msg_tx_queued(port))
		ehea_dump(swqe, 512, "swqe");

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		netif_tx_stop_queue(txq);
		swqe->tx_control |= EHEA_SWQE_PURGE;
	}

	ehea_post_swqe(pr->qp, swqe);

	if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
		pr->p_stats.queue_stopped++;
		netif_tx_stop_queue(txq);
	}

	return NETDEV_TX_OK;
}

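/*
 * The RX VLAN filter is a 4096-bit bitmap in control block 1, stored as
 * an array of u64 words with VID 0 at the most significant bit of word
 * 0. For example, VID 100 lands in vlan_filter[1] (100 / 64) under the
 * mask 0x8000000000000000 >> 36 (100 & 0x3F). Both callbacks below do
 * a read-modify-write of the whole control block around flipping that
 * single bit.
 */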
static int ehea_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;
	int err = 0;

	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb1) {
		pr_err("no mem for cb1\n");
		err = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_port failed\n");
		err = -EINVAL;
		goto out;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_port failed\n");
		err = -EINVAL;
	}
out:
	free_page((unsigned long)cb1);
	return err;
}

static int ehea_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;
	int err = 0;

	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb1) {
		pr_err("no mem for cb1\n");
		err = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_port failed\n");
		err = -EINVAL;
		goto out;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_port failed\n");
		err = -EINVAL;
	}
out:
	free_page((unsigned long)cb1);
	return err;
}

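/*
 * QP bring-up walks the firmware state machine one step at a time:
 * INITIALIZED, then ENABLED | INITIALIZED, then ENABLED | RDY2SND,
 * re-reading control block 0 with a query before each modify. The
 * numbered error messages identify which of these steps failed.
 */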
static int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
{
	int ret = -EIO;
	u64 hret;
	u16 dummy16 = 0;
	u64 dummy64 = 0;
	struct hcp_modify_qp_cb0 *cb0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (1)\n");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_qp failed (1)\n");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (2)\n");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_qp failed (2)\n");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (3)\n");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_qp failed (3)\n");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (4)\n");
		goto out;
	}

	ret = 0;
out:
	free_page((unsigned long)cb0);
	return ret;
}

static int ehea_port_res_setup(struct ehea_port *port, int def_qps)
{
	int ret, i;
	struct port_res_cfg pr_cfg, pr_cfg_small_rx;
	enum ehea_eq_type eq_type = EHEA_EQ;

	port->qp_eq = ehea_create_eq(port->adapter, eq_type,
				     EHEA_MAX_ENTRIES_EQ, 1);
	if (!port->qp_eq) {
		ret = -EINVAL;
		pr_err("ehea_create_eq failed (qp_eq)\n");
		goto out_kill_eq;
	}

	pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
	pr_cfg.max_entries_scq = sq_entries * 2;
	pr_cfg.max_entries_sq = sq_entries;
	pr_cfg.max_entries_rq1 = rq1_entries;
	pr_cfg.max_entries_rq2 = rq2_entries;
	pr_cfg.max_entries_rq3 = rq3_entries;

	pr_cfg_small_rx.max_entries_rcq = 1;
	pr_cfg_small_rx.max_entries_scq = sq_entries;
	pr_cfg_small_rx.max_entries_sq = sq_entries;
	pr_cfg_small_rx.max_entries_rq1 = 1;
	pr_cfg_small_rx.max_entries_rq2 = 1;
	pr_cfg_small_rx.max_entries_rq3 = 1;

	for (i = 0; i < def_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
		if (ret)
			goto out_clean_pr;
	}
	/*
	 * Dead code: with identical start and end bounds this loop never
	 * runs. It is apparently left over from an earlier configuration
	 * that created extra TX-only queue pairs with the small-RX layout
	 * above.
	 */
	for (i = def_qps; i < def_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i],
					 &pr_cfg_small_rx, i);
		if (ret)
			goto out_clean_pr;
	}

	return 0;

out_clean_pr:
	while (--i >= 0)
		ehea_clean_portres(port, &port->port_res[i]);

out_kill_eq:
	ehea_destroy_eq(port->qp_eq);
	return ret;
}

static int ehea_clean_all_portres(struct ehea_port *port)
{
	int ret = 0;
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		ret |= ehea_clean_portres(port, &port->port_res[i]);

	ret |= ehea_destroy_eq(port->qp_eq);

	return ret;
}

static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
{
	if (adapter->active_ports)
		return;

	ehea_rem_mr(&adapter->mr);
}

static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
{
	if (adapter->active_ports)
		return 0;

	return ehea_reg_kernel_mr(adapter, &adapter->mr);
}

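/*
 * Port bring-up order matters: queue pairs and event queues first, then
 * the default-QP port configuration, then interrupts, then QP
 * activation and RX queue filling, and only then the broadcast
 * registration that makes the port reachable. Each failure point
 * unwinds exactly the steps that already succeeded.
 */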
static int ehea_up(struct net_device *dev)
{
	int ret, i;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_UP)
		return 0;

	ret = ehea_port_res_setup(port, port->num_def_qps);
	if (ret) {
		netdev_err(dev, "port_res_failed\n");
		goto out;
	}

	/* Set default QP for this port */
	ret = ehea_configure_port(port);
	if (ret) {
		netdev_err(dev, "ehea_configure_port failed. ret:%d\n", ret);
		goto out_clean_pr;
	}

	ret = ehea_reg_interrupts(dev);
	if (ret) {
		netdev_err(dev, "reg_interrupts failed. ret:%d\n", ret);
		goto out_clean_pr;
	}

	for (i = 0; i < port->num_def_qps; i++) {
		ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
		if (ret) {
			netdev_err(dev, "activate_qp failed\n");
			goto out_free_irqs;
		}
	}

	for (i = 0; i < port->num_def_qps; i++) {
		ret = ehea_fill_port_res(&port->port_res[i]);
		if (ret) {
			netdev_err(dev, "ehea_fill_port_res failed\n");
			goto out_free_irqs;
		}
	}

	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
	if (ret) {
		ret = -EIO;
		goto out_free_irqs;
	}

	port->state = EHEA_PORT_UP;

	ret = 0;
	goto out;

out_free_irqs:
	ehea_free_interrupts(dev);

out_clean_pr:
	ehea_clean_all_portres(port);
out:
	if (ret)
		netdev_info(dev, "Failed starting. ret=%i\n", ret);

	ehea_update_bcmc_registrations();
	ehea_update_firmware_handles();

	return ret;
}

static void port_napi_disable(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		napi_disable(&port->port_res[i].napi);
}

static void port_napi_enable(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		napi_enable(&port->port_res[i].napi);
}

static int ehea_open(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	mutex_lock(&port->port_lock);

	netif_info(port, ifup, dev, "enabling port\n");

	netif_carrier_off(dev);

	ret = ehea_up(dev);
	if (!ret) {
		port_napi_enable(port);
		netif_tx_start_all_queues(dev);
	}

	mutex_unlock(&port->port_lock);
	schedule_delayed_work(&port->stats_work,
			      round_jiffies_relative(msecs_to_jiffies(1000)));

	return ret;
}

static int ehea_down(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_DOWN)
		return 0;

	ehea_drop_multicast_list(dev);
	ehea_allmulti(dev, 0);
	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);

	ehea_free_interrupts(dev);

	port->state = EHEA_PORT_DOWN;

	ehea_update_bcmc_registrations();

	ret = ehea_clean_all_portres(port);
	if (ret)
		netdev_info(dev, "Failed freeing resources. ret=%i\n", ret);

	ehea_update_firmware_handles();

	return ret;
}

static int ehea_stop(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	netif_info(port, ifdown, dev, "disabling port\n");

	set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
	cancel_work_sync(&port->reset_task);
	cancel_delayed_work_sync(&port->stats_work);
	mutex_lock(&port->port_lock);
	netif_tx_stop_all_queues(dev);
	port_napi_disable(port);
	ret = ehea_down(dev);
	mutex_unlock(&port->port_lock);
	clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
	return ret;
}

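/*
 * Quiescing a send queue is a two-step process: ehea_purge_sq() marks
 * every send WQE with EHEA_SWQE_PURGE so the hardware can complete it
 * without putting the frame on the wire, and ehea_flush_sq() then waits
 * (bounded, 100 ms per queue) until swqe_avail shows the completions
 * have drained back.
 */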
static void ehea_purge_sq(struct ehea_qp *orig_qp)
{
	struct ehea_qp qp = *orig_qp;
	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
	struct ehea_swqe *swqe;
	int wqe_index;
	int i;

	for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
		swqe = ehea_get_swqe(&qp, &wqe_index);
		swqe->tx_control |= EHEA_SWQE_PURGE;
	}
}

static void ehea_flush_sq(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
		int ret;

		ret = wait_event_timeout(port->swqe_avail_wq,
					 atomic_read(&pr->swqe_avail) >= swqe_max,
					 msecs_to_jiffies(100));

		if (!ret) {
			pr_err("WARNING: sq not flushed completely\n");
			break;
		}
	}
}

static int ehea_stop_qps(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_modify_qp_cb0 *cb0;
	int ret = -EIO;
	int dret;
	int i;
	u64 hret;
	u64 dummy64 = 0;
	u16 dummy16 = 0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < (port->num_def_qps); i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		struct ehea_qp *qp = pr->qp;

		/* Purge send queue */
		ehea_purge_sq(qp);

		/* Disable queue pair */
		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			pr_err("query_ehea_qp failed (1)\n");
			goto out;
		}

		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
		cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;

		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
							    1), cb0, &dummy64,
					     &dummy64, &dummy16, &dummy16);
		if (hret != H_SUCCESS) {
			pr_err("modify_ehea_qp failed (1)\n");
			goto out;
		}

		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			pr_err("query_ehea_qp failed (2)\n");
			goto out;
		}

		/* deregister shared memory regions */
		dret = ehea_rem_smrs(pr);
		if (dret) {
			pr_err("unreg shared memory region failed\n");
			goto out;
		}
	}

	ret = 0;
out:
	free_page((unsigned long)cb0);

	return ret;
}

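/*
 * After a memory region has been torn down and re-registered (see
 * ehea_rereg_mrs() below), every receive WQE still queued to the
 * hardware carries a stale l_key and possibly a stale mapped address.
 * ehea_update_rqs() rewrites both in place for RQ2 and RQ3 from the
 * skb arrays before the queue pair is re-enabled.
 */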
static void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
{
	struct ehea_qp qp = *orig_qp;
	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
	struct ehea_rwqe *rwqe;
	struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
	struct sk_buff *skb;
	u32 lkey = pr->recv_mr.lkey;

	int i;
	int index;

	for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
		rwqe = ehea_get_next_rwqe(&qp, 2);
		rwqe->sg_list[0].l_key = lkey;
		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
		skb = skba_rq2[index];
		if (skb)
			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
	}

	for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
		rwqe = ehea_get_next_rwqe(&qp, 3);
		rwqe->sg_list[0].l_key = lkey;
		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
		skb = skba_rq3[index];
		if (skb)
			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
	}
}

static int ehea_restart_qps(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	int ret = 0;
	int i;
	struct hcp_modify_qp_cb0 *cb0;
	u64 hret;
	u64 dummy64 = 0;
	u16 dummy16 = 0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < (port->num_def_qps); i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		struct ehea_qp *qp = pr->qp;

		ret = ehea_gen_smrs(pr);
		if (ret) {
			netdev_err(dev, "creation of shared memory regions failed\n");
			goto out;
		}

		ehea_update_rqs(qp, pr);

		/* Enable queue pair */
		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			netdev_err(dev, "query_ehea_qp failed (1)\n");
			goto out;
		}

		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
		cb0->qp_ctl_reg |= H_QP_CR_ENABLED;

		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
							    1), cb0, &dummy64,
					     &dummy64, &dummy16, &dummy16);
		if (hret != H_SUCCESS) {
			netdev_err(dev, "modify_ehea_qp failed (1)\n");
			goto out;
		}

		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			netdev_err(dev, "query_ehea_qp failed (2)\n");
			goto out;
		}

		/* refill entire queue */
		ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
		ehea_refill_rq2(pr, 0);
		ehea_refill_rq3(pr, 0);
	}
out:
	free_page((unsigned long)cb0);

	return ret;
}

static void ehea_reset_port(struct work_struct *work)
{
	int ret;
	struct ehea_port *port =
		container_of(work, struct ehea_port, reset_task);
	struct net_device *dev = port->netdev;

	mutex_lock(&dlpar_mem_lock);
	port->resets++;
	mutex_lock(&port->port_lock);
	netif_tx_disable(dev);

	port_napi_disable(port);

	ehea_down(dev);

	ret = ehea_up(dev);
	if (ret)
		goto out;

	ehea_set_multicast_list(dev);

	netif_info(port, timer, dev, "reset successful\n");

	port_napi_enable(port);

	netif_tx_wake_all_queues(dev);
out:
	mutex_unlock(&port->port_lock);
	mutex_unlock(&dlpar_mem_lock);
}

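/*
 * DLPAR memory add/remove invalidates the single kernel memory region
 * the adapters DMA through. The recovery sequence below therefore
 * stops traffic on every active port (flush and disable the QPs),
 * drops the old MR, registers a new one covering the changed memory
 * layout, and restarts the queue pairs with refreshed keys before
 * waking the TX queues again.
 */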
static void ehea_rereg_mrs(void)
{
	int ret, i;
	struct ehea_adapter *adapter;

	pr_info("LPAR memory changed - re-initializing driver\n");

	list_for_each_entry(adapter, &adapter_list, list)
		if (adapter->active_ports) {
			/* Shutdown all ports */
			for (i = 0; i < EHEA_MAX_PORTS; i++) {
				struct ehea_port *port = adapter->port[i];
				struct net_device *dev;

				if (!port)
					continue;

				dev = port->netdev;

				if (dev->flags & IFF_UP) {
					mutex_lock(&port->port_lock);
					netif_tx_disable(dev);
					ehea_flush_sq(port);
					ret = ehea_stop_qps(dev);
					if (ret) {
						mutex_unlock(&port->port_lock);
						goto out;
					}
					port_napi_disable(port);
					mutex_unlock(&port->port_lock);
				}
				reset_sq_restart_flag(port);
			}

			/* Unregister old memory region */
			ret = ehea_rem_mr(&adapter->mr);
			if (ret) {
				pr_err("unregister MR failed - driver inoperable!\n");
				goto out;
			}
		}

	clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);

	list_for_each_entry(adapter, &adapter_list, list)
		if (adapter->active_ports) {
			/* Register new memory region */
			ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
			if (ret) {
				pr_err("register MR failed - driver inoperable!\n");
				goto out;
			}

			/* Restart all ports */
			for (i = 0; i < EHEA_MAX_PORTS; i++) {
				struct ehea_port *port = adapter->port[i];

				if (port) {
					struct net_device *dev = port->netdev;

					if (dev->flags & IFF_UP) {
						mutex_lock(&port->port_lock);
						ret = ehea_restart_qps(dev);
						if (!ret) {
							check_sqs(port);
							port_napi_enable(port);
							netif_tx_wake_all_queues(dev);
						} else {
							netdev_err(dev, "Unable to restart QPS\n");
						}
						mutex_unlock(&port->port_lock);
					}
				}
			}
		}
	pr_info("re-initializing driver complete\n");
out:
	return;
}

static void ehea_tx_watchdog(struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct ehea_port *port = netdev_priv(dev);
|
|
|
|
|
2007-10-01 22:33:18 +08:00
|
|
|
if (netif_carrier_ok(dev) &&
|
|
|
|
!test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
|
2008-07-03 22:18:51 +08:00
|
|
|
ehea_schedule_port_reset(port);
|
2006-09-13 23:44:31 +08:00
|
|
|
}
|
|
|
|
|
2012-01-13 16:06:32 +08:00
|
|
|
static int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
|
2006-09-13 23:44:31 +08:00
|
|
|
{
|
|
|
|
struct hcp_query_ehea *cb;
|
|
|
|
u64 hret;
|
|
|
|
int ret;
|
|
|
|
|
2009-01-22 06:45:33 +08:00
|
|
|
cb = (void *)get_zeroed_page(GFP_KERNEL);
|
2006-09-13 23:44:31 +08:00
|
|
|
if (!cb) {
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
hret = ehea_h_query_ehea(adapter->handle, cb);
|
|
|
|
|
|
|
|
if (hret != H_SUCCESS) {
|
|
|
|
ret = -EIO;
|
|
|
|
goto out_herr;
|
|
|
|
}
|
|
|
|
|
|
|
|
adapter->max_mc_mac = cb->max_mc_mac - 1;
|
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
out_herr:
|
2009-01-22 06:45:33 +08:00
|
|
|
free_page((unsigned long)cb);
|
2006-09-13 23:44:31 +08:00
|
|
|
out:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
static int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
{
	struct hcp_ehea_port_cb4 *cb4;
	u64 hret;
	int ret = 0;

	*jumbo = 0;

	/* (Try to) enable *jumbo frames */
	cb4 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb4) {
		pr_err("no mem for cb4\n");
		ret = -ENOMEM;
		goto out;
	} else {
		hret = ehea_h_query_ehea_port(port->adapter->handle,
					      port->logical_port_id,
					      H_PORT_CB4,
					      H_PORT_CB4_JUMBO, cb4);
		if (hret == H_SUCCESS) {
			if (cb4->jumbo_frame)
				*jumbo = 1;
			else {
				cb4->jumbo_frame = 1;
				hret = ehea_h_modify_ehea_port(port->adapter->
							       handle,
							       port->
							       logical_port_id,
							       H_PORT_CB4,
							       H_PORT_CB4_JUMBO,
							       cb4);
				if (hret == H_SUCCESS)
					*jumbo = 1;
			}
		} else
			ret = -EINVAL;

		free_page((unsigned long)cb4);
	}
out:
	return ret;
}

static ssize_t ehea_show_port_id(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
	return sprintf(buf, "%d", port->logical_port_id);
}

static DEVICE_ATTR(log_port_id, 0444, ehea_show_port_id, NULL);

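/*
 * Device release callback for a logical port; drops the of_node
 * reference taken in ehea_register_port().
 */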
static void logical_port_release(struct device *dev)
{
	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
	of_node_put(port->ofdev.dev.of_node);
}

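/*
 * Register the logical port as a device on the ibmebus bus and expose
 * its read-only log_port_id attribute in sysfs. Returns the embedded
 * struct device on success, NULL on failure.
 */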
static struct device *ehea_register_port(struct ehea_port *port,
					 struct device_node *dn)
{
	int ret;

	port->ofdev.dev.of_node = of_node_get(dn);
	port->ofdev.dev.parent = &port->adapter->ofdev->dev;
	port->ofdev.dev.bus = &ibmebus_bus_type;

	dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++);
	port->ofdev.dev.release = logical_port_release;

	ret = of_device_register(&port->ofdev);
	if (ret) {
		pr_err("failed to register device. ret=%d\n", ret);
		goto out;
	}

	ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
	if (ret) {
		pr_err("failed to register attributes, ret=%d\n", ret);
		goto out_unreg_of_dev;
	}

	return &port->ofdev.dev;

out_unreg_of_dev:
	of_device_unregister(&port->ofdev);
out:
	return NULL;
}

static void ehea_unregister_port(struct ehea_port *port)
{
	device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
	of_device_unregister(&port->ofdev);
}

static const struct net_device_ops ehea_netdev_ops = {
	.ndo_open		= ehea_open,
	.ndo_stop		= ehea_stop,
	.ndo_start_xmit		= ehea_start_xmit,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ehea_netpoll,
#endif
	.ndo_get_stats64	= ehea_get_stats64,
	.ndo_set_mac_address	= ehea_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= ehea_set_multicast_list,
	.ndo_vlan_rx_add_vid	= ehea_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ehea_vlan_rx_kill_vid,
	.ndo_tx_timeout		= ehea_tx_watchdog,
};

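/*
 * Allocate and initialize one logical port: a multiqueue net_device
 * with struct ehea_port as its private area, the multicast list,
 * offload feature flags and the reset/stats work items, registered
 * both on ibmebus and with the network stack.
 */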
static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
						u32 logical_port_id,
						struct device_node *dn)
{
	int ret;
	struct net_device *dev;
	struct ehea_port *port;
	struct device *port_dev;
	int jumbo;

	/* allocate memory for the port structures */
	dev = alloc_etherdev_mq(sizeof(struct ehea_port), EHEA_MAX_PORT_RES);

	if (!dev) {
		ret = -ENOMEM;
		goto out_err;
	}

	port = netdev_priv(dev);

	mutex_init(&port->port_lock);
	port->state = EHEA_PORT_DOWN;
	port->sig_comp_iv = sq_entries / 10;

	port->adapter = adapter;
	port->netdev = dev;
	port->logical_port_id = logical_port_id;

	port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);

	port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
	if (!port->mc_list) {
		ret = -ENOMEM;
		goto out_free_ethdev;
	}

	INIT_LIST_HEAD(&port->mc_list->list);

	ret = ehea_sense_port_attr(port);
	if (ret)
		goto out_free_mc_list;

	netif_set_real_num_rx_queues(dev, port->num_def_qps);
	netif_set_real_num_tx_queues(dev, port->num_def_qps);

	port_dev = ehea_register_port(port, dn);
	if (!port_dev)
		goto out_free_mc_list;

	SET_NETDEV_DEV(dev, port_dev);

	/* initialize net_device structure */
	memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);

	dev->netdev_ops = &ehea_netdev_ops;
	ehea_set_ethtool_ops(dev);

	dev->hw_features = NETIF_F_SG | NETIF_F_TSO |
		NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX;
	dev->features = NETIF_F_SG | NETIF_F_TSO |
		NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
		NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM;
	dev->vlan_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA |
		NETIF_F_IP_CSUM;
	dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;

	/* MTU range: 68 - 9022 */
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = EHEA_MAX_PACKET_SIZE;

	INIT_WORK(&port->reset_task, ehea_reset_port);
	INIT_DELAYED_WORK(&port->stats_work, ehea_update_stats);

	init_waitqueue_head(&port->swqe_avail_wq);
	init_waitqueue_head(&port->restart_wq);

	ret = register_netdev(dev);
	if (ret) {
		pr_err("register_netdev failed. ret=%d\n", ret);
		goto out_unreg_port;
	}

	ret = ehea_get_jumboframe_status(port, &jumbo);
	if (ret)
		netdev_err(dev, "failed determining jumbo frame status\n");

	netdev_info(dev, "Jumbo frames are %sabled\n",
		    jumbo == 1 ? "en" : "dis");

	adapter->active_ports++;

	return port;

out_unreg_port:
	ehea_unregister_port(port);

out_free_mc_list:
	kfree(port->mc_list);

out_free_ethdev:
	free_netdev(dev);

out_err:
	pr_err("setting up logical port with id=%d failed, ret=%d\n",
	       logical_port_id, ret);
	return NULL;
}

static void ehea_shutdown_single_port(struct ehea_port *port)
{
	struct ehea_adapter *adapter = port->adapter;

	cancel_work_sync(&port->reset_task);
	cancel_delayed_work_sync(&port->stats_work);
	unregister_netdev(port->netdev);
	ehea_unregister_port(port);
	kfree(port->mc_list);
	free_netdev(port->netdev);
	adapter->active_ports--;
}

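/*
 * Walk the adapter's device-tree node and set up one logical port for
 * each child that carries an "ibm,hea-port-no" property; children
 * without that property are skipped with an error message.
 */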
static int ehea_setup_ports(struct ehea_adapter *adapter)
{
	struct device_node *lhea_dn;
	struct device_node *eth_dn = NULL;

	const u32 *dn_log_port_id;
	int i = 0;

	lhea_dn = adapter->ofdev->dev.of_node;
	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {

		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
						 NULL);
		if (!dn_log_port_id) {
			pr_err("bad device node: eth_dn name=%pOF\n", eth_dn);
			continue;
		}

		if (ehea_add_adapter_mr(adapter)) {
			pr_err("creating MR failed\n");
			of_node_put(eth_dn);
			return -EIO;
		}

		adapter->port[i] = ehea_setup_single_port(adapter,
							  *dn_log_port_id,
							  eth_dn);
		if (adapter->port[i])
			netdev_info(adapter->port[i]->netdev,
				    "logical port id #%d\n", *dn_log_port_id);
		else
			ehea_remove_adapter_mr(adapter);

		i++;
	}
	return 0;
}

static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
					   u32 logical_port_id)
{
	struct device_node *lhea_dn;
	struct device_node *eth_dn = NULL;
	const u32 *dn_log_port_id;

	lhea_dn = adapter->ofdev->dev.of_node;
	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {

		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
						 NULL);
		if (dn_log_port_id)
			if (*dn_log_port_id == logical_port_id)
				return eth_dn;
	}

	return NULL;
}

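/*
 * sysfs store handler backing the adapter's probe_port attribute:
 * writing a logical port id adds that port at runtime, roughly
 * (path shown for illustration only)
 *	echo 2 > /sys/bus/ibmebus/devices/<lhea>/probe_port
 */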
static ssize_t ehea_probe_port(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct ehea_adapter *adapter = dev_get_drvdata(dev);
	struct ehea_port *port;
	struct device_node *eth_dn = NULL;
	int i;

	u32 logical_port_id;

	sscanf(buf, "%d", &logical_port_id);

	port = ehea_get_port(adapter, logical_port_id);

	if (port) {
		netdev_info(port->netdev, "adding port with logical port id=%d failed: port already configured\n",
			    logical_port_id);
		return -EINVAL;
	}

	eth_dn = ehea_get_eth_dn(adapter, logical_port_id);

	if (!eth_dn) {
		pr_info("no logical port with id %d found\n", logical_port_id);
		return -EINVAL;
	}

	if (ehea_add_adapter_mr(adapter)) {
		pr_err("creating MR failed\n");
		return -EIO;
	}

	port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);

	of_node_put(eth_dn);

	if (port) {
		for (i = 0; i < EHEA_MAX_PORTS; i++)
			if (!adapter->port[i]) {
				adapter->port[i] = port;
				break;
			}

		netdev_info(port->netdev, "added: (logical port id=%d)\n",
			    logical_port_id);
	} else {
		ehea_remove_adapter_mr(adapter);
		return -EIO;
	}

	return (ssize_t) count;
}

static ssize_t ehea_remove_port(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct ehea_adapter *adapter = dev_get_drvdata(dev);
	struct ehea_port *port;
	int i;
	u32 logical_port_id;

	sscanf(buf, "%d", &logical_port_id);

	port = ehea_get_port(adapter, logical_port_id);

	if (port) {
		netdev_info(port->netdev, "removed: (logical port id=%d)\n",
			    logical_port_id);

		ehea_shutdown_single_port(port);

		for (i = 0; i < EHEA_MAX_PORTS; i++)
			if (adapter->port[i] == port) {
				adapter->port[i] = NULL;
				break;
			}
	} else {
		pr_err("removing port with logical port id=%d failed. port not configured.\n",
		       logical_port_id);
		return -EINVAL;
	}

	ehea_remove_adapter_mr(adapter);

	return (ssize_t) count;
}

static DEVICE_ATTR(probe_port, 0200, NULL, ehea_probe_port);
static DEVICE_ATTR(remove_port, 0200, NULL, ehea_remove_port);

static int ehea_create_device_sysfs(struct platform_device *dev)
{
	int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
	if (ret)
		goto out;

	ret = device_create_file(&dev->dev, &dev_attr_remove_port);
out:
	return ret;
}

static void ehea_remove_device_sysfs(struct platform_device *dev)
{
	device_remove_file(&dev->dev, &dev_attr_probe_port);
	device_remove_file(&dev->dev, &dev_attr_remove_port);
}

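/*
 * On SYS_RESTART, unregister the driver so all eHEA firmware
 * resources are freed before the machine reboots.
 */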
static int ehea_reboot_notifier(struct notifier_block *nb,
				unsigned long action, void *unused)
{
	if (action == SYS_RESTART) {
		pr_info("Reboot: freeing all eHEA resources\n");
		ibmebus_unregister_driver(&ehea_driver);
	}
	return NOTIFY_DONE;
}

static struct notifier_block ehea_reboot_nb = {
	.notifier_call = ehea_reboot_notifier,
};

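/*
 * Memory hotplug notifier: on online/offline events, stop all
 * transfers, update the section bitmap and re-register the memory
 * regions with firmware via ehea_rereg_mrs().
 */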
static int ehea_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	int ret = NOTIFY_BAD;
	struct memory_notify *arg = data;

	mutex_lock(&dlpar_mem_lock);

	switch (action) {
	case MEM_CANCEL_OFFLINE:
		pr_info("memory offlining canceled");
		/* Fall through: re-add canceled memory block */

	case MEM_ONLINE:
		pr_info("memory is going online");
		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
		if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
			goto out_unlock;
		ehea_rereg_mrs();
		break;

	case MEM_GOING_OFFLINE:
		pr_info("memory is going offline");
		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
		if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
			goto out_unlock;
		ehea_rereg_mrs();
		break;

	default:
		break;
	}

	ehea_update_firmware_handles();
	ret = NOTIFY_OK;

out_unlock:
	mutex_unlock(&dlpar_mem_lock);
	return ret;
}

static struct notifier_block ehea_mem_nb = {
	.notifier_call = ehea_mem_notifier,
};

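/*
 * Crash handler: force-free all firmware handles and deregister all
 * broadcast/multicast (BCMC) entries, leaving the adapter in a state
 * a crash kernel can reinitialize.
 */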
static void ehea_crash_handler(void)
{
	int i;

	if (ehea_fw_handles.arr)
		for (i = 0; i < ehea_fw_handles.num_entries; i++)
			ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
					     ehea_fw_handles.arr[i].fwh,
					     FORCE_FREE);

	if (ehea_bcmc_regs.arr)
		for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
			ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
					      ehea_bcmc_regs.arr[i].port_id,
					      ehea_bcmc_regs.arr[i].reg_type,
					      ehea_bcmc_regs.arr[i].macaddr,
					      0, H_DEREG_BCMC);
}

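/*
 * The busmap, reboot, memory and crash hooks are global, so they are
 * registered exactly once; the atomic counter below guards against
 * concurrent probes of multiple adapters.
 */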
static atomic_t ehea_memory_hooks_registered;

/* Register memory hooks on probe of first adapter */
static int ehea_register_memory_hooks(void)
{
	int ret = 0;

	if (atomic_inc_return(&ehea_memory_hooks_registered) > 1)
		return 0;

	ret = ehea_create_busmap();
	if (ret) {
		pr_info("ehea_create_busmap failed\n");
		goto out;
	}

	ret = register_reboot_notifier(&ehea_reboot_nb);
	if (ret) {
		pr_info("register_reboot_notifier failed\n");
		goto out;
	}

	ret = register_memory_notifier(&ehea_mem_nb);
	if (ret) {
		pr_info("register_memory_notifier failed\n");
		goto out2;
	}

	ret = crash_shutdown_register(ehea_crash_handler);
	if (ret) {
		pr_info("crash_shutdown_register failed\n");
		goto out3;
	}

	return 0;

out3:
	unregister_memory_notifier(&ehea_mem_nb);
out2:
	unregister_reboot_notifier(&ehea_reboot_nb);
out:
	atomic_dec(&ehea_memory_hooks_registered);
	return ret;
}

static void ehea_unregister_memory_hooks(void)
{
	/* Only remove the hooks if we've registered them */
	if (atomic_read(&ehea_memory_hooks_registered) == 0)
		return;

	unregister_reboot_notifier(&ehea_reboot_nb);
	if (crash_shutdown_unregister(ehea_crash_handler))
		pr_info("failed unregistering crash handler\n");
	unregister_memory_notifier(&ehea_mem_nb);
}

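/*
 * Probe one lhea adapter: read its firmware handle from the device
 * tree, create the notification event queue (NEQ), expose the
 * probe_port/remove_port sysfs controls and bring up all ports found
 * in the device tree.
 */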
static int ehea_probe_adapter(struct platform_device *dev)
{
	struct ehea_adapter *adapter;
	const u64 *adapter_handle;
	int ret;
	int i;

	ret = ehea_register_memory_hooks();
	if (ret)
		return ret;

	if (!dev || !dev->dev.of_node) {
		pr_err("Invalid ibmebus device probed\n");
		return -EINVAL;
	}

	adapter = devm_kzalloc(&dev->dev, sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		ret = -ENOMEM;
		dev_err(&dev->dev, "no mem for ehea_adapter\n");
		goto out;
	}

	list_add(&adapter->list, &adapter_list);

	adapter->ofdev = dev;

	adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle",
					 NULL);
	if (adapter_handle)
		adapter->handle = *adapter_handle;

	if (!adapter->handle) {
		dev_err(&dev->dev, "failed getting handle for adapter"
			" '%pOF'\n", dev->dev.of_node);
		ret = -ENODEV;
		goto out_free_ad;
	}

	adapter->pd = EHEA_PD_ID;

	platform_set_drvdata(dev, adapter);

	/* initialize adapter and ports */
	/* get adapter properties */
	ret = ehea_sense_adapter_attr(adapter);
	if (ret) {
		dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);
		goto out_free_ad;
	}

	adapter->neq = ehea_create_eq(adapter,
				      EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
	if (!adapter->neq) {
		ret = -EIO;
		dev_err(&dev->dev, "NEQ creation failed\n");
		goto out_free_ad;
	}

	tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
		     (unsigned long)adapter);

	ret = ehea_create_device_sysfs(dev);
	if (ret)
		goto out_kill_eq;

	ret = ehea_setup_ports(adapter);
	if (ret) {
		dev_err(&dev->dev, "setup_ports failed\n");
		goto out_rem_dev_sysfs;
	}

	ret = ibmebus_request_irq(adapter->neq->attr.ist1,
				  ehea_interrupt_neq, 0,
				  "ehea_neq", adapter);
	if (ret) {
		dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
		goto out_shutdown_ports;
	}

	/* Handle any events that might be pending. */
	tasklet_hi_schedule(&adapter->neq_tasklet);

	ret = 0;
	goto out;

out_shutdown_ports:
	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i]) {
			ehea_shutdown_single_port(adapter->port[i]);
			adapter->port[i] = NULL;
		}

out_rem_dev_sysfs:
	ehea_remove_device_sysfs(dev);

out_kill_eq:
	ehea_destroy_eq(adapter->neq);

out_free_ad:
	list_del(&adapter->list);

out:
	ehea_update_firmware_handles();

	return ret;
}

static int ehea_remove(struct platform_device *dev)
{
	struct ehea_adapter *adapter = platform_get_drvdata(dev);
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i]) {
			ehea_shutdown_single_port(adapter->port[i]);
			adapter->port[i] = NULL;
		}

	ehea_remove_device_sysfs(dev);

	ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
	tasklet_kill(&adapter->neq_tasklet);

	ehea_destroy_eq(adapter->neq);
	ehea_remove_adapter_mr(adapter);
	list_del(&adapter->list);

	ehea_update_firmware_handles();

	return 0;
}

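/*
 * Validate the queue-size module parameters (rq1..rq3 and sq) against
 * the hardware minimum/maximum before any resources are allocated.
 */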
static int check_module_parm(void)
{
	int ret = 0;

	if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
		pr_info("Bad parameter: rq1_entries\n");
		ret = -EINVAL;
	}
	if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
		pr_info("Bad parameter: rq2_entries\n");
		ret = -EINVAL;
	}
	if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
		pr_info("Bad parameter: rq3_entries\n");
		ret = -EINVAL;
	}
	if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
	    (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
		pr_info("Bad parameter: sq_entries\n");
		ret = -EINVAL;
	}

	return ret;
}

static ssize_t capabilities_show(struct device_driver *drv, char *buf)
{
	return sprintf(buf, "%d", EHEA_CAPABILITIES);
}

static DRIVER_ATTR_RO(capabilities);

static int __init ehea_module_init(void)
{
	int ret;

	pr_info("IBM eHEA ethernet device driver (Release %s)\n", DRV_VERSION);

	memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
	memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));

	mutex_init(&ehea_fw_handles.lock);
	spin_lock_init(&ehea_bcmc_regs.lock);

	ret = check_module_parm();
	if (ret)
		goto out;

	ret = ibmebus_register_driver(&ehea_driver);
	if (ret) {
		pr_err("failed registering eHEA device driver on ebus\n");
		goto out;
	}

	ret = driver_create_file(&ehea_driver.driver,
				 &driver_attr_capabilities);
	if (ret) {
		pr_err("failed to register capabilities attribute, ret=%d\n",
		       ret);
		goto out2;
	}

	return ret;

out2:
	ibmebus_unregister_driver(&ehea_driver);
out:
	return ret;
}

static void __exit ehea_module_exit(void)
{
	driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
	ibmebus_unregister_driver(&ehea_driver);
	ehea_unregister_memory_hooks();
	kfree(ehea_fw_handles.arr);
	kfree(ehea_bcmc_regs.arr);
	ehea_destroy_busmap();
}

module_init(ehea_module_init);
module_exit(ehea_module_exit);