// SPDX-License-Identifier: GPL-2.0-only
/*
 * HighPoint RR3xxx/4xxx controller driver for Linux
 * Copyright (C) 2006-2015 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * Please report bugs/comments/suggestions to linux@highpoint-tech.com
 *
 * For more information, visit http://www.highpoint-tech.com
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>

#include "hptiop.h"

MODULE_AUTHOR("HighPoint Technologies, Inc.");
MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver");

static char driver_name[] = "hptiop";
static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver";
static const char driver_ver[] = "v1.10.0";

static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec);
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
				struct hpt_iop_request_scsi_command *req);
static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 tag);
static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag);
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);

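/*
 * The driver handles three register interface families through one ops
 * table (struct hptiop_adapter_ops): ITL, MV and MVFrey.  Family-specific
 * behaviour (BAR mapping, interrupt handling, request posting) is
 * dispatched through hba->ops, so most helpers below come in _itl, _mv
 * and _mvfrey variants.
 */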
static int iop_wait_ready_itl(struct hptiop_hba *hba, u32 millisec)
{
	u32 req = 0;
	int i;

	for (i = 0; i < millisec; i++) {
		req = readl(&hba->u.itl.iop->inbound_queue);
		if (req != IOPMU_QUEUE_EMPTY)
			break;
		msleep(1);
	}

	if (req != IOPMU_QUEUE_EMPTY) {
		writel(req, &hba->u.itl.iop->outbound_queue);
		readl(&hba->u.itl.iop->outbound_intstatus);
		return 0;
	}

	return -1;
}

static int iop_wait_ready_mv(struct hptiop_hba *hba, u32 millisec)
{
	return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
}

static int iop_wait_ready_mvfrey(struct hptiop_hba *hba, u32 millisec)
{
	return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
}

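/*
 * ITL tag dispatch: tags with IOPMU_QUEUE_ADDR_HOST_BIT set identify
 * requests that originated on the host (SCSI commands); all others are
 * IOP-originated requests (e.g. ioctl passthrough) and are handed to
 * the IOP request callback.
 */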
static void hptiop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
	if (tag & IOPMU_QUEUE_ADDR_HOST_BIT)
		hptiop_host_request_callback_itl(hba,
				tag & ~IOPMU_QUEUE_ADDR_HOST_BIT);
	else
		hptiop_iop_request_callback_itl(hba, tag);
}

static void hptiop_drain_outbound_queue_itl(struct hptiop_hba *hba)
{
	u32 req;

	while ((req = readl(&hba->u.itl.iop->outbound_queue)) !=
			IOPMU_QUEUE_EMPTY) {

		if (req & IOPMU_QUEUE_MASK_HOST_BITS)
			hptiop_request_callback_itl(hba, req);
		else {
			struct hpt_iop_request_header __iomem * p;

			p = (struct hpt_iop_request_header __iomem *)
				((char __iomem *)hba->u.itl.iop + req);

			if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) {
				if (readl(&p->context))
					hptiop_request_callback_itl(hba, req);
				else
					writel(1, &p->context);
			}
			else
				hptiop_request_callback_itl(hba, req);
		}
	}
}

static int iop_intr_itl(struct hptiop_hba *hba)
{
	struct hpt_iopmu_itl __iomem *iop = hba->u.itl.iop;
	void __iomem *plx = hba->u.itl.plx;
	u32 status;
	int ret = 0;

	if (plx && readl(plx + 0x11C5C) & 0xf)
		writel(1, plx + 0x11C60);

	status = readl(&iop->outbound_intstatus);

	if (status & IOPMU_OUTBOUND_INT_MSG0) {
		u32 msg = readl(&iop->outbound_msgaddr0);

		dprintk("received outbound msg %x\n", msg);
		writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus);
		hptiop_message_callback(hba, msg);
		ret = 1;
	}

	if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
		hptiop_drain_outbound_queue_itl(hba);
		ret = 1;
	}

	return ret;
}

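/*
 * The MV message unit exposes the outbound queue as a ring of 64-bit
 * entries indexed by head/tail registers: the IOP advances the head when
 * it posts a completion, the host consumes entries and advances the
 * tail.  An empty queue is signalled by head == tail, so a tag of 0 from
 * mv_outbound_read() means nothing is pending.
 */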
static u64 mv_outbound_read(struct hpt_iopmu_mv __iomem *mu)
{
	u32 outbound_tail = readl(&mu->outbound_tail);
	u32 outbound_head = readl(&mu->outbound_head);

	if (outbound_tail != outbound_head) {
		u64 p;

		memcpy_fromio(&p, &mu->outbound_q[mu->outbound_tail], 8);
		outbound_tail++;

		if (outbound_tail == MVIOP_QUEUE_LEN)
			outbound_tail = 0;
		writel(outbound_tail, &mu->outbound_tail);
		return p;
	} else
		return 0;
}

static void mv_inbound_write(u64 p, struct hptiop_hba *hba)
{
	u32 inbound_head = readl(&hba->u.mv.mu->inbound_head);
	u32 head = inbound_head + 1;

	if (head == MVIOP_QUEUE_LEN)
		head = 0;

	memcpy_toio(&hba->u.mv.mu->inbound_q[inbound_head], &p, 8);
	writel(head, &hba->u.mv.mu->inbound_head);
	writel(MVIOP_MU_INBOUND_INT_POSTQUEUE,
		&hba->u.mv.regs->inbound_doorbell);
}

static void hptiop_request_callback_mv(struct hptiop_hba *hba, u64 tag)
{
	u32 req_type = (tag >> 5) & 0x7;
	struct hpt_iop_request_scsi_command *req;

	dprintk("hptiop_request_callback_mv: tag=%llx\n", tag);

	BUG_ON((tag & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) == 0);

	switch (req_type) {
	case IOP_REQUEST_TYPE_GET_CONFIG:
	case IOP_REQUEST_TYPE_SET_CONFIG:
		hba->msg_done = 1;
		break;

	case IOP_REQUEST_TYPE_SCSI_COMMAND:
		req = hba->reqs[tag >> 8].req_virt;
		if (likely(tag & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT))
			req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);

		hptiop_finish_scsi_req(hba, tag>>8, req);
		break;

	default:
		break;
	}
}

static int iop_intr_mv(struct hptiop_hba *hba)
{
	u32 status;
	int ret = 0;

	status = readl(&hba->u.mv.regs->outbound_doorbell);
	writel(~status, &hba->u.mv.regs->outbound_doorbell);

	if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
		u32 msg;
		msg = readl(&hba->u.mv.mu->outbound_msg);
		dprintk("received outbound msg %x\n", msg);
		hptiop_message_callback(hba, msg);
		ret = 1;
	}

	if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
		u64 tag;

		while ((tag = mv_outbound_read(hba->u.mv.mu)))
			hptiop_request_callback_mv(hba, tag);
		ret = 1;
	}

	return ret;
}

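/*
 * MVFrey completions carry the request type in the low four tag bits
 * and the request index in bits 4..11, matching the encoding written
 * by hptiop_post_req_mvfrey() below.
 */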
static void hptiop_request_callback_mvfrey(struct hptiop_hba *hba, u32 _tag)
{
	u32 req_type = _tag & 0xf;
	struct hpt_iop_request_scsi_command *req;

	switch (req_type) {
	case IOP_REQUEST_TYPE_GET_CONFIG:
	case IOP_REQUEST_TYPE_SET_CONFIG:
		hba->msg_done = 1;
		break;

	case IOP_REQUEST_TYPE_SCSI_COMMAND:
		req = hba->reqs[(_tag >> 4) & 0xff].req_virt;
		if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
			req->header.result = IOP_RESULT_SUCCESS;
		hptiop_finish_scsi_req(hba, (_tag >> 4) & 0xff, req);
		break;

	default:
		break;
	}
}

static int iop_intr_mvfrey(struct hptiop_hba *hba)
{
	u32 _tag, status, cptr, cur_rptr;
	int ret = 0;

	if (hba->initialized)
		writel(0, &(hba->u.mvfrey.mu->pcie_f0_int_enable));

	status = readl(&(hba->u.mvfrey.mu->f0_doorbell));
	if (status) {
		writel(status, &(hba->u.mvfrey.mu->f0_doorbell));
		if (status & CPU_TO_F0_DRBL_MSG_BIT) {
			u32 msg = readl(&(hba->u.mvfrey.mu->cpu_to_f0_msg_a));
			dprintk("received outbound msg %x\n", msg);
			hptiop_message_callback(hba, msg);
		}
		ret = 1;
	}

	status = readl(&(hba->u.mvfrey.mu->isr_cause));
	if (status) {
		writel(status, &(hba->u.mvfrey.mu->isr_cause));
		do {
			cptr = *hba->u.mvfrey.outlist_cptr & 0xff;
			cur_rptr = hba->u.mvfrey.outlist_rptr;
			while (cur_rptr != cptr) {
				cur_rptr++;
				if (cur_rptr == hba->u.mvfrey.list_count)
					cur_rptr = 0;

				_tag = hba->u.mvfrey.outlist[cur_rptr].val;
				BUG_ON(!(_tag & IOPMU_QUEUE_MASK_HOST_BITS));
				hptiop_request_callback_mvfrey(hba, _tag);
				ret = 1;
			}
			hba->u.mvfrey.outlist_rptr = cur_rptr;
		} while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff));
	}

	if (hba->initialized)
		writel(0x1010, &(hba->u.mvfrey.mu->pcie_f0_int_enable));

	return ret;
}

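/*
 * Synchronous requests (used before the normal request/response
 * machinery is fully up, e.g. for GET/SET_CONFIG) are posted and then
 * completed by polling the per-family interrupt handler once per
 * millisecond up to the caller's timeout.
 */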
static int iop_send_sync_request_itl(struct hptiop_hba *hba,
					void __iomem *_req, u32 millisec)
{
	struct hpt_iop_request_header __iomem *req = _req;
	u32 i;

	writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST, &req->flags);
	writel(0, &req->context);
	writel((unsigned long)req - (unsigned long)hba->u.itl.iop,
			&hba->u.itl.iop->inbound_queue);
	readl(&hba->u.itl.iop->outbound_intstatus);

	for (i = 0; i < millisec; i++) {
		iop_intr_itl(hba);
		if (readl(&req->context))
			return 0;
		msleep(1);
	}

	return -1;
}

static int iop_send_sync_request_mv(struct hptiop_hba *hba,
					u32 size_bits, u32 millisec)
{
	struct hpt_iop_request_header *reqhdr = hba->u.mv.internal_req;
	u32 i;

	hba->msg_done = 0;
	reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST);
	mv_inbound_write(hba->u.mv.internal_req_phy |
			MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bits, hba);

	for (i = 0; i < millisec; i++) {
		iop_intr_mv(hba);
		if (hba->msg_done)
			return 0;
		msleep(1);
	}
	return -1;
}

static int iop_send_sync_request_mvfrey(struct hptiop_hba *hba,
					u32 size_bits, u32 millisec)
{
	struct hpt_iop_request_header *reqhdr =
		hba->u.mvfrey.internal_req.req_virt;
	u32 i;

	hba->msg_done = 0;
	reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST);
	hba->ops->post_req(hba, &(hba->u.mvfrey.internal_req));

	for (i = 0; i < millisec; i++) {
		iop_intr_mvfrey(hba);
		if (hba->msg_done)
			break;
		msleep(1);
	}
	return hba->msg_done ? 0 : -1;
}

static void hptiop_post_msg_itl(struct hptiop_hba *hba, u32 msg)
{
	writel(msg, &hba->u.itl.iop->inbound_msgaddr0);
	readl(&hba->u.itl.iop->outbound_intstatus);
}

static void hptiop_post_msg_mv(struct hptiop_hba *hba, u32 msg)
{
	writel(msg, &hba->u.mv.mu->inbound_msg);
	writel(MVIOP_MU_INBOUND_INT_MSG, &hba->u.mv.regs->inbound_doorbell);
	readl(&hba->u.mv.regs->inbound_doorbell);
}

static void hptiop_post_msg_mvfrey(struct hptiop_hba *hba, u32 msg)
{
	writel(msg, &(hba->u.mvfrey.mu->f0_to_cpu_msg_a));
	readl(&(hba->u.mvfrey.mu->f0_to_cpu_msg_a));
}

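/*
 * iop_send_sync_msg() posts a message with the hardware interrupt
 * masked and polls for completion under the host lock, so it can be
 * used both at init time and while the interrupt handler is live.
 */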
static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
{
	u32 i;

	hba->msg_done = 0;
	hba->ops->disable_intr(hba);
	hba->ops->post_msg(hba, msg);

	for (i = 0; i < millisec; i++) {
		spin_lock_irq(hba->host->host_lock);
		hba->ops->iop_intr(hba);
		spin_unlock_irq(hba->host->host_lock);
		if (hba->msg_done)
			break;
		msleep(1);
	}

	hba->ops->enable_intr(hba);
	return hba->msg_done? 0 : -1;
}

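/*
 * GET_CONFIG/SET_CONFIG exchange: the ITL variants build the request
 * directly in a frame popped from the IOP's inbound queue, while the
 * MV/MVFrey variants go through the preallocated internal request
 * buffer (MVFrey additionally exposes its config through BAR 0).
 */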
static int iop_get_config_itl(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	u32 req32;
	struct hpt_iop_request_get_config __iomem *req;

	req32 = readl(&hba->u.itl.iop->inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	req = (struct hpt_iop_request_get_config __iomem *)
			((unsigned long)hba->u.itl.iop + req32);

	writel(0, &req->header.flags);
	writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type);
	writel(sizeof(struct hpt_iop_request_get_config), &req->header.size);
	writel(IOP_RESULT_PENDING, &req->header.result);

	if (iop_send_sync_request_itl(hba, req, 20000)) {
		dprintk("Get config send cmd failed\n");
		return -1;
	}

	memcpy_fromio(config, req, sizeof(*config));
	writel(req32, &hba->u.itl.iop->outbound_queue);
	return 0;
}

static int iop_get_config_mv(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	struct hpt_iop_request_get_config *req = hba->u.mv.internal_req;

	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG);
	req->header.size =
		cpu_to_le32(sizeof(struct hpt_iop_request_get_config));
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG<<5);
	req->header.context_hi32 = 0;

	if (iop_send_sync_request_mv(hba, 0, 20000)) {
		dprintk("Get config send cmd failed\n");
		return -1;
	}

	memcpy(config, req, sizeof(struct hpt_iop_request_get_config));
	return 0;
}

static int iop_get_config_mvfrey(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	struct hpt_iop_request_get_config *info = hba->u.mvfrey.config;

	if (info->header.size != sizeof(struct hpt_iop_request_get_config) ||
			info->header.type != IOP_REQUEST_TYPE_GET_CONFIG)
		return -1;

	config->interface_version = info->interface_version;
	config->firmware_version = info->firmware_version;
	config->max_requests = info->max_requests;
	config->request_size = info->request_size;
	config->max_sg_count = info->max_sg_count;
	config->data_transfer_length = info->data_transfer_length;
	config->alignment_mask = info->alignment_mask;
	config->max_devices = info->max_devices;
	config->sdram_size = info->sdram_size;

	return 0;
}

static int iop_set_config_itl(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	u32 req32;
	struct hpt_iop_request_set_config __iomem *req;

	req32 = readl(&hba->u.itl.iop->inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	req = (struct hpt_iop_request_set_config __iomem *)
			((unsigned long)hba->u.itl.iop + req32);

	memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header),
		(u8 *)config + sizeof(struct hpt_iop_request_header),
		sizeof(struct hpt_iop_request_set_config) -
		sizeof(struct hpt_iop_request_header));

	writel(0, &req->header.flags);
	writel(IOP_REQUEST_TYPE_SET_CONFIG, &req->header.type);
	writel(sizeof(struct hpt_iop_request_set_config), &req->header.size);
	writel(IOP_RESULT_PENDING, &req->header.result);

	if (iop_send_sync_request_itl(hba, req, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	writel(req32, &hba->u.itl.iop->outbound_queue);
	return 0;
}

static int iop_set_config_mv(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	struct hpt_iop_request_set_config *req = hba->u.mv.internal_req;

	memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
	req->header.size =
		cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5);
	req->header.context_hi32 = 0;

	if (iop_send_sync_request_mv(hba, 0, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	return 0;
}

static int iop_set_config_mvfrey(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	struct hpt_iop_request_set_config *req =
		hba->u.mvfrey.internal_req.req_virt;

	memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
	req->header.size =
		cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5);
	req->header.context_hi32 = 0;

	if (iop_send_sync_request_mvfrey(hba, 0, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	return 0;
}

static void hptiop_enable_intr_itl(struct hptiop_hba *hba)
{
	writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0),
		&hba->u.itl.iop->outbound_intmask);
}

static void hptiop_enable_intr_mv(struct hptiop_hba *hba)
{
	writel(MVIOP_MU_OUTBOUND_INT_POSTQUEUE | MVIOP_MU_OUTBOUND_INT_MSG,
		&hba->u.mv.regs->outbound_intmask);
}

static void hptiop_enable_intr_mvfrey(struct hptiop_hba *hba)
{
	writel(CPU_TO_F0_DRBL_MSG_BIT, &(hba->u.mvfrey.mu->f0_doorbell_enable));
	writel(0x1, &(hba->u.mvfrey.mu->isr_enable));
	writel(0x1010, &(hba->u.mvfrey.mu->pcie_f0_int_enable));
}

static int hptiop_initialize_iop(struct hptiop_hba *hba)
{
	/* enable interrupts */
	hba->ops->enable_intr(hba);

	hba->initialized = 1;

	/* start background tasks */
	if (iop_send_sync_msg(hba,
			IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
		printk(KERN_ERR "scsi%d: fail to start background task\n",
			hba->host->host_no);
		return -1;
	}
	return 0;
}

static void __iomem *hptiop_map_pci_bar(struct hptiop_hba *hba, int index)
{
	u32 mem_base_phy, length;
	void __iomem *mem_base_virt;

	struct pci_dev *pcidev = hba->pcidev;

	if (!(pci_resource_flags(pcidev, index) & IORESOURCE_MEM)) {
		printk(KERN_ERR "scsi%d: pci resource invalid\n",
				hba->host->host_no);
		return NULL;
	}

	mem_base_phy = pci_resource_start(pcidev, index);
	length = pci_resource_len(pcidev, index);
	mem_base_virt = ioremap(mem_base_phy, length);

	if (!mem_base_virt) {
		printk(KERN_ERR "scsi%d: Fail to ioremap memory space\n",
				hba->host->host_no);
		return NULL;
	}
	return mem_base_virt;
}

static int hptiop_map_pci_bar_itl(struct hptiop_hba *hba)
{
	struct pci_dev *pcidev = hba->pcidev;
	hba->u.itl.iop = hptiop_map_pci_bar(hba, 0);
	if (hba->u.itl.iop == NULL)
		return -1;
	if ((pcidev->device & 0xff00) == 0x4400) {
		hba->u.itl.plx = hba->u.itl.iop;
		hba->u.itl.iop = hptiop_map_pci_bar(hba, 2);
		if (hba->u.itl.iop == NULL) {
			iounmap(hba->u.itl.plx);
			return -1;
		}
	}
	return 0;
}

static void hptiop_unmap_pci_bar_itl(struct hptiop_hba *hba)
{
	if (hba->u.itl.plx)
		iounmap(hba->u.itl.plx);
	iounmap(hba->u.itl.iop);
}

static int hptiop_map_pci_bar_mv(struct hptiop_hba *hba)
{
	hba->u.mv.regs = hptiop_map_pci_bar(hba, 0);
	if (hba->u.mv.regs == NULL)
		return -1;

	hba->u.mv.mu = hptiop_map_pci_bar(hba, 2);
	if (hba->u.mv.mu == NULL) {
		iounmap(hba->u.mv.regs);
		return -1;
	}

	return 0;
}

static int hptiop_map_pci_bar_mvfrey(struct hptiop_hba *hba)
{
	hba->u.mvfrey.config = hptiop_map_pci_bar(hba, 0);
	if (hba->u.mvfrey.config == NULL)
		return -1;

	hba->u.mvfrey.mu = hptiop_map_pci_bar(hba, 2);
	if (hba->u.mvfrey.mu == NULL) {
		iounmap(hba->u.mvfrey.config);
		return -1;
	}

	return 0;
}

static void hptiop_unmap_pci_bar_mv(struct hptiop_hba *hba)
{
	iounmap(hba->u.mv.regs);
	iounmap(hba->u.mv.mu);
}

static void hptiop_unmap_pci_bar_mvfrey(struct hptiop_hba *hba)
{
	iounmap(hba->u.mvfrey.config);
	iounmap(hba->u.mvfrey.mu);
}

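/*
 * Messages posted back by the IOP: NOP and RESET_COMM acknowledge
 * synchronous messages; RESET completion clears the resetting flag and
 * wakes anyone blocked in hptiop_reset_hba().
 */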
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
{
	dprintk("iop message 0x%x\n", msg);

	if (msg == IOPMU_INBOUND_MSG0_NOP ||
		msg == IOPMU_INBOUND_MSG0_RESET_COMM)
		hba->msg_done = 1;

	if (!hba->initialized)
		return;

	if (msg == IOPMU_INBOUND_MSG0_RESET) {
		atomic_set(&hba->resetting, 0);
		wake_up(&hba->reset_wq);
	}
	else if (msg <= IOPMU_INBOUND_MSG0_MAX)
		hba->msg_done = 1;
}

static struct hptiop_request *get_req(struct hptiop_hba *hba)
{
	struct hptiop_request *ret;

	dprintk("get_req : req=%p\n", hba->req_list);

	ret = hba->req_list;
	if (ret)
		hba->req_list = ret->next;

	return ret;
}

static void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
{
	dprintk("free_req(%d, %p)\n", req->index, req);
	req->next = hba->req_list;
	hba->req_list = req;
}

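/*
 * Completion path shared by all families: unmap DMA, translate the IOP
 * result code into a SCSI midlayer result, copy sense data for check
 * conditions, complete the command and recycle the request slot.
 */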
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
				struct hpt_iop_request_scsi_command *req)
{
	struct scsi_cmnd *scp;

	dprintk("hptiop_finish_scsi_req: req=%p, type=%d, "
			"result=%d, context=0x%x tag=%d\n",
			req, req->header.type, req->header.result,
			req->header.context, tag);

	BUG_ON(!req->header.result);
	BUG_ON(req->header.type != cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND));

	scp = hba->reqs[tag].scp;

	if (HPT_SCP(scp)->mapped)
		scsi_dma_unmap(scp);

	switch (le32_to_cpu(req->header.result)) {
	case IOP_RESULT_SUCCESS:
		scsi_set_resid(scp,
			scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
		scp->result = (DID_OK<<16);
		break;
	case IOP_RESULT_BAD_TARGET:
		scp->result = (DID_BAD_TARGET<<16);
		break;
	case IOP_RESULT_BUSY:
		scp->result = (DID_BUS_BUSY<<16);
		break;
	case IOP_RESULT_RESET:
		scp->result = (DID_RESET<<16);
		break;
	case IOP_RESULT_FAIL:
		scp->result = (DID_ERROR<<16);
		break;
	case IOP_RESULT_INVALID_REQUEST:
		scp->result = (DID_ABORT<<16);
		break;
	case IOP_RESULT_CHECK_CONDITION:
		scsi_set_resid(scp,
			scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
		scp->result = SAM_STAT_CHECK_CONDITION;
		memcpy(scp->sense_buffer, &req->sg_list, SCSI_SENSE_BUFFERSIZE);
		goto skip_resid;

	default:
		scp->result = DID_ABORT << 16;
		break;
	}

	scsi_set_resid(scp,
		scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));

skip_resid:
	dprintk("scsi_done(%p)\n", scp);
	scsi_done(scp);
	free_req(hba, &hba->reqs[tag]);
}

static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 _tag)
{
	struct hpt_iop_request_scsi_command *req;
	u32 tag;

	if (hba->iopintf_v2) {
		tag = _tag & ~IOPMU_QUEUE_REQUEST_RESULT_BIT;
		req = hba->reqs[tag].req_virt;
		if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
			req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);
	} else {
		tag = _tag;
		req = hba->reqs[tag].req_virt;
	}

	hptiop_finish_scsi_req(hba, tag, req);
}

static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
	struct hpt_iop_request_header __iomem *req;
	struct hpt_iop_request_ioctl_command __iomem *p;
	struct hpt_ioctl_k *arg;

	req = (struct hpt_iop_request_header __iomem *)
			((unsigned long)hba->u.itl.iop + tag);
	dprintk("hptiop_iop_request_callback_itl: req=%p, type=%d, "
			"result=%d, context=0x%x tag=%d\n",
			req, readl(&req->type), readl(&req->result),
			readl(&req->context), tag);

	BUG_ON(!readl(&req->result));
	BUG_ON(readl(&req->type) != IOP_REQUEST_TYPE_IOCTL_COMMAND);

	p = (struct hpt_iop_request_ioctl_command __iomem *)req;
	arg = (struct hpt_ioctl_k *)(unsigned long)
		(readl(&req->context) |
			((u64)readl(&req->context_hi32)<<32));

	if (readl(&req->result) == IOP_RESULT_SUCCESS) {
		arg->result = HPT_IOCTL_RESULT_OK;

		if (arg->outbuf_size)
			memcpy_fromio(arg->outbuf,
				&p->buf[(readl(&p->inbuf_size) + 3)& ~3],
				arg->outbuf_size);

		if (arg->bytes_returned)
			*arg->bytes_returned = arg->outbuf_size;
	}
	else
		arg->result = HPT_IOCTL_RESULT_FAILED;

	arg->done(arg);
	writel(tag, &hba->u.itl.iop->outbound_queue);
}

static irqreturn_t hptiop_intr(int irq, void *dev_id)
{
	struct hptiop_hba *hba = dev_id;
	int handled;
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	handled = hba->ops->iop_intr(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return handled;
}

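/*
 * Build the hardware S/G list from the command's scatterlist.  Returns
 * the number of entries, or 0 for a command with no data transfer (in
 * which case the caller leaves HPT_SCP(scp)->mapped clear).
 */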
static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	struct scatterlist *sg;
	int idx, nseg;

	nseg = scsi_dma_map(scp);
	BUG_ON(nseg < 0);
	if (!nseg)
		return 0;

	HPT_SCP(scp)->sgcnt = nseg;
	HPT_SCP(scp)->mapped = 1;

	BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);

	scsi_for_each_sg(scp, sg, HPT_SCP(scp)->sgcnt, idx) {
		psg[idx].pci_address = cpu_to_le64(sg_dma_address(sg)) |
			hba->ops->host_phy_flag;
		psg[idx].size = cpu_to_le32(sg_dma_len(sg));
		psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
			cpu_to_le32(1) : 0;
	}
	return HPT_SCP(scp)->sgcnt;
}

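/*
 * Request posting: each family encodes the request's (shifted) physical
 * address plus size/type bits into its own inbound queue format before
 * ringing the doorbell.
 */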
static void hptiop_post_req_itl(struct hptiop_hba *hba,
					struct hptiop_request *_req)
{
	struct hpt_iop_request_header *reqhdr = _req->req_virt;

	reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
					(u32)_req->index);
	reqhdr->context_hi32 = 0;

	if (hba->iopintf_v2) {
		u32 size, size_bits;

		size = le32_to_cpu(reqhdr->size);
		if (size < 256)
			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
		else if (size < 512)
			size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
		else
			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT |
						IOPMU_QUEUE_ADDR_HOST_BIT;
		writel(_req->req_shifted_phy | size_bits,
			&hba->u.itl.iop->inbound_queue);
	} else
		writel(_req->req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT,
			&hba->u.itl.iop->inbound_queue);
}

static void hptiop_post_req_mv(struct hptiop_hba *hba,
					struct hptiop_request *_req)
{
	struct hpt_iop_request_header *reqhdr = _req->req_virt;
	u32 size, size_bit;

	reqhdr->context = cpu_to_le32(_req->index<<8 |
					IOP_REQUEST_TYPE_SCSI_COMMAND<<5);
	reqhdr->context_hi32 = 0;
	size = le32_to_cpu(reqhdr->size);

	if (size <= 256)
		size_bit = 0;
	else if (size <= 256*2)
		size_bit = 1;
	else if (size <= 256*3)
		size_bit = 2;
	else
		size_bit = 3;

	mv_inbound_write((_req->req_shifted_phy << 5) |
		MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba);
}

static void hptiop_post_req_mvfrey(struct hptiop_hba *hba,
					struct hptiop_request *_req)
{
	struct hpt_iop_request_header *reqhdr = _req->req_virt;
	u32 index;

	reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT |
			IOP_REQUEST_FLAG_ADDR_BITS |
			((_req->req_shifted_phy >> 11) & 0xffff0000));
	reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
			(_req->index << 4) | reqhdr->type);
	reqhdr->context_hi32 = cpu_to_le32((_req->req_shifted_phy << 5) &
			0xffffffff);

	hba->u.mvfrey.inlist_wptr++;
	index = hba->u.mvfrey.inlist_wptr & 0x3fff;

	if (index == hba->u.mvfrey.list_count) {
		index = 0;
		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
	}

	hba->u.mvfrey.inlist[index].addr =
			(dma_addr_t)_req->req_shifted_phy << 5;
	hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4;
	writel(hba->u.mvfrey.inlist_wptr,
		&(hba->u.mvfrey.mu->inbound_write_ptr));
	readl(&(hba->u.mvfrey.mu->inbound_write_ptr));
}

static int hptiop_reset_comm_itl(struct hptiop_hba *hba)
{
	return 0;
}

static int hptiop_reset_comm_mv(struct hptiop_hba *hba)
{
	return 0;
}

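/*
 * After RESET_COMM the MVFrey message unit needs the host list bases
 * again, so re-program the inbound/outbound list addresses and reset
 * the read/write pointers and toggle bits to their post-reset defaults.
 */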
static int hptiop_reset_comm_mvfrey(struct hptiop_hba *hba)
{
	u32 list_count = hba->u.mvfrey.list_count;

	if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000))
		return -1;

	/* wait 100ms for MCU ready */
	msleep(100);

	writel(cpu_to_le32(hba->u.mvfrey.inlist_phy & 0xffffffff),
			&(hba->u.mvfrey.mu->inbound_base));
	writel(cpu_to_le32((hba->u.mvfrey.inlist_phy >> 16) >> 16),
			&(hba->u.mvfrey.mu->inbound_base_high));

	writel(cpu_to_le32(hba->u.mvfrey.outlist_phy & 0xffffffff),
			&(hba->u.mvfrey.mu->outbound_base));
	writel(cpu_to_le32((hba->u.mvfrey.outlist_phy >> 16) >> 16),
			&(hba->u.mvfrey.mu->outbound_base_high));

	writel(cpu_to_le32(hba->u.mvfrey.outlist_cptr_phy & 0xffffffff),
			&(hba->u.mvfrey.mu->outbound_shadow_base));
	writel(cpu_to_le32((hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16),
			&(hba->u.mvfrey.mu->outbound_shadow_base_high));

	hba->u.mvfrey.inlist_wptr = (list_count - 1) | CL_POINTER_TOGGLE;
	*hba->u.mvfrey.outlist_cptr = (list_count - 1) | CL_POINTER_TOGGLE;
	hba->u.mvfrey.outlist_rptr = list_count - 1;
	return 0;
}

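/*
 * queuecommand: grab a free request slot, validate the target, build
 * the S/G table and request header, then hand the request to the
 * family-specific post routine.  Invalid targets are completed
 * immediately with DID_BAD_TARGET.
 */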
static int hptiop_queuecommand_lck(struct scsi_cmnd *scp)
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	struct hpt_iop_request_scsi_command *req;
	int sg_count = 0;
	struct hptiop_request *_req;

	_req = get_req(hba);
	if (_req == NULL) {
		dprintk("hptiop_queuecmd : no free req\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	_req->scp = scp;

	dprintk("hptiop_queuecmd(scp=%p) %d/%d/%d/%llu cdb=(%08x-%08x-%08x-%08x) "
			"req_index=%d, req=%p\n",
			scp,
			host->host_no, scp->device->channel,
			scp->device->id, scp->device->lun,
			cpu_to_be32(((u32 *)scp->cmnd)[0]),
			cpu_to_be32(((u32 *)scp->cmnd)[1]),
			cpu_to_be32(((u32 *)scp->cmnd)[2]),
			cpu_to_be32(((u32 *)scp->cmnd)[3]),
			_req->index, _req->req_virt);

	scp->result = 0;

	if (scp->device->channel ||
			(scp->device->id > hba->max_devices) ||
			((scp->device->id == (hba->max_devices-1)) && scp->device->lun)) {
		scp->result = DID_BAD_TARGET << 16;
		free_req(hba, _req);
		goto cmd_done;
	}

	req = _req->req_virt;

	/* build S/G table */
	sg_count = hptiop_buildsgl(scp, req->sg_list);
	if (!sg_count)
		HPT_SCP(scp)->mapped = 0;

	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND);
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp));
	req->channel = scp->device->channel;
	req->target = scp->device->id;
	req->lun = scp->device->lun;
	req->header.size = cpu_to_le32(struct_size(req, sg_list, sg_count));

	memcpy(req->cdb, scp->cmnd, sizeof(req->cdb));
	hba->ops->post_req(hba, _req);
	return 0;

cmd_done:
	dprintk("scsi_done(scp=%p)\n", scp);
	scsi_done(scp);
	return 0;
}

static DEF_SCSI_QCMD(hptiop_queuecommand)

static const char *hptiop_info(struct Scsi_Host *host)
{
	return driver_name_long;
}

static int hptiop_reset_hba(struct hptiop_hba *hba)
{
	if (atomic_xchg(&hba->resetting, 1) == 0) {
		atomic_inc(&hba->reset_count);
		hba->ops->post_msg(hba, IOPMU_INBOUND_MSG0_RESET);
	}

	wait_event_timeout(hba->reset_wq,
			atomic_read(&hba->resetting) == 0, 60 * HZ);

	if (atomic_read(&hba->resetting)) {
		/* IOP is in unknown state, abort reset */
		printk(KERN_ERR "scsi%d: reset failed\n", hba->host->host_no);
		return -1;
	}

	if (iop_send_sync_msg(hba,
		IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
		dprintk("scsi%d: fail to start background task\n",
				hba->host->host_no);
	}

	return 0;
}

static int hptiop_reset(struct scsi_cmnd *scp)
{
	struct hptiop_hba * hba = (struct hptiop_hba *)scp->device->host->hostdata;

	printk(KERN_WARNING "hptiop_reset(%d/%d/%d)\n",
	       scp->device->host->host_no, -1, -1);

	return hptiop_reset_hba(hba)? FAILED : SUCCESS;
}

static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
					  int queue_depth)
{
	struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata;

	if (queue_depth > hba->max_requests)
		queue_depth = hba->max_requests;
	return scsi_change_queue_depth(sdev, queue_depth);
}

static ssize_t hptiop_show_version(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver);
}

static ssize_t hptiop_show_fw_version(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *host = class_to_shost(dev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n",
				hba->firmware_version >> 24,
				(hba->firmware_version >> 16) & 0xff,
				(hba->firmware_version >> 8) & 0xff,
				hba->firmware_version & 0xff);
}

static struct device_attribute hptiop_attr_version = {
	.attr = {
		.name = "driver-version",
		.mode = S_IRUGO,
	},
	.show = hptiop_show_version,
};

static struct device_attribute hptiop_attr_fw_version = {
	.attr = {
		.name = "firmware-version",
		.mode = S_IRUGO,
	},
	.show = hptiop_show_fw_version,
};

static struct attribute *hptiop_host_attrs[] = {
	&hptiop_attr_version.attr,
	&hptiop_attr_fw_version.attr,
	NULL
};

ATTRIBUTE_GROUPS(hptiop_host);

static int hptiop_device_configure(struct scsi_device *sdev,
		struct queue_limits *lim)
{
	if (sdev->type == TYPE_TAPE)
		lim->max_hw_sectors = 8192;
	return 0;
}

static const struct scsi_host_template driver_template = {
	.module                     = THIS_MODULE,
	.name                       = driver_name,
	.queuecommand               = hptiop_queuecommand,
	.eh_host_reset_handler      = hptiop_reset,
	.info                       = hptiop_info,
	.emulated                   = 0,
	.proc_name                  = driver_name,
	.shost_groups               = hptiop_host_groups,
	.device_configure           = hptiop_device_configure,
	.this_id                    = -1,
	.change_queue_depth         = hptiop_adjust_disk_queue_depth,
	.cmd_size                   = sizeof(struct hpt_cmd_priv),
};

static int hptiop_internal_memalloc_itl(struct hptiop_hba *hba)
{
	return 0;
}

static int hptiop_internal_memalloc_mv(struct hptiop_hba *hba)
{
	hba->u.mv.internal_req = dma_alloc_coherent(&hba->pcidev->dev,
			0x800, &hba->u.mv.internal_req_phy, GFP_KERNEL);
	if (hba->u.mv.internal_req)
		return 0;
	else
		return -1;
}

static int hptiop_internal_memalloc_mvfrey(struct hptiop_hba *hba)
{
	u32 list_count = readl(&hba->u.mvfrey.mu->inbound_conf_ctl);
	char *p;
	dma_addr_t phy;

	BUG_ON(hba->max_request_size == 0);

	if (list_count == 0) {
		BUG_ON(1);
		return -1;
	}

	list_count >>= 16;

	hba->u.mvfrey.list_count = list_count;
	hba->u.mvfrey.internal_mem_size = 0x800 +
			list_count * sizeof(struct mvfrey_inlist_entry) +
			list_count * sizeof(struct mvfrey_outlist_entry) +
			sizeof(int);

	p = dma_alloc_coherent(&hba->pcidev->dev,
			hba->u.mvfrey.internal_mem_size, &phy, GFP_KERNEL);
	if (!p)
		return -1;

	hba->u.mvfrey.internal_req.req_virt = p;
	hba->u.mvfrey.internal_req.req_shifted_phy = phy >> 5;
	hba->u.mvfrey.internal_req.scp = NULL;
	hba->u.mvfrey.internal_req.next = NULL;

	p += 0x800;
	phy += 0x800;

	hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p;
	hba->u.mvfrey.inlist_phy = phy;

	p += list_count * sizeof(struct mvfrey_inlist_entry);
	phy += list_count * sizeof(struct mvfrey_inlist_entry);

	hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p;
	hba->u.mvfrey.outlist_phy = phy;

	p += list_count * sizeof(struct mvfrey_outlist_entry);
	phy += list_count * sizeof(struct mvfrey_outlist_entry);

	hba->u.mvfrey.outlist_cptr = (__le32 *)p;
	hba->u.mvfrey.outlist_cptr_phy = phy;

	return 0;
}

static int hptiop_internal_memfree_itl(struct hptiop_hba *hba)
{
	return 0;
}

static int hptiop_internal_memfree_mv(struct hptiop_hba *hba)
{
	if (hba->u.mv.internal_req) {
		dma_free_coherent(&hba->pcidev->dev, 0x800,
			hba->u.mv.internal_req, hba->u.mv.internal_req_phy);
		return 0;
	} else
		return -1;
}

static int hptiop_internal_memfree_mvfrey(struct hptiop_hba *hba)
{
	if (hba->u.mvfrey.internal_req.req_virt) {
		dma_free_coherent(&hba->pcidev->dev,
			hba->u.mvfrey.internal_mem_size,
			hba->u.mvfrey.internal_req.req_virt,
			(dma_addr_t)
			hba->u.mvfrey.internal_req.req_shifted_phy << 5);
		return 0;
	} else
		return -1;
}

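/*
 * PCI probe: enable the device, set the DMA mask advertised by the
 * family ops, map the BARs, wait for firmware readiness, then read the
 * IOP configuration and size the host from it.
 */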
static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
|
2006-05-16 14:38:09 +08:00
|
|
|
{
|
|
|
|
struct Scsi_Host *host = NULL;
|
|
|
|
struct hptiop_hba *hba;
|
2012-06-14 15:47:07 +08:00
|
|
|
struct hptiop_adapter_ops *iop_ops;
|
2006-05-16 14:38:09 +08:00
|
|
|
struct hpt_iop_request_get_config iop_config;
|
|
|
|
struct hpt_iop_request_set_config set_config;
|
|
|
|
dma_addr_t start_phy;
|
|
|
|
void *start_virt;
|
|
|
|
u32 offset, i, req_size;
|
2019-02-18 15:34:26 +08:00
|
|
|
int rc;
|
2006-05-16 14:38:09 +08:00
|
|
|
|
|
|
|
dprintk("hptiop_probe(%p)\n", pcidev);
|
|
|
|
|
|
|
|
if (pci_enable_device(pcidev)) {
|
|
|
|
printk(KERN_ERR "hptiop: fail to enable pci device\n");
|
|
|
|
return -ENODEV;
|
|
|
|
}
|
|
|
|
|
|
|
|
printk(KERN_INFO "adapter at PCI %d:%d:%d, IRQ %d\n",
|
|
|
|
pcidev->bus->number, pcidev->devfn >> 3, pcidev->devfn & 7,
|
|
|
|
pcidev->irq);
|
|
|
|
|
|
|
|
pci_set_master(pcidev);
|
|
|
|
|
|
|
|
/* Enable 64bit DMA if possible */
	iop_ops = (struct hptiop_adapter_ops *)id->driver_data;
	rc = dma_set_mask(&pcidev->dev,
			DMA_BIT_MASK(iop_ops->hw_dma_bit_mask));
	if (rc)
		rc = dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32));

	if (rc) {
		printk(KERN_ERR "hptiop: failed to set dma_mask\n");
		goto disable_pci_device;
	}
	if (pci_request_regions(pcidev, driver_name)) {
		printk(KERN_ERR "hptiop: pci_request_regions failed\n");
		goto disable_pci_device;
	}

	host = scsi_host_alloc(&driver_template, sizeof(struct hptiop_hba));
	if (!host) {
		printk(KERN_ERR "hptiop: failed to allocate scsi host\n");
		goto free_pci_regions;
	}

	hba = (struct hptiop_hba *)host->hostdata;
	memset(hba, 0, sizeof(struct hptiop_hba));

	hba->ops = iop_ops;
	hba->pcidev = pcidev;
	hba->host = host;
	hba->initialized = 0;
	hba->iopintf_v2 = 0;

	atomic_set(&hba->resetting, 0);
	atomic_set(&hba->reset_count, 0);

	init_waitqueue_head(&hba->reset_wq);
	init_waitqueue_head(&hba->ioctl_wq);

	host->max_lun = 128;
	host->max_channel = 0;
	host->io_port = 0;
	host->n_io_port = 0;
	host->irq = pcidev->irq;

	if (hba->ops->map_pci_bar(hba))
		goto free_scsi_host;

	if (hba->ops->iop_wait_ready(hba, 20000)) {
		printk(KERN_ERR "scsi%d: firmware not ready\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	if (hba->ops->family == MV_BASED_IOP) {
		if (hba->ops->internal_memalloc(hba)) {
			printk(KERN_ERR "scsi%d: internal_memalloc failed\n",
				hba->host->host_no);
			goto unmap_pci_bar;
		}
	}

	if (hba->ops->get_config(hba, &iop_config)) {
		printk(KERN_ERR "scsi%d: get config failed\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	hba->max_requests = min(le32_to_cpu(iop_config.max_requests),
				HPTIOP_MAX_REQUESTS);
	hba->max_devices = le32_to_cpu(iop_config.max_devices);
	hba->max_request_size = le32_to_cpu(iop_config.request_size);
	hba->max_sg_descriptors = le32_to_cpu(iop_config.max_sg_count);
	hba->firmware_version = le32_to_cpu(iop_config.firmware_version);
	hba->interface_version = le32_to_cpu(iop_config.interface_version);
	hba->sdram_size = le32_to_cpu(iop_config.sdram_size);

	if (hba->ops->family == MVFREY_BASED_IOP) {
		if (hba->ops->internal_memalloc(hba)) {
			printk(KERN_ERR "scsi%d: internal_memalloc failed\n",
				hba->host->host_no);
			goto unmap_pci_bar;
		}
		if (hba->ops->reset_comm(hba)) {
			printk(KERN_ERR "scsi%d: reset comm failed\n",
					hba->host->host_no);
			goto unmap_pci_bar;
		}
	}

	if (hba->firmware_version > 0x01020000 ||
			hba->interface_version > 0x01020000)
		hba->iopintf_v2 = 1;

	host->max_sectors = le32_to_cpu(iop_config.data_transfer_length) >> 9;
	host->max_id = le32_to_cpu(iop_config.max_devices);
	host->sg_tablesize = le32_to_cpu(iop_config.max_sg_count);
	host->can_queue = le32_to_cpu(iop_config.max_requests);
	host->cmd_per_lun = le32_to_cpu(iop_config.max_requests);
	host->max_cmd_len = 16;
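
	/*
	 * Size one request: the command header plus the SG table, rounded
	 * up to a 32-byte multiple to match the shifted-address scheme.
	 */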
	req_size = struct_size_t(struct hpt_iop_request_scsi_command,
				 sg_list, hba->max_sg_descriptors);
	if ((req_size & 0x1f) != 0)
		req_size = (req_size + 0x1f) & ~0x1f;

	memset(&set_config, 0, sizeof(struct hpt_iop_request_set_config));
	set_config.iop_id = cpu_to_le32(host->host_no);
	set_config.vbus_id = cpu_to_le16(host->host_no);
	set_config.max_host_request_size = cpu_to_le16(req_size);

	if (hba->ops->set_config(hba, &set_config)) {
		printk(KERN_ERR "scsi%d: set config failed\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	pci_set_drvdata(pcidev, host);

	if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED,
					driver_name, hba)) {
		printk(KERN_ERR "scsi%d: request irq %d failed\n",
					hba->host->host_no, pcidev->irq);
		goto unmap_pci_bar;
	}

	/* Allocate request mem */
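	/*
	 * Each buffer is over-allocated by 0x20 bytes so its start can be
	 * rounded up to a 32-byte boundary; the IOP consumes physical
	 * addresses shifted right by 5 bits.
	 */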
	dprintk("req_size=%d, max_requests=%d\n", req_size, hba->max_requests);

	hba->req_size = req_size;
	hba->req_list = NULL;

	for (i = 0; i < hba->max_requests; i++) {
		start_virt = dma_alloc_coherent(&pcidev->dev,
					hba->req_size + 0x20,
					&start_phy, GFP_KERNEL);

		if (!start_virt) {
			printk(KERN_ERR "scsi%d: failed to allocate request memory\n",
					hba->host->host_no);
			goto free_request_mem;
		}

		hba->dma_coherent[i] = start_virt;
		hba->dma_coherent_handle[i] = start_phy;

		if ((start_phy & 0x1f) != 0) {
			offset = ((start_phy + 0x1f) & ~0x1f) - start_phy;
			start_phy += offset;
			start_virt += offset;
		}
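
		/*
		 * free_req() returns the initialized slot to hba->req_list,
		 * building the initial free list.
		 */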
		hba->reqs[i].next = NULL;
		hba->reqs[i].req_virt = start_virt;
		hba->reqs[i].req_shifted_phy = start_phy >> 5;
		hba->reqs[i].index = i;
		free_req(hba, &hba->reqs[i]);
	}

	/* Enable Interrupt and start background task */
	if (hptiop_initialize_iop(hba))
		goto free_request_mem;

	if (scsi_add_host(host, &pcidev->dev)) {
		printk(KERN_ERR "scsi%d: scsi_add_host failed\n",
					hba->host->host_no);
		goto free_request_mem;
	}

	scsi_scan_host(host);

	dprintk("scsi%d: hptiop_probe succeeded\n", hba->host->host_no);
	return 0;

free_request_mem:
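	/*
	 * Buffers were allocated in index order, so stop at the first
	 * slot that was never filled in.
	 */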
	for (i = 0; i < hba->max_requests; i++) {
		if (hba->dma_coherent[i] && hba->dma_coherent_handle[i])
			dma_free_coherent(&hba->pcidev->dev,
					hba->req_size + 0x20,
					hba->dma_coherent[i],
					hba->dma_coherent_handle[i]);
		else
			break;
	}

	free_irq(hba->pcidev->irq, hba);

unmap_pci_bar:
	hba->ops->internal_memfree(hba);

	hba->ops->unmap_pci_bar(hba);

free_scsi_host:
	scsi_host_put(host);

free_pci_regions:
	pci_release_regions(pcidev);

disable_pci_device:
	pci_disable_device(pcidev);

	dprintk("scsi%d: hptiop_probe failed\n", host ? host->host_no : 0);
	return -ENODEV;
}
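
/*
 * Shutdown path shared by .shutdown and hptiop_remove(): ask the IOP to
 * stop (with a 60 s timeout), then mask all outbound interrupts.
 */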
static void hptiop_shutdown(struct pci_dev *pcidev)
{
	struct Scsi_Host *host = pci_get_drvdata(pcidev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	dprintk("hptiop_shutdown(%p)\n", hba);

	/* stop the iop */
	if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
		printk(KERN_ERR "scsi%d: timeout shutting down the iop\n",
				hba->host->host_no);

	/* disable all outbound interrupts */
	hba->ops->disable_intr(hba);
}
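
/*
 * In each disable_intr variant below, the readl() following a mask
 * update flushes the posted write, so the interrupt really is masked
 * before the function returns.
 */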
static void hptiop_disable_intr_itl(struct hptiop_hba *hba)
{
	u32 int_mask;

	int_mask = readl(&hba->u.itl.iop->outbound_intmask);
	writel(int_mask |
		IOPMU_OUTBOUND_INT_MSG0 | IOPMU_OUTBOUND_INT_POSTQUEUE,
		&hba->u.itl.iop->outbound_intmask);
	readl(&hba->u.itl.iop->outbound_intmask);
}

static void hptiop_disable_intr_mv(struct hptiop_hba *hba)
{
	writel(0, &hba->u.mv.regs->outbound_intmask);
	readl(&hba->u.mv.regs->outbound_intmask);
}

static void hptiop_disable_intr_mvfrey(struct hptiop_hba *hba)
{
	writel(0, &(hba->u.mvfrey.mu->f0_doorbell_enable));
	readl(&(hba->u.mvfrey.mu->f0_doorbell_enable));
	writel(0, &(hba->u.mvfrey.mu->isr_enable));
	readl(&(hba->u.mvfrey.mu->isr_enable));
	writel(0, &(hba->u.mvfrey.mu->pcie_f0_int_enable));
	readl(&(hba->u.mvfrey.mu->pcie_f0_int_enable));
}
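
/* Teardown broadly mirrors hptiop_probe() in reverse order. */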
static void hptiop_remove(struct pci_dev *pcidev)
{
	struct Scsi_Host *host = pci_get_drvdata(pcidev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	u32 i;

	dprintk("scsi%d: hptiop_remove\n", hba->host->host_no);

	scsi_remove_host(host);

	hptiop_shutdown(pcidev);

	free_irq(hba->pcidev->irq, hba);

	for (i = 0; i < hba->max_requests; i++) {
		if (hba->dma_coherent[i] && hba->dma_coherent_handle[i])
			dma_free_coherent(&hba->pcidev->dev,
					hba->req_size + 0x20,
					hba->dma_coherent[i],
					hba->dma_coherent_handle[i]);
		else
			break;
	}

	hba->ops->internal_memfree(hba);

	hba->ops->unmap_pci_bar(hba);

	pci_release_regions(hba->pcidev);
	pci_set_drvdata(hba->pcidev, NULL);
	pci_disable_device(hba->pcidev);

	scsi_host_put(host);
}
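
/*
 * Per-family operation tables; hptiop_id_table below hands the matching
 * table to hptiop_probe() through driver_data.
 */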
static struct hptiop_adapter_ops hptiop_itl_ops = {
	.family = INTEL_BASED_IOP,
	.iop_wait_ready = iop_wait_ready_itl,
	.internal_memalloc = hptiop_internal_memalloc_itl,
	.internal_memfree = hptiop_internal_memfree_itl,
	.map_pci_bar = hptiop_map_pci_bar_itl,
	.unmap_pci_bar = hptiop_unmap_pci_bar_itl,
	.enable_intr = hptiop_enable_intr_itl,
	.disable_intr = hptiop_disable_intr_itl,
	.get_config = iop_get_config_itl,
	.set_config = iop_set_config_itl,
	.iop_intr = iop_intr_itl,
	.post_msg = hptiop_post_msg_itl,
	.post_req = hptiop_post_req_itl,
	.hw_dma_bit_mask = 64,
	.reset_comm = hptiop_reset_comm_itl,
	.host_phy_flag = cpu_to_le64(0),
};

static struct hptiop_adapter_ops hptiop_mv_ops = {
	.family = MV_BASED_IOP,
	.iop_wait_ready = iop_wait_ready_mv,
	.internal_memalloc = hptiop_internal_memalloc_mv,
	.internal_memfree = hptiop_internal_memfree_mv,
	.map_pci_bar = hptiop_map_pci_bar_mv,
	.unmap_pci_bar = hptiop_unmap_pci_bar_mv,
	.enable_intr = hptiop_enable_intr_mv,
	.disable_intr = hptiop_disable_intr_mv,
	.get_config = iop_get_config_mv,
	.set_config = iop_set_config_mv,
	.iop_intr = iop_intr_mv,
	.post_msg = hptiop_post_msg_mv,
	.post_req = hptiop_post_req_mv,
	.hw_dma_bit_mask = 33,
	.reset_comm = hptiop_reset_comm_mv,
	.host_phy_flag = cpu_to_le64(0),
};

static struct hptiop_adapter_ops hptiop_mvfrey_ops = {
	.family = MVFREY_BASED_IOP,
	.iop_wait_ready = iop_wait_ready_mvfrey,
	.internal_memalloc = hptiop_internal_memalloc_mvfrey,
	.internal_memfree = hptiop_internal_memfree_mvfrey,
	.map_pci_bar = hptiop_map_pci_bar_mvfrey,
	.unmap_pci_bar = hptiop_unmap_pci_bar_mvfrey,
	.enable_intr = hptiop_enable_intr_mvfrey,
	.disable_intr = hptiop_disable_intr_mvfrey,
	.get_config = iop_get_config_mvfrey,
	.set_config = iop_set_config_mvfrey,
	.iop_intr = iop_intr_mvfrey,
	.post_msg = hptiop_post_msg_mvfrey,
	.post_req = hptiop_post_req_mvfrey,
	.hw_dma_bit_mask = 64,
	.reset_comm = hptiop_reset_comm_mvfrey,
	.host_phy_flag = cpu_to_le64(1),
};
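
/* All IDs are vendor TTI (HighPoint); driver_data selects the ops table. */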
static struct pci_device_id hptiop_id_table[] = {
	{ PCI_VDEVICE(TTI, 0x3220), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3320), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3510), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3511), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3520), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3521), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3522), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3530), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3540), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3560), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4210), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4211), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4310), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4311), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4320), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4321), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4322), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4400), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops },
	{ PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops },
	{ PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops },
	{ PCI_VDEVICE(TTI, 0x4520), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x4522), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3610), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3611), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3620), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3622), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3640), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3660), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3680), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3690), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{},
};

MODULE_DEVICE_TABLE(pci, hptiop_id_table);

static struct pci_driver hptiop_pci_driver = {
	.name = driver_name,
	.id_table = hptiop_id_table,
	.probe = hptiop_probe,
	.remove = hptiop_remove,
	.shutdown = hptiop_shutdown,
};

static int __init hptiop_module_init(void)
{
	printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver);
	return pci_register_driver(&hptiop_pci_driver);
}

static void __exit hptiop_module_exit(void)
{
	pci_unregister_driver(&hptiop_pci_driver);
}

module_init(hptiop_module_init);
module_exit(hptiop_module_exit);

MODULE_LICENSE("GPL");